Init commit

Still needs documentation
This commit is contained in:
Blizzard Finnegan 2024-11-07 17:00:14 -05:00
parent 5ddf29d660
commit f445800231
Signed by: blizzardfinnegan
GPG key ID: 61C1E13067E0018E
39 changed files with 4976 additions and 2 deletions

View file

@ -1,3 +1,10 @@
# server_config
# Server Config backup
Backups of Server Configuration Information
# HomeAssistant packages
Simply install the latest release of HomeAssistant, and then restore from backup.
If installing from scratch, a list of plugins will be added to this repository in due time.
Backups of Server Configuration Information

View file

@ -0,0 +1,19 @@
# Monitoring Server
Monitoring is often done entirely separately from the primary server configuration, so that it does not interfere with daily usage, and can detect when issues arise on the primary server.
Previously, this has been done with a FreeBSD server, partially for experience's sake, partially for stable and long-term support, partially for its relatively small install requirements.
The `rc.d` directory included here configures system services for the monitoring server, after the packages have been installed. Services then need to be enabled, as documented in the [FreeBSD handbook](https://docs.freebsd.org/en/books/handbook/book/#config-tuning).
Prometheus, InfluxDB, Telegraf, Grafana, and AlertManager have dedicated documentation for being set up, which will be added and/or summarised to this repository and/or its wiki in the future.
## Packages
|Program|Purpose|
|---|---|
|`prometheus`|Data collection backend|
|`influxdb`|Data collection backend|
|`telegraf`|Data collection backend|
|`grafana`|Data visualisation software; requires data collection endpoint(s)|
|`alertmanager`| Used for sending notifications to admin|
|`openssh`| SSH backend, for management purposes|

View file

@ -0,0 +1,42 @@
route:
  # Group alerts that share these labels into one notification.
  group_by: ['alertname','severity']
  # Wait before the first notification of a new group, so alerts that
  # arrive close together are batched.
  group_wait: 30s
  # Minimum interval between notifications for the same group.
  group_interval: 5m
  # Re-notify for a still-firing alert after this long.
  repeat_interval: 1h
  # Default receiver for anything not matched by a child route.
  receiver: 'discord'
  routes:
    # FIX: "reciever" was misspelled; Alertmanager rejects unknown config
    # fields, so both sub-routes would have failed config validation.
    - receiver: 'email'
      matchers:
        - severity="info"
    - receiver: 'all'
      matchers:
        - severity="critical"

receivers:
  # 'all' fans out to both Discord and email.
  - name: 'all'
    discord_configs:
      - webhook_url: DISCORD_WEBHOOK_URL
    email_configs:
      - to: blizzardfinnegan@gmail.com
        from: blizzardfinnegan@gmail.com
        smarthost: smtp.gmail.com:587
        auth_username: "blizzardfinnegan@gmail.com"
        auth_identity: "blizzardfinnegan@gmail.com"
        auth_password: APP_PASSWORD
  - name: 'discord'
    discord_configs:
      - webhook_url: DISCORD_WEBHOOK_URL
  - name: "email"
    email_configs:
      - to: blizzardfinnegan@gmail.com
        from: blizzardfinnegan@gmail.com
        smarthost: smtp.gmail.com:587
        auth_username: "blizzardfinnegan@gmail.com"
        auth_identity: "blizzardfinnegan@gmail.com"
        auth_password: APP_PASSWORD

inhibit_rules:
  # Suppress 'warning' alerts when a 'critical' alert with identical
  # alertname/dev/instance labels is already firing.
  - source_match:
      severity: 'critical'
    target_match:
      severity: 'warning'
    equal: ['alertname', 'dev', 'instance']

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,596 @@
### Welcome to the InfluxDB configuration file.
# The values in this file override the default values used by the system if
# a config option is not specified. The commented out lines are the configuration
# field and the default value used. Uncommenting a line and changing the value
# will change the value used at runtime when the process is restarted.
# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com
# The data includes a random ID, os, arch, version, the number of series and other
# usage data. No data from user databases is ever transmitted.
# Change this option to true to disable reporting.
# reporting-disabled = false
# Bind address to use for the RPC service for backup and restore.
# bind-address = "127.0.0.1:8088"
###
### [meta]
###
### Controls the parameters for the Raft consensus group that stores metadata
### about the InfluxDB cluster.
###
[meta]
# Where the metadata/raft database is stored
dir = "/var/db/influxdb/meta"
# Automatically create a default retention policy when creating a database.
# retention-autocreate = true
# If log messages are printed for the meta service
# logging-enabled = true
###
### [data]
###
### Controls where the actual shard data for InfluxDB lives and how it is
### flushed from the WAL. "dir" may need to be changed to a suitable place
### for your system, but the WAL settings are an advanced configuration. The
### defaults should work for most systems.
###
[data]
# The directory where the TSM storage engine stores TSM files.
dir = "/var/db/influxdb/data"
# The directory where the TSM storage engine stores WAL files.
wal-dir = "/var/db/influxdb/wal"
# The amount of time that a write will wait before fsyncing. A duration
# greater than 0 can be used to batch up multiple fsync calls. This is useful for slower
# disks or when WAL write contention is seen. A value of 0s fsyncs every write to the WAL.
# Values in the range of 0-100ms are recommended for non-SSD disks.
# wal-fsync-delay = "0s"
# The type of shard index to use for new shards. The default is an in-memory index that is
# recreated at startup. A value of "tsi1" will use a disk based index that supports higher
# cardinality datasets.
# index-version = "inmem"
# Trace logging provides more verbose output around the tsm engine. Turning
# this on can provide more useful output for debugging tsm engine issues.
# trace-logging-enabled = false
# Whether queries should be logged before execution. Very useful for troubleshooting, but will
# log any sensitive data contained within a query.
# query-log-enabled = true
# Provides more error checking. For example, SELECT INTO will err out inserting an +/-Inf value
# rather than silently failing.
# strict-error-handling = false
# Validates incoming writes to ensure keys only have valid unicode characters.
# This setting will incur a small overhead because every key must be checked.
# validate-keys = false
# Settings for the TSM engine
# CacheMaxMemorySize is the maximum size a shard's cache can
# reach before it starts rejecting writes.
# Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
# Values without a size suffix are in bytes.
# cache-max-memory-size = "1g"
# CacheSnapshotMemorySize is the size at which the engine will
# snapshot the cache and write it to a TSM file, freeing up memory
# Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
# Values without a size suffix are in bytes.
# cache-snapshot-memory-size = "25m"
# CacheSnapshotWriteColdDuration is the length of time at
# which the engine will snapshot the cache and write it to
# a new TSM file if the shard hasn't received writes or deletes
# cache-snapshot-write-cold-duration = "10m"
# CompactFullWriteColdDuration is the duration at which the engine
# will compact all TSM files in a shard if it hasn't received a
# write or delete
# compact-full-write-cold-duration = "4h"
# The maximum number of concurrent full and level compactions that can run at one time. A
# value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime. Any number greater
# than 0 limits compactions to that value. This setting does not apply
# to cache snapshotting.
# max-concurrent-compactions = 0
# CompactThroughput is the rate limit in bytes per second that we
# will allow TSM compactions to write to disk. Note that short bursts are allowed
# to happen at a possibly larger value, set by CompactThroughputBurst
# compact-throughput = "48m"
# CompactThroughputBurst is the rate limit in bytes per second that we
# will allow TSM compactions to write to disk.
# compact-throughput-burst = "48m"
# If true, then the mmap advise value MADV_WILLNEED will be provided to the kernel with respect to
# TSM files. This setting has been found to be problematic on some kernels, and defaults to off.
# It might help users who have slow disks in some cases.
# tsm-use-madv-willneed = false
# Settings for the inmem index
# The maximum series allowed per database before writes are dropped. This limit can prevent
# high cardinality issues at the database level. This limit can be disabled by setting it to
# 0.
# max-series-per-database = 1000000
# The maximum number of tag values per tag that are allowed before writes are dropped. This limit
# can prevent high cardinality tag values from being written to a measurement. This limit can be
# disabled by setting it to 0.
# max-values-per-tag = 100000
# Settings for the tsi1 index
# The threshold, in bytes, when an index write-ahead log file will compact
# into an index file. Lower sizes will cause log files to be compacted more
# quickly and result in lower heap usage at the expense of write throughput.
# Higher sizes will be compacted less frequently, store more series in-memory,
# and provide higher write throughput.
# Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k).
# Values without a size suffix are in bytes.
# max-index-log-file-size = "1m"
# The size of the internal cache used in the TSI index to store previously
# calculated series results. Cached results will be returned quickly from the cache rather
# than needing to be recalculated when a subsequent query with a matching tag key/value
# predicate is executed. Setting this value to 0 will disable the cache, which may
# lead to query performance issues.
# This value should only be increased if it is known that the set of regularly used
# tag key/value predicates across all measurements for a database is larger than 100. An
# increase in cache size may lead to an increase in heap usage.
series-id-set-cache-size = 100
###
### [coordinator]
###
### Controls the clustering service configuration.
###
[coordinator]
# The default time a write request will wait until a "timeout" error is returned to the caller.
# write-timeout = "10s"
# The maximum number of concurrent queries allowed to be executing at one time. If a query is
# executed and exceeds this limit, an error is returned to the caller. This limit can be disabled
# by setting it to 0.
# max-concurrent-queries = 0
# The maximum time a query is allowed to execute before being killed by the system. This limit
# can help prevent runaway queries. Setting the value to 0 disables the limit.
# query-timeout = "0s"
# The time threshold when a query will be logged as a slow query. This limit can be set to help
# discover slow or resource intensive queries. Setting the value to 0 disables the slow query logging.
# log-queries-after = "0s"
# The maximum number of points a SELECT can process. A value of 0 will make
# the maximum point count unlimited. This will only be checked every second so queries will not
# be aborted immediately when hitting the limit.
# max-select-point = 0
# The maximum number of series a SELECT can run. A value of 0 will make the maximum series
# count unlimited.
# max-select-series = 0
# The maximum number of group-by time buckets a SELECT can create. A value of zero will make the maximum
# number of buckets unlimited.
# max-select-buckets = 0
###
### [retention]
###
### Controls the enforcement of retention policies for evicting old data.
###
[retention]
# Determines whether retention policy enforcement is enabled.
# enabled = true
# The interval of time when retention policy enforcement checks run.
# check-interval = "30m"
###
### [shard-precreation]
###
### Controls the precreation of shards, so they are available before data arrives.
### Only shards that, after creation, will have both a start- and end-time in the
### future, will ever be created. Shards are never precreated that would be wholly
### or partially in the past.
[shard-precreation]
# Determines whether shard pre-creation service is enabled.
# enabled = true
# The interval of time when the check to pre-create new shards runs.
# check-interval = "10m"
# The default period ahead of the endtime of a shard group that its successor
# group is created.
# advance-period = "30m"
###
### Controls the system self-monitoring, statistics and diagnostics.
###
### The internal database for monitoring data is created automatically
### if it does not already exist. The target retention within this database
### is called 'monitor' and is also created with a retention period of 7 days
### and a replication factor of 1, if it does not exist. In all cases
### this retention policy is configured as the default for the database.
[monitor]
# Whether to record statistics internally.
store-enabled = true
# The destination database for recorded statistics
store-database = "_internal"
# The interval at which to record statistics
store-interval = "10s"
###
### [http]
###
### Controls how the HTTP endpoints are configured. These are the primary
### mechanism for getting data into and out of InfluxDB.
###
[http]
# Determines whether HTTP endpoint is enabled.
# enabled = true
# Determines whether the Flux query endpoint is enabled.
# flux-enabled = false
# Determines whether the Flux query logging is enabled.
# flux-log-enabled = false
# The bind address used by the HTTP service.
# bind-address = ":8086"
# Determines whether user authentication is enabled over HTTP/HTTPS.
# auth-enabled = false
# The default realm sent back when issuing a basic auth challenge.
# realm = "InfluxDB"
# Determines whether HTTP request logging is enabled.
# log-enabled = true
# Determines whether the HTTP write request logs should be suppressed when the log is enabled.
# suppress-write-log = false
# When HTTP request logging is enabled, this option specifies the path where
# log entries should be written. If unspecified, the default is to write to stderr, which
# intermingles HTTP logs with internal InfluxDB logging.
#
# If influxd is unable to access the specified path, it will log an error and fall back to writing
# the request log to stderr.
# access-log-path = ""
# Filters which requests should be logged. Each filter is of the pattern NNN, NNX, or NXX where N is
# a number and X is a wildcard for any number. To filter all 5xx responses, use the string 5xx.
# If multiple filters are used, then only one has to match. The default is to have no filters which
# will cause every request to be printed.
# access-log-status-filters = []
# Determines whether detailed write logging is enabled.
# write-tracing = false
# Determines whether the pprof endpoint is enabled. This endpoint is used for
# troubleshooting and monitoring.
# pprof-enabled = true
# Enables authentication on pprof endpoints. Users will need admin permissions
# to access the pprof endpoints when this setting is enabled. This setting has
# no effect if either auth-enabled or pprof-enabled are set to false.
# pprof-auth-enabled = false
# Enables a pprof endpoint that binds to localhost:6060 immediately on startup.
# This is only needed to debug startup issues.
# debug-pprof-enabled = false
# Enables authentication on the /ping, /metrics, and deprecated /status
# endpoints. This setting has no effect if auth-enabled is set to false.
# ping-auth-enabled = false
# Determines whether HTTPS is enabled.
# https-enabled = false
# The SSL certificate to use when HTTPS is enabled.
# https-certificate = "/etc/ssl/influxdb.pem"
# Use a separate private key location.
# https-private-key = ""
# The JWT auth shared secret to validate requests using JSON web tokens.
# shared-secret = ""
# The default chunk size for result sets that should be chunked.
# max-row-limit = 0
# The maximum number of HTTP connections that may be open at once. New connections that
# would exceed this limit are dropped. Setting this value to 0 disables the limit.
# max-connection-limit = 0
# Enable http service over unix domain socket
# unix-socket-enabled = false
# The path of the unix domain socket.
# bind-socket = "/var/run/influxdb.sock"
# The maximum size of a client request body, in bytes. Setting this value to 0 disables the limit.
# max-body-size = 25000000
# The maximum number of writes processed concurrently.
# Setting this to 0 disables the limit.
# max-concurrent-write-limit = 0
# The maximum number of writes queued for processing.
# Setting this to 0 disables the limit.
# max-enqueued-write-limit = 0
# The maximum duration for a write to wait in the queue to be processed.
# Setting this to 0 or setting max-concurrent-write-limit to 0 disables the limit.
# enqueued-write-timeout = 0
# User supplied HTTP response headers
#
# [http.headers]
# X-Header-1 = "Header Value 1"
# X-Header-2 = "Header Value 2"
###
### [logging]
###
### Controls how the logger emits logs to the output.
###
[logging]
# Determines which log encoder to use for logs. Available options
# are auto, logfmt, and json. auto will use a more user-friendly
# output format if the output terminal is a TTY, but the format is not as
# easily machine-readable. When the output is a non-TTY, auto will use
# logfmt.
# format = "auto"
# Determines which level of logs will be emitted. The available levels
# are error, warn, info, and debug. Logs that are equal to or above the
# specified level will be emitted.
# level = "info"
# Suppresses the logo output that is printed when the program is started.
# The logo is always suppressed if STDOUT is not a TTY.
# suppress-logo = false
###
### [subscriber]
###
### Controls the subscriptions, which can be used to fork a copy of all data
### received by the InfluxDB host.
###
[subscriber]
# Determines whether the subscriber service is enabled.
# enabled = true
# The default timeout for HTTP writes to subscribers.
# http-timeout = "30s"
# Allows insecure HTTPS connections to subscribers. This is useful when testing with self-
# signed certificates.
# insecure-skip-verify = false
# The path to the PEM encoded CA certs file. If the empty string, the default system certs will be used
# ca-certs = ""
# The number of writer goroutines processing the write channel.
# write-concurrency = 40
# The number of in-flight writes buffered in the write channel.
# write-buffer-size = 1000
###
### [[graphite]]
###
### Controls one or many listeners for Graphite data.
###
[[graphite]]
# Determines whether the graphite endpoint is enabled.
# enabled = false
# database = "graphite"
# retention-policy = ""
# bind-address = ":2003"
# protocol = "tcp"
# consistency-level = "one"
# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
# Flush if this many points get buffered
# batch-size = 5000
# number of batches that may be pending in memory
# batch-pending = 10
# Flush at least this often even if we haven't hit buffer limit
# batch-timeout = "1s"
# UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
# udp-read-buffer = 0
### This string joins multiple matching 'measurement' values providing more control over the final measurement name.
# separator = "."
### Default tags that will be added to all metrics. These can be overridden at the template level
### or by tags extracted from metric
# tags = ["region=us-east", "zone=1c"]
### Each template line requires a template pattern. It can have an optional
### filter before the template and separated by spaces. It can also have optional extra
### tags following the template. Multiple tags should be separated by commas and no spaces
### similar to the line protocol format. There can be only one default template.
# templates = [
# "*.app env.service.resource.measurement",
# # Default template
# "server.*",
# ]
###
### [collectd]
###
### Controls one or many listeners for collectd data.
###
[[collectd]]
# enabled = false
# bind-address = ":25826"
# database = "collectd"
# retention-policy = ""
#
# The collectd service supports either scanning a directory for multiple types
# db files, or specifying a single db file.
# typesdb = "/usr/local/share/collectd"
#
# security-level = "none"
# auth-file = "/etc/collectd/auth_file"
# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
# Flush if this many points get buffered
# batch-size = 5000
# Number of batches that may be pending in memory
# batch-pending = 10
# Flush at least this often even if we haven't hit buffer limit
# batch-timeout = "10s"
# UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
# read-buffer = 0
# Multi-value plugins can be handled two ways.
# "split" will parse and store the multi-value plugin data into separate measurements
# "join" will parse and store the multi-value plugin as a single multi-value measurement.
# "split" is the default behavior for backward compatibility with previous versions of influxdb.
# parse-multivalue-plugin = "split"
###
### [opentsdb]
###
### Controls one or many listeners for OpenTSDB data.
###
[[opentsdb]]
# enabled = false
# bind-address = ":4242"
# database = "opentsdb"
# retention-policy = ""
# consistency-level = "one"
# tls-enabled = false
# certificate= "/etc/ssl/influxdb.pem"
# Log an error for every malformed point.
# log-point-errors = true
# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Only points
# metrics received over the telnet protocol undergo batching.
# Flush if this many points get buffered
# batch-size = 1000
# Number of batches that may be pending in memory
# batch-pending = 5
# Flush at least this often even if we haven't hit buffer limit
# batch-timeout = "1s"
###
### [[udp]]
###
### Controls the listeners for InfluxDB line protocol data via UDP.
###
[[udp]]
# enabled = false
# bind-address = ":8089"
# database = "udp"
# retention-policy = ""
# InfluxDB precision for timestamps on received points ("" or "n", "u", "ms", "s", "m", "h")
# precision = ""
# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.
# Flush if this many points get buffered
# batch-size = 5000
# Number of batches that may be pending in memory
# batch-pending = 10
# Will flush at least this often even if we haven't hit buffer limit
# batch-timeout = "1s"
# UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
# read-buffer = 0
###
### [continuous_queries]
###
### Controls how continuous queries are run within InfluxDB.
###
[continuous_queries]
# Determines whether the continuous query service is enabled.
# enabled = true
# Controls whether queries are logged when executed by the CQ service.
# log-enabled = true
# Controls whether queries are logged to the self-monitoring data store.
# query-stats-enabled = false
# interval for how often continuous queries will be checked if they need to run
# run-interval = "1s"
###
### [tls]
###
### Global configuration settings for TLS in InfluxDB.
###
[tls]
# Determines the available set of cipher suites. See https://golang.org/pkg/crypto/tls/#pkg-constants
# for a list of available ciphers, which depends on the version of Go (use the query
# SHOW DIAGNOSTICS to see the version of Go used to build InfluxDB). If not specified, uses
# the default settings from Go's crypto/tls package.
# ciphers = [
# "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
# "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
# ]
# Minimum version of the tls protocol that will be negotiated. If not specified, uses the
# default settings from Go's crypto/tls package.
# min-version = "tls1.2"
# Maximum version of the tls protocol that will be negotiated. If not specified, uses the
# default settings from Go's crypto/tls package.
# max-version = "tls1.3"

View file

@ -0,0 +1,60 @@
# Alerting rules, grouped by the system they watch. Indentation restored:
# the flattened form was not valid YAML rule structure.
groups:
  - name: primaryServer
    rules:
      # Fires when any scrape target stops answering.
      - alert: InstanceDown
        expr: up == 0
        for: 30s
        labels:
          severity: critical
        annotations:
          summary: "Instance {{ $labels.instance }} down!"
          description: "{{ $labels.instance }} of job {{ $labels.job }} has been down for 30 seconds. Please respond immediately."
      # Filesystem usage above 70% (i.e. under 30% free) on the primary server.
      - alert: LowRemainingStorage
        expr: (node_filesystem_size_bytes{instance="PRIMARY_SERVER:9100",fstype=~"btrfs|vfat|zfs|ext4"} - node_filesystem_avail_bytes{instance="PRIMARY_SERVER:9100",fstype=~"btrfs|vfat|zfs|ext4"}) / node_filesystem_size_bytes{instance="PRIMARY_SERVER:9100"} > 0.7
        labels:
          severity: warning
        annotations:
          summary: "Low storage on {{ $labels.instance }}"
          description: "{{ $labels.device }} on, mounted at {{ $labels.mountpoint }}, has less than 30% free. Consider purchasing more storage, or purging storage device."
      # Filesystem usage above 80% (i.e. under 20% free).
      - alert: VeryLowRemainingStorage
        expr: (node_filesystem_size_bytes{instance="PRIMARY_SERVER:9100",fstype=~"btrfs|vfat|zfs|ext4"} - node_filesystem_avail_bytes{instance="PRIMARY_SERVER:9100",fstype=~"btrfs|vfat|zfs|ext4"}) / node_filesystem_size_bytes{instance="PRIMARY_SERVER:9100"} > 0.8
        labels:
          severity: critical
        annotations:
          summary: "Very low storage on {{ $labels.instance }}"
          description: "{{ $labels.device }} on, mounted at {{ $labels.mountpoint }}, has less than 20% free. Expect performance issues. Purchase more storage immediately, and/or purge storage device of excess files."
      # Any container engine reporting paused/stopped containers.
      - alert: erroredContainer
        expr: engine_daemon_container_states_containers{state=~"paused|stopped"} > 0
        labels:
          severity: warning
        annotations:
          summary: "Container(s) errored out!"
          description: "Check primary server immediately."
  - name: HomeAssistant
    rules:
      - alert: IoTLowBattery
        expr: homeassistant_sensor_battery_percent{entity=~".*airsensor.*"} < 50
        labels:
          severity: info
        annotations:
          summary: "Air sensor {{ $labels.entity }} has low battery."
          description: "{{ $labels.entity }} battery is currently under 50%. Please consider replacing battery."
      - alert: IoTVeryLowBattery
        expr: homeassistant_sensor_battery_percent{entity=~".*airsensor.*"} < 25
        labels:
          severity: warning
        annotations:
          summary: "Air sensor {{ $labels.entity }} has low battery."
          description: "{{ $labels.entity }} battery is currently under 25%. Replace battery."
      # HomeAssistant exposes pending add-on/core updates as a gauge.
      - alert: UpdateAvailable
        expr: homeassistant_update_state > 0
        labels:
          severity: info
        annotations:
          summary: "{{ $labels.friendly_name }} Available!"
          description: "Please update the appropriate HomeAssistant add-on/module at your earliest convenience."

View file

@ -0,0 +1,53 @@
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - "localhost:9093"

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  - "prometheus-rules.yml"
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: "prometheus"
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ["localhost:9090"]
  # node_exporter on every monitored host. Placeholders such as
  # PRIMARY_SERVER are quoted so the host:port scalars stay plain strings.
  - job_name: "node_exporter"
    static_configs:
      - targets:
          - "HOME_ASSISTANT_IP:9100"
          - "localhost:9100" #Local Prometheus exporter
          - "PRIMARY_SERVER:9100"
  - job_name: "container_mgmt"
    static_configs:
      - targets:
          - "PRIMARY_SERVER:CADVISOR_PORT"
  # NOTE(review): DAEMON_IP looks like it is used as a port here — confirm
  # the placeholder name against the deployment notes.
  - job_name: "engine_daemon"
    static_configs:
      - targets: ["PRIMARY_SERVER:DAEMON_IP"]
  - job_name: "home_assistant"
    static_configs:
      - targets: ["HOME_ASSISTANT_IP:8123"]
    metrics_path: /api/prometheus
    # NOTE(review): `bearer_token` is deprecated in recent Prometheus in
    # favor of `authorization: credentials:` — kept as-is; confirm the
    # running Prometheus version before migrating.
    bearer_token: "FILLER TEXT"

View file

@ -0,0 +1,67 @@
#!/bin/sh
# PROVIDE: alertmanager
# REQUIRE: LOGIN
# KEYWORD: shutdown
#
# Add the following lines to /etc/rc.conf.local or /etc/rc.conf
# to enable this service:
#
# alertmanager_enable (bool): Set to NO by default
# Set it to YES to enable alertmanager
# alertmanager_user (string): Set user to run alertmanager
# Default is "alertmanager"
# alertmanager_group (string): Set group to run alertmanager
# Default is "alertmanager"
# alertmanager_config (string): The configuration file
# Default is "/usr/local/etc/alertmanager/alertmanager.yml"
# alertmanager_data_dir (string): The data/storage directory
# Default is "/var/db/alertmanager"
# alertmanager_log_file (string): Set file that alertmanager will log to
# Default is "/var/log/alertmanager.log"
# alertmanager_args (string): Set additional command line arguments
# Default is ""
. /etc/rc.subr

name=alertmanager
rcvar=alertmanager_enable

# FIX: load_rc_config was called a second time just before run_rc_command;
# the duplicate re-read rc.conf for no effect and is removed. One call,
# before the defaults below, matches the standard rc.d template.
load_rc_config $name

: ${alertmanager_enable:="NO"}
: ${alertmanager_user:="alertmanager"}
: ${alertmanager_group:="alertmanager"}
: ${alertmanager_config:="/usr/local/etc/alertmanager/alertmanager.yml"}
: ${alertmanager_data_dir:="/var/db/alertmanager"}
: ${alertmanager_log_file:="/var/log/alertmanager.log"}
: ${alertmanager_args:=""}

pidfile=/var/run/alertmanager.pid
required_files="${alertmanager_config}"

# Run alertmanager under daemon(8): -o redirects output to the log file,
# -p records the child pid so rc.subr can signal it directly.
command="/usr/sbin/daemon"
procname="/usr/local/bin/alertmanager"
# SIGHUP makes alertmanager reload its configuration in place.
sig_reload=HUP
extra_commands="reload"
command_args="-o ${alertmanager_log_file} -p ${pidfile} ${procname} \
--config.file=${alertmanager_config} \
--storage.path=${alertmanager_data_dir} \
${alertmanager_args}"
start_precmd=alertmanager_startprecmd

# Pre-create the pidfile, log file, and data dir with ownership the
# unprivileged alertmanager user can write to.
alertmanager_startprecmd()
{
if [ ! -e ${pidfile} ]; then
install -o ${alertmanager_user} -g ${alertmanager_group} /dev/null ${pidfile};
fi
if [ ! -f "${alertmanager_log_file}" ]; then
install -o ${alertmanager_user} -g ${alertmanager_group} -m 640 /dev/null ${alertmanager_log_file};
fi
if [ ! -d ${alertmanager_data_dir} ]; then
install -d -o ${alertmanager_user} -g ${alertmanager_group} -m 750 ${alertmanager_data_dir}
fi
}

run_rc_command "$1"

View file

@ -0,0 +1,30 @@
#! /bin/sh
# PROVIDE: git_daemon
# REQUIRE: DAEMON
# KEYWORD: shutdown
#
# Add the following lines to /etc/rc.conf to enable git_daemon:
#
#git_daemon_enable="YES"
. /etc/rc.subr
name="git_daemon"
rcvar="git_daemon_enable"
# Read rc.conf overrides before applying the defaults below.
load_rc_config $name
# git_daemon_user/_group are consumed by rc.subr's run_rc_command to run
# the daemon unprivileged; git_daemon_flags is appended automatically as
# ${name}_flags. Neither needs to appear in command_args explicitly.
: ${git_daemon_user:=git_daemon}
: ${git_daemon_group:=git_daemon}
: ${git_daemon_enable:=NO}
# Base path whose repositories git-daemon exports.
: ${git_daemon_directory:=/usr/local/git}
: ${git_daemon_flags:=--syslog --reuseaddr --detach}
command="/usr/local/libexec/git-core/git-daemon"
command_args="${git_daemon_directory}"
# git-daemon execs sibling git-core helpers; make sure they are on PATH.
PATH="${PATH}:/usr/local/libexec/git-core"
run_rc_command "$1"

78
monitoringServer/rc.d/grafana Executable file
View file

@ -0,0 +1,78 @@
#!/bin/sh
# PROVIDE: grafana
# REQUIRE: LOGIN
# KEYWORD: shutdown
# Add the following lines to /etc/rc.conf to enable grafana
# grafana_enable="YES"
#
# grafana_enable (bool):
# Set it to YES to enable grafana
# Set to NO by default
# grafana_user (string):
# Set user that grafana will run under
# Default is "grafana"
# grafana_group (string):
# Set group that own grafana files
# Default is "grafana"
# grafana_config (string)
# Set full path to config file
# Default is "/usr/local/etc/grafana/grafana.ini"
# grafana_homepath (string)
# Set full path to homepath dir
# Default is "/usr/local/share/grafana"
# grafana_syslog_output_enable (bool)
# Set it to YES to enable stdout/stderr syslog output
# Set to NO by default
# grafana_syslog_output_tag (str)
# Set syslog tag if syslog enabled
# Default is "grafana"
# grafana_args (string)
# Set additional command line arguments
# Default is ""
. /etc/rc.subr
name=grafana
rcvar=grafana_enable
# Read rc.conf overrides before applying the defaults below.
load_rc_config $name
: ${grafana_enable:="NO"}
: ${grafana_user:="grafana"}
: ${grafana_group:="grafana"}
: ${grafana_config:="/usr/local/etc/grafana/grafana.ini"}
: ${grafana_homepath:="/usr/local/share/grafana"}
: ${grafana_syslog_output_enable:="NO"}
# When syslog output is requested, build the -T (tag) flag for daemon(8),
# using the custom tag if one was set, else the service name.
if checkyesno grafana_syslog_output_enable; then
if [ -n "${grafana_syslog_output_tag}" ]; then
grafana_syslog_output_flags="-T ${grafana_syslog_output_tag}"
else
grafana_syslog_output_flags="-T ${name}"
fi
fi
pidfile="/var/run/${name}/${name}.pid"
required_files="${grafana_config}"
procname="/usr/local/bin/grafana"
# grafana runs supervised under daemon(8); -p writes the pidfile,
# -t sets the process title, and the optional -T flags enable syslog.
command="/usr/sbin/daemon"
command_args="-f ${grafana_syslog_output_flags} -p ${pidfile} -t ${name} \
/usr/bin/env ${grafana_env} ${procname} server \
-config=${grafana_config} \
-homepath=${grafana_homepath} \
${grafana_args}"
# Grafana expects to be started from its homepath (static assets etc.).
grafana_chdir="${grafana_homepath}"
start_precmd="grafana_start_precmd"
# Create the per-service runtime dir so the unprivileged grafana user
# can write its pidfile.
grafana_start_precmd()
{
if [ ! -d "/var/run/${name}" ]; then
install -d -m 0750 -o ${grafana_user} -g ${grafana_group} "/var/run/${name}"
fi
}
run_rc_command "$1"

67
monitoringServer/rc.d/influxd Executable file
View file

@ -0,0 +1,67 @@
#!/bin/sh

# PROVIDE: influxd
# REQUIRE: DAEMON NETWORKING
# BEFORE: LOGIN
# KEYWORD: shutdown

# Add the following lines to /etc/rc.conf to enable influxdb:
# influxd_enable="YES"
#
# influxd_enable (bool): Set to YES to enable influxd
# Default: NO
# influxd_conf (str): influxd configuration file
# Default: ${PREFIX}/etc/influxd.conf
# influxd_user (str): influxd daemon user
# Default: influxd
# influxd_group (str): influxd daemon group
# Default: influxd
# influxd_flags (str): Extra flags passed to influxd
#
# influxd_facility (str): Syslog facility to use
# Default: daemon
# influxd_priority (str): Syslog priority to use
# Default: info

. /etc/rc.subr

name="influxd"
rcvar=influxd_enable

load_rc_config $name

: ${influxd_enable:="NO"}
: ${influxd_user:="influxd"}
: ${influxd_group:="influxd"}
: ${influxd_flags:=""}
: ${influxd_facility:="daemon"}
: ${influxd_priority:="info"}
: ${influxd_conf:="/usr/local/etc/${name}.conf"}
: ${influxd_options:="${influxd_flags} -config=${influxd_conf}"}

# daemon
influxd_pidfile="/var/run/influxdb/${name}.pid"
procname="/usr/local/bin/${name}"
command=/usr/sbin/daemon

start_precmd="influxd_precmd"
start_cmd="influxd_startcmd_daemon"

# Create the runtime directory holding the pidfile, owned by the daemon
# user so it remains writable after privileges are dropped.
influxd_precmd()
{
	install -d -o ${influxd_user} /var/run/influxdb/
}

# Start influxd under daemon(8) with output sent to syslog (-S) using the
# configured priority (-s) and facility (-l).
influxd_startcmd_daemon()
{
	echo "Starting ${name}."
	/usr/sbin/daemon -c -p ${influxd_pidfile} -S -s ${influxd_priority} -l ${influxd_facility} -T ${name} \
		-u ${influxd_user} ${procname} ${influxd_options}
}

# Alternative start method that pipes output through logger(1) instead of
# daemon(8)'s built-in syslog support. Not used by default: start_cmd
# above points at influxd_startcmd_daemon.
influxd_startcmd_logger()
{
	echo "Starting ${name}."
	/usr/sbin/daemon -c -p ${influxd_pidfile} -u ${influxd_user} /bin/sh -c "${procname} ${influxd_options} 2>&1 \
		| /usr/bin/logger -t ${name} -p ${influxd_facility}.${influxd_priority}"
}

run_rc_command "$1"

View file

@ -0,0 +1,68 @@
#!/bin/sh

# PROVIDE: node_exporter
# REQUIRE: LOGIN
# KEYWORD: shutdown
#
# Add the following lines to /etc/rc.conf.local or /etc/rc.conf
# to enable this service:
#
# node_exporter_enable (bool): Set to NO by default.
# Set it to YES to enable node_exporter.
# node_exporter_user (string): Set user that node_exporter will run under
# Default is "nobody".
# node_exporter_group (string): Set group that node_exporter will run under
# Default is "nobody".
# node_exporter_args (string): Set extra arguments to pass to node_exporter
# Default is "".
# node_exporter_listen_address (string): Set ip:port that node_exporter will listen on
# Default is ":9100".
# node_exporter_textfile_dir (string): Set directory that node_exporter will watch
# Default is "/var/tmp/node_exporter".

. /etc/rc.subr

name=node_exporter
rcvar=node_exporter_enable

# Load rc.conf overrides once, before applying defaults below.
# (A second, redundant load_rc_config call at the bottom of this file
# has been removed.)
load_rc_config $name

: ${node_exporter_enable:="NO"}
: ${node_exporter_user:="nobody"}
: ${node_exporter_group:="nobody"}
: ${node_exporter_args:=""}
: ${node_exporter_listen_address:=":9100"}
: ${node_exporter_textfile_dir:="/var/tmp/node_exporter"}

pidfile=/var/run/node_exporter.pid
command="/usr/sbin/daemon"
procname="/usr/local/bin/node_exporter"
command_args="-f -p ${pidfile} -T ${name} \
	/usr/bin/env ${procname} \
	--web.listen-address=${node_exporter_listen_address} \
	--collector.textfile.directory=${node_exporter_textfile_dir} \
	${node_exporter_args}"

start_precmd=node_exporter_startprecmd

# Pre-create the pidfile and the textfile-collector directory with
# ownership that lets the (unprivileged) daemon user write to them.
node_exporter_startprecmd()
{
	if [ ! -e ${pidfile} ]; then
		install \
			-o ${node_exporter_user} \
			-g ${node_exporter_group} \
			/dev/null ${pidfile};
	fi

	if [ ! -d ${node_exporter_textfile_dir} ]; then
		install \
			-d \
			-o ${node_exporter_user} \
			-g ${node_exporter_group} \
			-m 1755 \
			${node_exporter_textfile_dir}
	fi
}

run_rc_command "$1"

141
monitoringServer/rc.d/prometheus Executable file
View file

@ -0,0 +1,141 @@
#!/bin/sh

# PROVIDE: prometheus
# REQUIRE: LOGIN
# KEYWORD: shutdown
#
# Add the following lines to /etc/rc.conf.local or /etc/rc.conf
# to enable this service:
#
# prometheus_enable (bool)
# Set it to YES to enable prometheus
# Set to NO by default
# prometheus_user (string)
# Set user that prometheus will run under
# Default is "prometheus"
# prometheus_group (string)
# Set group that own prometheus files
# Default is "prometheus"
# prometheus_config (string)
# Set full path to config file
# Default is "/usr/local/etc/prometheus.yml"
# prometheus_pidfile (string)
# Set full path to pid file
# Default is "/var/run/prometheus.pid"
# prometheus_syslog_output_enable (bool)
# Set it to NO to disable syslog output
# Set to YES by default
# prometheus_syslog_output_tag (str)
# Set syslog tag if syslog enabled
# Default is "prometheus"
# prometheus_syslog_output_priority (string)
# Set syslog priority if syslog enabled
# Default is "info"
# prometheus_syslog_output_facility (string)
# Set syslog facility if syslog enabled
# Default is "daemon"
# prometheus_consoles_dir (string)
# Set dir that contains Prometheus consoles
# Default is "/usr/local/share/prometheus/consoles"
# prometheus_console_libraries_dir (string)
# Set dir containing Prometheus console libraries
# Default is "/usr/local/share/prometheus/console_libraries"
# prometheus_data_dir (string)
# Set dir to run prometheus in
# Default is "/var/db/prometheus"
# prometheus_loglevel (string)
# Set one of [debug, info, warn, error]
# Default is "info"
# prometheus_logformat (string)
# Set one of [logfmt, json]
# Default is "logfmt"
# prometheus_env (string)
# Set environment variables used with prometheus
# Default is ""
# prometheus_args (string)
# Set additional command line arguments
# Default is ""

. /etc/rc.subr

name=prometheus
rcvar=prometheus_enable

load_rc_config $name

: ${prometheus_enable:="NO"}
: ${prometheus_user:="prometheus"}
: ${prometheus_group:="prometheus"}
: ${prometheus_config:="/usr/local/etc/prometheus.yml"}
: ${prometheus_pidfile:="/var/run/prometheus.pid"}
: ${prometheus_syslog_output_enable:="YES"}
: ${prometheus_consoles_dir:="/usr/local/share/prometheus/consoles"}
: ${prometheus_console_libraries_dir:="/usr/local/share/prometheus/console_libraries"}
: ${prometheus_data_dir:="/var/db/prometheus"}
: ${prometheus_loglevel:="info"}
: ${prometheus_logformat:="logfmt"}

# Build the daemon(8) syslog flags (-T tag, -s priority, -l facility)
# only when syslog output is enabled.
if checkyesno prometheus_syslog_output_enable; then
	if [ -n "${prometheus_syslog_output_tag}" ]; then
		prometheus_syslog_output_flags="-T ${prometheus_syslog_output_tag}"
	else
		prometheus_syslog_output_flags="-T ${name}"
	fi

	if [ -n "${prometheus_syslog_output_priority}" ]; then
		prometheus_syslog_output_flags="${prometheus_syslog_output_flags} -s ${prometheus_syslog_output_priority}"
	fi

	if [ -n "${prometheus_syslog_output_facility}" ]; then
		prometheus_syslog_output_flags="${prometheus_syslog_output_flags} -l ${prometheus_syslog_output_facility}"
	fi
fi

pidfile="${prometheus_pidfile}"
required_files="${prometheus_config}"
procname="/usr/local/bin/prometheus"
command="/usr/sbin/daemon"
command_args="-f ${prometheus_syslog_output_flags} -p ${pidfile} -t ${name} \
	/usr/bin/env ${prometheus_env} ${procname} \
	--config.file=${prometheus_config} \
	--web.console.templates=${prometheus_consoles_dir} \
	--web.console.libraries=${prometheus_console_libraries_dir} \
	--storage.tsdb.path=${prometheus_data_dir} \
	--log.level=${prometheus_loglevel} \
	--log.format=${prometheus_logformat} \
	${prometheus_args}"

start_precmd="prometheus_start_precmd"
extra_commands="reload"

# This checks for the existence of a prometheus 1.x data at the
# $prometheus_data_dir location. If one is found, Prometheus will not
# start. Returns non-zero when 1.x data is detected.
prometheus_check_data_version()
{
	local _version
	local _version_file="${prometheus_data_dir}/VERSION"

	if [ -f "${_version_file}" ]; then
		read _version < "${_version_file}"
		if [ "${_version}" = "1" ]; then
			return 1
		fi
	fi
}

prometheus_start_precmd()
{
	if [ ! -e "${pidfile}" ]; then
		install -m 0600 -o "${prometheus_user}" -g "${prometheus_group}" /dev/null "${pidfile}"
	fi

	if [ ! -d "${prometheus_data_dir}" ]; then
		install -d -m 750 -o "${prometheus_user}" -g "${prometheus_group}" "${prometheus_data_dir}"
	else
		# Ensure it's not a prometheus 1.x data. Invoke the check
		# function directly: the previous form
		# `[ ! prometheus_check_data_version ]` tested a literal
		# (non-empty) string, which is always true, so the negation
		# was always false and the guard below could never fire.
		if ! prometheus_check_data_version; then
			err 1 "Found \"net-mgmt/prometheus1\" data, refusing to start."
		fi
	fi
}

run_rc_command "$1"

43
monitoringServer/rc.d/rsyncd Executable file
View file

@ -0,0 +1,43 @@
#!/bin/sh

# PROVIDE: rsyncd
# REQUIRE: LOGIN
# BEFORE: securelevel
# KEYWORD: shutdown

# Add the following lines to /etc/rc.conf to enable `rsyncd':
#
# rsyncd_enable="YES"
# rsyncd_flags="<set as needed>"
#
# See rsync(1) for rsyncd_flags
#

. /etc/rc.subr

name="rsyncd"
rcvar=rsyncd_enable

command="/usr/local/bin/rsync"
start_precmd="rsyncd_precmd"
pidfile="/var/run/$name.pid"

# read configuration and set defaults
load_rc_config "$name"
: ${rsyncd_enable="NO"}
: ${rsyncd_configfile:=/usr/local/etc/rsync/$name.conf}

required_files="${rsyncd_configfile}"
command_args="--daemon --config ${rsyncd_configfile}"

# Migrate a config file found at the old /usr/local/etc/ location into
# /usr/local/etc/rsync/, leaving a symlink behind for compatibility.
# The -L guard skips the move once the old path is already a symlink.
rsyncd_precmd()
{
	if [ -f "/usr/local/etc/$name.conf" ] && [ ! -L "/usr/local/etc/$name.conf" ]; then
		echo "Found /usr/local/etc/$name.conf in old location. Migrating to /usr/local/etc/rsync/$name.conf."
		mv /usr/local/etc/$name.conf /usr/local/etc/rsync/$name.conf
		ln -s /usr/local/etc/rsync/$name.conf /usr/local/etc/$name.conf
	fi
}

run_rc_command "$1"

75
monitoringServer/rc.d/telegraf Executable file
View file

@ -0,0 +1,75 @@
#!/bin/sh

# PROVIDE: telegraf
# REQUIRE: DAEMON NETWORKING
# BEFORE: LOGIN
# KEYWORD: shutdown

# Add the following lines to /etc/rc.conf to enable telegraf:
# telegraf_enable="YES"
#
# telegraf_enable (bool): Set to YES to enable telegraf
# Default: NO
# telegraf_conf (str): telegraf configuration file
# Default: ${PREFIX}/etc/telegraf.conf
# telegraf_confdir (str): telegraf configuration directory
# Default: none
# telegraf_user (str): telegraf daemon user
# Default: telegraf
# telegraf_group (str): telegraf daemon group
# Default: telegraf
# telegraf_flags (str): Extra flags passed to telegraf
# Default: --quiet

. /etc/rc.subr

PATH=${PATH}:/usr/local/sbin:/usr/local/bin

name="telegraf"
rcvar=telegraf_enable

load_rc_config $name

: ${telegraf_enable:="NO"}
: ${telegraf_user:="telegraf"}
: ${telegraf_group:="telegraf"}
: ${telegraf_flags:="--quiet"}
: ${telegraf_conf:="/usr/local/etc/${name}.conf"}
: ${telegraf_confdir:=""}
: ${telegraf_options:="${telegraf_flags} --config=${telegraf_conf}"}

# Only pass --config-directory when a confdir was actually configured.
if [ -n "${telegraf_confdir}" ]; then
	telegraf_options="${telegraf_options} --config-directory=${telegraf_confdir}"
fi

logfile="/var/log/telegraf/${name}.log"
pidfile="/var/run/${name}.pid"
command=/usr/sbin/daemon

start_precmd="telegraf_prestart"
start_cmd="telegraf_start"
stop_cmd="telegraf_stop"

# Ensure the log directory exists and is writable by the daemon user.
telegraf_prestart()
{
	install -d -o ${telegraf_user} -g ${telegraf_group} -m750 /var/log/telegraf
}

# Supervise telegraf with daemon(8): -r restarts it if it exits,
# -P records the supervisor pid, -o appends output to the logfile.
telegraf_start()
{
	echo "Starting ${name}"
	/usr/sbin/daemon -fcr -P ${pidfile} -u ${telegraf_user} -o ${logfile} \
		/usr/local/bin/${name} ${telegraf_options}
}

# Stop the supervisor's whole process group (note the negative pid).
telegraf_stop()
{
	pid=$(check_pidfile $pidfile $command)
	if [ -n "${pid}" ]; then
		echo "Stopping ${name} (pid=${pid})"
		kill -- -${pid}
		wait_for_pids ${pid}
	else
		echo "${name} isn't running"
	fi
}

run_rc_command "$1"

13
primaryServer/.env Normal file
View file

@ -0,0 +1,13 @@
#All values here are EXAMPLE values. Please change them before using in production.

# Internal docker network. SUBNET is a full CIDR (mask included);
# GATEWAY is a plain address inside that subnet.
SUBNET=10.15.0.0/24
GATEWAY=10.15.0.1

# Pinned image/build versions consumed by the compose files.
AUDIOBOOK_VERSION=v2.16.2
QBT_VERSION=4.6.0
SONARR_VERSION=3.0.10
PROWLARR_VERSION=1.12.2
RADARR_VERSION=5.14.0
BAZARR_VERSION=1.4.5
PLEROMA_VERSION=v2.7.0
CADDY_VERSION=2.8.4
MINIFLUX_VERSION=2.2.2
FORGEJO_VERSION=9.0.1

24
primaryServer/README.md Normal file
View file

@ -0,0 +1,24 @@
# Primary Server
The "primary" server denotes the server where the majority of the work will be done. This should ideally be the highest-spec'ed machine in the setup.
# Packages
|Package|Usage|
|---|---|
|`rsync`|File transfers; faster and safer than `scp`|
|`fish`| Preference; Shell that comes with autofills and reasonable defaults; ***NOT POSIX COMPLIANT***\*|
|`fwupd`| FirmWare Update Program and Daemon|
|`git`| Used for pulling down packages and config files|
|`links`| CLI-mode browser; May also be listed as `links2`|
|`neovim`| Derivative of Vim; text editor|
|`samba`| Server for SMB/Windows Network Drive protocol|
|`zfs`| File system protocol for accessing bulk storage|
|`apcupsd`|APC Uninterruptible Power Supply Daemon; for communicating with UPS|
|`docker`| Most services running on the server run in docker|
|`docker-compose`|Allows for docker configs to be stored in files|
|`openssh`|SSH backend|
|`openssl`|HTTPS library|
|`man-db`|Manual for installed software; Often called "man pages"|
*: "POSIX Compliance" is a standard for most shell programs, like `bash` and `sh`, which allows scripts to run largely regardless of which shell is installed. Some scripts and copy-paste commands expect POSIX compliance, so if/when scripts don't work in `fish`, `bash` is used instead.

View file

@ -0,0 +1,24 @@
services:
  audiobookshelf:
    container_name: audiobookshelf
    # Built from a self-hosted mirror, pinned to the tag in .env.
    build:
      context: https://git.blizzard.systems/github-mirrors/audiobookshelf.git#${AUDIOBOOK_VERSION}
    volumes:
      - audiobooks:/audiobooks
      - podcasts:/podcasts
      - books:/ebooks
      - ./config:/config
      - ./metadata:/metadata
    restart: unless-stopped
    # Labels consumed by the homepage dashboard for auto-discovery.
    labels:
      - homepage.group=Streaming
      - homepage.name=Audiobooks
      - homepage.href=https://audiobook.blizzard.systems
      - homepage.icon=audiobookshelf.png
      - homepage.description=Audiobook and Podcast client
volumes:
  audiobooks:
  podcasts:
  books:

View file

@ -0,0 +1,116 @@
services:
  # VPN client container; qbittorrent shares this container's network
  # namespace so all torrent traffic goes through the tunnel.
  transmission-openvpn:
    image: yacht7/openvpn-client
    container_name: mullvad
    privileged: true
    cap_add:
      - NET_ADMIN
    environment:
      - KILL_SWITCH=off
      - SUBNETS=${SUBNET}
    devices:
      - /dev/net/tun
    ports:
      #VPN/Web port
      - 1500:1500
      - 5665:5665
      #QBT UI Port
      - 8090:8090
    volumes:
      - ./mullvad_openvpn:/data/vpn
    logging:
      driver: json-file
      options:
        max-size: 10m
    restart: unless-stopped
  qbittorrent:
    image: linuxserver/qbittorrent:${QBT_VERSION}
    container_name: qbittorrent
    environment:
      - TZ=America/New_York
      - UMASK_SET=022
      - WEBUI_PORT=8090
    volumes:
      - ./automations/qbt:/config
      - tempFolder:/downloads
      - tvDownloads:/tvdownloads
      - filmDownloads:/filmdownloads
      - bookDownloads:/bookdownloads
      - musicDownloads:/musicdownloads
    # Route all traffic through the VPN container above; the web UI port
    # is therefore published on the mullvad container.
    network_mode: service:transmission-openvpn
    restart: unless-stopped
    depends_on:
      - transmission-openvpn
  sonarr: #TV
    image: linuxserver/sonarr:${SONARR_VERSION}
    container_name: sonarr
    environment:
      - TZ=America/New_York
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - ./sonarr:/config
      - tv:/tv
      - tvDownloads:/downloads
    restart: unless-stopped
    depends_on:
      - qbittorrent
  prowlarr: #Indexer management
    image: lscr.io/linuxserver/prowlarr:${PROWLARR_VERSION}
    container_name: prowlarr
    environment:
      - TZ=America/New_York
    volumes:
      - ./prowlarr:/config
    restart: unless-stopped
    depends_on:
      - qbittorrent
  radarr: #Movies
    image: linuxserver/radarr:${RADARR_VERSION}
    container_name: radarr
    environment:
      - TZ=America/New_York
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - ./automations/radarr:/config
      - movies:/movies
      - filmDownloads:/downloads
    restart: unless-stopped
    depends_on:
      - qbittorrent
  bazarr: #Subtitles
    image: lscr.io/linuxserver/bazarr:${BAZARR_VERSION}
    container_name: bazarr
    environment:
      - TZ=America/New_York
    volumes:
      - ./automations/bazarr:/config
      - movies:/movies
      - tv:/tv
    restart: unless-stopped
#  readarr:
#    image: lscr.io/linuxserver/readarr:develop
#    container_name: readarr
#    environment:
#      - TZ=America/New_York
#    volumes:
#      - ./automations/readarr:/config
#      - /mnt/glacier/Audiobooks:/books
#      - /mnt/glacier/torrentSync/readarrParsing:/downloads
#    restart: unless-stopped
#    depends_on:
#      - qbittorrent
#
volumes:
  tempFolder:
  tvDownloads:
  filmDownloads:
  bookDownloads:
  musicDownloads:
  tv:
  movies:

View file

@ -0,0 +1,44 @@
services:
  db:
    image: postgres:12.1-alpine
    container_name: pleroma_db
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "pg_isready", "-U", "pleroma"]
    environment:
      POSTGRES_USER: pleroma
      # Must match DB_PASS in the web service below.
      POSTGRES_PASSWORD: CHANGEME
      POSTGRES_DB: pleroma
    volumes:
      - ./blog/postgres:/var/lib/postgresql/data
  web:
    container_name: pleroma_web
    build:
      context: blog
      args:
        - "UID=1000"
        - "GID=1000"
        - "PLEROMA_VER=${PLEROMA_VERSION}"
    image: pleroma #Note: THIS DOES NOT PULL THE IMAGE; This names the image in the local storage
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "wget -q --spider --proxy=off localhost:4000 || exit 1",
        ]
    restart: unless-stopped
    volumes:
      - ./uploads:/var/lib/pleroma/uploads
      - ./static:/var/lib/pleroma/static
      - ./config.exs:/etc/pleroma/config.exs:ro
    environment:
      DOMAIN: mastodon.blizzard.systems
      INSTANCE_NAME: Pleroma
      ADMIN_EMAIL: blizzardfinnegan@gmail.com
      NOTIFY_EMAIL: blizzardfinnegan@gmail.com
      DB_USER: pleroma
      DB_PASS: CHANGEME
      DB_NAME: pleroma
    depends_on:
      - db

View file

@ -0,0 +1,72 @@
{
	# Global options: ACME account email for automatic TLS certificates.
	email blizzardfinnegan@gmail.com
}

# One site block per subdomain; each terminates TLS and proxies to the
# matching container name on the shared docker network.
audiobook.blizzard.systems {
	reverse_proxy http://audiobookshelf:80
}
subtitles.blizzard.systems {
	reverse_proxy http://bazarr:6767
}
ci.blizzard.systems {
	reverse_proxy http://ci_server:8000
}
home.blizzard.systems {
	reverse_proxy http://flame:5005
}
git.blizzard.systems {
	reverse_proxy http://forgejo:3000
}
jellyfin.blizzard.systems {
	reverse_proxy http://jellyfin:8096
}
matrix-frontend.blizzard.systems {
	reverse_proxy http://matrix-frontend:80
}
matrix.blizzard.systems {
	reverse_proxy http://matrix-backend:8008
}
12ft.blizzard.systems {
	reverse_proxy http://ladder:8080
}
feed.blizzard.systems {
	reverse_proxy http://miniflux:8080
}
search.blizzard.systems {
	reverse_proxy http://mirror-serve:8080
}
# qbittorrent's UI is published on the VPN container ("mullvad").
qbt.blizzard.systems {
	reverse_proxy http://mullvad:8090
}
pictures.blizzard.systems {
	reverse_proxy http://piwigo:80
}
mastodon.blizzard.systems {
	reverse_proxy http://pleroma_web:4000
}
indexers.blizzard.systems {
	reverse_proxy http://prowlarr:9696
}
movies.blizzard.systems {
	reverse_proxy http://radarr:7878
}
tv.blizzard.systems {
	reverse_proxy http://sonarr:8989
}

View file

@ -0,0 +1,17 @@
services:
  caddy:
    image: caddy:${CADDY_VERSION}
    container_name: caddy
    restart: unless-stopped
    ports:
      - "80:80"
      - "81:81"
      - "443:443"
      # UDP 443 enables HTTP/3 (QUIC).
      - "443:443/udp"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile
      #Static file serving
      #- ./site:/srv
      - ./data:/data
      - ./config:/config

View file

@ -0,0 +1,41 @@
include:
- ./audiobook/docker-compose.yml
#- ./automations/docker-compose.yml
- ./blog/docker-compose.yml
#- ./budget/docker-compose.yml
- ./caddy/docker-compose.yml
- ./factorio/docker-compose.yml
- ./feedReader/docker-compose.yml
#- ./focalboard/docker-compose.yml
#- ./gitRepo/docker-compose.yml
#- ./inventory/docker-compose.yml
#- ./journal/docker-compose.yml
- ./jellyfin/docker-compose.yml
- ./landingPage/docker-compose.yml
#- ./logging/docker-compose.yml
- ./ladder/docker-compose.yml
#- ./matrix/docker-compose.yml
#- ./minecraft/docker-compose.yml
#- ./mirrorHosting/docker-compose.yml
#- ./monitoring/docker-compose.yml
#- ./music/docker-compose.yml
#- ./piwigo/docker-compose.yml
#- ./tickets/docker-compose.yml
#- ./wekan/docker-compose.yml
#If network is manually defined:
#networks:
# default:
# name: internal
# external: true
networks:
default:
name: internal
driver: bridge
ipam:
driver: default
config:
- subnet: ${SUBNET}/24
ip_range: ${SUBNET}/24
gateway: ${GATEWAY}

View file

@ -0,0 +1,10 @@
services:
  factorio:
    image: factoriotools/factorio:stable
    #container_name: "factorio"
    #ports:
    #  - "42069:42069/udp"
    environment:
      # Game port inside the container; publish via the commented ports
      # mapping above when the server should be reachable externally.
      - PORT=42069
    volumes:
      - .:/factorio

View file

@ -0,0 +1,26 @@
services:
  miniflux:
    image: miniflux/miniflux:${MINIFLUX_VERSION}
    container_name: miniflux
    depends_on:
      # Must name the service as declared below; the database service is
      # "minifluxdb" (there is no "db" service in this file, and a wrong
      # name makes compose reject the project).
      minifluxdb:
        condition: service_healthy
    environment:
      # Password must match POSTGRES_PASSWORD on minifluxdb.
      - DATABASE_URL=postgres://miniflux:CHANGEME@minifluxdb/miniflux?sslmode=disable
      - RUN_MIGRATIONS=1
      - CREATE_ADMIN=1
      - ADMIN_USERNAME=admin
      - ADMIN_PASSWORD=CHANGEME
  minifluxdb:
    container_name: minifluxdb
    image: postgres:15
    environment:
      - POSTGRES_USER=miniflux
      - POSTGRES_PASSWORD=CHANGEME
      - POSTGRES_DB=miniflux
    volumes:
      - ./db:/var/lib/postgresql/data
    # Healthcheck gates miniflux startup via service_healthy above.
    healthcheck:
      test: ["CMD", "pg_isready", "-U", "miniflux"]
      interval: 10s
      start_period: 30s

View file

@ -0,0 +1,51 @@
services:
  forgejo:
    container_name: forgejo
    # Build from the upstream repo at the pinned tag. Interpolation
    # requires ${...}; the previous "#{FORGEJO_VERSION}" was taken
    # literally, so the build never checked out the intended version.
    build: https://codeberg.org/forgejo/forgejo.git#${FORGEJO_VERSION}
    environment:
      - USER_GID=1004
      - USER_UID=1004
      - GNUPGHOME=/data/gitea/home/.gnupg
    ports:
      # SSH clone access on host port 26.
      - "26:22"
    restart: unless-stopped
    volumes:
      - ./gitRepo/.ssh:/data/git/.ssh
      - ./gitRepo/gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    cap_add:
      - SYS_CHROOT
  woodpecker-server:
    image: woodpeckerci/woodpecker-server:latest
    container_name: ci_server
    ports:
      - 9001:9000
    volumes:
      - ./gitRepo/ci_data:/var/lib/woodpecker/
    environment:
      - WOODPECKER_OPEN=false
      - WOODPECKER_ADMIN=blizzardfinnegan
      - WOODPECKER_HOST=https://ci.blizzard.systems
      - WOODPECKER_GITEA=true
      - WOODPECKER_GITEA_URL=https://git.blizzard.systems
      # Secrets left blank in the committed file; supply real values
      # locally before starting the stack.
      - WOODPECKER_GITEA_CLIENT=
      - WOODPECKER_GITEA_SECRET=
      - WOODPECKER_AGENT_SECRET=
    restart: unless-stopped
  woodpecker-agent:
    image: woodpeckerci/woodpecker-agent:latest
    container_name: ci_local_runner
    command: agent
    restart: unless-stopped
    depends_on:
      - woodpecker-server
    privileged: true
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - WOODPECKER_SERVER=ci_server:9000
      - WOODPECKER_AGENT_SECRET=
      - WOODPECKER_MAX_PROCS=6

View file

@ -0,0 +1,12 @@
# NOT IN USE
services:
  homebox:
    container_name: homebox
    # Built from source at a pinned tag rather than pulling an image.
    build: https://github.com/sysadminsmedia/homebox.git#v0.15.2
    restart: unless-stopped
    environment:
      - HBOX_LOG_LEVEL=info
      - HBOX_LOG_FORMAT=text
      - HBOX_WEB_MAX_UPLOAD_SIZE=10
    volumes:
      - ./data:/data/

View file

@ -0,0 +1,29 @@
services:
  jellyfin:
    # Built from the cloned source tree in ./git using upstream's
    # Dockerfile, pinned to a release via build args.
    build:
      context: git
      dockerfile: docker/Dockerfile
      args:
        JELLYFIN_VERSION: 10.10.0
        #Don't modify these; necessary for build
        DOTNET_ARCH: x64
        IMAGE_ARCH: amd64
        QEMU_ARCH: x86_64
        PACKAGE_ARCH: amd64
    container_name: jellyfin
    group_add:
      - "104" #input group; required for Intel GPU. See https://jellyfin.org/docs/general/administration/hardware-acceleration/intel#configure-with-linux-virtualization
    volumes:
      - ./config:/config
      - ./cache:/cache
      - /mnt/glacier/Movies:/movies
      - /mnt/glacier/TV:/tv
    restart: "unless-stopped"
    environment:
      - PGID=997
      - PUID=998
    #Intel GPU Passthrough
    #devices:
    #  - /dev/dri/renderD128:/dev/dri/renderD128

View file

@ -0,0 +1,10 @@
services:
  # NOTE(review): service key is "memos" but the image is write-freely
  # and the container is named "journal" — confirm which app this is
  # meant to run and align the names.
  memos:
    container_name: journal
    image: nephatrine/write-freely:latest
    environment:
      TZ: America/New_York
    volumes:
      - ./journal/config:/mnt/config
    restart:
      unless-stopped

View file

@ -0,0 +1,24 @@
services:
  ladder:
    image: ghcr.io/everywall/ladder:latest
    container_name: ladder
    #build: ladder/git/.
    restart: unless-stopped
    #command: sh -c ./ladder
    environment:
      - PORT=8080
      - RULESET=/app/ruleset.yaml
      # Remaining options left at upstream defaults; uncomment to tune.
      #- ALLOWED_DOMAINS=example.com,example.org
      #- ALLOWED_DOMAINS_RULESET=false
      #- EXPOSE_RULESET=true
      #- PREFORK=false
      #- DISABLE_FORM=false
      #- FORM_PATH=/app/form.html
      #- X_FORWARDED_FOR=66.249.66.1
      #- USER_AGENT=Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)
      #- USERPASS=foo:bar
      #- LOG_URLS=true
      #- GODEBUG=netdns=go
    volumes:
      - ./git/ruleset.yaml:/app/ruleset.yaml
      - ./git/handlers/form.html:/app/form.html

View file

@ -0,0 +1,12 @@
services:
  flame:
    container_name: flame
    # Built from upstream at a pinned tag.
    build:
      context: https://github.com/pawelmalak/flame#v2.3.1
      dockerfile: .docker/Dockerfile
    volumes:
      - ./data:/app/data
      - /var/run/docker.sock:/var/run/docker.sock # optional but required for Docker integration
    environment:
      - PASSWORD=CHANGEME
    restart: unless-stopped

View file

@ -0,0 +1,37 @@
## TO BE FIXED
services:
  # Element-style web client, built from local sources.
  frontend:
    container_name: "matrix-frontend"
    build: "./matrix/sources/frontend"
    restart: unless-stopped
    volumes:
      - ./matrix/frontend/config.json:/app/config.json
  # coturn TURN/STUN server for voice/video calls.
  voip:
    container_name: "matrix-voip"
    build:
      context: "./matrix/sources/voip/."
      dockerfile: "./docker/coturn/debian/Dockerfile"
    restart: unless-stopped
    volumes:
      - ./matrix/turnserver.conf:/etc/coturn/turnserver.conf
    ports:
      - 49160:49160/udp
      - 3478:3478
      - 5349:5349
  admin:
    container_name: "matrix-admin"
    build: "./matrix/sources/admin/."
    restart: unless-stopped
  # Homeserver; 8448 is the Matrix federation port.
  backend:
    container_name: "matrix-backend"
    build:
      context: "./matrix/sources/backend/."
      dockerfile: "./docker/Dockerfile"
    restart: unless-stopped
    volumes:
      - ./matrix/backend:/data
    ports:
      - 8448:8448

View file

@ -0,0 +1,20 @@
services:
  mc:
    image: itzg/minecraft-server
    container_name: "minecraft"
    environment:
      - EULA=true
      - MEMORY=8G
      - ENABLE_ROLLING_LOGS=true
      - VERSION=1.20.4
      # Fixed typo: the image reads DIFFICULTY; the previous
      # "DIFFUCULTY" was silently ignored, leaving the default.
      - DIFFICULTY=normal
      - FORCE_GAMEMODE=true
      - SNOOPER_ENABLED=false
      # Valid modes are creative/survival/adventure/spectator;
      # "survivor" is not a recognized game mode.
      - MODE=survival
    ports:
      - "25565:25565"
    volumes:
      - ./minecraft:/data
    # Keep a TTY so `docker attach` reaches the server console.
    stdin_open: true
    tty: true
    restart: unless-stopped

View file

@ -0,0 +1,9 @@
services:
  mirror-serve:
    image: ghcr.io/kiwix/kiwix-serve:latest
    container_name: "mirror-serve"
    volumes:
      - .:/data
    # NOTE(review): exec-form commands get no shell globbing; confirm
    # kiwix-serve expands the "*.zim" pattern itself.
    command:
      - "*.zim"
    restart: unless-stopped

View file

@ -0,0 +1,28 @@
services:
  # Host metrics exporter for Prometheus (scraped on :9100).
  node-exporter:
    image: prom/node-exporter:latest
    container_name: prometheus-exporter
    restart: unless-stopped
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    ports:
      - 9100:9100
    command:
      - '--path.procfs=/host/proc'
      - '--path.rootfs=/rootfs'
      - '--path.sysfs=/host/sys'
      # $$ escapes $ for compose interpolation. The stray space that was
      # between ")" and "($$|/)" made the regex match nothing, so the
      # pseudo-filesystems were never excluded.
      - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
  # Per-container metrics via cAdvisor (UI/metrics on host :9999).
  container-monitor:
    image: gcr.io/cadvisor/cadvisor:latest
    container_name: daemon-monitor
    restart: unless-stopped
    ports:
      - 9999:8080
    volumes:
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro

View file

@ -0,0 +1,12 @@
services:
  music-stream:
    container_name: navidrome
    image: deluan/navidrome:0.51.1
    volumes:
      # NOTE(review): named volume "music" is not declared in this file;
      # presumably defined in another included compose file — verify.
      - music:/music
      - ./music/data:/data
    restart: unless-stopped
    environment:
      ND_JUKEBOX_ENABLED: true
      ND_SCANSCHEDULE: 1h
      ND_BASEURL: https://stream.blizzard.systems

View file

@ -0,0 +1,25 @@
services:
  piwigo:
    container_name: "piwigo"
    # Built from the linuxserver Dockerfile repo (unpinned).
    build: https://github.com/linuxserver/docker-piwigo.git
    volumes:
      - ./images:/gallery
      - ./config:/config
    restart: unless-stopped
    ports:
      - 1928:443
      - 1927:80
    depends_on:
      - piwigo-db
  piwigo-db:
    # NOTE(review): untagged "mysql" tracks latest; consider pinning a
    # major version to avoid surprise upgrades of the data directory.
    image: mysql
    container_name: piwigo-db
    restart: unless-stopped
    environment:
      MYSQL_USER: random_username
      MYSQL_PASSWORD: CHANGEME
      MYSQL_ROOT_PASSWORD: CHANGEME
      MYSQL_DATABASE: piwigo
    volumes:
      - ./piwigo/db:/var/lib/mysql

View file

@ -0,0 +1,222 @@
# NOT IN USE
services:
wekandb:
#-------------------------------------------------------------------------------------
# ==== MONGODB FROM DOCKER HUB ====
image: mongo:6
#-------------------------------------------------------------------------------------
container_name: wekan-db
restart: unless-stopped
# command: mongod --oplogSize 128
# Syslog: mongod --syslog --oplogSize 128 --quiet
# Disable MongoDB logs:
command: mongod --logpath /dev/null --oplogSize 128 --quiet
expose:
- 27017
volumes:
- /etc/localtime:/etc/localtime:ro
- ./wekan-db:/data/db
- ./wekan-db-dump:/dump
#- /etc/timezone:/etc/timezone:ro # Do not use https://github.com/wekan/wekan/issues/5123
wekan:
build: git
container_name: wekan-app
restart: unless-stopped
# remove port mapping if you use nginx reverse proxy, port 8080 is already exposed to wekan-tier network
environment:
#-----------------------------------------------------------------
# ==== WRITEABLE PATH FOR FILE UPLOADS ====
- ./data=/data
# ==== MONGO_URL ====
- MONGO_URL=mongodb://wekandb:27017/wekan
#---------------------------------------------------------------
# ==== ROOT_URL SETTING ====
- ROOT_URL=https://wekan.blizzard.systems/
#---------------------------------------------------------------
# ==== EMAIL SETTINGS ====
# Email settings are only at MAIL_URL and MAIL_FROM.
# Admin Panel has test button, but it's not used for settings.
# see https://github.com/wekan/wekan/wiki/Troubleshooting-Mail
# For SSL in email, change smtp:// to smtps://
# NOTE: Special characters need to be url-encoded in MAIL_URL.
# You can encode those characters for example at: https://www.urlencoder.org
#- MAIL_URL=smtp://user:pass@mailserver.example.com:25/
#- MAIL_URL=smtp://<mail_url>:25/?ignoreTLS=true&tls={rejectUnauthorized:false}
#- MAIL_FROM=Wekan Notifications <noreply.wekan@mydomain.com>
# Currently MAIL_SERVICE is not in use.
#- MAIL_SERVICE=Outlook365
#- MAIL_SERVICE_USER=firstname.lastname@hotmail.com
#- MAIL_SERVICE_PASSWORD=SecretPassword
#---------------------------------------------------------------
# ==== OPTIONAL: LOGS AND STATS ====
# https://github.com/wekan/wekan/wiki/Logs
#
# Daily export of Wekan changes as JSON to Logstash and ElasticSearch / Kibana (ELK)
# https://github.com/wekan/wekan-logstash
#
# Statistics Python script for Wekan Dashboard
# https://github.com/wekan/wekan-stats
#
# Console, file, and zulip logger on database changes https://github.com/wekan/wekan/pull/1010
# with fix to replace console.log by winston logger https://github.com/wekan/wekan/pull/1033
# but there could be bug https://github.com/wekan/wekan/issues/1094
#
# There is Feature Request: Logging date and time of all activity with summary reports,
# and requesting reason for changing card to other column https://github.com/wekan/wekan/issues/1598
#---------------------------------------------------------------
# ==== NUMBER OF SEARCH RESULTS PER PAGE BY DEFAULT ====
#- RESULTS_PER_PAGE=20
#---------------------------------------------------------------
# ==== AFTER OIDC LOGIN, ADD USERS AUTOMATICALLY TO THIS BOARD ID ====
# https://github.com/wekan/wekan/pull/5098
#- DEFAULT_BOARD_ID=abcd1234
#---------------------------------------------------------------
# ==== WEKAN API AND EXPORT BOARD ====
# Wekan Export Board works when WITH_API=true.
# https://github.com/wekan/wekan/wiki/REST-API
# https://github.com/wekan/wekan-gogs
# If you disable Wekan API with false, Export Board does not work.
- WITH_API=true
#---------------------------------------------------------------
# ==== PASSWORD BRUTE FORCE PROTECTION ====
# https://atmospherejs.com/lucasantoniassi/accounts-lockout
# Defaults below. Uncomment to change. wekan/server/accounts-lockout.js
#- ACCOUNTS_LOCKOUT_KNOWN_USERS_FAILURES_BEFORE=3
#- ACCOUNTS_LOCKOUT_KNOWN_USERS_PERIOD=60
#- ACCOUNTS_LOCKOUT_KNOWN_USERS_FAILURE_WINDOW=15
#- ACCOUNTS_LOCKOUT_UNKNOWN_USERS_FAILURES_BEFORE=3
#- ACCOUNTS_LOCKOUT_UNKNOWN_USERS_LOCKOUT_PERIOD=60
#- ACCOUNTS_LOCKOUT_UNKNOWN_USERS_FAILURE_WINDOW=15
#---------------------------------------------------------------
# ==== ACCOUNT OPTIONS ====
# https://docs.meteor.com/api/accounts-multi.html#AccountsCommon-config
# Defaults below. Uncomment to change. wekan/server/accounts-common.js
# - ACCOUNTS_COMMON_LOGIN_EXPIRATION_IN_DAYS=90
#---------------------------------------------------------------
# ==== RICH TEXT EDITOR IN CARD COMMENTS ====
# https://github.com/wekan/wekan/pull/2560
- RICHER_CARD_COMMENT_EDITOR=true
#---------------------------------------------------------------
# ==== CARD OPENED, SEND WEBHOOK MESSAGE ====
# https://github.com/wekan/wekan/issues/2518
- CARD_OPENED_WEBHOOK_ENABLED=true
#---------------------------------------------------------------
# ==== Allow configuration to validate uploaded attachments ====
#- ATTACHMENTS_UPLOAD_EXTERNAL_PROGRAM=/usr/local/bin/avscan {file}
#- ATTACHMENTS_UPLOAD_MIME_TYPES=image/*,text/*
#- ATTACHMENTS_UPLOAD_MAX_SIZE=5000000
#---------------------------------------------------------------
# ==== Allow configuration to validate uploaded avatars ====
#- AVATARS_UPLOAD_EXTERNAL_PROGRAM=/usr/local/bin/avscan {file}
#- AVATARS_UPLOAD_MIME_TYPES=image/*
#- AVATARS_UPLOAD_MAX_SIZE=500000
#---------------------------------------------------------------
# ==== Allow to shrink attached/pasted image ====
# https://github.com/wekan/wekan/pull/2544
- MAX_IMAGE_PIXEL=1024
- IMAGE_COMPRESS_RATIO=80
#---------------------------------------------------------------
# ==== NOTIFICATION TRAY AFTER READ DAYS BEFORE REMOVE =====
# Number of days after a notification is read before we remove it.
# Default: 2
#- NOTIFICATION_TRAY_AFTER_READ_DAYS_BEFORE_REMOVE=2
#---------------------------------------------------------------
# ==== BIGEVENTS DUE ETC NOTIFICATIONS =====
# https://github.com/wekan/wekan/pull/2541
# Introduces a system env var BIGEVENTS_PATTERN (default "NONE"):
# if any activityType matches the pattern, the system will send out
# notifications to all board members, regardless of whether they are
# watching or tracking the board. The owner of the Wekan server can
# disable the feature by setting this variable to "NONE", or
# change the pattern to any valid regex, i.e. '|'-delimited
# activityType names.
# a) Example
- BIGEVENTS_PATTERN=due|end
# b) All
#- BIGEVENTS_PATTERN=received|start|due|end
# c) Disabled
#- BIGEVENTS_PATTERN=NONE
#---------------------------------------------------------------
# ==== EMAIL DUE DATE NOTIFICATION =====
# https://github.com/wekan/wekan/pull/2536
# System timelines will show any user modification of
# dueat, startat, endat, or receivedat, and will notify
# the watchers when any card is due, about due, or past due.
#
# Notify due days: default is no notifications; the example below sends 2 days before and on the event day
#- NOTIFY_DUE_DAYS_BEFORE_AND_AFTER=2,0
#
# Notify due at hour of day. Default every morning at 8am. Can be 0-23.
# If env variable has parsing error, use default. Notification sent to watchers.
#- NOTIFY_DUE_AT_HOUR_OF_DAY=8
#-----------------------------------------------------------------
# ==== EMAIL NOTIFICATION TIMEOUT, ms =====
# Default: 30000 ms = 30s
#- EMAIL_NOTIFICATION_TIMEOUT=30000
#-----------------------------------------------------------------
# ==== CORS =====
# CORS: Set Access-Control-Allow-Origin header.
#- CORS=*
# CORS_ALLOW_HEADERS: Set Access-Control-Allow-Headers header. "Authorization,Content-Type" is required for cross-origin use of the API.
#- CORS_ALLOW_HEADERS=Authorization,Content-Type
# CORS_EXPOSE_HEADERS: Set Access-Control-Expose-Headers header. This is not needed for typical CORS situations
#- CORS_EXPOSE_HEADERS=*
#-----------------------------------------------------------------
# ==== BROWSER POLICY AND TRUSTED IFRAME URL ====
# Enable browser policy and allow one trusted URL that can have iframe that has Wekan embedded inside.
# Setting this to false is not recommended, it also disables all other browser policy protections
# and allows all iframing etc. See wekan/server/policy.js
- BROWSER_POLICY_ENABLED=true
# When browser policy is enabled, HTML code at this Trusted URL can have iframe that embeds Wekan inside.
#- TRUSTED_URL=https://intra.example.com
#-----------------------------------------------------------------
# ==== METRICS ALLOWED IP ADDRESSES ====
# https://github.com/wekan/wekan/wiki/Metrics
#- METRICS_ALLOWED_IP_ADDRESSES=
#-----------------------------------------------------------------
# ==== OUTGOING WEBHOOKS ====
# What to send to Outgoing Webhook, or leave out. If commented out the default values will be: cardId,listId,oldListId,boardId,comment,user,card,commentId,swimlaneId,customerField,customFieldValue
#- WEBHOOKS_ATTRIBUTES=cardId,listId,oldListId,boardId,comment,user,card,board,list,swimlane,commentId
#-----------------------------------------------------------------
# ==== AUTOLOGIN WITH OIDC/OAUTH2 ====
# https://github.com/wekan/wekan/wiki/autologin
#- OIDC_REDIRECTION_ENABLED=true
#-----------------------------------------------------------------
# ==== OAUTH2 Nextcloud ====
# 1) Register the application with Nextcloud: https://your.nextcloud/index.php/settings/admin/security
# Make sure you capture the application ID as well as generate a secret key.
# Use https://your.wekan/_oauth/oidc for the redirect URI.
# 2) Configure the environment variables. This differs slightly
# by installation type, but make sure you have the following:
#- OAUTH2_ENABLED=true
# OAuth2 login style: popup or redirect.
#- OAUTH2_LOGIN_STYLE=redirect
# Application GUID captured during app registration:
#- OAUTH2_CLIENT_ID=xxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx
# Secret key generated during app registration:
#- OAUTH2_SECRET=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#- OAUTH2_SERVER_URL=https://your-nextcloud.tld
#- OAUTH2_AUTH_ENDPOINT=/index.php/apps/oauth2/authorize
#- OAUTH2_USERINFO_ENDPOINT=/ocs/v2.php/cloud/user?format=json
#- OAUTH2_TOKEN_ENDPOINT=/index.php/apps/oauth2/api/v1/token
# The claim name you want to map to the unique ID field:
#- OAUTH2_ID_MAP=id
# The claim name you want to map to the username field:
#- OAUTH2_USERNAME_MAP=id
# The claim name you want to map to the full name field:
#- OAUTH2_FULLNAME_MAP=display-name
# The claim name you want to map to the email field:
#- OAUTH2_EMAIL_MAP=email
#-------------------------------------------------------------------
# Hide password login form
# - PASSWORD_LOGIN_ENABLED=true
#---------------------------------------------------------------------
# Wait spinner to use
# - WAIT_SPINNER=Bounce
#---------------------------------------------------------------------
depends_on:
- wekandb
volumes:
- /etc/localtime:/etc/localtime:ro
- ./wekan-files:/data:rw