first commit
commit 3ec5ce616e

common/users.sls (Normal file, 38 lines)
@@ -0,0 +1,38 @@
{% for user, args in pillar['users'].items() %}
# remove users
{% if args['remove'] is defined %}
{{ user }}:
  user.absent
{% if 'alias_target' in args %}
  alias.present:
    - target: {{ args['alias_target'] }}
{% endif %}
{% else %}
# add users
{{ user }}:
  user.present:
    - fullname: {{ args['fullname'] | default('') }}
    - home: {{ args['home'] | default('/home/'+user) }}
    - shell: {{ args['shell'] | default('/bin/bash') }}
{% if args['uid'] is defined %}
    - uid: {{ args['uid'] }}
{% endif %}
    - password: {{ args['password'] }}
    - enforce_password: {{ args['enforce_password'] | default('True') }}
{% if 'groups' in args %}
    - groups: {{ args['groups'] }}
{% endif %}
{% if 'alias_target' in args %}
  alias.present:
    - target: {{ args['alias_target'] }}
{% endif %}

{% if 'ssh_auth' in args %}
{{ user }}_authorized_keys:
  ssh_auth:
    - present
    - user: {{ user }}
    - names: {{ args['ssh_auth'] }}
{% endif %}
{% endif %}
{% endfor %}
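
Note: the state above is driven entirely by pillar['users']; a minimal pillar sketch it could consume (user names and values are illustrative assumptions, not part of this commit):

    users:
      alice:                        # hypothetical user
        fullname: Alice Example
        shell: /bin/bash
        uid: 1000
        password: '$6$...'          # pre-hashed password, as user.present expects
        groups:
          - wheel
        ssh_auth:
          - ssh-ed25519 AAAA... alice@example.org
      bob:
        remove: true                # setting 'remove' takes the user.absent branch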

containers/alertmanager.sls (Normal file, 26 lines)
@@ -0,0 +1,26 @@
{% from 'lib.sls' import container_deploy with context %}

Create alertmanager config directory:
  file.recurse:
    - name: /etc/alertmanager
    - source: salt://containers/files/alertmanager
    - user: nobody
    - group: root
    - dir_mode: "0755"
    - file_mode: "0644"

Create alertmanager data directory:
  file.directory:
    - name: /srv/alertmanager
    - user: nobody
    - group: nobody
    - mode: "0755"

{{ container_deploy('alertmanager') }}

Start alertmanager service:
  service.running:
    - name: alertmanager
    - enable: True
    - watch:
      - file: Create alertmanager config directory

containers/fail2ban_exporter.sls (Normal file, 3 lines)
@@ -0,0 +1,3 @@
{% from 'lib.sls' import container_deploy with context %}

{{ container_deploy('fail2ban_exporter') }}

containers/files/99-rfxcom-serial.rules (Normal file, 1 line)
@@ -0,0 +1 @@
SUBSYSTEM=="tty", ATTRS{idVendor}=="0403", ATTRS{idProduct}=="6001", SYMLINK+="rfxcom", MODE="0666"

containers/files/99-zigbee-serial.rules (Normal file, 1 line)
@@ -0,0 +1 @@
SUBSYSTEM=="tty", ATTRS{idVendor}=="0451", ATTRS{idProduct}=="16a8", SYMLINK+="zigbee-serial", MODE="0666"

containers/files/alertmanager/alertmanager.yml (Normal file, 16 lines)
@@ -0,0 +1,16 @@
route:
  group_by: ['alertname']
  group_wait: 30s
  group_interval: 5m
  repeat_interval: 1h
  receiver: 'web.hook'
receivers:
  - name: 'web.hook'
    webhook_configs:
      - url: 'http://127.0.0.1:5001/'
inhibit_rules:
  - source_match:
      severity: 'critical'
    target_match:
      severity: 'warning'
    equal: ['alertname', 'dev', 'instance']

containers/files/check_image_updates.service.jinja (Normal file, 10 lines)
@@ -0,0 +1,10 @@
[Unit]
Description=Check for image updates on configured podman containers

[Service]
Type=oneshot
User=root
ExecStart=/root/bin/check_image_updates.sh

[Install]
WantedBy=default.target

containers/files/check_image_updates.sh.jinja (Normal file, 35 lines)
@@ -0,0 +1,35 @@
#!/usr/bin/env bash

URL="{{ pillar['podman']['gotify']['url'] }}"
TOKEN="{{ pillar['podman']['gotify']['token'] }}"
TITLE="Updates on $HOSTNAME"
PRIORITY="{{ pillar['podman']['gotify']['priority'] }}"

{% raw -%}
function check_update(){
    # Collect name, image name, and running image id for the given container.
    IFS=',' read -r -a container_info <<< "$(podman container inspect $1 --format '{{ .Name }},{{ .ImageName }},{{ .Image }}')"

    podman pull "${container_info[1]}"
    if [[ "$(podman image inspect "${container_info[1]}" --format "{{.Id}}")" != "${container_info[2]}" ]]; then
        containers[${#containers[@]}]="${container_info[0]}"
    fi
}

IFS=$'\n'
for line in $(podman container ls -q); do
    check_update "$line"
done
if [[ "${#containers[@]}" == "0" ]]; then
    exit
fi

MESSAGE=$(cat << EOM
The following ${#containers[@]} container(s) have updates:
${containers[*]}
EOM
)

curl "$URL/message?token=$TOKEN" -F "title=$TITLE" -F "priority=$PRIORITY" -F "message=$MESSAGE"
echo " "
{% endraw -%}
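
Note: the script template reads its Gotify settings from pillar under podman:gotify; an assumed pillar shape matching those lookups (all values are placeholders):

    podman:
      gotify:
        url: https://gotify.example.org   # placeholder URL
        token: AbC123                     # placeholder application token
        priority: 5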

containers/files/check_image_updates.timer.jinja (Normal file, 9 lines)
@@ -0,0 +1,9 @@
[Unit]
Description=Check image updates timer

[Timer]
OnCalendar=Sun, 12:00
Unit=check_image_updates.service

[Install]
WantedBy=timers.target

containers/files/container.sh.jinja (Normal file, 144 lines)
@@ -0,0 +1,144 @@
#!/usr/bin/env bash

function pull_image(){
    if ! podman image exists {{ args['image'] }}:{{ args['tag'] }}; then
        podman pull {{ args['image'] }}:{{ args['tag'] }}
    fi
}

function create_container() {
    if ! podman container exists {{ container }}; then
        podman container create \
        --name {{ container }} \
        {%- if args['podman_options'] is defined %}
        {%- for option, value in args['podman_options'].items() %}
        --{{ option }} {{ value }} \
        {%- endfor %}
        {%- endif %}
        {%- if args['volumes'] is defined %}
        {%- for volume, mount in args['volumes'].items() %}
        -v {{ volume }}:{{ mount }} \
        {%- endfor %}
        {%- endif %}
        {%- if args['ports'] is defined %}
        {%- for ports in args['ports'] %}
        -p {{ ports['host'] }}:{{ ports['container'] }}{% if ports['protocol'] is defined %}/{{ ports['protocol'] }}{% endif %} \
        {%- endfor %}
        {%- endif %}
        {%- if args['env'] is defined %}
        {%- for key, value in args['env'].items() %}
        -e {{ key }}={{ value }} \
        {%- endfor %}
        {%- endif %}
        {%- if args['devices'] is defined %}
        {%- for key, value in args['devices'].items() %}
        --device {{ key }}:{{ value }} \
        {%- endfor %}
        {%- endif %}
        {{ args['image'] }}:{{ args['tag'] }}{%- if args['run'] is defined %} \
        {{ args['run'] }}
        {%- endif %}
    fi
}

function generate_systemd_unit_file() {
    podman generate systemd --name {{ container }} > /etc/systemd/system/{{ container }}.service
}

function check_update() {
    podman pull {{ args['image'] }}:{{ args['tag'] }}
    if [[ "$(podman image inspect {{ args['image'] }}:{{ args['tag'] }} --format "{% raw %}{{.Id}}{% endraw %}")" == "$(podman inspect {{ container }} --format "{% raw %}{{ .Image }}{% endraw %}")" ]]; then
        echo "No image updates available"
        return 0
    else
        echo "Image update available"
        return 1
    fi
}

function update() {
    systemctl stop {{ container }}
    podman container rm {{ container }}
    create_container
    generate_systemd_unit_file
    systemctl daemon-reload
    systemctl enable --now {{ container }}.service
}

function printHelp(){
    cat << EOF
Usage: ${0##*/} [options..]
 -h, -?, --help          Show help and exit
 -p, --pull              Pull the container image ({{ container }}:{{ args['tag'] }})
 -v, --volumes           Create container volumes
 -c, --create            Create the {{ container }} container
 -s, --start             Start and enable the {{ container }} container
 -S, --stop              Stop the {{ container }} container
 -i, --is-running        Check whether the container service is running
 -u, --check-update      Check if an image update is available
 --update                Perform an image update if one exists
 -g, --generate-systemd  Generate the systemd service unit file
EOF
}

while :; do
    case $1 in
        -h|-\?|--help)
            printHelp
            exit
            ;;
        -p|--pull)
            pull_image
            shift
            ;;
        -v|--volumes)
            # NOTE: create_volumes is referenced here but not defined in this script.
            create_volumes
            shift
            ;;
        -c|--create)
            create_container
            shift
            ;;
        -s|--start)
            systemctl --user enable --now {{ container }}.service
            shift
            ;;
        -S|--stop)
            systemctl --user stop {{ container }}.service
            shift
            ;;
        -i|--is-running)
            systemctl --user is-active {{ container }}.service
            exit $?
            shift
            ;;
        -g|--generate-systemd)
            generate_systemd_unit_file
            shift
            ;;
        -u|--check-update)
            check_update
            shift
            ;;
        --update)
            update
            shift
            ;;
        --) # End of all options
            shift
            break
            ;;
        -?*)
            printf "'%s' is not a valid option\n" "$1" >&2
            exit 1
            ;;
        *) # Break out of case, no more options
            break
    esac
    shift
done
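
Note: container.sh.jinja renders against a container name and an args mapping; lib.sls (whose container_deploy macro wires these in) is not part of this commit, so the exact plumbing is assumed. A sketch of a pillar entry covering every key the template dereferences (names and values are illustrative):

    containers:
      gotify:                       # hypothetical container entry
        image: docker.io/gotify/server
        tag: latest
        podman_options:
          network: host             # rendered as: --network host
        volumes:
          /srv/gotify: /app/data    # rendered as: -v /srv/gotify:/app/data
        ports:
          - host: 8080
            container: 80
            protocol: tcp           # rendered as: -p 8080:80/tcp
        env:
          TZ: Europe/Stockholm      # rendered as: -e TZ=Europe/Stockholm
        devices:
          /dev/ttyUSB0: /dev/ttyUSB0
        run: serve                  # optional trailing command for the image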

containers/files/env_file.jinja (Normal file, 3 lines)
@@ -0,0 +1,3 @@
{% for key, value in env_vars.items() -%}
{{ key }}={{ value }}
{% endfor -%}

containers/files/loki-config.yaml (Normal file, 40 lines)
@@ -0,0 +1,40 @@
auth_enabled: false

server:
  http_listen_port: 3100

ingester:
  lifecycler:
    address: 127.0.0.1
    ring:
      kvstore:
        store: inmemory
      replication_factor: 1
    final_sleep: 0s
  chunk_idle_period: 5m
  chunk_retain_period: 30s
  wal:
    dir: /data/wal

schema_config:
  configs:
    - from: 2020-05-15
      store: boltdb
      object_store: filesystem
      schema: v11
      index:
        prefix: index_
        period: 168h

storage_config:
  boltdb:
    directory: /data/loki/index

  filesystem:
    directory: /data/loki/chunks

limits_config:
  enforce_metric_name: false
  reject_old_samples: true
  reject_old_samples_max_age: 168h

containers/files/mysql-dump.service.jinja (Normal file, 12 lines)
@@ -0,0 +1,12 @@
{%- set user = salt['pillar.get']('podman:user', 'root') %}
{%- set home = salt['user.info'](user).home %}
[Unit]
Description=Dump all mariadb databases

[Service]
Type=oneshot
User={{ user }}
ExecStart={{ home }}/bin/mysql-dump.sh

[Install]
WantedBy=default.target

containers/files/mysql-dump.sh.jinja (Normal file, 15 lines)
@@ -0,0 +1,15 @@
#!/bin/bash
umask 0077
BACKUP_DIR={{ pillar.containers.mariadb.backup_dir }}
databases=$(podman exec -it mariadb mysql -B -u root -p{{ pillar.containers.mariadb.env.MYSQL_ROOT_PASSWORD }} -e "SHOW DATABASES;" | tr -d "| " | grep -v Database)

for db in ${databases[@]}; do
    db=${db::-1}
    if [[ "$db" != "information_schema" ]] && [[ "$db" != "performance_schema" ]] && [[ "$db" != "mysql" ]] && [[ "$db" != _* ]] && [[ "$db" != "sys" ]]; then
        echo "Dumping database: $db"
        podman exec -it mariadb mysqldump -u root -p{{ pillar.containers.mariadb.env.MYSQL_ROOT_PASSWORD }} --databases $db | gzip > ${BACKUP_DIR}/$(date +"%Y-%m-%d_%H-%M-%S")_$db-sql.gz
    fi
done
# Delete the files older than 3 days
find $BACKUP_DIR/* -type f -name "*-sql.gz" -mtime +3 -exec rm {} \;
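
Note: the dump script and the service/timer units around it read pillar under containers:mariadb; an assumed layout (placeholder values):

    containers:
      mariadb:
        backup_dir: /srv/backups/mariadb   # placeholder path
        OnCalendar: '*-*-* 03:00:00'       # consumed by mysql-dump.timer.jinja
        env:
          MYSQL_ROOT_PASSWORD: changeme    # placeholder secret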

containers/files/mysql-dump.timer.jinja (Normal file, 9 lines)
@@ -0,0 +1,9 @@
[Unit]
Description=MariaDB dump timer

[Timer]
OnCalendar={{ pillar.containers.mariadb.OnCalendar }}
Unit=mysql-dump.service

[Install]
WantedBy=timers.target

containers/files/npm-container.conf (Normal file, 1 line)
@@ -0,0 +1 @@
net.ipv4.ip_unprivileged_port_start=80

containers/files/prometheus/alert.node.yml (Normal file, 292 lines)
@@ -0,0 +1,292 @@
groups:
  - name: node_exporter_alerts
    rules:
      - alert: Node down
        expr: up{job="monitoring-pi"} == 0
        for: 2m
        labels:
          severity: warning
        annotations:
          title: Node {{ $labels.instance }} is down
          description: Failed to scrape {{ $labels.job }} on {{ $labels.instance }} for more than 2 minutes. Node seems down.

      - alert: HostOutOfMemory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host out of memory (instance {{ $labels.instance }})
          description: Node memory is filling up (< 10% left)\n VALUE = {{ $value }}

      - alert: HostMemoryUnderMemoryPressure
        expr: rate(node_vmstat_pgmajfault[1m]) > 1000
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host memory under memory pressure (instance {{ $labels.instance }})
          description: The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{ $value }}

      - alert: HostUnusualNetworkThroughputIn
        expr: sum by (instance) (rate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: Host unusual network throughput in (instance {{ $labels.instance }})
          description: Host network interfaces are probably receiving too much data (> 100 MB/s)\n VALUE = {{ $value }}

      - alert: HostUnusualNetworkThroughputOut
        expr: sum by (instance) (rate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: Host unusual network throughput out (instance {{ $labels.instance }})
          description: Host network interfaces are probably sending too much data (> 100 MB/s)\n VALUE = {{ $value }}

      - alert: HostUnusualDiskReadRate
        expr: sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: Host unusual disk read rate (instance {{ $labels.instance }})
          description: Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}

      - alert: HostUnusualDiskWriteRate
        expr: sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host unusual disk write rate (instance {{ $labels.instance }})
          description: Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}

      # Please add ignored mountpoints in node_exporter parameters like
      # "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|run)($|/)".
      # Same rule using "node_filesystem_free_bytes" will fire when disk fills for non-root users.
      - alert: HostOutOfDiskSpace
        expr: (node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host out of disk space (instance {{ $labels.instance }})
          description: Disk is almost full (< 10% left)\n VALUE = {{ $value }}

      # Please add ignored mountpoints in node_exporter parameters like
      # "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|run)($|/)".
      # Same rule using "node_filesystem_free_bytes" will fire when disk fills for non-root users.
      - alert: HostDiskWillFillIn24Hours
        expr: (node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host disk will fill in 24 hours (instance {{ $labels.instance }})
          description: Filesystem is predicted to run out of space within the next 24 hours at current write rate\n VALUE = {{ $value }}

      - alert: HostOutOfInodes
        expr: node_filesystem_files_free{mountpoint="/rootfs"} / node_filesystem_files{mountpoint="/rootfs"} * 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly{mountpoint="/rootfs"} == 0
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host out of inodes (instance {{ $labels.instance }})
          description: Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ $value }}

      - alert: HostInodesWillFillIn24Hours
        expr: node_filesystem_files_free{mountpoint="/rootfs"} / node_filesystem_files{mountpoint="/rootfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{mountpoint="/rootfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly{mountpoint="/rootfs"} == 0
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host inodes will fill in 24 hours (instance {{ $labels.instance }})
          description: Filesystem is predicted to run out of inodes within the next 24 hours at current write rate\n VALUE = {{ $value }}

      - alert: HostUnusualDiskReadLatency
        expr: rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host unusual disk read latency (instance {{ $labels.instance }})
          description: Disk latency is growing (read operations > 100ms)\n VALUE = {{ $value }}

      - alert: HostUnusualDiskWriteLatency
        expr: rate(node_disk_write_time_seconds_total{device!~"mmcblk.+"}[1m]) / rate(node_disk_writes_completed_total{device!~"mmcblk.+"}[1m]) > 0.1 and rate(node_disk_writes_completed_total{device!~"mmcblk.+"}[1m]) > 0
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host unusual disk write latency (instance {{ $labels.instance }})
          description: Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}

      - alert: HostHighCpuLoad
        expr: 100 - (avg by(instance) (rate(node_cpu_seconds_total{mode="idle"}[2m])) * 100) > 80
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Host high CPU load (instance {{ $labels.instance }})
          description: CPU load is > 80%\n VALUE = {{ $value }}

      - alert: HostCpuStealNoisyNeighbor
        expr: avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
          description: CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.\n VALUE = {{ $value }}

      # 1000 context switches is an arbitrary number.
      # Alert threshold depends on nature of application.
      # Please read: https://github.com/samber/awesome-prometheus-alerts/issues/58
      - alert: HostContextSwitching
        expr: (rate(node_context_switches_total[5m])) / (count without(cpu, mode) (node_cpu_seconds_total{mode="idle"})) > 1000
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Host context switching (instance {{ $labels.instance }})
          description: Context switching is growing on node (> 1000 / s)\n VALUE = {{ $value }}

      - alert: HostSwapIsFillingUp
        expr: (1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host swap is filling up (instance {{ $labels.instance }})
          description: Swap is filling up (>80%)\n VALUE = {{ $value }}

      - alert: HostSystemdServiceCrashed
        expr: node_systemd_unit_state{state="failed"} == 1
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Host SystemD service crashed (instance {{ $labels.instance }})
          description: SystemD service crashed\n VALUE = {{ $value }}

      - alert: HostPhysicalComponentTooHot
        expr: node_hwmon_temp_celsius > 75
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: Host physical component too hot (instance {{ $labels.instance }})
          description: Physical hardware component too hot\n VALUE = {{ $value }}

      - alert: HostNodeOvertemperatureAlarm
        expr: node_hwmon_temp_crit_alarm_celsius == 1
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Host node overtemperature alarm (instance {{ $labels.instance }})
          description: Physical node temperature alarm triggered\n VALUE = {{ $value }}

      - alert: HostRaidArrayGotInactive
        expr: node_md_state{state="inactive"} > 0
        for: 0m
        labels:
          severity: critical
        annotations:
          summary: Host RAID array got inactive (instance {{ $labels.instance }})
          description: RAID array {{ $labels.device }} is in degraded state due to one or more disks failures. Number of spare drives is insufficient to fix issue automatically.\n VALUE = {{ $value }}

      - alert: HostRaidDiskFailure
        expr: node_md_disks{state="failed"} > 0
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host RAID disk failure (instance {{ $labels.instance }})
          description: At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap\n VALUE = {{ $value }}

      - alert: HostOomKillDetected
        expr: increase(node_vmstat_oom_kill[1m]) > 0
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Host OOM kill detected (instance {{ $labels.instance }})
          description: OOM kill detected\n VALUE = {{ $value }}

      - alert: HostEdacCorrectableErrorsDetected
        expr: increase(node_edac_correctable_errors_total[1m]) > 0
        for: 0m
        labels:
          severity: info
        annotations:
          summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }})
          description: Instance has had {{ printf "%.0f" $value }} correctable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}

      - alert: HostEdacUncorrectableErrorsDetected
        expr: node_edac_uncorrectable_errors_total > 0
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})
          description: Instance has had {{ printf "%.0f" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}

      - alert: HostNetworkReceiveErrors
        expr: rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host Network Receive Errors (instance {{ $labels.instance }}:{{ $labels.device }})
          description: Instance interface has encountered {{ printf "%.0f" $value }} receive errors in the last five minutes.\n VALUE = {{ $value }}

      - alert: HostNetworkTransmitErrors
        expr: rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host Network Transmit Errors (instance {{ $labels.instance }}:{{ $labels.device }})
          description: Instance has encountered {{ printf "%.0f" $value }} transmit errors in the last five minutes.\n VALUE = {{ $value }}

      - alert: HostNetworkInterfaceSaturated
        expr: (rate(node_network_receive_bytes_total{device!~"^tap.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*"} > 0.8
        for: 1m
        labels:
          severity: warning
        annotations:
          summary: Host Network Interface Saturated (instance {{ $labels.instance }}:{{ $labels.interface }})
          description: The network interface is getting overloaded.\n VALUE = {{ $value }}

      - alert: HostConntrackLimit
        expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: Host conntrack limit (instance {{ $labels.instance }})
          description: The number of conntrack entries is approaching the limit\n VALUE = {{ $value }}

      - alert: HostClockSkew
        expr: (node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host clock skew (instance {{ $labels.instance }})
          description: Clock skew detected. Clock is out of sync.\n VALUE = {{ $value }}

      - alert: HostClockNotSynchronising
        expr: min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host clock not synchronising (instance {{ $labels.instance }})
          description: Clock not synchronising.\n VALUE = {{ $value }}

containers/files/prometheus/prometheus.yml (Normal file, 59 lines)
@@ -0,0 +1,59 @@
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
            - 10.2.0.22:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  - "alert.node.yml"
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: "prometheus"

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ["localhost:9090"]
  - job_name: "node"
    static_configs:
      - targets:
          - "poblano.rre.nu:9100"
          - "salt.rre.nu:9100"
          - "pepper.rre.nu:9100"
          - "woody.rre.nu:9100"
          - "serrano.rre.nu:9100"
          - "coronado.rre.nu:9100"
  - job_name: "unpoller"
    static_configs:
      - targets:
          - "unpoller.rre.nu:9130"
  - job_name: "fail2ban"
    static_configs:
      - targets:
          - "poblano.rre.nu:9191"
          - "salt.rre.nu:9191"
          - "pepper.rre.nu:9191"
  - job_name: "nginx"
    static_configs:
      - targets:
          - "10.2.0.22:9193"

  - job_name: "promtail"
    static_configs:
      - targets:
          - "serrano.rre.nu:9080"
          - "coronado.rre.nu:9080"

containers/files/promtail.conf.jinja (Normal file, 29 lines)
@@ -0,0 +1,29 @@
server:
  http_listen_port: {{ http_listen_port }}
  grpc_listen_port: 0

positions:
  filename: /tmp/positions.yaml

clients:
  - url: "{{ client_url }}"

scrape_configs:
  - job_name: journal
    journal:
      max_age: 12h
      path: /var/log/journal
      labels:
        job: systemd-journal
    relabel_configs:
      - source_labels: ['__journal__systemd_unit']
        target_label: 'unit'
      - source_labels: ["__journal__hostname"]
        target_label: host
      - source_labels: ["__journal_priority_keyword"]
        target_label: level
      - source_labels: ["__journal_syslog_identifier"]
        target_label: syslog_identifier
      - source_labels: ["__journal_container_name"]
        target_label: container_name

containers/files/unpoller.conf (Normal file, 234 lines)
@@ -0,0 +1,234 @@
# Unpoller v2 primary configuration file. TOML FORMAT #
###########################################################

[poller]
# Turns on line numbers, microsecond logging, and a per-device log.
# The default is false, but I personally leave this on at home (four devices).
# This may be noisy if you have a lot of devices. It adds one line per device.
debug = false

# Turns off per-interval logs. Only startup and error logs will be emitted.
# Recommend enabling debug with this setting for better error logging.
quiet = false

# Load dynamic plugins. Advanced use; only sample mysql plugin provided by default.
plugins = []

#### OUTPUTS

# If you don't use an output, you can disable it.

[prometheus]
disable = false
# This controls on which ip and port /metrics is exported when mode is "prometheus".
# This has no effect in other modes. Must contain a colon and port.
http_listen = "0.0.0.0:9130"
# Adding an SSL Cert and Cert Key will make Poller listen with SSL/https.
ssl_cert_path = ""
ssl_key_path = ""
# Errors are rare. Setting this to true will report them to Prometheus.
report_errors = false
## Record data for disabled or down (unlinked) switch ports.
dead_ports = false

[influxdb]
disable = true
# InfluxDB does not require auth by default, so the user/password are probably unimportant.
url = "http://127.0.0.1:8086"
user = "unifipoller"
# Password for InfluxDB user (above).
# If the password provided here begins with file:// then the password is read in from
# the file path that follows the file:// prefix. ex: file:///etc/influxdb/passwd.file
pass = "unifipoller"
# Be sure to create this database. See the InfluxDB Wiki page for more info.
db = "unifi"
# If your InfluxDB uses a valid SSL cert, set this to true.
verify_ssl = false
# The UniFi Controller only updates traffic stats about every 30 seconds.
# Setting this to something lower may lead to "zeros" in your data.
# If you're getting zeros now, set this to "1m"
interval = "30s"
## Record data for disabled or down (unlinked) switch ports.
dead_ports = false

# To enable output of UniFi Events to Loki, add a URL; it's disabled otherwise.
# User, pass and tenant_id are optional and most folks won't set them.
# Pick which logs you want per-controller in the [unifi.controller] section.
# This is a new feature. Feedback welcome!
[loki]
disable = false
url = "{{ pillar['containers']['unpoller']['loki_url'] }}"
# The rest of this is advanced & optional. See wiki.
user = ""
pass = ""
verify_ssl = false
tenant_id = ""
interval = "2m"
timeout = "10s"

[datadog]
# How often to poll UniFi and report to Datadog.
interval = "2m"

# To enable this output plugin
enable = false

# Datadog Custom Options

# address to talk to the datadog agent, by default this uses the local statsd UDP interface
# address = "localhost:8125"

# namespace to prepend to all data, default is no additional prefix.
# namespace = ""

# tags to append to all data
# tags = [ "customer:abc_corp" ]

# For more advanced options for very large amounts of collected data, see the upstream
# github.com/unpoller/unpoller/pkg/datadogunifi repository README.


# Unpoller has an optional web server. To turn it on, set enable to true. If you
# wish to use SSL, provide SSL cert and key paths. This interface is currently
# read-only; it just displays information, like logs, devices and clients.
# Notice: Enabling the web server with many sites will increase memory usage.
# This is a new feature and lacks a UI, enabling only recommended for testing.
[webserver]
enable = false
port = 37288
# The HTML path is different on Windows and BSD/macOS.
html_path = "/usr/lib/unifi-poller/web"
ssl_cert_path = ""
ssl_key_path = ""
# How many events per event group to hold. 200-2000. Use fewer with many sites.
# With 1 site, you'll have a max total of 9 event groups; 1 per plugin, 4 per site.
# Each site adds 1 event group for each of these inputs that is enabled:
# save_ids, save_events, save_anomalies, save_alarms.
max_events = 200

# By default the web interface does not require authentication. You can change
# that by adding a username and password hash (or multiple) below.
# To create a hash, run unifi-poller with the -e CLI argument. See Wiki for more!
[webserver.accounts]
# username = "password-hash"
# captain = "$2a$04$mxw6i0LKH6u46oaLK2cq5eCTAAFkfNiRpzNbz.EyvJZZWNa2FzIlS"

#### INPUTS

[unifi]
# Setting this to true and providing default credentials allows you to skip
# configuring controllers in this config file. Instead you configure them in
# your prometheus.yml config. Prometheus then sends the controller URL to
# Unpoller when it performs the scrape. This is useful if you have many,
# or changing controllers. See wiki for more.
dynamic = false

# The following section contains the default credentials/configuration for any
# dynamic controller (see above section), or the primary controller if you do not
# provide one and dynamic is disabled. In other words, you can just add your
# controller here and delete the following section. The internal defaults are
# shown below. Any missing values will assume these displayed defaults.

[unifi.defaults]
# URL for the UniFi Controller. Do not add any paths after the host:port.
# Do not use port 8443 if you have a UDM; just use "https://ip".
url = "{{ pillar['containers']['unpoller']['unifi_url'] }}"

# Make a read-only user in the UniFi Admin Settings, allow it access to all sites.
user = "{{ pillar['containers']['unpoller']['unifi_user'] }}"

# Password for UniFi controller user (above).
# If the password provided here begins with file:// then the password is read in from
# the file path that follows the file:// prefix. ex: file:///etc/unifi/password.file
# ex: file:///etc/unifi/passwd.file, windows: file://C:\\UserData\\Unifi\\Passwd.txt
pass = "{{ pillar['containers']['unpoller']['unifi_pass'] }}"

# If the controller has more than one site, specify which sites to poll here.
# Set this to ["default"] to poll only the first site on the controller.
# A setting of ["all"] will poll all sites; this works if you only have 1 site too.
sites = ["all"]

# Specify a timeout, leave missing to declare infinite wait. This determines the maximum
# time to wait for a response from the unifi controller on any API request.
# timeout = 60s

# Enable collection of site data. This data powers the Network Sites dashboard.
# It's not valuable to everyone and setting this to false will save resources.
save_sites = true

# Hash, with md5, client names and MAC addresses. This attempts to protect
# personally identifiable information. Most users won't want to enable this.
hash_pii = false

# Enable collection of Intrusion Detection System Data (InfluxDB/Loki only).
# Only useful if IDS or IPS are enabled on one of the sites. This may store
# a lot of information. Only recommended for testing and debugging. There
# may not be any dashboards to display this data. It can be used for annotations.
# Enable this only if using InfluxDB or Loki. This will leak PII data!
save_ids = false

# Enable collection of UniFi Events (InfluxDB/Loki only).
# This may store a lot of information. Only recommended for testing and debugging.
# There are no dashboards to display this data. It can be used for annotations.
# This is a new (June, 2020) feature. Please provide feedback if you try it out!
# Enable this only if using InfluxDB or Loki. This will leak PII data!
save_events = true

# Enable collection of UniFi Alarms (InfluxDB/Loki only).
# There are no dashboards to display this data. It can be used for annotations.
# This is a new (June, 2020) feature. Please provide feedback if you try it out!
# Enable this only if using InfluxDB or Loki. This will leak PII data!
save_alarms = true

# Enable collection of UniFi Anomalies (InfluxDB/Loki only).
# There are no dashboards to display this data. It can be used for annotations.
# This is a new (June, 2020) feature. Please provide feedback if you try it out!
# Enable this only if using InfluxDB or Loki.
save_anomalies = true

# Enable collection of Deep Packet Inspection data. This data breaks down traffic
# types for each client and site, it powers a dedicated DPI dashboard.
# Enabling this adds roughly 150 data points per client. That's 6000 metrics for
# 40 clients. This adds a little bit of poller run time per interval and causes
# more API requests to your controller(s). Don't let these "cons" sway you:
# it's cool data. Please provide feedback on your experience with this feature.
save_dpi = false

## Enabling save_rogue stores even more data in your time series databases.
## This saves neighboring access point metrics in a dedicated table or namespace.
save_rogue = false

# If your UniFi controller has a valid SSL certificate (like lets encrypt),
# you can enable this option to validate it. Otherwise, any SSL certificate is
# valid. If you don't know if you have a valid SSL cert, then you don't have one.
verify_ssl = false

## You may provide a list of SSL cert files (PEM format) that you expect your
## controller to use. As long as one of the certs you provide here shows up in
## the cert trust chain the controller presents it will be accepted and allowed.
## These files may be re-read while poller is running.
## Example: ssl_cert_paths = ["/path/to/cert.pem", "/another/cert.pem"]
ssl_cert_paths = []

# The following is optional and used for configurations with multiple UniFi controllers.

# You may repeat the following [[unifi.controller]] section as many times as needed to
# poll multiple controllers. Uncomment the entire section including [[unifi.controller]].
# Omitted variables will have their values taken from the defaults, above.
#
#[[unifi.controller]]
# url = "https://127.0.0.1:8443"
# user = "unifipoller"
# pass = "unifipoller"
# sites = ["all"]
# save_sites = true
# hash_pii = false
# save_ids = false
# save_events = false
# save_alarms = false
# save_anomalies = false
# save_dpi = false
# save_rogue = false
# verify_ssl = false
# ssl_cert_paths = []
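
Note: the templated values in unpoller.conf come from pillar under containers:unpoller; an assumed shape with placeholder values:

    containers:
      unpoller:
        loki_url: http://loki.example.org:3100    # placeholder
        unifi_url: https://10.2.0.1               # placeholder controller URL
        unifi_user: unifipoller                   # read-only UniFi user
        unifi_pass: file:///etc/unifi/passwd.file # or a literal password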

containers/freeipa.sls (Normal file, 17 lines)
@@ -0,0 +1,17 @@
{% from 'lib.sls' import container_deploy with context %}

Create freeipa root directory:
  file.directory:
    - name: /srv/freeipa
    - user: root
    - group: root
    - mode: "0755"

Create freeipa config directory:
  file.directory:
    - name: /srv/freeipa/data
    - user: root
    - group: root
    - mode: "0755"

{{ container_deploy('freeipa') }}

containers/gitea.sls (Normal file, 10 lines)
@@ -0,0 +1,10 @@
{% from 'lib.sls' import container_deploy with context %}

Create gitea data directory:
  file.directory:
    - name: /srv/gitea
    - user: 1000
    - group: 1000
    - mode: "0750"

{{ container_deploy('gitea') }}

containers/gotify.sls (Normal file, 10 lines)
@@ -0,0 +1,10 @@
{% from 'lib.sls' import container_deploy with context %}

Create gotify data directory:
  file.directory:
    - name: /srv/gotify
    - user: root
    - group: root
    - mode: "0755"

{{ container_deploy('gotify') }}

containers/grafana.sls (Normal file, 10 lines)
@@ -0,0 +1,10 @@
{% from 'lib.sls' import container_deploy with context %}

Create grafana data directory:
  file.directory:
    - name: /srv/grafana
    - user: 472
    - group: root
    - mode: "0750"

{{ container_deploy('grafana') }}

containers/init.sls (Normal file, 46 lines)
@@ -0,0 +1,46 @@
Copy check image update script:
  file.managed:
    - name: /root/bin/check_image_updates.sh
    - source: salt://containers/files/check_image_updates.sh.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0700"

Create check image update service:
  file.managed:
    - name: /etc/systemd/system/check_image_updates.service
    - source: salt://containers/files/check_image_updates.service.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0644"

Create check image update timer:
  file.managed:
    - name: /etc/systemd/system/check_image_updates.timer
    - source: salt://containers/files/check_image_updates.timer.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0644"

Systemd daemon reload for image update:
  cmd.run:
    - name: systemctl daemon-reload
    - watch:
      - file: Create check image update service
      - file: Create check image update timer

Start check image update timer:
  service.running:
    - name: check_image_updates.timer
    - enable: True

{% if pillar.containers is defined %}
include:
{%- for container, args in pillar.containers.items() %}
  - containers.{{ container }}
{%- endfor %}
{%- endif %}

containers/loki.sls (Normal file, 32 lines)
@@ -0,0 +1,32 @@
{% from 'lib.sls' import container_deploy with context %}

Create loki data directory:
  file.directory:
    - name: /srv/loki
    - user: 10001
    - group: root
    - mode: "0755"

Create loki config directory:
  file.directory:
    - name: /etc/loki
    - user: 10001
    - group: root
    - mode: "0755"

Create loki configuration file:
  file.managed:
    - name: /etc/loki/config.yaml
    - source: salt://containers/files/loki-config.yaml
    - user: root
    - group: root
    - mode: "0644"

{{ container_deploy('loki') }}

Start loki service:
  service.running:
    - name: loki
    - enable: True
    - onchanges:
      - file: Create loki configuration file

containers/mariadb.sls (Normal file, 10 lines)
@@ -0,0 +1,10 @@
{% from 'lib.sls' import container_deploy with context %}

Create data directory for mariadb:
  file.directory:
    - name: /srv/mariadb
    - user: 999
    - group: 999
    - mode: "0755"

{{ container_deploy('mariadb') }}

containers/mosquitto.sls (Normal file, 17 lines)
@@ -0,0 +1,17 @@
{% from 'lib.sls' import container_deploy with context %}

Create mosquitto configuration directory:
  file.directory:
    - name: /etc/mosquitto
    - user: 1883
    - group: 1883
    - mode: "0750"

Create mosquitto data directory:
  file.directory:
    - name: /srv/mosquitto
    - user: 1883
    - group: 1883
    - mode: "0750"

{{ container_deploy('mosquitto') }}

containers/nextcloud.sls (Normal file, 11 lines)
@@ -0,0 +1,11 @@
{% from 'lib.sls' import container_deploy with context %}

Create nextcloud data directory:
  file.directory:
    - name: /srv/nextcloud
    - user: 33
    - group: 33
    - mode: "0755"

{{ container_deploy('nextcloud') }}
{{ container_deploy('nextcloud-cron') }}

containers/node_exporter.sls (Normal file, 3 lines)
@@ -0,0 +1,3 @@
{% from 'lib.sls' import container_deploy with context %}

{{ container_deploy('node_exporter') }}

containers/nodered.sls (Normal file, 25 lines)
@@ -0,0 +1,25 @@
{% from 'lib.sls' import container_deploy with context %}

Create udev-rule for rfxcom usb dongle:
  file.managed:
    - name: /etc/udev/rules.d/99-rfxcom-serial.rules
    - source: salt://containers/files/99-rfxcom-serial.rules
    - user: root
    - group: root
    - mode: "0644"

Reload udev rules for rfxcom dongle:
  cmd.run:
    - name: udevadm control --reload-rules
    - onchanges:
      - file: Create udev-rule for rfxcom usb dongle

Create data folder for nodered:
  file.directory:
    - name: /srv/nodered
    - user: 1000
    - group: 1000
    - mode: "0750"

{{ container_deploy('nodered') }}

containers/npm.sls (Normal file, 24 lines)
@@ -0,0 +1,24 @@
{% from 'lib.sls' import container_deploy with context %}

Create npm root directory:
  file.directory:
    - name: /srv/npm
    - user: root
    - group: root
    - mode: "0755"

Create npm data directory:
  file.directory:
    - name: /srv/npm/data
    - user: root
    - group: root
    - mode: "0750"

Create npm letsencrypt directory:
  file.directory:
    - name: /srv/npm/letsencrypt
    - user: root
    - group: root
    - mode: "0750"

{{ container_deploy('npm') }}

containers/piwigo.sls (Normal file, 24 lines)
@@ -0,0 +1,24 @@
{% from 'lib.sls' import container_deploy with context %}

Create piwigo root directory:
  file.directory:
    - name: /srv/piwigo
    - user: {{ pillar.containers.piwigo.env.PUID }}
    - group: {{ pillar.containers.piwigo.env.GUID }}
    - mode: "0750"

Create piwigo config directory:
  file.directory:
    - name: /srv/piwigo/config
    - user: {{ pillar.containers.piwigo.env.PUID }}
    - group: {{ pillar.containers.piwigo.env.GUID }}
    - mode: "0750"

Create piwigo gallery directory:
  file.directory:
    - name: /srv/piwigo/gallery
    - user: {{ pillar.containers.piwigo.env.PUID }}
    - group: {{ pillar.containers.piwigo.env.GUID }}
    - mode: "0750"

{{ container_deploy('piwigo') }}

containers/prometheus.sls (Normal file, 26 lines)
@@ -0,0 +1,26 @@
{% from 'lib.sls' import container_deploy with context %}

Create prometheus config directory:
  file.recurse:
    - name: /etc/prometheus
    - source: salt://containers/files/prometheus
    - user: nobody
    - group: root
    - dir_mode: "0755"
    - file_mode: "0644"

Create prometheus data directory:
  file.directory:
    - name: /srv/prometheus
    - user: nobody
    - group: root
    - mode: "0755"

{{ container_deploy('prometheus') }}

Start prometheus service:
  service.running:
    - name: prometheus
    - enable: True
    - watch:
      - file: Create prometheus config directory

containers/promtail.sls (Normal file, 34 lines)
@@ -0,0 +1,34 @@
{%- set client_url = "http://loki.rre.nu:3100/loki/api/v1/push" %}
{%- set http_listen_port = 9080 %}

{% from 'lib.sls' import container_deploy with context %}

Create promtail configuration folder:
  file.directory:
    - name: /etc/promtail
    - user: root
    - group: root
    - mode: "0755"

Create promtail configuration:
  file.managed:
    - name: /etc/promtail/promtail.conf
    - source: salt://containers/files/promtail.conf.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0644"
    - require:
      - file: Create promtail configuration folder
    - context:
        client_url: {{ client_url }}
        http_listen_port: {{ http_listen_port }}

{{ container_deploy('promtail') }}

Start promtail service:
  service.running:
    - name: promtail.service
    - enable: True
    - watch:
      - file: Create promtail configuration

containers/salt.sls (Normal file, 3 lines)
@@ -0,0 +1,3 @@
{% from 'lib.sls' import container_deploy with context %}

{{ container_deploy('salt') }}

containers/unifi.sls (Normal file, 10 lines)
@@ -0,0 +1,10 @@
{% from 'lib.sls' import container_deploy with context %}

Create unifi data directory:
  file.directory:
    - name: /srv/unifi
    - user: root
    - group: root
    - mode: "0750"

{{ container_deploy('unifi') }}

containers/unpoller.sls (Normal file, 20 lines)
@@ -0,0 +1,20 @@
{% from 'lib.sls' import container_deploy with context %}

create config directory for unpoller:
  file.directory:
    - name: /srv/unpoller
    - user: root
    - group: root
    - mode: "0750"

create unpoller config:
  file.managed:
    - name: /srv/unpoller/up.conf
    - source: salt://containers/files/unpoller.conf
    - template: jinja
    - user: root
    - group: root
    - mode: "0640"

{{ container_deploy('unpoller') }}

containers/zigbee2mqtt.sls (Normal file, 25 lines)
@@ -0,0 +1,25 @@
{% from 'lib.sls' import container_deploy with context %}

Create udev-rule for zigbee usb dongle:
  file.managed:
    - name: /etc/udev/rules.d/99-zigbee-serial.rules
    - source: salt://containers/files/99-zigbee-serial.rules
    - user: root
    - group: root
    - mode: "0644"

Reload udev rules for zigbee dongle:
  cmd.run:
    - name: udevadm control --reload-rules
    - onchanges:
      - file: Create udev-rule for zigbee usb dongle

Create zigbee2mqtt data folder:
  file.directory:
    - name: /srv/zigbee2mqtt
    - user: root
    - group: root
    - mode: "0755"

{{ container_deploy('zigbee2mqtt') }}
696
dnsmasq/dnsmasq.conf
Normal file
696
dnsmasq/dnsmasq.conf
Normal file
@ -0,0 +1,696 @@
# Configuration file for dnsmasq.
#
# Format is one option per line, legal options are the same
# as the long options legal on the command line. See
# "/usr/sbin/dnsmasq --help" or "man 8 dnsmasq" for details.

# Accept DNS queries only from hosts whose address is on a local
# subnet, ie a subnet for which an interface exists on the server.
# It is intended to be set as a default on installation, to allow
# unconfigured installations to be useful but also safe from being
# used for DNS amplification attacks.
#local-service

# Listen on this specific port instead of the standard DNS port
# (53). Setting this to zero completely disables DNS function,
# leaving only DHCP and/or TFTP.
#port=5353

# The following two options make you a better netizen, since they
# tell dnsmasq to filter out queries which the public DNS cannot
# answer, and which load the servers (especially the root servers)
# unnecessarily. If you have a dial-on-demand link they also stop
# these requests from bringing up the link unnecessarily.

# Never forward plain names (without a dot or domain part)
#domain-needed
# Never forward addresses in the non-routed address spaces.
#bogus-priv

# Uncomment these to enable DNSSEC validation and caching:
# (Requires dnsmasq to be built with DNSSEC option.)
#conf-file=/etc/dnsmasq.d/trust-anchors.conf
#dnssec

# Replies which are not DNSSEC signed may be legitimate, because the domain
# is unsigned, or may be forgeries. Setting this option tells dnsmasq to
# check that an unsigned reply is OK, by finding a secure proof that a DS
# record somewhere between the root and the domain does not exist.
# The cost of setting this is that even queries in unsigned domains will need
# one or more extra DNS queries to verify.
#dnssec-check-unsigned

# Uncomment this to filter useless windows-originated DNS requests
# which can trigger dial-on-demand links needlessly.
# Note that (amongst other things) this blocks all SRV requests,
# so don't use it if you use eg Kerberos, SIP, XMPP or Google-talk.
# This option only affects forwarding, SRV records originating for
# dnsmasq (via srv-host= lines) are not suppressed by it.
#filterwin2k

# Change this line if you want dns to get its upstream servers from
# somewhere other than /etc/resolv.conf
#resolv-file=

# By default, dnsmasq will send queries to any of the upstream
# servers it knows about and tries to favour servers that are known
# to be up. Uncommenting this forces dnsmasq to try each query
# with each server strictly in the order they appear in
# /etc/resolv.conf
#strict-order

# If you don't want dnsmasq to read /etc/resolv.conf or any other
# file, getting its servers from this file instead (see below), then
# uncomment this.
#no-resolv

# If you don't want dnsmasq to poll /etc/resolv.conf or other resolv
# files for changes and re-read them then uncomment this.
#no-poll

# Add other name servers here, with domain specs if they are for
# non-public domains.
#server=/localnet/192.168.0.1

# Example of routing PTR queries to nameservers: this will send all
# address->name queries for 192.168.3/24 to nameserver 10.1.2.3
#server=/3.168.192.in-addr.arpa/10.1.2.3

# Add local-only domains here, queries in these domains are answered
# from /etc/hosts or DHCP only.
#local=/localnet/

# Add domains which you want to force to an IP address here.
# The example below sends any host in double-click.net to a local
# web-server.
#address=/double-click.net/127.0.0.1

# --address (and --server) work with IPv6 addresses too.
#address=/www.thekelleys.org.uk/fe80::20d:60ff:fe36:f83

# Add the IPs of all queries to yahoo.com, google.com, and their
# subdomains to the vpn and search ipsets:
#ipset=/yahoo.com/google.com/vpn,search

# Add the IPs of all queries to yahoo.com, google.com, and their
# subdomains to netfilters sets, which is equivalent to
# 'nft add element ip test vpn { ... }; nft add element ip test search { ... }'
#nftset=/yahoo.com/google.com/ip#test#vpn,ip#test#search

# Use netfilters sets for both IPv4 and IPv6:
# This adds all addresses in *.yahoo.com to vpn4 and vpn6 for IPv4 and IPv6 addresses.
#nftset=/yahoo.com/4#ip#test#vpn4
#nftset=/yahoo.com/6#ip#test#vpn6

# You can control how dnsmasq talks to a server: this forces
# queries to 10.1.2.3 to be routed via eth1
# server=10.1.2.3@eth1

# and this sets the source (ie local) address used to talk to
# 10.1.2.3 to 192.168.1.1 port 55 (there must be an interface with that
# IP on the machine, obviously).
# server=10.1.2.3@192.168.1.1#55

# If you want dnsmasq to change uid and gid to something other
# than the default, edit the following lines.
#user=
#group=

# If you want dnsmasq to listen for DHCP and DNS requests only on
# specified interfaces (and the loopback) give the name of the
# interface (eg eth0) here.
# Repeat the line for more than one interface.
#interface=
# Or you can specify which interface _not_ to listen on
#except-interface=
# Or which to listen on by address (remember to include 127.0.0.1 if
# you use this.)
#listen-address=
# If you want dnsmasq to provide only DNS service on an interface,
# configure it as shown above, and then use the following line to
# disable DHCP and TFTP on it.
#no-dhcp-interface=

# On systems which support it, dnsmasq binds the wildcard address,
# even when it is listening on only some interfaces. It then discards
# requests that it shouldn't reply to. This has the advantage of
# working even when interfaces come and go and change address. If you
# want dnsmasq to really bind only the interfaces it is listening on,
# uncomment this option. About the only time you may need this is when
# running another nameserver on the same machine.
#bind-interfaces

# If you don't want dnsmasq to read /etc/hosts, uncomment the
# following line.
#no-hosts
# or if you want it to read another file, as well as /etc/hosts, use
# this.
#addn-hosts=/etc/banner_add_hosts

# Set this (and domain: see below) if you want to have a domain
# automatically added to simple names in a hosts-file.
#expand-hosts

# Set the domain for dnsmasq. this is optional, but if it is set, it
# does the following things.
# 1) Allows DHCP hosts to have fully qualified domain names, as long
#    as the domain part matches this setting.
# 2) Sets the "domain" DHCP option thereby potentially setting the
#    domain of all systems configured by DHCP
# 3) Provides the domain part for "expand-hosts"
#domain=thekelleys.org.uk

# Set a different domain for a particular subnet
#domain=wireless.thekelleys.org.uk,192.168.2.0/24

# Same idea, but range rather than subnet
#domain=reserved.thekelleys.org.uk,192.168.3.100,192.168.3.200

# Uncomment this to enable the integrated DHCP server, you need
# to supply the range of addresses available for lease and optionally
# a lease time. If you have more than one network, you will need to
# repeat this for each network on which you want to supply DHCP
# service.
#dhcp-range=192.168.0.50,192.168.0.150,12h

# This is an example of a DHCP range where the netmask is given. This
# is needed for networks where we reach the dnsmasq DHCP server via a relay
# agent. If you don't know what a DHCP relay agent is, you probably
# don't need to worry about this.
#dhcp-range=192.168.0.50,192.168.0.150,255.255.255.0,12h

# This is an example of a DHCP range which sets a tag, so that
# some DHCP options may be set only for this network.
#dhcp-range=set:red,192.168.0.50,192.168.0.150

# Use this DHCP range only when the tag "green" is set.
#dhcp-range=tag:green,192.168.0.50,192.168.0.150,12h

# Specify a subnet which can't be used for dynamic address allocation,
# but is available for hosts with matching --dhcp-host lines. Note that
# dhcp-host declarations will be ignored unless there is a dhcp-range
# of some type for the subnet in question.
# In this case the netmask is implied (it comes from the network
# configuration on the machine running dnsmasq); it is possible to give
# an explicit netmask instead.
#dhcp-range=192.168.0.0,static

# Enable DHCPv6. Note that the prefix-length does not need to be specified
# and defaults to 64 if missing.
#dhcp-range=1234::2, 1234::500, 64, 12h

# Do Router Advertisements, BUT NOT DHCP for this subnet.
#dhcp-range=1234::, ra-only

# Do Router Advertisements, BUT NOT DHCP for this subnet, also try and
# add names to the DNS for the IPv6 address of SLAAC-configured dual-stack
# hosts. Use the DHCPv4 lease to derive the name, network segment and
# MAC address and assume that the host will also have an
# IPv6 address calculated using the SLAAC algorithm.
#dhcp-range=1234::, ra-names

# Do Router Advertisements, BUT NOT DHCP for this subnet.
# Set the lifetime to 48 hours. (Note: minimum lifetime is 2 hours.)
#dhcp-range=1234::, ra-only, 48h

# Do DHCP and Router Advertisements for this subnet. Set the A bit in the RA
# so that clients can use SLAAC addresses as well as DHCP ones.
#dhcp-range=1234::2, 1234::500, slaac

# Do Router Advertisements and stateless DHCP for this subnet. Clients will
# not get addresses from DHCP, but they will get other configuration information.
# They will use SLAAC for addresses.
#dhcp-range=1234::, ra-stateless

# Do stateless DHCP, SLAAC, and generate DNS names for SLAAC addresses
# from DHCPv4 leases.
#dhcp-range=1234::, ra-stateless, ra-names

# Do router advertisements for all subnets where we're doing DHCPv6
# Unless overridden by ra-stateless, ra-names, et al, the router
# advertisements will have the M and O bits set, so that the clients
# get addresses and configuration from DHCPv6, and the A bit reset, so the
# clients don't use SLAAC addresses.
#enable-ra

# Supply parameters for specified hosts using DHCP. There are lots
# of valid alternatives, so we will give examples of each. Note that
# IP addresses DO NOT have to be in the range given above, they just
# need to be on the same network. The order of the parameters in these
# do not matter, it's permissible to give name, address and MAC in any
# order.

# Always allocate the host with Ethernet address 11:22:33:44:55:66
# the IP address 192.168.0.60
#dhcp-host=11:22:33:44:55:66,192.168.0.60

# Always set the name of the host with hardware address
# 11:22:33:44:55:66 to be "fred"
#dhcp-host=11:22:33:44:55:66,fred

# Always give the host with Ethernet address 11:22:33:44:55:66
# the name fred and IP address 192.168.0.60 and lease time 45 minutes
#dhcp-host=11:22:33:44:55:66,fred,192.168.0.60,45m

# Give a host with Ethernet address 11:22:33:44:55:66 or
# 12:34:56:78:90:12 the IP address 192.168.0.60. Dnsmasq will assume
# that these two Ethernet interfaces will never be in use at the same
# time, and give the IP address to the second, even if it is already
# in use by the first. Useful for laptops with wired and wireless
# addresses.
#dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.0.60

# Give the machine which says its name is "bert" IP address
# 192.168.0.70 and an infinite lease
#dhcp-host=bert,192.168.0.70,infinite

# Always give the host with client identifier 01:02:02:04
# the IP address 192.168.0.60
#dhcp-host=id:01:02:02:04,192.168.0.60

# Always give the InfiniBand interface with hardware address
# 80:00:00:48:fe:80:00:00:00:00:00:00:f4:52:14:03:00:28:05:81 the
# ip address 192.168.0.61. The client id is derived from the prefix
# ff:00:00:00:00:00:02:00:00:02:c9:00 and the last 8 pairs of
# hex digits of the hardware address.
#dhcp-host=id:ff:00:00:00:00:00:02:00:00:02:c9:00:f4:52:14:03:00:28:05:81,192.168.0.61

# Always give the host with client identifier "marjorie"
# the IP address 192.168.0.60
#dhcp-host=id:marjorie,192.168.0.60

# Enable the address given for "judge" in /etc/hosts
# to be given to a machine presenting the name "judge" when
# it asks for a DHCP lease.
#dhcp-host=judge

# Never offer DHCP service to a machine whose Ethernet
# address is 11:22:33:44:55:66
#dhcp-host=11:22:33:44:55:66,ignore

# Ignore any client-id presented by the machine with Ethernet
# address 11:22:33:44:55:66. This is useful to prevent a machine
# being treated differently when running under different OS's or
# between PXE boot and OS boot.
#dhcp-host=11:22:33:44:55:66,id:*

# Send extra options which are tagged as "red" to
# the machine with Ethernet address 11:22:33:44:55:66
#dhcp-host=11:22:33:44:55:66,set:red

# Send extra options which are tagged as "red" to
# any machine with Ethernet address starting 11:22:33:
#dhcp-host=11:22:33:*:*:*,set:red

# Give a fixed IPv6 address and name to client with
# DUID 00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2
# Note the MAC addresses CANNOT be used to identify DHCPv6 clients.
# Note also that the [] around the IPv6 address are obligatory.
#dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1234::5]

# Ignore any clients which are not specified in dhcp-host lines
# or /etc/ethers. Equivalent to ISC "deny unknown-clients".
# This relies on the special "known" tag which is set when
# a host is matched.
#dhcp-ignore=tag:!known

# Send extra options which are tagged as "red" to any machine whose
# DHCP vendorclass string includes the substring "Linux"
#dhcp-vendorclass=set:red,Linux

# Send extra options which are tagged as "red" to any machine one
# of whose DHCP userclass strings includes the substring "accounts"
#dhcp-userclass=set:red,accounts

# Send extra options which are tagged as "red" to any machine whose
# MAC address matches the pattern.
#dhcp-mac=set:red,00:60:8C:*:*:*

# If this line is uncommented, dnsmasq will read /etc/ethers and act
# on the ethernet-address/IP pairs found there just as if they had
# been given as --dhcp-host options. Useful if you keep
# MAC-address/host mappings there for other purposes.
#read-ethers

# Send options to hosts which ask for a DHCP lease.
# See RFC 2132 for details of available options.
# Common options can be given to dnsmasq by name:
# run "dnsmasq --help dhcp" to get a list.
# Note that all the common settings, such as netmask and
# broadcast address, DNS server and default route, are given
# sane defaults by dnsmasq. You very likely will not need
# any dhcp-options. If you use Windows clients and Samba, there
# are some options which are recommended, they are detailed at the
# end of this section.

# Override the default route supplied by dnsmasq, which assumes the
# router is the same machine as the one running dnsmasq.
#dhcp-option=3,1.2.3.4

# Do the same thing, but using the option name
#dhcp-option=option:router,1.2.3.4

# Override the default route supplied by dnsmasq and send no default
# route at all. Note that this only works for the options sent by
# default (1, 3, 6, 12, 28) the same line will send a zero-length option
# for all other option numbers.
#dhcp-option=3

# Set the NTP time server addresses to 192.168.0.4 and 10.10.0.5
#dhcp-option=option:ntp-server,192.168.0.4,10.10.0.5

# Send DHCPv6 option. Note [] around IPv6 addresses.
#dhcp-option=option6:dns-server,[1234::77],[1234::88]

# Send DHCPv6 option for nameservers as the machine running
# dnsmasq and another.
#dhcp-option=option6:dns-server,[::],[1234::88]

# Ask client to poll for option changes every six hours. (RFC4242)
#dhcp-option=option6:information-refresh-time,6h

# Set option 58 client renewal time (T1). Defaults to half of the
# lease time if not specified. (RFC2132)
#dhcp-option=option:T1,1m

# Set option 59 rebinding time (T2). Defaults to 7/8 of the
# lease time if not specified. (RFC2132)
#dhcp-option=option:T2,2m

# Set the NTP time server address to be the same machine as
# is running dnsmasq
#dhcp-option=42,0.0.0.0

# Set the NIS domain name to "welly"
#dhcp-option=40,welly

# Set the default time-to-live to 50
#dhcp-option=23,50

# Set the "all subnets are local" flag
#dhcp-option=27,1

# Send the etherboot magic flag and then etherboot options (a string).
#dhcp-option=128,e4:45:74:68:00:00
#dhcp-option=129,NIC=eepro100

# Specify an option which will only be sent to the "red" network
# (see dhcp-range for the declaration of the "red" network)
# Note that the tag: part must precede the option: part.
#dhcp-option = tag:red, option:ntp-server, 192.168.1.1

# The following DHCP options set up dnsmasq in the same way as is specified
# for the ISC dhcpcd in
# https://web.archive.org/web/20040313070105/http://us1.samba.org/samba/ftp/docs/textdocs/DHCP-Server-Configuration.txt
# adapted for a typical dnsmasq installation where the host running
# dnsmasq is also the host running samba.
# you may want to uncomment some or all of them if you use
# Windows clients and Samba.
#dhcp-option=19,0           # option ip-forwarding off
#dhcp-option=44,0.0.0.0     # set netbios-over-TCP/IP nameserver(s) aka WINS server(s)
#dhcp-option=45,0.0.0.0     # netbios datagram distribution server
#dhcp-option=46,8           # netbios node type

# Send an empty WPAD option. This may be REQUIRED to get windows 7 to behave.
#dhcp-option=252,"\n"

# Send RFC-3397 DNS domain search DHCP option. WARNING: Your DHCP client
# probably doesn't support this......
#dhcp-option=option:domain-search,eng.apple.com,marketing.apple.com

# Send RFC-3442 classless static routes (note the netmask encoding)
#dhcp-option=121,192.168.1.0/24,1.2.3.4,10.0.0.0/8,5.6.7.8

# Send vendor-class specific options encapsulated in DHCP option 43.
# The meaning of the options is defined by the vendor-class so
# options are sent only when the client supplied vendor class
# matches the class given here. (A substring match is OK, so "MSFT"
# matches "MSFT" and "MSFT 5.0"). This example sets the
# mtftp address to 0.0.0.0 for PXEClients.
#dhcp-option=vendor:PXEClient,1,0.0.0.0

# Send microsoft-specific option to tell windows to release the DHCP lease
# when it shuts down. Note the "i" flag, to tell dnsmasq to send the
# value as a four-byte integer - that's what microsoft wants. See
# http://technet2.microsoft.com/WindowsServer/en/library/a70f1bb7-d2d4-49f0-96d6-4b7414ecfaae1033.mspx?mfr=true
#dhcp-option=vendor:MSFT,2,1i

# Send the Encapsulated-vendor-class ID needed by some configurations of
# Etherboot to allow it to recognise the DHCP server.
#dhcp-option=vendor:Etherboot,60,"Etherboot"

# Send options to PXELinux. Note that we need to send the options even
# though they don't appear in the parameter request list, so we need
# to use dhcp-option-force here.
# See http://syslinux.zytor.com/pxe.php#special for details.
# Magic number - needed before anything else is recognised
#dhcp-option-force=208,f1:00:74:7e
# Configuration file name
#dhcp-option-force=209,configs/common
# Path prefix
#dhcp-option-force=210,/tftpboot/pxelinux/files/
# Reboot time. (Note 'i' to send 32-bit value)
#dhcp-option-force=211,30i

# Set the boot filename for netboot/PXE. You will only need
# this if you want to boot machines over the network and you will need
# a TFTP server; either dnsmasq's built-in TFTP server or an
# external one. (See below for how to enable the TFTP server.)
#dhcp-boot=pxelinux.0

# The same as above, but use custom tftp-server instead of the machine running dnsmasq
#dhcp-boot=pxelinux,server.name,192.168.1.100

# Boot for iPXE. The idea is to send two different
# filenames, the first loads iPXE, and the second tells iPXE what to
# load. The dhcp-match sets the ipxe tag for requests from iPXE.
#dhcp-boot=undionly.kpxe
#dhcp-match=set:ipxe,175 # iPXE sends a 175 option.
#dhcp-boot=tag:ipxe,http://boot.ipxe.org/demo/boot.php

# Encapsulated options for iPXE. All the options are
# encapsulated within option 175
#dhcp-option=encap:175, 1, 5b         # priority code
#dhcp-option=encap:175, 176, 1b       # no-proxydhcp
#dhcp-option=encap:175, 177, string   # bus-id
#dhcp-option=encap:175, 189, 1b       # BIOS drive code
#dhcp-option=encap:175, 190, user     # iSCSI username
#dhcp-option=encap:175, 191, pass     # iSCSI password

# Test for the architecture of a netboot client. PXE clients are
# supposed to send their architecture as option 93. (See RFC 4578)
#dhcp-match=peecees, option:client-arch, 0 #x86-32
#dhcp-match=itanics, option:client-arch, 2 #IA64
#dhcp-match=hammers, option:client-arch, 6 #x86-64
#dhcp-match=mactels, option:client-arch, 7 #EFI x86-64

# Do real PXE, rather than just booting a single file, this is an
# alternative to dhcp-boot.
#pxe-prompt="What system shall I netboot?"
# or with timeout before first available action is taken:
#pxe-prompt="Press F8 for menu.", 60

# Available boot services for PXE.
#pxe-service=x86PC, "Boot from local disk"

# Loads <tftp-root>/pxelinux.0 from dnsmasq TFTP server.
#pxe-service=x86PC, "Install Linux", pxelinux

# Loads <tftp-root>/pxelinux.0 from TFTP server at 1.2.3.4.
# Beware this fails on old PXE ROMS.
#pxe-service=x86PC, "Install Linux", pxelinux, 1.2.3.4

# Use bootserver on network, found by multicast or broadcast.
#pxe-service=x86PC, "Install windows from RIS server", 1

# Use bootserver at a known IP address.
#pxe-service=x86PC, "Install windows from RIS server", 1, 1.2.3.4

# If you have multicast-FTP available,
# information for that can be passed in a similar way using options 1
# to 5. See page 19 of
# http://download.intel.com/design/archives/wfm/downloads/pxespec.pdf


# Enable dnsmasq's built-in TFTP server
#enable-tftp

# Set the root directory for files available via TFTP.
#tftp-root=/var/ftpd

# Do not abort if the tftp-root is unavailable
#tftp-no-fail

# Make the TFTP server more secure: with this set, only files owned by
# the user dnsmasq is running as will be sent over the net.
#tftp-secure

# This option stops dnsmasq from negotiating a larger blocksize for TFTP
# transfers. It will slow things down, but may rescue some broken TFTP
# clients.
#tftp-no-blocksize

# Set the boot file name only when the "red" tag is set.
#dhcp-boot=tag:red,pxelinux.red-net

# An example of dhcp-boot with an external TFTP server: the name and IP
# address of the server are given after the filename.
# Can fail with old PXE ROMS. Overridden by --pxe-service.
#dhcp-boot=/var/ftpd/pxelinux.0,boothost,192.168.0.3

# If there are multiple external tftp servers having the same name
# (using /etc/hosts) then that name can be specified as the
# tftp_servername (the third option to dhcp-boot) and in that
# case dnsmasq resolves this name and returns the resultant IP
# addresses in round robin fashion. This facility can be used to
# load balance the tftp load among a set of servers.
#dhcp-boot=/var/ftpd/pxelinux.0,boothost,tftp_server_name

# Set the limit on DHCP leases, the default is 150
#dhcp-lease-max=150

# The DHCP server needs somewhere on disk to keep its lease database.
# This defaults to a sane location, but if you want to change it, use
# the line below.
#dhcp-leasefile=/var/lib/misc/dnsmasq.leases

# Set the DHCP server to authoritative mode. In this mode it will barge in
# and take over the lease for any client which broadcasts on the network,
# whether it has a record of the lease or not. This avoids long timeouts
# when a machine wakes up on a new network. DO NOT enable this if there's
# the slightest chance that you might end up accidentally configuring a DHCP
# server for your campus/company. The ISC server uses
# the same option, and this URL provides more information:
# http://www.isc.org/files/auth.html
#dhcp-authoritative

# Set the DHCP server to enable DHCPv4 Rapid Commit Option per RFC 4039.
# In this mode it will respond to a DHCPDISCOVER message including a Rapid Commit
# option with a DHCPACK including a Rapid Commit option and fully committed address
# and configuration information. This must only be enabled if either the server is
# the only server for the subnet, or multiple servers are present and they each
# commit a binding for all clients.
#dhcp-rapid-commit

# Run an executable when a DHCP lease is created or destroyed.
# The arguments sent to the script are "add" or "del",
# then the MAC address, the IP address and finally the hostname
# if there is one.
#dhcp-script=/bin/echo

# Set the cachesize here.
#cache-size=150

# If you want to disable negative caching, uncomment this.
#no-negcache

# Normally responses which come from /etc/hosts and the DHCP lease
# file have Time-To-Live set as zero, which conventionally means
# do not cache further. If you are happy to trade lower load on the
# server for potentially stale data, you can set a time-to-live (in
# seconds) here.
#local-ttl=

# If you want dnsmasq to detect attempts by Verisign to send queries
# to unregistered .com and .net hosts to its sitefinder service and
# have dnsmasq instead return the correct NXDOMAIN response, uncomment
# this line. You can add similar lines to do the same for other
# registries which have implemented wildcard A records.
#bogus-nxdomain=64.94.110.11

# If you want to fix up DNS results from upstream servers, use the
# alias option. This only works for IPv4.
# This alias makes a result of 1.2.3.4 appear as 5.6.7.8
#alias=1.2.3.4,5.6.7.8
# and this maps 1.2.3.x to 5.6.7.x
#alias=1.2.3.0,5.6.7.0,255.255.255.0
# and this maps 192.168.0.10->192.168.0.40 to 10.0.0.10->10.0.0.40
#alias=192.168.0.10-192.168.0.40,10.0.0.0,255.255.255.0

# Change these lines if you want dnsmasq to serve MX records.

# Return an MX record named "maildomain.com" with target
# servermachine.com and preference 50
#mx-host=maildomain.com,servermachine.com,50

# Set the default target for MX records created using the localmx option.
#mx-target=servermachine.com

# Return an MX record pointing to the mx-target for all local
# machines.
#localmx

# Return an MX record pointing to itself for all local machines.
#selfmx

# Change the following lines if you want dnsmasq to serve SRV
# records. These are useful if you want to serve ldap requests for
# Active Directory and other windows-originated DNS requests.
# See RFC 2782.
# You may add multiple srv-host lines.
# The fields are <name>,<target>,<port>,<priority>,<weight>
# If the domain part is missing from the name (so that it just has the
# service and protocol sections) then the domain given by the domain=
# config option is used. (Note that expand-hosts does not need to be
# set for this to work.)

# A SRV record sending LDAP for the example.com domain to
# ldapserver.example.com port 389
#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389

# A SRV record sending LDAP for the example.com domain to
# ldapserver.example.com port 389 (using domain=)
#domain=example.com
#srv-host=_ldap._tcp,ldapserver.example.com,389

# Two SRV records for LDAP, each with different priorities
#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,1
#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,2

# A SRV record indicating that there is no LDAP server for the domain
# example.com
#srv-host=_ldap._tcp.example.com

# The following line shows how to make dnsmasq serve an arbitrary PTR
# record. This is useful for DNS-SD. (Note that the
# domain-name expansion done for SRV records does _not_
# occur for PTR records.)
#ptr-record=_http._tcp.dns-sd-services,"New Employee Page._http._tcp.dns-sd-services"

# Change the following lines to enable dnsmasq to serve TXT records.
# These are used for things like SPF and zeroconf. (Note that the
# domain-name expansion done for SRV records does _not_
# occur for TXT records.)

# Example SPF.
#txt-record=example.com,"v=spf1 a -all"

# Example zeroconf
#txt-record=_http._tcp.example.com,name=value,paper=A4

# Provide an alias for a "local" DNS name. Note that this _only_ works
# for targets which are names from DHCP or /etc/hosts. Give host
# "bert" another name, bertrand
#cname=bertrand,bert

# For debugging purposes, log each DNS query as it passes through
# dnsmasq.
#log-queries

# Log lots of extra information about DHCP transactions.
#log-dhcp

# Include another lot of configuration options.
#conf-file=/etc/dnsmasq.more.conf
#conf-dir=/etc/dnsmasq.d

# Include all the files in a directory except those ending in .bak
#conf-dir=/etc/dnsmasq.d,.bak

# Include all files in a directory which end in .conf
conf-dir=/config/dnsmasq.d/,*.conf

# If a DHCP client claims that its name is "wpad", ignore that.
# This fixes a security hole. See CERT Vulnerability VU#598349
#dhcp-name-match=set:wpad-ignore,wpad
#dhcp-ignore-names=tag:wpad-ignore
23
dnsmasq/dnsmasq.d/rre.conf
Normal file
@ -0,0 +1,23 @@
server=10.2.0.1
local=/rre.nu/
expand-hosts
domain=rre.nu

# public cnames
cname=push.rre.nu,npm.rre.nu
cname=git.rre.nu,npm.rre.nu
cname=home.rre.nu,npm.rre.nu
cname=nextcloud.rre.nu,npm.rre.nu
cname=nodered.rre.nu,npm.rre.nu
cname=rre.nu,npm.rre.nu
cname=zigbee2mqtt.rre.nu,npm.rre.nu
cname=grafana.rre.nu,npm.rre.nu
cname=nv.rre.nu,npm.rre.nu
cname=esphome.rre.nu,npm.rre.nu
cname=unifi.rre.nu,npm.rre.nu
cname=prometheus.rre.nu,npm.rre.nu
cname=unpoller.rre.nu,serrano.rre.nu
cname=loki.rre.nu,serrano.rre.nu
cname=db.rre.nu,serrano.rre.nu
cname=mqtt.rre.nu,serrano.rre.nu
cname=foto.rre.nu,serrano.rre.nu
53
dnsmasq/hosts/hosts
Normal file
@ -0,0 +1,53 @@
#
# hosts         This file describes a number of hostname-to-address
#               mappings for the TCP/IP subsystem. It is mostly
#               used at boot time, when no name servers are running.
#               On small systems, this file can be used instead of a
#               "named" name server.
# Syntax:
#
# IP-Address  Full-Qualified-Hostname  Short-Hostname
#

127.0.0.1       localhost

# special IPv6 addresses
::1             localhost ipv6-localhost ipv6-loopback

fe00::0         ipv6-localnet

ff00::0         ipv6-mcastprefix
ff02::1         ipv6-allnodes
ff02::2         ipv6-allrouters
ff02::3         ipv6-allhosts

## rre infrastructure
10.2.0.2   ldap
10.2.0.22  poblano
10.2.0.23  npm
10.2.0.23  serrano
10.2.0.24  coronado
10.2.0.101 salt
10.2.0.102 pepper


# lab k3s cluster
10.2.0.250 demo-k3s
10.2.0.251 srv1
10.2.0.252 srv2
10.2.0.253 srv3

# MetalLB
# 10.2.0.60-70
10.2.0.60  public
10.2.0.61  private
10.2.0.63  gitea

# Client network
10.0.10.3  sw2
10.0.10.4  ap1
10.0.10.5  ap2
10.0.10.6  ap3
10.0.10.7  ap4
10.0.10.20 woody
10.0.10.30 laserjet
102
lib.sls
Normal file
@ -0,0 +1,102 @@
{% macro pod_deploy(pod) -%}

{%- if pillar['pods'][pod]['ip'] is defined %}
Configure static IP for {{ pod }}:
  cmd.run:
    - name: nmcli connection modify {{ pillar['pods'][pod]['ip']['dev'] }} +ipv4.addresses "{{ pillar['pods'][pod]['ip']['address'] }}/24"
    - unless: ip -brief a | grep "{{ pillar['pods'][pod]['ip']['address'] }}/24"

Restart connection for {{ pod }} IP config:
  cmd.run:
    - name: nmcli connection down {{ pillar['pods'][pod]['ip']['dev'] }}; nmcli connection up {{ pillar['pods'][pod]['ip']['dev'] }}
    - onchanges:
      - cmd: Configure static IP for {{ pod }}
{%- endif %}

Create pod manage file for {{ pod }}:
  file.managed:
    - name: "/root/bin/{{ pod }}.sh"
    - source: salt://pod/files/pod.sh.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0750"
    - context:
        pod: {{ pod }}
        args: {{ pillar['pods'][pod] }}

Create pod {{ pod }}:
  cmd.run:
    - name: "/root/bin/{{ pod }}.sh --create"
    - unless: podman pod exists {{ pod }}

Create pod {{ pod }} unit file:
  cmd.run:
    - name: "/root/bin/{{ pod }}.sh --generate-systemd"
    - onchanges:
      - cmd: Create pod {{ pod }}

Run user daemon reload for {{ pod }} unit:
  cmd.run:
    - name: systemctl daemon-reload
    - onchanges:
      - cmd: Create pod {{ pod }} unit file

Start pod {{ pod }}:
  service.running:
    - name: {{ pod }}.service
    - enable: True
    - onchanges:
      - cmd: Run user daemon reload for {{ pod }} unit
{% endmacro -%}
{% macro container_deploy(container) -%}

{%- if pillar['containers'][container]['ip'] is defined %}
Configure static IP for {{ container }}:
  cmd.run:
    - name: nmcli connection modify {{ pillar['containers'][container]['ip']['dev'] }} +ipv4.addresses "{{ pillar['containers'][container]['ip']['address'] }}/24"
    - unless: ip -brief a | grep "{{ pillar['containers'][container]['ip']['address'] }}/24"

Restart connection for {{ container }} IP config:
  cmd.run:
    - name: nmcli connection down {{ pillar['containers'][container]['ip']['dev'] }}; nmcli connection up {{ pillar['containers'][container]['ip']['dev'] }}
    - onchanges:
      - cmd: Configure static IP for {{ container }}
{%- endif %}

Create container manage file for {{ container }}:
  file.managed:
    - name: "/root/bin/{{ container }}.sh"
    - source: salt://containers/files/container.sh.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0750"
    - context:
        container: {{ container }}
        args: {{ pillar['containers'][container] }}

Create container {{ container }}:
  cmd.run:
    - name: "/root/bin/{{ container }}.sh --create"
    - unless: podman container exists {{ container }}

Create container {{ container }} unit file:
  cmd.run:
    - name: podman generate systemd --name {{ container }} > /etc/systemd/system/{{ container }}.service
    - onchanges:
      - cmd: Create container {{ container }}

Run user daemon reload for {{ container }} unit:
  cmd.run:
    - name: systemctl daemon-reload
    - onchanges:
      - cmd: Create container {{ container }} unit file

Start container {{ container }}:
  service.running:
    - name: {{ container }}.service
    - enable: True
    - onchanges:
      - cmd: Run user daemon reload for {{ container }} unit
{% endmacro -%}
21
pod/dnsmasq.sls
Normal file
@ -0,0 +1,21 @@
{% from 'lib.sls' import pod_deploy with context %}

Create dnsmasq config directory:
  file.recurse:
    - name: /srv/dnsmasq
    - source: salt://dnsmasq
    - template: jinja
    - user: root
    - group: root
    - dir_mode: "0755"
    - file_mode: "0644"
    - include_empty: True

{{ pod_deploy('dnsmasq') }}

Make sure dnsmasq is running:
  service.running:
    - name: dnsmasq
    - enable: True
    - watch:
      - file: Create dnsmasq config directory
1
pod/files/99-rfxcom-serial.rules
Normal file
@ -0,0 +1 @@
SUBSYSTEM=="tty", ATTRS{idVendor}=="0403", ATTRS{idProduct}=="6001", SYMLINK+="rfxcom", MODE="0666"
1
pod/files/99-zigbee-serial.rules
Normal file
@ -0,0 +1 @@
SUBSYSTEM=="tty", ATTRS{idVendor}=="0451", ATTRS{idProduct}=="16a8", SYMLINK+="zigbee-serial", MODE="0666"
10
pod/files/check_image_updates.service.jinja
Normal file
@ -0,0 +1,10 @@
[Unit]
Description=Check for image updates on configured podman containers

[Service]
Type=oneshot
User=root
ExecStart=/root/bin/check_image_updates.sh

[Install]
WantedBy=default.target
35
pod/files/check_image_updates.sh.jinja
Normal file
@ -0,0 +1,35 @@
#!/usr/bin/env bash

URL="{{ pillar['podman']['gotify']['url'] }}"
TOKEN="{{ pillar['podman']['gotify']['token'] }}"
TITLE="Updates on $HOSTNAME"
PRIORITY="{{ pillar['podman']['gotify']['priority'] }}"

{% raw -%}
function check_update(){
  # Read the container's name, image name and image ID into an array (comma-separated).
  IFS=',' read -r -a container_info <<< "$(podman container inspect $1 --format '{{ .Name }},{{ .ImageName }},{{ .Image }}')"

  # Pull the image and compare IDs; a mismatch means an update is available.
  podman pull "${container_info[1]}"
  if [[ "$(podman image inspect "${container_info[1]}" --format "{{.Id}}")" != "${container_info[2]}" ]];then
    containers[${#containers[@]}]="${container_info[0]}"
  fi
}

IFS=$'\n'
for line in $(podman container ls -q); do
  check_update "$line"
done
if [[ "${#containers[@]}" == "0" ]]; then
  exit
fi

MESSAGE=$(cat << EOM
Following ${#containers[@]} container(s) have updates:
${containers[*]}
EOM
)

curl "$URL/message?token=$TOKEN" -F "title=$TITLE" -F "priority=$PRIORITY" -F "message=$MESSAGE"
echo " "
{% endraw -%}
9
pod/files/check_image_updates.timer.jinja
Normal file
@ -0,0 +1,9 @@
[Unit]
Description=Check podman image updates timer

[Timer]
OnCalendar=Sun, 12:00
Unit=check_image_updates.service

[Install]
WantedBy=timers.target
158
pod/files/pod.sh.jinja
Normal file
@ -0,0 +1,158 @@
#!/usr/bin/env bash

function pull_image(){
{%- for container, cargs in pillar['pods'][pod]['containers'].items() %}
  if ! podman image exists {{ cargs['image'] }}:{{ cargs['tag'] }}; then
    podman pull {{ cargs['image'] }}:{{ cargs['tag'] }}
  fi
{%- endfor %}
}

function create_pod() {
  if ! podman pod exists {{ pod }};then
    podman pod create \
      --name {{ pod }} \
      --infra-name {{ pod }}-infra \
{%- if args['ports'] is defined %}
{%- for ports in args['ports'] %}
      -p {{ ports['host'] }}:{{ ports['container'] }}{% if ports['protocol'] is defined %}/{{ ports['protocol'] }}{% endif %} \
{%- endfor %}
{%- endif %}

  fi
{%- for container, cargs in pillar['pods'][pod]['containers'].items() %}
  if ! podman container exists {{ pod }}-{{ container }};then
    podman container create \
      --name {{ pod }}-{{ container }} \
      --pod {{ pod }} \
{%- if cargs['podman_options'] is defined %}
{%- for option, value in cargs['podman_options'].items() %}
      --{{ option }} {{ value }} \
{%- endfor %}
{%- endif %}
{%- if cargs['volumes'] is defined %}
{%- for volume, mount in cargs['volumes'].items() %}
      -v {{ volume }}:{{ mount }} \
{%- endfor %}
{%- endif %}
{%- if cargs['env'] is defined %}
{%- for key, value in cargs['env'].items() %}
      -e {{ key }}={{ value }} \
{%- endfor %}
{%- endif %}
{%- if cargs['devices'] is defined %}
{%- for key, value in cargs['devices'].items() %}
      --device {{ key }}:{{ value }} \
{%- endfor %}
{%- endif %}
      {{ cargs['image'] }}:{{ cargs['tag'] }}{%- if cargs['run'] is defined %} \
      {{ cargs['run'] }}
{%- endif %}
  fi
{%- endfor %}
}

function generate_systemd_unit_file() {
  cd /etc/systemd/system
  podman generate systemd --files --name {{ pod }} --pod-prefix="" --container-prefix=""
}

function check_update() {
  ImageUpdate=0
{%- for container, cargs in pillar['pods'][pod]['containers'].items() %}
  podman pull {{ cargs['image'] }}:{{ cargs['tag'] }}
  if [[ "$(podman image inspect {{ cargs['image'] }}:{{ cargs['tag'] }} --format "{% raw %}{{.Id}}{% endraw %}")" == "$(podman inspect {{ pod }}-{{ container }} --format "{% raw %}{{ .Image }}{% endraw %}")" ]];then
    echo "No image updates available for {{ pod }}-{{ container }}"
  else
    echo "Image update available for {{ pod }}-{{ container }}"
    ImageUpdate=1
  fi
{%- endfor %}
  return $ImageUpdate
}

function update() {
  systemctl stop {{ pod }}
  podman pod rm {{ pod }}
  create_pod
  generate_systemd_unit_file
  systemctl daemon-reload
  systemctl enable --now {{ pod }}.service
}

function printHelp(){
  cat << EOF
Usage: ${0##*/} [options...]
 -h,-?, --help                Show help and exit
 -p, --pull                   pull container images for all containers in pod {{ pod }}
 -c, --create                 create {{ pod }} pod
 -s, --start                  start and enable {{ pod }} pod
 -S, --stop                   stop {{ pod }} pod
 -i, --is-running             check to see if pod service is running
 -u, --check-update           check if there are image updates available
 --update                     perform image update if one is available
 -g, --generate-systemd       generate systemd service unit file
EOF
}

while :; do
  case $1 in
    -h|-\?|--help)
      printHelp
      exit
      ;;
    -p|--pull)
      pull_image
      shift
      ;;
    -c|--create)
      create_pod
      shift
      ;;
    -s|--start)
      systemctl enable --now {{ pod }}.service
      shift
      ;;
    -S|--stop)
      systemctl stop {{ pod }}.service
      shift
      ;;
    -i|--is-running)
      systemctl is-active {{ pod }}.service
      exit $?
      ;;
    -g|--generate-systemd)
      generate_systemd_unit_file
      shift
      ;;
    -u|--check-update)
      check_update
      shift
      ;;
    --update)
      update
      shift
      ;;
    --) #End of all options
      shift
      break
      ;;
    -?*)
      printf "'%s' is not a valid option\n" "$1" >&2
      exit 1
      ;;
    *) #Break out of case, no more options
      break
  esac
done
11
pod/gotify.sls
Normal file
@ -0,0 +1,11 @@
{% from 'lib.sls' import pod_deploy with context %}

Create gotify data directory:
  file.directory:
    - name: /srv/gotify
    - user: root
    - group: root
    - mode: "0755"

{{ pod_deploy('gotify') }}
46
pod/init.sls
Normal file
@ -0,0 +1,46 @@
Copy check image update script:
  file.managed:
    - name: /root/bin/check_image_updates.sh
    - source: salt://pod/files/check_image_updates.sh.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0700"

Create check image update service:
  file.managed:
    - name: /etc/systemd/system/check_image_updates.service
    - source: salt://pod/files/check_image_updates.service.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0644"

Create check image update timer:
  file.managed:
    - name: /etc/systemd/system/check_image_updates.timer
    - source: salt://pod/files/check_image_updates.timer.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0644"

Systemd daemon reload for image update:
  cmd.run:
    - name: systemctl daemon-reload
    - watch:
      - file: Create check image update service
      - file: Create check image update timer

Start check image update timer:
  service.running:
    - name: check_image_updates.timer
    - enable: True

{% if pillar.pods is defined %}
include:
{%- for pod, args in pillar.pods.items() %}
  - pod.{{ pod }}
{%- endfor %}
{%- endif %}
24
pod/loki.sls
Normal file
@ -0,0 +1,24 @@
{% from 'lib.sls' import pod_deploy with context %}

Create loki root directory:
  file.directory:
    - name: /srv/loki
    - user: 10001
    - group: root
    - mode: "0755"

Create loki data directory:
  file.directory:
    - name: /srv/loki/data
    - user: 10001
    - group: root
    - mode: "0755"

Create loki config directory:
  file.directory:
    - name: /srv/loki/config
    - user: 10001
    - group: root
    - mode: "0755"

{{ pod_deploy('loki') }}
11
pod/mariadb.sls
Normal file
@ -0,0 +1,11 @@
{% from 'lib.sls' import pod_deploy with context %}

Create data directory for mariadb:
  file.directory:
    - name: /srv/mariadb
    - user: 999
    - group: 999
    - mode: "0755"

{{ pod_deploy('mariadb') }}
17
pod/mosquitto.sls
Normal file
@ -0,0 +1,17 @@
{% from 'lib.sls' import pod_deploy with context %}

Create mosquitto configuration directory:
  file.directory:
    - name: /etc/mosquitto
    - user: 1883
    - group: 1883
    - mode: "0750"

Create mosquitto data directory:
  file.directory:
    - name: /srv/mosquitto
    - user: 1883
    - group: 1883
    - mode: "0750"

{{ pod_deploy('mosquitto') }}
10
pod/nextcloud.sls
Normal file
@ -0,0 +1,10 @@
{% from 'lib.sls' import pod_deploy with context %}

Create nextcloud data directory:
  file.directory:
    - name: /srv/nextcloud
    - user: 33
    - group: 33
    - mode: "0755"

{{ pod_deploy('nextcloud') }}
25
pod/nodered.sls
Normal file
@ -0,0 +1,25 @@
{% from 'lib.sls' import pod_deploy with context %}

Create udev-rule for rfxcom usb dongle:
  file.managed:
    - name: /etc/udev/rules.d/99-rfxcom-serial.rules
    - source: salt://pod/files/99-rfxcom-serial.rules
    - user: root
    - group: root
    - mode: "0644"

Reload udev rules for rfxcom dongle:
  cmd.run:
    - name: udevadm control --reload-rules
    - onchanges:
      - file: Create udev-rule for rfxcom usb dongle

Create data folder for nodered:
  file.directory:
    - name: /srv/nodered
    - user: 1000
    - group: 1000
    - mode: "0750"

{{ pod_deploy('nodered') }}
25
pod/piwigo.sls
Normal file
@ -0,0 +1,25 @@
{% from 'lib.sls' import pod_deploy with context %}

Create piwigo root directory:
  file.directory:
    - name: /srv/piwigo
    - user: {{ pillar.pods.piwigo.containers.main.env.PUID }}
    - group: {{ pillar.pods.piwigo.containers.main.env.GUID }}
    - mode: "0750"

Create piwigo config directory:
  file.directory:
    - name: /srv/piwigo/config
    - user: {{ pillar.pods.piwigo.containers.main.env.PUID }}
    - group: {{ pillar.pods.piwigo.containers.main.env.GUID }}
    - mode: "0750"

Create piwigo gallery directory:
  file.directory:
    - name: /srv/piwigo/gallery
    - user: {{ pillar.pods.piwigo.containers.main.env.PUID }}
    - group: {{ pillar.pods.piwigo.containers.main.env.GUID }}
    - mode: "0750"

{{ pod_deploy('piwigo') }}
47
pod/prometheus.sls
Normal file
@ -0,0 +1,47 @@
{% from 'lib.sls' import pod_deploy with context %}

Create prometheus root directory:
  file.directory:
    - name: /srv/prometheus
    - user: nobody
    - group: root
    - mode: "0755"

Create prometheus data directory:
  file.directory:
    - name: /srv/prometheus/data
    - user: nobody
    - group: root
    - mode: "0755"

Create prometheus config directory:
  file.directory:
    - name: /srv/prometheus/config
    - user: nobody
    - group: root
    - mode: "0755"

Create alertmanager root directory:
  file.recurse:
    - name: /srv/alertmanager
    - source: salt://containers/files/alertmanager
    - user: nobody
    - group: root
    - dir_mode: "0755"
    - file_mode: "0644"

Create alertmanager data directory:
  file.directory:
    - name: /srv/alertmanager/data
    - user: nobody
    - group: nobody
    - mode: "0755"

Create alertmanager config directory:
  file.directory:
    - name: /srv/alertmanager/config
    - user: nobody
    - group: nobody
    - mode: "0755"

{{ pod_deploy('prometheus') }}
10
pod/unifi.sls
Normal file
@ -0,0 +1,10 @@
{% from 'lib.sls' import pod_deploy with context %}

Create unifi data directory:
  file.directory:
    - name: /srv/unifi
    - user: root
    - group: root
    - mode: "0750"

{{ pod_deploy('unifi') }}
99
restic/files/backup.sh.jinja
Normal file
@ -0,0 +1,99 @@
#!/bin/bash

{%- if pillar['pods']['mariadb'] is defined %}
umask 0077
BACKUP_DIR={{ pillar.pods.mariadb.containers.main.backup_dir }}
databases=$(podman exec -it mariadb-main mysql -B -u root -p{{ pillar.pods.mariadb.containers.main.env.MYSQL_ROOT_PASSWORD }} -e "SHOW DATABASES;" | tr -d "| " | grep -v Database)

for db in ${databases[@]}; do
  # Strip the trailing carriage return left by the pseudo-tty output.
  db=${db::-1}
  if [[ "$db" != "information_schema" ]] && [[ "$db" != "performance_schema" ]] && [[ "$db" != "mysql" ]] && [[ "$db" != _* ]] && [[ "$db" != "sys" ]]; then
    echo "Dumping database: $db"
    podman exec -it mariadb-main mysqldump -u root -p{{ pillar.pods.mariadb.containers.main.env.MYSQL_ROOT_PASSWORD }} --databases $db | gzip > ${BACKUP_DIR}/$(date +"%Y-%m-%d_%H-%M-%S")_$db-sql.gz
  fi
done
# Delete the files older than 3 days
echo "removing old mysql dumps"
find $BACKUP_DIR/* -type f -name "*-sql.gz" -mtime +3 -exec rm {} \;
umask 0022
{%- endif %}

{%- if pillar['containers']['freeipa'] is defined %}
echo "Stopping FreeIPA"
systemctl stop freeipa.service
{%- endif %}

podman run --rm \
  --name=restic \
  --hostname="$HOSTNAME" \
  -v /root/.restic.password:/restic-password:ro \
  -v /root:/root \
{%- if pillar.restic.repository is defined %}
  -v {{ pillar.restic.mount }}{{ pillar.restic.suffix }}:/repo \
{%- endif %}
{%- for target in pillar.restic.targets %}
  -v {{ target }}:{{ target }} \
{%- endfor %}
  -v /root/.restic.password:/root/.restic.password \
  {{ pillar.restic.image.url }}:{{ pillar.restic.image.tag }} \
  restic \
  backup \
  --password-file=/restic-password \
{%- if pillar.restic.repository is defined %}
  --repo=/repo \
{%- else %}
  -r sftp:{{ pillar.restic.user }}@{{ pillar.restic.host }}:{{ pillar.restic.mount }}{{ pillar.restic.suffix }} \
{%- endif %}
  --exclude="*.tmp" \
  --exclude="lost+found" \
  --exclude="Cache" \
  --exclude="cache" \
  --exclude=".cache" \
  --exclude="tmp" \
  --exclude="temp" \
  --exclude="Temp" \
  --exclude="/home/*/go" \
  --exclude="/home/*/.local/share/virtualenv" \
  --exclude="/home/*/.local/share/virtualenvs" \
  --exclude="/home/*/VirtualBox VMs" \
  --exclude="/home/*/.mozilla/firefox/*/minidumps" \
  --exclude="/home/*/.mozilla/firefox/*/storage" \
  --exclude="/home/*/.mozilla/firefox/*/extensions.sqlite" \
  --exclude="/home/*/.mozilla/firefox/*/urlclassifier3.sqlite" \
  --exclude="/home/*/.config/google-chrome/*/Local Storage" \
  --exclude="/home/*/.config/google-chrome/*/Session Storage" \
  --exclude="/home/*/.config/google-chrome/*/Application Cache" \
  --exclude="/home/*/.config/google-chrome/*/History" \
  --exclude="/home/*/.config/google-chrome/*/History-journal" \
  --exclude="/home/*/.config/google-chrome/*/History Provider Cache" \
  --exclude="/home/*/.local/share/flatpak" \
  --exclude="/home/*/.var/app/com.slack.Slack" \
  --exclude="/home/*/.local/share/Trash" \
  --exclude="/home/*/.config/Microsoft/Microsoft Teams" \
  --exclude="/home/*/.wine" \
  --exclude="/home/*/.vim/bundle" \
  --exclude="/home/*/snap" \
  --exclude="/home/*/Downloads" \
  --exclude="/home/*/Nextcloud" \
  --exclude="/home/*/git" \
  --exclude="/srv/backup" \
  --verbose \
{%- for target in pillar.restic.targets %}
  {{ target }} \
{%- endfor %}

return_code=$?

{%- if pillar['containers']['freeipa'] is defined %}
echo "Starting FreeIPA"
systemctl start freeipa.service
{%- endif %}

if [[ $return_code -eq 0 ]]; then
  exit 0
fi

# GOTIFY_URL, GOTIFY_TOKEN, GOTIFY_TITLE and GOTIFY_PRIO are expected to be
# set in the environment of the unit that runs this script.
MESSAGE="$(journalctl -u restic-backup.service -p 5 --since today)"
curl "$GOTIFY_URL/message?token=$GOTIFY_TOKEN" -F "title=$GOTIFY_TITLE" -F "priority=$GOTIFY_PRIO" -F "message=$MESSAGE"
16
restic/files/mysql-backup.sh
Normal file
@ -0,0 +1,16 @@
#!/bin/bash
umask 0077
BACKUP_DIR={{ pillar.containers.mariadb.backup_dir }}
databases=$(podman exec -it mariadb mysql -B -u root -p{{ pillar.containers.mariadb.env.MYSQL_ROOT_PASSWORD }} -e "SHOW DATABASES;" | tr -d "| " | grep -v Database)

for db in ${databases[@]}; do
  # Strip the trailing carriage return left by the pseudo-tty output.
  db=${db::-1}
  if [[ "$db" != "information_schema" ]] && [[ "$db" != "performance_schema" ]] && [[ "$db" != "mysql" ]] && [[ "$db" != _* ]] && [[ "$db" != "sys" ]]; then
    echo "Dumping database: $db"
    podman exec -it mariadb mysqldump -u root -p{{ pillar.containers.mariadb.env.MYSQL_ROOT_PASSWORD }} --databases $db | gzip > ${BACKUP_DIR}/$(date +"%Y-%m-%d_%H-%M-%S")_$db-sql.gz
  fi
done
# Delete the files older than 3 days
echo "removing old mysql dumps"
find $BACKUP_DIR/* -type f -name "*-sql.gz" -mtime +3 -exec rm {} \;
umask 0022
11
restic/files/restic-backup.service.jinja
Normal file
@ -0,0 +1,11 @@
[Unit]
Description=Run restic backup

[Service]
Type=oneshot
User={{ pillar.restic.user }}
ExecStart={{ salt['user.info'](pillar.restic.user).home }}/bin/backup.sh

[Install]
WantedBy=default.target
11
restic/files/restic-backup.timer.jinja
Normal file
@ -0,0 +1,11 @@
[Unit]
Description=Restic backup timer

[Timer]
OnCalendar={{ pillar.restic.OnCalendar }}
RandomizedDelaySec=300
Unit=restic-backup.service

[Install]
WantedBy=timers.target
8
restic/files/restic.automount.jinja
Normal file
@ -0,0 +1,8 @@
[Unit]
Description=Automount for restic repository

[Automount]
Where={{ pillar['restic']['mount'] }}

[Install]
WantedBy=multi-user.target
23
restic/files/restic.jinja
Normal file
@ -0,0 +1,23 @@
#!/bin/bash
if [[ "$EUID" -ne 0 ]]; then
    echo "Needs to be run as root"
    exit 1
fi

podman run --rm \
    --name=restic \
    --hostname="$HOSTNAME" \
    -v /root/.restic.password:/restic-password:ro \
{%- if pillar.restic.repository is defined %}
    -v {{ pillar.restic.mount }}{{ pillar.restic.suffix }}:/repo \
{%- endif %}
    -v /root:/root \
    {{ pillar.restic.image.url }}:{{ pillar.restic.image.tag }} \
    restic \
    --password-file=/restic-password \
{%- if pillar.restic.repository is defined %}
    --repo=/repo \
{%- else %}
    -r sftp:{{ pillar.restic.user }}@{{ pillar.restic.host }}:{{ pillar.restic.mount }}{{ pillar.restic.suffix }} \
{%- endif %}
    "$@"
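Everything the wrapper interpolates lives under the restic pillar key. A minimal sketch with hypothetical values for the sftp case (no repository key defined, so the -r sftp: branch is rendered):

restic:
  user: restic
  host: backup.example.com
  mount: /srv/restic
  suffix: /repo1
  password: changeme
  image:
    url: docker.io/restic/restic
    tag: latest

With that in place, running "restic snapshots" as root goes through podman against the sftp repository.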
11
restic/files/restic.mount.jinja
Normal file
11
restic/files/restic.mount.jinja
Normal file
@ -0,0 +1,11 @@
[Unit]
Description=Mount the USB disk used as restic repository

[Mount]
What=/dev/disk/by-uuid/{{ pillar['restic']['repository']['disk_uuid'] }}
Where={{ pillar['restic']['mount'] }}
Type=auto
Options=defaults

[Install]
WantedBy=multi-user.target
77
restic/init.sls
Normal file
77
restic/init.sls
Normal file
@ -0,0 +1,77 @@
{%- if pillar['containers']['mariadb'] is defined %}
Create mariadb dump script:
  file.managed:
    - name: /root/bin/mysql-backup.sh
    - source: salt://restic/files/mysql-backup.sh
    - template: jinja
    - user: root
    - group: root
    - mode: "0700"

Create backup destination for mariadb:
  file.directory:
    - name: {{ pillar.containers.mariadb.backup_dir }}
    - user: root
    - group: root
    - mode: "0700"
{%- endif %}

Create restic password-file:
  file.managed:
    - name: /root/.restic.password
    - contents:
      - {{ pillar.restic.password }}
    - user: root
    - group: root
    - mode: "0600"

Create restic script:
  file.managed:
    - name: /usr/local/bin/restic
    - source: salt://restic/files/restic.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0755"

Create backup script:
  file.managed:
    - name: /root/bin/backup.sh
    - source: salt://restic/files/backup.sh.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0700"

Create the restic backup service unit:
  file.managed:
    - name: /etc/systemd/system/restic-backup.service
    - source: salt://restic/files/restic-backup.service.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0644"

Create the restic backup timer:
  file.managed:
    - name: /etc/systemd/system/restic-backup.timer
    - source: salt://restic/files/restic-backup.timer.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0644"

Run systemctl daemon reload for restic:
  cmd.run:
    - name: systemctl daemon-reload
    - onchanges:
      - file: Create the restic backup service unit
      - file: Create the restic backup timer

Start the restic backup timer:
  service.running:
    - name: restic-backup.timer
    - enable: True
    - onchanges:
      - cmd: Run systemctl daemon reload for restic
39
restic/repository.sls
Normal file
39
restic/repository.sls
Normal file
@ -0,0 +1,39 @@
{% set escape_command = [ "systemd-escape -p", pillar['restic']['mount']]|join(" ") %}
{% set unit_name = salt['cmd.shell'](escape_command) %}

Create restic repository mount unit:
  file.managed:
    - name: /etc/systemd/system/{{ unit_name }}.mount
    - source: salt://restic/files/restic.mount.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0644"

Create restic repository automount unit:
  file.managed:
    - name: /etc/systemd/system/{{ unit_name }}.automount
    - source: salt://restic/files/restic.automount.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0644"

Run systemd daemon reload for repository:
  cmd.run:
    - name: systemctl daemon-reload
    - onchanges:
      - file: Create restic repository mount unit
      - file: Create restic repository automount unit

Start restic repository automount unit:
  service.running:
    - name: {{ unit_name }}.automount
    - enable: True

Add authorized keys for root-restic:
  ssh_auth.present:
    - user: root
    - enc: ssh-ed25519
    - comment: restic-backup
    - names: {{ pillar.restic.repository.auth_keys }}
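When the repository is a local disk, the extra pillar keys consumed here and by restic.mount.jinja look roughly like this (hypothetical UUID and key material):

restic:
  repository:
    disk_uuid: 2f1e6c1a-9f6b-4d7e-8c5a-3d2b1a0f9e8d
    auth_keys:
      - AAAAC3NzaC1lZDI1NTE5AAAA...client-pubkey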
8
top.sls
Normal file
8
top.sls
Normal file
@ -0,0 +1,8 @@
base:
  '*':
    - common.users
    - pod
    - containers
    - restic
  'woody':
    - wifi-voucher
3
wifi-voucher/files/voucher.env.jinja
Normal file
3
wifi-voucher/files/voucher.env.jinja
Normal file
@ -0,0 +1,3 @@
{% for key, value in pillar['voucher']['config'].items() -%}
{{ key }}="{{ value }}"
{% endfor -%}
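The environment file is generated one KEY="value" line per entry of pillar['voucher']['config']; a sketch with hypothetical keys:

voucher:
  config:
    VOUCHER_SSID: guest
    VOUCHER_PRINTER: labelprinter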
13
wifi-voucher/files/wifi-voucher.service
Normal file
13
wifi-voucher/files/wifi-voucher.service
Normal file
@ -0,0 +1,13 @@
[Unit]
Description=WIFI-Voucher system for my guest network
After=network.target

[Service]
Type=simple
Restart=always
User=voucher
ExecStart=/usr/local/bin/voucher
EnvironmentFile=/etc/default/wifi-voucher

[Install]
WantedBy=multi-user.target
4
wifi-voucher/init.sls
Normal file
4
wifi-voucher/init.sls
Normal file
@ -0,0 +1,4 @@
include:
  - wifi-voucher.packages
  - wifi-voucher.user
  - wifi-voucher.service
9
wifi-voucher/packages.sls
Normal file
9
wifi-voucher/packages.sls
Normal file
@ -0,0 +1,9 @@
Install CUPS and bsd tools:
  pkg.installed:
    - pkgs:
      - cups
      - cups-bsd

Install wifi-voucher:
  pip.installed:
    - name: https://git.rre.nu/jonas/wifi-voucher/raw/branch/main/dist/voucher-{{ pillar['voucher']['version'] }}-py39-none-any.whl
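The wheel filename embeds pillar['voucher']['version']; a hypothetical value:

voucher:
  version: 0.1.0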
4
wifi-voucher/remove.sls
Normal file
4
wifi-voucher/remove.sls
Normal file
@ -0,0 +1,4 @@
remove voucher user:
  user.absent:
    - name: voucher
    - purge: True
31
wifi-voucher/service.sls
Normal file
31
wifi-voucher/service.sls
Normal file
@ -0,0 +1,31 @@
WIFI-Voucher Unit environment file:
  file.managed:
    - name: /etc/default/wifi-voucher
    - source: salt://wifi-voucher/files/voucher.env.jinja
    - template: jinja
    - user: root
    - group: voucher
    - mode: "0640"

WIFI-Voucher unit service file:
  file.managed:
    - name: /etc/systemd/system/wifi-voucher.service
    - source: salt://wifi-voucher/files/wifi-voucher.service
    - user: root
    - group: root
    - mode: "0644"

SystemD Reload:
  cmd.run:
    - name: systemctl --system daemon-reload
    - onchanges:
      - file: WIFI-Voucher unit service file

Start wifi-voucher:
  service.running:
    - name: wifi-voucher
    - enable: True
    - watch:
      - file: WIFI-Voucher Unit environment file
      - file: WIFI-Voucher unit service file
      - cmd: SystemD Reload
8
wifi-voucher/user.sls
Normal file
8
wifi-voucher/user.sls
Normal file
@ -0,0 +1,8 @@
WIFI-Voucher user:
  user.present:
    - name: voucher
    - fullname: wifi-voucher system
    - home: /home/voucher
    - shell: /bin/bash
    - groups:
      - gpio