first commit

pod/dnsmasq.sls (new file, 21 lines)
@@ -0,0 +1,21 @@
{% from 'lib.sls' import pod_deploy with context %}

Create dnsmasq config directory:
  file.recurse:
    - name: /srv/dnsmasq
    - source: salt://dnsmasq
    - template: jinja
    - user: root
    - group: root
    - dir_mode: "0755"
    - file_mode: "0644"
    - include_empty: True

{{ pod_deploy('dnsmasq') }}

Make sure dnsmasq is running:
  service.running:
    - name: dnsmasq
    - enable: True
    - watch:
      - file: Create dnsmasq config directory

pod/files/99-rfxcom-serial.rules (new file, 1 line)
@@ -0,0 +1 @@
SUBSYSTEM=="tty", ATTRS{idVendor}=="0403", ATTRS{idProduct}=="6001", SYMLINK+="rfxcom", MODE="0666"

pod/files/99-zigbee-serial.rules (new file, 1 line)
@@ -0,0 +1 @@
SUBSYSTEM=="tty", ATTRS{idVendor}=="0451", ATTRS{idProduct}=="16a8", SYMLINK+="zigbee-serial", MODE="0666"

pod/files/check_image_updates.service.jinja (new file, 10 lines)
@@ -0,0 +1,10 @@
[Unit]
Description=Check for image updates on configured podman containers

[Service]
Type=oneshot
User=root
ExecStart=/root/bin/check_image_updates.sh

[Install]
WantedBy=default.target

pod/files/check_image_updates.sh.jinja (new file, 35 lines)
@@ -0,0 +1,35 @@
#!/usr/bin/env bash

URL="{{ pillar['podman']['gotify']['url'] }}"
TOKEN="{{ pillar['podman']['gotify']['token'] }}"
TITLE="Updates on $HOSTNAME"
PRIORITY="{{ pillar['podman']['gotify']['priority'] }}"

{% raw -%}
function check_update(){
    IFS=',' read -r -a container_info <<< "$(podman container inspect "$1" --format '{{ .Name }},{{ .ImageName }},{{ .Image }}')"

    podman pull "${container_info[1]}"
    if [[ "$(podman image inspect "${container_info[1]}" --format "{{.Id}}")" != "${container_info[2]}" ]]; then
        containers[${#containers[@]}]="${container_info[0]}"
    fi
}

IFS=$'\n'
for line in $(podman container ls -q); do
    check_update "$line"
done
if [[ "${#containers[@]}" == "0" ]]; then
    exit
fi

MESSAGE=$(cat << EOM
The following ${#containers[@]} container(s) have updates:
${containers[*]}
EOM
)

curl "$URL/message?token=$TOKEN" -F "title=$TITLE" -F "priority=$PRIORITY" -F "message=$MESSAGE"
echo " "
{% endraw -%}

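The update-check script above pulls its Gotify settings from pillar. A minimal pillar sketch with the keys the template expects; the pillar file name, URL and values shown here are placeholders and are not part of this commit:

# e.g. pillar/podman.sls (hypothetical path); only the key names come from the template
podman:
  gotify:
    url: https://gotify.example.org   # placeholder server URL
    token: AbCdEfGh123456             # placeholder application token
    priority: 5                       # placeholder message priority
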
pod/files/check_image_updates.timer.jinja (new file, 9 lines)
@@ -0,0 +1,9 @@
[Unit]
Description=Check for image updates timer

[Timer]
OnCalendar=Sun 12:00
Unit=check_image_updates.service

[Install]
WantedBy=timers.target

pod/files/pod.sh.jinja (new file, 158 lines)
@@ -0,0 +1,158 @@
#!/usr/bin/env bash

function pull_image(){
{%- for container, cargs in pillar['pods'][pod]['containers'].items() %}
    if ! podman image exists {{ cargs['image'] }}:{{ cargs['tag'] }}; then
        podman pull {{ cargs['image'] }}:{{ cargs['tag'] }}
    fi
{%- endfor %}
}


function create_pod() {
    if ! podman pod exists {{ pod }}; then
        podman pod create \
            --name {{ pod }} \
            --infra-name {{ pod }}-infra \
{%- if args['ports'] is defined %}
{%- for ports in args['ports'] %}
            -p {{ ports['host'] }}:{{ ports['container'] }}{% if ports['protocol'] is defined %}/{{ ports['protocol'] }}{% endif %} \
{%- endfor %}
{%- endif %}

    fi
{%- for container, cargs in pillar['pods'][pod]['containers'].items() %}
    if ! podman container exists {{ pod }}-{{ container }}; then
        podman container create \
            --name {{ pod }}-{{ container }} \
            --pod {{ pod }} \
{%- if cargs['podman_options'] is defined %}
{%- for option, value in cargs['podman_options'].items() %}
            --{{ option }} {{ value }} \
{%- endfor %}
{%- endif %}
{%- if cargs['volumes'] is defined %}
{%- for volume, mount in cargs['volumes'].items() %}
            -v {{ volume }}:{{ mount }} \
{%- endfor %}
{%- endif %}
{%- if cargs['env'] is defined %}
{%- for key, value in cargs['env'].items() %}
            -e {{ key }}={{ value }} \
{%- endfor %}
{%- endif %}
{%- if cargs['devices'] is defined %}
{%- for key, value in cargs['devices'].items() %}
            --device {{ key }}:{{ value }} \
{%- endfor %}
{%- endif %}
            {{ cargs['image'] }}:{{ cargs['tag'] }}{%- if cargs['run'] is defined %} \
            {{ cargs['run'] }}
{%- endif %}
    fi
{%- endfor %}
}

function generate_systemd_unit_file() {
    cd /etc/systemd/system
    podman generate systemd --files --name {{ pod }} --pod-prefix="" --container-prefix=""
}

function check_update() {
    ImageUpdate=0
{%- for container, cargs in pillar['pods'][pod]['containers'].items() %}
    podman pull {{ cargs['image'] }}:{{ cargs['tag'] }}
    if [[ "$(podman image inspect {{ cargs['image'] }}:{{ cargs['tag'] }} --format "{% raw %}{{.Id}}{% endraw %}")" == "$(podman inspect {{ pod }}-{{ container }} --format "{% raw %}{{ .Image }}{% endraw %}")" ]]; then
        echo "No image updates available for {{ pod }}-{{ container }}"
    else
        echo "Image update available for {{ pod }}-{{ container }}"
        ImageUpdate=1
    fi
{%- endfor %}
    return $ImageUpdate
}

function update() {
    systemctl stop {{ pod }}
    podman pod rm {{ pod }}
    create_pod
    generate_systemd_unit_file
    systemctl daemon-reload
    systemctl enable --now {{ pod }}.service
}


function printHelp(){
    cat << EOF
Usage: ${0##*/} [options..]
 -h, -?, --help          show help and exit
 -p, --pull              pull container images for all containers in pod {{ pod }}
 -c, --create            create {{ pod }} pod
 -s, --start             start and enable {{ pod }} pod
 -S, --stop              stop {{ pod }} pod
 -i, --is-running        check to see if pod service is running
 -u, --check-update      check if there are image updates available
 --update                perform image update if it exists
 -g, --generate-systemd  generate systemd service unit files
EOF
}


while :; do
    case $1 in
        -h|-\?|--help)
            printHelp
            exit
            ;;
        -p|--pull)
            pull_image
            shift
            ;;
        -v|--volumes)
            create_volumes
            shift
            ;;
        -c|--create)
            create_pod
            shift
            ;;
        -s|--start)
            systemctl --user enable --now {{ pod }}.service
            shift
            ;;
        -S|--stop)
            systemctl --user stop {{ pod }}.service
            shift
            ;;
        -i|--is-running)
            systemctl --user is-active {{ pod }}.service
            exit $?
            shift
            ;;
        -g|--generate-systemd)
            generate_systemd_unit_file
            shift
            ;;
        -u|--check-update)
            check_update
            shift
            ;;
        --update)
            update
            shift
            ;;
        --) # End of all options
            shift
            break
            ;;
        -?*)
            printf "'%s' is not a valid option\n" "$1" >&2
            exit 1
            ;;
        *) # Break out of case, no more options
            break
    esac
    shift
done

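pod.sh.jinja is rendered once per pod, with `pod` (the pod name) and `args` (that pod's pillar entry) supplied by the `pod_deploy` macro from lib.sls; lib.sls is not part of this commit, so that interface is assumed. Below is a sketch of the pillar data the template iterates over, using only key names that appear in the template; the gotify pod and all values are purely illustrative:

pods:
  gotify:
    ports:                      # rendered as -p host:container[/protocol]
      - host: 8080
        container: 80
        protocol: tcp
    containers:
      main:                     # the container is created as gotify-main
        image: docker.io/gotify/server
        tag: latest
        podman_options:         # extra flags, rendered as --<option> <value>
          memory: 256m
        volumes:                # host path to mount point, rendered as -v
          /srv/gotify: /app/data
        env:                    # rendered as -e KEY=value
          TZ: Europe/Stockholm
        # devices: and run: are also supported, rendered as --device and a trailing command
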
pod/gotify.sls (new file, 11 lines)
@@ -0,0 +1,11 @@
{% from 'lib.sls' import pod_deploy with context %}

Create gotify data directory:
  file.directory:
    - name: /srv/gotify
    - user: root
    - group: root
    - mode: "0755"

{{ pod_deploy('gotify') }}

pod/init.sls (new file, 46 lines)
@@ -0,0 +1,46 @@
Copy check image update script:
  file.managed:
    - name: /root/bin/check_image_updates.sh
    - source: salt://pod/files/check_image_updates.sh.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0700"

Create check image update service:
  file.managed:
    - name: /etc/systemd/system/check_image_updates.service
    - source: salt://pod/files/check_image_updates.service.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0644"

Create check image update timer:
  file.managed:
    - name: /etc/systemd/system/check_image_updates.timer
    - source: salt://pod/files/check_image_updates.timer.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0644"

Systemd daemon reload for image update:
  cmd.run:
    - name: systemctl daemon-reload
    - watch:
      - file: Create check image update service
      - file: Create check image update timer

Start check image update timer:
  service.running:
    - name: check_image_updates.timer
    - enable: True

{% if pillar.pods is defined %}
include:
{%- for pod, args in pillar.pods.items() %}
  - pod.{{ pod }}
{%- endfor %}
{%- endif %}

pod/loki.sls (new file, 24 lines)
@@ -0,0 +1,24 @@
{% from 'lib.sls' import pod_deploy with context %}

Create loki root directory:
  file.directory:
    - name: /srv/loki
    - user: 10001
    - group: root
    - mode: "0755"

Create loki data directory:
  file.directory:
    - name: /srv/loki/data
    - user: 10001
    - group: root
    - mode: "0755"

Create loki config directory:
  file.directory:
    - name: /srv/loki/config
    - user: 10001
    - group: root
    - mode: "0755"

{{ pod_deploy('loki') }}

pod/mariadb.sls (new file, 11 lines)
@@ -0,0 +1,11 @@
{% from 'lib.sls' import pod_deploy with context %}

Create data directory for mariadb:
  file.directory:
    - name: /srv/mariadb
    - user: 999
    - group: 999
    - mode: "0755"

{{ pod_deploy('mariadb') }}

pod/mosquitto.sls (new file, 17 lines)
@@ -0,0 +1,17 @@
{% from 'lib.sls' import pod_deploy with context %}

Create mosquitto configuration directory:
  file.directory:
    - name: /etc/mosquitto
    - user: 1883
    - group: 1883
    - mode: "0750"

Create mosquitto data directory:
  file.directory:
    - name: /srv/mosquitto
    - user: 1883
    - group: 1883
    - mode: "0750"

{{ pod_deploy('mosquitto') }}

pod/nextcloud.sls (new file, 10 lines)
@@ -0,0 +1,10 @@
{% from 'lib.sls' import pod_deploy with context %}

Create nextcloud data directory:
  file.directory:
    - name: /srv/nextcloud
    - user: 33
    - group: 33
    - mode: "0755"

{{ pod_deploy('nextcloud') }}

pod/nodered.sls (new file, 25 lines)
@@ -0,0 +1,25 @@
{% from 'lib.sls' import pod_deploy with context %}

Create udev-rule for rfxcom usb dongle:
  file.managed:
    - name: /etc/udev/rules.d/99-rfxcom-serial.rules
    - source: salt://pod/files/99-rfxcom-serial.rules
    - user: root
    - group: root
    - mode: "0644"

Reload udev rules for rfxcom dongle:
  cmd.run:
    - name: udevadm control --reload-rules
    - onchanges:
      - file: Create udev-rule for rfxcom usb dongle

Create data folder for nodered:
  file.directory:
    - name: /srv/nodered
    - user: 1000
    - group: 1000
    - mode: "0750"

{{ pod_deploy('nodered') }}

pod/piwigo.sls (new file, 25 lines)
@@ -0,0 +1,25 @@
{% from 'lib.sls' import pod_deploy with context %}

Create piwigo root directory:
  file.directory:
    - name: /srv/piwigo
    - user: {{ pillar.pods.piwigo.containers.main.env.PUID }}
    - group: {{ pillar.pods.piwigo.containers.main.env.GUID }}
    - mode: "0750"

Create piwigo config directory:
  file.directory:
    - name: /srv/piwigo/config
    - user: {{ pillar.pods.piwigo.containers.main.env.PUID }}
    - group: {{ pillar.pods.piwigo.containers.main.env.GUID }}
    - mode: "0750"

Create piwigo gallery directory:
  file.directory:
    - name: /srv/piwigo/gallery
    - user: {{ pillar.pods.piwigo.containers.main.env.PUID }}
    - group: {{ pillar.pods.piwigo.containers.main.env.GUID }}
    - mode: "0750"

{{ pod_deploy('piwigo') }}

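piwigo.sls reads the owning UID and GID for these directories from the piwigo container's env pillar. A fragment showing where those values come from; the numbers are placeholders and the rest of the container definition is omitted:

pods:
  piwigo:
    containers:
      main:
        env:
          PUID: "1000"   # placeholder UID, also used as directory owner above
          GUID: "1000"   # placeholder GID, also used as directory group above
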
pod/prometheus.sls (new file, 47 lines)
@@ -0,0 +1,47 @@
{% from 'lib.sls' import pod_deploy with context %}

Create prometheus root directory:
  file.directory:
    - name: /srv/prometheus
    - user: nobody
    - group: root
    - mode: "0755"

Create prometheus data directory:
  file.directory:
    - name: /srv/prometheus/data
    - user: nobody
    - group: root
    - mode: "0755"

Create prometheus config directory:
  file.directory:
    - name: /srv/prometheus/config
    - user: nobody
    - group: root
    - mode: "0755"

Create alertmanager root directory:
  file.recurse:
    - name: /srv/alertmanager
    - source: salt://containers/files/alertmanager
    - user: nobody
    - group: root
    - dir_mode: "0755"
    - file_mode: "0644"

Create alertmanager data directory:
  file.directory:
    - name: /srv/alertmanager/data
    - user: nobody
    - group: nobody
    - mode: "0755"

Create alertmanager config directory:
  file.directory:
    - name: /srv/alertmanager/config
    - user: nobody
    - group: nobody
    - mode: "0755"

{{ pod_deploy('prometheus') }}

pod/unifi.sls (new file, 10 lines)
@@ -0,0 +1,10 @@
{% from 'lib.sls' import pod_deploy with context %}

Create unifi data directory:
  file.directory:
    - name: /srv/unifi
    - user: root
    - group: root
    - mode: "0750"

{{ pod_deploy('unifi') }}