Compare commits

250 commits, comparing DnsMigrati... to master
@@ -22,13 +22,6 @@ make create-dev

## Rebuild

To rebuild from scratch, Ansible needs a Vault server that is up and unsealed.
You can rebuild a standalone Vault server from a Consul database snapshot with:

```sh
make vault-dev FILE=./yourconsulsnaphot.snap
```

## Architecture

```mermaid
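For context, a snapshot like the one consumed above can be produced with Consul's built-in snapshot command (a minimal sketch; it assumes a reachable, healthy Consul server backing Vault):

```sh
# Capture a point-in-time snapshot of Consul's state (Vault's storage backend)
consul snapshot save ./yourconsulsnaphot.snap
# Sanity-check the archive before feeding it to `make vault-dev`
consul snapshot inspect ./yourconsulsnaphot.snap
```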
Vagrantfile (vendored, 35 lines)
@@ -1,9 +1,10 @@
Vagrant.configure('2') do |config|
  if Vagrant.has_plugin?('vagrant-cachier')
    config.cache.scope = 'machine'
    config.cache.enable :pacman
  end
  config.vm.provider :libvirt do |libvirt|
-   libvirt.management_network_domain = "ducamps-dev.win"
+   libvirt.management_network_domain = "lan.ducamps.dev"

  end
  config.vm.define "oscar-dev" do |c|

@@ -19,14 +20,20 @@ Vagrant.configure('2') do |config|
    # Provider
    c.vm.provider "libvirt" do |libvirt, override|
-     libvirt.memory = 1024
+     libvirt.memory = 2048
      libvirt.cpus = 2
    end
    c.vm.provision "ansible" do |bootstrap|
      bootstrap.playbook = "ansible/playbooks/bootstrap.yml"
      bootstrap.galaxy_roles_path = "ansible/roles"
      bootstrap.limit = "oscar-dev"
      bootstrap.extra_vars = { ansible_python_interpreter: "/usr/bin/python3" }
    end
  end

  config.vm.define "merlin-dev" do |c|
    # Box definition
-   c.vm.box = "generic/rocky9"
+   c.vm.box = "archlinux/archlinux"
    # Config options
    c.vm.synced_folder ".", "/vagrant", disabled: true
    c.ssh.insert_key = true

@@ -36,15 +43,21 @@ Vagrant.configure('2') do |config|
    # Provider
    c.vm.provider "libvirt" do |libvirt, override|
-     libvirt.memory = 1024
+     libvirt.memory = 512
      libvirt.cpus = 2

    end
    c.vm.provision "ansible" do |bootstrap|
      bootstrap.playbook = "ansible/playbooks/bootstrap.yml"
      bootstrap.galaxy_roles_path = "ansible/roles"
      bootstrap.limit = "merlin-dev"
      bootstrap.extra_vars = { ansible_python_interpreter: "/usr/bin/python3" }
    end
  end

  config.vm.define "gerard-dev" do |c|
    # Box definition
-   c.vm.box = "debian/bookworm64"
+   c.vm.box = "archlinux/archlinux"
    # Config options

    c.vm.synced_folder ".", "/vagrant", disabled: true

@@ -54,9 +67,15 @@ Vagrant.configure('2') do |config|
    # instance_raw_config_args
    # Provider
    c.vm.provider "libvirt" do |libvirt, override|
-     libvirt.memory = 1024
+     libvirt.memory = 2048
      libvirt.cpus = 2
    end
    c.vm.provision "ansible" do |bootstrap|
      bootstrap.playbook = "ansible/playbooks/bootstrap.yml"
      bootstrap.galaxy_roles_path = "ansible/roles"
      bootstrap.limit = "gerard-dev"
      bootstrap.extra_vars = { ansible_python_interpreter: "/usr/bin/python3" }
    end
  end

  config.vm.define "nas-dev" do |c|

@@ -71,14 +90,14 @@ Vagrant.configure('2') do |config|
    # Provider
    c.vm.provider "libvirt" do |libvirt, override|
-     libvirt.memory = 1024
+     libvirt.memory = 2048
      libvirt.cpus = 2
    end

    c.vm.provision "ansible" do |bootstrap|
      bootstrap.playbook = "ansible/playbooks/bootstrap.yml"
      bootstrap.galaxy_roles_path = "ansible/roles"
-     bootstrap.limit = "all"
+     bootstrap.limit = "nas-dev"
      bootstrap.extra_vars = { ansible_python_interpreter: "/usr/bin/python3" }
    end
  end
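A quick way to exercise one of the machines defined above (a sketch; it assumes libvirt and the vagrant-libvirt plugin are installed):

```sh
# Boot a single dev box and run the bootstrap playbook declared in its provision block
vagrant up oscar-dev --provider=libvirt
# Re-run provisioning alone after changing ansible/playbooks/bootstrap.yml
vagrant provision oscar-dev
```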
@@ -99,7 +99,7 @@ host_key_checking = False
#sudo_flags = -H -S -n

# SSH timeout
-#timeout = 10
+timeout = 30

# default user to use for playbooks if user is not specified
# (/usr/bin/ansible will use current user as default)

@@ -136,7 +136,7 @@ host_key_checking = False

# If set, configures the path to the Vault password file as an alternative to
# specifying --vault-password-file on the command line.
-#vault_password_file = /path/to/vault_password_file
+vault_password_file = ./misc/vault-keyring-client.sh

# format of string {{ ansible_managed }} available within Jinja2
# templates indicates to users editing templates files will be replaced.
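With `vault_password_file` pointing at the keyring helper script, vault-encrypted files in this repo decrypt without extra flags; for instance (assuming the script can fetch the password):

```sh
# ansible.cfg supplies the password file, so no --vault-password-file is needed
ansible-vault view ansible/group_vars/all/vault_users
```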
ansible/group_vars/DNS (Normal file, 24 lines)

@@ -0,0 +1,24 @@
pdns_config:
  local-address: "127.0.0.1"
  local-port: "5300"
  api: yes
  api-key:

pdns_backends:
  gsqlite3:
    dnssec: yes
    database: "/var/lib/powerdns/powerdns.sqlite"
pdns_sqlite_databases_locations:
  - "/var/lib/powerdns/powerdns.sqlite"

pdns_rec_config:
  forward-zones:
    - "{{ consul_domain }}=127.0.0.1:8600"
    - "ducamps.win=192.168.1.10"
    - "{{ domain.name }}=192.168.1.5"
    - "lan.{{ domain.name }}=192.168.1.5"
    - "1.168.192.in-addr.arpa=192.168.1.5:5300"

  local-address: "{{ hostvars[inventory_hostname]['ansible_'+ default_interface].ipv4.address|default(ansible_default_ipv4.address) }}"
  dnssec: "off"
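Given this split (recursor in front, authoritative PowerDNS on port 5300, Consul on 8600), resolution can be spot-checked with dig; a sketch matching the addresses and ports in the forward-zones above:

```sh
# Ask the authoritative server directly on its non-standard port
dig @192.168.1.5 -p 5300 lan.ducamps.eu SOA
# Ask the recursor, which should forward *.consul to the local Consul agent
dig @192.168.1.5 consul.service.consul
```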
ansible/group_vars/NAS/NAS (Normal file, 90 lines)

@@ -0,0 +1,90 @@
NAS_nomad_folder:
  - name: actualbudget
  - name: archiso
    owner: 1000001
  - name: backup
    owner: 1000001
  - name: borgmatic
  - name: crowdsec
    owner: 1000001
  - name: dms
    owner: 1000001
  - name: filestash
    owner: 1000
  - name: gitea
    owner: 1000000
  - name: grafana
    owner: 472
  - name: hass
    owner: 1000001
  - name: homer
    owner: 1000001
  - name: immich/cache
  - name: immich/upload
  - name: jellyfin
    owner: 1000001
  - name: loki
    owner: 10001
  - name: mealie
    owner: 1000001
  - name: mosquito
    owner: 1883
  - name: pacoloco
    owner: 1000001
  - name: pdns-auth
    owner: 1000001
  - name: pdns-admin
    owner: 1000001
  - name: pihole
    owner: 999
  - name: prometheus
    owner: 65534
  - name: prowlarr
    owner: 1000001
  - name: radicale
    owner: 1000001
  - name: openldap
    owner: 1001
  - name: registry/ghcr
  - name: registry/docker
  - name: syncthing
    owner: 1000001
  - name: traefik
    owner: 1000001
  - name: tt-rss
    owner: 1000001
  - name: vaultwarden
    owner: 1000001
  - name: zigbee2mqtt
    owner: 1000001

nas_bind_target: "/exports"

nas_bind_source:
  - dest: "{{ nas_bind_target }}/nomad"
    source: /data/data1/nomad
  - dest: "{{ nas_bind_target }}/music"
    source: /data/data1/music
  - dest: "{{ nas_bind_target }}/download"
    source: /data/data1/download
  - dest: "{{ nas_bind_target }}/media/serie"
    source: /data/data2/serie
  - dest: "{{ nas_bind_target }}/media/film"
    source: /data/data3/film
  - dest: "{{ nas_bind_target }}/photo"
    source: /data/data1/photo
  - dest: "{{ nas_bind_target }}/homes"
    source: /data/data1/homes
  - dest: "{{ nas_bind_target }}/ebook"
    source: /data/data1/ebook
  - dest: "{{ nas_bind_target }}/media/download/serie"
    source: /data/data1/download/serie
  - dest: "{{ nas_bind_target }}/media/download/film"
    source: /data/data1/download/film
  - dest: "{{ nas_bind_target }}/music/download/"
    source: /data/data1/download/music
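The `nas_bind_source` entries above describe bind mounts from the data disks into the export root; once applied they can be verified with findmnt (a sketch, assuming the role has set the binds up):

```sh
# List everything mounted under the export root, bind mounts included
findmnt -R /exports
# Check a single bind target and its backing source
findmnt /exports/nomad
```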
ansible/group_vars/NAS/ftp (Normal file, 1 line)

@@ -0,0 +1 @@
vsftpd_config: {}
ansible/group_vars/NAS/nfs (Normal file, 15 lines)

@@ -0,0 +1,15 @@
nfs_cluster_list: "{% for server in groups['all']%} {% if hostvars[server]['ansible_default_ipv4']['address'] is defined %} {{hostvars[server]['ansible_' + hostvars[server]['nfs_iface']|default('')].ipv4.address|default(hostvars[server]['ansible_default_ipv4']['address'],true)}}{{ nfs_options }} {% endif %} {%endfor%}"
nfs_options: "(rw,no_root_squash,crossmnt,async,insecure_locks,sec=sys)"
nfs_consul_service: true
nfs_bind_target: "/exports"


nfs_exports:
  - "{{ nas_bind_target }} *(fsid=0,insecure,no_subtree_check)"
  - "{{ nas_bind_target }}/nomad {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/download {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/music {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/media {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/photo {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/homes {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/ebook {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
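`fsid=0` makes `/exports` the NFSv4 pseudo-root, so clients mount paths relative to it; a quick check from a cluster member (a sketch, using the `nfs.service.consul` name from the staging group_vars):

```sh
# Show what the NAS actually exports
showmount -e nfs.service.consul
# NFSv4 mount relative to the fsid=0 root, matching the "vers=4" client options
mount -t nfs -o vers=4 nfs.service.consul:/nomad /mnt/diskstation/nomad
```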
ansible/group_vars/NAS/nomad (Normal file, 1 line)

@@ -0,0 +1 @@
nomad_node_class: 'NAS'
ansible/group_vars/NAS/samba (Normal file, 25 lines)

@@ -0,0 +1,25 @@
samba_passdb_backend: tdbsam
samba_shares_root: /exports
samba_shares:
  - name: media
    comment: "media"
    write_list: "@NAS_media"
    browseable: true
  - name: ebook
    comment: "ebook"
    write_list: "@NAS_ebook"
    browseable: true
  - name: music
    comment: "music"
    write_list: "@NAS_music"
    browseable: true
  - name: photo
    comment: "photo"
    write_list: "@NAS_photo"
    browseable: true
  - name: download
    comment: "download"
    write_list: "@NAS_download"
    browseable: true
samba_load_homes: True
samba_homes_include: samba_homes_include.conf
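The share definitions can be validated without touching a client: testparm checks the rendered smb.conf and smbclient lists what is actually served (a sketch; user vincent comes from the users group_vars):

```sh
# Validate the generated Samba configuration
testparm -s
# List the shares exposed by the NAS
smbclient -L //localhost -U vincent
```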
@@ -42,35 +42,4 @@ nomad_datacenter: hetzner

consul_server: False
nomad_server: False
systemd_mounts:
  diskstation_nomad:
    share: diskstation.ducamps.win:/volume2/nomad
    mount: /mnt/diskstation/nomad
    type: nfs
    options:
      - " "
    automount: true
  hetzner_storage:
    share: //u304977.your-storagebox.de/backup
    mount: /mnt/hetzner/storagebox
    type: cifs
    options:
      - credentials=/etc/creds/hetzner_credentials
      - uid= 1024
      - gid= 10
      - vers=3.0
      - mfsymlinks
    automount: true

credentials_files:
  1:
    type: smb
    path: /etc/creds/hetzner_credentials
    username: u304977
    password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/storage:hetzner') }}"

systemd_mounts_enabled:
  - diskstation_nomad
  - hetzner_storage
ansible/group_vars/VPS/mount (Normal file, 28 lines)

@@ -0,0 +1,28 @@
systemd_mounts:
  diskstation_nomad:
    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/nomad"
    mount: /mnt/diskstation/nomad
    type: nfs
    options:
      - "vers=4"
    automount: "{{ env_automount }}"
    enabled: true
  hetzner_storage:
    share: //u304977.your-storagebox.de/backup
    mount: /mnt/hetzner/storagebox
    type: cifs
    options:
      - credentials=/etc/creds/hetzner_credentials
      - uid=100001
      - gid=10
      - vers=3.0
      - mfsymlinks
    automount: "{{ env_automount }}"
    enabled: true

credentials_files:
  1:
    type: smb
    path: /etc/creds/hetzner_credentials
    username: u304977
    password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/storage:hetzner') }}"
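Each `systemd_mounts` entry presumably ends up as a systemd mount unit named after its mount path; systemd-escape shows the expected unit name and systemctl confirms it (a sketch):

```sh
# Derive the unit name systemd expects for this mount point
systemd-escape --path --suffix=mount /mnt/diskstation/nomad   # -> mnt-diskstation-nomad.mount
# Inspect the generated unit
systemctl status mnt-diskstation-nomad.mount
```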
ansible/group_vars/VPS/vault_mount (Normal file, 12 lines)

@@ -0,0 +1,12 @@
$ANSIBLE_VAULT;1.1;AES256
31303539336464336239376636623862303066336438383739356163616431643366386565366361
3264336232303135336334333663326234393832343235640a313638323963666631353836373531
61636261623662396330653135326238363630363938323166303861313563393063386161393238
3231336232663533640a333763643864363939336566333731353031313739616633623537386435
39613934663133613733356433616162363430616439623830663837343530623937656434366663
33656466396263616132356337326236383761363834663363643163343231366563333865656433
39316365663734653734363362363539623636666261333534313935343566646166316233623535
32323831626463656337313266343634303830633936396232663966373264313762346235646665
61333139363039363436393962666365336334663164306230393433636664623934343039323637
33383036323233646237343031633030353330633734353232343633623864333834646239346362
643634303135656333646235343366636361
ansible/group_vars/VPS/vps (Normal file, 45 lines)

@@ -0,0 +1,45 @@
# defaults file for ansible-arch-provissionning
partition_table:
  - device: "/dev/sda"
    label: gpt
    settings:
      - number: 1
        part_end: 64MB
        flags: [boot, esp]
        fstype: vfat
        format: yes
      - number: 2
        part_start: 512MB
        part_end: 1524MB
        flags: []
        fstype: swap
        format: yes
      - number: 3
        part_start: 1524MB
        flags: [lvm]
        fstype: ext4
        format: yes
#- device: "/dev/sdb"
  #settings:
  #- number: 1
    #name: home
    #fstype: ext4
    #format:
mount_table:
  - device: "/dev/sda"
    settings:
      - number: 3
        mountpath: /mnt
        fstype: ext4
      - number: 1
        mountpath: /mnt/boot
        fstype: vfat

#need vfat boot partition with esp label
provissionning_UEFI_Enable: True
#sssd_configure: False
nomad_datacenter: hetzner

consul_server: False
nomad_server: False
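The partition_table above corresponds roughly to this parted sequence (a hand-written sketch of what the provisioning role presumably drives, not its actual tasks):

```sh
parted -s /dev/sda mklabel gpt
# 1: EFI system partition, vfat, flagged boot/esp
parted -s /dev/sda mkpart primary fat32 1MiB 64MB
parted -s /dev/sda set 1 esp on
# 2: swap between 512MB and 1524MB
parted -s /dev/sda mkpart primary linux-swap 512MB 1524MB
# 3: ext4 with the lvm flag, taking the rest of the disk
parted -s /dev/sda mkpart primary ext4 1524MB 100%
parted -s /dev/sda set 3 lvm on
```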
@@ -1,36 +1,7 @@
ansible_python_interpreter: /usr/bin/python3
user:
  name: vincent
  home: /home/vincent
  uid: 1024
  mail: vincent@ducamps.win
  groups:
    - docker
  authorized_keys:
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINN5V9WPPi2/HwAQuDeaJO3hUPf8HxNMHqVmkf1pDjWg JuiceSSH
    - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDBrP9akjyailPU9tUMvKrtDsqjI191W1L95u3OFjBqqapXgbDVx1FVtSlIIKcCHZyTII1zgC7woZmNRpmaIJRh6N+VIuRrRs29xx2GUVc4pxflUwwIAK36hgZS3nqmA2biacmPR9HogZLZMcPtZdLhWGlLuUv1cWqbqW7UcDa0lbubCo2v4OQMx/zt37voKAZSkkbH9mVszH6eKxNFy1KXbLYhwXiKfYBnAHbivhiSkZUGV6D4HNj8Jx6IY1YF3bfwMXmt841Q/7OY+t3RTIS8ewvSF+jpQ7GKHBEsZTZUGwIoSyZFFvCgKQVOJu/ZJJS4HNkluilir9Sxtx2LRgy+HHQ251trnsVsJp3ts4uTiMkKJQy1PXy1ZvQXYkip9Af3vlXUMmTyVj8cv+No07G1rZ1pZ3wXKX4RkTsoep5GsYlhyUd7GzsAQQiX9YhYyWDQ6NHBYAGAWbw2BLNxltWa4AyWOa1C8v+1+mRwdvpdMY7powJNCXQaIJmiOZiI/Us= vincent@fixe-pc-2020-03-01
    - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCYHkEIa38p3e4+m/LScHm8Ei7H2X/pDksjVAzoJ4fHr8oXc6DKkC8SWwMnh3L4WzWBhfTbzwUgFTNpsxhp/UyJf+fdzmzetlbVlYSuA6yWuSmgMeFbXFImhZ+Sn3i59hLeqAAyrkQLjba2waehdEsuOQ/AGoDbMYm38Xf9Wka/1YIeUPE4gLeLvymRnGw7BSug6Unycy52WlFAquollObOvc7tNiX0uLDh81Dp0KZhqWRs75hfmQ9du4g4uNhFLiF11hOGNgj3PWV+nWe8GWNQYVUBChWX1dsP8ct/ahG9IFXSPEaFD1IZeFp29u2ln3mgKkBtcRTRe1e3CLQqiRsUq2aixVFbSgFMFgGSUiNGNqKR4f9DeyJrYBplSj6HXjWoBny4Wm8+yfk8qR2RtQpS6AUu81xtKnXOaj9Q5VZO3kVF0U3EXHAZutTYDj9mDlhLSBS7x7hmrkRBbIy7adSx9Gx5Ck3/RllqG6KD+LdJa4I0pUTRNetpLpYDeZpwjnDP1r7udaSQMyRMH5YKLzhtHqIV/imn9QO4KCxNxTgwxt9ho6HDvlDGERCxm+yeHUu3CPyq2ZGSF5HHsYTGUtYvQw4JfQyw/5DrZ7IIdU1e7ZuaE3h/NvFgKJPVTP52nmUtIW7pIOkHpn9mddjm/oKMayOzMspLn9HLFVbqi7A5Xw== vincent@zen-pc
  privatekey:
    - keyname: "id_gitea"
      key: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/privatekey:gitea')}}"

user_config_repo: "ssh://git@git.{{ domain.name }}:2222/vincent/conf2.git"
domain:
  name: ducamps.win

hass_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDfVei9iC/Ra5qmSZcLu8z2CTaXCmfn4JSS4o3eu0HhykdYGSqhBTcUDD3/FhcTPQJVFsu1P4Gwqq1dCE+EvaZZRQaMUqVKUpOliThSG6etbImkvqLQQsC1qt+/NqSvfzu2+28A6+YspzuxsViGo7e3Gg9MdwV3LMGh0mcOr/uXb/HIk18sJg5yQpwMfYTj0Wda90nyegcN3F2iZMeauh/aaFJzWcHNakAAewceDYOErU07NhlZgVA2C8HgkJ8HL7AqIVqt9VOx3xLp91DbKTNXSxvyM0X4NQP24P7ZFxAOk/j0AX3hAWhaNmievCHyBWvQve1VshZXFwEIiuHm8q4GSCxK2r0oQudKdtIuQMfuUALigdiSxo522oEiML/2kSk17WsxZwh7SxfD0DKa82fy9iAwcAluWLwJ+yN3nGnDFF/tHYaamSiowpmTTmQ9ycyIPWPLVZclt3BlEt9WH/FPOdzAyY7YLzW9X6jhsU3QwViyaTRGqAdqzUAiflKCMsNzb5kq0oYsDFC+/eqp1USlgTZDhoKtTKRGEjW2KuUlDsXGBeB6w1D8XZxXJXAaHuMh4oMUgLswjLUdTH3oLnnAvfOrl8O66kTkmcQ8i/kr1wDODMy/oNUzs8q4DeRuhD5dpUiTUGYDTWPYj6m6U/GAEHvN/2YEqSgfVff1iQ4VBw==

system_arch_local_mirror: "https://arch.{{domain.name}}/repo/archlinux_$arch"


system_sudoers_group: "serverAdmin"
system_ipV6_disable: True

user_custom_host:
  - host: "git.ducamps.win"
    user: "git"
    keyfile: "~/.ssh/id_gitea"
  - host: "gitlab.com"
    user: "git"
    keyfile: "~/.ssh/id_consort"

system_ip_unprivileged_port_start: 0
wireguard_mtu: 1420
ansible/group_vars/all/consul
Normal file
5
ansible/group_vars/all/consul
Normal file
@ -0,0 +1,5 @@
|
||||
consul_client_addr: "0.0.0.0"
|
||||
consul_datacenter: "homelab"
|
||||
consul_backup_location: "/mnt/diskstation/git/backup/consul"
|
||||
consul_ansible_group: all
|
||||
consul_systemd_resolved_enable: true
|
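With `consul_systemd_resolved_enable` on, `*.consul` names should resolve through systemd-resolved on every member; two quick checks (a sketch):

```sh
# Confirm the agent joined the homelab datacenter
consul members
# Confirm resolved forwards the consul domain to the local agent
resolvectl query vault.service.consul
```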
ansible/group_vars/all/docker (Normal file, 8 lines)

@@ -0,0 +1,8 @@
docker_daemon_config:
  dns:
    - 172.17.0.1
    - 192.168.1.6
  mtu: 1420
  insecure-registries:
    - 192.168.1.0/24
    - 192.168.121.0/24
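The first DNS entry, 172.17.0.1, is the docker0 bridge address, so containers resolve through a DNS service on the host itself; this can be spot-checked from a throwaway container (a sketch):

```sh
# Containers should resolve internal names via the daemon-level DNS list above
docker run --rm busybox nslookup git.ducamps.eu
```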
ansible/group_vars/all/nomad (Normal file, 9 lines)

@@ -0,0 +1,9 @@
nomad_docker_allow_caps:
  - NET_ADMIN
  - NET_BROADCAST
  - NET_RAW
nomad_allow_privileged: True
nomad_vault_enabled: true
nomad_vault_address: "http://active.vault.service.{{consul_domain}}:8200"
nomad_vault_role: "nomad-cluster"
nomad_docker_extra_labels: ["job_name", "task_group_name", "task_name", "namespace", "node_name"]
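`nomad_vault_address` targets the `active` tag of the Vault service in Consul, so Nomad always talks to the current leader; reachability is easy to verify (a sketch, using the production consul_domain):

```sh
# Should report the unsealed, active Vault node
vault status -address=http://active.vault.service.consul:8200
```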
@@ -1,42 +0,0 @@
consul_client_addr: "0.0.0.0"
consul_datacenter: "homelab"
consul_backup_location: "/mnt/diskstation/git/backup/consul"
consul_ansible_group: all
consul_bootstrap_expect: 3
nomad_docker_allow_caps:
  - NET_ADMIN
  - NET_BROADCAST
  - NET_RAW
nomad_vault_enabled: true
nomad_vault_address: "http://active.vault.service.consul:8200"
nomad_vault_role: "nomad-cluster"
nomad_vault_token: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:nomad_vault_token') }}"
nomad_bootstrap_expect: 3
notification_mail: "{{inventory_hostname}}@{{ domain.name }}"
msmtp_mailhub: smtp.{{ domain.name }}
msmtp_auth_user: "{{ user.mail }}"
msmtp_auth_pass: "{{ lookup('hashi_vault','secret=secrets/data/ansible/other:email') }}"

system_user:
  - name: drone-deploy
    home: /home/drone-deploy
    shell: /bin/bash
    privatekey:
      - keyname: id_gitea
        key: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/privatekey:gitea')}}"

    authorized_keys:
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDUaK+pQlosmopbZfucll9UdqDOTaODOBwoxRwkJEk1i drone@oscar

  - name: ansible
    home: /home/ansible
    shell: /bin/bash

  - name: root
    home: /root
    privatekey:
      - keyname: id_gitea
        key: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/privatekey:gitea')}}"
@@ -1,9 +1,5 @@
sssd_configure: true
# sssd_configure is False by default - by default nothing is done by this role.
-ldap_search_base: "dc=ducamps,dc=win"
-ldap_uri: "ldaps://ldap.ducamps.win"
-ldap_sudo_search_base: "ou=sudoers,dc=ducamps,dc=win"
-ldap_default_bind_dn : "uid=vaultserviceaccount,cn=users,dc=ducamps,dc=win"
-ldap_password : "{{lookup('hashi_vault', 'secret=secrets/data/ansible/other:vaulserviceaccount')}}"
-userPassword: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/user:userPassword')}}"

+ldap_search_base: "dc=ducamps,dc=eu"
+ldap_uri: "ldaps://ldaps.service.consul"
+ldap_sudo_search_base: "ou=sudoers,dc=ducamps,dc=eu"
ansible/group_vars/all/users (Normal file, 42 lines)

@@ -0,0 +1,42 @@
user:
  name: vincent
  home: /home/vincent
  uid: 1024
  mail: vincent@ducamps.eu
  groups:
    - docker
  authorized_keys:
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINN5V9WPPi2/HwAQuDeaJO3hUPf8HxNMHqVmkf1pDjWg JuiceSSH
    - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDBrP9akjyailPU9tUMvKrtDsqjI191W1L95u3OFjBqqapXgbDVx1FVtSlIIKcCHZyTII1zgC7woZmNRpmaIJRh6N+VIuRrRs29xx2GUVc4pxflUwwIAK36hgZS3nqmA2biacmPR9HogZLZMcPtZdLhWGlLuUv1cWqbqW7UcDa0lbubCo2v4OQMx/zt37voKAZSkkbH9mVszH6eKxNFy1KXbLYhwXiKfYBnAHbivhiSkZUGV6D4HNj8Jx6IY1YF3bfwMXmt841Q/7OY+t3RTIS8ewvSF+jpQ7GKHBEsZTZUGwIoSyZFFvCgKQVOJu/ZJJS4HNkluilir9Sxtx2LRgy+HHQ251trnsVsJp3ts4uTiMkKJQy1PXy1ZvQXYkip9Af3vlXUMmTyVj8cv+No07G1rZ1pZ3wXKX4RkTsoep5GsYlhyUd7GzsAQQiX9YhYyWDQ6NHBYAGAWbw2BLNxltWa4AyWOa1C8v+1+mRwdvpdMY7powJNCXQaIJmiOZiI/Us= vincent@fixe-pc-2020-03-01
    - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCYHkEIa38p3e4+m/LScHm8Ei7H2X/pDksjVAzoJ4fHr8oXc6DKkC8SWwMnh3L4WzWBhfTbzwUgFTNpsxhp/UyJf+fdzmzetlbVlYSuA6yWuSmgMeFbXFImhZ+Sn3i59hLeqAAyrkQLjba2waehdEsuOQ/AGoDbMYm38Xf9Wka/1YIeUPE4gLeLvymRnGw7BSug6Unycy52WlFAquollObOvc7tNiX0uLDh81Dp0KZhqWRs75hfmQ9du4g4uNhFLiF11hOGNgj3PWV+nWe8GWNQYVUBChWX1dsP8ct/ahG9IFXSPEaFD1IZeFp29u2ln3mgKkBtcRTRe1e3CLQqiRsUq2aixVFbSgFMFgGSUiNGNqKR4f9DeyJrYBplSj6HXjWoBny4Wm8+yfk8qR2RtQpS6AUu81xtKnXOaj9Q5VZO3kVF0U3EXHAZutTYDj9mDlhLSBS7x7hmrkRBbIy7adSx9Gx5Ck3/RllqG6KD+LdJa4I0pUTRNetpLpYDeZpwjnDP1r7udaSQMyRMH5YKLzhtHqIV/imn9QO4KCxNxTgwxt9ho6HDvlDGERCxm+yeHUu3CPyq2ZGSF5HHsYTGUtYvQw4JfQyw/5DrZ7IIdU1e7ZuaE3h/NvFgKJPVTP52nmUtIW7pIOkHpn9mddjm/oKMayOzMspLn9HLFVbqi7A5Xw== vincent@zen-pc
  privatekey:
    - keyname: "id_gitea"
      key: "{{lookup('file', '~/.ssh/id_gitea')}}"



system_user:
  - name: drone-deploy
    home: /home/drone-deploy
    shell: /bin/bash
    authorized_keys:
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDUaK+pQlosmopbZfucll9UdqDOTaODOBwoxRwkJEk1i drone@oscar

  - name: ansible
    home: /home/ansible
    shell: /bin/bash

  - name: root
    home: /root
    privatekey:
      - keyname: id_gitea
        key: "{{lookup('file', '~/.ssh/id_gitea')}}"



user_custom_host:
  - host: "git.ducamps.eu"
    user: "git"
    keyfile: "~/.ssh/id_gitea"

user_config_repo: "ssh://git@git.ducamps.eu:2222/vincent/conf2.git"
ansible/group_vars/all/vault (Normal file, 1 line)

@@ -0,0 +1 @@
vault_raft_group_name: "homelab"
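Since `vault_raft_group_name` points at Vault's integrated raft storage, backups go through Vault's own snapshot mechanism rather than Consul's; a sketch, assuming a token with the right policy:

```sh
# Stream a snapshot of Vault's integrated (raft) storage to a local file
vault operator raft snapshot save vault-backup.snap
```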
ansible/group_vars/all/vault_nomad (Normal file, 11 lines)

@@ -0,0 +1,11 @@
$ANSIBLE_VAULT;1.1;AES256
39613433313663653039643961643165643632313938626339653365376633613135653436363938
6331623132366638633665636163336462393333336264320a666466303465663839646435626231
38396437363034313236383261326637306238616162303131356537393635363939376236386130
6466353961643233310a306631333664363332336263656638623763393732306361306632386662
37623934633932653965316532386664353130653830356237313337643266366233346633323265
37616533303561363864626531396366323565396536383133643539663630636633356238386633
34383464333363663532643239363438626135336632316135393537643930613532336231633064
35376561663637623932313365636261306131353233636661313435643563323534623365346436
65366132333635643832353464323961643466343832376635386531393834336535386364396333
3932393561646133336437643138373230366266633430663937
ansible/group_vars/all/vault_sssd (Normal file, 12 lines)

@@ -0,0 +1,12 @@
$ANSIBLE_VAULT;1.1;AES256
61326233336236343231396231306638373837653661313334313261313539316532373437346132
3931306637303530373032663236363466383433316161310a396439393564643731656664663639
32386130663837303663376432633930393663386436666263313939326631616466643237333138
3365346131636333330a376436323964656563363664336638653564656231636136663635303439
35346461356337303064623861326331346263373539336335393566623462343464323065366237
61346637326336613232643462323733366530656439626234663335633965376335623733336162
37323739376237323534613361333831396531663637666161666366656237353563626164626632
33326336353663356235373835666166643465666562616663336539316233373430633862613133
36363831623361393230653161626131353264366634326233363232336635306266376363363739
66373434343330633337633436316135656533613465613963363931383266323466653762623365
363332393662393532313063613066653964
ansible/group_vars/all/vault_users (Normal file, 14 lines)

@@ -0,0 +1,14 @@
$ANSIBLE_VAULT;1.1;AES256
35303137383361396262313561623237626336306366376630663065396664643630383638376436
3930346265616235383331383735613166383461643233310a663564356266663366633539303630
37616532393035356133653838323964393464333230313861356465326433353339336435363263
3162653932646662650a613762393062613433343362633365316434663661306637623363333834
61303231303362313133346461373738633239613933303564383532353537626538363636306461
66663330346566356637623036363964396137646435333139323430353639386134396537366334
39303130386432366335383433626431663034656466626265393863623438366130346562623365
63653963393663353666313631326131636361333230386461383638333338393137336562323935
37343034363961306663303232346139356534613837663230393962323333656536303161373939
65626164336166306264653538313661393934383966303135356161336331623835663235646332
63343764643861366537383962616230323036326331386333346463353835393762653735353862
32323839663365353337303363313535633362643231653663393936363539363933636430613832
32336566633962646463316636346330336265626130373636643335323762363661
ansible/group_vars/all/vault_vault (Normal file, 14 lines)

@@ -0,0 +1,14 @@
$ANSIBLE_VAULT;1.1;AES256
64396261616266633665646330393631316463386334633032353965323964633464333331323334
6261653930313764313836366531383462313965336231620a656637623439623639383931373361
37373434636531623563336565356136633031633835633636643436653165386436636564616130
3763383036343739370a376565343130636631653635616566653531323464343632623566313436
32396165636333393032636636613030373663393238323964396462323163616162613933626536
31623931343633346131636563643563393230323839636438373933666137393031326532356535
32363439306338623533353734613966396362303164616335363535333438326234623161653732
66613762653966613763623966633939323634346536636334343364306332323563653361346563
65313433376634363261323934376637646233636233346536316262386634353666376539613235
63666432396636373139663861393164626165383665663933383734303165623464666630343231
33323339663138373530396636636333323439616137313434316465633162396237306238343366
30326162306539396630633738323435323432646338633331626665363838376363343835336534
3635
ansible/group_vars/cluster/mount (Normal file, 50 lines)

@@ -0,0 +1,50 @@
systemd_mounts:
  diskstation_photo:
    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/photo"
    mount: /mnt/diskstation/photo
    type: nfs
    options:
      - "vers=4"
    automount: "{{ env_automount }}"
    enabled: true
  diskstation_music:
    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/music"
    mount: /mnt/diskstation/music
    type: nfs
    options:
      - "vers=4"
    automount: "{{ env_automount }}"
    enabled: true
  diskstation_media:
    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/media"
    mount: /mnt/diskstation/media
    type: nfs
    options:
      - "vers=4"
    automount: "{{ env_automount }}"
    enabled: true

  diskstation_ebook:
    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/ebook"
    mount: /mnt/diskstation/ebook
    type: nfs
    options:
      - "vers=4"
    automount: "{{ env_automount }}"
    enabled: true
  diskstation_nomad:
    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/nomad"
    mount: /mnt/diskstation/nomad
    type: nfs
    options:
      - " "
    automount: "{{ env_automount }}"
    enabled: true
  diskstation_download:
    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/download"
    mount: /mnt/diskstation/download
    type: nfs
    options:
      - "vers=4"
    automount: "{{ env_automount }}"
    enabled: true
ansible/group_vars/cluster/nomad (Normal file, 1 line)

@@ -0,0 +1 @@
nomad_node_class: 'cluster'
@@ -1,54 +0,0 @@

postgresql_users:
  - name: root
    role_attr_flags: SUPERUSER
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:root')}}"
  - name: wikijs
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/wikijs:password')}}"
  - name: ttrss
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/ttrss:password')}}"
  - name: gitea
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/gitea:password')}}"
  - name: supysonic
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/supysonic:password')}}"
  - name: hass
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/homeassistant:password')}}"
  - name: vaultwarden
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/vaultwarden:password')}}"
  - name: drone
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/droneci:password')}}"
  - name: dendrite
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/dendrite:password')}}"
  - name: paperless
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/paperless:password')}}"
  - name: dump
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/dump:password')}}"
  - name: vikunja
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/vikunja:password')}}"
  - name: ghostfolio
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/ghostfolio:password')}}"


postgresql_databases:
  - name: wikijs
    owner: wikijs
  - name: ttrss
    owner: ttrss
  - name: gitea
    owner: gitea
  - name: supysonic
    owner: supysonic
  - name: hass
    owner: hass
  - name: vaultwarden
    owner: vaultwarden
  - name: drone
    owner: drone
  - name: dendrite
    owner: dendrite
  - name: paperless
    owner: paperless
  - name: vikunja
    owner: vikunja
  - name: ghostfolio
    owner: ghostfolio
ansible/group_vars/database/database (Normal file, 38 lines)

@@ -0,0 +1,38 @@
postgres_consul_service: true
postgres_consul_service_name: db

postgresql_databases:
  - name: ttrss
    owner: ttrss
  - name: gitea
    owner: gitea
  - name: supysonic
    owner: supysonic
  - name: hass
    owner: hass
  - name: vaultwarden
    owner: vaultwarden
  - name: drone
    owner: drone
  - name: paperless
    owner: paperless
  - name: vikunja
    owner: vikunja
  - name: ghostfolio
    owner: ghostfolio
  - name: pdns-auth
    owner: pdns-auth
  - name: pdns-admin
    owner: pdns-admin
  - name: mealie
    owner: mealie
  - name: immich
    owner: immich

postgresql_hba_entries:
  - {type: local, database: all, user: postgres, auth_method: peer}
  - {type: local, database: all, user: all, auth_method: peer}
  - {type: host, database: all, user: all, address: '127.0.0.1/32', auth_method: md5}
  - {type: host, database: all, user: all, address: '::1/128', auth_method: md5}
  - {type: host, database: all, user: all, address: '::0/128', auth_method: md5}
  - {type: host, database: all, user: all, address: '0.0.0.0/0', auth_method: md5}
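With `postgres_consul_service_name: db`, clients can reach the database through Consul DNS, and the wide-open `0.0.0.0/0` md5 entry means password auth from anywhere on the network; a connectivity sketch:

```sh
# Resolve the database through Consul and authenticate with md5
psql "host=db.service.consul user=gitea dbname=gitea" -c 'select 1'
```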
ansible/group_vars/database/vault_database (Normal file, 54 lines)

@@ -0,0 +1,54 @@
$ANSIBLE_VAULT;1.1;AES256
39363436643831373861376361613830316334613939346338616636393462663033393261633838
6337336161393063646136613538396366653538656435360a303062636463383739653730346639
61323634306265613336313634653039313639663836363032353261383566393865613166613032
3837313634633466610a313062646237396138316361303361663565353862363139343566306539
38303161303163323265376539323939393938373965353934303535613962653534363362346563
61643638353138623162353364353736396162613735333063633739346132613161303564356437
62343535363263646463306466663536613937393463666336396332646533343439613433626566
38643363343065393165646134343935386461626166316662356365366666363737653336626631
64643230616431396666666462303366343164323233303139643939346635353730316234386163
35613235643034643833393233373536383863333763393066373564353535353463363336316335
63363537643432663266386438316563656663656462333039303861393364333966383430643263
63356435373064633861343137616637393161383361306135373864386235653034323732316663
65336465386135663532356433386562666639333464633362663131646237613034646563396133
33303464633635636233626633353038656230373266666132323561383866343632333561323363
61346664623338376436373332646232646235323639633262666166346535663238653563363239
34663365633363313433376333653534333364393635316235333965383262313563373161663065
36393565396534353235623238303835343334646632306638306332336539616463393966653538
35336462623031326539633139636533633632623137393463333531663935323765663139306361
66643434393533313039356434326438626265323066613966323634306632653765363834613034
30373039336536393865383265643335396232643537343363313338383838383030386665303237
64363666346535633237353462333232623132353031323231623338356136656261303662656465
31313039643561623635643435333133663032313964323061393231666336343233363038616231
36356262326530383233336130326361613431623866633832663361633937646461343731343938
33306262346463623935663466356264393837626239313739356431653163376563333234346566
38373663643532313635333131663239383736343930623735323861663037356136353433633865
63626435613936303661366637623338633961643137613933303735366265663933396130363039
34396637643638613839306639343765393539653164616536653661373264376436626639316666
61303835323761643531326438363035343539383464376433363534623934366534373631353364
61383866323737316430303736366533643939313637393631303833363431613562303639323939
66313434613963656464383964313734383938353366306462666537653563336465376464303538
34336531663334303938333739313638636363623562613536333736386137363139653164626261
62663662316365663563646164303935323866633336633939323837393962393130626330666233
63663661303565646236623130663034636264353235376561306630376365613966663536303963
63643161386435633831393334333035653761393863373731616239313235383033633439376166
39613762376162386231633938393036633461303732323337656430373430636435313337303365
37646461336339623339316663616636373036656564383462356562306465623762653162633963
35636466386138333564666564323034393162633965386133643235303938616439333130353637
61343536323034366464653138353665326436396133313432666563353335383733363335613562
61646365346665383866623364396138323666326338313530353663323938613362653038313339
32613663616535313661386538366330373364366637386634633437646362383764346263636434
35616166393065343038643861636333373738363335353164326435303961326662356230323262
35656531653535643630376330393731643532353132366662636664626132646632306361323035
31373136616435336362633439356339336466313337623538383763386132396135653864386638
31393864363466653137643565306462616238333435343036613331653866393532313861376331
33646636623666343439616332386363373664346164313963623861393134666463383366633539
35313761333564303635656364303566643436393130356163623137313530653539656537653139
38336636623732313630303933303962303561376436623737633139643564343166326335386639
31373437336139326562613339393235393065396538333566323864643639303132313733396132
35613532396363326166313061353136373965303964623534653634613639303764393038333037
63656131616463663565653134363336326139303736313138366262616338643339316231663631
30656132386462393433313261313466303239346138623433643634616465656139343764353338
62616139613731363665333438383861623837643432643134626461643631323034383262656439
33653563323434343964633236353434643739333863636630636363633639373630
ansible/group_vars/database_active (Normal file, 1 line)

@@ -0,0 +1 @@
postgres_consul_tag: "active"

ansible/group_vars/database_standby (Normal file, 1 line)

@@ -0,0 +1 @@
postgres_consul_tag: "standby"
@@ -3,19 +3,15 @@ dhcpd_lease_time: '72'
dhcpd_domain_name: "lan.{{ domain.name }}"
dhcpd_nameservers:
  - '192.168.1.4'
-  - '192.168.1.10'
-dhcpd_keys:
-  - key: dhcp
-    algorithm: HMAC-MD5
-    secret: "{{ lookup('hashi_vault','secret=secrets/data/ansible/other:dhcpd_key') }}"
+  - '192.168.1.40'

dhcpd_zones:
  - zone: "lan.{{ domain.name }}."
-    primary: "192.168.1.10"
-    key: "dhcp"
+    primary: "192.168.1.5"
+    key: "dhcpdupdate"
  - zone: "1.168.192.in-addr.arpa."
-    primary: "192.168.1.10"
-    key: "dhcp"
+    primary: "192.168.1.5"
+    key: "dhcpdupdate"

dhcpd_options: |
  ddns-updates on;

@@ -45,17 +41,10 @@ dhcpd_hosts:

  - hostname: 'oscar'
    address: '192.168.1.40'
-    ethernet: '7C:83:34:B3:49:9A'
+    ethernet: '68:1D:EF:3C:F0:44'
  - hostname: 'bleys'
    address: '192.168.1.42'
    ethernet: '68:1d:ef:2b:3d:24'
-  - hostname: 'VMAS-HML'
-    address: '192.168.1.50'
-    ethernet: '52:54:00:02:74:ed'
-  - hostname: 'VMAS-BUILD'
-    address: '192.168.1.53'
-    ethernet: '52:54:13:1e:93'

  - hostname: 'xiaomi-chambre-gateway'

@@ -73,4 +62,7 @@ dhcpd_hosts:
  - hostname: 'shelly-chambre-ventilo'
    address: '192.168.1.65'
    ethernet: 'e0:98:06:97:78:0b'
+  - hostname: 'shelly-Bureau-chauffeau'
+    address: '192.168.1.66'
+    ethernet: '8c:aa:b5:42:b9:b9'
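The `dhcpd_zones` stanzas wire DHCP up to dynamic DNS updates against the server at 192.168.1.5, signed with the `dhcpdupdate` key; the rendered config can be checked before a restart (a sketch; the config path depends on the distribution):

```sh
# Parse-check the generated dhcpd.conf without starting the daemon
dhcpd -t -cf /etc/dhcpd.conf
```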
ansible/group_vars/dhcp/vault_dhcp (Normal file, 14 lines)

@@ -0,0 +1,14 @@
$ANSIBLE_VAULT;1.1;AES256
65303666336535386536653939626336646338623431353161636565393532623264316534326539
6265393839323438376666393030383839326239323261660a333132613538306137383332336538
38323830353062366133643734303138343939323135333532333666653039326437316361353463
6665393263376132620a346239386437326462363565636335303766306638393331656664376665
63373131373039653065633861626263646635323634333538343163346239633937303761366362
31376438363731613666393531656232653033336332653261313866396434616461303831353336
38663965636536313932346133363733636636643938366364366435366237316435643062336231
34343931653963613431336465653036616431323263613731393963656637303561366461663038
31336131346266393035343135323131636435333865323733386439363763376638383337613530
34356331356361636665383933633130343564373739343630663835313164326565393439306163
31386538633033333961386534323234653833323537356565616436346462613333663139623035
30636265313230383162633466373937353262383965313631326336666133653331366230653961
6131
@@ -1,3 +1,2 @@
nomad_datacenter: homelab
-nomad_allow_privileged: True
system_wol_enable: True
@@ -1,83 +0,0 @@
systemd_mounts:
  diskstation_git:
    share: diskstation.ducamps.win:/volume2/git
    mount: /mnt/diskstation/git
    type: nfs
    options:
      - " "
    automount: true
  diskstation_CardDav:
    share: diskstation.ducamps.win:/volume2/CardDav
    mount: /mnt/diskstation/CardDav
    type: nfs
    options:
      - " "
    automount: true
  backup_disk:
    share: /dev/sdb1
    mount: /mnt/backup
    type: ntfs-3g
    options:
      - " "
    automount: true
  diskstation_home:
    share: diskstation.ducamps.win:/volume2/homes/admin
    mount: /mnt/diskstation/home
    type: nfs
    options:
      - " "
    automount: true
  diskstation_photo:
    share: diskstation.ducamps.win:/volume2/photo
    mount: /mnt/diskstation/photo
    type: nfs
    options:
      - " "
    automount: true
  diskstation_music:
    share: diskstation.ducamps.win:/volume2/music
    mount: /mnt/diskstation/music
    type: nfs
    options:
      - " "
    automount: true
  diskstation_media:
    share: diskstation.ducamps.win:/volume1/media
    mount: /mnt/diskstation/media
    type: nfs
    options:
      - " "
    automount: true
  diskstation_ebook:
    share: diskstation.ducamps.win:/volume2/ebook
    mount: /mnt/diskstation/ebook
    type: nfs
    options:
      - " "
    automount: true
  diskstation_archMirror:
    share: diskstation.ducamps.win:/volume2/archMirror
    mount: /mnt/diskstation/archMirror
    type: nfs
    options:
      - " "
    automount: true
  diskstation_nomad:
    share: diskstation.ducamps.win:/volume2/nomad
    mount: /mnt/diskstation/nomad
    type: nfs
    options:
      - " "
    automount: true

systemd_mounts_enabled:
  - diskstation_git
  - diskstation_music
  - backup_disk
  - diskstation_photo
  - diskstation_home
  - diskstation_CardDav
  - diskstation_media
  - diskstation_ebook
  - diskstation_archMirror
  - diskstation_nomad
ansible/group_vars/production (Normal file, 13 lines)

@@ -0,0 +1,13 @@
domain:
  name: ducamps.eu
consul_bootstrap_expect: 3
consul_domain: "consul"
nomad_bootstrap_expect: 3
nomad_client_meta:
  - name: "env"
    value: "production"
vault_unseal_keys_dir_output: "~/vaultUnseal/production"
env_default_nfs_path: ""
env_media_nfs_path: "/volume1"
env_automount: true
nas_ip: "192.168.1.43"
|
||||
systemd_mounts: []
|
||||
systemd_mounts_enabled: []
|
||||
domain:
|
||||
name: ducamps.dev
|
||||
#systemd_mounts: []
|
||||
#systemd_mounts_enabled: []
|
||||
consul_bootstrap_expect: 2
|
||||
consul_domain: "consul"
|
||||
nomad_bootstrap_expect: 2
|
||||
nomad_client_meta:
|
||||
- name: "env"
|
||||
value: "staging"
|
||||
|
||||
vault_unseal_keys_dir_output: "~/vaultUnseal/staging"
|
||||
hosts_entries:
|
||||
- ip: "{{ hostvars['nas-dev']['ansible_default_ipv4']['address'] }}"
|
||||
name: diskstation.ducamps.eu
|
||||
|
||||
env_default_nfs_path: ""
|
||||
env_automount: true
|
||||
nas_ip: "nfs.service.consul"
|
||||
|
||||
|
||||
|
@@ -1,6 +1,10 @@
---
ansible_host: "192.168.1.42"
ansible_python_interpreter: "/usr/bin/python3"
+default_interface: "enp2s0"
+consul_iface: "{{ default_interface}}"
+vault_iface: "{{ default_interface}}"
+nfs_iface: "{{ default_interface}}"
wireguard_address: "10.0.0.7/24"
wireguard_byhost_allowed_ips:
  merlin: 10.0.0.7,192.168.1.42,192.168.1.0/24

@@ -11,13 +15,13 @@ wireguard_endpoint: ""
wireguard_postup:
  - iptables -A FORWARD -i wg0 -j ACCEPT
  - iptables -A FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o enp2s0 -j MASQUERADE
+  - iptables -t nat -A POSTROUTING -o {{default_interface}} -j MASQUERADE
  - sysctl -w net.ipv4.ip_forward=1

wireguard_postdown:
  - iptables -D FORWARD -i wg0 -j ACCEPT
  - iptables -D FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o enp2s0 -j MASQUERADE
+  - iptables -t nat -D POSTROUTING -o {default_interface} -j MASQUERADE
  - sysctl -w net.ipv4.ip_forward=0

partition_table:
@@ -1,22 +1,23 @@
---
ansible_host: 10.0.0.1

#ansible_host: 135.181.150.203
default_interface: "eth0"
wireguard_address: "10.0.0.1/24"
wireguard_endpoint: "135.181.150.203"
wireguard_persistent_keepalive: "20"
-wireguard_allowed_ips: "10.0.0.1/32,10.0.0.3/32,10.0.0.5/32"
+wireguard_allowed_ips: 10.0.0.1

wireguard_postup:
  - iptables -A FORWARD -o %i -j ACCEPT
  - iptables -A FORWARD -i %i -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
+  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
  - sysctl -w net.ipv4.ip_forward=1
-  - resolvectl dns %i 192.168.1.4 192.168.1.10; resolvectl domain %i '~ducamps.win' '~consul'
+  - resolvectl dns %i 192.168.1.4 192.168.1.41; resolvectl domain %i '~ducamps.win' '~ducamps.eu' '~{{ consul_domain }}'

wireguard_postdown:
  - iptables -D FORWARD -i %i -j ACCEPT
  - iptables -D FORWARD -o %i -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
+  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
  - sysctl -w net.ipv4.ip_forward=0

wireguard_unmanaged_peers:

@@ -28,7 +29,7 @@ wireguard_unmanaged_peers:
    public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
    allowed_ips: 10.0.0.5/32
    persistent_keepalive: 0
-wireguard_dns: "192.168.1.4,192.168.1.10"
+wireguard_dns: "192.168.1.4,192.168.1.41"
consul_client_addr: "127.0.0.1 10.0.0.1"
consul_bind_address: "10.0.0.1"
consul_ui: True

@@ -41,5 +42,6 @@ nomad_host_networks:
    interface: eth0
  - name: "default"
    interface: wg0
+nomad_client_network_interface : "wg0"
vault_listener_address: 10.0.0.1
nomad_plugins_podman: True
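The wireguard_* variables above map almost one-to-one onto a wg-quick interface file, so the tunnel can be inspected and cycled with the standard tools (a sketch):

```sh
# Show peers, allowed-ips and last handshakes for the wg0 interface
wg show wg0
# Re-read /etc/wireguard/wg0.conf after changing PostUp/PostDown rules
wg-quick down wg0 && wg-quick up wg0
```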
@@ -1,6 +1,10 @@
---
ansible_host: "192.168.1.41"
ansible_python_interpreter: "/usr/bin/python3"
+default_interface: "enu1u1"
+consul_iface: "{{ default_interface }}"
+vault_iface: "{{ default_interface }}"

wireguard_address: "10.0.0.6/24"
wireguard_byhost_allowed_ips:
  merlin: 10.0.0.6,192.168.1.41

@@ -11,10 +15,10 @@ wireguard_endpoint: ""
wireguard_postup:
  - iptables -A FORWARD -i wg0 -j ACCEPT
  - iptables -A FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o enu1u1 -j MASQUERADE
+  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE

wireguard_postdown:
  - iptables -D FORWARD -i wg0 -j ACCEPT
  - iptables -D FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o enu1u1 -j MASQUERADE
+  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
@@ -1,4 +1,8 @@
---
+default_interface: eth0
+vault_iface: "{{ default_interface}}"
+ansible_host: gerard-dev.lan.ducamps.dev
wireguard_address: "10.0.1.6/24"
perrsistent_keepalive: "20"
wireguard_endpoint: ""

@@ -6,10 +10,10 @@ wireguard_endpoint: ""
wireguard_postup:
  - iptables -A FORWARD -i wg0 -j ACCEPT
  - iptables -A FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
+  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE

wireguard_postdown:
  - iptables -D FORWARD -i wg0 -j ACCEPT
  - iptables -D FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
+  - iptables -t nat -D POSTROUTING -o {{ default_interface}} -j MASQUERADE
@@ -1,31 +1,39 @@
---
ansible_host: 10.0.0.4

#ansible_host: 65.21.2.14
default_interface: "ens3"
nfs_iface: "wg0"
wireguard_address: "10.0.0.4/24"
-wireguard_endpoint: "95.216.217.5"
-wireguard_persistent_keepalive: "30"
-wireguard_allowed_ips: "10.0.0.4/32,10.0.0.3/32,10.0.0.5/32"
+wireguard_endpoint: "65.21.2.14"
+wireguard_persistent_keepalive: "20"
+wireguard_byhost_allowed_ips:
+  oscar: "0.0.0.0/0"
+  bleys: "0.0.0.0/0"
+wireguard_allowed_ips: "10.0.0.4/32,10.0.0.3,10.0.0.5"

wireguard_postup:
  - iptables -A FORWARD -o %i -j ACCEPT
  - iptables -A FORWARD -i %i -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
+  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
  - sysctl -w net.ipv4.ip_forward=1
  - resolvectl dns %i 192.168.1.4 192.168.1.41; resolvectl domain %i '~ducamps.win' '~ducamps.eu' '~{{ consul_domain }}'

wireguard_postdown:
  - iptables -D FORWARD -i %i -j ACCEPT
  - iptables -D FORWARD -o %i -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
+  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
  - sysctl -w net.ipv4.ip_forward=0

wireguard_unmanaged_peers:
  phone:
-    public_key: ioG35kDFTtip+Acfq+je9qDHYbZij+J6+Pg3T6Z4N0w=
+    public_key: IYKgrQ2VJUbOnupSqedOfIilsbmBBABZUTRF9ZoTrkc=
    allowed_ips: 10.0.0.3/32
    persistent_keepalive: 0
  zen:
    public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
    allowed_ips: 10.0.0.5/32
    persistent_keepalive: 0
-wireguard_dns: "192.168.1.40,192.168.1.10"
+wireguard_dns: "192.168.1.4,192.168.1.41"
consul_client_addr: "127.0.0.1 10.0.0.4"
consul_bind_address: "10.0.0.4"
consul_ui: True

@@ -35,7 +43,8 @@ nomad_host_networks:
  - name: "private"
    interface: wg0
  - name: "public"
-    interface: eth0
+    interface: ens3
  - name: "default"
    interface: wg0
vault_listener_address: 10.0.0.4
nomad_plugins_podman: True
@@ -1,4 +1,8 @@
---
+ansible_host: merlin-dev.lan.ducamps.dev
+default_interface: eth0
+vault_iface: "{{ default_interface}}"
wireguard_address: "10.0.1.4/24"
wireguard_endpoint: "{{ ansible_default_ipv4.address }}"
wireguard_persistent_keepalive: "30"

@@ -6,12 +10,12 @@ wireguard_persistent_keepalive: "30"
wireguard_postup:
  - iptables -A FORWARD -o %i -j ACCEPT
  - iptables -A FORWARD -i %i -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
+  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE

wireguard_postdown:
  - iptables -D FORWARD -i %i -j ACCEPT
  - iptables -D FORWARD -o %i -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
+  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE

wireguard_unmanaged_peers:
  phone:
17 ansible/host_vars/nas-dev Normal file
@@ -0,0 +1,17 @@
---
ansible_host: nas-dev.lan.ducamps.dev
default_interface: eth0
vault_iface: "{{ default_interface }}"
wireguard_address: "10.0.1.8/24"
wireguard_persistent_keepalive: "30"
wireguard_endpoint: ""

wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE

wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
19 ansible/host_vars/oberon Normal file
@@ -0,0 +1,19 @@
---
wireguard_address: "10.0.0.8/24"
default_interface: "enp2s0"
consul_iface: "{{ default_interface }}"
vault_iface: "{{ default_interface }}"
wireguard_persistent_keepalive: "30"
wireguard_endpoint: ""
wireguard_byhost_allowed_ips:
  merlin: 10.0.0.8,192.168.1.43
  corwin: 10.0.0.8,192.168.1.43
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE

wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
@@ -1,4 +1,9 @@
---
default_interface: "enp1s0"
consul_iface: "{{ default_interface }}"
vault_iface: "{{ default_interface }}"
nfs_iface: "{{ default_interface }}"
nomad_client_cpu_total_compute: 8000
wireguard_address: "10.0.0.2/24"
wireguard_byhost_allowed_ips:
  merlin: 10.0.0.2,192.168.1.40
@@ -9,18 +14,13 @@ wireguard_endpoint: ""
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o enp2s0 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE

wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o enp2s0 -j MASQUERADE
consul_snapshot: True
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE

vault_snapshot: true
vault_backup_location: "/mnt/diskstation/git/backup/vault"
vault_roleID: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:vault-snapshot_approle') }}"
vault_secretID: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:vault-snapshot_secretID') }}"
partition_table:
  - device: "/dev/sda"
    label: gpt

@@ -1,4 +1,7 @@
---
ansible_host: oscar-dev.lan.ducamps.dev
default_interface: eth0
vault_iface: "{{ default_interface }}"
wireguard_address: "10.0.1.2/24"
wireguard_persistent_keepalive: "30"
wireguard_endpoint: ""
@@ -6,14 +9,9 @@ wireguard_endpoint: ""
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE

wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
consul_snapshot: True
vault_snapshot: True
vault_backup_location: "/mnt/diskstation/git/backup/vault"
vault_roleID: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:vault-snapshot_approle') }}"
vault_secretID: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:vault-snapshot_secretID') }}"
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE

@@ -2,12 +2,24 @@
requirements:
	ansible-galaxy install -g -r roles/requirements.yml

deploy_production: generate-token
deploy_production:
	ansible-playbook site.yml -i production -u ansible

deploy_staging: generate-token
deploy_production_wiregard:
	ansible-playbook playbooks/wireguard.yml -i production -u ansible

deploy_staging:
	ansible-playbook playbooks/wireguard.yml -i staging -u ansible
	ansible-playbook site.yml -i staging -u ansible

generate-token:
	export VAULT_TOKEN=`vault token create -policy=ansible -field="token" -period 6h`

deploy_staging_base:
	ansible-playbook playbooks/sssd.yml -i staging -u ansible
	ansible-playbook playbooks/wireguard.yml -i staging -u ansible
	ansible-playbook playbooks/server.yml -i staging -u ansible

view-allvault:
	ansible-vault view `git grep -l "ANSIBLE_VAULT;1.1;AES256$$"`

9 ansible/misc/vault-keyring-client.sh Executable file
@@ -0,0 +1,9 @@
#!/bin/sh

readonly vault_password_file_encrypted="$(dirname $0)/vault-password.gpg"

# flock used to work around "gpg: decryption failed: No secret key" in tf-stage2
# would otherwise need 'auto-expand-secmem' (https://dev.gnupg.org/T3530#106174)
flock "$vault_password_file_encrypted" \
  gpg --batch --decrypt --quiet "$vault_password_file_encrypted"

BIN ansible/misc/vault-password.gpg Normal file
Binary file not shown.
45 ansible/molecule/default/molecule.yml Normal file
@@ -0,0 +1,45 @@
---
prerun: false
dependency:
  name: galaxy
  enabled: false
driver:
  name: vagrant
  provider:
    name: libvirt
  default_box: archlinux/archlinux
platforms:
  - name: oscar-dev
    cpu: 1
    memory: 1024
    box: archlinux/archlinux
  - name: merlin-dev
    cpu: 1
    memory: 1024
    box: generic/rocky9
  - name: gerard-dev
    cpu: 1
    memory: 1024
    box: debian/bookworm64
  - name: nas-dev
    cpu: 1
    memory: 1024
    box: archlinux/archlinux
provisioner:
  name: ansible
  connection_options:
    ansible_ssh_user: vagrant
    ansible_become: true
  env:
    ANSIBLE_CONFIG: ../../ansible.cfg
    ANSIBLE_ROLES_PATH: "../../roles"
  log: true
  lint:
    name: ansible-lint
  inventory:
    host_vars: []
    links:
      group_vars: ../../group_vars
      hosts: ../../staging
verifier:
  name: ansible
@@ -1,12 +1,54 @@
---
- hosts: all
  name: Hashicorp stack
- name: Consul install
  hosts: all
  roles:
    - role: ansible-hashicorp-vault
      when: inventory_hostname not in groups['VPS']
      become: true
    - role: ansible-consul
      become: true

- name: Vault install
  hosts: homelab
  roles:
    - role: ansible-hashicorp-vault
      become: true
  post_tasks:
    - name: Stat root file
      ansible.builtin.stat:
        path: "{{ vault_unseal_keys_dir_output }}/rootkey"
      register: rootkey_exist
      delegate_to: localhost
    - name: Reading root contents
      ansible.builtin.command: cat "{{ vault_unseal_keys_dir_output }}/rootkey"
      register: root_token
      delegate_to: localhost
      when: rootkey_exist.stat.exists
      changed_when: false
    - name: debug
      ansible.builtin.debug:
        var: root_token
    - name: Generate nomad token
      community.hashi_vault.vault_token_create:
        renewable: true
        policies: "nomad-server-policy"
        period: 72h
        no_parent: true
        token: "{{ root_token.stdout }}"
        url: "http://active.vault.service.consul:8200"
        retries: 4
      run_once: true
      delegate_to: localhost
      when: root_token.stdout is defined
      register: nomad_token_data

    - name: Gather nomad token
      ansible.builtin.set_fact:
        nomad_vault_token: "{{ nomad_token_data.login.auth.client_token }}"
      when: nomad_token_data.login is defined

- name: nomad
  hosts: all
  vars:
    unseal_keys_dir_output: ~/vaultunseal
  roles:
    - role: ansible-nomad
      become: true
    - role: docker

9 ansible/playbooks/autofs.yml Normal file
@@ -0,0 +1,9 @@
---
- hosts:
    - homelab
    - VPS
    - NAS
  vars:
    # certbot_force: true
  roles:
    - autofs
@@ -1,6 +1,6 @@
---
- hosts: all
  become: true
  gather_facts: false
  become: true
  roles:
    - ansible_bootstrap

@@ -14,10 +14,13 @@
    - docker
  become: true
  become_user: '{{ user.name }}'

- hosts: all
  roles:
    - role: user_config
      vars:
        user_config_username: '{{ user.name }}'
      become_user: '{{ user.name }}'
        user_config_username: "{{ user.name }}"
      become_user: "{{ user.name }}"
      become: true
    - role: user_config
      vars:
@@ -1,16 +1,54 @@
---
- hosts: database
- name: Database playbook
  hosts: database
  vars:
    # certbot_force: true
  pre_tasks:
    - name: Install pg vectors (immich)
      aur:
        name: pgvecto.rs-bin
        state: present
      become: true
      become_user: aur_builder
    - name: Add database member to pg_hba replication
      ansible.builtin.set_fact:
        postgresql_hba_entries: "{{ postgresql_hba_entries + [\
          {'type':'host', \
          'database': 'replication',\
          'user':'repli',\
          'address':hostvars[item]['ansible_'+hostvars[item]['default_interface']]['ipv4']['address']+'/32',\
          'auth_method':'trust'}] }}"
      loop: '{{ groups.database }}'
  roles:
    - role: ansible-role-postgresql
      become: true
  tasks:
    - name: Add pg_read_all_data to dump
      community.postgresql.postgresql_membership:
        target_roles:
          - dump
        groups:
          - pg_read_all_data
    - name: Launch replication
      ansible.builtin.command: pg_basebackup -D /var/lib/postgres/data -h {{ groups["database_active"]|first }} -U repli -Fp -Xs -P -R -w
      args:
        creates: /var/lib/postgres/data/postgresql.conf
      become: true
      become_user: "{{ postgresql_user }}"
      become_user: postgres
      when: inventory_hostname in groups["database_standby"]
    - name: Ensure PostgreSQL is started and enabled on boot.
      ansible.builtin.service:
        name: '{{ postgresql_daemon }}'
        state: '{{ postgresql_service_state }}'
        enabled: '{{ postgresql_service_enabled }}'
      become: true

    - name: Set Postgres shared libraries
      community.postgresql.postgresql_set:
        name: shared_preload_libraries
        value: vectors.so
      become: true
      become_user: postgres
      when: inventory_hostname in groups["database_active"]
      notify: Restart postgresql
    - name: Set Postgres search path
      community.postgresql.postgresql_set:
        name: search_path
        value: '$user, public, vectors'
      become: true
      become_user: postgres
      when: inventory_hostname in groups["database_active"]

6 ansible/playbooks/dns.yml Normal file
@@ -0,0 +1,6 @@
---
- name: DNS playbook
  hosts: DNS
  roles:
    - role: pdns_recursor-ansible
      become: true
28 ansible/playbooks/nas.yml Normal file
@@ -0,0 +1,28 @@
---
- name: gather all
  hosts: all
- name: NAS playbook
  hosts: NAS
  vars:
    # certbot_force: true
  pre_tasks:
    - name: include task NasBind
      ansible.builtin.include_tasks:
        file: tasks/NasBind.yml
      loop: "{{ nas_bind_source }}"
    - name: create nomad folder
      ansible.builtin.file:
        path: "{{ nas_bind_target }}/nomad/{{ item.name }}"
        owner: "{{ item.owner|default('root') }}"
        state: directory
      become: true
      loop: "{{ NAS_nomad_folder }}"
  roles:
    - role: ansible-role-nut
      become: true
    - role: ansible-role-nfs
      become: true
    - role: ansible-role-pureftpd
      become: true
    - role: vladgh.samba.server
      become: true
@@ -2,6 +2,7 @@
- hosts:
    - homelab
    - VPS
    - NAS
  vars:
    # certbot_force: true
  tasks:
@@ -22,7 +23,4 @@
        loop_var: create
  roles:
    - system
    - autofs
    - role: msmtp
      when: ansible_os_family != "RedHat"
    - cronie

18 ansible/playbooks/tasks/NasBind.yml Normal file
@@ -0,0 +1,18 @@
- name: Ensure base NFS directory exist
  ansible.builtin.file:
    path: "{{ item.dest }}"
    state: directory
  become: true
- name: Ensure source NFS directory exist
  ansible.builtin.file:
    path: "{{ item.source }}"
    state: directory
  become: true
- name: Bind NAS export
  ansible.posix.mount:
    path: "{{ item.dest }}"
    src: "{{ item.source }}"
    opts: bind
    fstype: none
    state: mounted
  become: true
1 ansible/playbooks/templates/samba_homes_include.conf Normal file
@@ -0,0 +1 @@
path = /exports/homes/%S
@@ -1,27 +1,52 @@
[homelab]
[DNS]
oscar
bleys
gerard

[VPS]
corwin
merlin


[dhcp]
gerard
oberon

[wireguard]
corwin
oscar
merlin
gerard
[database_active]
bleys

[database]
[database_standby]
oscar
bleys

[database:children]
database_active
database_standby

[rsyncd]
oscar
bleys

[wireguard:children]
production

[NAS]
oberon

[cluster]
oscar
#gerard
bleys


[homelab:children]
NAS
cluster

[VPS]
merlin

[region:children]
homelab
VPS
production

[production]
oscar
merlin
#gerard
bleys
oberon

[staging]

@@ -1,15 +1,11 @@
---
- hosts: all
  remote_user: root
  vars:
    provissionning_default_root: "{{ lookup('hashi_vault','secret=secrets/data/ansible/user:root') }}"
  roles:
    - ansible-arch-provissionning

- hosts: all
  remote_user: root
  vars:
    ansible_password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/user:root') }}"
  roles:
    - ansible_bootstrap

@@ -1,41 +1,49 @@
---
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-arch-provissionning.git
roles:
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-arch-provissionning.git
  scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-role-postgresql.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-postgresql.git
  scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-role-sssd
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-sssd
  scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible_bootstrap.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible_bootstrap.git
  scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/autofs.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/autofs.git
  scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/cronie.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/cronie.git
  scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/docker.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/docker.git
  scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/hass-client-control.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/hass-client-control.git
  scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/msmtp.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/msmtp.git
  scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/rsyncd.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/rsyncd.git
  scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/system.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/system.git
  scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/user_config.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/user_config.git
  scm: git
- src: git@github.com:vincentDcmps/ansible-role-wireguard.git
  scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-consul.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-consul.git
  scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-hashicorp-vault.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-hashicorp-vault.git
  scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-nomad.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-nomad.git
  scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/mpd.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/mpd.git
  scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-dhcpd.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-dhcpd.git
  scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-user.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-user.git
  scm: git
- src: git@github.com:vincentDcmps/ansible-role-nfs.git
  scm: git
- src: git@github.com:vincentDcmps/ansible-role-nut.git
  scm: git
- src: git@git.ducamps.eu:2222/ansible-roles/ansible-role-pureftpd.git
  scm: git
- src: https://github.com/PowerDNS/pdns_recursor-ansible.git
collections:
  - name: vladgh.samba

@@ -1,10 +1,10 @@
---
- import_playbook: playbooks/sssd.yml
- import_playbook: playbooks/wireguard.yml
- import_playbook: playbooks/server.yml
- import_playbook: playbooks/dhcpd.yml
- import_playbook: playbooks/dns.yml
- import_playbook: playbooks/HashicorpStack.yml
- import_playbook: playbooks/nas.yml
- import_playbook: playbooks/autofs.yml
- import_playbook: playbooks/sssd.yml
- import_playbook: playbooks/database.yml
- import_playbook: playbooks/rsyncd.yml
- import_playbook: playbooks/music-player.yml
- import_playbook: playbooks/dhcpd.yml
- import_playbook: playbooks/user_config.yml

@@ -1,18 +1,44 @@
[homelab]
[DNS]
oscar-dev

[database_active]
oscar-dev

[database_standby]
gerard-dev

[database:children]
database_active
database_standby

[wireguard:children]
staging

[NAS]
nas-dev

[cluster]
oscar-dev
gerard-dev

[homelab:children]
NAS
cluster

[VPS]
merlin-dev

[database]
oscar-dev

[wireguard:children]
[region:children]
homelab
VPS
staging



[staging]
oscar-dev
gerard-dev
merlin-dev
nas-dev

[production]

@@ -6,14 +6,14 @@
"tags": [
  "homer.enable=true",
  "homer.name=Diskstation",
  "homer.url=https://syno.ducamps.win",
  "homer.logo=https://syno.ducamps.win/webman/resources/images/icon_dsm_96.png",
  "homer.url=https://syno.ducamps.eu",
  "homer.logo=https://syno.ducamps.eu/webman/resources/images/icon_dsm_96.png",
  "homer.service=Application",
  "homer.target=_blank",

  "traefik.enable=true",
  "traefik.http.routers.syno.rule=Host(`syno.ducamps.win`)",
  "traefik.http.routers.syno.tls.domains[0].sans=syno.ducamps.win",
  "traefik.http.routers.syno.rule=Host(`syno.ducamps.eu`)",
  "traefik.http.routers.syno.tls.domains[0].sans=syno.ducamps.eu",
  "traefik.http.routers.syno.tls.certresolver=myresolver",
  "traefik.http.routers.syno.entrypoints=web,websecure"
]

117 docs/ADR/004-DNS.md Normal file
@@ -0,0 +1,117 @@
# DNS

## 001 Recursor out of NAS

### Status

done

### Context

Currently the main local domain DNS is located on the NAS.

Goals:

- avoid a DNS outage when the NAS reboots (my Synology is 10 years old and a little slow to reboot); moreover, during a NAS reboot we lose the adblock DNS in the Nomad cluster because Nomad depends on the NFS share
- remove the direct redirection to the service.consul DNS and the iptables rule used to redirect port 53 to consul on gerard; instead, the new DNS could forward directly to an active consul node on port 8300

#### DNS software

The chosen software needs DHCP dynamic updates
and must be able to forward a domain to a port other than 53 (see the sketch after this section).

### Decision

We will migrate the main domain DNS from the NAS to gerard (PowerDNS).
PowerDNS provides two distinct binaries: one for the authoritative server and another for the recursor.
The goal is to first migrate the recursive part from the Synology to a physical service,
and in a second step migrate the authoritative server into the Nomad cluster.

### Consequences

Before moving the authoritative server we need to remove the DB DNS dependency (create db consul services).
We need to delete the iptables rule on gerard before deploying.

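A minimal sketch of the non-53 forwarding requirement, using pdns-recursor's forward-zones syntax; the addresses are assumptions, and 8600 is Consul's default DNS port, not necessarily what this setup uses:

```sh
# A rough sketch, not the repo's actual config: pdns-recursor can forward a
# zone to a non-53 port via forward-zones. Addresses here are assumptions.
cat >> /etc/pdns-recursor/recursor.conf <<'EOF'
forward-zones=consul=10.0.0.3:8600
forward-zones+=lan.ducamps.eu=192.168.1.10:53
EOF
systemctl restart pdns-recursor
```
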
## 002 Each node queries its own consul client for consul DNS lookups

### Status

done

### Context

To avoid a cluster failure when the DNS recursor goes down,
I would like each cluster member to query its own consul client
first when resolving consul DNS names.

### Decision

Implement systemd-resolved on all cluster members and add a DNS redirection, as sketched below.

### Consequences

Need to modify the ansible system role to activate systemd-resolved, and the consul role to configure the redirection.

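A minimal sketch of that redirection, assuming systemd-resolved's standard drop-in directory and Consul's default DNS port (8600); the ansible roles presumably template something equivalent:

```sh
# Route the consul TLD to the local consul agent's DNS endpoint.
mkdir -p /etc/systemd/resolved.conf.d
cat > /etc/systemd/resolved.conf.d/consul.conf <<'EOF'
[Resolve]
DNS=127.0.0.1:8600
Domains=~consul
EOF
systemctl restart systemd-resolved
```
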
## 003 Migrate authoritative DNS from NAS to cluster

### Status

done

### Context

We currently have three authoritative domains on the NAS:

- ducamps.win
- ducamps.eu
- lan.ducamps.eu

We could migrate the authoritative DNS into the cluster.
ducamps.win and ducamps.eu are only used for application access, so they have no dependency on the cluster build.
We need to study the cluster build dependency for lan.ducamps.eu -> in any case, when building from scratch we need to use IPs.
We need a keepalive IP and must check there is no conflict if it is stored on the same machine as pihole -> OK, it doesn't need to listen on 53, it is only queried by the recursor.
The authoritative DNS will depend on storage (less problematic than the recursor).

### Decision

### Consequences

## 004 Migrate the recursor into the cluster

### Status

done

### Context

Now that the cluster doesn't depend on the recursor (each node queries its own consul agent for consul lookups),
we need to study whether we can migrate the recursor into Nomad without breaking a dependency.

Advantages:

- the recursor can move to another client in case of failure

Against:

- this job needs a keepalive IP like pihole
- *we lose the recursor if the Nomad cluster is lost*

### Decision

Put one recursor on the cluster next to the authoritative server and keep the recursor on gerard for better redundancy.

### Consequences

## 005 Physical recursor location

### Status

done

### Context

Following the NAS migration, the physical DNS recursor was installed directly on the NAS. This creates a SPOF: when the NAS fails, the recursors on the Nomad cluster are stopped too because of the volume dependency.

### Decision

Put the physical recursor on a cluster node; that way, a DNS outage requires the NAS and this node to be down at the same time.
42 docs/ADR/005-NAS.md Normal file
@@ -0,0 +1,42 @@
# NAS

## 001 New NAS spec

### Status

In progress

### Context

Storage:

- Data filesystem will be btrfs.
- Study whether to keep the root filesystem on ext4.
- Need LVM beneath btrfs to keep the possibility of adding a cache later (a cache on cold data is useless at the beginning; maybe a write cache in future use). See the sketch after this list.
- Hot data (nomad, documents, freshly downloaded files, music?) on SSD; cold data (films, series, photos) on HDD.
- At least 2 HDDs and 2 SSDs.

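A rough sketch of the layout this implies; device names are invented for illustration:

```sh
# btrfs on top of an LVM logical volume, so an SSD cache can later be
# attached with lvconvert --type cache without reformatting the data.
pvcreate /dev/sdb
vgcreate data /dev/sdb
lvcreate -n hot -L 500G data
mkfs.btrfs /dev/data/hot
```
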
Hardware:

- a 2.5 Gbps network would be good for future evolution
- at least 4 GB of RAM (expandable would be appreciated)

Software:

- must be able to install a custom Linux distribution

### Decision

- Due to form factor, power consumption and SSD capability, my choice is the ASUSTOR Nimbustor 2 Gen 2 AS5402; it matches the needs and is less expensive than a DIY NAS.
- Buy only one additional 2 TB SSD to store the system and hot data.

### Consequences

Need to migrate the data while keeping the same disks:

- install the system
- copy all data from the 2 TB HDD to the SSD, then format the 2 TB HDD
- copy the download data from the 4 TB HDD to the SSD
- copy the series to the 2 TB HDD and copy the films to an external hard drive
25 docs/ADR/006-Docker-pull-through Normal file
@@ -0,0 +1,25 @@
# Docker pull-through

## 001 Architecture consideration

### Status

Accepted

### Context

Docker Hub enforces a pull limit; if something goes wrong in our infrastructure we can quickly hit this limit. The solution is to implement a pull-through proxy.

### Decision

Create two container tasks: a Docker Hub pull-through cache and a GHCR one.

We could add these registries to traefik to have both under port 5000, but this would add a traefik dependency on rebuild,
so to begin we will use one traefik service on two different static ports.

### Consequences

- these registries need to be started first at cluster creation
- need to update all job images with the local proxy URL
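A rough sketch of one such task, shown with plain docker rather than the actual Nomad job; the container name and port are assumptions:

```sh
# registry:2 in pull-through mode, caching Docker Hub.
docker run -d --name dockerhub-proxy -p 5000:5000 \
  -e REGISTRY_PROXY_REMOTEURL=https://registry-1.docker.io \
  registry:2
# A second instance with REGISTRY_PROXY_REMOTEURL=https://ghcr.io on another
# static port covers GHCR.
```
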
@@ -3,30 +3,34 @@
```mermaid
flowchart LR
subgraph External
recursor
GandiDns[ Gandi ducamps.win]
externalRecursor[recursor]
GandiDns[ hetzner ducamps.win]
end
subgraph Internal
pihole[pihole]----ducamps.win-->NAS
pihole[pihole]--ducamps.win-->NAS
pihole--service.consul-->consul[consul cluster]
pihole--->recursor
recursor--service.consul-->consul
DHCP --dynamic update--> NAS
NAS--service.consul-->consul
NAS
recursor--ducamps.win-->NAS
consul--service.consul--->consul
clients--->pihole
clients--->recursor
end
NAS --> recursor
pihole --> recursor

pihole --> externalRecursor
recursor-->External
```

## Detail

The Pihole container in the Nomad cluster is set as the primary DNS and ad blocker; the secondary DNS is located on the NAS.
The Pihole container in the Nomad cluster is set as the primary DNS and ad blocker; the secondary DNS recursor is located on gerard.

The DNS located on the NAS manages the *ducamps.win* domain on the local network; Pihole forwards every *ducamps.win* request to this DNS.
The DNS located on the NAS manages the *ducamps.win* domain on the local network; each recursor forwards *ducamps.win* requests to this DNS.

Each DNS forwards *service.consul* requests to the consul cluster. On Pihole a template configures each consul server.

On the diskstation every request is forwarded to a single consul node; this point is to be improved because it leaves the possibility of an outage. Due to Synology DNSServer limitations we can only put a forward on port 53, so on the target consul node we need to redirect port 53 to 8300 with iptables rules.
Each DNS forwards *service.consul* requests to the consul cluster.
Each consul node has a redirection in systemd-resolved to its own consul client.

A DHCP service performs dynamic updates on the NAS DNS at lease delivery.

External recursors are on Cloudflare and FDN.
External recursors (Cloudflare and FDN) are set on Pihole in case of recursor failure.

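A quick check of both forwarding paths from a cluster node; the record names below are placeholders, not guaranteed to exist in this setup:

```sh
# consul path: systemd-resolved forwards *.consul to the local consul agent
dig +short vault.service.consul
# local-zone path: the recursor forwards ducamps.win to the NAS DNS
dig +short diskstation.ducamps.win
```
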
25 docs/How-to/ansible_vault.md Normal file
@@ -0,0 +1,25 @@
# Ansible vault management

Ansible vault passwords are encrypted with a GPG key stored in ansible/misc.
To renew the password, follow this workflow:

```sh
# Generate a new password for the default vault
pwgen -s 64 1 > default-pw

# Re-encrypt all default vaults
ansible-vault rekey --new-vault-password-file ./default-pw \
  $(git grep -l 'ANSIBLE_VAULT;1.1;AES256$')

# Save the new password in encrypted form
# (replace "RECIPIENT" with your email)
gpg -r RECIPIENT -o misc/vault-password.gpg -e default-pw

# Ensure the new password is usable
ansible-vault view misc/vaults/vault_hcloud.yml

# Remove the unencrypted password file
rm default-pw
```

The script `vault-keyring-client.sh` is set in ansible.cfg as `vault_password_file` to decrypt the GPG file.
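For reference, a sketch of the ansible.cfg wiring this refers to; the relative path is an assumption:

```sh
cat >> ansible.cfg <<'EOF'
[defaults]
vault_password_file = ./misc/vault-keyring-client.sh
EOF
```
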
8 docs/How-to/troubleshoot.md Normal file
@@ -0,0 +1,8 @@
# Troubleshooting

## Issue with the SMTP traefik port

Ensure that no other traefik router (HTTP or TCP) is listening on the smtp entrypoint or on all entrypoints; this can disturb the SMTP TLS connection.
See [here](https://doc.traefik.io/traefik/routing/routers/#entrypoints_1).

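One way to spot the offending router, assuming the traefik API is enabled and reachable at this address (an assumption):

```sh
curl -s http://traefik.service.consul:8080/api/tcp/routers | jq '.[] | {name, entryPoints}'
curl -s http://traefik.service.consul:8080/api/http/routers | jq '.[] | {name, entryPoints}'
```
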
@@ -1,38 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.

provider "registry.terraform.io/hetznercloud/hcloud" {
  version = "1.42.1"
  hashes = [
    "h1:1AGk4CAeqdyF1D4vNyjarKSBoN2z+Y6ubUxzqiyc7qI=",
    "zh:002e2e57c1425bb4cf620c6a80732ee071726d0d82d0523c5258dde3222113df",
    "zh:03213d79fc2bcd94ac812ca22c1d1d6678132ab957d26a65c84ee52853059c02",
    "zh:0785429efdb084cb4e5a0d899112764c21d2260391e82897d7e67c9e5deccc31",
    "zh:12a5653b7a00f458b65b89b15d4517f785322ebb65b5a689fa8766042a09184c",
    "zh:2dc7464290a623eb599cfbf731d13554448a7a824c2b1db16275f482d9059670",
    "zh:35a7e19868a304d77ab192871ccaa45418c13a3aac301df8d9f57c1259913051",
    "zh:368202d94a1104895c1d566e3f16edd55e05a09881fd4a20cd4854ca3593fee9",
    "zh:431503e5055979aabf520675bb465496d934979c7a687e1cd3c8d2ae27bfa649",
    "zh:45cede3c2147cfdc76d53853e07395c05b1feff8dca16a2f8f7f1fd151e2449f",
    "zh:8b57869af18982af21f6f816e65e6057ec5055481b220147fdbe0959917ae112",
    "zh:be9ba4813dcf640c0df04543a3c74b0db117fbd3dcc26140e252cf5157734945",
    "zh:d3fb9ca398a153dc894caa94f95ef2e989350cf2bbfa29bc93ff2608cab44c1f",
    "zh:fc690be8cbada1e99063ed1c6148f9a70ab341100a97ad2886f4826a951780d3",
    "zh:ffa9470e41fa04ac667d4d830987aeed2070767d57f2414692c2dd395a405fba",
  ]
}

provider "registry.terraform.io/timohirt/hetznerdns" {
  version = "2.2.0"
  hashes = [
    "h1:HyskQAglrOueur79gSCBgx9MNDOs0tz39aNYQiFgxz8=",
    "zh:5bb0ab9f62be3ed92070235e507f3c290491d51391ef4edcc70df53b65a83019",
    "zh:5ccdfac7284f5515ac3cff748336b77f21c64760e429e811a1eeefa8ebb86e12",
    "zh:687c35665139ae37c291e99085be2e38071f6b355c4e1e8957c5a6a3bcdf9caf",
    "zh:6de27f0d0d1513b3a4b7e81923b4a8506c52759bd466e2b4f8156997b0478931",
    "zh:85770a9199a4c2d16ca41538d7a0f7a7bfc060678104a1faac19213e6f0a800c",
    "zh:a5ff723774a9ccfb27d5766c5e6713537f74dd94496048c89c5d64dba597e59e",
    "zh:bf9ab76fd37cb8aebb6868d73cbe8c08cee36fc25224cc1ef5949efa3c34b06c",
    "zh:db998fe3bdcd4902e99fa470bb3f355883170cf4c711c8da0b5f1f4510f1be41",
  ]
}
262 infra/dns.tf
@@ -1,262 +0,0 @@
locals {
  defaultCname = hcloud_server.HomeLab2[0].name
}

resource "hetznerdns_zone" "externalZone" {
  name = "ducamps.win"
  ttl  = 1700
}

resource "hetznerdns_zone" "externalZoneEU" {
  name = "ducamps.eu"
  ttl  = 1700
}

resource "hetznerdns_record" "MX1Eu" {
  zone_id = hetznerdns_zone.externalZoneEU.id
  name    = "@"
  value   = "20 mail"
  type    = "MX"
}

resource "hetznerdns_record" "mailEu" {
  zone_id = hetznerdns_zone.externalZoneEU.id
  name    = "mail"
  value   = local.defaultCname
  type    = "CNAME"
}

resource "hetznerdns_record" "serverEU" {
  zone_id = hetznerdns_zone.externalZoneEU.id
  name    = local.defaultCname
  value   = hcloud_server.HomeLab2[0].ipv4_address
  type    = "A"
}

resource "hetznerdns_record" "spfEu" {
  zone_id = hetznerdns_zone.externalZoneEU.id
  name    = "@"
  value   = "\"v=spf1 ip4:${hcloud_server.HomeLab2[0].ipv4_address} ~all\""
  type    = "TXT"
}

resource "hetznerdns_record" "dkimRecordEu" {
  zone_id = hetznerdns_zone.externalZoneEU.id
  name    = "mail._domainkey"
  value   = "\"v=DKIM1; k=rsa; p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0GadPljh+zM+Hf8MAf2wyj+h9p72aBFeFaiDhnswxO68fM9Uk6XhN4s1BkHLY5AWQh0SP1JDBaFWDfJiOV/27E3qJIa4KDHPZcgxgvo+SbfgNZq5qGIhKyqAAtyg/dI8IMKVOZ5Cevdv9VFrSF84xnTmDBCrWydPyV8D5+xA/bVna/AVCAVUeXVppyMPpC0s1HpRNJ0YaY23RH1KwChxvZY+BkanELSzTA8K0ATbIzwgQaK10/lc1S6EFvaSNG8sy6EIoondl6t+uiqU3bHgAW68r8snzl2gclG+uMkjXkH7YGPJzL9Co1o1MlKOHIONz89CCe0puIH4qaCo1G6EDwIDAQAB\""
  type    = "TXT"
}

resource "hetznerdns_record" "dmarcEU" {
  zone_id = hetznerdns_zone.externalZoneEU.id
  name    = "_dmarc"
  value   = "\"v=DMARC1; p=none; rua=mailto:vincent@ducamps.eu; ruf=mailto:vincent@ducamps.eu; sp=none; ri=86400\""
  type    = "TXT"
}

resource "hetznerdns_record" "imapsAutodiscoverEU" {
  zone_id = hetznerdns_zone.externalZoneEU.id
  name    = "_imaps._tcp"
  value   = "0 0 993 mail.ducamps.eu"
  type    = "SRV"
}

resource "hetznerdns_record" "submissionAutodiscoverEU" {
  zone_id = hetznerdns_zone.externalZoneEU.id
  name    = "_submission._tcp"
  value   = "0 0 465 mail.ducamps.eu"
  type    = "SRV"
}

resource "hetznerdns_record" "rootalias" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "@"
  value   = hcloud_server.HomeLab2[0].ipv4_address
  type    = "A"
}

resource "hetznerdns_record" "MX1" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "@"
  value   = "20 spool.mail.gandi.net."
  type    = "MX"
}

resource "hetznerdns_record" "MX2" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "@"
  value   = "50 fb.mail.gandi.net."
  type    = "MX"
}

resource "hetznerdns_record" "spf" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "@"
  value   = "\"v=spf1 include:_mailcust.gandi.net ~all\""
  type    = "TXT"
}

resource "hetznerdns_record" "caldav" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "_caldavs_tcp"
  value   = "10 20 443 www.${hetznerdns_zone.externalZone.name}."
  type    = "SRV"
}

resource "hetznerdns_record" "carddavs" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "_carddavs_tcp"
  value   = "10 20 443 www.${hetznerdns_zone.externalZone.name}."
  type    = "SRV"
}

resource "hetznerdns_record" "server" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = local.defaultCname
  value   = hcloud_server.HomeLab2[0].ipv4_address
  type    = "A"
}

resource "hetznerdns_record" "dendrite" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "dendrite"
  value   = local.defaultCname
  type    = "CNAME"
}

resource "hetznerdns_record" "diskstation" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "diskstation"
  value   = local.defaultCname
  type    = "CNAME"
}

resource "hetznerdns_record" "drone" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "drone"
  value   = local.defaultCname
  type    = "CNAME"
}

resource "hetznerdns_record" "file" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "file"
  value   = local.defaultCname
  type    = "CNAME"
}

resource "hetznerdns_record" "ghostfolio" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "ghostfolio"
  value   = local.defaultCname
  type    = "CNAME"
}

resource "hetznerdns_record" "git" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "git"
  value   = local.defaultCname
  type    = "CNAME"
}

resource "hetznerdns_record" "grafana" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "grafana"
  value   = local.defaultCname
  type    = "CNAME"
}

resource "hetznerdns_record" "hass" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "hass"
  value   = local.defaultCname
  type    = "CNAME"
}

resource "hetznerdns_record" "jellyfin" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "jellyfin"
  value   = local.defaultCname
  type    = "CNAME"
}

resource "hetznerdns_record" "supysonic" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "supysonic"
  value   = local.defaultCname
  type    = "CNAME"
}

resource "hetznerdns_record" "syno" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "syno"
  value   = local.defaultCname
  type    = "CNAME"
}

resource "hetznerdns_record" "vault" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "vault"
  value   = local.defaultCname
  type    = "CNAME"
}

resource "hetznerdns_record" "vikunja" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "vikunja"
  value   = local.defaultCname
  type    = "CNAME"
}

resource "hetznerdns_record" "www" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "www"
  value   = local.defaultCname
  type    = "CNAME"
}

resource "hetznerdns_record" "ww" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "ww"
  value   = local.defaultCname
  type    = "CNAME"
}

resource "hetznerdns_record" "paperless" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "paperless-ng"
  value   = local.defaultCname
  type    = "CNAME"
}

resource "hetznerdns_record" "gm1" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "gm1._domainkey"
  value   = "gm1.gandimail.net."
  type    = "CNAME"
}

resource "hetznerdns_record" "gm2" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "gm2._domainkey"
  value   = "gm2.gandimail.net."
  type    = "CNAME"
}

resource "hetznerdns_record" "gm3" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "gm3._domainkey"
  value   = "gm3.gandimail.net."
  type    = "CNAME"
}

resource "hetznerdns_record" "imap" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "imap"
  value   = "mail.gandi.net."
  type    = "CNAME"
}

resource "hetznerdns_record" "smtp" {
  zone_id = hetznerdns_zone.externalZone.id
  name    = "smtp"
  value   = "mail.gandi.net."
  type    = "CNAME"
}

20 makefile
@@ -10,12 +10,30 @@ vault-dev:
		./vault/standalone_vault.sh $(FILE);\
	fi

create-dev:
vagranup:
	vagrant up

create-dev: vagranup DNS-stagging
	make -C ansible deploy_staging
	make -C terraform deploy_vault env=staging
	VAULT_TOKEN=$(shell cat ~/vaultUnseal/staging/rootkey) python ./script/generate-vault-secret

create-dev-base: vagranup DNS-stagging
	make -C ansible deploy_staging_base

destroy-dev:
	vagrant destroy --force

serve:
	mkdocs serve

DNS-stagging:
	$(eval dns := $(shell dig oscar-dev.lan.ducamps.dev +short))
	$(eval dns1 := $(shell dig nas-dev.lan.ducamps.dev +short))
	sudo resolvectl dns virbr2 "$(dns)" "$(dns1)";sudo resolvectl domain virbr2 "~consul";sudo systemctl restart systemd-resolved.service

DNS-production:
	sudo resolvectl dns virbr2 "";sudo resolvectl domain virbr2 "";sudo systemctl restart systemd-resolved.service

@@ -35,7 +35,7 @@ job "MQTT" {
      ]
    }
    config {
      image = "eclipse-mosquitto"
      image = "docker.service.consul:5000/library/eclipse-mosquitto"
      ports = ["mosquittoWS", "mosquittoMQTT"]
      volumes = [
        "/mnt/diskstation/nomad/mosquitto:/mosquitto/data",
62 nomad-job/apps/actualbudget.nomad Normal file
@@ -0,0 +1,62 @@

job "actualbudget" {
  datacenters = ["homelab"]
  priority = 50
  type = "service"
  meta {
    forcedeploy = "0"
  }
  constraint {
    attribute = "${attr.cpu.arch}"
    value = "amd64"
  }
  constraint {
    attribute = "${node.class}"
    operator = "set_contains"
    value = "cluster"
  }
  group "actualbudget" {
    network {
      mode = "host"
      port "http" {
        to = 5006
      }
    }
    task "actualbudget-server" {
      driver = "docker"
      service {
        name = "actualbudget"
        port = "http"
        tags = [
          "traefik.enable=true",
          "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`budget.ducamps.eu`)",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=budget.ducamps.eu",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
          "traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
          "homer.enable=true",
          "homer.name=${NOMAD_TASK_NAME}",
          "homer.service=Application",
          "homer.target=_blank",
          "homer.logo=https://budget.ducamps.eu/apple-touch-icon.png",
          "homer.url=https://budget.ducamps.eu",
        ]
      }
      config {
        image = "ghcr.service.consul:5000/actualbudget/actual-server:latest"
        ports = ["http"]
        volumes = [
          "/mnt/diskstation/nomad/actualbudget:/data"
        ]
      }
      env {
      }
      resources {
        memory = 300
      }
    }
  }
}
239 nomad-job/apps/borgmatic.nomad Normal file
@@ -0,0 +1,239 @@

job "borgmatic" {
  datacenters = ["homelab"]
  priority = 50
  type = "service"
  meta {
    forcedeploy = "0"
  }
  constraint {
    attribute = "${node.class}"
    operator = "set_contains"
    value = "NAS"
  }

  group "borgmatic" {
    vault {
      policies = ["borgmatic"]
    }
    task "borgmatic" {
      action "manual-backup" {
        command = "/usr/local/bin/borgmatic"
        args = ["create",
          "prune",
          "--verbosity",
          "1"
        ]
      }
      action "list-backup" {
        command = "/usr/local/bin/borgmatic"
        args = ["rlist"]
      }
      driver = "docker"
      config {
        image = "ghcr.service.consul:5000/borgmatic-collective/borgmatic"
        volumes = [
          "/exports:/exports",
          "local/borgmatic.d:/etc/borgmatic.d",
          "secret/id_rsa:/root/.ssh/id_rsa",
          "secret/known_hosts:/root/.ssh/known_hosts",
          "/exports/nomad/borgmatic:/root/.cache/borg",
        ]
      }
      env {
      }

      template {
        data = <<EOH
BORG_RSH="ssh -i /root/.ssh/id_rsa -p 23"
{{ with secret "secrets/data/nomad/borgmatic"}}
BORG_PASSPHRASE= {{.Data.data.passphrase}}
{{end}}
EOH
        destination = "secrets/sample.env"
        env = true
      }
      template {
        data = <<EOH
0 2 * * * PATH=$PATH:/usr/local/bin /usr/local/bin/borgmatic create prune --verbosity 1
0 23 1 * * PATH=$PATH:/usr/local/bin /usr/local/bin/borgmatic check
EOH
        destination = "local/borgmatic.d/crontab.txt"
      }
      template {
        data = <<EOH
# List of source directories to backup (required). Globs and
# tildes are expanded. Do not backslash spaces in path names.
source_directories:
  - /exports/ebook
  - /exports/homes
  - /exports/music
  - /exports/nomad
  - /exports/photo

repositories:
  - path: ssh://u304977@u304977.your-storagebox.de/./{{if eq "production" (env "meta.env") }}backup_hamelab{{else}}backup_homelab_dev{{end}}
    label: {{if eq "production" (env "meta.env") }}backup_hamelab{{else}}backup_homelab_dev{{end}}

exclude_patterns:
  - '*/nomad/jellyfin/cache'
  - '*nomad/loki/'
  - '*nomad/prometheus'
  - '*nomad/registry'
  - '*nomad/pacoloco'
  - '*nomad/pihole'
  - '*nomad/jellyfin/config/data/library*'

match_archives: '*'
archive_name_format: '{{ env "node.datacenter" }}-{now:%Y-%m-%dT%H:%M:%S.%f}'
extra_borg_options:
  # Extra command-line options to pass to "borg init".
  # init: --extra-option

  # Extra command-line options to pass to "borg prune".
  # prune: --extra-option

  # Extra command-line options to pass to "borg compact".
  # compact: --extra-option

  # Extra command-line options to pass to "borg create".
  create: --progress --stats

  # Extra command-line options to pass to "borg check".
  # check: --extra-option

# Keep all archives within this time interval.
# keep_within: 3H

# Number of secondly archives to keep.
# keep_secondly: 60

# Number of minutely archives to keep.
# keep_minutely: 60

# Number of hourly archives to keep.
# keep_hourly: 24

# Number of daily archives to keep.
keep_daily: 7

# Number of weekly archives to keep.
keep_weekly: 4

# Number of monthly archives to keep.
# keep_monthly: 6

# Number of yearly archives to keep.
# keep_yearly: 1

checks:
  - name: repository
#  - archives
# check_repositories:
#   - user@backupserver:sourcehostname.borg
# check_last: 3
# output:
#   color: false

# List of one or more shell commands or scripts to execute
# before creating a backup, run once per configuration file.
# before_backup:
#   - echo "Starting a backup."

# List of one or more shell commands or scripts to execute
# before pruning, run once per configuration file.
# before_prune:
#   - echo "Starting pruning."

# List of one or more shell commands or scripts to execute
# before compaction, run once per configuration file.
# before_compact:
#   - echo "Starting compaction."

# List of one or more shell commands or scripts to execute
# before consistency checks, run once per configuration file.
# before_check:
#   - echo "Starting checks."

# List of one or more shell commands or scripts to execute
# before extracting a backup, run once per configuration file.
# before_extract:
#   - echo "Starting extracting."

# List of one or more shell commands or scripts to execute
# after creating a backup, run once per configuration file.
# after_backup:
#   - echo "Finished a backup."

# List of one or more shell commands or scripts to execute
# after compaction, run once per configuration file.
# after_compact:
#   - echo "Finished compaction."

# List of one or more shell commands or scripts to execute
# after pruning, run once per configuration file.
# after_prune:
#   - echo "Finished pruning."

# List of one or more shell commands or scripts to execute
# after consistency checks, run once per configuration file.
# after_check:
#   - echo "Finished checks."

# List of one or more shell commands or scripts to execute
# after extracting a backup, run once per configuration file.
# after_extract:
#   - echo "Finished extracting."

# List of one or more shell commands or scripts to execute
# when an exception occurs during a "prune", "compact",
# "create", or "check" action or an associated before/after
# hook.
# on_error:
#   - echo "Error during prune/compact/create/check."

# List of one or more shell commands or scripts to execute
# before running all actions (if one of them is "create").
# These are collected from all configuration files and then
# run once before all of them (prior to all actions).
# before_everything:
#   - echo "Starting actions."

# List of one or more shell commands or scripts to execute
# after running all actions (if one of them is "create").
# These are collected from all configuration files and then
# run once after all of them (after any action).
# after_everything:
#   - echo "Completed actions."
EOH
        destination = "local/borgmatic.d/config.yaml"
      }
      template {
        data = <<EOH
{{ with secret "secrets/data/nomad/borgmatic"}}
{{.Data.data.privatekey}}
{{end}}
EOH
        destination = "secret/id_rsa"
        perms = "700"
      }
      template {
        data = <<EOH
[u304977.your-storagebox.de]:23 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIICf9svRenC/PLKIL9nk6K/pxQgoiFC41wTNvoIncOxs
[u304977.your-storagebox.de]:23 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA5EB5p/5Hp3hGW1oHok+PIOH9Pbn7cnUiGmUEBrCVjnAw+HrKyN8bYVV0dIGllswYXwkG/+bgiBlE6IVIBAq+JwVWu1Sss3KarHY3OvFJUXZoZyRRg/Gc/+LRCE7lyKpwWQ70dbelGRyyJFH36eNv6ySXoUYtGkwlU5IVaHPApOxe4LHPZa/qhSRbPo2hwoh0orCtgejRebNtW5nlx00DNFgsvn8Svz2cIYLxsPVzKgUxs8Zxsxgn+Q/UvR7uq4AbAhyBMLxv7DjJ1pc7PJocuTno2Rw9uMZi1gkjbnmiOh6TTXIEWbnroyIhwc8555uto9melEUmWNQ+C+PwAK+MPw==
[u304977.your-storagebox.de]:23 ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAGK0po6usux4Qv2d8zKZN1dDvbWjxKkGsx7XwFdSUCnF19Q8psHEUWR7C/LtSQ5crU/g+tQVRBtSgoUcE8T+FWp5wBxKvWG2X9gD+s9/4zRmDeSJR77W6gSA/+hpOZoSE+4KgNdnbYSNtbZH/dN74EG7GLb/gcIpbUUzPNXpfKl7mQitw==
EOH
        destination = "secret/known_hosts"
        perms = "700"
      }
      resources {
        memory = 300
        memory_max = 1000
      }
    }
  }
}
@@ -25,11 +25,11 @@ job "chainetv" {
          "homer.service=Application",
          "homer.icon=fas fa-tv",
          "homer.target=_blank",
          "homer.url=https://www.ducamps.win/${NOMAD_JOB_NAME}",
          "homer.url=https://www.ducamps.eu/${NOMAD_JOB_NAME}",

          "traefik.enable=true",
          "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`www.ducamps.win`)&&PathPrefix(`/chainetv`)",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=www.ducamps.win",
          "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`www.ducamps.eu`)&&PathPrefix(`/chainetv`)",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=www.ducamps.eu",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
          "traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
          "traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=chainetv,chainetvStrip",
@@ -39,7 +39,7 @@ job "chainetv" {
        ]
      }
      config {
        image = "ducampsv/chainetv:latest"
        image = "docker.service.consul:5000/ducampsv/chainetv:latest"
        ports = ["http"]
      }
      resources {
@@ -1,5 +1,5 @@
job "dockermailserver" {
  datacenters = ["hetzner"]
  datacenters = ["homelab"]
  priority = 90
  type = "service"
  meta {
@@ -9,7 +9,11 @@ job "dockermailserver" {
    attribute = "${attr.cpu.arch}"
    value = "amd64"
  }

  constraint {
    attribute = "${node.class}"
    operator = "set_contains"
    value = "cluster"
  }
  group "dockermailserver" {
    network {
      mode = "host"
@@ -115,7 +119,7 @@ job "dockermailserver" {
    task "docker-mailserver" {
      driver = "docker"
      config {
        image = "ghcr.io/docker-mailserver/docker-mailserver:edge"
        image = "ghcr.service.consul:5000/docker-mailserver/docker-mailserver:latest"
        ports = ["smtp", "esmtp", "imap","rspamd"]
        volumes = [
          "/mnt/diskstation/nomad/dms/mail-data:/var/mail",
@@ -133,7 +137,7 @@ job "dockermailserver" {
      env {
        OVERRIDE_HOSTNAME = "mail.ducamps.eu"
        DMS_VMAIL_UID = 1000000
        DMS_VMAIL_GID = 100
        DMS_VMAIL_GID = 984
        SSL_TYPE= "letsencrypt"
        LOG_LEVEL="info"
        POSTMASTER_ADDRESS="vincent@ducamps.eu"
@@ -141,6 +145,8 @@ job "dockermailserver" {
        ENABLE_OPENDKIM=0
        ENABLE_OPENDMARC=0
        ENABLE_POLICYD_SPF=0
        ENABLE_UPDATE_CHECK=0
        UPDATE_CHECK_INTERVAL="1d"
        RSPAMD_CHECK_AUTHENTICATED=0

      }
@@ -167,7 +173,7 @@ submissions/inet/smtpd_upstream_proxy_protocol=haproxy
      }
      template {
        data = <<EOH
haproxy_trusted_networks = 10.0.0.0/24, 127.0.0.0/8, 172.17.0.1
haproxy_trusted_networks = 10.0.0.0/24, 127.0.0.0/8, 172.17.0.1, 192.168.1.0/24
haproxy_timeout = 3 secs
service imap-login {
  inet_listener imaps {
@@ -1,6 +1,6 @@

 job "filestash" {
-  datacenters = ["hetzner"]
+  datacenters = ["homelab"]
   priority    = 50
   type        = "service"
   meta {
@@ -10,7 +10,11 @@ job "filestash" {
    attribute = "${attr.cpu.arch}"
    value     = "amd64"
  }
+  constraint {
+    attribute = "${node.class}"
+    operator  = "set_contains"
+    value     = "cluster"
+  }

  group "filestash" {
    network {
@@ -31,12 +35,12 @@ job "filestash" {
        "homer.enable=true",
        "homer.name=FileStash",
        "homer.service=Application",
-       "homer.url=http://file.ducamps.win",
-       "homer.logo=http://file.ducamps.win/assets/logo/apple-touch-icon.png",
+       "homer.url=http://file.ducamps.eu",
+       "homer.logo=http://file.ducamps.eu/assets/logo/apple-touch-icon.png",
        "homer.target=_blank",
        "traefik.enable=true",
-       "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`file.ducamps.win`)",
-       "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=file.ducamps.win",
+       "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`file.ducamps.eu`)",
+       "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=file.ducamps.eu",
        "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
        "traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",

@@ -44,7 +48,7 @@ job "filestash" {
      ]
    }
    config {
-     image = "machines/filestash"
+     image = "docker.service.consul:5000/machines/filestash"
      ports = ["http"]
      volumes = [
        "/mnt/diskstation/nomad/filestash:/app/data/state"
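The win→eu renames above all touch the same four-tag Traefik pattern that recurs in every job of this diff. For reference, its anatomy in one place (a sketch; the service name is hypothetical, and myresolver and the entrypoint names are defined in the Traefik job, which is not part of this excerpt):

service {
  name = "example"
  port = "http"
  tags = [
    "traefik.enable=true",
    # Router match rule: which Host (optionally plus PathPrefix) reaches this service
    "traefik.http.routers.example.rule=Host(`example.ducamps.eu`)",
    # Extra SAN requested on the TLS certificate
    "traefik.http.routers.example.tls.domains[0].sans=example.ducamps.eu",
    # ACME resolver and listeners, both declared on the Traefik instance itself
    "traefik.http.routers.example.tls.certresolver=myresolver",
    "traefik.http.routers.example.entrypoints=web,websecure",
  ]
}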
@ -27,7 +27,7 @@ job "ghostfolio" {
|
||||
task "redis" {
|
||||
driver = "docker"
|
||||
config {
|
||||
image = "redis"
|
||||
image = "docker.service.consul:5000/library/redis"
|
||||
ports = ["redis"]
|
||||
}
|
||||
resources {
|
||||
@ -42,8 +42,8 @@ job "ghostfolio" {
|
||||
port = "http"
|
||||
tags = [
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.win`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.win",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
|
||||
|
||||
@ -51,7 +51,7 @@ job "ghostfolio" {
|
||||
]
|
||||
}
|
||||
config {
|
||||
image = "ghostfolio/ghostfolio:latest"
|
||||
image = "docker.service.consul:5000/ghostfolio/ghostfolio:latest"
|
||||
ports = ["http"]
|
||||
volumes = [
|
||||
]
|
||||
@ -69,7 +69,7 @@ job "ghostfolio" {
|
||||
template {
|
||||
data= <<EOH
|
||||
{{ with secret "secrets/data/database/ghostfolio"}}
|
||||
DATABASE_URL = postgresql://ghostfolio:{{.Data.data.password}}@db1.ducamps.win:5432/ghostfolio?connect_timeout=300&sslmode=prefer
|
||||
DATABASE_URL = postgresql://ghostfolio:{{.Data.data.password}}@active.db.service.consul/ghostfolio?connect_timeout=300&sslmode=prefer
|
||||
{{end}}
|
||||
{{ with secret "secrets/data/nomad/ghostfolio"}}
|
||||
ACCESS_TOKEN_SALT = {{.Data.data.token}}
|
||||
@ -80,6 +80,7 @@ job "ghostfolio" {
|
||||
}
|
||||
resources {
|
||||
memory = 400
|
||||
memory_max = 600
|
||||
}
|
||||
}
|
||||
|
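db1.ducamps.win was a fixed host; active.db.service.consul is presumably a Consul DNS name that always resolves to whichever Postgres node currently carries the "active" tag, so jobs survive a database failover without a job edit. The Vault-template pattern used for it across these jobs, sketched with hypothetical secret paths and names:

template {
  data = <<EOH
{{ with secret "secrets/data/database/example" }}
DATABASE_URL = postgresql://example:{{ .Data.data.password }}@active.db.service.consul/example
{{ end }}
EOH
  destination = "secrets/db.env"
  env         = true   # rendered file is exported to the task as environment variables
}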
@ -3,6 +3,11 @@ job "homeassistant" {
|
||||
datacenters = ["homelab"]
|
||||
priority = 90
|
||||
type = "service"
|
||||
constraint {
|
||||
attribute = "${node.class}"
|
||||
operator = "set_contains"
|
||||
value = "cluster"
|
||||
}
|
||||
meta {
|
||||
forcedeploy = "0"
|
||||
}
|
||||
@ -38,10 +43,10 @@ job "homeassistant" {
|
||||
"homer.subtitle=Home Assistant",
|
||||
"homer.logo=https://raw.githubusercontent.com/home-assistant/assets/master/logo/logo-small.svg",
|
||||
"homer.target=_blank",
|
||||
"homer.url=https://${NOMAD_TASK_NAME}.ducamps.win",
|
||||
"homer.url=https://${NOMAD_TASK_NAME}.ducamps.eu",
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.ducamps.win`)",
|
||||
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.ducamps.win",
|
||||
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.ducamps.eu`)",
|
||||
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.ducamps.eu",
|
||||
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=myresolver",
|
||||
"traefik.http.routers.${NOMAD_TASK_NAME}.entrypoints=web,websecure",
|
||||
]
|
||||
@ -52,7 +57,7 @@ job "homeassistant" {
|
||||
}
|
||||
}
|
||||
config {
|
||||
image = "homeassistant/home-assistant:stable"
|
||||
image = "docker.service.consul:5000/homeassistant/home-assistant:stable"
|
||||
ports = ["http", "coap"]
|
||||
privileged = "true"
|
||||
network_mode = "host"
|
nomad-job/apps/immich.nomad.hcl (new file, 146 lines)
@@ -0,0 +1,146 @@
job "immich" {
  datacenters = ["homelab"]
  priority    = 50
  type        = "service"
  meta {
    forcedeploy = "0"
  }
  constraint {
    attribute = "${attr.cpu.arch}"
    value     = "amd64"
  }

  group "immich" {
    network {
      mode = "host"
      port "http" {
        to = 3001
      }
      port "redis" {
        to = 6379
      }
      port "machinelearning" {
        to = 3003
      }
    }
    volume "immich-upload" {
      type            = "csi"
      source          = "immich-upload"
      access_mode     = "multi-node-multi-writer"
      attachment_mode = "file-system"
    }
    volume "immich-cache" {
      type            = "csi"
      source          = "immich-cache"
      access_mode     = "multi-node-multi-writer"
      attachment_mode = "file-system"
    }
    volume "photo" {
      type            = "csi"
      source          = "photo"
      access_mode     = "multi-node-multi-writer"
      attachment_mode = "file-system"
    }
    vault {
      policies = ["immich"]
    }
    task "immich-server" {
      driver = "docker"
      service {
        name = "immich"
        port = "http"
        tags = [
          "homer.enable=true",
          "homer.name=immich",
          "homer.service=Application",
          "homer.logo=https://immich.ducamps.eu/favicon-144.png",
          "homer.target=_blank",
          "homer.url=https://immich.ducamps.eu",
          "traefik.enable=true",
          "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
          "traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
        ]
      }
      volume_mount {
        volume      = "immich-upload"
        destination = "/usr/src/app/upload"
      }
      volume_mount {
        volume      = "photo"
        destination = "/photo"
      }
      config {
        image = "ghcr.service.consul:5000/immich-app/immich-server:release"
        ports = ["http"]
        volumes = [
          "/etc/localtime:/etc/localtime"
        ]
      }
      template {
        data = <<EOH
{{ with secret "secrets/data/database/immich"}}
DB_PASSWORD= {{ .Data.data.password }}
{{end}}
DB_DATABASE_NAME= immich
DB_USERNAME= immich
DB_HOSTNAME= active.db.service.consul
REDIS_HOSTNAME = {{env "NOMAD_IP_redis"}}
REDIS_PORT = {{env "NOMAD_HOST_PORT_redis"}}
IMMICH_MACHINE_LEARNING_URL = http://{{ env "NOMAD_ADDR_machinelearning"}}
EOH
        destination = "secrets/immich.env"
        env         = true
      }
      resources {
        memory     = 600
        memory_max = 1800
      }
    }

    task "immich-machine-learning" {
      driver = "docker"
      volume_mount {
        volume      = "immich-cache"
        destination = "/cache"
      }
      config {
        image = "ghcr.service.consul:5000/immich-app/immich-machine-learning:main"
        ports = ["machinelearning"]
      }
      template {
        data = <<EOH
{{ with secret "secrets/data/database/immich"}}
DB_PASSWORD= {{ .Data.data.password }}
{{end}}
DB_DATABASE_NAME= immich
DB_USERNAME= immich
DB_HOSTNAME= active.db.service.consul
REDIS_HOSTNAME = {{env "NOMAD_IP_redis"}}
REDIS_PORT = {{env "NOMAD_HOST_PORT_redis"}}
EOH
        destination = "secrets/immich.env"
        env         = true
      }
      resources {
        memory     = 200
        memory_max = 1800
      }
    }

    task "redis" {
      driver = "docker"
      config {
        image = "docker.service.consul:5000/library/redis:6.2-alpine"
        ports = ["redis"]
      }
      resources {
        memory = 50
      }
    }
  }
}
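The three CSI volumes this job mounts have to be registered with the cluster before the job can be placed. A sketch of a matching volume specification, registered with nomad volume create (plugin_id is an assumption; the access and attachment modes mirror the job's volume blocks):

# volume.hcl — registered via `nomad volume create volume.hcl`
id        = "immich-upload"
name      = "immich-upload"
type      = "csi"
plugin_id = "nfs"   # hypothetical CSI plugin name

capability {
  access_mode     = "multi-node-multi-writer"
  attachment_mode = "file-system"
}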
@ -2,6 +2,7 @@ job "jellyfin" {
|
||||
datacenters = ["homelab"]
|
||||
priority = 30
|
||||
type = "service"
|
||||
|
||||
meta {
|
||||
forcedeploy = "1"
|
||||
}
|
||||
@ -9,6 +10,11 @@ job "jellyfin" {
|
||||
attribute = "${attr.cpu.arch}"
|
||||
value = "amd64"
|
||||
}
|
||||
constraint {
|
||||
attribute = "${node.class}"
|
||||
operator = "set_contains"
|
||||
value = "cluster"
|
||||
}
|
||||
group jellyfin-vue {
|
||||
network {
|
||||
mode = "host"
|
||||
@ -26,22 +32,22 @@ job "jellyfin" {
|
||||
"homer.name=${NOMAD_TASK_NAME}",
|
||||
"homer.service=Application",
|
||||
"homer.target=_blank",
|
||||
"homer.logo=https://${NOMAD_TASK_NAME}.ducamps.win/icon.png",
|
||||
"homer.url=https://${NOMAD_TASK_NAME}.ducamps.win",
|
||||
"homer.logo=https://${NOMAD_TASK_NAME}.ducamps.eu/icon.png",
|
||||
"homer.url=https://${NOMAD_TASK_NAME}.ducamps.eu",
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.ducamps.win`)",
|
||||
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.ducamps.win",
|
||||
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.ducamps.eu`)",
|
||||
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.ducamps.eu",
|
||||
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=myresolver",
|
||||
"traefik.http.routers.${NOMAD_TASK_NAME}.entrypoints=web,websecure",
|
||||
]
|
||||
|
||||
}
|
||||
config {
|
||||
image = "ghcr.io/jellyfin/jellyfin-vue:unstable"
|
||||
image = "ghcr.service.consul:5000/jellyfin/jellyfin-vue:unstable"
|
||||
ports = ["http"]
|
||||
}
|
||||
env {
|
||||
DEFAULT_SERVERS = "${NOMAD_TASK_NAME}.ducamps.win"
|
||||
DEFAULT_SERVERS = "${NOMAD_TASK_NAME}.ducamps.eu"
|
||||
}
|
||||
|
||||
resources {
|
||||
@ -70,11 +76,11 @@ job "jellyfin" {
|
||||
"homer.name=jellyfin",
|
||||
"homer.service=Application",
|
||||
"homer.target=_blank",
|
||||
"homer.logo=https://${NOMAD_JOB_NAME}.ducamps.win/web/assets/img/banner-light.png",
|
||||
"homer.url=https://${NOMAD_JOB_NAME}.ducamps.win",
|
||||
"homer.logo=https://${NOMAD_JOB_NAME}.ducamps.eu/web/assets/img/banner-light.png",
|
||||
"homer.url=https://${NOMAD_JOB_NAME}.ducamps.eu",
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.win`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.win",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
|
||||
|
||||
@ -82,13 +88,13 @@ job "jellyfin" {
|
||||
]
|
||||
}
|
||||
config {
|
||||
image = "jellyfin/jellyfin"
|
||||
image = "docker.service.consul:5000/jellyfin/jellyfin"
|
||||
ports = ["http"]
|
||||
volumes = [
|
||||
"/mnt/diskstation/nomad/jellyfin/config:/config",
|
||||
"/mnt/diskstation/nomad/jellyfin/cache:/cache",
|
||||
"/mnt/diskstation/media/:/media",
|
||||
"/mnt/diskstation/music/:/media2"
|
||||
"/mnt/diskstation/media:/media",
|
||||
"/mnt/diskstation/music:/music",
|
||||
]
|
||||
devices = [
|
||||
{
|
nomad-job/apps/makefile (new symbolic link)
@@ -0,0 +1 @@
../makefile
nomad-job/apps/mealie.nomad.hcl (new file, 95 lines)
@@ -0,0 +1,95 @@

job "mealie" {
  datacenters = ["homelab"]
  priority    = 50
  type        = "service"
  meta {
    forcedeploy = "0"
  }
  constraint {
    attribute = "${attr.cpu.arch}"
    value     = "amd64"
  }

  group "mealie" {
    network {
      mode = "host"
      port "http" {
        to = 9000
      }
    }
    volume "mealie-data" {
      type            = "csi"
      source          = "mealie-data"
      access_mode     = "multi-node-multi-writer"
      attachment_mode = "file-system"
    }
    vault {
      policies = ["mealie"]
    }
    task "mealie-server" {
      driver = "docker"
      service {
        name = "mealie"
        port = "http"
        tags = [
          "homer.enable=true",
          "homer.name=Mealie",
          "homer.service=Application",
          "homer.subtitle=Mealie",
          "homer.logo=https://mealie.ducamps.eu/favicon.ico",
          "homer.target=_blank",
          "homer.url=https://${NOMAD_JOB_NAME}.ducamps.eu",
          "traefik.enable=true",
          "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
          "traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
        ]
      }
      config {
        image = "ghcr.io/mealie-recipes/mealie"
        ports = ["http"]
      }
      volume_mount {
        volume      = "mealie-data"
        destination = "/app/data"
      }
      env {
        PUID            = "1000001"
        PGID            = "1000001"
        TZ              = "Europe/Paris"
        MAX_WORKERS     = 1
        WEB_CONCURRENCY = 1
        BASE_URL        = "https://mealie.ducamps.eu"
        OIDC_USER_GROUP        = "MealieUsers"
        OIDC_ADMIN_GROUP       = "MealieAdmins"
        OIDC_AUTH_ENABLED      = "True"
        OIDC_SIGNUP_ENABLED    = "true"
        OIDC_CONFIGURATION_URL = "https://auth.ducamps.eu/.well-known/openid-configuration"
        OIDC_CLIENT_ID         = "mealie"
        OIDC_AUTO_REDIRECT     = "false"
        OIDC_PROVIDER_NAME     = "authelia"
        DB_ENGINE       = "postgres"
        POSTGRES_USER   = "mealie"
        POSTGRES_SERVER = "active.db.service.consul"
        POSTGRES_PORT   = 5432
        POSTGRES_DB     = "mealie"
        LOG_LEVEL       = "DEBUG"
      }
      template {
        data = <<EOH
{{ with secret "secrets/data/database/mealie"}}POSTGRES_PASSWORD= "{{ .Data.data.password }}" {{end}}
{{ with secret "secrets/data/authelia/mealie"}}OIDC_CLIENT_SECRET= "{{ .Data.data.password }}" {{end}}
EOH
        destination = "secrets/var.env"
        env         = true
      }
      resources {
        memory = 400
      }
    }
  }
}
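The vault { policies = ["mealie"] } stanza only works if a Vault policy of that name grants read access to the paths the job's templates reference. A minimal sketch of such a policy (Vault policies are also HCL; the paths are taken from the templates above):

path "secrets/data/database/mealie" {
  capabilities = ["read"]
}
path "secrets/data/authelia/mealie" {
  capabilities = ["read"]
}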
@ -6,7 +6,11 @@ job "pacoloco" {
|
||||
meta {
|
||||
forcedeploy = "0"
|
||||
}
|
||||
|
||||
constraint {
|
||||
attribute = "${node.class}"
|
||||
operator = "set_contains"
|
||||
value = "cluster"
|
||||
}
|
||||
group "pacoloco" {
|
||||
network {
|
||||
mode = "host"
|
||||
@ -21,17 +25,17 @@ job "pacoloco" {
|
||||
port = "http"
|
||||
tags = [
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`arch.ducamps.win`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=arch.ducamps.win",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`arch.ducamps.eu`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=arch.ducamps.eu",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
|
||||
]
|
||||
}
|
||||
config {
|
||||
image = "ducampsv/pacoloco"
|
||||
image = "docker.service.consul:5000/ducampsv/pacoloco"
|
||||
ports = ["http"]
|
||||
volumes = [
|
||||
"/mnt/diskstation/archMirror:/var/cache/pacoloco",
|
||||
"/mnt/diskstation/nomad/pacoloco:/var/cache/pacoloco",
|
||||
"local/pacoloco.yaml:/etc/pacoloco.yaml"
|
||||
]
|
||||
|
||||
@ -49,6 +53,8 @@ repos:
|
||||
- http://archlinux.mailtunnel.eu
|
||||
- http://mirror.cyberbits.eu/archlinux
|
||||
- http://mirrors.niyawe.de/archlinux
|
||||
archlinux_armv8:
|
||||
url: http://mirror.archlinuxarm.org
|
||||
archlinux_armv7h:
|
||||
url: http://mirror.archlinuxarm.org
|
||||
prefetch:
|
@ -6,7 +6,11 @@ job "paperless-ng" {
|
||||
meta {
|
||||
forcedeploy = "0"
|
||||
}
|
||||
|
||||
constraint {
|
||||
attribute = "${node.class}"
|
||||
operator = "set_contains"
|
||||
value = "cluster"
|
||||
}
|
||||
constraint {
|
||||
attribute = "${attr.cpu.arch}"
|
||||
value = "amd64"
|
||||
@ -29,7 +33,7 @@ job "paperless-ng" {
|
||||
task "redis" {
|
||||
driver = "docker"
|
||||
config {
|
||||
image = "redis"
|
||||
image = "docker.service.consul:5000/library/redis"
|
||||
ports = ["redis"]
|
||||
}
|
||||
resources {
|
||||
@ -43,16 +47,17 @@ job "paperless-ng" {
|
||||
port = "http"
|
||||
tags = [
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.win`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.win",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia",
|
||||
"homer.enable=true",
|
||||
"homer.name=Paperless",
|
||||
"homer.service=Application",
|
||||
"homer.logo=https://${NOMAD_JOB_NAME}.ducamps.win/static/frontend/fr-FR/apple-touch-icon.png",
|
||||
"homer.logo=https://${NOMAD_JOB_NAME}.ducamps.eu/static/frontend/fr-FR/apple-touch-icon.png",
|
||||
"homer.target=_blank",
|
||||
"homer.url=https://${NOMAD_JOB_NAME}.ducamps.win",
|
||||
"homer.url=https://${NOMAD_JOB_NAME}.ducamps.eu",
|
||||
]
|
||||
check {
|
||||
type = "http"
|
||||
@ -63,7 +68,7 @@ job "paperless-ng" {
|
||||
}
|
||||
}
|
||||
config {
|
||||
image = "ghcr.io/paperless-ngx/paperless-ngx"
|
||||
image = "ghcr.service.consul:5000/paperless-ngx/paperless-ngx"
|
||||
ports = ["http"]
|
||||
volumes = [
|
||||
"/mnt/diskstation/nomad/paperless-ng/media:/usr/src/paperless/media",
|
||||
@ -75,13 +80,16 @@ job "paperless-ng" {
|
||||
}
|
||||
env {
|
||||
PAPERLESS_REDIS = "redis://${NOMAD_ADDR_redis}"
|
||||
PAPERLESS_DBHOST = "db1.ducamps.win"
|
||||
PAPERLESS_DBHOST = "active.db.service.consul"
|
||||
PAPERLESS_DBNAME = "paperless"
|
||||
PAPERLESS_DBUSER = "paperless"
|
||||
PAPERLESS_OCR_LANGUAGE = "fra"
|
||||
PAPERLESS_CONSUMER_POLLING = "60"
|
||||
PAPERLESS_URL = "https://${NOMAD_JOB_NAME}.ducamps.win"
|
||||
PAPERLESS_URL = "https://${NOMAD_JOB_NAME}.ducamps.eu"
|
||||
PAPERLESS_ALLOWED_HOSTS = "192.168.1.42,192.168.1.40"
|
||||
PAPERLESS_ENABLE_HTTP_REMOTE_USER = "true"
|
||||
PAPERLESS_HTTP_REMOTE_USER_HEADER_NAME = "HTTP_REMOTE_USER"
|
||||
PAPERLESS_LOGOUT_REDIRECT_URL= "https://auth.ducamps.eu/logout"
|
||||
}
|
||||
|
||||
template {
|
||||
@ -93,6 +101,7 @@ job "paperless-ng" {
|
||||
}
|
||||
resources {
|
||||
memory = 950
|
||||
memory_max = 1500
|
||||
cpu = 2000
|
||||
}
|
||||
}
|
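The authelia middleware attached to the router, together with the PAPERLESS_ENABLE_HTTP_REMOTE_USER settings, implies Traefik forwards each request to Authelia for authentication and relays the resulting user back in a header. The middleware itself has to be declared somewhere, typically as tags on the Authelia service; a hypothetical sketch (the Authelia address and the exact header list are assumptions):

tags = [
  "traefik.http.middlewares.authelia.forwardauth.address=http://authelia.service.consul:9091/api/verify?rd=https://auth.ducamps.eu",
  "traefik.http.middlewares.authelia.forwardauth.trustForwardHeader=true",
  # Remote-User is what paperless reads via PAPERLESS_HTTP_REMOTE_USER_HEADER_NAME
  "traefik.http.middlewares.authelia.forwardauth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email",
]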
@ -6,6 +6,11 @@ job "radicale" {
|
||||
meta {
|
||||
forcedeploy = "0"
|
||||
}
|
||||
constraint {
|
||||
attribute = "${node.class}"
|
||||
operator = "set_contains"
|
||||
value = "cluster"
|
||||
}
|
||||
group "radicale" {
|
||||
network {
|
||||
mode = "host"
|
||||
@ -24,12 +29,12 @@ job "radicale" {
|
||||
"homer.service=Application",
|
||||
"homer.logo=https://radicale.org/assets/logo.svg",
|
||||
"homer.target=_blank",
|
||||
"homer.url=https://www.ducamps.win/${NOMAD_JOB_NAME}",
|
||||
"homer.url=https://www.ducamps.eu/${NOMAD_JOB_NAME}",
|
||||
|
||||
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`www.ducamps.win`)&&PathPrefix(`/radicale`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=www.ducamps.win",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`www.ducamps.eu`)&&PathPrefix(`/radicale`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=www.ducamps.eu",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=radicaleHeader,radicalestrip",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
|
||||
@ -39,11 +44,11 @@ job "radicale" {
|
||||
]
|
||||
}
|
||||
config {
|
||||
image = "tomsquest/docker-radicale"
|
||||
image = "docker.service.consul:5000/tomsquest/docker-radicale"
|
||||
ports = ["http"]
|
||||
volumes = [
|
||||
"local/config:/config/config",
|
||||
"/mnt/diskstation/CardDav:/data"
|
||||
"/mnt/diskstation/nomad/radicale:/data"
|
||||
]
|
||||
|
||||
}
|
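Radicale is served under a path prefix rather than its own hostname, so the radicalestrip middleware named on the router presumably strips /radicale before the request reaches the backend. A sketch of how such a middleware would be declared through tags (the middleware is defined elsewhere in this repo, not shown in this excerpt):

tags = [
  # Remove the routing prefix so the backend sees clean paths
  "traefik.http.middlewares.radicalestrip.stripprefix.prefixes=/radicale",
]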
@ -23,7 +23,7 @@ job "torrent" {
|
||||
}
|
||||
}
|
||||
task "bittorent" {
|
||||
driver = "podman"
|
||||
driver = "docker"
|
||||
service {
|
||||
name = "bittorent"
|
||||
port = "http"
|
||||
@ -31,38 +31,37 @@ job "torrent" {
|
||||
tags = [
|
||||
"homer.enable=true",
|
||||
"homer.name=torrent",
|
||||
"homer.url=https://torrent.ducamps.win",
|
||||
"homer.url=https://torrent.ducamps.eu",
|
||||
"homer.service=Application",
|
||||
"homer.logo=https://${NOMAD_JOB_NAME}.ducamps.win/images/favicon-196x196.png",
|
||||
"homer.logo=https://fleet.linuxserver.io/images/linuxserver_rutorrent.png",
|
||||
"homer.target=_blank",
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.win`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.win",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
|
||||
|
||||
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia-basic",
|
||||
]
|
||||
}
|
||||
user = "root"
|
||||
config {
|
||||
|
||||
image = "docker.io/crazymax/rtorrent-rutorrent:latest"
|
||||
privileged = "true"
|
||||
ulimit {
|
||||
nofile = "8192:8192"
|
||||
}
|
||||
image = "docker.service.consul:5000/crazymax/rtorrent-rutorrent:edge"
|
||||
ports = [
|
||||
"http",
|
||||
"torrent",
|
||||
"ecoute"
|
||||
]
|
||||
volumes = [
|
||||
"/mnt/hetzner/storagebox/rutorrentConfig:/data",
|
||||
"/opt/rutorrentConfig:/data",
|
||||
"/mnt/hetzner/storagebox/file:/downloads"
|
||||
]
|
||||
|
||||
}
|
||||
env {
|
||||
PUID = 1024
|
||||
PGID = 984
|
||||
PUID = 100001
|
||||
PGID = 10
|
||||
UMASK = 002
|
||||
WEBUI_PORT = "8080"
|
||||
}
|
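The old config raised the open-file limit with a ulimit block; dropping it means the task now runs with the Docker daemon's default nofile limit. For reference, the equivalent stanza under the docker driver, should it need to come back (a sketch reusing the values removed above):

config {
  image = "docker.service.consul:5000/crazymax/rtorrent-rutorrent:edge"
  # rtorrent keeps many peer sockets open; raise the soft:hard nofile
  # limit if the daemon default proves too low.
  ulimit {
    nofile = "8192:8192"
  }
}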
nomad-job/apps/rutorrentlocal.nomad (new file, 64 lines)
@@ -0,0 +1,64 @@

job "rutorrentlocal" {
  datacenters = ["homelab"]
  priority    = 80
  type        = "service"
  meta {
    forcedeploy = "0"
  }
  constraint {
    attribute = "${node.unique.name}"
    operator  = "set_contains"
    value     = "oberon"
  }
  group "bittorent" {
    network {
      mode = "host"
      port "http" {
        to = 8080
      }
      port "torrent" {
        static = 6881
      }
      port "ecoute" {
        static = 50000
      }
    }
    task "bittorent" {
      driver = "podman"
      service {
        name         = "bittorentlocal"
        port         = "http"
        address_mode = "host"
        tags = [
        ]
      }
      user = "root"
      config {
        image = "docker.service.consul:5000/crazymax/rtorrent-rutorrent:edge"
        ports = [
          "http",
          "torrent",
          "ecoute"
        ]
        volumes = [
          "/exports/nomad/rutorrent/data:/data",
          "/exports/nomad/rutorrent/downloads:/downloads"
        ]
      }
      env {
        PUID  = 100001
        PGID  = 10
        UMASK = 002
        WEBUI_PORT = "8080"
      }
      resources {
        memory = 650
      }
    }
  }
}
@ -10,7 +10,11 @@ job "supysonic" {
|
||||
attribute = "${attr.cpu.arch}"
|
||||
value = "amd64"
|
||||
}
|
||||
|
||||
constraint {
|
||||
attribute = "${node.class}"
|
||||
operator = "set_contains"
|
||||
value = "cluster"
|
||||
}
|
||||
group "supysonic" {
|
||||
network {
|
||||
mode = "host"
|
||||
@ -34,11 +38,11 @@ job "supysonic" {
|
||||
"homer.service=Application",
|
||||
"homer.icon=fas fa-headphones",
|
||||
"homer.target=_blank",
|
||||
"homer.url=http://${NOMAD_JOB_NAME}.ducamps.win",
|
||||
"homer.url=http://${NOMAD_JOB_NAME}.ducamps.eu",
|
||||
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.win`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.win",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
|
||||
|
||||
@ -49,7 +53,7 @@ job "supysonic" {
|
||||
task "supysonic-frontend" {
|
||||
driver = "docker"
|
||||
config {
|
||||
image = "nginx:alpine"
|
||||
image = "docker.service.consul:5000/library/nginx:alpine"
|
||||
ports = [
|
||||
"http"
|
||||
]
|
||||
@ -92,7 +96,7 @@ http {
|
||||
task "supysonic-server" {
|
||||
driver = "docker"
|
||||
config {
|
||||
image = "ducampsv/supysonic:latest"
|
||||
image = "docker.service.consul:5000/ducampsv/supysonic:latest"
|
||||
ports = ["fcgi"]
|
||||
force_pull = true
|
||||
volumes = [
|
||||
@ -105,16 +109,16 @@ http {
|
||||
SUPYSONIC_DAEMON_ENABLED = "true"
|
||||
SUPYSONIC_WEBAPP_LOG_LEVEL = "DEBUG"
|
||||
SUPYSONIC_DAEMON_LOG_LEVEL = "INFO"
|
||||
SUPYSONIC_LDAP_SERVER = "LDAP://ldap.ducamps.win"
|
||||
SUPYSONIC_LDAP_BASE_DN = "dc=ducamps,dc=win"
|
||||
SUPYSONIC_LDAP_USER_FILTER = "(&(memberOf=CN=SupysonicUsers,cn=groups,dc=ducamps,dc=win))"
|
||||
SUPYSONIC_LDAP_ADMIN_FILTER= "(&(memberOf=CN=SupysonicAdmins,cn=groups,dc=ducamps,dc=win))"
|
||||
SUPYSONIC_LDAP_SERVER = "LDAPS://ldaps.service.consul"
|
||||
SUPYSONIC_LDAP_BASE_DN = "dc=ducamps,dc=eu"
|
||||
SUPYSONIC_LDAP_USER_FILTER = "(&(memberOf=cn=SupysonicUsers,ou=groups,dc=ducamps,dc=eu))"
|
||||
SUPYSONIC_LDAP_ADMIN_FILTER= "(&(memberOf=cn=SupysonicAdmins,ou=groups,dc=ducamps,dc=eu))"
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
{{ with secret "secrets/data/database/supysonic"}}
|
||||
SUPYSONIC_DB_URI = "postgres://supysonic:{{ .Data.data.password}}@db1.ducamps.win/supysonic"
|
||||
SUPYSONIC_DB_URI = "postgres://supysonic:{{ .Data.data.password}}@active.db.service.consul/supysonic"
|
||||
{{end}}
|
||||
{{ with secret "secrets/data/nomad/supysonic"}}
|
||||
SUPYSONIC_LDAP_BIND_DN = "{{ .Data.data.serviceAccountName }}"
|
@ -10,7 +10,11 @@ job "syncthing" {
|
||||
attribute = "${attr.cpu.arch}"
|
||||
value = "amd64"
|
||||
}
|
||||
|
||||
constraint {
|
||||
attribute = "${node.class}"
|
||||
operator = "set_contains"
|
||||
value = "cluster"
|
||||
}
|
||||
|
||||
group "syncthing" {
|
||||
network {
|
||||
@ -40,7 +44,7 @@ job "syncthing" {
|
||||
]
|
||||
}
|
||||
config {
|
||||
image = "linuxserver/syncthing"
|
||||
image = "docker.service.consul:5000/linuxserver/syncthing"
|
||||
ports = ["http"]
|
||||
volumes = [
|
||||
"/mnt/diskstation/nomad/syncthing/config:/config",
|
||||
@ -48,6 +52,11 @@ job "syncthing" {
|
||||
]
|
||||
|
||||
}
|
||||
|
||||
env{
|
||||
PUID = 1000001
|
||||
GUID = 1000001
|
||||
}
|
||||
resources {
|
||||
memory = 200
|
||||
}
|
@ -7,7 +7,11 @@ job "tt-rss" {
|
||||
attribute = "${attr.cpu.arch}"
|
||||
value = "amd64"
|
||||
}
|
||||
|
||||
constraint {
|
||||
attribute = "${node.class}"
|
||||
operator = "set_contains"
|
||||
value = "cluster"
|
||||
}
|
||||
|
||||
group "ttrss" {
|
||||
ephemeral_disk {
|
||||
@ -34,13 +38,13 @@ job "tt-rss" {
|
||||
"homer.enable=true",
|
||||
"homer.name=TT-RSS",
|
||||
"homer.service=Application",
|
||||
"homer.logo=https://framalibre.org/sites/default/files/styles/thumbnail/public/leslogos/ic_launcher_1.png",
|
||||
"homer.logo=https://www.ducamps.eu/tt-rss/images/favicon-72px.png",
|
||||
"homer.target=_blank",
|
||||
"homer.url=https://www.ducamps.win/tt-rss",
|
||||
"homer.url=https://www.ducamps.eu/tt-rss",
|
||||
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`www.ducamps.win`)&&PathPrefix(`/tt-rss`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=www.ducamps.win",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`www.ducamps.eu`)&&PathPrefix(`/tt-rss`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=www.ducamps.eu",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
|
||||
]
|
||||
@ -50,30 +54,33 @@ job "tt-rss" {
|
||||
task "ttrss-app" {
|
||||
driver = "docker"
|
||||
config {
|
||||
image = "cthulhoo/ttrss-fpm-pgsql-static"
|
||||
image = "docker.service.consul:5000/cthulhoo/ttrss-fpm-pgsql-static"
|
||||
ports = [
|
||||
"appPort"
|
||||
]
|
||||
volumes = [
|
||||
"${NOMAD_ALLOC_DIR}/data:/var/www/html"
|
||||
"${NOMAD_ALLOC_DIR}/data:/var/www/html",
|
||||
"/mnt/diskstation/nomad/tt-rss/ttrss-auth-oidc:/var/www/html/tt-rss/plugins.local/auth_oidc"
|
||||
]
|
||||
}
|
||||
env {
|
||||
TTRSS_DB-TYPE = "pgsql"
|
||||
TTRSS_DB_HOST = "db1.ducamps.win"
|
||||
TTRSS_DB_HOST = "active.db.service.consul"
|
||||
TTRSS_DB_NAME = "ttrss"
|
||||
TTRSS_DB_USER = "ttrss"
|
||||
TTRSS_SELF_URL_PATH = "https://www.ducamps.win/tt-rss"
|
||||
TTRSS_SELF_URL_PATH = "https://www.ducamps.eu/tt-rss"
|
||||
TTRSS_PLUGINS = "auth_oidc, auth_internal"
|
||||
TTRSS_AUTH_OIDC_NAME= "Authelia"
|
||||
TTRSS_AUTH_OIDC_URL = "https://auth.ducamps.eu"
|
||||
TTRSS_AUTH_OIDC_CLIENT_ID = "ttrss"
|
||||
}
|
||||
template {
|
||||
data = <<EOH
|
||||
{{ with secret "secrets/data/database/ttrss"}}
|
||||
TTRSS_DB_PASS = "{{ .Data.data.password }}"
|
||||
{{end}}
|
||||
{{ with secret "secrets/data/database/ttrss"}}TTRSS_DB_PASS = "{{ .Data.data.password }}"{{end}}
|
||||
TTRSS_AUTH_OIDC_CLIENT_SECRET = {{ with secret "secrets/data/authelia/ttrss"}}"{{ .Data.data.password }}"{{end}}
|
||||
EOH
|
||||
destination = "secrets/tt-rss.env"
|
||||
destination = "secret/tt-rss.env"
|
||||
env = true
|
||||
|
||||
}
|
||||
resources {
|
||||
memory = 150
|
||||
@ -83,7 +90,7 @@ job "tt-rss" {
|
||||
task "ttrss-updater" {
|
||||
driver = "docker"
|
||||
config {
|
||||
image = "cthulhoo/ttrss-fpm-pgsql-static"
|
||||
image = "docker.service.consul:5000/cthulhoo/ttrss-fpm-pgsql-static"
|
||||
volumes = [
|
||||
"${NOMAD_ALLOC_DIR}/data:/var/www/html"
|
||||
]
|
||||
@ -92,10 +99,10 @@ job "tt-rss" {
|
||||
}
|
||||
env {
|
||||
TTRSS_DB-TYPE = "pgsql"
|
||||
TTRSS_DB_HOST = "db1.ducamps.win"
|
||||
TTRSS_DB_HOST = "active.db.service.consul"
|
||||
TTRSS_DB_NAME = "ttrss"
|
||||
TTRSS_DB_USER = "ttrss"
|
||||
TTRSS_SELF_URL_PATH = "https://www.ducamps.win/tt-rss"
|
||||
TTRSS_SELF_URL_PATH = "https://www.ducamps.eu/tt-rss"
|
||||
}
|
||||
template {
|
||||
data = <<EOH
|
||||
@ -115,7 +122,7 @@ job "tt-rss" {
|
||||
task "ttrss-frontend" {
|
||||
driver = "docker"
|
||||
config {
|
||||
image = "nginx:alpine"
|
||||
image = "docker.service.consul:5000/library/nginx:alpine"
|
||||
ports = [
|
||||
"http"
|
||||
]
|
@ -6,7 +6,11 @@ job "vaultwarden" {
|
||||
meta {
|
||||
forcedeploy = "0"
|
||||
}
|
||||
|
||||
constraint {
|
||||
attribute = "${node.class}"
|
||||
operator = "set_contains"
|
||||
value = "cluster"
|
||||
}
|
||||
group "vaultwarden" {
|
||||
network {
|
||||
mode = "host"
|
||||
@ -29,11 +33,11 @@ job "vaultwarden" {
|
||||
"homer.service=Application",
|
||||
"homer.logo=https://yunohost.org/user/images/bitwarden_logo.png",
|
||||
"homer.target=_blank",
|
||||
"homer.url=https://${NOMAD_JOB_NAME}.ducamps.win",
|
||||
"homer.url=https://${NOMAD_JOB_NAME}.ducamps.eu",
|
||||
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`vault.ducamps.win`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=vault.ducamps.win",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`vault.ducamps.eu`)",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=vault.ducamps.eu",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
|
||||
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
|
||||
]
|
||||
@ -50,7 +54,7 @@ job "vaultwarden" {
|
||||
}
|
||||
}
|
||||
config {
|
||||
image = "vaultwarden/server"
|
||||
image = "docker.service.consul:5000/vaultwarden/server"
|
||||
ports = ["http"]
|
||||
volumes = [
|
||||
"/mnt/diskstation/nomad/vaultwarden:/data"
|
||||
@ -60,14 +64,14 @@ job "vaultwarden" {
|
||||
env {
|
||||
DATA_FOLDER = "/data"
|
||||
WEB_VAULT_ENABLED = "true"
|
||||
DOMAIN = "https://vault.ducamps.win"
|
||||
DOMAIN = "https://vault.ducamps.eu"
|
||||
|
||||
}
|
||||
|
||||
template {
|
||||
data = <<EOH
|
||||
{{ with secret "secrets/data/database/vaultwarden"}}
|
||||
DATABASE_URL=postgresql://vaultwarden:{{ .Data.data.password }}@db1.ducamps.win/vaultwarden
|
||||
DATABASE_URL=postgresql://vaultwarden:{{ .Data.data.password }}@active.db.service.consul/vaultwarden
|
||||
{{end}}
|
||||
EOH
|
||||
destination = "secrets/vaultwarden.env"
|
nomad-job/apps/vikunja.nomad (new file, 89 lines)
@@ -0,0 +1,89 @@

job "vikunja" {
  datacenters = ["homelab"]
  priority    = 70
  type        = "service"
  meta {
    forcedeploy = "0"
  }

  group "vikunja" {
    network {
      mode = "host"
      port "front" {
        to = 80
      }
      port "api" {
        to = 3456
      }
    }
    vault {
      policies = ["vikunja"]
    }
    task "api" {
      driver = "docker"
      service {
        name = "vikunja-api"
        port = "api"
        tags = [
          "traefik.enable=true",
          "traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
          "traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
          "traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.tls.certresolver=myresolver",
          "traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.entrypoints=web,websecure",
          "homer.enable=true",
          "homer.name=vikunka",
          "homer.service=Application",
          "homer.logo=https://${NOMAD_JOB_NAME}.ducamps.eu/images/icons/apple-touch-icon-180x180.png",
          "homer.target=_blank",
          "homer.url=https://${NOMAD_JOB_NAME}.ducamps.eu",
        ]
      }
      config {
        image   = "docker.service.consul:5000/vikunja/vikunja"
        ports   = ["api", "front"]
        volumes = ["local/config.yml:/etc/vikunja/config.yml"]
      }
      env {
        VIKUNJA_DATABASE_HOST       = "active.db.service.consul"
        VIKUNJA_DATABASE_TYPE       = "postgres"
        VIKUNJA_DATABASE_USER       = "vikunja"
        VIKUNJA_DATABASE_DATABASE   = "vikunja"
        VIKUNJA_SERVICE_JWTSECRET   = uuidv4()
        VIKUNJA_SERVICE_FRONTENDURL = "https://${NOMAD_JOB_NAME}.ducamps.eu/"
        VIKUNJA_AUTH_LOCAL          = False
      }

      template {
        data = <<EOH
{{ with secret "secrets/data/database/vikunja"}}
VIKUNJA_DATABASE_PASSWORD= "{{ .Data.data.password }}"
{{end}}
EOH
        destination = "secrets/sample.env"
        env         = true
      }
      template {
        data = <<EOH
auth:
  openid:
    enabled: true
    redirecturl: https://vikunja.ducamps.eu/auth/openid/
    providers:
      - name: Authelia
        authurl: https://auth.ducamps.eu
        clientid: vikunja
        clientsecret: {{ with secret "secrets/data/authelia/vikunja"}} {{ .Data.data.password }} {{end}}
        scope: openid profile email
EOH
        destination = "local/config.yml"
      }
      resources {
        memory = 100
      }
    }

  }
}
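One detail worth noting in this new job: VIKUNJA_SERVICE_JWTSECRET = uuidv4() is an HCL2 function call, evaluated when Nomad parses the job, so each re-registration generates a fresh JWT secret and invalidates existing sessions. Pinning it through Vault would follow the same pattern as the job's other secrets; a sketch with a hypothetical secret path and key:

template {
  data = <<EOH
{{ with secret "secrets/data/nomad/vikunja" }}VIKUNJA_SERVICE_JWTSECRET = "{{ .Data.data.jwtsecret }}"{{ end }}
EOH
  destination = "secrets/jwt.env"
  env         = true   # stable across deployments, unlike uuidv4()
}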
Some files were not shown because too many files have changed in this diff.