Compare commits

...

250 Commits

Author SHA1 Message Date
vincent
90dd0ecd9a chore: link makefile
Some checks failed
continuous-integration/drone/push Build is failing
2024-11-09 10:24:15 +01:00
vincent
4f6743db5f perf: tweak mealie and pihole memory
Some checks failed
continuous-integration/drone/push Build is failing
2024-11-09 10:23:42 +01:00
vincent
2452a2ad44 fix (flaresolverr): change image to resolve chalenge issue 2024-11-09 10:23:07 +01:00
vincent
5e2bb57914 rutorrent: resolve issue with docker 2024-11-09 10:22:24 +01:00
vincent
3eb2dbfa08 authelia: custom consent preconfigured time 2024-11-09 10:21:50 +01:00
vincent
1ea094aa6e Revert "perfs: decrease CPU"
This reverts commit 6ea5de0315.
2024-10-29 19:21:05 +01:00
vincent
c1e48d4ace add compute parameter to oscar
Some checks failed
continuous-integration/drone/push Build is failing
2024-10-29 19:08:41 +01:00
vincent
b2710aab2f add oauth to gitea 2024-10-19 16:28:25 +02:00
vincent
c000933f66 add paperless-ng SSO
Some checks failed
continuous-integration/drone/push Build is failing
2024-10-12 10:12:38 +02:00
vincent
7948773757 perfs: increase memory-max for some job
Some checks failed
continuous-integration/drone/push Build is failing
2024-09-29 17:51:05 +02:00
vincent
3d90a1f6d7 fix: wrong dns in docker daemon.json 2024-09-29 17:50:31 +02:00
vincent
1f29007172 switch to nfs v4 on share 2024-09-29 17:50:11 +02:00
vincent
af58866882 dns: pdns-admin in dedicated nomad group 2024-09-29 17:38:27 +02:00
vincent
374a62c304 fix: aur call in database playbook
Some checks failed
continuous-integration/drone/push Build is failing
2024-08-04 11:49:40 +02:00
vincent
9451443266 refactor: split job in role folder
Some checks failed
continuous-integration/drone/push Build is failing
2024-08-03 15:06:36 +02:00
vincent
dacd187f7b fix: loki config
Some checks failed
continuous-integration/drone/push Build is failing
2024-08-03 14:47:27 +02:00
vincent
e48a879c43 fix: torrent PUID 2024-08-03 14:46:47 +02:00
vincent
6ea5de0315 perfs: decrease CPU 2024-08-03 14:46:05 +02:00
vincent
984b712c78 update: nfs csi nfs plugins 4.7 2024-08-03 14:45:22 +02:00
vincent
293fddd81c remove backup disk mount 2024-08-03 14:45:04 +02:00
vincent
0952c4bf42 fix: change media mount path 2024-08-03 14:43:30 +02:00
vincent
3228054172 oscar hardware replacement
Some checks failed
continuous-integration/drone/push Build is failing
2024-06-29 10:21:44 +02:00
vincent
ee7cd0c12e fix: wrong interface variable call 2024-06-29 10:20:25 +02:00
vincent
22a60b42d4 add vikunja to generate vault 2024-06-25 18:45:46 +02:00
vincent
d578fefbce perfs (registry): add memory 2024-06-25 18:45:16 +02:00
vincent
cae4ceb623 update: remove immich microservice 2024-06-25 18:44:51 +02:00
vincent
ddc4320fe9 feat (vikunja): implemant oauth
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-05-20 12:15:56 +02:00
vincent
d1b475d651 fix: add cluster consraint to prowalar and tt-rss 2024-05-20 11:21:45 +02:00
vincent
d817f3a7f8 perfs (immich): increase memory 2024-05-20 11:21:21 +02:00
vincent
18a78f6fd2 chore (immich): fix logo 2024-05-20 11:20:32 +02:00
vincent
f22e3406be borgmatic: modify jellyfin backup exeption 2024-05-16 19:19:00 +02:00
vincent
1520ec0dcc disable authelia notifier check 2024-05-16 19:18:18 +02:00
vincent
275435664c feat: grafanna sso
Some checks failed
continuous-integration/drone/push Build is failing
2024-05-10 15:50:45 +02:00
vincent
f9ff70a9d9 feat: immich sso
Some checks failed
continuous-integration/drone/push Build is failing
2024-05-10 14:49:50 +02:00
vincent
8915ff52dd fix: wrong array character 2024-05-10 14:49:20 +02:00
vincent
74794f866a feat: improve database playbook 2024-05-10 08:35:14 +02:00
vincent
7244ceb5b1 feat: manage all nomad folder creation on build 2024-05-10 08:35:14 +02:00
vincent
49a8a427f7 perf: adjust openldap ram 2024-05-10 08:35:14 +02:00
vincent
f4f77fc55a fix: add dev network to docker insecure registry 2024-05-10 08:35:14 +02:00
vincent
351d7c287f fix: increase VM ram among 2024-05-10 08:35:14 +02:00
vincent
598896ad5f feat: implement immich job 2024-05-10 08:20:01 +02:00
vincent
6e00668840 add terrform immich variable for vault and dns 2024-05-10 08:18:53 +02:00
vincent
24eb640c60 configure db for immich 2024-05-09 09:25:23 +02:00
vincent
9b6ed6cc6e switch to opentofu 2024-05-09 09:14:25 +02:00
vincent
2f1de5dcd5 fix vault dn
Some checks failed
continuous-integration/drone/push Build is failing
Signed-off-by: vincent <vincent@ducamps.win>
2024-05-08 21:38:10 +02:00
vincent
78692be3fd add vector.rs to database playbook 2024-05-08 21:37:27 +02:00
vincent
272efbb844 update openldap default tree 2024-05-08 21:14:37 +02:00
vincent
c9f4656470 switch gerard-dev to archlinux 2024-05-08 21:07:57 +02:00
vincent
6e679c82a0 fix: add missing argument to ldap manager
Some checks failed
continuous-integration/drone/push Build is failing
2024-05-08 09:11:28 +02:00
vincent
9d0c513787 chore: update nomad template 2024-04-28 16:11:37 +02:00
vincent
69a2ad4efd feat: implement mealie
Some checks failed
continuous-integration/drone/push Build is failing
2024-04-28 16:10:43 +02:00
vincent
2f6c814fb1 CI: terraform makefile command parameter
Some checks failed
continuous-integration/drone/push Build is failing
2024-04-27 14:29:38 +02:00
vincent
ab3c42cf8b feat: add authelia oidc authent
Some checks failed
continuous-integration/drone/push Build is failing
2024-04-24 21:23:39 +02:00
vincent
992937c011 feat: migrate rutorrent on authelia for authent
Some checks failed
continuous-integration/drone/push Build is failing
2024-04-12 08:59:40 +02:00
vincent
5fe61223c3 feat: create authelia job 2024-04-12 08:59:20 +02:00
vincent
452ab3611a fix (syncthing): change UID to match to folder 2024-04-12 08:58:02 +02:00
vincent
1ee5e21f84 ldap: remove login shell for service acount 2024-04-12 08:57:38 +02:00
vincent
92befa7ea4 chore: update alertmanager smtp hello url 2024-04-12 08:56:50 +02:00
vincent
4be6af919d refactor: mmove lldap to decom job 2024-04-12 08:56:34 +02:00
vincent
77e7cd4f88 style: update missing icon 2024-04-12 08:56:12 +02:00
vincent
fe9bc8dbab feat: add torrent automation job (prawlarr + flareresolver) 2024-04-11 10:16:20 +02:00
vincent
60cfe75e47 perfs (prometheus): add memory_max
Some checks failed
continuous-integration/drone/push Build is failing
2024-04-09 08:41:06 +02:00
vincent
4fcf862279 borgmatic: add exclusion 2024-04-09 08:40:53 +02:00
vincent
98c1d63962 borgmatic: add action 2024-04-09 08:40:38 +02:00
vincent
0b067cabca loki: review config 2024-04-09 08:39:37 +02:00
vincent
4ef30222f7 fix: memory_max
Some checks failed
continuous-integration/drone/push Build is failing
2024-03-29 21:15:39 +01:00
vincent
117e9397a3 switch volume to nfsv4 2024-03-29 21:14:24 +01:00
vincent
0b25eb194e feat: add authorization for local docker in nfs
Some checks failed
continuous-integration/drone/push Build is failing
2024-03-17 19:07:51 +01:00
vincent
74dc3a0c89 chore: clean gerard from inventory 2024-03-17 19:01:52 +01:00
vincent
9bc0e24357 fix: pureftpd variable 2024-03-17 19:01:32 +01:00
vincent
e0f9190b76 feat: docker pull througt mirror
Some checks failed
continuous-integration/drone/push Build is failing
2024-03-17 18:58:24 +01:00
vincent
f0676ec3f7 fix: change rutorrent tag 2024-03-17 11:07:59 +01:00
vincent
8b895fee06 docs: update ADR 2024-03-17 11:07:59 +01:00
vincent
aeed90ea34 perfs: adjust max mermory 2024-03-17 11:07:59 +01:00
vincent
a89109e1ff feat: add actual budget 2024-03-17 11:07:59 +01:00
vincent
d748beb6a4 feat: switxh from vsftp to pure-ftpd 2024-03-17 11:07:59 +01:00
vincent
3a80c47b56 add service account ou in ldap default tree 2024-03-17 11:07:59 +01:00
vincent
c75e9e707a fix: staging nas bind 2024-03-17 11:07:59 +01:00
vincent
4926b4eb06 perfs: increase backup postgress memory 2024-03-17 11:07:59 +01:00
vincent
0ebd087544 fix: move binding dn 2024-03-17 11:07:59 +01:00
vincent
b7dc26cc27 borgmatic: fix config 2024-03-17 11:07:59 +01:00
vincent
012c448c73 improve share binding 2024-03-17 11:07:59 +01:00
vincent
1b79fe4cb0 Borgmatic: add know host 2024-03-17 11:07:59 +01:00
vincent
6848ffa05b fix: become on nut role 2024-03-17 11:07:59 +01:00
vincent
aec7230f11 feat: ftp local user iss chroot 2024-03-17 11:07:58 +01:00
vincent
da3b290d4a feat: enable crossmount in nfs share 2024-03-17 11:07:58 +01:00
vincent
5718968407 fix:hard DNS on oscar instead Nas (if NAS is shutdown cluster DNS will
shutdown )
2024-03-17 11:07:58 +01:00
vincent
0db8555fe8 change rutorrent group 2024-03-17 11:07:58 +01:00
vincent
2fee8293dc feat: add role for nut 2024-03-17 11:07:58 +01:00
vincent
3dae6adb33 switch dns on oberon 2024-03-17 11:07:58 +01:00
vincent
f207be7d7d finalize Nas data migration 2024-03-17 11:07:58 +01:00
vincent
f32c0d1e40 fix: no issue on nfs cluster if one device is down 2024-03-17 11:07:58 +01:00
vincent
d37fe78e39 feat: enable vsftp user session 2024-03-17 11:07:58 +01:00
vincent
586e6101ca feat: correct homedir for samba 2024-03-17 11:07:58 +01:00
vincent
e470b204a5 feat: add constrainst to limit nas job 2024-03-17 11:07:58 +01:00
vincent
c4d10aacfe fix: change path 2024-03-17 11:07:58 +01:00
vincent
e10830e028 fix: path issue 2024-03-17 11:07:58 +01:00
vincent
c37083b5c9 feat: isolate wireguard playbook 2024-03-17 11:07:58 +01:00
vincent
c7e6270c3a fix: remove separator from create user 2024-03-17 11:07:58 +01:00
vincent
625bda7fda feat: deploy NAS on oberon 2024-03-17 11:07:58 +01:00
vincent
d1cc5ff299 fix: add lan dns redirection to pdns recursor 2024-03-17 11:07:58 +01:00
vincent
0a57c5659c fix: upgrade vikunka 2024-03-17 11:07:58 +01:00
vincent
7191cb7216 rename nas to oberon 2024-03-17 11:07:58 +01:00
vincent
b3488061da dns: decrease local ttl 2024-03-17 11:07:58 +01:00
vincent
c08032052d fix: terraform dns makefile secret 2024-03-17 11:07:58 +01:00
vincent
25780828cc job: add borgmatic 2024-03-17 11:07:58 +01:00
vincent
46b4a51935 CI: improve consul stagging switch 2024-03-17 11:07:58 +01:00
vincent
993753f284 feat: intergrate SAMBA Nas role 2024-03-17 11:07:58 +01:00
vincent
5188d865d8 fix: get ldap admin password in vault 2024-03-17 11:07:58 +01:00
vincent
2a731201a1 add default crypt password for vault service account 2024-03-17 11:07:58 +01:00
vincent
70e0d6011b CI: autoapprove for terraform apply 2024-03-17 11:07:58 +01:00
vincent
2c0da4bd15 feat: enable automoint for staging 2024-03-17 11:07:58 +01:00
vincent
547ce05466 chore: complete generate-vault-secret 2024-03-17 11:07:58 +01:00
vincent
bfb3ec3d34 fix: modify vault endpoint for create nomad token 2024-03-17 11:07:58 +01:00
vincent
9756939f8e fix: create nomad dir in playbook with correct right 2024-03-17 11:07:58 +01:00
vincent
f420f17929 feat: modify staging domain name 2024-03-17 11:07:58 +01:00
vincent
2bae64c40b create script to bootstrap vault secret 2024-03-17 11:07:58 +01:00
vincent
c8f7d7f8c3 ordo: improve makefile for terraform 2024-03-17 11:07:58 +01:00
vincent
2632c6d2b0 dns: switch cname to alias 2024-03-17 11:07:58 +01:00
vincent
f61008b570 fix: bootstrap become 2024-03-17 11:07:58 +01:00
vincent
73df5fa582 refactor: consul in first of hashicorp stack 2024-03-17 11:07:58 +01:00
vincent
e3d76630c3 feat: replace rocky by arch in vagrant 2024-03-17 11:07:58 +01:00
vincent
41b1a71c76 feat: switch consul DNS in makefile 2024-03-17 11:07:58 +01:00
vincent
e9ad317436 feat ensure nfs share folder exist 2024-03-17 11:07:58 +01:00
vincent
2db6061516 fix: declare main interface variable for stagging 2024-03-17 11:07:58 +01:00
vincent
3367c78314 feat: merge user create and config playbook 2024-03-17 11:07:58 +01:00
vincent
08ea604028 feat: create home share ans delete home mont on cluster 2024-03-17 11:07:58 +01:00
vincent
29ab70a1d5 fix: samba mount option issue 2024-03-17 11:07:58 +01:00
vincent
e083f4da7a terraform: remove corwin 2024-03-17 11:07:58 +01:00
vincent
2ea4992f57 fix dockermailserver: add privae network to ha proxy auth 2024-03-17 11:07:58 +01:00
vincent
49de33bbdb calc docket mtu on wireguard MTU 2024-03-17 11:07:58 +01:00
vincent
2b678b7786 remove bootstap become 2024-03-17 11:07:58 +01:00
vincent
fc2dcd7b33 fix: add empty env group to avoid issue 2024-03-17 11:07:58 +01:00
vincent
29d70cac0e migrate to merlin 2024-03-17 11:07:58 +01:00
vincent
4117bd80c5 fix: www specific location for archiso 2024-03-17 11:07:58 +01:00
vincent
da6f04e42e fix: database pg_hba 2024-03-17 11:07:58 +01:00
vincent
13bda4cd34 fix: case where vault root file not exist 2024-03-17 11:07:58 +01:00
vincent
63cd352fff archiso on web server 2024-03-17 11:07:58 +01:00
vincent
a65e3484b5 implement default interface variable 2024-03-17 11:07:58 +01:00
vincent
2b9e034232 delete old var file 2024-03-17 11:07:58 +01:00
vincent
527d2f2345 add packer to build arch image on hetzner 2024-03-17 11:07:58 +01:00
vincent
2da18e9c12 docs: add smtp case troubleshoot 2024-03-17 11:07:58 +01:00
vincent
49f639cb15 delete old dns terraform file 2024-03-17 11:07:58 +01:00
vincent
abc88f0074 add packer for hetzner image 2024-03-17 11:07:58 +01:00
vincent
394dbaf6cb move filestash on homelab 2024-03-17 11:07:58 +01:00
vincent
78762b477e move mail on homelab 2024-03-17 11:07:58 +01:00
vincent
2c00b9be59 feat: redirect all cluster traffic on wirequard 2024-03-17 11:07:58 +01:00
vincent
acc6cdc5fa fix crowsec: rename data file 2024-03-17 11:07:58 +01:00
vincent
43b6cf9158 fix www: change redirection method 2024-03-17 11:07:58 +01:00
vincent
015a89b27e fix: port 25 entrypoint conflict 2024-03-17 11:07:58 +01:00
vincent
68434f3e92 fix: switch ldap user manager traefik router 2024-03-17 11:07:58 +01:00
vincent
fe6d1c5e26 add user group to tree ldif 2024-03-17 11:07:58 +01:00
vincent
f8bc026165 feat: implemant openldap and migration 2024-03-17 11:07:58 +01:00
vincent
80f489422a change docker repo for testing 2024-03-17 11:07:58 +01:00
vincent
4207b1fc75 init lldap job 2024-03-17 11:07:58 +01:00
vincent
ea30fce975 feat: move backup in dedicated folder 2024-03-17 11:07:58 +01:00
vincent
5b23006e97 feat: move last application data folder in nomad share 2024-03-17 11:07:58 +01:00
vincent
9370a92518 put hashicorpstack before nas role 2024-03-17 11:07:58 +01:00
vincent
9fcf2d78e6 config repo on prod 2024-03-17 11:07:58 +01:00
vincent
f82c99c2ba fix: typo 2024-03-17 11:07:58 +01:00
vincent
cecad8b785 feat: change nas if by consul service for stagging 2024-03-17 11:07:58 +01:00
vincent
28fc2bf6a7 init csi 2024-01-13 18:37:11 +01:00
vincent
a0214d0d74 allow nomad privileged on all
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-13 18:36:27 +01:00
vincent
9812376a1d gather all device before nas playbook 2024-01-13 18:36:27 +01:00
vincent
6ddcc4736e put nfs share in export bind 2024-01-13 18:32:02 +01:00
vincent
11fe5fb5dc conf dhcp: add ip for shelly
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-13 16:49:47 +01:00
vincent
ec2ecd08cd perfs backup-postgress: increse memory
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-13 10:20:53 +01:00
vincent
40ce7c1550 feat: improce variable management
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-09 18:52:47 +01:00
vincent
64346cc63b depends: update terraform plugins 2024-01-09 18:52:47 +01:00
vincent
ffd597f710 by mount enable option instead dedicated variable 2024-01-09 18:52:47 +01:00
vincent
c4f1423501 recover dynamic ip for nfs mount 2024-01-09 18:52:47 +01:00
vincent
5a8c4519a6 fix: switch nfs auth to IP 2024-01-09 18:52:47 +01:00
vincent
908495bce3 norootsquash 2024-01-07 10:04:53 +01:00
vincent
8ca6413b02 add nas host file 2024-01-07 10:04:53 +01:00
vincent
8008295780 add become to nas role 2024-01-07 10:04:53 +01:00
vincent
05930da661 switch ducamps.eu 2024-01-07 10:04:53 +01:00
vincent
5d966908c5 add ftp role 2024-01-07 10:04:53 +01:00
vincent
c7a6ed5392 add some share
--amend

squash
2024-01-07 10:04:53 +01:00
vincent
f3469bd612 feat: dedicated playbook for autofs 2024-01-07 10:04:53 +01:00
vincent
33b4fc6ad5 feat: variable file by env
squash
2024-01-07 10:04:53 +01:00
vincent
351bef555c feat: server playbook for all device 2024-01-07 10:04:53 +01:00
vincent
6db6b28706 fix: nfs role execution 2024-01-07 10:04:53 +01:00
vincent
8081e89176 add nas variable and playbook 2024-01-07 10:04:53 +01:00
vincent
3628139699 init nas config 2024-01-07 10:04:52 +01:00
vincent
f0dd3e8f33 add repli in pg_hba variable
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-07 09:58:40 +01:00
vincent
0b78cbe0e3 fix: add second dns for docker
All checks were successful
continuous-integration/drone/push Build is passing
issue with drone docker in docker DNS connection refused on systemd rstub DNS
2024-01-07 09:47:15 +01:00
vincent
da1686cdea fix rutorrent: PUID
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-04 19:37:42 +01:00
vincent
5939ff8057 perfs: increase memory for postgres backup
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-01 11:44:10 +01:00
vincent
d15939640f deps: remove role pdn
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-23 10:34:00 +01:00
vincent
47761bf90e use nas IP for mount
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-17 10:54:53 +01:00
vincent
2fc86fc14f fix: run rutoren batch each hour
All checks were successful
continuous-integration/drone/push Build is passing
2023-12-03 10:16:49 +01:00
vincent
49d2ce491f add loki metrics in prometheus
All checks were successful
continuous-integration/drone/push Build is passing
2023-11-28 19:02:19 +01:00
vincent
1992f75888 dockermailserver switch to latest
All checks were successful
continuous-integration/drone/push Build is passing
2023-11-28 18:30:52 +01:00
vincent
a0179b829d feat: switch to vector for docker log collect
Signed-off-by: vincent <vincent@ducamps.win>
2023-11-28 18:22:13 +01:00
vincent
2cad7575d1 add batch to clen rutorrent forward folder 2023-11-25 18:57:52 +01:00
vincent
9f5c738317 rename batch job 2023-11-25 18:57:32 +01:00
vincent
f2c7e9a95a change torrent copy
All checks were successful
continuous-integration/drone/push Build is passing
2023-11-23 20:57:56 +01:00
vincent
4f1646afc2 change ldap dns 2023-11-17 20:03:33 +01:00
vincent
ba4647379e update makefile
All checks were successful
continuous-integration/drone/push Build is passing
2023-11-14 17:48:20 +01:00
vincent
58f89756d3 add defualt nomad interface for corwin 2023-11-14 17:47:53 +01:00
vincent
a60a1bc578 docs: update ADR NAS and DNS 2023-11-14 17:47:18 +01:00
vincent
9578b25804 remove dns option from promtail 2023-11-14 17:46:24 +01:00
vincent
f2bc16cbe0 perf: increase memory 2023-11-14 17:45:52 +01:00
vincent
70eec26d0a pacoloco manage archarmV8 2023-11-14 17:45:38 +01:00
vincent
98f1e34d04 perf: decrease memory pdns 2023-11-07 18:56:03 +01:00
vincent
9e4348065e add variable system_ip_unprivileged_port_start 2023-11-07 18:55:34 +01:00
vincent
f17a946d81 add recursort in front of auth server 2023-11-06 19:07:25 +01:00
vincent
b494eaf358 big bang ducamps.win -> ducamps.eu 2023-11-05 19:08:17 +01:00
vincent
5d3432ff45 switch dns update on pdns
All checks were successful
continuous-integration/drone/push Build is passing
2023-11-05 17:00:16 +01:00
vincent
5685458fbf fix mail dns entry
All checks were successful
continuous-integration/drone/push Build is passing
2023-11-05 14:58:41 +01:00
vincent
674813e2e4 migrate ducamps.eu on pdns 2023-11-05 14:58:10 +01:00
vincent
3944d444aa pihole listen only on 192.168.1.4 2023-11-05 11:58:07 +01:00
vincent
9a0aa359a5 add basic auth to torrent
All checks were successful
continuous-integration/drone/push Build is passing
2023-11-04 21:34:03 +01:00
vincent
4e9155e0db prepare DNS migration 2023-11-04 21:33:51 +01:00
vincent
b54420c0d9 style: fix markdown
All checks were successful
continuous-integration/drone/push Build is passing
2023-11-01 19:53:42 +01:00
vincent
db8b2c3b1e consul backup in nomad
Some checks failed
continuous-integration/drone/push Build is failing
2023-11-01 19:30:39 +01:00
vincent
bed1a666da add dns config for docker
Some checks failed
continuous-integration/drone/push Build is failing
2023-11-01 18:58:56 +01:00
vincent
9d44ad59c7 vault backup cron in nomad 2023-11-01 18:58:42 +01:00
vincent
c8a1ba34f3 fix: tt-rss db host 2023-11-01 09:08:47 +01:00
vincent
b1afa5a801 fix: vault unseal key encrypted
Some checks failed
continuous-integration/drone/push Build is failing
2023-11-01 08:42:30 +01:00
vincent
4cd583622b database DNS entry in consul 2023-11-01 08:42:04 +01:00
vincent
8718bfe051 user_config ouside of site (to remove git.ducamps.win dependance) 2023-11-01 08:41:03 +01:00
vincent
594ffcad44 add alias to see all vault alias
Some checks failed
continuous-integration/drone/push Build is failing
2023-10-29 20:23:41 +01:00
vincent
14b1ac38e2 remove hasshicorp vault dependance on ansil metal deployment 2023-10-29 20:04:53 +01:00
vincent
521ea28229 fix nomad token condition
Signed-off-by: vincent <vincent@ducamps.win>
2023-10-29 20:03:08 +01:00
vincent
85d9dfa7d7 decom msmtp 2023-10-29 18:06:36 +01:00
vincent
61d182dfe6 factorize consul domain on corwin
Some checks failed
continuous-integration/drone/push Build is failing
2023-10-29 15:36:05 +01:00
vincent
ecc4e1dbb9 add dns in site 2023-10-29 15:35:51 +01:00
vincent
439611990e add base dev 2023-10-29 15:35:30 +01:00
vincent
ef927ee761 manage nomad vault token in ansible 2023-10-29 15:35:11 +01:00
vincent
3770c41d03 ansible variable: split variable in file 2023-10-29 15:33:24 +01:00
vincent
50d43dd44c ansible: increase sssh timeout 2023-10-29 15:30:10 +01:00
vincent
1accb487e6 vagrant: bootstrap per VM 2023-10-29 15:29:20 +01:00
vincent
9965a58e47 update debian image 2023-10-29 15:28:48 +01:00
vincent
b972781036 increase prometheus memory
Some checks failed
continuous-integration/drone/push Build is failing
2023-10-23 19:16:47 +02:00
vincent
0e4d6c30d1 increase prometheus retention 2023-10-23 19:16:27 +02:00
vincent
cf53b72179 remove duplicate label 2023-10-23 19:15:55 +02:00
vincent
a99d4534c6 increate prometheus retention time 2023-10-22 21:50:12 +02:00
vincent
38ea6d811e feat: add prometheus alerting for node hardware
Some checks failed
continuous-integration/drone/push Build is failing
2023-10-22 17:26:57 +02:00
vincent
202fdf176e docs: add DNS ADR 2023-10-22 16:10:22 +02:00
vincent
dc7d2134bf fix: conflict between pihole and dnsmasq
Some checks failed
continuous-integration/drone/push Build is failing
2023-10-21 22:37:59 +02:00
vincent
aef03b0e13 docs: update DNS schema 2023-10-21 15:54:10 +02:00
vincent
d5ad4a239c docs: complete DNS ADR 2023-10-21 15:43:13 +02:00
vincent
42cce82722 add systemd-resolved redirection variable 2023-10-21 15:24:58 +02:00
vincent
276fa3c7ec update wireguard DNS 2023-10-21 14:04:12 +02:00
vincent
7a433c2492 fix second dhcp IP 2023-10-21 14:04:12 +02:00
vincent
6f55907bb3 disable DNSSEC 2023-10-21 14:04:12 +02:00
vincent
bfa620f178 move vagrant domain 2023-10-21 14:04:12 +02:00
vincent
1fbf3a9407 create config powerdns 2023-10-21 14:04:12 +02:00
vincent
a8ed6daf77 fix: missing NS entry for trafieck acme 2023-10-21 14:02:05 +02:00
197 changed files with 6299 additions and 1169 deletions

View File

@ -22,13 +22,6 @@ make create-dev
## Rebuild
to rebuild from scratch ansible need a vault server up and unseal
you can rebuild a standalone vault server with a consul database snaphot with
```sh
make vault-dev FILE=./yourconsulsnaphot.snap
```
## Architecture
```mermaid

35
Vagrantfile vendored
View File

@ -1,9 +1,10 @@
Vagrant.configure('2') do |config|
if Vagrant.has_plugin?('vagrant-cachier')
config.cache.scope = 'machine'
config.cache.enable :pacman
end
config.vm.provider :libvirt do |libvirt|
libvirt.management_network_domain = "ducamps-dev.win"
libvirt.management_network_domain = "lan.ducamps.dev"
end
config.vm.define "oscar-dev" do |c|
@ -19,14 +20,20 @@ Vagrant.configure('2') do |config|
# Provider
c.vm.provider "libvirt" do |libvirt, override|
libvirt.memory = 1024
libvirt.memory = 2048
libvirt.cpus = 2
end
c.vm.provision "ansible" do |bootstrap|
bootstrap.playbook= "ansible/playbooks/bootstrap.yml"
bootstrap.galaxy_roles_path= "ansible/roles"
bootstrap.limit="oscar-dev"
bootstrap.extra_vars = { ansible_python_interpreter:"/usr/bin/python3" }
end
end
config.vm.define "merlin-dev" do |c|
# Box definition
c.vm.box = "generic/rocky9"
c.vm.box = "archlinux/archlinux"
# Config options
c.vm.synced_folder ".", "/vagrant", disabled: true
c.ssh.insert_key = true
@ -36,15 +43,21 @@ Vagrant.configure('2') do |config|
# Provider
c.vm.provider "libvirt" do |libvirt, override|
libvirt.memory = 1024
libvirt.memory = 512
libvirt.cpus = 2
end
c.vm.provision "ansible" do |bootstrap|
bootstrap.playbook= "ansible/playbooks/bootstrap.yml"
bootstrap.galaxy_roles_path= "ansible/roles"
bootstrap.limit="merlin-dev"
bootstrap.extra_vars = { ansible_python_interpreter:"/usr/bin/python3" }
end
end
config.vm.define "gerard-dev" do |c|
# Box definition
c.vm.box = "debian/bookworm64"
c.vm.box = "archlinux/archlinux"
# Config options
c.vm.synced_folder ".", "/vagrant", disabled: true
@ -54,9 +67,15 @@ Vagrant.configure('2') do |config|
# instance_raw_config_args
# Provider
c.vm.provider "libvirt" do |libvirt, override|
libvirt.memory = 1024
libvirt.memory = 2048
libvirt.cpus = 2
end
c.vm.provision "ansible" do |bootstrap|
bootstrap.playbook= "ansible/playbooks/bootstrap.yml"
bootstrap.galaxy_roles_path= "ansible/roles"
bootstrap.limit="gerard-dev"
bootstrap.extra_vars = { ansible_python_interpreter:"/usr/bin/python3" }
end
end
config.vm.define "nas-dev" do |c|
@ -71,14 +90,14 @@ Vagrant.configure('2') do |config|
# Provider
c.vm.provider "libvirt" do |libvirt, override|
libvirt.memory = 1024
libvirt.memory = 2048
libvirt.cpus = 2
end
c.vm.provision "ansible" do |bootstrap|
bootstrap.playbook= "ansible/playbooks/bootstrap.yml"
bootstrap.galaxy_roles_path= "ansible/roles"
bootstrap.limit="all"
bootstrap.limit="nas-dev"
bootstrap.extra_vars = { ansible_python_interpreter:"/usr/bin/python3" }
end
end

View File

@ -99,7 +99,7 @@ host_key_checking = False
#sudo_flags = -H -S -n
# SSH timeout
#timeout = 10
timeout = 30
# default user to use for playbooks if user is not specified
# (/usr/bin/ansible will use current user as default)
@ -136,7 +136,7 @@ host_key_checking = False
# If set, configures the path to the Vault password file as an alternative to
# specifying --vault-password-file on the command line.
#vault_password_file = /path/to/vault_password_file
vault_password_file = ./misc/vault-keyring-client.sh
# format of string {{ ansible_managed }} available within Jinja2
# templates indicates to users editing templates files will be replaced.

24
ansible/group_vars/DNS Normal file
View File

@ -0,0 +1,24 @@
pdns_config:
local-address: "127.0.0.1"
local-port: "5300"
api: yes
api-key:
pdns_backends:
gsqlite3:
dnssec: yes
database: "/var/lib/powerdns/powerdns.sqlite"
pdns_sqlite_databases_locations:
- "/var/lib/powerdns/powerdns.sqlite"
pdns_rec_config:
forward-zones:
- "{{ consul_domain }}=127.0.0.1:8600"
- "ducamps.win=192.168.1.10"
- "{{ domain.name }}=192.168.1.5"
- "lan.{{ domain.name }}=192.168.1.5"
- "1.168.192.in-addr.arpa=192.168.1.5:5300"
local-address: "{{ hostvars[inventory_hostname]['ansible_'+ default_interface].ipv4.address|default(ansible_default_ipv4.address) }}"
dnssec: "off"

View File

@ -0,0 +1,90 @@
NAS_nomad_folder:
- name: actualbudget
- name: archiso
owner: 1000001
- name: backup
owner: 1000001
- name: borgmatic
- name: crowdsec
owner: 1000001
- name: dms
owner: 1000001
- name: filestash
owner: 1000
- name: gitea
owner: 1000000
- name: grafana
owner: 472
- name: hass
owner: 1000001
- name: homer
owner: 1000001
- name: immich/cache
- name: immich/upload
- name: jellyfin
owner: 1000001
- name: loki
owner: 10001
- name: mealie
owner: 1000001
- name: mosquito
owner: 1883
- name: pacoloco
owner: 1000001
- name: pdns-auth
owner: 1000001
- name: pdns-admin
owner: 1000001
- name: pihole
owner: 999
- name: prometheus
owner: 65534
- name: prowlarr
owner: 1000001
- name: radicale
owner: 1000001
- name: openldap
owner: 1001
- name: registry/ghcr
- name: registry/docker
- name: syncthing
owner: 1000001
- name: traefik
owner: 1000001
- name: tt-rss
owner: 1000001
- name: vaultwarden
owner: 1000001
- name: zigbee2mqtt
owner: 1000001
nas_bind_target: "/exports"
nas_bind_source:
- dest: "{{ nas_bind_target }}/nomad"
source: /data/data1/nomad
- dest: "{{ nas_bind_target }}/music"
source: /data/data1/music
- dest: "{{ nas_bind_target }}/download"
source: /data/data1/download
- dest: "{{ nas_bind_target }}/media/serie"
source: /data/data2/serie
- dest: "{{ nas_bind_target }}/media/film"
source: /data/data3/film
- dest: "{{ nas_bind_target }}/photo"
source: /data/data1/photo
- dest: "{{ nas_bind_target }}/homes"
source: /data/data1/homes
- dest: "{{ nas_bind_target }}/ebook"
source: /data/data1/ebook
- dest: "{{ nas_bind_target }}/media/download/serie"
source: /data/data1/download/serie
- dest: "{{ nas_bind_target }}/media/download/film"
source: /data/data1/download/film
- dest: "{{ nas_bind_target }}/music/download/"
source: /data/data1/download/music

View File

@ -0,0 +1 @@
vsftpd_config: {}

View File

@ -0,0 +1,15 @@
nfs_cluster_list: "{% for server in groups['all']%} {% if hostvars[server]['ansible_default_ipv4']['address'] is defined %} {{hostvars[server]['ansible_' + hostvars[server]['nfs_iface']|default('')].ipv4.address|default(hostvars[server]['ansible_default_ipv4']['address'],true)}}{{ nfs_options }} {% endif %} {%endfor%}"
nfs_options: "(rw,no_root_squash,crossmnt,async,insecure_locks,sec=sys)"
nfs_consul_service: true
nfs_bind_target: "/exports"
nfs_exports:
- "{{ nas_bind_target }} *(fsid=0,insecure,no_subtree_check)"
- "{{ nas_bind_target }}/nomad {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
- "{{ nas_bind_target }}/download {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
- "{{ nas_bind_target }}/music {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
- "{{ nas_bind_target }}/media {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
- "{{ nas_bind_target }}/photo {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
- "{{ nas_bind_target }}/homes {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
- "{{ nas_bind_target }}/ebook {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"

View File

@ -0,0 +1 @@
nomad_node_class: 'NAS'

View File

@ -0,0 +1,25 @@
samba_passdb_backend: tdbsam
samba_shares_root: /exports
samba_shares:
- name: media
comment: "media"
write_list: "@NAS_media"
browseable: true
- name: ebook
comment: "ebook"
write_list: "@NAS_ebook"
browseable: true
- name: music
comment: "music"
write_list: "@NAS_music"
browseable: true
- name: photo
comment: "photo"
write_list: "@NAS_photo"
browseable: true
- name: download
comment: "downlaod"
write_list: "@NAS_download"
browseable: true
samba_load_homes: True
samba_homes_include: samba_homes_include.conf

View File

@ -42,35 +42,4 @@ nomad_datacenter: hetzner
consul_server: False
nomad_server: False
systemd_mounts:
diskstation_nomad:
share: diskstation.ducamps.win:/volume2/nomad
mount: /mnt/diskstation/nomad
type: nfs
options:
- " "
automount: true
hetzner_storage:
share: //u304977.your-storagebox.de/backup
mount: /mnt/hetzner/storagebox
type: cifs
options:
- credentials=/etc/creds/hetzner_credentials
- uid= 1024
- gid= 10
- vers=3.0
- mfsymlinks
automount: true
credentials_files:
1:
type: smb
path: /etc/creds/hetzner_credentials
username: u304977
password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/storage:hetzner') }}"
systemd_mounts_enabled:
- diskstation_nomad
- hetzner_storage

View File

@ -0,0 +1,28 @@
systemd_mounts:
diskstation_nomad:
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/nomad"
mount: /mnt/diskstation/nomad
type: nfs
options:
- "vers=4"
automount: "{{ env_automount }}"
enabled: true
hetzner_storage:
share: //u304977.your-storagebox.de/backup
mount: /mnt/hetzner/storagebox
type: cifs
options:
- credentials=/etc/creds/hetzner_credentials
- uid=100001
- gid=10
- vers=3.0
- mfsymlinks
automount: "{{ env_automount }}"
enabled: true
credentials_files:
1:
type: smb
path: /etc/creds/hetzner_credentials
username: u304977
password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/storage:hetzner') }}"

View File

@ -0,0 +1,12 @@
$ANSIBLE_VAULT;1.1;AES256
31303539336464336239376636623862303066336438383739356163616431643366386565366361
3264336232303135336334333663326234393832343235640a313638323963666631353836373531
61636261623662396330653135326238363630363938323166303861313563393063386161393238
3231336232663533640a333763643864363939336566333731353031313739616633623537386435
39613934663133613733356433616162363430616439623830663837343530623937656434366663
33656466396263616132356337326236383761363834663363643163343231366563333865656433
39316365663734653734363362363539623636666261333534313935343566646166316233623535
32323831626463656337313266343634303830633936396232663966373264313762346235646665
61333139363039363436393962666365336334663164306230393433636664623934343039323637
33383036323233646237343031633030353330633734353232343633623864333834646239346362
643634303135656333646235343366636361

View File

@ -0,0 +1,45 @@
# defaults file for ansible-arch-provissionning
partition_table:
- device: "/dev/sda"
label: gpt
settings:
- number: 1
part_end: 64MB
flags: [boot, esp]
fstype: vfat
format: yes
- number: 2
part_start: 512MB
part_end: 1524MB
flags: []
fstype: swap
format: yes
- number: 3
part_start: 1524MB
flags: [lvm]
fstype: ext4
format: yes
#- device: "/dev/sdb"
#settings:
#- number: 1
#name: home
#fstype: ext4
#format:
mount_table:
- device: "/dev/sda"
settings:
- number: 3
mountpath: /mnt
fstype: ext4
- number: 1
mountpath: /mnt/boot
fstype: vfat
#need vfat boot partition with esp label
provissionning_UEFI_Enable: True
#sssd_configure: False
nomad_datacenter: hetzner
consul_server: False
nomad_server: False

View File

@ -1,36 +1,7 @@
ansible_python_interpreter: /usr/bin/python3
user:
name: vincent
home: /home/vincent
uid: 1024
mail: vincent@ducamps.win
groups:
- docker
authorized_keys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINN5V9WPPi2/HwAQuDeaJO3hUPf8HxNMHqVmkf1pDjWg JuiceSSH
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDBrP9akjyailPU9tUMvKrtDsqjI191W1L95u3OFjBqqapXgbDVx1FVtSlIIKcCHZyTII1zgC7woZmNRpmaIJRh6N+VIuRrRs29xx2GUVc4pxflUwwIAK36hgZS3nqmA2biacmPR9HogZLZMcPtZdLhWGlLuUv1cWqbqW7UcDa0lbubCo2v4OQMx/zt37voKAZSkkbH9mVszH6eKxNFy1KXbLYhwXiKfYBnAHbivhiSkZUGV6D4HNj8Jx6IY1YF3bfwMXmt841Q/7OY+t3RTIS8ewvSF+jpQ7GKHBEsZTZUGwIoSyZFFvCgKQVOJu/ZJJS4HNkluilir9Sxtx2LRgy+HHQ251trnsVsJp3ts4uTiMkKJQy1PXy1ZvQXYkip9Af3vlXUMmTyVj8cv+No07G1rZ1pZ3wXKX4RkTsoep5GsYlhyUd7GzsAQQiX9YhYyWDQ6NHBYAGAWbw2BLNxltWa4AyWOa1C8v+1+mRwdvpdMY7powJNCXQaIJmiOZiI/Us= vincent@fixe-pc-2020-03-01
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCYHkEIa38p3e4+m/LScHm8Ei7H2X/pDksjVAzoJ4fHr8oXc6DKkC8SWwMnh3L4WzWBhfTbzwUgFTNpsxhp/UyJf+fdzmzetlbVlYSuA6yWuSmgMeFbXFImhZ+Sn3i59hLeqAAyrkQLjba2waehdEsuOQ/AGoDbMYm38Xf9Wka/1YIeUPE4gLeLvymRnGw7BSug6Unycy52WlFAquollObOvc7tNiX0uLDh81Dp0KZhqWRs75hfmQ9du4g4uNhFLiF11hOGNgj3PWV+nWe8GWNQYVUBChWX1dsP8ct/ahG9IFXSPEaFD1IZeFp29u2ln3mgKkBtcRTRe1e3CLQqiRsUq2aixVFbSgFMFgGSUiNGNqKR4f9DeyJrYBplSj6HXjWoBny4Wm8+yfk8qR2RtQpS6AUu81xtKnXOaj9Q5VZO3kVF0U3EXHAZutTYDj9mDlhLSBS7x7hmrkRBbIy7adSx9Gx5Ck3/RllqG6KD+LdJa4I0pUTRNetpLpYDeZpwjnDP1r7udaSQMyRMH5YKLzhtHqIV/imn9QO4KCxNxTgwxt9ho6HDvlDGERCxm+yeHUu3CPyq2ZGSF5HHsYTGUtYvQw4JfQyw/5DrZ7IIdU1e7ZuaE3h/NvFgKJPVTP52nmUtIW7pIOkHpn9mddjm/oKMayOzMspLn9HLFVbqi7A5Xw== vincent@zen-pc
privatekey:
- keyname: "id_gitea"
key: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/privatekey:gitea')}}"
user_config_repo: "ssh://git@git.{{ domain.name }}:2222/vincent/conf2.git"
domain:
name: ducamps.win
hass_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDfVei9iC/Ra5qmSZcLu8z2CTaXCmfn4JSS4o3eu0HhykdYGSqhBTcUDD3/FhcTPQJVFsu1P4Gwqq1dCE+EvaZZRQaMUqVKUpOliThSG6etbImkvqLQQsC1qt+/NqSvfzu2+28A6+YspzuxsViGo7e3Gg9MdwV3LMGh0mcOr/uXb/HIk18sJg5yQpwMfYTj0Wda90nyegcN3F2iZMeauh/aaFJzWcHNakAAewceDYOErU07NhlZgVA2C8HgkJ8HL7AqIVqt9VOx3xLp91DbKTNXSxvyM0X4NQP24P7ZFxAOk/j0AX3hAWhaNmievCHyBWvQve1VshZXFwEIiuHm8q4GSCxK2r0oQudKdtIuQMfuUALigdiSxo522oEiML/2kSk17WsxZwh7SxfD0DKa82fy9iAwcAluWLwJ+yN3nGnDFF/tHYaamSiowpmTTmQ9ycyIPWPLVZclt3BlEt9WH/FPOdzAyY7YLzW9X6jhsU3QwViyaTRGqAdqzUAiflKCMsNzb5kq0oYsDFC+/eqp1USlgTZDhoKtTKRGEjW2KuUlDsXGBeB6w1D8XZxXJXAaHuMh4oMUgLswjLUdTH3oLnnAvfOrl8O66kTkmcQ8i/kr1wDODMy/oNUzs8q4DeRuhD5dpUiTUGYDTWPYj6m6U/GAEHvN/2YEqSgfVff1iQ4VBw==
system_arch_local_mirror: "https://arch.{{domain.name}}/repo/archlinux_$arch"
system_sudoers_group: "serverAdmin"
system_ipV6_disable: True
user_custom_host:
- host: "git.ducamps.win"
user: "git"
keyfile: "~/.ssh/id_gitea"
- host: "gitlab.com"
user: "git"
keyfile: "~/.ssh/id_consort"
system_ip_unprivileged_port_start: 0
wireguard_mtu: 1420

View File

@ -0,0 +1,5 @@
consul_client_addr: "0.0.0.0"
consul_datacenter: "homelab"
consul_backup_location: "/mnt/diskstation/git/backup/consul"
consul_ansible_group: all
consul_systemd_resolved_enable: true

View File

@ -0,0 +1,8 @@
docker_daemon_config:
dns:
- 172.17.0.1
- 192.168.1.6
mtu: 1420
insecure-registries:
- 192.168.1.0/24
- 192.168.121.0/24

View File

@ -0,0 +1,9 @@
nomad_docker_allow_caps:
- NET_ADMIN
- NET_BROADCAST
- NET_RAW
nomad_allow_privileged: True
nomad_vault_enabled: true
nomad_vault_address: "http://active.vault.service.{{consul_domain}}:8200"
nomad_vault_role: "nomad-cluster"
nomad_docker_extra_labels: ["job_name", "task_group_name", "task_name", "namespace", "node_name"]

View File

@ -1,42 +0,0 @@
consul_client_addr: "0.0.0.0"
consul_datacenter: "homelab"
consul_backup_location: "/mnt/diskstation/git/backup/consul"
consul_ansible_group: all
consul_bootstrap_expect: 3
nomad_docker_allow_caps:
- NET_ADMIN
- NET_BROADCAST
- NET_RAW
nomad_vault_enabled: true
nomad_vault_address: "http://active.vault.service.consul:8200"
nomad_vault_role: "nomad-cluster"
nomad_vault_token: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:nomad_vault_token') }}"
nomad_bootstrap_expect: 3
notification_mail: "{{inventory_hostname}}@{{ domain.name }}"
msmtp_mailhub: smtp.{{ domain.name }}
msmtp_auth_user: "{{ user.mail }}"
msmtp_auth_pass: "{{ lookup('hashi_vault','secret=secrets/data/ansible/other:email') }}"
system_user:
- name: drone-deploy
home: /home/drone-deploy
shell: /bin/bash
privatekey:
- keyname: id_gitea
key: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/privatekey:gitea')}}"
authorized_keys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDUaK+pQlosmopbZfucll9UdqDOTaODOBwoxRwkJEk1i drone@oscar
- name: ansible
home: /home/ansible
shell: /bin/bash
- name: root
home: /root
privatekey:
- keyname: id_gitea
key: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/privatekey:gitea')}}"

View File

@ -1,9 +1,5 @@
sssd_configure: true
# sssd_configure is False by default - by default nothing is done by this role.
ldap_search_base: "dc=ducamps,dc=win"
ldap_uri: "ldaps://ldap.ducamps.win"
ldap_sudo_search_base: "ou=sudoers,dc=ducamps,dc=win"
ldap_default_bind_dn : "uid=vaultserviceaccount,cn=users,dc=ducamps,dc=win"
ldap_password : "{{lookup('hashi_vault', 'secret=secrets/data/ansible/other:vaulserviceaccount')}}"
userPassword: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/user:userPassword')}}"
ldap_search_base: "dc=ducamps,dc=eu"
ldap_uri: "ldaps://ldaps.service.consul"
ldap_sudo_search_base: "ou=sudoers,dc=ducamps,dc=eu"

View File

@ -0,0 +1,42 @@
user:
name: vincent
home: /home/vincent
uid: 1024
mail: vincent@ducamps.eu
groups:
- docker
authorized_keys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINN5V9WPPi2/HwAQuDeaJO3hUPf8HxNMHqVmkf1pDjWg JuiceSSH
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDBrP9akjyailPU9tUMvKrtDsqjI191W1L95u3OFjBqqapXgbDVx1FVtSlIIKcCHZyTII1zgC7woZmNRpmaIJRh6N+VIuRrRs29xx2GUVc4pxflUwwIAK36hgZS3nqmA2biacmPR9HogZLZMcPtZdLhWGlLuUv1cWqbqW7UcDa0lbubCo2v4OQMx/zt37voKAZSkkbH9mVszH6eKxNFy1KXbLYhwXiKfYBnAHbivhiSkZUGV6D4HNj8Jx6IY1YF3bfwMXmt841Q/7OY+t3RTIS8ewvSF+jpQ7GKHBEsZTZUGwIoSyZFFvCgKQVOJu/ZJJS4HNkluilir9Sxtx2LRgy+HHQ251trnsVsJp3ts4uTiMkKJQy1PXy1ZvQXYkip9Af3vlXUMmTyVj8cv+No07G1rZ1pZ3wXKX4RkTsoep5GsYlhyUd7GzsAQQiX9YhYyWDQ6NHBYAGAWbw2BLNxltWa4AyWOa1C8v+1+mRwdvpdMY7powJNCXQaIJmiOZiI/Us= vincent@fixe-pc-2020-03-01
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCYHkEIa38p3e4+m/LScHm8Ei7H2X/pDksjVAzoJ4fHr8oXc6DKkC8SWwMnh3L4WzWBhfTbzwUgFTNpsxhp/UyJf+fdzmzetlbVlYSuA6yWuSmgMeFbXFImhZ+Sn3i59hLeqAAyrkQLjba2waehdEsuOQ/AGoDbMYm38Xf9Wka/1YIeUPE4gLeLvymRnGw7BSug6Unycy52WlFAquollObOvc7tNiX0uLDh81Dp0KZhqWRs75hfmQ9du4g4uNhFLiF11hOGNgj3PWV+nWe8GWNQYVUBChWX1dsP8ct/ahG9IFXSPEaFD1IZeFp29u2ln3mgKkBtcRTRe1e3CLQqiRsUq2aixVFbSgFMFgGSUiNGNqKR4f9DeyJrYBplSj6HXjWoBny4Wm8+yfk8qR2RtQpS6AUu81xtKnXOaj9Q5VZO3kVF0U3EXHAZutTYDj9mDlhLSBS7x7hmrkRBbIy7adSx9Gx5Ck3/RllqG6KD+LdJa4I0pUTRNetpLpYDeZpwjnDP1r7udaSQMyRMH5YKLzhtHqIV/imn9QO4KCxNxTgwxt9ho6HDvlDGERCxm+yeHUu3CPyq2ZGSF5HHsYTGUtYvQw4JfQyw/5DrZ7IIdU1e7ZuaE3h/NvFgKJPVTP52nmUtIW7pIOkHpn9mddjm/oKMayOzMspLn9HLFVbqi7A5Xw== vincent@zen-pc
privatekey:
- keyname: "id_gitea"
key: "{{lookup('file', '~/.ssh/id_gitea')}}"
system_user:
- name: drone-deploy
home: /home/drone-deploy
shell: /bin/bash
authorized_keys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDUaK+pQlosmopbZfucll9UdqDOTaODOBwoxRwkJEk1i drone@oscar
- name: ansible
home: /home/ansible
shell: /bin/bash
- name: root
home: /root
privatekey:
- keyname: id_gitea
key: "{{lookup('file', '~/.ssh/id_gitea')}}"
user_custom_host:
- host: "git.ducamps.eu"
user: "git"
keyfile: "~/.ssh/id_gitea"
user_config_repo: "ssh://git@git.ducamps.eu:2222/vincent/conf2.git"

View File

@ -0,0 +1 @@
vault_raft_group_name: "homelab"

View File

@ -0,0 +1,11 @@
$ANSIBLE_VAULT;1.1;AES256
39613433313663653039643961643165643632313938626339653365376633613135653436363938
6331623132366638633665636163336462393333336264320a666466303465663839646435626231
38396437363034313236383261326637306238616162303131356537393635363939376236386130
6466353961643233310a306631333664363332336263656638623763393732306361306632386662
37623934633932653965316532386664353130653830356237313337643266366233346633323265
37616533303561363864626531396366323565396536383133643539663630636633356238386633
34383464333363663532643239363438626135336632316135393537643930613532336231633064
35376561663637623932313365636261306131353233636661313435643563323534623365346436
65366132333635643832353464323961643466343832376635386531393834336535386364396333
3932393561646133336437643138373230366266633430663937

View File

@ -0,0 +1,12 @@
$ANSIBLE_VAULT;1.1;AES256
61326233336236343231396231306638373837653661313334313261313539316532373437346132
3931306637303530373032663236363466383433316161310a396439393564643731656664663639
32386130663837303663376432633930393663386436666263313939326631616466643237333138
3365346131636333330a376436323964656563363664336638653564656231636136663635303439
35346461356337303064623861326331346263373539336335393566623462343464323065366237
61346637326336613232643462323733366530656439626234663335633965376335623733336162
37323739376237323534613361333831396531663637666161666366656237353563626164626632
33326336353663356235373835666166643465666562616663336539316233373430633862613133
36363831623361393230653161626131353264366634326233363232336635306266376363363739
66373434343330633337633436316135656533613465613963363931383266323466653762623365
363332393662393532313063613066653964

View File

@ -0,0 +1,14 @@
$ANSIBLE_VAULT;1.1;AES256
35303137383361396262313561623237626336306366376630663065396664643630383638376436
3930346265616235383331383735613166383461643233310a663564356266663366633539303630
37616532393035356133653838323964393464333230313861356465326433353339336435363263
3162653932646662650a613762393062613433343362633365316434663661306637623363333834
61303231303362313133346461373738633239613933303564383532353537626538363636306461
66663330346566356637623036363964396137646435333139323430353639386134396537366334
39303130386432366335383433626431663034656466626265393863623438366130346562623365
63653963393663353666313631326131636361333230386461383638333338393137336562323935
37343034363961306663303232346139356534613837663230393962323333656536303161373939
65626164336166306264653538313661393934383966303135356161336331623835663235646332
63343764643861366537383962616230323036326331386333346463353835393762653735353862
32323839663365353337303363313535633362643231653663393936363539363933636430613832
32336566633962646463316636346330336265626130373636643335323762363661

View File

@ -0,0 +1,14 @@
$ANSIBLE_VAULT;1.1;AES256
64396261616266633665646330393631316463386334633032353965323964633464333331323334
6261653930313764313836366531383462313965336231620a656637623439623639383931373361
37373434636531623563336565356136633031633835633636643436653165386436636564616130
3763383036343739370a376565343130636631653635616566653531323464343632623566313436
32396165636333393032636636613030373663393238323964396462323163616162613933626536
31623931343633346131636563643563393230323839636438373933666137393031326532356535
32363439306338623533353734613966396362303164616335363535333438326234623161653732
66613762653966613763623966633939323634346536636334343364306332323563653361346563
65313433376634363261323934376637646233636233346536316262386634353666376539613235
63666432396636373139663861393164626165383665663933383734303165623464666630343231
33323339663138373530396636636333323439616137313434316465633162396237306238343366
30326162306539396630633738323435323432646338633331626665363838376363343835336534
3635

View File

@ -0,0 +1,50 @@
systemd_mounts:
diskstation_photo:
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/photo"
mount: /mnt/diskstation/photo
type: nfs
options:
- "vers=4"
automount: "{{ env_automount }}"
enabled: true
diskstation_music:
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/music"
mount: /mnt/diskstation/music
type: nfs
options:
- "vers=4"
automount: "{{ env_automount }}"
enabled: true
diskstation_media:
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/media"
mount: /mnt/diskstation/media
type: nfs
options:
- "vers=4"
automount: "{{ env_automount }}"
enabled: true
diskstation_ebook:
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/ebook"
mount: /mnt/diskstation/ebook
type: nfs
options:
- "vers=4"
automount: "{{ env_automount }}"
enabled: true
diskstation_nomad:
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/nomad"
mount: /mnt/diskstation/nomad
type: nfs
options:
- " "
automount: "{{ env_automount }}"
enabled: true
diskstation_download:
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/download"
mount: /mnt/diskstation/download
type: nfs
options:
- "vers=4"
automount: "{{ env_automount }}"
enabled: true

View File

@ -0,0 +1 @@
nomad_node_class: 'cluster'

View File

@ -1,54 +0,0 @@
postgresql_users:
- name: root
role_attr_flags: SUPERUSER
password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:root')}}"
- name: wikijs
password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/wikijs:password')}}"
- name: ttrss
password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/ttrss:password')}}"
- name: gitea
password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/gitea:password')}}"
- name: supysonic
password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/supysonic:password')}}"
- name: hass
password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/homeassistant:password')}}"
- name: vaultwarden
password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/vaultwarden:password')}}"
- name: drone
password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/droneci:password')}}"
- name: dendrite
password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/dendrite:password')}}"
- name: paperless
password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/paperless:password')}}"
- name: dump
password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/dump:password')}}"
- name: vikunja
password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/vikunja:password')}}"
- name: ghostfolio
password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/ghostfolio:password')}}"
postgresql_databases:
- name: wikijs
owner: wikijs
- name: ttrss
owner: ttrss
- name: gitea
owner: gitea
- name: supysonic
owner: supysonic
- name: hass
owner: hass
- name: vaultwarden
owner: vaultwarden
- name: drone
owner: drone
- name: dendrite
owner: dendrite
- name: paperless
owner: paperless
- name: vikunja
owner: vikunja
- name: ghostfolio
owner: ghostfolio

View File

@ -0,0 +1,38 @@
postgres_consul_service: true
postgres_consul_service_name: db
postgresql_databases:
- name: ttrss
owner: ttrss
- name: gitea
owner: gitea
- name: supysonic
owner: supysonic
- name: hass
owner: hass
- name: vaultwarden
owner: vaultwarden
- name: drone
owner: drone
- name: paperless
owner: paperless
- name: vikunja
owner: vikunja
- name: ghostfolio
owner: ghostfolio
- name: pdns-auth
owner: pdns-auth
- name: pdns-admin
owner: pdns-admin
- name: mealie
owner: mealie
- name: immich
owner: immich
postgresql_hba_entries:
- {type: local, database: all, user: postgres, auth_method: peer}
- {type: local, database: all, user: all, auth_method: peer}
- {type: host, database: all, user: all, address: '127.0.0.1/32', auth_method: md5}
- {type: host, database: all, user: all, address: '::1/128', auth_method: md5}
- {type: host, database: all, user: all, address: '::/0', auth_method: md5}
- {type: host, database: all, user: all, address: '0.0.0.0/0', auth_method: md5}

View File

@ -0,0 +1,54 @@
$ANSIBLE_VAULT;1.1;AES256
39363436643831373861376361613830316334613939346338616636393462663033393261633838
6337336161393063646136613538396366653538656435360a303062636463383739653730346639
61323634306265613336313634653039313639663836363032353261383566393865613166613032
3837313634633466610a313062646237396138316361303361663565353862363139343566306539
38303161303163323265376539323939393938373965353934303535613962653534363362346563
61643638353138623162353364353736396162613735333063633739346132613161303564356437
62343535363263646463306466663536613937393463666336396332646533343439613433626566
38643363343065393165646134343935386461626166316662356365366666363737653336626631
64643230616431396666666462303366343164323233303139643939346635353730316234386163
35613235643034643833393233373536383863333763393066373564353535353463363336316335
63363537643432663266386438316563656663656462333039303861393364333966383430643263
63356435373064633861343137616637393161383361306135373864386235653034323732316663
65336465386135663532356433386562666639333464633362663131646237613034646563396133
33303464633635636233626633353038656230373266666132323561383866343632333561323363
61346664623338376436373332646232646235323639633262666166346535663238653563363239
34663365633363313433376333653534333364393635316235333965383262313563373161663065
36393565396534353235623238303835343334646632306638306332336539616463393966653538
35336462623031326539633139636533633632623137393463333531663935323765663139306361
66643434393533313039356434326438626265323066613966323634306632653765363834613034
30373039336536393865383265643335396232643537343363313338383838383030386665303237
64363666346535633237353462333232623132353031323231623338356136656261303662656465
31313039643561623635643435333133663032313964323061393231666336343233363038616231
36356262326530383233336130326361613431623866633832663361633937646461343731343938
33306262346463623935663466356264393837626239313739356431653163376563333234346566
38373663643532313635333131663239383736343930623735323861663037356136353433633865
63626435613936303661366637623338633961643137613933303735366265663933396130363039
34396637643638613839306639343765393539653164616536653661373264376436626639316666
61303835323761643531326438363035343539383464376433363534623934366534373631353364
61383866323737316430303736366533643939313637393631303833363431613562303639323939
66313434613963656464383964313734383938353366306462666537653563336465376464303538
34336531663334303938333739313638636363623562613536333736386137363139653164626261
62663662316365663563646164303935323866633336633939323837393962393130626330666233
63663661303565646236623130663034636264353235376561306630376365613966663536303963
63643161386435633831393334333035653761393863373731616239313235383033633439376166
39613762376162386231633938393036633461303732323337656430373430636435313337303365
37646461336339623339316663616636373036656564383462356562306465623762653162633963
35636466386138333564666564323034393162633965386133643235303938616439333130353637
61343536323034366464653138353665326436396133313432666563353335383733363335613562
61646365346665383866623364396138323666326338313530353663323938613362653038313339
32613663616535313661386538366330373364366637386634633437646362383764346263636434
35616166393065343038643861636333373738363335353164326435303961326662356230323262
35656531653535643630376330393731643532353132366662636664626132646632306361323035
31373136616435336362633439356339336466313337623538383763386132396135653864386638
31393864363466653137643565306462616238333435343036613331653866393532313861376331
33646636623666343439616332386363373664346164313963623861393134666463383366633539
35313761333564303635656364303566643436393130356163623137313530653539656537653139
38336636623732313630303933303962303561376436623737633139643564343166326335386639
31373437336139326562613339393235393065396538333566323864643639303132313733396132
35613532396363326166313061353136373965303964623534653634613639303764393038333037
63656131616463663565653134363336326139303736313138366262616338643339316231663631
30656132386462393433313261313466303239346138623433643634616465656139343764353338
62616139613731363665333438383861623837643432643134626461643631323034383262656439
33653563323434343964633236353434643739333863636630636363633639373630

View File

@ -0,0 +1 @@
postgres_consul_tag: "active"

View File

@ -0,0 +1 @@
postgres_consul_tag: "standby"

View File

@ -3,19 +3,15 @@ dhcpd_lease_time: '72'
dhcpd_domain_name: "lan.{{ domain.name }}"
dhcpd_nameservers:
- '192.168.1.4'
- '192.168.1.10'
dhcpd_keys:
- key: dhcp
algorithm: HMAC-MD5
secret: "{{ lookup('hashi_vault','secret=secrets/data/ansible/other:dhcpd_key') }}"
- '192.168.1.40'
dhcpd_zones:
- zone: "lan.{{ domain.name }}."
primary: "192.168.1.10"
key: "dhcp"
primary: "192.168.1.5"
key: "dhcpdupdate"
- zone: "1.168.192.in-addr.arpa."
primary: "192.168.1.10"
key: "dhcp"
primary: "192.168.1.5"
key: "dhcpdupdate"
dhcpd_options: |
ddns-updates on;
@ -45,17 +41,10 @@ dhcpd_hosts:
- hostname: 'oscar'
address: '192.168.1.40'
ethernet: '7C:83:34:B3:49:9A'
ethernet: '68:1D:EF:3C:F0:44'
- hostname: 'bleys'
address: '192.168.1.42'
ethernet: '68:1d:ef:2b:3d:24'
- hostname: 'VMAS-HML'
address: '192.168.1.50'
ethernet: '52:54:00:02:74:ed'
- hostname: 'VMAS-BUILD'
address: '192.168.1.53'
ethernet: '52:54:13:1e:93'
- hostname: 'xiaomi-chambre-gateway'
@ -73,4 +62,7 @@ dhcpd_hosts:
- hostname: 'shelly-chambre-ventilo'
address: '192.168.1.65'
ethernet: 'e0:98:06:97:78:0b'
- hostname: 'shelly-Bureau-chauffeau'
address: '192.168.1.66'
ethernet: '8c:aa:b5:42:b9:b9'

View File

@ -0,0 +1,14 @@
$ANSIBLE_VAULT;1.1;AES256
65303666336535386536653939626336646338623431353161636565393532623264316534326539
6265393839323438376666393030383839326239323261660a333132613538306137383332336538
38323830353062366133643734303138343939323135333532333666653039326437316361353463
6665393263376132620a346239386437326462363565636335303766306638393331656664376665
63373131373039653065633861626263646635323634333538343163346239633937303761366362
31376438363731613666393531656232653033336332653261313866396434616461303831353336
38663965636536313932346133363733636636643938366364366435366237316435643062336231
34343931653963613431336465653036616431323263613731393963656637303561366461663038
31336131346266393035343135323131636435333865323733386439363763376638383337613530
34356331356361636665383933633130343564373739343630663835313164326565393439306163
31386538633033333961386534323234653833323537356565616436346462613333663139623035
30636265313230383162633466373937353262383965313631326336666133653331366230653961
6131

View File

@ -1,3 +1,2 @@
nomad_datacenter: homelab
nomad_allow_privileged: True
system_wol_enable: True

View File

@ -1,83 +0,0 @@
systemd_mounts:
diskstation_git:
share: diskstation.ducamps.win:/volume2/git
mount: /mnt/diskstation/git
type: nfs
options:
- " "
automount: true
diskstation_CardDav:
share: diskstation.ducamps.win:/volume2/CardDav
mount: /mnt/diskstation/CardDav
type: nfs
options:
- " "
automount: true
backup_disk:
share: /dev/sdb1
mount: /mnt/backup
type: ntfs-3g
options:
- " "
automount: true
diskstation_home:
share: diskstation.ducamps.win:/volume2/homes/admin
mount: /mnt/diskstation/home
type: nfs
options:
- " "
automount: true
diskstation_photo:
share: diskstation.ducamps.win:/volume2/photo
mount: /mnt/diskstation/photo
type: nfs
options:
- " "
automount: true
diskstation_music:
share: diskstation.ducamps.win:/volume2/music
mount: /mnt/diskstation/music
type: nfs
options:
- " "
automount: true
diskstation_media:
share: diskstation.ducamps.win:/volume1/media
mount: /mnt/diskstation/media
type: nfs
options:
- " "
automount: true
diskstation_ebook:
share: diskstation.ducamps.win:/volume2/ebook
mount: /mnt/diskstation/ebook
type: nfs
options:
- " "
automount: true
diskstation_archMirror:
share: diskstation.ducamps.win:/volume2/archMirror
mount: /mnt/diskstation/archMirror
type: nfs
options:
- " "
automount: true
diskstation_nomad:
share: diskstation.ducamps.win:/volume2/nomad
mount: /mnt/diskstation/nomad
type: nfs
options:
- " "
automount: true
systemd_mounts_enabled:
- diskstation_git
- diskstation_music
- backup_disk
- diskstation_photo
- diskstation_home
- diskstation_CardDav
- diskstation_media
- diskstation_ebook
- diskstation_archMirror
- diskstation_nomad

View File

@ -0,0 +1,13 @@
domain:
name: ducamps.eu
consul_bootstrap_expect: 3
consul_domain: "consul"
nomad_bootstrap_expect: 3
nomad_client_meta:
- name: "env"
value: "production"
vault_unseal_keys_dir_output: "~/vaultUnseal/production"
env_default_nfs_path: ""
env_media_nfs_path: "/volume1"
env_automount: true
nas_ip: "192.168.1.43"

View File

@ -1,4 +1,21 @@
systemd_mounts: []
systemd_mounts_enabled: []
domain:
name: ducamps.dev
#systemd_mounts: []
#systemd_mounts_enabled: []
consul_bootstrap_expect: 2
consul_domain: "consul"
nomad_bootstrap_expect: 2
nomad_client_meta:
- name: "env"
value: "staging"
vault_unseal_keys_dir_output: "~/vaultUnseal/staging"
hosts_entries:
- ip: "{{ hostvars['nas-dev']['ansible_default_ipv4']['address'] }}"
name: diskstation.ducamps.eu
env_default_nfs_path: ""
env_automount: true
nas_ip: "nfs.service.consul"

View File

@ -1,6 +1,10 @@
---
ansible_host: "192.168.1.42"
ansible_python_interpreter: "/usr/bin/python3"
default_interface: "enp2s0"
consul_iface: "{{ default_interface}}"
vault_iface: "{{ default_interface}}"
nfs_iface: "{{ default_interface}}"
wireguard_address: "10.0.0.7/24"
wireguard_byhost_allowed_ips:
merlin: 10.0.0.7,192.168.1.42,192.168.1.0/24
@ -11,13 +15,13 @@ wireguard_endpoint: ""
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o enp2s0 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{default_interface}} -j MASQUERADE
- sysctl -w net.ipv4.ip_forward=1
wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o enp2s0 -j MASQUERADE
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
- sysctl -w net.ipv4.ip_forward=0
partition_table:

View File

@ -1,22 +1,23 @@
---
ansible_host: 10.0.0.1
#ansible_host: 135.181.150.203
default_interface: "eth0"
wireguard_address: "10.0.0.1/24"
wireguard_endpoint: "135.181.150.203"
wireguard_persistent_keepalive: "20"
wireguard_allowed_ips: "10.0.0.1/32,10.0.0.3/32,10.0.0.5/32"
wireguard_allowed_ips: 10.0.0.1
wireguard_postup:
- iptables -A FORWARD -o %i -j ACCEPT
- iptables -A FORWARD -i %i -j ACCEPT
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
- sysctl -w net.ipv4.ip_forward=1
- resolvectl dns %i 192.168.1.4 192.168.1.10; resolvectl domain %i '~ducamps.win' '~consul'
- resolvectl dns %i 192.168.1.4 192.168.1.41; resolvectl domain %i '~ducamps.win' '~ducamps.eu' '~{{ consul_domain }}'
wireguard_postdown:
- iptables -D FORWARD -i %i -j ACCEPT
- iptables -D FORWARD -o %i -j ACCEPT
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
- sysctl -w net.ipv4.ip_forward=0
wireguard_unmanaged_peers:
@ -28,7 +29,7 @@ wireguard_unmanaged_peers:
public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
allowed_ips: 10.0.0.5/32
persistent_keepalive: 0
wireguard_dns: "192.168.1.4,192.168.1.10"
wireguard_dns: "192.168.1.4,192.168.1.41"
consul_client_addr: "127.0.0.1 10.0.0.1"
consul_bind_address: "10.0.0.1"
consul_ui: True
@ -41,5 +42,6 @@ nomad_host_networks:
interface: eth0
- name: "default"
interface: wg0
nomad_client_network_interface : "wg0"
vault_listener_address: 10.0.0.1
nomad_plugins_podman: True

View File

@ -1,6 +1,10 @@
---
ansible_host: "192.168.1.41"
ansible_python_interpreter: "/usr/bin/python3"
default_interface: "enu1u1"
consul_iface: "{{ default_interface }}"
vault_iface: "{{ default_interface }}"
wireguard_address: "10.0.0.6/24"
wireguard_byhost_allowed_ips:
merlin: 10.0.0.6,192.168.1.41
@ -11,10 +15,10 @@ wireguard_endpoint: ""
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o enu1u1 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o enu1u1 -j MASQUERADE
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE

View File

@ -1,4 +1,8 @@
---
default_interface: eth0
vault_iface: "{{ default_interface}}"
ansible_host: gerard-dev.lan.ducamps.dev
wireguard_address: "10.0.1.6/24"
wireguard_persistent_keepalive: "20"
wireguard_endpoint: ""
@ -6,10 +10,10 @@ wireguard_endpoint: ""
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -D POSTROUTING -o {{ default_interface}} -j MASQUERADE

View File

@ -1,31 +1,39 @@
---
ansible_host: 10.0.0.4
#ansible_host: 65.21.2.14
default_interface: "ens3"
nfs_iface: "wg0"
wireguard_address: "10.0.0.4/24"
wireguard_endpoint: "95.216.217.5"
wireguard_persistent_keepalive: "30"
wireguard_allowed_ips: "10.0.0.4/32,10.0.0.3/32,10.0.0.5/32"
wireguard_endpoint: "65.21.2.14"
wireguard_persistent_keepalive: "20"
wireguard_byhost_allowed_ips:
oscar: "0.0.0.0/0"
bleys: "0.0.0.0/0"
wireguard_allowed_ips: "10.0.0.4/32,10.0.0.3,10.0.0.5"
wireguard_postup:
- iptables -A FORWARD -o %i -j ACCEPT
- iptables -A FORWARD -i %i -j ACCEPT
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
- sysctl -w net.ipv4.ip_forward=1
- resolvectl dns %i 192.168.1.4 192.168.1.41; resolvectl domain %i '~ducamps.win' '~ducamps.eu' '~{{ consul_domain }}'
wireguard_postdown:
- iptables -D FORWARD -i %i -j ACCEPT
- iptables -D FORWARD -o %i -j ACCEPT
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
- sysctl -w net.ipv4.ip_forward=0
wireguard_unmanaged_peers:
phone:
public_key: ioG35kDFTtip+Acfq+je9qDHYbZij+J6+Pg3T6Z4N0w=
public_key: IYKgrQ2VJUbOnupSqedOfIilsbmBBABZUTRF9ZoTrkc=
allowed_ips: 10.0.0.3/32
persistent_keepalive: 0
zen:
public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
allowed_ips: 10.0.0.5/32
persistent_keepalive: 0
wireguard_dns: "192.168.1.40,192.168.1.10"
wireguard_dns: "192.168.1.4,192.168.1.41"
consul_client_addr: "127.0.0.1 10.0.0.4"
consul_bind_address: "10.0.0.4"
consul_ui: True
@ -35,7 +43,8 @@ nomad_host_networks:
- name: "private"
interface: wg0
- name: "public"
interface: eth0
interface: ens3
- name: "default"
interface: wg0
vault_listener_address: 10.0.0.4
nomad_plugins_podman: True

View File

@ -1,4 +1,8 @@
---
ansible_host: merlin-dev.lan.ducamps.dev
default_interface: eth0
vault_iface: "{{ default_interface}}"
wireguard_address: "10.0.1.4/24"
wireguard_endpoint: "{{ ansible_default_ipv4.address }}"
wireguard_persistent_keepalive: "30"
@ -6,12 +10,12 @@ wireguard_persistent_keepalive: "30"
wireguard_postup:
- iptables -A FORWARD -o %i -j ACCEPT
- iptables -A FORWARD -i %i -j ACCEPT
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
wireguard_postdown:
- iptables -D FORWARD -i %i -j ACCEPT
- iptables -D FORWARD -o %i -j ACCEPT
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
wireguard_unmanaged_peers:
phone:

17
ansible/host_vars/nas-dev Normal file
View File

@ -0,0 +1,17 @@
---
# Host variables for the nas-dev staging VM (vagrant/libvirt environment).
ansible_host: nas-dev.lan.ducamps.dev
default_interface: eth0
vault_iface: "{{ default_interface }}"
wireguard_address: "10.0.1.8/24"
# Was misspelled "perrsistent_keepalive", which the wireguard role never
# reads; renamed to the variable name used by the other hosts (see merlin-dev).
wireguard_persistent_keepalive: "30"
wireguard_endpoint: ""
# Forward traffic from/to the wg0 tunnel and NAT it out of the default interface.
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
# Remove the same rules when the tunnel goes down.
wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE

19
ansible/host_vars/oberon Normal file
View File

@ -0,0 +1,19 @@
---
# Host variables for oberon (NAS host).
wireguard_address: "10.0.0.8/24"
default_interface: "enp2s0"
consul_iface: "{{ default_interface }}"
vault_iface: "{{ default_interface }}"
# Was misspelled "perrsistent_keepalive", which the wireguard role never
# reads; renamed to the variable name used by the other hosts.
wireguard_persistent_keepalive: "30"
wireguard_endpoint: ""
# Per-host allowed-IP overrides for peers merlin and corwin (tunnel + LAN address).
wireguard_byhost_allowed_ips:
  merlin: 10.0.0.8,192.168.1.43
  corwin: 10.0.0.8,192.168.1.43
# Forward traffic from/to the wg0 tunnel and NAT it out of the default interface.
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
# Remove the same rules when the tunnel goes down.
wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE

View File

@ -1,4 +1,9 @@
---
default_interface: "enp1s0"
consul_iface: "{{ default_interface}}"
vault_iface: "{{ default_interface}}"
nfs_iface: "{{ default_interface}}"
nomad_client_cpu_total_compute: 8000
wireguard_address: "10.0.0.2/24"
wireguard_byhost_allowed_ips:
merlin: 10.0.0.2,192.168.1.40
@ -9,18 +14,13 @@ wireguard_endpoint: ""
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o enp2s0 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o enp2s0 -j MASQUERADE
consul_snapshot: True
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
vault_snapshot: true
vault_backup_location: "/mnt/diskstation/git/backup/vault"
vault_roleID: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:vault-snapshot_approle') }}"
vault_secretID: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:vault-snapshot_secretID') }}"
partition_table:
- device: "/dev/sda"
label: gpt

View File

@ -1,4 +1,7 @@
---
ansible_host: oscar-dev.lan.ducamps.dev
default_interface: eth0
vault_iface: "{{ default_interface}}"
wireguard_address: "10.0.1.2/24"
wireguard_persistent_keepalive: "30"
wireguard_endpoint: ""
@ -6,14 +9,9 @@ wireguard_endpoint: ""
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
consul_snapshot: True
vault_snapshot: True
vault_backup_location: "/mnt/diskstation/git/backup/vault"
vault_roleID: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:vault-snapshot_approle') }}"
vault_secretID: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:vault-snapshot_secretID') }}"
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE

View File

@ -2,12 +2,24 @@
requirements:
ansible-galaxy install -g -r roles/requirements.yml
deploy_production: generate-token
deploy_production:
ansible-playbook site.yml -i production -u ansible
deploy_staging: generate-token
deploy_production_wiregard:
ansible-playbook playbooks/wireguard.yml -i production -u ansible
deploy_staging:
ansible-playbook playbooks/wireguard.yml -i staging -u ansible
ansible-playbook site.yml -i staging -u ansible
generate-token:
export VAULT_TOKEN=`vault token create -policy=ansible -field="token" -period 6h`
deploy_staging_base:
ansible-playbook playbooks/sssd.yml -i staging -u ansible
ansible-playbook playbooks/wireguard.yml -i staging -u ansible
ansible-playbook playbooks/server.yml -i staging -u ansible
view-allvault:
ansible-vault view `git grep -l "ANSIBLE_VAULT;1.1;AES256$$"`

View File

@ -0,0 +1,9 @@
#!/bin/sh
# Ansible vault password client: decrypts the GPG-encrypted vault password
# stored next to this script and prints it on stdout (referenced by
# vault_password_file in ansible.cfg).
# Quote "$0" so the script works from a path containing spaces.
readonly vault_password_file_encrypted="$(dirname "$0")/vault-password.gpg"
# flock used to work around "gpg: decryption failed: No secret key" in tf-stage2
# would otherwise need 'auto-expand-secmem' (https://dev.gnupg.org/T3530#106174)
flock "$vault_password_file_encrypted" \
	gpg --batch --decrypt --quiet "$vault_password_file_encrypted"

Binary file not shown.

View File

@ -0,0 +1,45 @@
---
# Molecule scenario: provisions the staging VMs with vagrant/libvirt and
# runs the Ansible provisioner against them using the repo's own
# ansible.cfg, roles and staging inventory.
prerun: false
dependency:
  name: galaxy
  enabled: false
driver:
  name: vagrant
  provider:
    name: libvirt
  default_box: archlinux/archlinux
# One VM per staging host; boxes mirror the OS of the production machines.
platforms:
  - name: oscar-dev
    cpu: 1
    memory: 1024
    box: archlinux/archlinux
  - name: merlin-dev
    cpu: 1
    memory: 1024
    box: generic/rocky9
  - name: gerard-dev
    cpu: 1
    memory: 1024
    box: debian/bookworm64
  - name: nas-dev
    cpu: 1
    memory: 1024
    box: archlinux/archlinux
provisioner:
  name: ansible
  connection_options:
    ansible_ssh_user: vagrant
    ansible_become: true
  env:
    ANSIBLE_CONFIG: ../../ansible.cfg
    ANSIBLE_ROLES_PATH: "../../roles"
  log: true
  lint:
    name: ansible-lint
  inventory:
    host_vars: []
    # Reuse the real group_vars and staging inventory instead of duplicating them.
    links:
      group_vars: ../../group_vars
      hosts: ../../staging
verifier:
  name: ansible

View File

@ -1,12 +1,54 @@
---
- hosts: all
name: Hashicorp stack
- name: Consul install
hosts: all
roles:
- role: ansible-hashicorp-vault
when: inventory_hostname not in groups['VPS']
become: true
- role: ansible-consul
become: true
- name: Vault install
hosts: homelab
roles:
- role: ansible-hashicorp-vault
become: true
post_tasks:
- name: Stat root file
ansible.builtin.stat:
path: "{{ vault_unseal_keys_dir_output }}/rootkey"
register: rootkey_exist
delegate_to: localhost
- name: Reading root contents
ansible.builtin.command: cat "{{ vault_unseal_keys_dir_output }}/rootkey"
register: root_token
delegate_to: localhost
when: rootkey_exist.stat.exists
changed_when: false
- name: debug
ansible.builtin.debug:
var: root_token
- name: Generate nomad token
community.hashi_vault.vault_token_create:
renewable: true
policies: "nomad-server-policy"
period: 72h
no_parent: true
token: "{{ root_token.stdout }}"
url: "http://active.vault.service.consul:8200"
retries: 4
run_once: true
delegate_to: localhost
when: root_token.stdout is defined
register: nomad_token_data
- name: Gather nomad token
ansible.builtin.set_fact:
nomad_vault_token: "{{ nomad_token_data.login.auth.client_token }}"
when: nomad_token_data.login is defined
- name: nomad
hosts: all
vars:
unseal_keys_dir_output: ~/vaultunseal
roles:
- role: ansible-nomad
become: true
- role: docker

View File

@ -0,0 +1,9 @@
---
# Deploy autofs (automounting of network shares) on every physical host group.
- hosts:
    - homelab
    - VPS
    - NAS
  vars:
    # certbot_force: true
  roles:
    - autofs

View File

@ -1,6 +1,6 @@
---
- hosts: all
become: true
gather_facts: false
become: true
roles:
- ansible_bootstrap

View File

@ -14,10 +14,13 @@
- docker
become: true
become_user: '{{ user.name }}'
- hosts: all
roles:
- role: user_config
vars:
user_config_username: '{{ user.name }}'
become_user: '{{ user.name }}'
user_config_username: "{{ user.name }}"
become_user: "{{ user.name }}"
become: true
- role: user_config
vars:

View File

@ -1,16 +1,54 @@
---
- hosts: database
- name: Database playbook
hosts: database
vars:
# certbot_force: true
pre_tasks:
- name: Install Pg vertors (immich)
aur:
name: pgvecto.rs-bin
state: present
become: true
become_user: aur_builder
- name: Add database member to pg_hba replication
ansible.builtin.set_fact:
postgresql_hba_entries: "{{ postgresql_hba_entries + [\
{'type':'host', \
'database': 'replication',\
'user':'repli',\
'address':hostvars[item]['ansible_'+hostvars[item]['default_interface']]['ipv4']['address']+'/32',\
'auth_method':'trust'}] }}"
loop: '{{ groups.database }}'
roles:
- role: ansible-role-postgresql
become: true
tasks:
- name: add pg_read_all_data to dump
community.postgresql.postgresql_membership:
target_roles:
- dump
groups:
- pg_read_all_data
- name: Launch replication
ansible.builtin.command: pg_basebackup -D /var/lib/postgres/data -h {{groups["database_active"]|first}} -U repli -Fp -Xs -P -R -w
args:
creates: /var/lib/postgres/data/postgresql.conf
become: true
become_user: "{{ postgresql_user }}"
become_user: postgres
when: inventory_hostname in groups["database_standby"]
- name: Ensure PostgreSQL is started and enabled on boot.
ansible.builtin.service:
name: '{{ postgresql_daemon }}'
state: '{{ postgresql_service_state }}'
enabled: '{{ postgresql_service_enabled }}'
become: true
- name: Set Postgress shared libraries
community.postgresql.postgresql_set:
name: shared_preload_libraries
value: vectors.so
become: true
become_user: postgres
when: inventory_hostname in groups["database_active"]
notify: Restart postgresql
- name: Set Postgress shared libraries
community.postgresql.postgresql_set:
name: search_path
value: '$user, public, vectors'
become: true
become_user: postgres
when: inventory_hostname in groups["database_active"]

View File

@ -0,0 +1,6 @@
---
# Deploy the PowerDNS recursor on the hosts of the DNS group.
- name: DNS playbook
  hosts: DNS
  roles:
    - role: pdns_recursor-ansible
      become: true

28
ansible/playbooks/nas.yml Normal file
View File

@ -0,0 +1,28 @@
---
# Empty play: its only effect is gathering facts on every host so later
# templates can reference facts from machines outside the NAS group.
- name: gather all
  hosts: all
- name: NAS playbook
  hosts: NAS
  vars:
    # certbot_force: true
  pre_tasks:
    # Bind-mount every NAS export source onto its export destination.
    - name: include task NasBind
      ansible.builtin.include_tasks:
        file: tasks/NasBind.yml
      loop: "{{ nas_bind_source }}"
    # Pre-create the per-job Nomad data folders with their expected owner.
    - name: create nomad folder
      ansible.builtin.file:
        path: "{{ nas_bind_target }}/nomad/{{ item.name }}"
        owner: "{{ item.owner|default('root') }}"
        state: directory
      become: true
      loop: "{{ NAS_nomad_folder }}"
  roles:
    - role: ansible-role-nut
      become: true
    - role: ansible-role-nfs
      become: true
    - role: ansible-role-pureftpd
      become: true
    - role: vladgh.samba.server
      become: true

View File

@ -2,6 +2,7 @@
- hosts:
- homelab
- VPS
- NAS
vars:
# certbot_force: true
tasks:
@ -22,7 +23,4 @@
loop_var: create
roles:
- system
- autofs
- role: msmtp
when: ansible_os_family != "RedHat"
- cronie

View File

@ -0,0 +1,18 @@
# Bind-mount one NAS export (item.source -> item.dest).
# Expects `item` with keys `source` and `dest`; looped from the NAS playbook.
- name: Ensure base NFS directory exist
  ansible.builtin.file:
    path: "{{ item.dest }}"
    state: directory
  become: true
- name: Ensure source NFS directory exist
  ansible.builtin.file:
    path: "{{ item.source }}"
    state: directory
  become: true
# `opts: bind` with `fstype: none` performs a Linux bind mount and
# `state: mounted` also persists it in fstab.
- name: Bind NAS export
  ansible.posix.mount:
    path: "{{ item.dest }}"
    src: "{{ item.source }}"
    opts: bind
    fstype: none
    state: mounted
  become: true

View File

@ -0,0 +1 @@
path = /exports/homes/%S

View File

@ -1,27 +1,52 @@
[homelab]
[DNS]
oscar
bleys
gerard
[VPS]
corwin
merlin
[dhcp]
gerard
oberon
[wireguard]
corwin
oscar
merlin
gerard
[database_active]
bleys
[database]
[database_standby]
oscar
bleys
[database:children]
database_active
database_standby
[rsyncd]
oscar
bleys
[wireguard:children]
production
[NAS]
oberon
[cluster]
oscar
#gerard
bleys
[homelab:children]
NAS
cluster
[VPS]
merlin
[region:children]
homelab
VPS
production
[production]
oscar
merlin
#gerard
bleys
oberon
[staging]

View File

@ -1,15 +1,11 @@
---
- hosts: all
remote_user: root
vars:
provissionning_default_root: "{{ lookup('hashi_vault','secret=secrets/data/ansible/user:root') }}"
roles:
- ansible-arch-provissionning
- hosts: all
remote_user: root
vars:
ansible_password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/user:root') }}"
roles:
- ansible_bootstrap

View File

@ -1,41 +1,49 @@
---
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-arch-provissionning.git
roles:
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-arch-provissionning.git
scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-role-postgresql.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-postgresql.git
scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-role-sssd
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-sssd
scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible_bootstrap.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible_bootstrap.git
scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/autofs.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/autofs.git
scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/cronie.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/cronie.git
scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/docker.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/docker.git
scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/hass-client-control.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/hass-client-control.git
scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/msmtp.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/msmtp.git
scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/rsyncd.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/rsyncd.git
scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/system.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/system.git
scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/user_config.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/user_config.git
scm: git
- src: git@github.com:vincentDcmps/ansible-role-wireguard.git
scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-consul.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-consul.git
scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-hashicorp-vault.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-hashicorp-vault.git
scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-nomad.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-nomad.git
scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/mpd.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/mpd.git
scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-dhcpd.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-dhcpd.git
scm: git
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-user.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-user.git
scm: git
- src: git@github.com:vincentDcmps/ansible-role-nfs.git
scm: git
- src: git@github.com:vincentDcmps/ansible-role-nut.git
scm: git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-pureftpd.git
scm: git
- src: https://github.com/PowerDNS/pdns_recursor-ansible.git
collections:
- name: vladgh.samba

View File

@ -1,10 +1,10 @@
---
- import_playbook: playbooks/sssd.yml
- import_playbook: playbooks/wireguard.yml
- import_playbook: playbooks/server.yml
- import_playbook: playbooks/dhcpd.yml
- import_playbook: playbooks/dns.yml
- import_playbook: playbooks/HashicorpStack.yml
- import_playbook: playbooks/nas.yml
- import_playbook: playbooks/autofs.yml
- import_playbook: playbooks/sssd.yml
- import_playbook: playbooks/database.yml
- import_playbook: playbooks/rsyncd.yml
- import_playbook: playbooks/music-player.yml
- import_playbook: playbooks/dhcpd.yml
- import_playbook: playbooks/user_config.yml

View File

@ -1,18 +1,44 @@
[homelab]
[DNS]
oscar-dev
[database_active]
oscar-dev
[database_standby]
gerard-dev
[database:children]
database_active
database_standby
[wireguard:children]
staging
[NAS]
nas-dev
[cluster]
oscar-dev
gerard-dev
[homelab:children]
NAS
cluster
[VPS]
merlin-dev
[database]
oscar-dev
[wireguard:children]
[region:children]
homelab
VPS
staging
[staging]
oscar-dev
gerard-dev
merlin-dev
nas-dev
[production]

View File

@ -6,14 +6,14 @@
"tags": [
"homer.enable=true",
"homer.name=Diskstation",
"homer.url=https://syno.ducamps.win",
"homer.logo=https://syno.ducamps.win/webman/resources/images/icon_dsm_96.png",
"homer.url=https://syno.ducamps.eu",
"homer.logo=https://syno.ducamps.eu/webman/resources/images/icon_dsm_96.png",
"homer.service=Application",
"homer.target=_blank",
"traefik.enable=true",
"traefik.http.routers.syno.rule=Host(`syno.ducamps.win`)",
"traefik.http.routers.syno.tls.domains[0].sans=syno.ducamps.win",
"traefik.http.routers.syno.rule=Host(`syno.ducamps.eu`)",
"traefik.http.routers.syno.tls.domains[0].sans=syno.ducamps.eu",
"traefik.http.routers.syno.tls.certresolver=myresolver",
"traefik.http.routers.syno.entrypoints=web,websecure"
]

117
docs/ADR/004-DNS.md Normal file
View File

@ -0,0 +1,117 @@
# DNS
## 001 Recursor out off NAS
### Status
done
### Context
Currently the main local-domain DNS is located on the NAS.
Goals:
- Avoid a DNS outage when the NAS reboots (my Synology is 10 years old and a little slow to reboot); moreover, during a NAS reboot we lose the ad-block DNS in the Nomad cluster because Nomad depends on the NFS share.
- Remove the direct redirection to the service.consul DNS and the iptables rule used to redirect port 53 to Consul on gerard; instead the new DNS can forward directly to an active Consul node on port 8300.
#### DNS software
- must support DHCP dynamic updates
- must be able to forward a domain to a port other than 53
### Decision
We will migrate the main domain DNS from the NAS to gerard (PowerDNS).
PowerDNS provides two distinct binaries: one for the authoritative server and another for the recursor.
The goal is to first migrate the recursive part from the Synology to a physical service,
and in a second step migrate the authoritative server into the Nomad cluster.
### Consequences
Before moving the authoritative server we need to remove the database DNS dependency (create Consul services for the databases).
The iptables rule on gerard must be deleted before deployment.
## 002 each node request self consul client for consul dns query
### Status
done
### Context
To avoid a cluster failure when the DNS recursor is down,
each cluster member should query its own local Consul agent
first to resolve Consul DNS queries.
### Decision
Implement systemd-resolved on every cluster member and add a DNS redirection.
### Consequences
Need to modify the Ansible system role to activate systemd-resolved, and the Consul role to configure the redirection.
## 003 migrate authority DNS from NAS to cluster
### Status
done
### Context
We currently have three authoritative domains on the NAS:
- ducamps.win
- ducamps.eu
- lan.ducamps.eu
We could migrate the authoritative DNS into the cluster.
ducamps.win and ducamps.eu are only used for application access, so they have no dependency on the cluster build.
We need to study the cluster-build dependency for lan.ducamps.eu -> in any case, when building from scratch we need to use IPs.
We need a keepalived IP and must check there is no conflict if it is hosted on the same machine as Pi-hole -> OK: it does not need to listen on port 53, it is only queried by the recursor.
The authoritative DNS will depend on storage (less problematic than the recursor).
### Decision
### Consequences
## 004 migrate recurson in cluster
### Status
done
### Context
Now that the cluster no longer depends on the recursor (each node queries its own Consul agent for Consul queries),
we need to study whether we can migrate the recursor into Nomad without breaking any dependency.
Advantages:
- the recursor can move to another client in case of failure
Against:
- this job needs a keepalived IP like Pi-hole
- *we lose the recursor if the Nomad cluster is lost*
### Decision
Put one recursor in the cluster in front of the authoritative server, and keep the recursor on gerard for better redundancy.
### Consequences
## 005 physical Recursor location
### Status
done
### Context
Following the NAS migration, the physical DNS recursor was installed directly on the NAS; this introduces a SPOF: when the NAS fails, the recursors on the Nomad cluster are stopped as well because of the volume dependency.
### Decision
Put the physical recursor on a cluster node so that, for a DNS outage to occur, both the NAS and that Nomad node must be down at the same time.

42
docs/ADR/005-NAS.md Normal file
View File

@ -0,0 +1,42 @@
# NAS
## 001 New Nas spec
### Status
In progress
### Context
Storage:
- The data filesystem will be btrfs.
- Study whether to keep the root filesystem on ext4.
- Need to use LVM under btrfs for the possibility of adding a cache later (a cache on cold data is useless at the beginning; maybe a write cache in the future).
- Hot data (Nomad, documents, freshly downloaded files, music?) on SSD; cold data (films, series, photos) on HDD.
- At least 2 HDDs and 2 SSDs.
Hardware:
- 2.5 Gbps networking would be good for future evolution.
- At least 4 GB of RAM (expandability would be appreciated).
Software:
- Must be able to install a custom Linux distribution.
### Decision
- Due to its form factor, power consumption and SSD capability, my choice is the ASUSTOR Nimbustor 2 Gen 2 AS5402; it matches the needs and is less expensive than a DIY NAS.
- Only buy one additional 2 TB SSD to store the system and hot data.
### Consequences
Need to migrate the data while keeping the same disks:
- install the system
- copy all data from the 2 TB HDD to the SSD, then format the 2 TB HDD
- copy the downloaded data from the 4 TB HDD to the SSD
- copy the series to the 2 TB HDD and copy the films to an external hard drive

View File

@ -0,0 +1,25 @@
# Docker Pull-Through Cache
## 001 architecture consideration
## Status
Accepted
## Context
Docker Hub enforces a pull rate limit; if something goes wrong on our infrastructure we can quickly hit this limit. The solution is to implement a pull-through proxy.
### Decision
Create two container tasks: one pull-through cache for Docker Hub and one for GHCR.
We could put both registries behind Traefik under port 5000, but this would add a Traefik dependency on rebuild,
so to begin we will serve them on two different static ports.
## Consequences
- these registries need to be started first on cluster creation
- all job images need to be updated with the local proxy URL

View File

@ -3,30 +3,34 @@
```mermaid
flowchart LR
subgraph External
recursor
GandiDns[ Gandi ducamps.win]
externalRecursor[recursor]
GandiDns[ hetzner ducamps.win]
end
subgraph Internal
pihole[pihole]----ducamps.win-->NAS
pihole[pihole]--ducamps.win-->NAS
pihole--service.consul-->consul[consul cluster]
pihole--->recursor
recursor--service.consul-->consul
DHCP --dynamic update--> NAS
NAS--service.consul-->consul
NAS
recursor--ducamps.win-->NAS
consul--service.consul--->consul
clients--->pihole
clients--->recursor
end
NAS --> recursor
pihole --> recursor
pihole --> externalRecursor
recursor-->External
```
## Detail
Pihole container in nomad cluster is set as primary DNS as add blocker secondary DNS is locate on NAS
The Pi-hole container in the Nomad cluster is set as the primary DNS ad blocker; the secondary DNS recursor is located on gerard.
DNS locate on NAS manage domain *ducamps.win* on local network pihole forward each request on *ducamps.win* to this DNS.
The DNS located on the NAS manages the *ducamps.win* domain on the local network; each recursor forwards requests for *ducamps.win* to this DNS.
Each DNS forward *service.consul* request to the consul cluster. On Pihole a template configure each consul server.
On diskstation every request as forward to one consul node this point is to improve we because we have a possibility of outtage. du to synology DNSServer limitation we only put a forward on port 53 so we need on the target consul node to redirect port 53 to 8300 by iptables rules.
Each DNS forward *service.consul* request to the consul cluster.
Each Consul node has a redirection in systemd-resolved pointing to its own Consul client.
A DHCP service performs dynamic updates on the NAS DNS upon lease delivery.
external recursor are on cloudflare and FDN
External recursors (Cloudflare and FDN) are configured on Pi-hole in case of local recursor failure.

View File

@ -0,0 +1,25 @@
# ansible vault management
Ansible vault passwords are encrypted with a GPG key stored in ansible/misc.
To renew the password, follow this workflow:
```sh
# Generate a new password for the default vault
pwgen -s 64 default-pw
# Re-encrypt all default vaults
ansible-vault rekey --new-vault-password-file ./default-pw \
$(git grep -l 'ANSIBLE_VAULT;1.1;AES256$')
# Save the new password in encrypted form
# (replace "RECIPIENT" with your email)
gpg -r RECIPIENT -o misc/vault-password.gpg -e default-pw
# Ensure the new password is usable
ansible-vault view misc/vaults/vault_hcloud.yml
# Remove the unencrypted password file
rm default-pw
```
script `vault-keyring-client.sh` is set in ansible.cfg as vault_password_file to decrypt the gpg file

View File

@ -0,0 +1,8 @@
# Troubleshooting
## issue with SMTP traefik port
Ensure that no other Traefik router (HTTP or TCP) is listening on the smtp
entrypoint or on all entrypoints; this can disturb the SMTP TLS connection.
See [here](https://doc.traefik.io/traefik/routing/routers/#entrypoints_1).

View File

@ -1,38 +0,0 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "registry.terraform.io/hetznercloud/hcloud" {
version = "1.42.1"
hashes = [
"h1:1AGk4CAeqdyF1D4vNyjarKSBoN2z+Y6ubUxzqiyc7qI=",
"zh:002e2e57c1425bb4cf620c6a80732ee071726d0d82d0523c5258dde3222113df",
"zh:03213d79fc2bcd94ac812ca22c1d1d6678132ab957d26a65c84ee52853059c02",
"zh:0785429efdb084cb4e5a0d899112764c21d2260391e82897d7e67c9e5deccc31",
"zh:12a5653b7a00f458b65b89b15d4517f785322ebb65b5a689fa8766042a09184c",
"zh:2dc7464290a623eb599cfbf731d13554448a7a824c2b1db16275f482d9059670",
"zh:35a7e19868a304d77ab192871ccaa45418c13a3aac301df8d9f57c1259913051",
"zh:368202d94a1104895c1d566e3f16edd55e05a09881fd4a20cd4854ca3593fee9",
"zh:431503e5055979aabf520675bb465496d934979c7a687e1cd3c8d2ae27bfa649",
"zh:45cede3c2147cfdc76d53853e07395c05b1feff8dca16a2f8f7f1fd151e2449f",
"zh:8b57869af18982af21f6f816e65e6057ec5055481b220147fdbe0959917ae112",
"zh:be9ba4813dcf640c0df04543a3c74b0db117fbd3dcc26140e252cf5157734945",
"zh:d3fb9ca398a153dc894caa94f95ef2e989350cf2bbfa29bc93ff2608cab44c1f",
"zh:fc690be8cbada1e99063ed1c6148f9a70ab341100a97ad2886f4826a951780d3",
"zh:ffa9470e41fa04ac667d4d830987aeed2070767d57f2414692c2dd395a405fba",
]
}
provider "registry.terraform.io/timohirt/hetznerdns" {
version = "2.2.0"
hashes = [
"h1:HyskQAglrOueur79gSCBgx9MNDOs0tz39aNYQiFgxz8=",
"zh:5bb0ab9f62be3ed92070235e507f3c290491d51391ef4edcc70df53b65a83019",
"zh:5ccdfac7284f5515ac3cff748336b77f21c64760e429e811a1eeefa8ebb86e12",
"zh:687c35665139ae37c291e99085be2e38071f6b355c4e1e8957c5a6a3bcdf9caf",
"zh:6de27f0d0d1513b3a4b7e81923b4a8506c52759bd466e2b4f8156997b0478931",
"zh:85770a9199a4c2d16ca41538d7a0f7a7bfc060678104a1faac19213e6f0a800c",
"zh:a5ff723774a9ccfb27d5766c5e6713537f74dd94496048c89c5d64dba597e59e",
"zh:bf9ab76fd37cb8aebb6868d73cbe8c08cee36fc25224cc1ef5949efa3c34b06c",
"zh:db998fe3bdcd4902e99fa470bb3f355883170cf4c711c8da0b5f1f4510f1be41",
]
}

View File

@ -1,262 +0,0 @@
locals {
defaultCname=hcloud_server.HomeLab2[0].name
}
resource "hetznerdns_zone" "externalZone" {
name = "ducamps.win"
ttl = 1700
}
resource "hetznerdns_zone" "externalZoneEU" {
name = "ducamps.eu"
ttl = 1700
}
resource "hetznerdns_record" "MX1Eu" {
zone_id = hetznerdns_zone.externalZoneEU.id
name = "@"
value = "20 mail"
type = "MX"
}
resource "hetznerdns_record" "mailEu" {
zone_id = hetznerdns_zone.externalZoneEU.id
name = "mail"
value = local.defaultCname
type= "CNAME"
}
resource "hetznerdns_record" "serverEU" {
zone_id = hetznerdns_zone.externalZoneEU.id
name = local.defaultCname
value = hcloud_server.HomeLab2[0].ipv4_address
type = "A"
}
resource "hetznerdns_record" "spfEu" {
zone_id = hetznerdns_zone.externalZoneEU.id
name = "@"
value = "\"v=spf1 ip4:${hcloud_server.HomeLab2[0].ipv4_address} ~all\""
type = "TXT"
}
resource "hetznerdns_record" "dkimRecordEu" {
zone_id = hetznerdns_zone.externalZoneEU.id
name = "mail._domainkey"
value = "\"v=DKIM1; k=rsa; p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0GadPljh+zM+Hf8MAf2wyj+h9p72aBFeFaiDhnswxO68fM9Uk6XhN4s1BkHLY5AWQh0SP1JDBaFWDfJiOV/27E3qJIa4KDHPZcgxgvo+SbfgNZq5qGIhKyqAAtyg/dI8IMKVOZ5Cevdv9VFrSF84xnTmDBCrWydPyV8D5+xA/bVna/AVCAVUeXVppyMPpC0s1HpRNJ0YaY23RH1KwChxvZY+BkanELSzTA8K0ATbIzwgQaK10/lc1S6EFvaSNG8sy6EIoondl6t+uiqU3bHgAW68r8snzl2gclG+uMkjXkH7YGPJzL9Co1o1MlKOHIONz89CCe0puIH4qaCo1G6EDwIDAQAB\""
type = "TXT"
}
resource "hetznerdns_record" "dmarcEU" {
zone_id = hetznerdns_zone.externalZoneEU.id
name = "_dmarc"
value = "\"v=DMARC1; p=none; rua=mailto:vincent@ducamps.eu; ruf=mailto:vincent@ducamps.eu; sp=none; ri=86400\""
type = "TXT"
}
resource "hetznerdns_record" "imapsAutodiscoverEU" {
zone_id = hetznerdns_zone.externalZoneEU.id
name = "_imaps._tcp"
value = "0 0 993 mail.ducamps.eu"
type = "SRV"
}
resource "hetznerdns_record" "submissionAutodiscoverEU" {
zone_id = hetznerdns_zone.externalZoneEU.id
name = "_submission._tcp"
value = "0 0 465 mail.ducamps.eu"
type = "SRV"
}
resource "hetznerdns_record" "rootalias" {
zone_id = hetznerdns_zone.externalZone.id
name = "@"
value = hcloud_server.HomeLab2[0].ipv4_address
type = "A"
}
resource "hetznerdns_record" "MX1" {
zone_id = hetznerdns_zone.externalZone.id
name = "@"
value = "20 spool.mail.gandi.net."
type = "MX"
}
resource "hetznerdns_record" "MX2" {
zone_id = hetznerdns_zone.externalZone.id
name = "@"
value = "50 fb.mail.gandi.net."
type = "MX"
}
resource "hetznerdns_record" "spf" {
zone_id = hetznerdns_zone.externalZone.id
name = "@"
value = "\"v=spf1 include:_mailcust.gandi.net ~all\""
type = "TXT"
}
resource "hetznerdns_record" "caldav" {
zone_id = hetznerdns_zone.externalZone.id
name = "_caldavs_tcp"
value = "10 20 443 www.${hetznerdns_zone.externalZone.name}."
type = "SRV"
}
resource "hetznerdns_record" "carddavs" {
zone_id = hetznerdns_zone.externalZone.id
name = "_carddavs_tcp"
value = "10 20 443 www.${hetznerdns_zone.externalZone.name}."
type = "SRV"
}
# ---------------------------------------------------------------------------
# External DNS zone records (Hetzner DNS).
# Apex A record points at the first HomeLab2 cloud server; every service
# hostname is a CNAME onto that apex (local.defaultCname).
# ---------------------------------------------------------------------------

# Apex A record for the zone.
resource "hetznerdns_record" "server" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "A"
  name    = local.defaultCname
  value   = hcloud_server.HomeLab2[0].ipv4_address
}

# --- Service CNAMEs, all aliased onto the apex record -----------------------

resource "hetznerdns_record" "dendrite" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "dendrite"
  value   = local.defaultCname
}

resource "hetznerdns_record" "diskstation" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "diskstation"
  value   = local.defaultCname
}

resource "hetznerdns_record" "drone" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "drone"
  value   = local.defaultCname
}

resource "hetznerdns_record" "file" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "file"
  value   = local.defaultCname
}

resource "hetznerdns_record" "ghostfolio" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "ghostfolio"
  value   = local.defaultCname
}

resource "hetznerdns_record" "git" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "git"
  value   = local.defaultCname
}

resource "hetznerdns_record" "grafana" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "grafana"
  value   = local.defaultCname
}

resource "hetznerdns_record" "hass" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "hass"
  value   = local.defaultCname
}

resource "hetznerdns_record" "jellyfin" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "jellyfin"
  value   = local.defaultCname
}

resource "hetznerdns_record" "supysonic" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "supysonic"
  value   = local.defaultCname
}

resource "hetznerdns_record" "syno" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "syno"
  value   = local.defaultCname
}

resource "hetznerdns_record" "vault" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "vault"
  value   = local.defaultCname
}

resource "hetznerdns_record" "vikunja" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "vikunja"
  value   = local.defaultCname
}

resource "hetznerdns_record" "www" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "www"
  value   = local.defaultCname
}

# NOTE(review): "ww" looks like a typo-squat convenience alias for "www" —
# confirm it is intentional before removing.
resource "hetznerdns_record" "ww" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "ww"
  value   = local.defaultCname
}

resource "hetznerdns_record" "paperless" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "paperless-ng"
  value   = local.defaultCname
}

# --- Gandi mail: DKIM selector CNAMEs ---------------------------------------

resource "hetznerdns_record" "gm1" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "gm1._domainkey"
  value   = "gm1.gandimail.net."
}

resource "hetznerdns_record" "gm2" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "gm2._domainkey"
  value   = "gm2.gandimail.net."
}

resource "hetznerdns_record" "gm3" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "gm3._domainkey"
  value   = "gm3.gandimail.net."
}

# --- Gandi mail: client endpoints -------------------------------------------

resource "hetznerdns_record" "imap" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "imap"
  value   = "mail.gandi.net."
}

resource "hetznerdns_record" "smtp" {
  zone_id = hetznerdns_zone.externalZone.id
  type    = "CNAME"
  name    = "smtp"
  value   = "mail.gandi.net."
}

View File

@ -10,12 +10,30 @@ vault-dev:
./vault/standalone_vault.sh $(FILE);\
fi
create-dev:
vagranup:
vagrant up
create-dev: vagranup DNS-stagging
make -C ansible deploy_staging
make -C terraform deploy_vault env=staging
VAULT_TOKEN=$(shell cat ~/vaultUnseal/staging/rootkey) python ./script/generate-vault-secret
create-dev-base: vagranup DNS-stagging
make -C ansible deploy_staging_base
destroy-dev:
vagrant destroy --force
serve:
mkdocs serve
DNS-stagging:
$(eval dns := $(shell dig oscar-dev.lan.ducamps.dev +short))
$(eval dns1 := $(shell dig nas-dev.lan.ducamps.dev +short))
sudo resolvectl dns virbr2 "$(dns)" "$(dns1)";sudo resolvectl domain virbr2 "~consul";sudo systemctl restart systemd-resolved.service
DNS-production:
sudo resolvectl dns virbr2 "";sudo resolvectl domain virbr2 "";sudo systemctl restart systemd-resolved.service

View File

@ -35,7 +35,7 @@ job "MQTT" {
]
}
config {
image = "eclipse-mosquitto"
image = "docker.service.consul:5000/library/eclipse-mosquitto"
ports = ["mosquittoWS", "mosquittoMQTT"]
volumes = [
"/mnt/diskstation/nomad/mosquitto:/mosquitto/data",

View File

@ -0,0 +1,62 @@
# Nomad job: Actual Budget (self-hosted budgeting app).
# Exposed via Traefik at https://budget.ducamps.eu and advertised on the
# Homer dashboard through the homer.* service tags.
job "actualbudget" {
datacenters = ["homelab"]
priority = 50
type = "service"
meta {
# Bump this value to force a redeploy without any other spec change.
forcedeploy = "0"
}
# Image is amd64-only.
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
# Schedule only on nodes whose class contains "cluster".
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "actualbudget"{
network {
mode = "host"
# Container listens on 5006; Nomad assigns a dynamic host port.
port "http" {
to = 5006
}
}
task "actualbudget-server" {
driver = "docker"
service {
name = "actualbudget"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`budget.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=budget.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
"homer.enable=true",
"homer.name=${NOMAD_TASK_NAME}",
"homer.service=Application",
"homer.target=_blank",
"homer.logo=https://budget.ducamps.eu/apple-touch-icon.png",
"homer.url=https://budget.ducamps.eu",
]
}
config {
# Pulled through the local registry mirror (ghcr.service.consul).
image = "ghcr.service.consul:5000/actualbudget/actual-server:latest"
ports = ["http"]
volumes = [
# Persistent application data on the NAS share.
"/mnt/diskstation/nomad/actualbudget:/data"
]
}
# No environment variables required; empty stanza kept intentionally.
env {
}
resources {
memory = 300
}
}
}
}

View File

@ -0,0 +1,239 @@
# Nomad job: borgmatic — scheduled Borg backups of the NAS /exports tree to a
# Hetzner storage box, driven by a cron file inside the container. Secrets
# (passphrase, SSH key) come from Vault via the "borgmatic" policy.
job "borgmatic" {
datacenters = ["homelab"]
priority = 50
type = "service"
meta {
# Bump this value to force a redeploy without any other spec change.
forcedeploy = "0"
}
# Must run on the NAS node class so /exports is locally mounted.
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "NAS"
}
group "borgmatic"{
vault{
policies= ["borgmatic"]
}
task "borgmatic" {
# On-demand backup via `nomad action` (create + prune).
action "manual-backup" {
command = "/usr/local/bin/borgmatic"
args = ["create",
"prune",
"--verbosity",
"1"
]
}
# List existing archives in the remote repository.
action "list-backup" {
command = "/usr/local/bin/borgmatic"
args = ["rlist"]
}
driver = "docker"
config {
image = "ghcr.service.consul:5000/borgmatic-collective/borgmatic"
volumes = [
"/exports:/exports",
"local/borgmatic.d:/etc/borgmatic.d",
"secret/id_rsa:/root/.ssh/id_rsa",
"secret/known_hosts:/root/.ssh/known_hosts",
# Persist the Borg chunk cache across container restarts.
"/exports/nomad/borgmatic:/root/.cache/borg",
]
}
# No static environment variables; everything comes from templates below.
env {
}
# Borg connection settings + repository passphrase from Vault,
# injected as environment variables.
template {
data= <<EOH
BORG_RSH="ssh -i /root/.ssh/id_rsa -p 23"
{{ with secret "secrets/data/nomad/borgmatic"}}
BORG_PASSPHRASE= {{.Data.data.passphrase}}
{{end}}
EOH
destination = "secrets/sample.env"
env = true
}
# Container crontab: nightly create+prune at 02:00, monthly check on the
# 1st at 23:00.
template {
data= <<EOH
0 2 * * * PATH=$PATH:/usr/local/bin /usr/local/bin/borgmatic create prune --verbosity 1
0 23 1 * * PATH=$PATH:/usr/local/bin /usr/local/bin/borgmatic check
EOH
destination = "local/borgmatic.d/crontab.txt"
}
# Main borgmatic configuration. Repository path switches on the node's
# meta.env between production and dev.
# NOTE(review): "backup_hamelab" reads like a typo for "backup_homelab",
# but it is the actual remote directory name — renaming it here would
# point at a different (empty) repository. Confirm before changing.
template {
data= <<EOH
# List of source directories to backup (required). Globs and
# tildes are expanded. Do not backslash spaces in path names.
source_directories:
- /exports/ebook
- /exports/homes
- /exports/music
- /exports/nomad
- /exports/photo
repositories:
- path: ssh://u304977@u304977.your-storagebox.de/./{{if eq "production" (env "meta.env") }}backup_hamelab{{else}}backup_homelab_dev{{end}}
label: {{if eq "production" (env "meta.env") }}backup_hamelab{{else}}backup_homelab_dev{{end}}
exclude_patterns:
- '*/nomad/jellyfin/cache'
- '*nomad/loki/'
- '*nomad/prometheus'
- '*nomad/registry'
- '*nomad/pacoloco'
- '*nomad/pihole'
- '*nomad/jellyfin/config/data/library*'
match_archives: '*'
archive_name_format: '{{ env "node.datacenter" }}-{now:%Y-%m-%dT%H:%M:%S.%f}'
extra_borg_options:
# Extra command-line options to pass to "borg init".
# init: --extra-option
# Extra command-line options to pass to "borg prune".
# prune: --extra-option
# Extra command-line options to pass to "borg compact".
# compact: --extra-option
# Extra command-line options to pass to "borg create".
create: --progress --stats
# Extra command-line options to pass to "borg check".
# check: --extra-option
# Keep all archives within this time interval.
# keep_within: 3H
# Number of secondly archives to keep.
# keep_secondly: 60
# Number of minutely archives to keep.
# keep_minutely: 60
# Number of hourly archives to keep.
# keep_hourly: 24
# Number of daily archives to keep.
keep_daily: 7
# Number of weekly archives to keep.
keep_weekly: 4
# Number of monthly archives to keep.
# keep_monthly: 6
# Number of yearly archives to keep.
# keep_yearly: 1
checks:
- name: repository
# - archives
# check_repositories:
# - user@backupserver:sourcehostname.borg
# check_last: 3
# output:
# color: false
# List of one or more shell commands or scripts to execute
# before creating a backup, run once per configuration file.
# before_backup:
# - echo "Starting a backup."
# List of one or more shell commands or scripts to execute
# before pruning, run once per configuration file.
# before_prune:
# - echo "Starting pruning."
# List of one or more shell commands or scripts to execute
# before compaction, run once per configuration file.
# before_compact:
# - echo "Starting compaction."
# List of one or more shell commands or scripts to execute
# before consistency checks, run once per configuration file.
# before_check:
# - echo "Starting checks."
# List of one or more shell commands or scripts to execute
# before extracting a backup, run once per configuration file.
# before_extract:
# - echo "Starting extracting."
# List of one or more shell commands or scripts to execute
# after creating a backup, run once per configuration file.
# after_backup:
# - echo "Finished a backup."
# List of one or more shell commands or scripts to execute
# after compaction, run once per configuration file.
# after_compact:
# - echo "Finished compaction."
# List of one or more shell commands or scripts to execute
# after pruning, run once per configuration file.
# after_prune:
# - echo "Finished pruning."
# List of one or more shell commands or scripts to execute
# after consistency checks, run once per configuration file.
# after_check:
# - echo "Finished checks."
# List of one or more shell commands or scripts to execute
# after extracting a backup, run once per configuration file.
# after_extract:
# - echo "Finished extracting."
# List of one or more shell commands or scripts to execute
# when an exception occurs during a "prune", "compact",
# "create", or "check" action or an associated before/after
# hook.
# on_error:
# - echo "Error during prune/compact/create/check."
# List of one or more shell commands or scripts to execute
# before running all actions (if one of them is "create").
# These are collected from all configuration files and then
# run once before all of them (prior to all actions).
# before_everything:
# - echo "Starting actions."
# List of one or more shell commands or scripts to execute
# after running all actions (if one of them is "create").
# These are collected from all configuration files and then
# run once after all of them (after any action).
# after_everything:
# - echo "Completed actions."
EOH
destination = "local/borgmatic.d/config.yaml"
}
# SSH private key for the storage box, rendered from Vault.
template {
data= <<EOH
{{ with secret "secrets/data/nomad/borgmatic"}}
{{.Data.data.privatekey}}
{{end}}
EOH
destination = "secret/id_rsa"
perms= "700"
}
# Pinned host keys for the Hetzner storage box (port 23).
template {
data= <<EOH
[u304977.your-storagebox.de]:23 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIICf9svRenC/PLKIL9nk6K/pxQgoiFC41wTNvoIncOxs
[u304977.your-storagebox.de]:23 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA5EB5p/5Hp3hGW1oHok+PIOH9Pbn7cnUiGmUEBrCVjnAw+HrKyN8bYVV0dIGllswYXwkG/+bgiBlE6IVIBAq+JwVWu1Sss3KarHY3OvFJUXZoZyRRg/Gc/+LRCE7lyKpwWQ70dbelGRyyJFH36eNv6ySXoUYtGkwlU5IVaHPApOxe4LHPZa/qhSRbPo2hwoh0orCtgejRebNtW5nlx00DNFgsvn8Svz2cIYLxsPVzKgUxs8Zxsxgn+Q/UvR7uq4AbAhyBMLxv7DjJ1pc7PJocuTno2Rw9uMZi1gkjbnmiOh6TTXIEWbnroyIhwc8555uto9melEUmWNQ+C+PwAK+MPw==
[u304977.your-storagebox.de]:23 ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAGK0po6usux4Qv2d8zKZN1dDvbWjxKkGsx7XwFdSUCnF19Q8psHEUWR7C/LtSQ5crU/g+tQVRBtSgoUcE8T+FWp5wBxKvWG2X9gD+s9/4zRmDeSJR77W6gSA/+hpOZoSE+4KgNdnbYSNtbZH/dN74EG7GLb/gcIpbUUzPNXpfKl7mQitw==
EOH
destination = "secret/known_hosts"
perms="700"
}
resources {
memory = 300
# Allow bursting during large create/check runs.
memory_max = 1000
}
}
}
}

View File

@ -25,11 +25,11 @@ job "chainetv" {
"homer.service=Application",
"homer.icon=fas fa-tv",
"homer.target=_blank",
"homer.url=https://www.ducamps.win/${NOMAD_JOB_NAME}",
"homer.url=https://www.ducamps.eu/${NOMAD_JOB_NAME}",
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`www.ducamps.win`)&&PathPrefix(`/chainetv`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=www.ducamps.win",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`www.ducamps.eu`)&&PathPrefix(`/chainetv`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=www.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=chainetv,chainetvStrip",
@ -39,7 +39,7 @@ job "chainetv" {
]
}
config {
image = "ducampsv/chainetv:latest"
image = "docker.service.consul:5000/ducampsv/chainetv:latest"
ports = ["http"]
}
resources {

View File

@ -1,5 +1,5 @@
job "dockermailserver" {
datacenters = ["hetzner"]
datacenters = ["homelab"]
priority = 90
type = "service"
meta {
@ -9,7 +9,11 @@ job "dockermailserver" {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "dockermailserver" {
network {
mode = "host"
@ -115,7 +119,7 @@ job "dockermailserver" {
task "docker-mailserver" {
driver = "docker"
config {
image = "ghcr.io/docker-mailserver/docker-mailserver:edge"
image = "ghcr.service.consul:5000/docker-mailserver/docker-mailserver:latest"
ports = ["smtp", "esmtp", "imap","rspamd"]
volumes = [
"/mnt/diskstation/nomad/dms/mail-data:/var/mail",
@ -133,7 +137,7 @@ job "dockermailserver" {
env {
OVERRIDE_HOSTNAME = "mail.ducamps.eu"
DMS_VMAIL_UID = 1000000
DMS_VMAIL_GID = 100
DMS_VMAIL_GID = 984
SSL_TYPE= "letsencrypt"
LOG_LEVEL="info"
POSTMASTER_ADDRESS="vincent@ducamps.eu"
@ -141,6 +145,8 @@ job "dockermailserver" {
ENABLE_OPENDKIM=0
ENABLE_OPENDMARC=0
ENABLE_POLICYD_SPF=0
ENABLE_UPDATE_CHECK=0
UPDATE_CHECK_INTERVAL="1d"
RSPAMD_CHECK_AUTHENTICATED=0
}
@ -167,7 +173,7 @@ submissions/inet/smtpd_upstream_proxy_protocol=haproxy
}
template {
data = <<EOH
haproxy_trusted_networks = 10.0.0.0/24, 127.0.0.0/8, 172.17.0.1
haproxy_trusted_networks = 10.0.0.0/24, 127.0.0.0/8, 172.17.0.1, 192.168.1.0/24
haproxy_timeout = 3 secs
service imap-login {
inet_listener imaps {

View File

@ -1,6 +1,6 @@
job "filestash" {
datacenters = ["hetzner"]
datacenters = ["homelab"]
priority = 50
type = "service"
meta {
@ -10,7 +10,11 @@ job "filestash" {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "filestash" {
network {
@ -31,12 +35,12 @@ job "filestash" {
"homer.enable=true",
"homer.name=FileStash",
"homer.service=Application",
"homer.url=http://file.ducamps.win",
"homer.logo=http://file.ducamps.win/assets/logo/apple-touch-icon.png",
"homer.url=http://file.ducamps.eu",
"homer.logo=http://file.ducamps.eu/assets/logo/apple-touch-icon.png",
"homer.target=_blank",
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`file.ducamps.win`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=file.ducamps.win",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`file.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=file.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
@ -44,7 +48,7 @@ job "filestash" {
]
}
config {
image = "machines/filestash"
image = "docker.service.consul:5000/machines/filestash"
ports = ["http"]
volumes = [
"/mnt/diskstation/nomad/filestash:/app/data/state"

View File

@ -27,7 +27,7 @@ job "ghostfolio" {
task "redis" {
driver = "docker"
config {
image = "redis"
image = "docker.service.consul:5000/library/redis"
ports = ["redis"]
}
resources {
@ -42,8 +42,8 @@ job "ghostfolio" {
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.win`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.win",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
@ -51,7 +51,7 @@ job "ghostfolio" {
]
}
config {
image = "ghostfolio/ghostfolio:latest"
image = "docker.service.consul:5000/ghostfolio/ghostfolio:latest"
ports = ["http"]
volumes = [
]
@ -69,7 +69,7 @@ job "ghostfolio" {
template {
data= <<EOH
{{ with secret "secrets/data/database/ghostfolio"}}
DATABASE_URL = postgresql://ghostfolio:{{.Data.data.password}}@db1.ducamps.win:5432/ghostfolio?connect_timeout=300&sslmode=prefer
DATABASE_URL = postgresql://ghostfolio:{{.Data.data.password}}@active.db.service.consul/ghostfolio?connect_timeout=300&sslmode=prefer
{{end}}
{{ with secret "secrets/data/nomad/ghostfolio"}}
ACCESS_TOKEN_SALT = {{.Data.data.token}}
@ -80,6 +80,7 @@ job "ghostfolio" {
}
resources {
memory = 400
memory_max = 600
}
}

View File

@ -3,6 +3,11 @@ job "homeassistant" {
datacenters = ["homelab"]
priority = 90
type = "service"
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
meta {
forcedeploy = "0"
}
@ -38,10 +43,10 @@ job "homeassistant" {
"homer.subtitle=Home Assistant",
"homer.logo=https://raw.githubusercontent.com/home-assistant/assets/master/logo/logo-small.svg",
"homer.target=_blank",
"homer.url=https://${NOMAD_TASK_NAME}.ducamps.win",
"homer.url=https://${NOMAD_TASK_NAME}.ducamps.eu",
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.ducamps.win`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.ducamps.win",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.ducamps.eu`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_TASK_NAME}.entrypoints=web,websecure",
]
@ -52,7 +57,7 @@ job "homeassistant" {
}
}
config {
image = "homeassistant/home-assistant:stable"
image = "docker.service.consul:5000/homeassistant/home-assistant:stable"
ports = ["http", "coap"]
privileged = "true"
network_mode = "host"

View File

@ -0,0 +1,146 @@
# Nomad job: Immich photo management — three tasks in one group (server,
# machine-learning, redis) sharing the group network so they reach each other
# via NOMAD_* port env vars. Storage is on CSI volumes; the database is
# PostgreSQL reached through Consul (active.db.service.consul).
job "immich" {
datacenters = ["homelab"]
priority = 50
type = "service"
meta {
# Bump this value to force a redeploy without any other spec change.
forcedeploy = "0"
}
# Images are amd64-only.
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
group "immich" {
network {
mode = "host"
port "http" {
to = 3001
}
port "redis" {
to = 6379
}
port "machinelearning" {
to = 3003
}
}
# Uploaded originals.
volume "immich-upload" {
type = "csi"
source = "immich-upload"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
# Machine-learning model cache.
volume "immich-cache" {
type = "csi"
source = "immich-cache"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
# Existing photo library, mounted read/write into the server task.
volume "photo" {
type = "csi"
source = "photo"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
vault {
policies = ["immich"]
}
task "immich-server" {
driver = "docker"
service {
name = "immich"
port = "http"
tags = [
"homer.enable=true",
"homer.name=immich",
"homer.service=Application",
"homer.logo=https://immich.ducamps.eu/favicon-144.png",
"homer.target=_blank",
"homer.url=https://immich.ducamps.eu",
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
]
}
volume_mount {
volume = "immich-upload"
destination = "/usr/src/app/upload"
}
volume_mount {
volume = "photo"
destination = "/photo"
}
config {
# NOTE(review): server pins tag ":release" while the ML task below uses
# ":main" — versions may drift apart; confirm this is intentional.
image = "ghcr.service.consul:5000/immich-app/immich-server:release"
ports = ["http"]
volumes = [
"/etc/localtime:/etc/localtime"
]
}
# DB credentials from Vault; redis/ML endpoints resolved from the
# group's allocated ports.
template {
data = <<EOH
{{ with secret "secrets/data/database/immich"}}
DB_PASSWORD= {{ .Data.data.password }}
{{end}}
DB_DATABASE_NAME= immich
DB_USERNAME= immich
DB_HOSTNAME= active.db.service.consul
REDIS_HOSTNAME = {{env "NOMAD_IP_redis"}}
REDIS_PORT = {{env "NOMAD_HOST_PORT_redis"}}
IMMICH_MACHINE_LEARNING_URL = http://{{ env "NOMAD_ADDR_machinelearning"}}
EOH
destination = "secrets/immich.env"
env = true
}
resources {
memory = 600
memory_max = 1800
}
}
task "immich-machine-learning" {
driver = "docker"
volume_mount {
volume = "immich-cache"
destination = "/cache"
}
config {
image = "ghcr.service.consul:5000/immich-app/immich-machine-learning:main"
ports = ["machinelearning"]
}
# Same env shape as the server task (minus the ML URL).
template {
data = <<EOH
{{ with secret "secrets/data/database/immich"}}
DB_PASSWORD= {{ .Data.data.password }}
{{end}}
DB_DATABASE_NAME= immich
DB_USERNAME= immich
DB_HOSTNAME= active.db.service.consul
REDIS_HOSTNAME = {{env "NOMAD_IP_redis"}}
REDIS_PORT = {{env "NOMAD_HOST_PORT_redis"}}
EOH
destination = "secrets/immich.env"
env = true
}
resources {
memory = 200
memory_max = 1800
}
}
# Ephemeral redis used as Immich's queue/cache backend.
task "redis" {
driver = "docker"
config {
image="docker.service.consul:5000/library/redis:6.2-alpine"
ports = ["redis"]
}
resources {
memory = 50
}
}
}
}

View File

@ -2,6 +2,7 @@ job "jellyfin" {
datacenters = ["homelab"]
priority = 30
type = "service"
meta {
forcedeploy = "1"
}
@ -9,6 +10,11 @@ job "jellyfin" {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group jellyfin-vue {
network {
mode = "host"
@ -26,22 +32,22 @@ job "jellyfin" {
"homer.name=${NOMAD_TASK_NAME}",
"homer.service=Application",
"homer.target=_blank",
"homer.logo=https://${NOMAD_TASK_NAME}.ducamps.win/icon.png",
"homer.url=https://${NOMAD_TASK_NAME}.ducamps.win",
"homer.logo=https://${NOMAD_TASK_NAME}.ducamps.eu/icon.png",
"homer.url=https://${NOMAD_TASK_NAME}.ducamps.eu",
"traefik.enable=true",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.ducamps.win`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.ducamps.win",
"traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.ducamps.eu`)",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_TASK_NAME}.entrypoints=web,websecure",
]
}
config {
image = "ghcr.io/jellyfin/jellyfin-vue:unstable"
image = "ghcr.service.consul:5000/jellyfin/jellyfin-vue:unstable"
ports = ["http"]
}
env {
DEFAULT_SERVERS = "${NOMAD_TASK_NAME}.ducamps.win"
DEFAULT_SERVERS = "${NOMAD_TASK_NAME}.ducamps.eu"
}
resources {
@ -70,11 +76,11 @@ job "jellyfin" {
"homer.name=jellyfin",
"homer.service=Application",
"homer.target=_blank",
"homer.logo=https://${NOMAD_JOB_NAME}.ducamps.win/web/assets/img/banner-light.png",
"homer.url=https://${NOMAD_JOB_NAME}.ducamps.win",
"homer.logo=https://${NOMAD_JOB_NAME}.ducamps.eu/web/assets/img/banner-light.png",
"homer.url=https://${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.win`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.win",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
@ -82,13 +88,13 @@ job "jellyfin" {
]
}
config {
image = "jellyfin/jellyfin"
image = "docker.service.consul:5000/jellyfin/jellyfin"
ports = ["http"]
volumes = [
"/mnt/diskstation/nomad/jellyfin/config:/config",
"/mnt/diskstation/nomad/jellyfin/cache:/cache",
"/mnt/diskstation/media/:/media",
"/mnt/diskstation/music/:/media2"
"/mnt/diskstation/media:/media",
"/mnt/diskstation/music:/music",
]
devices = [
{

1
nomad-job/apps/makefile Symbolic link
View File

@ -0,0 +1 @@
../makefile

View File

@ -0,0 +1,95 @@
# Nomad job: Mealie recipe manager — served behind Traefik at
# https://mealie.ducamps.eu, authenticated via Authelia OIDC, backed by
# PostgreSQL reached through Consul (active.db.service.consul).
job "mealie" {
datacenters = ["homelab"]
priority = 50
type = "service"
meta {
# Bump this value to force a redeploy without any other spec change.
forcedeploy = "0"
}
# Image is amd64-only.
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
group "mealie" {
network {
mode = "host"
# Container listens on 9000; Nomad assigns a dynamic host port.
port "http" {
to = 9000
}
}
# Persistent application data (recipes, images) on a CSI volume.
volume "mealie-data" {
type = "csi"
source = "mealie-data"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
vault {
policies = ["mealie"]
}
task "mealie-server" {
driver = "docker"
service {
name = "mealie"
port = "http"
tags = [
"homer.enable=true",
"homer.name=Mealie",
"homer.service=Application",
"homer.subtitle=Mealie",
"homer.logo=https://mealie.ducamps.eu/favicon.ico",
"homer.target=_blank",
"homer.url=https://${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
]
}
config {
# Fixed: pull through the local registry mirror like every other job
# (was "ghcr.io/mealie-recipes/mealie", which bypassed the mirror).
image = "ghcr.service.consul:5000/mealie-recipes/mealie"
ports = ["http"]
}
volume_mount {
volume = "mealie-data"
destination = "/app/data"
}
env {
PUID = "1000001"
PGID = "1000001"
TZ = "Europe/Paris"
MAX_WORKERS = 1
WEB_CONCURRENCY = 1
BASE_URL = "https://mealie.ducamps.eu"
# OIDC login via Authelia; group names map to Mealie roles.
OIDC_USER_GROUP = "MealieUsers"
OIDC_ADMIN_GROUP = "MealieAdmins"
OIDC_AUTH_ENABLED = "True"
OIDC_SIGNUP_ENABLED = "true"
OIDC_CONFIGURATION_URL = "https://auth.ducamps.eu/.well-known/openid-configuration"
OIDC_CLIENT_ID = "mealie"
OIDC_AUTO_REDIRECT = "false"
OIDC_PROVIDER_NAME = "authelia"
DB_ENGINE = "postgres"
POSTGRES_USER = "mealie"
POSTGRES_SERVER = "active.db.service.consul"
POSTGRES_PORT = 5432
POSTGRES_DB = "mealie"
LOG_LEVEL = "DEBUG"
}
# DB password and OIDC client secret from Vault.
template {
data = <<EOH
{{ with secret "secrets/data/database/mealie"}}POSTGRES_PASSWORD= "{{ .Data.data.password }}" {{end}}
{{ with secret "secrets/data/authelia/mealie"}}OIDC_CLIENT_SECRET= "{{ .Data.data.password }}" {{end}}
EOH
destination = "secrets/var.env"
env = true
}
resources {
memory = 400
}
}
}
}

View File

@ -6,7 +6,11 @@ job "pacoloco" {
meta {
forcedeploy = "0"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "pacoloco" {
network {
mode = "host"
@ -21,17 +25,17 @@ job "pacoloco" {
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`arch.ducamps.win`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=arch.ducamps.win",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`arch.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=arch.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
]
}
config {
image = "ducampsv/pacoloco"
image = "docker.service.consul:5000/ducampsv/pacoloco"
ports = ["http"]
volumes = [
"/mnt/diskstation/archMirror:/var/cache/pacoloco",
"/mnt/diskstation/nomad/pacoloco:/var/cache/pacoloco",
"local/pacoloco.yaml:/etc/pacoloco.yaml"
]
@ -49,6 +53,8 @@ repos:
- http://archlinux.mailtunnel.eu
- http://mirror.cyberbits.eu/archlinux
- http://mirrors.niyawe.de/archlinux
archlinux_armv8:
url: http://mirror.archlinuxarm.org
archlinux_armv7h:
url: http://mirror.archlinuxarm.org
prefetch:

View File

@ -6,7 +6,11 @@ job "paperless-ng" {
meta {
forcedeploy = "0"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
@ -29,7 +33,7 @@ job "paperless-ng" {
task "redis" {
driver = "docker"
config {
image = "redis"
image = "docker.service.consul:5000/library/redis"
ports = ["redis"]
}
resources {
@ -43,16 +47,17 @@ job "paperless-ng" {
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.win`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.win",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia",
"homer.enable=true",
"homer.name=Paperless",
"homer.service=Application",
"homer.logo=https://${NOMAD_JOB_NAME}.ducamps.win/static/frontend/fr-FR/apple-touch-icon.png",
"homer.logo=https://${NOMAD_JOB_NAME}.ducamps.eu/static/frontend/fr-FR/apple-touch-icon.png",
"homer.target=_blank",
"homer.url=https://${NOMAD_JOB_NAME}.ducamps.win",
"homer.url=https://${NOMAD_JOB_NAME}.ducamps.eu",
]
check {
type = "http"
@ -63,7 +68,7 @@ job "paperless-ng" {
}
}
config {
image = "ghcr.io/paperless-ngx/paperless-ngx"
image = "ghcr.service.consul:5000/paperless-ngx/paperless-ngx"
ports = ["http"]
volumes = [
"/mnt/diskstation/nomad/paperless-ng/media:/usr/src/paperless/media",
@ -75,13 +80,16 @@ job "paperless-ng" {
}
env {
PAPERLESS_REDIS = "redis://${NOMAD_ADDR_redis}"
PAPERLESS_DBHOST = "db1.ducamps.win"
PAPERLESS_DBHOST = "active.db.service.consul"
PAPERLESS_DBNAME = "paperless"
PAPERLESS_DBUSER = "paperless"
PAPERLESS_OCR_LANGUAGE = "fra"
PAPERLESS_CONSUMER_POLLING = "60"
PAPERLESS_URL = "https://${NOMAD_JOB_NAME}.ducamps.win"
PAPERLESS_URL = "https://${NOMAD_JOB_NAME}.ducamps.eu"
PAPERLESS_ALLOWED_HOSTS = "192.168.1.42,192.168.1.40"
PAPERLESS_ENABLE_HTTP_REMOTE_USER = "true"
PAPERLESS_HTTP_REMOTE_USER_HEADER_NAME = "HTTP_REMOTE_USER"
PAPERLESS_LOGOUT_REDIRECT_URL= "https://auth.ducamps.eu/logout"
}
template {
@ -93,6 +101,7 @@ job "paperless-ng" {
}
resources {
memory = 950
memory_max = 1500
cpu = 2000
}
}

View File

@ -6,6 +6,11 @@ job "radicale" {
meta {
forcedeploy = "0"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "radicale" {
network {
mode = "host"
@ -24,12 +29,12 @@ job "radicale" {
"homer.service=Application",
"homer.logo=https://radicale.org/assets/logo.svg",
"homer.target=_blank",
"homer.url=https://www.ducamps.win/${NOMAD_JOB_NAME}",
"homer.url=https://www.ducamps.eu/${NOMAD_JOB_NAME}",
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`www.ducamps.win`)&&PathPrefix(`/radicale`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=www.ducamps.win",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`www.ducamps.eu`)&&PathPrefix(`/radicale`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=www.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=radicaleHeader,radicalestrip",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
@ -39,11 +44,11 @@ job "radicale" {
]
}
config {
image = "tomsquest/docker-radicale"
image = "docker.service.consul:5000/tomsquest/docker-radicale"
ports = ["http"]
volumes = [
"local/config:/config/config",
"/mnt/diskstation/CardDav:/data"
"/mnt/diskstation/nomad/radicale:/data"
]
}

View File

@ -23,7 +23,7 @@ job "torrent" {
}
}
task "bittorent" {
driver = "podman"
driver = "docker"
service {
name = "bittorent"
port = "http"
@ -31,38 +31,37 @@ job "torrent" {
tags = [
"homer.enable=true",
"homer.name=torrent",
"homer.url=https://torrent.ducamps.win",
"homer.url=https://torrent.ducamps.eu",
"homer.service=Application",
"homer.logo=https://${NOMAD_JOB_NAME}.ducamps.win/images/favicon-196x196.png",
"homer.logo=https://fleet.linuxserver.io/images/linuxserver_rutorrent.png",
"homer.target=_blank",
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.win`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.win",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia-basic",
]
}
user = "root"
config {
image = "docker.io/crazymax/rtorrent-rutorrent:latest"
privileged = "true"
ulimit {
nofile = "8192:8192"
}
image = "docker.service.consul:5000/crazymax/rtorrent-rutorrent:edge"
ports = [
"http",
"torrent",
"ecoute"
]
volumes = [
"/mnt/hetzner/storagebox/rutorrentConfig:/data",
"/opt/rutorrentConfig:/data",
"/mnt/hetzner/storagebox/file:/downloads"
]
}
env {
PUID = 1024
PGID = 984
PUID = 100001
PGID = 10
UMASK = 002
WEBUI_PORT = "8080"
}

View File

@ -0,0 +1,64 @@
# Nomad job: local rTorrent/ruTorrent instance pinned to the "oberon" node,
# storing data directly on that node's /exports paths. Runs under podman
# (unlike the main "torrent" job, which uses the docker driver).
job "rutorrentlocal" {
datacenters = ["homelab"]
priority = 80
type = "service"
meta {
# Bump this value to force a redeploy without any other spec change.
forcedeploy = "0"
}
# Pin to the oberon node where the data directories live.
constraint {
attribute = "${node.unique.name}"
operator = "set_contains"
value = "oberon"
}
group "bittorent" {
network {
mode = "host"
# Web UI (container port 8080, dynamic host port).
port "http" {
to = 8080
}
# BitTorrent peer port, fixed so port-forwarding works.
port "torrent" {
static = 6881
}
port "ecoute" {
static = 50000
}
}
task "bittorent" {
driver = "podman"
service {
name = "bittorentlocal"
port = "http"
address_mode= "host"
# No Traefik/Homer tags: this instance is local-only.
tags = [
]
}
user = "root"
config {
image = "docker.service.consul:5000/crazymax/rtorrent-rutorrent:edge"
ports = [
"http",
"torrent",
"ecoute"
]
volumes = [
"/exports/nomad/rutorrent/data:/data",
"/exports/nomad/rutorrent/downloads:/downloads"
]
}
env {
# Drop privileges inside the container to this UID/GID.
PUID = 100001
PGID = 10
UMASK = 002
WEBUI_PORT = "8080"
}
resources {
memory = 650
}
}
}
}

View File

@ -10,7 +10,11 @@ job "supysonic" {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "supysonic" {
network {
mode = "host"
@ -34,11 +38,11 @@ job "supysonic" {
"homer.service=Application",
"homer.icon=fas fa-headphones",
"homer.target=_blank",
"homer.url=http://${NOMAD_JOB_NAME}.ducamps.win",
"homer.url=http://${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.win`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.win",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
@ -49,7 +53,7 @@ job "supysonic" {
task "supysonic-frontend" {
driver = "docker"
config {
image = "nginx:alpine"
image = "docker.service.consul:5000/library/nginx:alpine"
ports = [
"http"
]
@ -92,7 +96,7 @@ http {
task "supysonic-server" {
driver = "docker"
config {
image = "ducampsv/supysonic:latest"
image = "docker.service.consul:5000/ducampsv/supysonic:latest"
ports = ["fcgi"]
force_pull = true
volumes = [
@ -105,16 +109,16 @@ http {
SUPYSONIC_DAEMON_ENABLED = "true"
SUPYSONIC_WEBAPP_LOG_LEVEL = "DEBUG"
SUPYSONIC_DAEMON_LOG_LEVEL = "INFO"
SUPYSONIC_LDAP_SERVER = "LDAP://ldap.ducamps.win"
SUPYSONIC_LDAP_BASE_DN = "dc=ducamps,dc=win"
SUPYSONIC_LDAP_USER_FILTER = "(&(memberOf=CN=SupysonicUsers,cn=groups,dc=ducamps,dc=win))"
SUPYSONIC_LDAP_ADMIN_FILTER= "(&(memberOf=CN=SupysonicAdmins,cn=groups,dc=ducamps,dc=win))"
SUPYSONIC_LDAP_SERVER = "LDAPS://ldaps.service.consul"
SUPYSONIC_LDAP_BASE_DN = "dc=ducamps,dc=eu"
SUPYSONIC_LDAP_USER_FILTER = "(&(memberOf=cn=SupysonicUsers,ou=groups,dc=ducamps,dc=eu))"
SUPYSONIC_LDAP_ADMIN_FILTER= "(&(memberOf=cn=SupysonicAdmins,ou=groups,dc=ducamps,dc=eu))"
}
template {
data = <<EOH
{{ with secret "secrets/data/database/supysonic"}}
SUPYSONIC_DB_URI = "postgres://supysonic:{{ .Data.data.password}}@db1.ducamps.win/supysonic"
SUPYSONIC_DB_URI = "postgres://supysonic:{{ .Data.data.password}}@active.db.service.consul/supysonic"
{{end}}
{{ with secret "secrets/data/nomad/supysonic"}}
SUPYSONIC_LDAP_BIND_DN = "{{ .Data.data.serviceAccountName }}"

View File

@ -10,7 +10,11 @@ job "syncthing" {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "syncthing" {
network {
@ -40,7 +44,7 @@ job "syncthing" {
]
}
config {
image = "linuxserver/syncthing"
image = "docker.service.consul:5000/linuxserver/syncthing"
ports = ["http"]
volumes = [
"/mnt/diskstation/nomad/syncthing/config:/config",
@ -48,6 +52,11 @@ job "syncthing" {
]
}
env{
PUID = 1000001
GUID = 1000001
}
resources {
memory = 200
}

View File

@ -7,7 +7,11 @@ job "tt-rss" {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "ttrss" {
ephemeral_disk {
@ -34,13 +38,13 @@ job "tt-rss" {
"homer.enable=true",
"homer.name=TT-RSS",
"homer.service=Application",
"homer.logo=https://framalibre.org/sites/default/files/styles/thumbnail/public/leslogos/ic_launcher_1.png",
"homer.logo=https://www.ducamps.eu/tt-rss/images/favicon-72px.png",
"homer.target=_blank",
"homer.url=https://www.ducamps.win/tt-rss",
"homer.url=https://www.ducamps.eu/tt-rss",
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`www.ducamps.win`)&&PathPrefix(`/tt-rss`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=www.ducamps.win",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`www.ducamps.eu`)&&PathPrefix(`/tt-rss`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=www.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
]
@ -50,30 +54,33 @@ job "tt-rss" {
task "ttrss-app" {
driver = "docker"
config {
image = "cthulhoo/ttrss-fpm-pgsql-static"
image = "docker.service.consul:5000/cthulhoo/ttrss-fpm-pgsql-static"
ports = [
"appPort"
]
volumes = [
"${NOMAD_ALLOC_DIR}/data:/var/www/html"
"${NOMAD_ALLOC_DIR}/data:/var/www/html",
"/mnt/diskstation/nomad/tt-rss/ttrss-auth-oidc:/var/www/html/tt-rss/plugins.local/auth_oidc"
]
}
env {
TTRSS_DB-TYPE = "pgsql"
TTRSS_DB_HOST = "db1.ducamps.win"
TTRSS_DB_HOST = "active.db.service.consul"
TTRSS_DB_NAME = "ttrss"
TTRSS_DB_USER = "ttrss"
TTRSS_SELF_URL_PATH = "https://www.ducamps.win/tt-rss"
TTRSS_SELF_URL_PATH = "https://www.ducamps.eu/tt-rss"
TTRSS_PLUGINS = "auth_oidc, auth_internal"
TTRSS_AUTH_OIDC_NAME= "Authelia"
TTRSS_AUTH_OIDC_URL = "https://auth.ducamps.eu"
TTRSS_AUTH_OIDC_CLIENT_ID = "ttrss"
}
template {
data = <<EOH
{{ with secret "secrets/data/database/ttrss"}}
TTRSS_DB_PASS = "{{ .Data.data.password }}"
{{end}}
{{ with secret "secrets/data/database/ttrss"}}TTRSS_DB_PASS = "{{ .Data.data.password }}"{{end}}
TTRSS_AUTH_OIDC_CLIENT_SECRET = {{ with secret "secrets/data/authelia/ttrss"}}"{{ .Data.data.password }}"{{end}}
EOH
destination = "secrets/tt-rss.env"
destination = "secret/tt-rss.env"
env = true
}
resources {
memory = 150
@ -83,7 +90,7 @@ job "tt-rss" {
task "ttrss-updater" {
driver = "docker"
config {
image = "cthulhoo/ttrss-fpm-pgsql-static"
image = "docker.service.consul:5000/cthulhoo/ttrss-fpm-pgsql-static"
volumes = [
"${NOMAD_ALLOC_DIR}/data:/var/www/html"
]
@ -92,10 +99,10 @@ job "tt-rss" {
}
env {
TTRSS_DB-TYPE = "pgsql"
TTRSS_DB_HOST = "db1.ducamps.win"
TTRSS_DB_HOST = "active.db.service.consul"
TTRSS_DB_NAME = "ttrss"
TTRSS_DB_USER = "ttrss"
TTRSS_SELF_URL_PATH = "https://www.ducamps.win/tt-rss"
TTRSS_SELF_URL_PATH = "https://www.ducamps.eu/tt-rss"
}
template {
data = <<EOH
@ -115,7 +122,7 @@ job "tt-rss" {
task "ttrss-frontend" {
driver = "docker"
config {
image = "nginx:alpine"
image = "docker.service.consul:5000/library/nginx:alpine"
ports = [
"http"
]

View File

@ -6,7 +6,11 @@ job "vaultwarden" {
meta {
forcedeploy = "0"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "vaultwarden" {
network {
mode = "host"
@ -29,11 +33,11 @@ job "vaultwarden" {
"homer.service=Application",
"homer.logo=https://yunohost.org/user/images/bitwarden_logo.png",
"homer.target=_blank",
"homer.url=https://${NOMAD_JOB_NAME}.ducamps.win",
"homer.url=https://${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`vault.ducamps.win`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=vault.ducamps.win",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`vault.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=vault.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
]
@ -50,7 +54,7 @@ job "vaultwarden" {
}
}
config {
image = "vaultwarden/server"
image = "docker.service.consul:5000/vaultwarden/server"
ports = ["http"]
volumes = [
"/mnt/diskstation/nomad/vaultwarden:/data"
@ -60,14 +64,14 @@ job "vaultwarden" {
env {
DATA_FOLDER = "/data"
WEB_VAULT_ENABLED = "true"
DOMAIN = "https://vault.ducamps.win"
DOMAIN = "https://vault.ducamps.eu"
}
template {
data = <<EOH
{{ with secret "secrets/data/database/vaultwarden"}}
DATABASE_URL=postgresql://vaultwarden:{{ .Data.data.password }}@db1.ducamps.win/vaultwarden
DATABASE_URL=postgresql://vaultwarden:{{ .Data.data.password }}@active.db.service.consul/vaultwarden
{{end}}
EOH
destination = "secrets/vaultwarden.env"

View File

@ -0,0 +1,89 @@
# Vikunja task manager: single Docker task serving the API (and bundled
# frontend), fronted by Traefik, with Postgres via the Consul "active.db"
# service and OpenID login through Authelia.
job "vikunja" {
  datacenters = ["homelab"]
  priority    = 70
  type        = "service"
  meta {
    # Bump this value to force a redeployment without any other spec change.
    forcedeploy = "0"
  }
  group "vikunja" {
    network {
      mode = "host"
      port "front" {
        to = 80
      }
      # Vikunja API default listen port.
      port "api" {
        to = 3456
      }
    }
    vault {
      policies = ["vikunja"]
    }
    task "api" {
      driver = "docker"
      service {
        name = "vikunja-api"
        port = "api"
        tags = [
          "traefik.enable=true",
          "traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
          "traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
          "traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.tls.certresolver=myresolver",
          "traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.entrypoints=web,websecure",
          "homer.enable=true",
          # Fixed typo: dashboard label previously read "vikunka".
          "homer.name=vikunja",
          "homer.service=Application",
          "homer.logo=https://${NOMAD_JOB_NAME}.ducamps.eu/images/icons/apple-touch-icon-180x180.png",
          "homer.target=_blank",
          "homer.url=https://${NOMAD_JOB_NAME}.ducamps.eu",
        ]
      }
      config {
        # Pulled through the local registry mirror instead of Docker Hub.
        image   = "docker.service.consul:5000/vikunja/vikunja"
        ports   = ["api", "front"]
        # OpenID provider config is rendered by the template block below.
        volumes = ["local/config.yml:/etc/vikunja/config.yml"]
      }
      env {
        VIKUNJA_DATABASE_HOST     = "active.db.service.consul"
        VIKUNJA_DATABASE_TYPE     = "postgres"
        VIKUNJA_DATABASE_USER     = "vikunja"
        VIKUNJA_DATABASE_DATABASE = "vikunja"
        # NOTE(review): uuidv4() is evaluated at job submission, so every
        # deployment mints a new JWT secret and invalidates all active user
        # sessions — consider sourcing a stable secret from Vault instead.
        VIKUNJA_SERVICE_JWTSECRET   = uuidv4()
        VIKUNJA_SERVICE_FRONTENDURL = "https://${NOMAD_JOB_NAME}.ducamps.eu/"
        # Local accounts disabled: authentication is delegated to Authelia via
        # OpenID. (HCL2 booleans are lowercase — `False` fails to parse.)
        VIKUNJA_AUTH_LOCAL = false
      }
      # Database password, injected as an environment variable from Vault.
      template {
        data        = <<EOH
{{ with secret "secrets/data/database/vikunja"}}
VIKUNJA_DATABASE_PASSWORD= "{{ .Data.data.password }}"
{{end}}
EOH
        # Renamed from "sample.env" for consistency with the other jobs.
        destination = "secrets/vikunja.env"
        env         = true
      }
      # OpenID provider configuration consumed by Vikunja at startup.
      template {
        data        = <<EOH
auth:
  openid:
    enabled: true
    redirecturl: https://vikunja.ducamps.eu/auth/openid/
    providers:
      - name: Authelia
        authurl: https://auth.ducamps.eu
        clientid: vikunja
        clientsecret: {{ with secret "secrets/data/authelia/vikunja"}}{{ .Data.data.password }}{{end}}
        scope: openid profile email
EOH
        destination = "local/config.yml"
      }
      resources {
        memory = 100
      }
    }
  }
}

Some files were not shown because too many files have changed in this diff Show More