Compare commits

...

168 Commits

Author SHA1 Message Date
vincent
d9cf7cb297 borgmatic: add exclusion
Some checks failed
continuous-integration/drone/push Build is failing
2024-11-17 16:28:20 +01:00
vincent
90dd0ecd9a chore: link makefile
Some checks failed
continuous-integration/drone/push Build is failing
2024-11-09 10:24:15 +01:00
vincent
4f6743db5f perf: tweak mealie and pihole memory
Some checks failed
continuous-integration/drone/push Build is failing
2024-11-09 10:23:42 +01:00
vincent
2452a2ad44 fix (flaresolverr): change image to resolve challenge issue 2024-11-09 10:23:07 +01:00
vincent
5e2bb57914 rutorrent: resolve issue with docker 2024-11-09 10:22:24 +01:00
vincent
3eb2dbfa08 authelia: custom consent preconfigured time 2024-11-09 10:21:50 +01:00
vincent
1ea094aa6e Revert "perfs: decrease CPU"
This reverts commit 6ea5de0315.
2024-10-29 19:21:05 +01:00
vincent
c1e48d4ace add compute parameter to oscar
Some checks failed
continuous-integration/drone/push Build is failing
2024-10-29 19:08:41 +01:00
vincent
b2710aab2f add oauth to gitea 2024-10-19 16:28:25 +02:00
vincent
c000933f66 add paperless-ng SSO
Some checks failed
continuous-integration/drone/push Build is failing
2024-10-12 10:12:38 +02:00
vincent
7948773757 perfs: increase memory-max for some jobs
Some checks failed
continuous-integration/drone/push Build is failing
2024-09-29 17:51:05 +02:00
vincent
3d90a1f6d7 fix: wrong dns in docker daemon.json 2024-09-29 17:50:31 +02:00
vincent
1f29007172 switch to nfs v4 on share 2024-09-29 17:50:11 +02:00
vincent
af58866882 dns: pdns-admin in dedicated nomad group 2024-09-29 17:38:27 +02:00
vincent
374a62c304 fix: aur call in database playbook
Some checks failed
continuous-integration/drone/push Build is failing
2024-08-04 11:49:40 +02:00
vincent
9451443266 refactor: split job in role folder
Some checks failed
continuous-integration/drone/push Build is failing
2024-08-03 15:06:36 +02:00
vincent
dacd187f7b fix: loki config
Some checks failed
continuous-integration/drone/push Build is failing
2024-08-03 14:47:27 +02:00
vincent
e48a879c43 fix: torrent PUID 2024-08-03 14:46:47 +02:00
vincent
6ea5de0315 perfs: decrease CPU 2024-08-03 14:46:05 +02:00
vincent
984b712c78 update: nfs csi plugin 4.7 2024-08-03 14:45:22 +02:00
vincent
293fddd81c remove backup disk mount 2024-08-03 14:45:04 +02:00
vincent
0952c4bf42 fix: change media mount path 2024-08-03 14:43:30 +02:00
vincent
3228054172 oscar hardware replacement
Some checks failed
continuous-integration/drone/push Build is failing
2024-06-29 10:21:44 +02:00
vincent
ee7cd0c12e fix: wrong interface variable call 2024-06-29 10:20:25 +02:00
vincent
22a60b42d4 add vikunja to generate vault 2024-06-25 18:45:46 +02:00
vincent
d578fefbce perfs (registry): add memory 2024-06-25 18:45:16 +02:00
vincent
cae4ceb623 update: remove immich microservice 2024-06-25 18:44:51 +02:00
vincent
ddc4320fe9 feat (vikunja): implement oauth
Some checks reported errors
continuous-integration/drone/push Build was killed
2024-05-20 12:15:56 +02:00
vincent
d1b475d651 fix: add cluster constraint to prowlarr and tt-rss 2024-05-20 11:21:45 +02:00
vincent
d817f3a7f8 perfs (immich): increase memory 2024-05-20 11:21:21 +02:00
vincent
18a78f6fd2 chore (immich): fix logo 2024-05-20 11:20:32 +02:00
vincent
f22e3406be borgmatic: modify jellyfin backup exception 2024-05-16 19:19:00 +02:00
vincent
1520ec0dcc disable authelia notifier check 2024-05-16 19:18:18 +02:00
vincent
275435664c feat: grafana sso
Some checks failed
continuous-integration/drone/push Build is failing
2024-05-10 15:50:45 +02:00
vincent
f9ff70a9d9 feat: immich sso
Some checks failed
continuous-integration/drone/push Build is failing
2024-05-10 14:49:50 +02:00
vincent
8915ff52dd fix: wrong array character 2024-05-10 14:49:20 +02:00
vincent
74794f866a feat: improve database playbook 2024-05-10 08:35:14 +02:00
vincent
7244ceb5b1 feat: manage all nomad folder creation on build 2024-05-10 08:35:14 +02:00
vincent
49a8a427f7 perf: adjust openldap ram 2024-05-10 08:35:14 +02:00
vincent
f4f77fc55a fix: add dev network to docker insecure registry 2024-05-10 08:35:14 +02:00
vincent
351d7c287f fix: increase VM ram amount 2024-05-10 08:35:14 +02:00
vincent
598896ad5f feat: implement immich job 2024-05-10 08:20:01 +02:00
vincent
6e00668840 add terraform immich variable for vault and dns 2024-05-10 08:18:53 +02:00
vincent
24eb640c60 configure db for immich 2024-05-09 09:25:23 +02:00
vincent
9b6ed6cc6e switch to opentofu 2024-05-09 09:14:25 +02:00
vincent
2f1de5dcd5 fix vault dn
Some checks failed
continuous-integration/drone/push Build is failing
Signed-off-by: vincent <vincent@ducamps.win>
2024-05-08 21:38:10 +02:00
vincent
78692be3fd add vector.rs to database playbook 2024-05-08 21:37:27 +02:00
vincent
272efbb844 update openldap default tree 2024-05-08 21:14:37 +02:00
vincent
c9f4656470 switch gerard-dev to archlinux 2024-05-08 21:07:57 +02:00
vincent
6e679c82a0 fix: add missing argument to ldap manager
Some checks failed
continuous-integration/drone/push Build is failing
2024-05-08 09:11:28 +02:00
vincent
9d0c513787 chore: update nomad template 2024-04-28 16:11:37 +02:00
vincent
69a2ad4efd feat: implement mealie
Some checks failed
continuous-integration/drone/push Build is failing
2024-04-28 16:10:43 +02:00
vincent
2f6c814fb1 CI: terraform makefile command parameter
Some checks failed
continuous-integration/drone/push Build is failing
2024-04-27 14:29:38 +02:00
vincent
ab3c42cf8b feat: add authelia oidc authentication
Some checks failed
continuous-integration/drone/push Build is failing
2024-04-24 21:23:39 +02:00
vincent
992937c011 feat: migrate rutorrent to authelia for authentication
Some checks failed
continuous-integration/drone/push Build is failing
2024-04-12 08:59:40 +02:00
vincent
5fe61223c3 feat: create authelia job 2024-04-12 08:59:20 +02:00
vincent
452ab3611a fix (syncthing): change UID to match to folder 2024-04-12 08:58:02 +02:00
vincent
1ee5e21f84 ldap: remove login shell for service account 2024-04-12 08:57:38 +02:00
vincent
92befa7ea4 chore: update alertmanager smtp hello url 2024-04-12 08:56:50 +02:00
vincent
4be6af919d refactor: move lldap to decom job 2024-04-12 08:56:34 +02:00
vincent
77e7cd4f88 style: update missing icon 2024-04-12 08:56:12 +02:00
vincent
fe9bc8dbab feat: add torrent automation job (prowlarr + flaresolverr) 2024-04-11 10:16:20 +02:00
vincent
60cfe75e47 perfs (prometheus): add memory_max
Some checks failed
continuous-integration/drone/push Build is failing
2024-04-09 08:41:06 +02:00
vincent
4fcf862279 borgmatic: add exclusion 2024-04-09 08:40:53 +02:00
vincent
98c1d63962 borgmatic: add action 2024-04-09 08:40:38 +02:00
vincent
0b067cabca loki: review config 2024-04-09 08:39:37 +02:00
vincent
4ef30222f7 fix: memory_max
Some checks failed
continuous-integration/drone/push Build is failing
2024-03-29 21:15:39 +01:00
vincent
117e9397a3 switch volume to nfsv4 2024-03-29 21:14:24 +01:00
vincent
0b25eb194e feat: add authorization for local docker in nfs
Some checks failed
continuous-integration/drone/push Build is failing
2024-03-17 19:07:51 +01:00
vincent
74dc3a0c89 chore: clean gerard from inventory 2024-03-17 19:01:52 +01:00
vincent
9bc0e24357 fix: pureftpd variable 2024-03-17 19:01:32 +01:00
vincent
e0f9190b76 feat: docker pull through mirror
Some checks failed
continuous-integration/drone/push Build is failing
2024-03-17 18:58:24 +01:00
vincent
f0676ec3f7 fix: change rutorrent tag 2024-03-17 11:07:59 +01:00
vincent
8b895fee06 docs: update ADR 2024-03-17 11:07:59 +01:00
vincent
aeed90ea34 perfs: adjust max memory 2024-03-17 11:07:59 +01:00
vincent
a89109e1ff feat: add actual budget 2024-03-17 11:07:59 +01:00
vincent
d748beb6a4 feat: switch from vsftpd to pure-ftpd 2024-03-17 11:07:59 +01:00
vincent
3a80c47b56 add service account ou in ldap default tree 2024-03-17 11:07:59 +01:00
vincent
c75e9e707a fix: staging nas bind 2024-03-17 11:07:59 +01:00
vincent
4926b4eb06 perfs: increase backup postgres memory 2024-03-17 11:07:59 +01:00
vincent
0ebd087544 fix: move binding dn 2024-03-17 11:07:59 +01:00
vincent
b7dc26cc27 borgmatic: fix config 2024-03-17 11:07:59 +01:00
vincent
012c448c73 improve share binding 2024-03-17 11:07:59 +01:00
vincent
1b79fe4cb0 Borgmatic: add known host 2024-03-17 11:07:59 +01:00
vincent
6848ffa05b fix: become on nut role 2024-03-17 11:07:59 +01:00
vincent
aec7230f11 feat: ftp local user is chrooted 2024-03-17 11:07:58 +01:00
vincent
da3b290d4a feat: enable crossmount in nfs share 2024-03-17 11:07:58 +01:00
vincent
5718968407 fix: hardcode DNS on oscar instead of the NAS (if the NAS is shut down, cluster DNS will shut down)
2024-03-17 11:07:58 +01:00
vincent
0db8555fe8 change rutorrent group 2024-03-17 11:07:58 +01:00
vincent
2fee8293dc feat: add role for nut 2024-03-17 11:07:58 +01:00
vincent
3dae6adb33 switch dns on oberon 2024-03-17 11:07:58 +01:00
vincent
f207be7d7d finalize Nas data migration 2024-03-17 11:07:58 +01:00
vincent
f32c0d1e40 fix: no issue on nfs cluster if one device is down 2024-03-17 11:07:58 +01:00
vincent
d37fe78e39 feat: enable vsftp user session 2024-03-17 11:07:58 +01:00
vincent
586e6101ca feat: correct homedir for samba 2024-03-17 11:07:58 +01:00
vincent
e470b204a5 feat: add constraint to limit nas job 2024-03-17 11:07:58 +01:00
vincent
c4d10aacfe fix: change path 2024-03-17 11:07:58 +01:00
vincent
e10830e028 fix: path issue 2024-03-17 11:07:58 +01:00
vincent
c37083b5c9 feat: isolate wireguard playbook 2024-03-17 11:07:58 +01:00
vincent
c7e6270c3a fix: remove separator from create user 2024-03-17 11:07:58 +01:00
vincent
625bda7fda feat: deploy NAS on oberon 2024-03-17 11:07:58 +01:00
vincent
d1cc5ff299 fix: add lan dns redirection to pdns recursor 2024-03-17 11:07:58 +01:00
vincent
0a57c5659c fix: upgrade vikunja 2024-03-17 11:07:58 +01:00
vincent
7191cb7216 rename nas to oberon 2024-03-17 11:07:58 +01:00
vincent
b3488061da dns: decrease local ttl 2024-03-17 11:07:58 +01:00
vincent
c08032052d fix: terraform dns makefile secret 2024-03-17 11:07:58 +01:00
vincent
25780828cc job: add borgmatic 2024-03-17 11:07:58 +01:00
vincent
46b4a51935 CI: improve consul staging switch 2024-03-17 11:07:58 +01:00
vincent
993753f284 feat: integrate SAMBA NAS role 2024-03-17 11:07:58 +01:00
vincent
5188d865d8 fix: get ldap admin password in vault 2024-03-17 11:07:58 +01:00
vincent
2a731201a1 add default crypt password for vault service account 2024-03-17 11:07:58 +01:00
vincent
70e0d6011b CI: autoapprove for terraform apply 2024-03-17 11:07:58 +01:00
vincent
2c0da4bd15 feat: enable automoint for staging 2024-03-17 11:07:58 +01:00
vincent
547ce05466 chore: complete generate-vault-secret 2024-03-17 11:07:58 +01:00
vincent
bfb3ec3d34 fix: modify vault endpoint for create nomad token 2024-03-17 11:07:58 +01:00
vincent
9756939f8e fix: create nomad dir in playbook with correct right 2024-03-17 11:07:58 +01:00
vincent
f420f17929 feat: modify staging domain name 2024-03-17 11:07:58 +01:00
vincent
2bae64c40b create script to bootstrap vault secret 2024-03-17 11:07:58 +01:00
vincent
c8f7d7f8c3 ordo: improve makefile for terraform 2024-03-17 11:07:58 +01:00
vincent
2632c6d2b0 dns: switch cname to alias 2024-03-17 11:07:58 +01:00
vincent
f61008b570 fix: bootstrap become 2024-03-17 11:07:58 +01:00
vincent
73df5fa582 refactor: consul first in hashicorp stack 2024-03-17 11:07:58 +01:00
vincent
e3d76630c3 feat: replace rocky by arch in vagrant 2024-03-17 11:07:58 +01:00
vincent
41b1a71c76 feat: switch consul DNS in makefile 2024-03-17 11:07:58 +01:00
vincent
e9ad317436 feat: ensure nfs share folder exists 2024-03-17 11:07:58 +01:00
vincent
2db6061516 fix: declare main interface variable for staging 2024-03-17 11:07:58 +01:00
vincent
3367c78314 feat: merge user create and config playbook 2024-03-17 11:07:58 +01:00
vincent
08ea604028 feat: create home share and delete home mount on cluster 2024-03-17 11:07:58 +01:00
vincent
29ab70a1d5 fix: samba mount option issue 2024-03-17 11:07:58 +01:00
vincent
e083f4da7a terraform: remove corwin 2024-03-17 11:07:58 +01:00
vincent
2ea4992f57 fix dockermailserver: add private network to ha proxy auth 2024-03-17 11:07:58 +01:00
vincent
49de33bbdb calc docker MTU from wireguard MTU 2024-03-17 11:07:58 +01:00
vincent
2b678b7786 remove bootstrap become 2024-03-17 11:07:58 +01:00
vincent
fc2dcd7b33 fix: add empty env group to avoid issue 2024-03-17 11:07:58 +01:00
vincent
29d70cac0e migrate to merlin 2024-03-17 11:07:58 +01:00
vincent
4117bd80c5 fix: www specific location for archiso 2024-03-17 11:07:58 +01:00
vincent
da6f04e42e fix: database pg_hba 2024-03-17 11:07:58 +01:00
vincent
13bda4cd34 fix: case where vault root file does not exist 2024-03-17 11:07:58 +01:00
vincent
63cd352fff archiso on web server 2024-03-17 11:07:58 +01:00
vincent
a65e3484b5 implement default interface variable 2024-03-17 11:07:58 +01:00
vincent
2b9e034232 delete old var file 2024-03-17 11:07:58 +01:00
vincent
527d2f2345 add packer to build arch image on hetzner 2024-03-17 11:07:58 +01:00
vincent
2da18e9c12 docs: add smtp case troubleshoot 2024-03-17 11:07:58 +01:00
vincent
49f639cb15 delete old dns terraform file 2024-03-17 11:07:58 +01:00
vincent
abc88f0074 add packer for hetzner image 2024-03-17 11:07:58 +01:00
vincent
394dbaf6cb move filestash on homelab 2024-03-17 11:07:58 +01:00
vincent
78762b477e move mail on homelab 2024-03-17 11:07:58 +01:00
vincent
2c00b9be59 feat: redirect all cluster traffic on wireguard 2024-03-17 11:07:58 +01:00
vincent
acc6cdc5fa fix crowdsec: rename data file 2024-03-17 11:07:58 +01:00
vincent
43b6cf9158 fix www: change redirection method 2024-03-17 11:07:58 +01:00
vincent
015a89b27e fix: port 25 entrypoint conflict 2024-03-17 11:07:58 +01:00
vincent
68434f3e92 fix: switch ldap user manager traefik router 2024-03-17 11:07:58 +01:00
vincent
fe6d1c5e26 add user group to tree ldif 2024-03-17 11:07:58 +01:00
vincent
f8bc026165 feat: implemant openldap and migration 2024-03-17 11:07:58 +01:00
vincent
80f489422a change docker repo for testing 2024-03-17 11:07:58 +01:00
vincent
4207b1fc75 init lldap job 2024-03-17 11:07:58 +01:00
vincent
ea30fce975 feat: move backup in dedicated folder 2024-03-17 11:07:58 +01:00
vincent
5b23006e97 feat: move last application data folder in nomad share 2024-03-17 11:07:58 +01:00
vincent
9370a92518 put hashicorpstack before nas role 2024-03-17 11:07:58 +01:00
vincent
9fcf2d78e6 config repo on prod 2024-03-17 11:07:58 +01:00
vincent
f82c99c2ba fix: typo 2024-03-17 11:07:58 +01:00
vincent
cecad8b785 feat: replace nas ip with consul service for staging 2024-03-17 11:07:58 +01:00
vincent
28fc2bf6a7 init csi 2024-01-13 18:37:11 +01:00
vincent
a0214d0d74 allow nomad privileged on all
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-13 18:36:27 +01:00
vincent
9812376a1d gather all devices before nas playbook 2024-01-13 18:36:27 +01:00
vincent
6ddcc4736e put nfs share in export bind 2024-01-13 18:32:02 +01:00
vincent
11fe5fb5dc conf dhcp: add ip for shelly
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-13 16:49:47 +01:00
vincent
ec2ecd08cd perfs backup-postgres: increase memory
All checks were successful
continuous-integration/drone/push Build is passing
2024-01-13 10:20:53 +01:00
147 changed files with 4719 additions and 908 deletions

Vagrantfile
View File

@ -1,9 +1,10 @@
Vagrant.configure('2') do |config|
if Vagrant.has_plugin?('vagrant-cachier')
config.cache.scope = 'machine'
config.cache.enable :pacman
end
config.vm.provider :libvirt do |libvirt|
libvirt.management_network_domain = "ducamps-dev.eu"
libvirt.management_network_domain = "lan.ducamps.dev"
end
config.vm.define "oscar-dev" do |c|
@ -19,7 +20,7 @@ Vagrant.configure('2') do |config|
# Provider
c.vm.provider "libvirt" do |libvirt, override|
libvirt.memory = 1024
libvirt.memory = 2048
libvirt.cpus = 2
end
c.vm.provision "ansible" do |bootstrap|
@ -32,7 +33,7 @@ Vagrant.configure('2') do |config|
config.vm.define "merlin-dev" do |c|
# Box definition
c.vm.box = "generic/rocky9"
c.vm.box = "archlinux/archlinux"
# Config options
c.vm.synced_folder ".", "/vagrant", disabled: true
c.ssh.insert_key = true
@ -42,7 +43,7 @@ Vagrant.configure('2') do |config|
# Provider
c.vm.provider "libvirt" do |libvirt, override|
libvirt.memory = 1024
libvirt.memory = 512
libvirt.cpus = 2
end
@ -56,7 +57,7 @@ Vagrant.configure('2') do |config|
config.vm.define "gerard-dev" do |c|
# Box definition
c.vm.box = "generic/debian12"
c.vm.box = "archlinux/archlinux"
# Config options
c.vm.synced_folder ".", "/vagrant", disabled: true
@ -66,7 +67,7 @@ Vagrant.configure('2') do |config|
# instance_raw_config_args
# Provider
c.vm.provider "libvirt" do |libvirt, override|
libvirt.memory = 1024
libvirt.memory = 2048
libvirt.cpus = 2
end
c.vm.provision "ansible" do |bootstrap|
@ -89,7 +90,7 @@ Vagrant.configure('2') do |config|
# Provider
c.vm.provider "libvirt" do |libvirt, override|
libvirt.memory = 1024
libvirt.memory = 2048
libvirt.cpus = 2
end

View File

@ -15,7 +15,10 @@ pdns_rec_config:
forward-zones:
- "{{ consul_domain }}=127.0.0.1:8600"
- "ducamps.win=192.168.1.10"
- "ducamps.eu=192.168.1.5"
- "{{ domain.name }}=192.168.1.5"
- "lan.{{ domain.name }}=192.168.1.5"
- "1.168.192.in-addr.arpa=192.168.1.5:5300"
local-address: "{{ ansible_default_ipv4.address }}"
local-address: "{{ hostvars[inventory_hostname]['ansible_'+ default_interface].ipv4.address|default(ansible_default_ipv4.address) }}"
dnssec: "off"

View File

@ -0,0 +1,90 @@
NAS_nomad_folder:
- name: actualbudget
- name: archiso
owner: 1000001
- name: backup
owner: 1000001
- name: borgmatic
- name: crowdsec
owner: 1000001
- name: dms
owner: 1000001
- name: filestash
owner: 1000
- name: gitea
owner: 1000000
- name: grafana
owner: 472
- name: hass
owner: 1000001
- name: homer
owner: 1000001
- name: immich/cache
- name: immich/upload
- name: jellyfin
owner: 1000001
- name: loki
owner: 10001
- name: mealie
owner: 1000001
- name: mosquito
owner: 1883
- name: pacoloco
owner: 1000001
- name: pdns-auth
owner: 1000001
- name: pdns-admin
owner: 1000001
- name: pihole
owner: 999
- name: prometheus
owner: 65534
- name: prowlarr
owner: 1000001
- name: radicale
owner: 1000001
- name: openldap
owner: 1001
- name: registry/ghcr
- name: registry/docker
- name: syncthing
owner: 1000001
- name: traefik
owner: 1000001
- name: tt-rss
owner: 1000001
- name: vaultwarden
owner: 1000001
- name: zigbee2mqtt
owner: 1000001
nas_bind_target: "/exports"
nas_bind_source:
- dest: "{{ nas_bind_target }}/nomad"
source: /data/data1/nomad
- dest: "{{ nas_bind_target }}/music"
source: /data/data1/music
- dest: "{{ nas_bind_target }}/download"
source: /data/data1/download
- dest: "{{ nas_bind_target }}/media/serie"
source: /data/data2/serie
- dest: "{{ nas_bind_target }}/media/film"
source: /data/data3/film
- dest: "{{ nas_bind_target }}/photo"
source: /data/data1/photo
- dest: "{{ nas_bind_target }}/homes"
source: /data/data1/homes
- dest: "{{ nas_bind_target }}/ebook"
source: /data/data1/ebook
- dest: "{{ nas_bind_target }}/media/download/serie"
source: /data/data1/download/serie
- dest: "{{ nas_bind_target }}/media/download/film"
source: /data/data1/download/film
- dest: "{{ nas_bind_target }}/music/download/"
source: /data/data1/download/music

View File

@ -1,3 +1 @@
vsftpd_config:
local_root: "/var/local/volume1"
seccomp_sandbox: False
vsftpd_config: {}

View File

@ -1,15 +1,15 @@
nfs_cluster_list: "{% for server in groups['all']%}{{ hostvars[server]['ansible_default_ipv4']['address'] }}(rw,no_root_squash,async,insecure_locks,sec=sys) {%endfor%}"
nfs_cluster_list: "{% for server in groups['all']%} {% if hostvars[server]['ansible_default_ipv4']['address'] is defined %} {{hostvars[server]['ansible_' + hostvars[server]['nfs_iface']|default('')].ipv4.address|default(hostvars[server]['ansible_default_ipv4']['address'],true)}}{{ nfs_options }} {% endif %} {%endfor%}"
nfs_options: "(rw,no_root_squash,crossmnt,async,insecure_locks,sec=sys)"
nfs_consul_service: true
nfs_bind_target: "/exports"
nfs_exports:
- "/var/local/volume1/nomad {{nfs_cluster_list}}"
- "/var/local/volume1/music {{nfs_cluster_list}}"
- "/var/local/volume1/media {{nfs_cluster_list}}"
- "/var/local/volume1/photo {{nfs_cluster_list}}"
- "/var/local/volume1/ebook {{nfs_cluster_list}}"
- "/var/local/volume1/git {{nfs_cluster_list}}"
- "/var/local/volume1/archMirror {{nfs_cluster_list}}"
- "/var/local/volume1/homes/admin {{nfs_cluster_list}}"
- "/var/local/volume1/CardDav {{nfs_cluster_list}}"
- "{{ nas_bind_target }} *(fsid=0,insecure,no_subtree_check)"
- "{{ nas_bind_target }}/nomad {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
- "{{ nas_bind_target }}/download {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
- "{{ nas_bind_target }}/music {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
- "{{ nas_bind_target }}/media {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
- "{{ nas_bind_target }}/photo {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
- "{{ nas_bind_target }}/homes {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
- "{{ nas_bind_target }}/ebook {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"

View File

@ -0,0 +1,25 @@
samba_passdb_backend: tdbsam
samba_shares_root: /exports
samba_shares:
- name: media
comment: "media"
write_list: "@NAS_media"
browseable: true
- name: ebook
comment: "ebook"
write_list: "@NAS_ebook"
browseable: true
- name: music
comment: "music"
write_list: "@NAS_music"
browseable: true
- name: photo
comment: "photo"
write_list: "@NAS_photo"
browseable: true
- name: download
comment: "downlaod"
write_list: "@NAS_download"
browseable: true
samba_load_homes: True
samba_homes_include: samba_homes_include.conf

View File

@ -4,7 +4,7 @@ systemd_mounts:
mount: /mnt/diskstation/nomad
type: nfs
options:
- " "
- "vers=4"
automount: "{{ env_automount }}"
enabled: true
hetzner_storage:
@ -13,8 +13,8 @@ systemd_mounts:
type: cifs
options:
- credentials=/etc/creds/hetzner_credentials
- uid= 100001
- gid= 10
- uid=100001
- gid=10
- vers=3.0
- mfsymlinks
automount: "{{ env_automount }}"

View File

@ -4,4 +4,4 @@ system_arch_local_mirror: "https://arch.{{domain.name}}/repo/archlinux_$arch"
system_sudoers_group: "serverAdmin"
system_ipV6_disable: True
system_ip_unprivileged_port_start: 0
nas_ip: "{{ hostvars[groups['NAS'][0]]['ansible_facts']['default_ipv4']['address']|default('192.168.1.10')}}"
wireguard_mtu: 1420

View File

@ -1,4 +1,8 @@
docker_daemon_config:
dns:
- 172.17.0.1
- 192.168.1.5
- 192.168.1.6
mtu: 1420
insecure-registries:
- 192.168.1.0/24
- 192.168.121.0/24

View File

@ -2,6 +2,7 @@ nomad_docker_allow_caps:
- NET_ADMIN
- NET_BROADCAST
- NET_RAW
nomad_allow_privileged: True
nomad_vault_enabled: true
nomad_vault_address: "http://active.vault.service.{{consul_domain}}:8200"
nomad_vault_role: "nomad-cluster"

View File

@ -1,42 +0,0 @@
consul_client_addr: "0.0.0.0"
consul_datacenter: "homelab"
consul_backup_location: "/mnt/diskstation/git/backup/consul"
consul_ansible_group: all
consul_bootstrap_expect: 3
nomad_docker_allow_caps:
- NET_ADMIN
- NET_BROADCAST
- NET_RAW
nomad_vault_enabled: true
nomad_vault_address: "http://active.vault.service.consul:8200"
nomad_vault_role: "nomad-cluster"
nomad_vault_token: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:nomad_vault_token') }}"
nomad_bootstrap_expect: 3
notification_mail: "{{inventory_hostname}}@{{ domain_name }}"
msmtp_mailhub: smtp.{{ domain_name }}
msmtp_auth_user: "{{ user.mail }}"
msmtp_auth_pass: "{{ lookup('hashi_vault','secret=secrets/data/ansible/other:email') }}"
system_user:
- name: drone-deploy
home: /home/drone-deploy
shell: /bin/bash
privatekey:
- keyname: id_gitea
key: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/privatekey:gitea')}}"
authorized_keys:
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDUaK+pQlosmopbZfucll9UdqDOTaODOBwoxRwkJEk1i drone@oscar
- name: ansible
home: /home/ansible
shell: /bin/bash
- name: root
home: /root
privatekey:
- keyname: id_gitea
key: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/privatekey:gitea')}}"

View File

@ -1,5 +1,5 @@
sssd_configure: true
# sssd_configure is False by default - by default nothing is done by this role.
ldap_search_base: "dc=ducamps,dc=win"
ldap_uri: "ldaps://ldap.ducamps.eu"
ldap_sudo_search_base: "ou=sudoers,dc=ducamps,dc=win"
ldap_search_base: "dc=ducamps,dc=eu"
ldap_uri: "ldaps://ldaps.service.consul"
ldap_sudo_search_base: "ou=sudoers,dc=ducamps,dc=eu"

View File

@ -39,4 +39,4 @@ user_custom_host:
user: "git"
keyfile: "~/.ssh/id_gitea"
user_config_repo: "ssh://git@git.{{ domain.name }}:2222/vincent/conf2.git"
user_config_repo: "ssh://git@git.ducamps.eu:2222/vincent/conf2.git"

View File

@ -1,11 +1,12 @@
$ANSIBLE_VAULT;1.1;AES256
34356264306639303930393736376562653636383538623131343939323563653938616534623163
6536366261666662376533393836626664373766313439660a363331326231303638626165393164
63323063623365393566643230653964393565636430303365653233323931646236366664346430
3162383233656139320a323133323262386638363738346336613862626539386538633864613131
30306539376639303365323665613732616138346530346162633761386466626238373065316230
38396662363364336134306130616661643835616161313535613331303133383334393333653335
66363538313631373736396333363837376664616166663665343030336232346237333965303861
36613763666135393531653637616463333461343232366137656336383239623166633338646561
39336563636665396666663339306534643661366264623061626661343762373037383037373561
3431656130306133323436616531343034366665636434333362
61326233336236343231396231306638373837653661313334313261313539316532373437346132
3931306637303530373032663236363466383433316161310a396439393564643731656664663639
32386130663837303663376432633930393663386436666263313939326631616466643237333138
3365346131636333330a376436323964656563363664336638653564656231636136663635303439
35346461356337303064623861326331346263373539336335393566623462343464323065366237
61346637326336613232643462323733366530656439626234663335633965376335623733336162
37323739376237323534613361333831396531663637666161666366656237353563626164626632
33326336353663356235373835666166643465666562616663336539316233373430633862613133
36363831623361393230653161626131353264366634326233363232336635306266376363363739
66373434343330633337633436316135656533613465613963363931383266323466653762623365
363332393662393532313063613066653964

View File

@ -1,42 +1,10 @@
systemd_mounts:
diskstation_git:
share: "{{ nas_ip }}:{{ env_default_nfs_path }}//git"
mount: /mnt/diskstation/git
type: nfs
options:
- " "
automount: "{{ env_automount }}"
enabled: true
diskstation_CardDav:
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/CardDav"
mount: /mnt/diskstation/CardDav
type: nfs
options:
- " "
automount: "{{ env_automount }}"
enabled: true
backup_disk:
share: /dev/sdb1
mount: /mnt/backup
type: ntfs-3g
options:
- " "
automount: "{{ env_automount }}"
enabled: "{%if inventory_hostname in groups['staging'] %} false {% else %} true {% endif %}"
diskstation_home:
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/homes/admin"
mount: /mnt/diskstation/home
type: nfs
options:
- " "
automount: "{{ env_automount }}"
enabled: true
diskstation_photo:
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/photo"
mount: /mnt/diskstation/photo
type: nfs
options:
- " "
- "vers=4"
automount: "{{ env_automount }}"
enabled: true
diskstation_music:
@ -44,7 +12,7 @@ systemd_mounts:
mount: /mnt/diskstation/music
type: nfs
options:
- " "
- "vers=4"
automount: "{{ env_automount }}"
enabled: true
diskstation_media:
@ -52,23 +20,16 @@ systemd_mounts:
mount: /mnt/diskstation/media
type: nfs
options:
- " "
- "vers=4"
automount: "{{ env_automount }}"
enabled: true
diskstation_ebook:
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/ebook"
mount: /mnt/diskstation/ebook
type: nfs
options:
- " "
automount: "{{ env_automount }}"
enabled: true
diskstation_archMirror:
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/archMirror"
mount: /mnt/diskstation/archMirror
type: nfs
options:
- " "
- "vers=4"
automount: "{{ env_automount }}"
enabled: true
diskstation_nomad:
@ -79,3 +40,11 @@ systemd_mounts:
- " "
automount: "{{ env_automount }}"
enabled: true
diskstation_download:
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/download"
mount: /mnt/diskstation/download
type: nfs
options:
- "vers=4"
automount: "{{ env_automount }}"
enabled: true

View File

@ -24,6 +24,10 @@ postgresql_databases:
owner: pdns-auth
- name: pdns-admin
owner: pdns-admin
- name: mealie
owner: mealie
- name: immich
owner: immich
postgresql_hba_entries:
- {type: local, database: all, user: postgres, auth_method: peer}
@ -32,5 +36,3 @@ postgresql_hba_entries:
- {type: host, database: all, user: all, address: '::1/128', auth_method: md5}
- {type: host, database: all, user: all, address: '::0/128', auth_method: md5}
- {type: host, database: all, user: all, address: '0.0.0.0/0', auth_method: md5}
- {type: host, database: replication, user: repli, address:192.168.1.42/32, auth_method: md5}
- {type: host, database: replication, user: repli, address:192.168.1.40/32, auth_method: md5}

View File

@ -1,45 +1,54 @@
$ANSIBLE_VAULT;1.1;AES256
64656332666561346439636331396439333566646361333031613764376634363061623635356630
3832326235316435316264653637396130383465323234630a653138393161316232323236323366
32363661633631623132323864663366633766396266623630636135396165663062353434613231
6363646665626439610a313233313639333232393035633139326561316431393837616231313933
38646532613665666136316635376533653161616630313532333330393364636662653331336637
39353462336130333933383033656634633461333461393730633333343330306432623466623062
32353962623338356630393935646537313335313335323464666265303732653633396332363965
36356338386330653863646134623234623230356232643535643763303162626132333530626639
39316166613862356264336362303833343236616635613136356433663766383861333832656261
35613662653266396461383162303230613865373232353437646131633063633634346633383563
31323736303537643433633235613464376230373332613331623439643462313362356437623463
65326335653938626461353332356434303962376630626666666631386334316261653639623633
34326633393330313064326562363838316366316361626662393435363262333264626333396136
66353936623763323865656632373763303365316131663064343830663330323566346535316436
63623931383461363364613632363661613734306535373536643236656161393634633435653862
34316666353234646633633635653934373335396635343035663238323636323662346632303865
35326333366439646661303437626238326435313032373031636535353963666263636635366234
36336562633666623932653465376237366232306262386565646631346432346631353566326535
32356337333762653161376439353035323633363833633862336134366132623963326231643461
35623863373730313935393631626266336465613261636364353533666233613831323031643035
32663630316264633932643132633061303438613339646264666334306630643038323632366330
31366365333039636434613537386436313539396632613766333136663638393462653263613165
33323937313031626233623237616464323939303131613465326362346632346538323161343362
65353839386133326233356561363864336261663135343865323861623330613736333835396261
64653361333530326630363633383836396565646463396239616261646635303535316135306537
64343830616566663633323531383464383834373539646637633465616533383238346565303337
34386561626266303833353665306335326264343533386263626562373633303135313735643733
37333766373465326133663663303166316134643732343938343930616631383137356137373564
31633831663264653762326534343635323364313632353661323330646638363062346137646337
61323334623434613333613038633637666131393338653839373835633062396661653537343138
61643961623366393735393438356461333731326265313937613066323038313163353835363135
33323932353264313536393865373232333930613636343661613033656165616237373439383531
38393932366633616639303964386333386462353935646432663330313137306465386634633931
33656533306665653836363830363164303039356463386130663536636330396138643363383838
35393966646630663535623836303262353739353063303763333530383630353838623939376535
34343239373831623232343530396561393730303066323236306539333263656133366363396534
30666662336435313561666536643231633562663037353837303936326164353366333032656431
39303063343536336431336637323239356432616562656565306561666664663930303232313464
34333236613239656562323037656137376135396636323361383565336636303338663138396238
65396130303931393266636630656637333464346361303763653931383464326365333232623437
61623263316562643636386637303531626238333131656130306236636230626362653935353331
34366663303235653431616135343963643935303336313231343562376430343564393832343335
36363130313533373137383738346438666634303537633232636535303835636333653636303937
39356339656234303432
39363436643831373861376361613830316334613939346338616636393462663033393261633838
6337336161393063646136613538396366653538656435360a303062636463383739653730346639
61323634306265613336313634653039313639663836363032353261383566393865613166613032
3837313634633466610a313062646237396138316361303361663565353862363139343566306539
38303161303163323265376539323939393938373965353934303535613962653534363362346563
61643638353138623162353364353736396162613735333063633739346132613161303564356437
62343535363263646463306466663536613937393463666336396332646533343439613433626566
38643363343065393165646134343935386461626166316662356365366666363737653336626631
64643230616431396666666462303366343164323233303139643939346635353730316234386163
35613235643034643833393233373536383863333763393066373564353535353463363336316335
63363537643432663266386438316563656663656462333039303861393364333966383430643263
63356435373064633861343137616637393161383361306135373864386235653034323732316663
65336465386135663532356433386562666639333464633362663131646237613034646563396133
33303464633635636233626633353038656230373266666132323561383866343632333561323363
61346664623338376436373332646232646235323639633262666166346535663238653563363239
34663365633363313433376333653534333364393635316235333965383262313563373161663065
36393565396534353235623238303835343334646632306638306332336539616463393966653538
35336462623031326539633139636533633632623137393463333531663935323765663139306361
66643434393533313039356434326438626265323066613966323634306632653765363834613034
30373039336536393865383265643335396232643537343363313338383838383030386665303237
64363666346535633237353462333232623132353031323231623338356136656261303662656465
31313039643561623635643435333133663032313964323061393231666336343233363038616231
36356262326530383233336130326361613431623866633832663361633937646461343731343938
33306262346463623935663466356264393837626239313739356431653163376563333234346566
38373663643532313635333131663239383736343930623735323861663037356136353433633865
63626435613936303661366637623338633961643137613933303735366265663933396130363039
34396637643638613839306639343765393539653164616536653661373264376436626639316666
61303835323761643531326438363035343539383464376433363534623934366534373631353364
61383866323737316430303736366533643939313637393631303833363431613562303639323939
66313434613963656464383964313734383938353366306462666537653563336465376464303538
34336531663334303938333739313638636363623562613536333736386137363139653164626261
62663662316365663563646164303935323866633336633939323837393962393130626330666233
63663661303565646236623130663034636264353235376561306630376365613966663536303963
63643161386435633831393334333035653761393863373731616239313235383033633439376166
39613762376162386231633938393036633461303732323337656430373430636435313337303365
37646461336339623339316663616636373036656564383462356562306465623762653162633963
35636466386138333564666564323034393162633965386133643235303938616439333130353637
61343536323034366464653138353665326436396133313432666563353335383733363335613562
61646365346665383866623364396138323666326338313530353663323938613362653038313339
32613663616535313661386538366330373364366637386634633437646362383764346263636434
35616166393065343038643861636333373738363335353164326435303961326662356230323262
35656531653535643630376330393731643532353132366662636664626132646632306361323035
31373136616435336362633439356339336466313337623538383763386132396135653864386638
31393864363466653137643565306462616238333435343036613331653866393532313861376331
33646636623666343439616332386363373664346164313963623861393134666463383366633539
35313761333564303635656364303566643436393130356163623137313530653539656537653139
38336636623732313630303933303962303561376436623737633139643564343166326335386639
31373437336139326562613339393235393065396538333566323864643639303132313733396132
35613532396363326166313061353136373965303964623534653634613639303764393038333037
63656131616463663565653134363336326139303736313138366262616338643339316231663631
30656132386462393433313261313466303239346138623433643634616465656139343764353338
62616139613731363665333438383861623837643432643134626461643631323034383262656439
33653563323434343964633236353434643739333863636630636363633639373630

View File

@ -3,7 +3,7 @@ dhcpd_lease_time: '72'
dhcpd_domain_name: "lan.{{ domain.name }}"
dhcpd_nameservers:
- '192.168.1.4'
- '192.168.1.41'
- '192.168.1.40'
dhcpd_zones:
- zone: "lan.{{ domain.name }}."
@ -41,17 +41,10 @@ dhcpd_hosts:
- hostname: 'oscar'
address: '192.168.1.40'
ethernet: '7C:83:34:B3:49:9A'
ethernet: '68:1D:EF:3C:F0:44'
- hostname: 'bleys'
address: '192.168.1.42'
ethernet: '68:1d:ef:2b:3d:24'
- hostname: 'VMAS-HML'
address: '192.168.1.50'
ethernet: '52:54:00:02:74:ed'
- hostname: 'VMAS-BUILD'
address: '192.168.1.53'
ethernet: '52:54:13:1e:93'
- hostname: 'xiaomi-chambre-gateway'
@ -69,4 +62,7 @@ dhcpd_hosts:
- hostname: 'shelly-chambre-ventilo'
address: '192.168.1.65'
ethernet: 'e0:98:06:97:78:0b'
- hostname: 'shelly-Bureau-chauffeau'
address: '192.168.1.66'
ethernet: '8c:aa:b5:42:b9:b9'

View File

@ -1,3 +1,2 @@
nomad_datacenter: homelab
nomad_allow_privileged: True
system_wol_enable: True

View File

@ -7,6 +7,7 @@ nomad_client_meta:
- name: "env"
value: "production"
vault_unseal_keys_dir_output: "~/vaultUnseal/production"
env_default_nfs_path: "/volume2"
env_default_nfs_path: ""
env_media_nfs_path: "/volume1"
env_automount: true
nas_ip: "192.168.1.43"

View File

@ -1,5 +1,5 @@
domain:
name: ducamps-dev.eu
name: ducamps.dev
#systemd_mounts: []
#systemd_mounts_enabled: []
consul_bootstrap_expect: 2
@ -14,6 +14,8 @@ hosts_entries:
- ip: "{{ hostvars['nas-dev']['ansible_default_ipv4']['address'] }}"
name: diskstation.ducamps.eu
env_default_nfs_path: "/var/local/volume1"
env_media_nfs_path: "{{ env_default_nfs_path }}"
env_automount: false
env_default_nfs_path: ""
env_automount: true
nas_ip: "nfs.service.consul"

View File

@ -1,6 +1,10 @@
---
ansible_host: "192.168.1.42"
ansible_python_interpreter: "/usr/bin/python3"
default_interface: "enp2s0"
consul_iface: "{{ default_interface}}"
vault_iface: "{{ default_interface}}"
nfs_iface: "{{ default_interface}}"
wireguard_address: "10.0.0.7/24"
wireguard_byhost_allowed_ips:
merlin: 10.0.0.7,192.168.1.42,192.168.1.0/24
@ -11,13 +15,13 @@ wireguard_endpoint: ""
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o enp2s0 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{default_interface}} -j MASQUERADE
- sysctl -w net.ipv4.ip_forward=1
wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o enp2s0 -j MASQUERADE
- iptables -t nat -D POSTROUTING -o {default_interface} -j MASQUERADE
- sysctl -w net.ipv4.ip_forward=0
partition_table:

View File

@ -1,22 +1,23 @@
---
ansible_host: 10.0.0.1
#ansible_host: 135.181.150.203
default_interface: "eth0"
wireguard_address: "10.0.0.1/24"
wireguard_endpoint: "135.181.150.203"
wireguard_persistent_keepalive: "20"
wireguard_allowed_ips: "10.0.0.1/32,10.0.0.3/32,10.0.0.5/32"
wireguard_allowed_ips: 10.0.0.1
wireguard_postup:
- iptables -A FORWARD -o %i -j ACCEPT
- iptables -A FORWARD -i %i -j ACCEPT
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
- sysctl -w net.ipv4.ip_forward=1
- resolvectl dns %i 192.168.1.4 192.168.1.41; resolvectl domain %i '~ducamps.win' '~ducamps.eu' '~{{ consul_domain }}'
wireguard_postdown:
- iptables -D FORWARD -i %i -j ACCEPT
- iptables -D FORWARD -o %i -j ACCEPT
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
- sysctl -w net.ipv4.ip_forward=0
wireguard_unmanaged_peers:

View File

@ -1,6 +1,10 @@
---
ansible_host: "192.168.1.41"
ansible_python_interpreter: "/usr/bin/python3"
default_interface: "enu1u1"
consul_iface: "{{ default_interface }}"
vault_iface: "{{ default_interface }}"
wireguard_address: "10.0.0.6/24"
wireguard_byhost_allowed_ips:
merlin: 10.0.0.6,192.168.1.41
@ -11,10 +15,10 @@ wireguard_endpoint: ""
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o enu1u1 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o enu1u1 -j MASQUERADE
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE

View File

@ -1,4 +1,8 @@
---
default_interface: eth0
vault_iface: "{{ default_interface}}"
ansible_host: gerard-dev.lan.ducamps.dev
wireguard_address: "10.0.1.6/24"
perrsistent_keepalive: "20"
wireguard_endpoint: ""
@ -6,10 +10,10 @@ wireguard_endpoint: ""
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -D POSTROUTING -o {{ default_interface}} -j MASQUERADE

View File

@ -1,31 +1,39 @@
---
ansible_host: 10.0.0.4
#ansible_host: 65.21.2.14
default_interface: "ens3"
nfs_iface: "wg0"
wireguard_address: "10.0.0.4/24"
wireguard_endpoint: "95.216.217.5"
wireguard_persistent_keepalive: "30"
wireguard_allowed_ips: "10.0.0.4/32,10.0.0.3/32,10.0.0.5/32"
wireguard_endpoint: "65.21.2.14"
wireguard_persistent_keepalive: "20"
wireguard_byhost_allowed_ips:
oscar: "0.0.0.0/0"
bleys: "0.0.0.0/0"
wireguard_allowed_ips: "10.0.0.4/32,10.0.0.3,10.0.0.5"
wireguard_postup:
- iptables -A FORWARD -o %i -j ACCEPT
- iptables -A FORWARD -i %i -j ACCEPT
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
- sysctl -w net.ipv4.ip_forward=1
- resolvectl dns %i 192.168.1.4 192.168.1.41; resolvectl domain %i '~ducamps.win' '~ducamps.eu' '~{{ consul_domain }}'
wireguard_postdown:
- iptables -D FORWARD -i %i -j ACCEPT
- iptables -D FORWARD -o %i -j ACCEPT
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
- sysctl -w net.ipv4.ip_forward=0
wireguard_unmanaged_peers:
phone:
public_key: ioG35kDFTtip+Acfq+je9qDHYbZij+J6+Pg3T6Z4N0w=
public_key: IYKgrQ2VJUbOnupSqedOfIilsbmBBABZUTRF9ZoTrkc=
allowed_ips: 10.0.0.3/32
persistent_keepalive: 0
zen:
public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
allowed_ips: 10.0.0.5/32
persistent_keepalive: 0
wireguard_dns: "192.168.1.41,192.168.1.4"
wireguard_dns: "192.168.1.4,192.168.1.41"
consul_client_addr: "127.0.0.1 10.0.0.4"
consul_bind_address: "10.0.0.4"
consul_ui: True
@ -35,7 +43,8 @@ nomad_host_networks:
- name: "private"
interface: wg0
- name: "public"
interface: eth0
interface: ens3
- name: "default"
interface: wg0
vault_listener_address: 10.0.0.4
nomad_plugins_podman: True

View File

@ -1,4 +1,8 @@
---
ansible_host: merlin-dev.lan.ducamps.dev
default_interface: eth0
vault_iface: "{{ default_interface}}"
wireguard_address: "10.0.1.4/24"
wireguard_endpoint: "{{ ansible_default_ipv4.address }}"
wireguard_persistent_keepalive: "30"
@ -6,12 +10,12 @@ wireguard_persistent_keepalive: "30"
wireguard_postup:
- iptables -A FORWARD -o %i -j ACCEPT
- iptables -A FORWARD -i %i -j ACCEPT
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
wireguard_postdown:
- iptables -D FORWARD -i %i -j ACCEPT
- iptables -D FORWARD -o %i -j ACCEPT
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
wireguard_unmanaged_peers:
phone:

View File

@ -1,16 +0,0 @@
---
wireguard_address: "10.0.1.8/24"
perrsistent_keepalive: "30"
wireguard_endpoint: ""
wireguard_byhost_allowed_ips:
merlin: 10.0.0.8,192.168.1.10
corwin: 10.0.0.8,192.168.1.10
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE

View File

@ -1,4 +1,7 @@
---
ansible_host: nas-dev.lan.ducamps.dev
default_interface: eth0
vault_iface: "{{ default_interface}}"
wireguard_address: "10.0.1.8/24"
perrsistent_keepalive: "30"
wireguard_endpoint: ""
@ -6,9 +9,9 @@ wireguard_endpoint: ""
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE

ansible/host_vars/oberon
View File

@ -0,0 +1,19 @@
---
wireguard_address: "10.0.0.8/24"
default_interface: "enp2s0"
consul_iface: "{{ default_interface}}"
vault_iface: "{{ default_interface}}"
perrsistent_keepalive: "30"
wireguard_endpoint: ""
wireguard_byhost_allowed_ips:
merlin: 10.0.0.8,192.168.1.43
corwin: 10.0.0.8,192.168.1.43
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE

View File

@ -1,4 +1,9 @@
---
default_interface: "enp1s0"
consul_iface: "{{ default_interface}}"
vault_iface: "{{ default_interface}}"
nfs_iface: "{{ default_interface}}"
nomad_client_cpu_total_compute: 8000
wireguard_address: "10.0.0.2/24"
wireguard_byhost_allowed_ips:
merlin: 10.0.0.2,192.168.1.40
@ -9,12 +14,12 @@ wireguard_endpoint: ""
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o enp2s0 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o enp2s0 -j MASQUERADE
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
partition_table:
- device: "/dev/sda"

View File

@ -1,4 +1,7 @@
---
ansible_host: oscar-dev.lan.ducamps.dev
default_interface: eth0
vault_iface: "{{ default_interface}}"
wireguard_address: "10.0.1.2/24"
perrsistent_keepalive: "30"
wireguard_endpoint: ""
@ -6,9 +9,9 @@ wireguard_endpoint: ""
wireguard_postup:
- iptables -A FORWARD -i wg0 -j ACCEPT
- iptables -A FORWARD -o wg0 -j ACCEPT
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
wireguard_postdown:
- iptables -D FORWARD -i wg0 -j ACCEPT
- iptables -D FORWARD -o wg0 -j ACCEPT
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE

View File

@ -5,7 +5,11 @@ requirements:
deploy_production:
ansible-playbook site.yml -i production -u ansible
deploy_production_wiregard:
ansible-playbook playbooks/wireguard.yml -i production -u ansible
deploy_staging:
ansible-playbook playbooks/wireguard.yml -i staging -u ansible
ansible-playbook site.yml -i staging -u ansible

View File

@ -1,14 +1,26 @@
---
- name: Consul install
hosts: all
roles:
- role: ansible-consul
become: true
- name: Vault install
hosts: homelab
roles:
- role: ansible-hashicorp-vault
become: true
post_tasks:
- name: Stat root file
ansible.builtin.stat:
path: "{{ vault_unseal_keys_dir_output }}/rootkey"
register: rootkey_exist
delegate_to: localhost
- name: Reading root contents
ansible.builtin.command: cat "{{ vault_unseal_keys_dir_output }}/rootkey"
register: root_token
delegate_to: localhost
when: rootkey_exist.stat.exists
changed_when: false
- name: debug
ansible.builtin.debug:
@ -20,7 +32,7 @@
period: 72h
no_parent: true
token: "{{ root_token.stdout }}"
url: http://{{ ansible_default_ipv4.address }}:8200
url: "http://active.vault.service.consul:8200"
retries: 4
run_once: true
delegate_to: localhost
@ -32,13 +44,11 @@
nomad_vault_token: "{{ nomad_token_data.login.auth.client_token }}"
when: nomad_token_data.login is defined
- name: Hashicorp stack
- name: nomad
hosts: all
vars:
unseal_keys_dir_output: ~/vaultunseal
roles:
- role: ansible-consul
become: true
- role: ansible-nomad
become: true
- role: docker

View File

@ -1,6 +1,6 @@
---
- hosts: all
become: true
gather_facts: false
become: true
roles:
- ansible_bootstrap

View File

@ -14,3 +14,15 @@
- docker
become: true
become_user: '{{ user.name }}'
- hosts: all
roles:
- role: user_config
vars:
user_config_username: "{{ user.name }}"
become_user: "{{ user.name }}"
become: true
- role: user_config
vars:
user_config_username: root
become: true

View File

@ -1,16 +1,54 @@
---
- hosts: database
- name: Database playbook
hosts: database
vars:
# certbot_force: true
pre_tasks:
- name: Install Pg vertors (immich)
aur:
name: pgvecto.rs-bin
state: present
become: true
become_user: aur_builder
- name: Add database member to pg_hba replication
ansible.builtin.set_fact:
postgresql_hba_entries: "{{ postgresql_hba_entries + [\
{'type':'host', \
'database': 'replication',\
'user':'repli',\
'address':hostvars[item]['ansible_'+hostvars[item]['default_interface']]['ipv4']['address']+'/32',\
'auth_method':'trust'}] }}"
loop: '{{ groups.database }}'
roles:
- role: ansible-role-postgresql
become: true
tasks:
- name: add pg_read_all_data to dump
community.postgresql.postgresql_membership:
target_roles:
- dump
groups:
- pg_read_all_data
- name: Launch replication
ansible.builtin.command: pg_basebackup -D /var/lib/postgres/data -h {{groups["database_active"]|first}} -U repli -Fp -Xs -P -R -w
args:
creates: /var/lib/postgres/data/postgresql.conf
become: true
become_user: "{{ postgresql_user }}"
become_user: postgres
when: inventory_hostname in groups["database_standby"]
- name: Ensure PostgreSQL is started and enabled on boot.
ansible.builtin.service:
name: '{{ postgresql_daemon }}'
state: '{{ postgresql_service_state }}'
enabled: '{{ postgresql_service_enabled }}'
become: true
- name: Set Postgress shared libraries
community.postgresql.postgresql_set:
name: shared_preload_libraries
value: vectors.so
become: true
become_user: postgres
when: inventory_hostname in groups["database_active"]
notify: Restart postgresql
- name: Set Postgress shared libraries
community.postgresql.postgresql_set:
name: search_path
value: '$user, public, vectors'
become: true
become_user: postgres
when: inventory_hostname in groups["database_active"]

View File

@ -1,10 +1,28 @@
---
- hosts: NAS
- name: gather all
hosts: all
- name: NAS playbook
hosts: NAS
vars:
# certbot_force: true
pre_tasks:
- name: include task NasBind
ansible.builtin.include_tasks:
file: tasks/NasBind.yml
loop: "{{ nas_bind_source }}"
- name: create nomad folder
ansible.builtin.file:
path: "{{ nas_bind_target }}/nomad/{{ item.name }}"
owner: "{{ item.owner|default('root') }}"
state: directory
become: true
loop: "{{ NAS_nomad_folder }}"
roles:
- role: ansible-role-nfs
- role: ansible-role-nut
become: true
- role: ansible-role-vsftpd
- role: ansible-role-nfs
become: true
- role: ansible-role-pureftpd
become: true
- role: vladgh.samba.server
become: true
#- samba

View File

@ -0,0 +1,18 @@
- name: Ensure base NFS directory exist
ansible.builtin.file:
path: "{{ item.dest }}"
state: directory
become: true
- name: Ensure source NFS directory exist
ansible.builtin.file:
path: "{{ item.source }}"
state: directory
become: true
- name: Bind NAS export
ansible.posix.mount:
path: "{{ item.dest }}"
src: "{{ item.source }}"
opts: bind
fstype: none
state: mounted
become: true

View File

@ -0,0 +1 @@
path = /exports/homes/%S

View File

@ -1,12 +0,0 @@
---
- hosts: all
roles:
- role: user_config
vars:
user_config_username: "{{ user.name }}"
become_user: "{{ user.name }}"
become: true
- role: user_config
vars:
user_config_username: root
become: true

View File

@ -1,8 +1,8 @@
[DNS]
gerard
oscar
[dhcp]
gerard
oberon
[database_active]
bleys
@ -22,11 +22,11 @@ bleys
production
[NAS]
nas
oberon
[cluster]
oscar
gerard
#gerard
bleys
@ -35,7 +35,6 @@ NAS
cluster
[VPS]
corwin
merlin
[region:children]
@ -44,8 +43,10 @@ VPS
production
[production]
corwin
oscar
merlin
gerard
#gerard
bleys
oberon
[staging]

View File

@ -1,4 +1,5 @@
---
roles:
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-arch-provissionning.git
scm: git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-postgresql.git
@ -39,6 +40,10 @@
scm: git
- src: git@github.com:vincentDcmps/ansible-role-nfs.git
scm: git
- src: https://github.com/PowerDNS/pdns_recursor-ansible.git
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-samba.git
- src: git@github.com:vincentDcmps/ansible-role-nut.git
scm: git
- src: git@git.ducamps.eu:2222/ansible-roles/ansible-role-pureftpd.git
scm: git
- src: https://github.com/PowerDNS/pdns_recursor-ansible.git
collections:
- name: vladgh.samba

View File

@ -1,12 +1,10 @@
---
- import_playbook: playbooks/server.yml
- import_playbook: playbooks/nas.yml
- import_playbook: playbooks/autofs.yml
- import_playbook: playbooks/sssd.yml
- import_playbook: playbooks/wireguard.yml
- import_playbook: playbooks/dhcpd.yml
- import_playbook: playbooks/dns.yml
- import_playbook: playbooks/HashicorpStack.yml
- import_playbook: playbooks/nas.yml
- import_playbook: playbooks/autofs.yml
- import_playbook: playbooks/sssd.yml
- import_playbook: playbooks/database.yml
- import_playbook: playbooks/rsyncd.yml
- import_playbook: playbooks/create_user.yml

View File

@ -5,6 +5,7 @@ oscar-dev
oscar-dev
[database_standby]
gerard-dev
[database:children]
database_active
@ -39,3 +40,5 @@ oscar-dev
gerard-dev
merlin-dev
nas-dev
[production]

View File

@ -100,3 +100,18 @@ agains:
put one recursor on the cluster over the authority server and keep the recursor on gerard for better redundancy
### Consequences
## 005 physical Recursor location
### Status
done
### Context
following the NAS migration the physical DNS recursor was installed directly on the NAS; this creates a SPOF: when the NAS fails, the recursors on the Nomad cluster stop because of the volume dependency
### Decision
put the physical recursor on a cluster node: that way, for DNS to fail, the NAS and that nomad node must be down at the same time

View File

@ -16,11 +16,27 @@ Storage:
- hot data (nomad, documents, fresh downloads, music?) on SSD, cold data on HDD (films, series, photos)
- at least 2 HDD and 2 SSD
Hardware:
- 2.5 Gbps networking would be good for future growth
- at least 4 GB RAM
- at least 4 GB RAM (expandable would be appreciated)
Software:
be able to install a custom linux distribution
### Decision
- due to form factor/power consumption and SSD capability my choice is the ASUSTOR Nimbustor 2 Gen 2 AS5402: it matches the needs and is less expensive than a DIY NAS
- buy only one additional 2 TB SSD to store the system and hot data
### Consequences
need to migrate the data while keeping the same disks (see the copy sketch after this list):
- install the system
- copy all data from the 2 TB HDD to the SSD, then format the 2 TB HDD
- copy download data from the 4 TB HDD to the SSD
- copy series to the 2 TB HDD and copy films to an external hard drive
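A hedged sketch of one of those copy steps, with illustrative mount points that are not named in the ADR:

# preserves ownership, permissions, hard links and extended attributes;
# the trailing slash on the source copies its contents, not the directory itself
rsync -aHAX --info=progress2 /mnt/hdd-2tb/ /mnt/ssd-2tb/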

View File

@ -0,0 +1,25 @@
# Docker pull-through
## 001 architecture consideration
### Status
Accepted
### Context
Docker Hub enforces a pull rate limit; if something goes wrong on our infrastructure we can hit this limit quickly. The solution is to implement a pull-through proxy.
### Decision
create two container tasks: one Docker Hub pull-through cache and one for ghcr
we could add these registries to traefik to serve both under port 5000, but this would add a traefik dependency on rebuild
so to begin we will use one service per registry on two different static ports
### Consequences
- these registries need to be started first on cluster creation
- need to update all job images with the local proxy url
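For reference, a pull-through cache with the stock registry image only needs the upstream URL in its config; a minimal sketch (values assumed, not taken from the actual job files; the ghcr instance would be identical with remoteurl https://ghcr.io):

version: 0.1
storage:
  filesystem:
    rootdirectory: /var/lib/registry
http:
  addr: :5000
proxy:
  remoteurl: https://registry-1.docker.io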

View File

@ -0,0 +1,8 @@
# Troubleshooting
## Issue with SMTP Traefik port
Ensure that no other Traefik router (HTTP or TCP) is listening on the smtp entrypoint or
on all entrypoints; this can disturb the SMTP TLS connection. A sketch of a correctly
pinned router follows.
See [here](https://doc.traefik.io/traefik/routing/routers/#entrypoints_1).
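
As an illustration, a minimal sketch of a Nomad service whose TCP router is pinned to a dedicated entrypoint (the `smtp` entrypoint/router names and the TLS passthrough are assumptions for the example):

```hcl
service {
  name = "smtp"
  port = "smtp"
  tags = [
    "traefik.enable=true",
    # Without an explicit entrypoints list, a Traefik router listens on
    # every entrypoint, which can disturb the SMTP TLS handshake.
    "traefik.tcp.routers.smtp.entrypoints=smtp",
    "traefik.tcp.routers.smtp.rule=HostSNI(`*`)",
    "traefik.tcp.routers.smtp.tls.passthrough=true",
  ]
}
```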

View File

@ -10,12 +10,15 @@ vault-dev:
./vault/standalone_vault.sh $(FILE);\
fi
create-dev:
vagranup:
vagrant up
make -C ansible deploy_staging
create-dev-base:
vagrant up
create-dev: vagranup DNS-stagging
make -C ansible deploy_staging
make -C terraform deploy_vault env=staging
VAULT_TOKEN=$(shell cat ~/vaultUnseal/staging/rootkey) python ./script/generate-vault-secret
create-dev-base: vagranup DNS-stagging
make -C ansible deploy_staging_base
@ -24,3 +27,13 @@ destroy-dev:
serve:
mkdocs serve
# point virbr2 at the staging DNS resolvers and route consul lookups through them
DNS-stagging:
$(eval dns := $(shell dig oscar-dev.lan.ducamps.dev +short))
$(eval dns1 := $(shell dig nas-dev.lan.ducamps.dev +short))
sudo resolvectl dns virbr2 "$(dns)" "$(dns1)";sudo resolvectl domain virbr2 "~consul";sudo systemctl restart systemd-resolved.service
# clear the overrides to fall back to production DNS
DNS-production:
sudo resolvectl dns virbr2 "";sudo resolvectl domain virbr2 "";sudo systemctl restart systemd-resolved.service

View File

@ -35,7 +35,7 @@ job "MQTT" {
]
}
config {
image = "eclipse-mosquitto"
image = "docker.service.consul:5000/library/eclipse-mosquitto"
ports = ["mosquittoWS", "mosquittoMQTT"]
volumes = [
"/mnt/diskstation/nomad/mosquitto:/mosquitto/data",

View File

@ -0,0 +1,62 @@
job "actualbudget" {
datacenters = ["homelab"]
priority = 50
type = "service"
meta {
forcedeploy = "0"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "actualbudget"{
network {
mode = "host"
port "http" {
to = 5006
}
}
task "actualbudget-server" {
driver = "docker"
service {
name = "actualbudget"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`budget.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=budget.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
"homer.enable=true",
"homer.name=${NOMAD_TASK_NAME}",
"homer.service=Application",
"homer.target=_blank",
"homer.logo=https://budget.ducamps.eu/apple-touch-icon.png",
"homer.url=https://budget.ducamps.eu",
]
}
config {
image = "ghcr.service.consul:5000/actualbudget/actual-server:latest"
ports = ["http"]
volumes = [
"/mnt/diskstation/nomad/actualbudget:/data"
]
}
env {
}
resources {
memory = 300
}
}
}
}

View File

@ -0,0 +1,240 @@
job "borgmatic" {
datacenters = ["homelab"]
priority = 50
type = "service"
meta {
forcedeploy = "0"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "NAS"
}
group "borgmatic"{
vault{
policies= ["borgmatic"]
}
task "borgmatic" {
action "manual-backup" {
command = "/usr/local/bin/borgmatic"
args = ["create",
"prune",
"--verbosity",
"1"
]
}
action "list-backup" {
command = "/usr/local/bin/borgmatic"
args = ["rlist"]
}
driver = "docker"
config {
image = "ghcr.service.consul:5000/borgmatic-collective/borgmatic"
volumes = [
"/exports:/exports",
"local/borgmatic.d:/etc/borgmatic.d",
"secret/id_rsa:/root/.ssh/id_rsa",
"secret/known_hosts:/root/.ssh/known_hosts",
"/exports/nomad/borgmatic:/root/.cache/borg",
]
}
env {
}
template {
data= <<EOH
BORG_RSH="ssh -i /root/.ssh/id_rsa -p 23"
{{ with secret "secrets/data/nomad/borgmatic"}}
BORG_PASSPHRASE= {{.Data.data.passphrase}}
{{end}}
EOH
destination = "secrets/sample.env"
env = true
}
template {
data= <<EOH
0 2 * * * PATH=$PATH:/usr/local/bin /usr/local/bin/borgmatic create prune --verbosity 1
0 23 1 * * PATH=$PATH:/usr/local/bin /usr/local/bin/borgmatic check
EOH
destination = "local/borgmatic.d/crontab.txt"
}
template {
data= <<EOH
# List of source directories to backup (required). Globs and
# tildes are expanded. Do not backslash spaces in path names.
source_directories:
- /exports/ebook
- /exports/homes
- /exports/music
- /exports/nomad
- /exports/photo
repositories:
- path: ssh://u304977@u304977.your-storagebox.de/./{{if eq "production" (env "meta.env") }}backup_hamelab{{else}}backup_homelab_dev{{end}}
label: {{if eq "production" (env "meta.env") }}backup_hamelab{{else}}backup_homelab_dev{{end}}
exclude_patterns:
- '*/nomad/jellyfin/cache'
- '*nomad/loki/'
- '*nomad/prometheus'
- '*nomad/registry'
- '*nomad/pacoloco'
- '*nomad/pihole'
- '*nomad/jellyfin/*'
- '*.log*'
match_archives: '*'
archive_name_format: '{{ env "node.datacenter" }}-{now:%Y-%m-%dT%H:%M:%S.%f}'
extra_borg_options:
# Extra command-line options to pass to "borg init".
# init: --extra-option
# Extra command-line options to pass to "borg prune".
# prune: --extra-option
# Extra command-line options to pass to "borg compact".
# compact: --extra-option
# Extra command-line options to pass to "borg create".
create: --progress --stats
# Extra command-line options to pass to "borg check".
# check: --extra-option
# Keep all archives within this time interval.
# keep_within: 3H
# Number of secondly archives to keep.
# keep_secondly: 60
# Number of minutely archives to keep.
# keep_minutely: 60
# Number of hourly archives to keep.
# keep_hourly: 24
# Number of daily archives to keep.
keep_daily: 7
# Number of weekly archives to keep.
keep_weekly: 4
# Number of monthly archives to keep.
# keep_monthly: 6
# Number of yearly archives to keep.
# keep_yearly: 1
checks:
- name: repository
# - archives
# check_repositories:
# - user@backupserver:sourcehostname.borg
# check_last: 3
# output:
# color: false
# List of one or more shell commands or scripts to execute
# before creating a backup, run once per configuration file.
# before_backup:
# - echo "Starting a backup."
# List of one or more shell commands or scripts to execute
# before pruning, run once per configuration file.
# before_prune:
# - echo "Starting pruning."
# List of one or more shell commands or scripts to execute
# before compaction, run once per configuration file.
# before_compact:
# - echo "Starting compaction."
# List of one or more shell commands or scripts to execute
# before consistency checks, run once per configuration file.
# before_check:
# - echo "Starting checks."
# List of one or more shell commands or scripts to execute
# before extracting a backup, run once per configuration file.
# before_extract:
# - echo "Starting extracting."
# List of one or more shell commands or scripts to execute
# after creating a backup, run once per configuration file.
# after_backup:
# - echo "Finished a backup."
# List of one or more shell commands or scripts to execute
# after compaction, run once per configuration file.
# after_compact:
# - echo "Finished compaction."
# List of one or more shell commands or scripts to execute
# after pruning, run once per configuration file.
# after_prune:
# - echo "Finished pruning."
# List of one or more shell commands or scripts to execute
# after consistency checks, run once per configuration file.
# after_check:
# - echo "Finished checks."
# List of one or more shell commands or scripts to execute
# after extracting a backup, run once per configuration file.
# after_extract:
# - echo "Finished extracting."
# List of one or more shell commands or scripts to execute
# when an exception occurs during a "prune", "compact",
# "create", or "check" action or an associated before/after
# hook.
# on_error:
# - echo "Error during prune/compact/create/check."
# List of one or more shell commands or scripts to execute
# before running all actions (if one of them is "create").
# These are collected from all configuration files and then
# run once before all of them (prior to all actions).
# before_everything:
# - echo "Starting actions."
# List of one or more shell commands or scripts to execute
# after running all actions (if one of them is "create").
# These are collected from all configuration files and then
# run once after all of them (after any action).
# after_everything:
# - echo "Completed actions."
EOH
destination = "local/borgmatic.d/config.yaml"
}
template {
data= <<EOH
{{ with secret "secrets/data/nomad/borgmatic"}}
{{.Data.data.privatekey}}
{{end}}
EOH
destination = "secret/id_rsa"
perms= "700"
}
template {
data= <<EOH
[u304977.your-storagebox.de]:23 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIICf9svRenC/PLKIL9nk6K/pxQgoiFC41wTNvoIncOxs
[u304977.your-storagebox.de]:23 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA5EB5p/5Hp3hGW1oHok+PIOH9Pbn7cnUiGmUEBrCVjnAw+HrKyN8bYVV0dIGllswYXwkG/+bgiBlE6IVIBAq+JwVWu1Sss3KarHY3OvFJUXZoZyRRg/Gc/+LRCE7lyKpwWQ70dbelGRyyJFH36eNv6ySXoUYtGkwlU5IVaHPApOxe4LHPZa/qhSRbPo2hwoh0orCtgejRebNtW5nlx00DNFgsvn8Svz2cIYLxsPVzKgUxs8Zxsxgn+Q/UvR7uq4AbAhyBMLxv7DjJ1pc7PJocuTno2Rw9uMZi1gkjbnmiOh6TTXIEWbnroyIhwc8555uto9melEUmWNQ+C+PwAK+MPw==
[u304977.your-storagebox.de]:23 ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAGK0po6usux4Qv2d8zKZN1dDvbWjxKkGsx7XwFdSUCnF19Q8psHEUWR7C/LtSQ5crU/g+tQVRBtSgoUcE8T+FWp5wBxKvWG2X9gD+s9/4zRmDeSJR77W6gSA/+hpOZoSE+4KgNdnbYSNtbZH/dN74EG7GLb/gcIpbUUzPNXpfKl7mQitw==
EOH
destination = "secret/known_hosts"
perms="700"
}
resources {
memory = 300
memory_max = 1000
}
}
}
}

View File

@ -39,7 +39,7 @@ job "chainetv" {
]
}
config {
image = "ducampsv/chainetv:latest"
image = "docker.service.consul:5000/ducampsv/chainetv:latest"
ports = ["http"]
}
resources {

View File

@ -1,5 +1,5 @@
job "dockermailserver" {
datacenters = ["hetzner"]
datacenters = ["homelab"]
priority = 90
type = "service"
meta {
@ -9,7 +9,11 @@ job "dockermailserver" {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "dockermailserver" {
network {
mode = "host"
@ -115,7 +119,7 @@ job "dockermailserver" {
task "docker-mailserver" {
driver = "docker"
config {
image = "ghcr.io/docker-mailserver/docker-mailserver:latest"
image = "ghcr.service.consul:5000/docker-mailserver/docker-mailserver:latest"
ports = ["smtp", "esmtp", "imap","rspamd"]
volumes = [
"/mnt/diskstation/nomad/dms/mail-data:/var/mail",
@ -133,7 +137,7 @@ job "dockermailserver" {
env {
OVERRIDE_HOSTNAME = "mail.ducamps.eu"
DMS_VMAIL_UID = 1000000
DMS_VMAIL_GID = 100
DMS_VMAIL_GID = 984
SSL_TYPE= "letsencrypt"
LOG_LEVEL="info"
POSTMASTER_ADDRESS="vincent@ducamps.eu"
@ -169,7 +173,7 @@ submissions/inet/smtpd_upstream_proxy_protocol=haproxy
}
template {
data = <<EOH
haproxy_trusted_networks = 10.0.0.0/24, 127.0.0.0/8, 172.17.0.1
haproxy_trusted_networks = 10.0.0.0/24, 127.0.0.0/8, 172.17.0.1, 192.168.1.0/24
haproxy_timeout = 3 secs
service imap-login {
inet_listener imaps {

View File

@ -1,6 +1,6 @@
job "filestash" {
datacenters = ["hetzner"]
datacenters = ["homelab"]
priority = 50
type = "service"
meta {
@ -10,7 +10,11 @@ job "filestash" {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "filestash" {
network {
@ -44,7 +48,7 @@ job "filestash" {
]
}
config {
image = "machines/filestash"
image = "docker.service.consul:5000/machines/filestash"
ports = ["http"]
volumes = [
"/mnt/diskstation/nomad/filestash:/app/data/state"

View File

@ -27,7 +27,7 @@ job "ghostfolio" {
task "redis" {
driver = "docker"
config {
image = "redis"
image = "docker.service.consul:5000/library/redis"
ports = ["redis"]
}
resources {
@ -51,7 +51,7 @@ job "ghostfolio" {
]
}
config {
image = "ghostfolio/ghostfolio:latest"
image = "docker.service.consul:5000/ghostfolio/ghostfolio:latest"
ports = ["http"]
volumes = [
]
@ -80,6 +80,7 @@ job "ghostfolio" {
}
resources {
memory = 400
memory_max = 600
}
}

View File

@ -3,6 +3,11 @@ job "homeassistant" {
datacenters = ["homelab"]
priority = 90
type = "service"
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
meta {
forcedeploy = "0"
}
@ -52,7 +57,7 @@ job "homeassistant" {
}
}
config {
image = "homeassistant/home-assistant:stable"
image = "docker.service.consul:5000/homeassistant/home-assistant:stable"
ports = ["http", "coap"]
privileged = "true"
network_mode = "host"

View File

@ -0,0 +1,146 @@
job "immich" {
datacenters = ["homelab"]
priority = 50
type = "service"
meta {
forcedeploy = "0"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
group "immich" {
network {
mode = "host"
port "http" {
to = 3001
}
port "redis" {
to = 6379
}
port "machinelearning" {
to = 3003
}
}
volume "immich-upload" {
type = "csi"
source = "immich-upload"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
volume "immich-cache" {
type = "csi"
source = "immich-cache"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
volume "photo" {
type = "csi"
source = "photo"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
vault {
policies = ["immich"]
}
task "immich-server" {
driver = "docker"
service {
name = "immich"
port = "http"
tags = [
"homer.enable=true",
"homer.name=immich",
"homer.service=Application",
"homer.logo=https://immich.ducamps.eu/favicon-144.png",
"homer.target=_blank",
"homer.url=https://immich.ducamps.eu",
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
]
}
volume_mount {
volume = "immich-upload"
destination = "/usr/src/app/upload"
}
volume_mount {
volume = "photo"
destination = "/photo"
}
config {
image = "ghcr.service.consul:5000/immich-app/immich-server:release"
ports = ["http"]
volumes = [
"/etc/localtime:/etc/localtime"
]
}
template {
data = <<EOH
{{ with secret "secrets/data/database/immich"}}
DB_PASSWORD= {{ .Data.data.password }}
{{end}}
DB_DATABASE_NAME= immich
DB_USERNAME= immich
DB_HOSTNAME= active.db.service.consul
REDIS_HOSTNAME = {{env "NOMAD_IP_redis"}}
REDIS_PORT = {{env "NOMAD_HOST_PORT_redis"}}
IMMICH_MACHINE_LEARNING_URL = http://{{ env "NOMAD_ADDR_machinelearning"}}
EOH
destination = "secrets/immich.env"
env = true
}
resources {
memory = 600
memory_max = 1800
}
}
task "immich-machine-learning" {
driver = "docker"
volume_mount {
volume = "immich-cache"
destination = "/cache"
}
config {
image = "ghcr.service.consul:5000/immich-app/immich-machine-learning:main"
ports = ["machinelearning"]
}
template {
data = <<EOH
{{ with secret "secrets/data/database/immich"}}
DB_PASSWORD= {{ .Data.data.password }}
{{end}}
DB_DATABASE_NAME= immich
DB_USERNAME= immich
DB_HOSTNAME= active.db.service.consul
REDIS_HOSTNAME = {{env "NOMAD_IP_redis"}}
REDIS_PORT = {{env "NOMAD_HOST_PORT_redis"}}
EOH
destination = "secrets/immich.env"
env = true
}
resources {
memory = 200
memory_max = 1800
}
}
task "redis" {
driver = "docker"
config {
image="docker.service.consul:5000/library/redis:6.2-alpine"
ports = ["redis"]
}
resources {
memory = 50
}
}
}
}

View File

@ -2,6 +2,7 @@ job "jellyfin" {
datacenters = ["homelab"]
priority = 30
type = "service"
meta {
forcedeploy = "1"
}
@ -9,6 +10,11 @@ job "jellyfin" {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group jellyfin-vue {
network {
mode = "host"
@ -37,7 +43,7 @@ job "jellyfin" {
}
config {
image = "ghcr.io/jellyfin/jellyfin-vue:unstable"
image = "ghcr.service.consul:5000/jellyfin/jellyfin-vue:unstable"
ports = ["http"]
}
env {
@ -82,13 +88,13 @@ job "jellyfin" {
]
}
config {
image = "jellyfin/jellyfin"
image = "docker.service.consul:5000/jellyfin/jellyfin"
ports = ["http"]
volumes = [
"/mnt/diskstation/nomad/jellyfin/config:/config",
"/mnt/diskstation/nomad/jellyfin/cache:/cache",
"/mnt/diskstation/media/:/media",
"/mnt/diskstation/music/:/media2"
"/mnt/diskstation/media:/media",
"/mnt/diskstation/music:/music",
]
devices = [
{

nomad-job/apps/makefile Symbolic link
View File

@ -0,0 +1 @@
../makefile

View File

@ -0,0 +1,95 @@
job "mealie" {
datacenters = ["homelab"]
priority = 50
type = "service"
meta {
forcedeploy = "0"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
group "mealie" {
network {
mode = "host"
port "http" {
to = 9000
}
}
volume "mealie-data" {
type = "csi"
source = "mealie-data"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
vault {
policies = ["mealie"]
}
task "mealie-server" {
driver = "docker"
service {
name = "mealie"
port = "http"
tags = [
"homer.enable=true",
"homer.name=Mealie",
"homer.service=Application",
"homer.subtitle=Mealie",
"homer.logo=https://mealie.ducamps.eu/favicon.ico",
"homer.target=_blank",
"homer.url=https://${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
]
}
config {
image = "ghcr.io/mealie-recipes/mealie"
ports = ["http"]
}
volume_mount {
volume = "mealie-data"
destination = "/app/data"
}
env {
PUID = "1000001"
PGID = "1000001"
TZ = "Europe/Paris"
MAX_WORKERS = 1
WEB_CONCURRENCY = 1
BASE_URL = "https://mealie.ducamps.eu"
OIDC_USER_GROUP = "MealieUsers"
OIDC_ADMIN_GROUP = "MealieAdmins"
OIDC_AUTH_ENABLED = "True"
OIDC_SIGNUP_ENABLED = "true"
OIDC_CONFIGURATION_URL = "https://auth.ducamps.eu/.well-known/openid-configuration"
OIDC_CLIENT_ID = "mealie"
OIDC_AUTO_REDIRECT = "false"
OIDC_PROVIDER_NAME = "authelia"
DB_ENGINE = "postgres"
POSTGRES_USER = "mealie"
POSTGRES_SERVER = "active.db.service.consul"
POSTGRES_PORT = 5432
POSTGRES_DB = "mealie"
LOG_LEVEL = "DEBUG"
}
template {
data = <<EOH
{{ with secret "secrets/data/database/mealie"}}POSTGRES_PASSWORD= "{{ .Data.data.password }}" {{end}}
{{ with secret "secrets/data/authelia/mealie"}}OIDC_CLIENT_SECRET= "{{ .Data.data.password }}" {{end}}
EOH
destination = "secrets/var.env"
env = true
}
resources {
memory = 400
}
}
}
}

View File

@ -6,7 +6,11 @@ job "pacoloco" {
meta {
forcedeploy = "0"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "pacoloco" {
network {
mode = "host"
@ -28,10 +32,10 @@ job "pacoloco" {
]
}
config {
image = "ducampsv/pacoloco"
image = "docker.service.consul:5000/ducampsv/pacoloco"
ports = ["http"]
volumes = [
"/mnt/diskstation/archMirror:/var/cache/pacoloco",
"/mnt/diskstation/nomad/pacoloco:/var/cache/pacoloco",
"local/pacoloco.yaml:/etc/pacoloco.yaml"
]

View File

@ -6,7 +6,11 @@ job "paperless-ng" {
meta {
forcedeploy = "0"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
@ -29,7 +33,7 @@ job "paperless-ng" {
task "redis" {
driver = "docker"
config {
image = "redis"
image = "docker.service.consul:5000/library/redis"
ports = ["redis"]
}
resources {
@ -47,6 +51,7 @@ job "paperless-ng" {
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia",
"homer.enable=true",
"homer.name=Paperless",
"homer.service=Application",
@ -63,7 +68,7 @@ job "paperless-ng" {
}
}
config {
image = "ghcr.io/paperless-ngx/paperless-ngx"
image = "ghcr.service.consul:5000/paperless-ngx/paperless-ngx"
ports = ["http"]
volumes = [
"/mnt/diskstation/nomad/paperless-ng/media:/usr/src/paperless/media",
@ -82,6 +87,9 @@ job "paperless-ng" {
PAPERLESS_CONSUMER_POLLING = "60"
PAPERLESS_URL = "https://${NOMAD_JOB_NAME}.ducamps.eu"
PAPERLESS_ALLOWED_HOSTS = "192.168.1.42,192.168.1.40"
PAPERLESS_ENABLE_HTTP_REMOTE_USER = "true"
PAPERLESS_HTTP_REMOTE_USER_HEADER_NAME = "HTTP_REMOTE_USER"
PAPERLESS_LOGOUT_REDIRECT_URL= "https://auth.ducamps.eu/logout"
}
template {
@ -93,6 +101,7 @@ job "paperless-ng" {
}
resources {
memory = 950
memory_max = 1500
cpu = 2000
}
}

View File

@ -6,6 +6,11 @@ job "radicale" {
meta {
forcedeploy = "0"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "radicale" {
network {
mode = "host"
@ -39,11 +44,11 @@ job "radicale" {
]
}
config {
image = "tomsquest/docker-radicale"
image = "docker.service.consul:5000/tomsquest/docker-radicale"
ports = ["http"]
volumes = [
"local/config:/config/config",
"/mnt/diskstation/CardDav:/data"
"/mnt/diskstation/nomad/radicale:/data"
]
}

View File

@ -6,9 +6,6 @@ job "torrent" {
meta {
forcedeploy = "0"
}
vault {
policies= ["torrent"]
}
group "bittorent" {
network {
mode = "host"
@ -26,7 +23,7 @@ job "torrent" {
}
}
task "bittorent" {
driver = "podman"
driver = "docker"
service {
name = "bittorent"
port = "http"
@ -36,43 +33,35 @@ job "torrent" {
"homer.name=torrent",
"homer.url=https://torrent.ducamps.eu",
"homer.service=Application",
"homer.logo=https://${NOMAD_JOB_NAME}.ducamps.eu/images/favicon-196x196.png",
"homer.logo=https://fleet.linuxserver.io/images/linuxserver_rutorrent.png",
"homer.target=_blank",
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=torrentauth",
"traefik.http.middlewares.torrentauth.basicauth.users=admin:${ADMIN_HASHED_PWD}"
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia-basic",
]
}
template {
data = <<-EOF
ADMIN_HASHED_PWD={{ with secret "secrets/nomad/torrent" }}{{.Data.data.hashed_pwd}}{{ end }}
EOF
destination = "secrets/env"
env = true
}
user = "root"
config {
image = "docker.io/crazymax/rtorrent-rutorrent:latest"
privileged = "true"
ulimit {
nofile = "8192:8192"
}
image = "docker.service.consul:5000/crazymax/rtorrent-rutorrent:edge"
ports = [
"http",
"torrent",
"ecoute"
]
volumes = [
"/mnt/hetzner/storagebox/rutorrentConfig:/data",
"/opt/rutorrentConfig:/data",
"/mnt/hetzner/storagebox/file:/downloads"
]
}
env {
PUID = 100001
PGID = 984
PGID = 10
UMASK = 002
WEBUI_PORT = "8080"
}

View File

@ -0,0 +1,64 @@
job "rutorrentlocal" {
datacenters = ["homelab"]
priority = 80
type = "service"
meta {
forcedeploy = "0"
}
constraint {
attribute = "${node.unique.name}"
operator = "set_contains"
value = "oberon"
}
group "bittorent" {
network {
mode = "host"
port "http" {
to = 8080
}
port "torrent" {
static = 6881
}
port "ecoute" {
static = 50000
}
}
task "bittorent" {
driver = "podman"
service {
name = "bittorentlocal"
port = "http"
address_mode= "host"
tags = [
]
}
user = "root"
config {
image = "docker.service.consul:5000/crazymax/rtorrent-rutorrent:edge"
ports = [
"http",
"torrent",
"ecoute"
]
volumes = [
"/exports/nomad/rutorrent/data:/data",
"/exports/nomad/rutorrent/downloads:/downloads"
]
}
env {
PUID = 100001
PGID = 10
UMASK = 002
WEBUI_PORT = "8080"
}
resources {
memory = 650
}
}
}
}

View File

@ -10,7 +10,11 @@ job "supysonic" {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "supysonic" {
network {
mode = "host"
@ -49,7 +53,7 @@ job "supysonic" {
task "supysonic-frontend" {
driver = "docker"
config {
image = "nginx:alpine"
image = "docker.service.consul:5000/library/nginx:alpine"
ports = [
"http"
]
@ -92,7 +96,7 @@ http {
task "supysonic-server" {
driver = "docker"
config {
image = "ducampsv/supysonic:latest"
image = "docker.service.consul:5000/ducampsv/supysonic:latest"
ports = ["fcgi"]
force_pull = true
volumes = [
@ -105,10 +109,10 @@ http {
SUPYSONIC_DAEMON_ENABLED = "true"
SUPYSONIC_WEBAPP_LOG_LEVEL = "DEBUG"
SUPYSONIC_DAEMON_LOG_LEVEL = "INFO"
SUPYSONIC_LDAP_SERVER = "LDAP://ldap.ducamps.eu"
SUPYSONIC_LDAP_BASE_DN = "dc=ducamps,dc=win"
SUPYSONIC_LDAP_USER_FILTER = "(&(memberOf=CN=SupysonicUsers,cn=groups,dc=ducamps,dc=win))"
SUPYSONIC_LDAP_ADMIN_FILTER= "(&(memberOf=CN=SupysonicAdmins,cn=groups,dc=ducamps,dc=win))"
SUPYSONIC_LDAP_SERVER = "LDAPS://ldaps.service.consul"
SUPYSONIC_LDAP_BASE_DN = "dc=ducamps,dc=eu"
SUPYSONIC_LDAP_USER_FILTER = "(&(memberOf=cn=SupysonicUsers,ou=groups,dc=ducamps,dc=eu))"
SUPYSONIC_LDAP_ADMIN_FILTER= "(&(memberOf=cn=SupysonicAdmins,ou=groups,dc=ducamps,dc=eu))"
}
template {

View File

@ -10,7 +10,11 @@ job "syncthing" {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "syncthing" {
network {
@ -40,7 +44,7 @@ job "syncthing" {
]
}
config {
image = "linuxserver/syncthing"
image = "docker.service.consul:5000/linuxserver/syncthing"
ports = ["http"]
volumes = [
"/mnt/diskstation/nomad/syncthing/config:/config",
@ -48,6 +52,11 @@ job "syncthing" {
]
}
env{
PUID = 1000001
PGID = 1000001
}
resources {
memory = 200
}

View File

@ -7,7 +7,11 @@ job "tt-rss" {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "ttrss" {
ephemeral_disk {
@ -34,7 +38,7 @@ job "tt-rss" {
"homer.enable=true",
"homer.name=TT-RSS",
"homer.service=Application",
"homer.logo=https://framalibre.org/sites/default/files/styles/thumbnail/public/leslogos/ic_launcher_1.png",
"homer.logo=https://www.ducamps.eu/tt-rss/images/favicon-72px.png",
"homer.target=_blank",
"homer.url=https://www.ducamps.eu/tt-rss",
@ -50,12 +54,13 @@ job "tt-rss" {
task "ttrss-app" {
driver = "docker"
config {
image = "cthulhoo/ttrss-fpm-pgsql-static"
image = "docker.service.consul:5000/cthulhoo/ttrss-fpm-pgsql-static"
ports = [
"appPort"
]
volumes = [
"${NOMAD_ALLOC_DIR}/data:/var/www/html"
"${NOMAD_ALLOC_DIR}/data:/var/www/html",
"/mnt/diskstation/nomad/tt-rss/ttrss-auth-oidc:/var/www/html/tt-rss/plugins.local/auth_oidc"
]
}
env {
@ -64,16 +69,18 @@ job "tt-rss" {
TTRSS_DB_NAME = "ttrss"
TTRSS_DB_USER = "ttrss"
TTRSS_SELF_URL_PATH = "https://www.ducamps.eu/tt-rss"
TTRSS_PLUGINS = "auth_oidc, auth_internal"
TTRSS_AUTH_OIDC_NAME= "Authelia"
TTRSS_AUTH_OIDC_URL = "https://auth.ducamps.eu"
TTRSS_AUTH_OIDC_CLIENT_ID = "ttrss"
}
template {
data = <<EOH
{{ with secret "secrets/data/database/ttrss"}}
TTRSS_DB_PASS = "{{ .Data.data.password }}"
{{end}}
{{ with secret "secrets/data/database/ttrss"}}TTRSS_DB_PASS = "{{ .Data.data.password }}"{{end}}
TTRSS_AUTH_OIDC_CLIENT_SECRET = {{ with secret "secrets/data/authelia/ttrss"}}"{{ .Data.data.password }}"{{end}}
EOH
destination = "secrets/tt-rss.env"
destination = "secret/tt-rss.env"
env = true
}
resources {
memory = 150
@ -83,7 +90,7 @@ job "tt-rss" {
task "ttrss-updater" {
driver = "docker"
config {
image = "cthulhoo/ttrss-fpm-pgsql-static"
image = "docker.service.consul:5000/cthulhoo/ttrss-fpm-pgsql-static"
volumes = [
"${NOMAD_ALLOC_DIR}/data:/var/www/html"
]
@ -115,7 +122,7 @@ job "tt-rss" {
task "ttrss-frontend" {
driver = "docker"
config {
image = "nginx:alpine"
image = "docker.service.consul:5000/library/nginx:alpine"
ports = [
"http"
]

View File

@ -6,7 +6,11 @@ job "vaultwarden" {
meta {
forcedeploy = "0"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "vaultwarden" {
network {
mode = "host"
@ -50,7 +54,7 @@ job "vaultwarden" {
}
}
config {
image = "vaultwarden/server"
image = "docker.service.consul:5000/vaultwarden/server"
ports = ["http"]
volumes = [
"/mnt/diskstation/nomad/vaultwarden:/data"

View File

@ -0,0 +1,89 @@
job "vikunja" {
datacenters = ["homelab"]
priority = 70
type = "service"
meta {
forcedeploy = "0"
}
group "vikunja" {
network {
mode = "host"
port "front" {
to = 80
}
port "api" {
to = 3456
}
}
vault {
policies = ["vikunja"]
}
task "api" {
driver = "docker"
service {
name = "vikunja-api"
port = "api"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.entrypoints=web,websecure",
"homer.enable=true",
"homer.name=vikunka",
"homer.service=Application",
"homer.logo=https://${NOMAD_JOB_NAME}.ducamps.eu/images/icons/apple-touch-icon-180x180.png",
"homer.target=_blank",
"homer.url=https://${NOMAD_JOB_NAME}.ducamps.eu",
]
}
config {
image = "docker.service.consul:5000/vikunja/vikunja"
ports = ["api", "front"]
volumes = ["local/config.yml:/etc/vikunja/config.yml"]
}
env {
VIKUNJA_DATABASE_HOST = "active.db.service.consul"
VIKUNJA_DATABASE_TYPE = "postgres"
VIKUNJA_DATABASE_USER = "vikunja"
VIKUNJA_DATABASE_DATABASE = "vikunja"
VIKUNJA_SERVICE_JWTSECRET = uuidv4()
VIKUNJA_SERVICE_FRONTENDURL = "https://${NOMAD_JOB_NAME}.ducamps.eu/"
VIKUNJA_AUTH_LOCAL = "false"
}
template {
data = <<EOH
{{ with secret "secrets/data/database/vikunja"}}
VIKUNJA_DATABASE_PASSWORD= "{{ .Data.data.password }}"
{{end}}
EOH
destination = "secrets/sample.env"
env = true
}
template {
data = <<EOH
auth:
openid:
enabled: true
redirecturl: https://vikunja.ducamps.eu/auth/openid/
providers:
- name: Authelia
authurl: https://auth.ducamps.eu
clientid: vikunja
clientsecret: {{ with secret "secrets/data/authelia/vikunja"}} {{ .Data.data.password }} {{end}}
scope: openid profile email
EOH
destination = "local/config.yml"
}
resources {
memory = 100
}
}
}
}

View File

@ -36,13 +36,14 @@ job "www" {
task "server" {
driver = "docker"
config {
image = "nginx"
image = "docker.service.consul:5000/library/nginx"
ports = [
"http"
]
volumes = [
"local/nginx.conf:/etc/nginx/nginx.conf",
"/srv/http:/usr/share/nginx/html"
"/srv/http/:/usr/share/nginx/html/",
"/mnt/diskstation/nomad/archiso:/usr/share/nginx/archiso"
]
}
@ -70,7 +71,12 @@ http {
default_type text/html;
}
location =/ {
rewrite ^ /welcome;
rewrite ^ /welcome redirect;
#return 301 https://$host/welcome
}
location /archiso {
alias /usr/share/nginx/archiso/;
}
}

View File

@ -6,7 +6,11 @@ job "backup-consul" {
meta {
forcedeploy = "0"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
@ -22,9 +26,9 @@ job "backup-consul" {
task "consul-backup" {
driver = "docker"
config {
image = "ducampsv/docker-consul-backup:latest"
image = "docker.service.consul:5000/ducampsv/docker-consul-backup:latest"
volumes = [
"/mnt/diskstation/git/backup/consul:/backup"
"/mnt/diskstation/nomad/backup/consul:/backup"
]
}
resources {

View File

@ -6,7 +6,11 @@ job "backup-postgress" {
meta {
forcedeploy = "0"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
@ -28,9 +32,9 @@ job "backup-postgress" {
name = "backup-postgress"
}
config {
image = "ducampsv/docker-backup-postgres:latest"
image = "docker.service.consul:5000/ducampsv/docker-backup-postgres:latest"
volumes = [
"/mnt/diskstation/git/backup/postgres:/backup"
"/mnt/diskstation/nomad/backup/postgres:/backup"
]
}
template {
@ -45,7 +49,8 @@ job "backup-postgress" {
env = true
}
resources {
memory = 125
memory = 180
memory_max = 400
}
}

View File

@ -6,7 +6,11 @@ job "backup-vault" {
meta {
forcedeploy = "0"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
@ -25,9 +29,9 @@ job "backup-vault" {
task "backup-vault" {
driver = "docker"
config {
image = "ducampsv/docker-vault-backup:latest"
image = "docker.service.consul:5000/ducampsv/docker-vault-backup:latest"
volumes = [
"/mnt/diskstation/git/backup/vault:/backup"
"/mnt/diskstation/nomad/backup/vault:/backup"
]
}
template {

View File

@ -13,7 +13,7 @@ job "batch-rutorrent" {
task "cleanForwardFolder" {
driver= "docker"
config {
image = "alpine"
image = "docker.service.consul:5000/library/alpine"
volumes = [
"/mnt/hetzner/storagebox/file/forward:/file"
]

View File

@ -6,7 +6,11 @@ job "batch-seedboxsync" {
meta {
forcedeploy = "0"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
@ -28,9 +32,9 @@ job "batch-seedboxsync" {
name = "seedboxsync"
}
config {
image = "ducampsv/rsync:latest"
image = "docker.service.consul:5000/ducampsv/rsync:latest"
volumes = [
"/mnt/diskstation/media/download:/media",
"/mnt/diskstation/download:/media",
"local/id_rsa:/home/rsyncuser/.ssh/id_rsa"
]
command = "rsync"
@ -70,6 +74,7 @@ job "batch-seedboxsync" {
}
resources {
memory = 500
memory_max = 1000
}
}

View File

@ -0,0 +1,87 @@
job "torrent_automation" {
datacenters = ["homelab"]
priority = 50
type = "service"
meta {
forcedeploy = "0"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "prowlarr"{
network {
mode = "host"
port "prowlarr" {
static = 9696
to = 9696
}
port "flaresolverr" {
static = 8191
to = 8191
}
}
task "flaresolverr" {
driver = "docker"
service {
name = "flaresolverr"
port = "flaresolverr"
}
config {
image = "alexfozor/flaresolverr:pr-1300-experimental"
ports = ["flaresolverr"]
}
env {
}
resources {
memory = 300
memory_max = 500
}
}
task "prowlarr" {
driver = "docker"
service {
name = "prowlarr"
port = "prowlarr"
tags = [
"homer.enable=true",
"homer.name=Prowlarr",
"homer.service=Application",
"homer.logo=http://${NOMAD_ADDR_prowlarr}/Content/Images/logo.png",
"homer.target=_blank",
"homer.url=http://${NOMAD_ADDR_prowlarr}",
]
}
config {
image = "ghcr.io/linuxserver/prowlarr:latest"
ports = ["prowlarr"]
volumes = [
"/mnt/diskstation/nomad/prowlarr:/config"
]
}
env {
PUID=1000001
PGID=1000001
TZ="Europe/Paris"
}
resources {
memory = 150
}
}
}
}

View File

@ -0,0 +1,69 @@
job "lldap" {
datacenters = ["homelab"]
priority = 50
type = "service"
meta {
forcedeploy = "0"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
group "lldap"{
network {
mode = "host"
port "ldap" {
to = 3890
static = 3890
}
port "http" {
to = 17170
}
}
# vault{
# policies= ["lldap"]
#
# }
service {
name = "lldapHttp"
port = "http"
tags = [
]
}
service {
name = "lldapLDAP"
port = "ldap"
tags = [
]
}
task "lldap" {
driver = "docker"
config {
image = "docker.service.consul:5000/ducampsv/lldap:latest"
ports = ["ldap","http"]
volumes = [
"/mnt/diskstation/nomad/lldap:/data"
]
}
template {
data= <<EOH
UID=1000000
GID=1000
LLDAP_JWT_SECRET=
LLDAP_LDAP_USER_PASS=REPLACE_WITH_PASSWORD
LLDAP_LDAP_BASE_DN=dc=ducamps,dc=eu
EOH
destination = "secrets/env"
env = true
}
resources {
memory = 300
}
}
}
}

View File

@ -18,6 +18,12 @@ job "sample" {
to = 0000
}
}
volume "sample-data" {
type = "csi"
source = "sapmle-data"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
vault{
policies= ["policy_name"]
@ -32,10 +38,15 @@ job "sample" {
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.win`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.win",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
]
}
volume_mount {
volume = "sample-data"
destination = "/app/data"
}
config {
image = "sample"
ports = ["http"]

View File

@ -8,6 +8,11 @@ job "alertmanager" {
vault {
policies = ["alertmanager"]
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "alertmanager" {
network {
mode = "host"
@ -25,7 +30,7 @@ job "alertmanager" {
"homer.enable=true",
"homer.name=AlertManager",
"homer.service=Monitoring",
"homer.logo=https://camo.githubusercontent.com/13ff7fc7ea6d8a6d98d856da8e3220501b9e6a89620f017d1db039007138e062/687474703a2f2f6465766f70792e696f2f77702d636f6e74656e742f75706c6f6164732f323031392f30322f7a616c2d3230302e706e67",
"homer.logo=http://${NOMAD_ADDR_http}/favicon.ico",
"homer.target=_blank",
"homer.url=http://${NOMAD_ADDR_http}",
@ -40,7 +45,7 @@ job "alertmanager" {
}
config {
image = "prom/alertmanager"
image = "docker.service.consul:5000/prom/alertmanager"
args= ["--log.level=debug", "--config.file=/etc/alertmanager/alertmanager.yml"]
ports = ["http"]
volumes = [
@ -53,7 +58,7 @@ job "alertmanager" {
global:
smtp_from: alert@ducamps.eu
smtp_smarthost: mail.ducamps.eu:465
smtp_hello: "mail.ducamps.win"
smtp_hello: "mail.ducamps.eu"
smtp_require_tls: false
{{with secret "secrets/data/nomad/alertmanager/mail"}}
smtp_auth_username: {{.Data.data.username}}

View File

@ -0,0 +1,285 @@
job "authelia" {
datacenters = ["homelab"]
priority = 80
type = "service"
meta {
forcedeploy = "0"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
group "authelia" {
network {
mode = "host"
port "authelia" {
to = 9091
}
}
volume "authelia-config" {
type = "csi"
source = "authelia-config"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
vault {
policies = ["authelia"]
}
task "authelia" {
driver = "docker"
service {
name = "authelia"
port = "authelia"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`auth.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=auth.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
]
}
action "generate-client-secret" {
command = "authelia"
args = ["crypto",
"hash",
"generate",
"pbkdf2",
"--random",
"--random.length",
"72",
"--random.charset",
"rfc3986"
]
}
config {
image = "authelia/authelia"
ports = ["authelia"]
args = [
"--config",
"/local/configuration.yml",
]
}
volume_mount {
volume = "authelia-config"
destination = "/config"
}
env {
AUTHELIA_SESSION_SECRET = uuidv4()
AUTHELIA_IDENTITY_VALIDATION_RESET_PASSWORD_JWT_SECRET = uuidv4()
}
template {
data = <<EOH
---
###############################################################
# Authelia configuration #
###############################################################
server:
address: 'tcp://:9091'
endpoints:
authz:
forward-auth:
implementation: 'ForwardAuth'
legacy:
implementation: 'Legacy'
identity_providers:
oidc:
hmac_secret: {{ with secret "secrets/data/nomad/authelia"}}{{ .Data.data.hmac}}{{end}}
jwks:
- key_id: 'key'
key: |
{{ with secret "secrets/data/nomad/authelia"}}{{ .Data.data.rsakey|indent 8 }}{{end}}
cors:
endpoints:
- userinfo
- authorization
- token
- revocation
- introspection
allowed_origins:
- https://mealie.ducamps.eu
allowed_origins_from_client_redirect_uris: true
clients:
- client_id: 'ttrss'
client_name: 'ttrss'
client_secret: {{ with secret "secrets/data/authelia/ttrss"}} {{ .Data.data.hash }} {{end}}
public: false
scopes:
- openid
- email
- profile
redirect_uris:
- 'https://www.ducamps.eu/tt-rss'
userinfo_signed_response_alg: none
authorization_policy: 'one_factor'
pre_configured_consent_duration: 3M
- client_id: 'mealie'
client_name: 'mealie'
client_secret: {{ with secret "secrets/data/authelia/mealie"}} {{ .Data.data.hash }} {{end}}
public: false
require_pkce: true
pkce_challenge_method: 'S256'
scopes:
- openid
- email
- profile
- groups
redirect_uris:
- 'https://mealie.ducamps.eu/login'
userinfo_signed_response_alg: none
authorization_policy: 'one_factor'
pre_configured_consent_duration: 3M
- client_id: 'immich'
client_name: 'immich'
client_secret: {{ with secret "secrets/data/authelia/immich"}} {{ .Data.data.hash }} {{end}}
public: false
authorization_policy: 'one_factor'
redirect_uris:
- 'https://immich.ducamps.eu/auth/login'
- 'https://immich.ducamps.eu/user-settings'
- 'app.immich:/'
scopes:
- 'openid'
- 'profile'
- 'email'
userinfo_signed_response_alg: 'none'
pre_configured_consent_duration: 3M
- client_id: 'grafana'
client_name: 'Grafana'
client_secret:{{ with secret "secrets/data/authelia/grafana"}} {{ .Data.data.hash }} {{end}}
public: false
authorization_policy: 'one_factor'
require_pkce: true
pkce_challenge_method: 'S256'
redirect_uris:
- 'https://grafana.ducamps.eu/login/generic_oauth'
scopes:
- 'openid'
- 'profile'
- 'groups'
- 'email'
userinfo_signed_response_alg: 'none'
token_endpoint_auth_method: 'client_secret_basic'
pre_configured_consent_duration: 3M
- client_id: 'vikunja'
client_name: 'vikunja'
client_secret:{{ with secret "secrets/data/authelia/vikunja"}} {{ .Data.data.hash }} {{end}}
public: false
authorization_policy: 'one_factor'
redirect_uris:
- 'https://vikunja.ducamps.eu/auth/openid/authelia'
scopes:
- 'openid'
- 'profile'
- 'email'
userinfo_signed_response_alg: 'none'
token_endpoint_auth_method: 'client_secret_basic'
pre_configured_consent_duration: 3M
- client_id: 'gitea'
client_name: 'gitea'
client_secret:{{ with secret "secrets/data/authelia/gitea"}} {{ .Data.data.hash }} {{end}}
public: false
authorization_policy: 'one_factor'
redirect_uris:
- 'https://git.ducamps.eu/user/oauth2/authelia/callback'
scopes:
- 'openid'
- 'profile'
- 'email'
userinfo_signed_response_alg: 'none'
token_endpoint_auth_method: 'client_secret_basic'
pre_configured_consent_duration: 3M
log:
level: 'trace'
totp:
issuer: 'authelia.com'
authentication_backend:
ldap:
address: 'ldaps://ldap.service.consul'
implementation: 'custom'
timeout: '5s'
start_tls: false
tls:
skip_verify: true
minimum_version: 'TLS1.2'
base_dn: 'DC=ducamps,DC=eu'
additional_users_dn: 'OU=users'
users_filter: '(&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=person))'
additional_groups_dn: 'OU=groups'
#groups_filter: '(&(member=UID={input},OU=users,DC=ducamps,DC=eu)(objectClass=groupOfNames))'
groups_filter: '(&(|{memberof:rdn})(objectClass=groupOfNames))'
group_search_mode: 'memberof'
user: 'uid=authelia,ou=serviceAccount,ou=users,dc=ducamps,dc=eu'
password:{{ with secret "secrets/data/nomad/authelia"}} '{{ .Data.data.ldapPassword }}'{{ end }}
attributes:
distinguished_name: ''
username: 'uid'
mail: 'mail'
member_of: 'memberOf'
group_name: 'cn'
access_control:
default_policy: 'deny'
rules:
# Rules applied to everyone
- domain: '*.ducamps.eu'
policy: 'one_factor'
session:
cookies:
- name: 'authelia_session'
domain: 'ducamps.eu' # Should match whatever your root protected domain is
authelia_url: 'https://auth.ducamps.eu'
expiration: '12 hour'
inactivity: '5 minutes'
regulation:
max_retries: 3
find_time: '2 minutes'
ban_time: '5 minutes'
storage:
{{ with secret "secrets/data/nomad/authelia"}}
encryption_key: '{{.Data.data.encryptionKeys }}'
{{end}}
local:
path: '/config/db.sqlite3'
notifier:
disable_startup_check: true
smtp:
username: 'authelia@ducamps.eu'
{{ with secret "secrets/data/nomad/authelia"}}
password: '{{ .Data.data.mailPassword}}'
{{end}}
address: submissions://mail.ducamps.eu:465
disable_require_tls: true
sender: 'authelia@ducamps.eu'
tls:
server_name: 'mail.ducamps.eu'
skip_verify: true
EOH
destination = "local/configuration.yml"
}
resources {
memory = 100
}
}
}
}

View File

@ -27,7 +27,7 @@ job "crowdsec-agent" {
}
driver = "docker"
config {
image = "crowdsecurity/crowdsec"
image = "docker.service.consul:5000/crowdsecurity/crowdsec"
ports = ["metric"]
volumes = [
"/var/run/docker.sock:/var/run/docker.sock",

View File

@ -5,9 +5,15 @@ job "crowdsec-api" {
meta {
forcedeploy = "-1"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
vault {
policies = ["crowdsec"]
}
group "crowdsec-api" {
network {
mode = "host"
@ -35,11 +41,11 @@ job "crowdsec-api" {
]
}
config {
image = "crowdsecurity/crowdsec"
image = "docker.service.consul:5000/crowdsecurity/crowdsec"
ports = ["http", "metric"]
volumes = [
"/mnt/diskstation/nomad/crowdsec/db:/var/lib/crowdsec/data",
"/mnt/diskstation/nomad/crowdsec/data:/etc/crowdsec_data",
"/mnt/diskstation/nomad/crowdsec/data:/etc/crowdsec",
]
}

View File

@ -6,7 +6,11 @@ job "dashboard" {
meta {
forcedeploy = "1"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "dashboard" {
network {
mode = "host"
@ -29,7 +33,7 @@ job "dashboard" {
]
}
config {
image = "b4bz/homer"
image = "docker.service.consul:5000/b4bz/homer"
ports = ["http"]
volumes = [
"/mnt/diskstation/nomad/homer:/www/assets"

View File

@ -16,7 +16,7 @@ job "drone-runner" {
task "drone-runner" {
driver = "docker"
config {
image = "drone/drone-runner-docker:latest"
image = "docker.service.consul:5000/drone/drone-runner-docker:latest"
volumes = [
"/var/run/docker.sock:/var/run/docker.sock",
]

View File

@ -45,7 +45,7 @@ job "drone" {
]
}
config {
image = "drone/drone:latest"
image = "docker.service.consul:5000/drone/drone:latest"
ports = [
"http"
]

View File

@ -8,6 +8,11 @@ job "git" {
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "gitea" {
network {
@ -54,13 +59,12 @@ job "git" {
]
}
config {
image = "gitea/gitea:latest"
image = "docker.service.consul:5000/gitea/gitea:latest"
ports = [
"http",
"ssh"
]
volumes = [
"/mnt/diskstation/git:/repo",
"/mnt/diskstation/nomad/gitea:/data"
]
}
@ -77,10 +81,14 @@ job "git" {
GITEA__database__HOST = "active.db.service.consul"
GITEA__database__NAME = "gitea"
GITEA__database__USER = "gitea"
GITEA__service__DISABLE_REGISTRATION = "true"
GITEA__repository__ROOT = "/repo"
GITEA__service__DISABLE_REGISTRATION = "false"
GITEA__service__ALLOW_ONLY_EXTERNAL_REGISTRATION = "true"
GITEA__service__SHOW_REGISTRATION_BUTTON = "false"
GITEA__openid__ENABLE_OPENID_SIGNIN = "false"
GITEA__openid__ENABLE_OPENID_SIGNUP = "true"
GITEA__repository__ROOT = "/data/gitea-repositories"
GITEA__server__APP_DATA_PATH = "/data"
GITEA__server__LFS_CONTENT_PATH = "/repo/LFS"
GITEA__server__LFS_CONTENT_PATH = "/data/lfs"
GITEA__webhook__ALLOWED_HOST_LIST = "drone.ducamps.eu"
GITEA__webhook__DELIVER_TIMEOUT = "30"
}

View File

@ -2,8 +2,17 @@ job "grafana" {
datacenters = ["homelab"]
priority = 50
type = "service"
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
meta {
forcedeploiement = 1
forcedeploiement = 2
}
vault {
policies = ["grafana"]
}
group "grafana" {
network {
@ -11,7 +20,6 @@ job "grafana" {
to = 3000
}
}
service {
name = "grafana"
port = "http"
@ -36,13 +44,37 @@ job "grafana" {
task "dashboard" {
driver = "docker"
config {
image = "grafana/grafana"
image = "docker.service.consul:5000/grafana/grafana"
ports = ["http"]
volumes = [
"/mnt/diskstation/nomad/grafana/config:/etc/grafana",
"local/grafana.ini:/etc/grafana/grafana.ini",
"/mnt/diskstation/nomad/grafana/lib:/var/lib/grafana"
]
}
template {
data = <<EOH
force_migration=true
[server]
root_url = https://grafana.ducamps.eu
[auth.generic_oauth]
enabled = true
name = Authelia
icon = signin
client_id = grafana
client_secret = {{ with secret "secrets/data/authelia/grafana"}} {{ .Data.data.password }} {{end}}
scopes = openid profile email groups
empty_scopes = false
auth_url = https://auth.ducamps.eu/api/oidc/authorization
token_url = https://auth.ducamps.eu/api/oidc/token
api_url = https://auth.ducamps.eu/api/oidc/userinfo
login_attribute_path = preferred_username
groups_attribute_path = groups
name_attribute_path = name
use_pkce = true
role_attribute_path=contains(groups[*], 'GrafanaAdmins') && 'Admin' || contains(groups[*], 'GrafanaUsers') && 'Viewer'
EOH
destination = "local/grafana.ini"
}
resources {
memory = 250
}

View File

@ -6,7 +6,11 @@ job "loki" {
meta {
forcedeploy = "0"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "loki" {
network {
mode = "host"
@ -34,7 +38,7 @@ job "loki" {
}
}
config {
image = "grafana/loki"
image = "docker.service.consul:5000/grafana/loki"
ports = ["http"]
args = [
"-config.file",
@ -49,56 +53,58 @@ job "loki" {
auth_enabled: false
server:
http_listen_port: 3100
ingester:
lifecycler:
address: 127.0.0.1
ring:
kvstore:
store: inmemory
replication_factor: 1
final_sleep: 0s
# Any chunk not receiving new logs in this time will be flushed
chunk_idle_period: 1h
# All chunks will be flushed when they hit this age, default is 1h
max_chunk_age: 1h
# Loki will attempt to build chunks up to 1.5MB, flushing if chunk_idle_period or max_chunk_age is reached first
chunk_target_size: 1048576
# Must be greater than index read cache TTL if using an index cache (Default index read cache TTL is 5m)
chunk_retain_period: 30s
max_transfer_retries: 0 # Chunk transfers disabled
common:
instance_addr: 127.0.0.1
path_prefix: /loki
storage:
filesystem:
chunks_directory: /loki/chunks
rules_directory: /loki/rules
replication_factor: 1
ring:
kvstore:
store: inmemory
schema_config:
configs:
- from: 2020-10-24
store: boltdb-shipper
object_store: filesystem
schema: v11
- from: "2023-04-08" # <---- A date in the future
index:
prefix: index_
period: 24h
storage_config:
boltdb_shipper:
active_index_directory: /loki/boltdb-shipper-active
cache_location: /loki/boltdb-shipper-cache
cache_ttl: 24h # Can be increased for faster performance over longer query periods, uses more disk space
shared_store: filesystem
filesystem:
directory: /loki/chunks
prefix: index_
object_store: filesystem
schema: v13
store: tsdb
compactor:
working_directory: /tmp/loki/boltdb-shipper-compactor
retention_enabled: true
working_directory: /loki/tsdb-shipper-compactor
shared_store: filesystem
limits_config:
split_queries_by_interval: 24h
max_query_parallelism: 100
max_entries_limit_per_query: 10000
ingestion_rate_strategy: local
retention_period: 90d
reject_old_samples: true
reject_old_samples_max_age: 168h
chunk_store_config:
max_look_back_period: 0s
table_manager:
retention_deletes_enabled: false
retention_period: 0s
query_scheduler:
max_outstanding_requests_per_tenant: 4096
querier:
max_concurrent: 4096
frontend:
max_outstanding_per_tenant: 4096
query_range:
results_cache:
cache:
embedded_cache:
enabled: true
max_size_mb: 100
EOH
destination = "local/loki/local-config.yaml"
}
resources {
memory = 300
memory_max = 1000
}
}

nomad-job/platform/makefile Symbolic link
View File

@ -0,0 +1 @@
../makefile

View File

@ -32,7 +32,7 @@ job "node-exporter" {
task "node-exporter" {
driver = "docker"
config {
image = "prom/node-exporter"
image = "docker.service.consul:5000/prom/node-exporter"
ports = ["http"]
args = [
"--web.listen-address=:${NOMAD_PORT_http}",

View File

@ -29,11 +29,11 @@ job "nut_exporter" {
task "nut_exporter" {
driver = "docker"
config {
image = "ghcr.io/druggeri/nut_exporter"
image = "ghcr.service.consul:5000/druggeri/nut_exporter"
ports = ["http"]
}
env {
NUT_EXPORTER_SERVER= "192.168.1.10"
NUT_EXPORTER_SERVER= "192.168.1.43"
NUT_EXPORTER_VARIABLES = "battery.runtime,battery.charge,input.voltage,output.voltage,output.voltage.nominal,ups.load,ups.status,ups.realpower"
}

View File

@ -9,7 +9,11 @@ job "prometheus" {
meta{
force_deploy= 1
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "prometheus" {
count = 1
@ -246,7 +250,7 @@ EOH
driver = "docker"
config {
image = "prom/prometheus:latest"
image = "docker.service.consul:5000/prom/prometheus:latest"
args = [
"--config.file=/etc/prometheus/prometheus.yml",
"--storage.tsdb.path=/prometheus",
@ -285,6 +289,7 @@ EOH
}
resources {
memory = 350
memory_max = 500
}
}
}

View File

@ -15,7 +15,7 @@ job "vector" {
task "vector" {
driver = "docker"
config {
image = "timberio/vector:0.34.1-alpine"
image = "docker.service.consul:5000/timberio/vector:0.34.1-alpine"
ports = ["api"]
volumes = [
"/var/run/docker.sock:/var/run/docker.sock",

View File

@ -0,0 +1,26 @@
job "csi-nfs-controller" {
datacenters = ["homelab"]
group "controller" {
task "csi-nfs-controller" {
driver = "docker"
config {
image = "registry.k8s.io/sig-storage/nfsplugin:v4.7.0"
args = [
"--v=5",
"--nodeid=${attr.unique.hostname}",
"--endpoint=unix:///csi/csi.sock",
"--drivername=nfs.csi.k8s.io"
]
}
csi_plugin {
id = "nfs"
type = "controller"
mount_dir = "/csi"
}
resources {
memory = 32
cpu = 100
}
}
}
}

View File

@ -0,0 +1,29 @@
job "csi-nfs-nodes" {
datacenters = ["homelab","hetzner"]
type = "system"
group "csi-nfs-nodes" {
task "plugin" {
driver = "docker"
config {
image = "registry.k8s.io/sig-storage/nfsplugin:v4.7.0"
args = [
"--v=5",
"--nodeid=${attr.unique.hostname}",
"--endpoint=unix:///csi/csi.sock",
"--drivername=nfs.csi.k8s.io"
]
# node plugins must run as privileged jobs because they
# mount disks to the host
privileged = true
}
csi_plugin {
id = "nfs"
type = "node"
mount_dir = "/csi"
}
resources {
memory = 50
}
}
}
}

nomad-job/system/makefile Symbolic link
View File

@ -0,0 +1 @@
../makefile

View File

@ -0,0 +1,31 @@
dn: cn=module,cn=config
cn: module
objectClass: olcModuleList
olcModuleLoad: memberof
olcModuleLoad: refint
olcModulePath: /opt/bitnami/openldap/lib/openldap

dn: olcOverlay={0}memberof,olcDatabase={2}mdb,cn=config
objectClass: olcConfig
objectClass: olcMemberOf
objectClass: olcOverlayConfig
objectClass: top
olcOverlay: memberof
olcMemberOfDangling: ignore
olcMemberOfRefInt: TRUE
olcMemberOfGroupOC: groupOfNames
olcMemberOfMemberAD: member
olcMemberOfMemberOfAD: memberOf

dn: olcOverlay={1}refint,olcDatabase={2}mdb,cn=config
objectClass: olcConfig
objectClass: olcOverlayConfig
objectClass: olcRefintConfig
objectClass: top
olcOverlay: {1}refint
olcRefintAttribute: memberof
olcRefintAttribute: member
olcRefintAttribute: manager
olcRefintAttribute: owner

Some files were not shown because too many files have changed in this diff.