Compare commits

..

1 commit

Author: vincent
Commit: d2a8106fb1 "job: add borgmatic"
Date: 2024-01-09 18:54:25 +01:00
CI: continuous-integration/drone/push: build passing (all checks were successful)
148 changed files with 1127 additions and 4718 deletions

Vagrantfile (vendored): 15 lines changed

@@ -1,10 +1,9 @@
 Vagrant.configure('2') do |config|
   if Vagrant.has_plugin?('vagrant-cachier')
     config.cache.scope = 'machine'
-    config.cache.enable :pacman
   end
   config.vm.provider :libvirt do |libvirt|
-    libvirt.management_network_domain = "lan.ducamps.dev"
+    libvirt.management_network_domain = "ducamps-dev.eu"
   end
   config.vm.define "oscar-dev" do |c|
@@ -20,7 +19,7 @@ Vagrant.configure('2') do |config|
     # Provider
     c.vm.provider "libvirt" do |libvirt, override|
-      libvirt.memory = 2048
+      libvirt.memory = 1024
       libvirt.cpus = 2
     end
     c.vm.provision "ansible" do |bootstrap|
@@ -33,7 +32,7 @@ Vagrant.configure('2') do |config|
   config.vm.define "merlin-dev" do |c|
     # Box definition
-    c.vm.box = "archlinux/archlinux"
+    c.vm.box = "generic/rocky9"
     # Config options
     c.vm.synced_folder ".", "/vagrant", disabled: true
     c.ssh.insert_key = true
@@ -43,7 +42,7 @@ Vagrant.configure('2') do |config|
     # Provider
     c.vm.provider "libvirt" do |libvirt, override|
-      libvirt.memory = 512
+      libvirt.memory = 1024
       libvirt.cpus = 2
     end
@@ -57,7 +56,7 @@ Vagrant.configure('2') do |config|
   config.vm.define "gerard-dev" do |c|
     # Box definition
-    c.vm.box = "archlinux/archlinux"
+    c.vm.box = "generic/debian12"
     # Config options
     c.vm.synced_folder ".", "/vagrant", disabled: true
@@ -67,7 +66,7 @@ Vagrant.configure('2') do |config|
     # instance_raw_config_args
     # Provider
     c.vm.provider "libvirt" do |libvirt, override|
-      libvirt.memory = 2048
+      libvirt.memory = 1024
       libvirt.cpus = 2
     end
     c.vm.provision "ansible" do |bootstrap|
@@ -90,7 +89,7 @@ Vagrant.configure('2') do |config|
     # Provider
     c.vm.provider "libvirt" do |libvirt, override|
-      libvirt.memory = 2048
+      libvirt.memory = 1024
       libvirt.cpus = 2
     end

@@ -15,10 +15,7 @@ pdns_rec_config:
   forward-zones:
     - "{{ consul_domain }}=127.0.0.1:8600"
     - "ducamps.win=192.168.1.10"
-    - "{{ domain.name }}=192.168.1.5"
-    - "lan.{{ domain.name }}=192.168.1.5"
+    - "ducamps.eu=192.168.1.5"
     - "1.168.192.in-addr.arpa=192.168.1.5:5300"
-  local-address: "{{ hostvars[inventory_hostname]['ansible_'+ default_interface].ipv4.address|default(ansible_default_ipv4.address) }}"
+  local-address: "{{ ansible_default_ipv4.address }}"
   dnssec: "off"

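The local-address template on the '-' side is dense; a minimal sketch of how it evaluates, assuming a hypothetical host that sets default_interface: "enp2s0" and has facts gathered for that NIC (addresses illustrative):

    # local-address resolution, step by step:
    #   hostvars[inventory_hostname]['ansible_enp2s0'].ipv4.address  -> "192.168.1.42"
    #   | default(ansible_default_ipv4.address)
    #     -> used only when the per-interface fact is missing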
@@ -1,90 +0,0 @@
-NAS_nomad_folder:
-  - name: actualbudget
-  - name: archiso
-    owner: 1000001
-  - name: backup
-    owner: 1000001
-  - name: borgmatic
-  - name: crowdsec
-    owner: 1000001
-  - name: dms
-    owner: 1000001
-  - name: filestash
-    owner: 1000
-  - name: gitea
-    owner: 1000000
-  - name: grafana
-    owner: 472
-  - name: hass
-    owner: 1000001
-  - name: homer
-    owner: 1000001
-  - name: immich/cache
-  - name: immich/upload
-  - name: jellyfin
-    owner: 1000001
-  - name: loki
-    owner: 10001
-  - name: mealie
-    owner: 1000001
-  - name: mosquito
-    owner: 1883
-  - name: pacoloco
-    owner: 1000001
-  - name: pdns-auth
-    owner: 1000001
-  - name: pdns-admin
-    owner: 1000001
-  - name: pihole
-    owner: 999
-  - name: prometheus
-    owner: 65534
-  - name: prowlarr
-    owner: 1000001
-  - name: radicale
-    owner: 1000001
-  - name: openldap
-    owner: 1001
-  - name: registry/ghcr
-  - name: registry/docker
-  - name: syncthing
-    owner: 1000001
-  - name: traefik
-    owner: 1000001
-  - name: tt-rss
-    owner: 1000001
-  - name: vaultwarden
-    owner: 1000001
-  - name: zigbee2mqtt
-    owner: 1000001
-nas_bind_target: "/exports"
-nas_bind_source:
-  - dest: "{{ nas_bind_target }}/nomad"
-    source: /data/data1/nomad
-  - dest: "{{ nas_bind_target }}/music"
-    source: /data/data1/music
-  - dest: "{{ nas_bind_target }}/download"
-    source: /data/data1/download
-  - dest: "{{ nas_bind_target }}/media/serie"
-    source: /data/data2/serie
-  - dest: "{{ nas_bind_target }}/media/film"
-    source: /data/data3/film
-  - dest: "{{ nas_bind_target }}/photo"
-    source: /data/data1/photo
-  - dest: "{{ nas_bind_target }}/homes"
-    source: /data/data1/homes
-  - dest: "{{ nas_bind_target }}/ebook"
-    source: /data/data1/ebook
-  - dest: "{{ nas_bind_target }}/media/download/serie"
-    source: /data/data1/download/serie
-  - dest: "{{ nas_bind_target }}/media/download/film"
-    source: /data/data1/download/film
-  - dest: "{{ nas_bind_target }}/music/download/"
-    source: /data/data1/download/music

@@ -1 +1,3 @@
-vsftpd_config: {}
+vsftpd_config:
+  local_root: "/var/local/volume1"
+  seccomp_sandbox: False

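A hedged sketch of what these vars likely become, assuming the vsftpd role templates each key into vsftpd.conf (the rendering convention is the role's, not shown in this diff):

    vsftpd_config:
      local_root: "/var/local/volume1"   # directory users land in after login
      seccomp_sandbox: False             # disable vsftpd's seccomp sandbox; avoids failures on some kernels
    # plausible rendered vsftpd.conf lines:
    #   local_root=/var/local/volume1
    #   seccomp_sandbox=NO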
@@ -1,15 +1,15 @@
-nfs_cluster_list: "{% for server in groups['all']%} {% if hostvars[server]['ansible_default_ipv4']['address'] is defined %} {{hostvars[server]['ansible_' + hostvars[server]['nfs_iface']|default('')].ipv4.address|default(hostvars[server]['ansible_default_ipv4']['address'],true)}}{{ nfs_options }} {% endif %} {%endfor%}"
-nfs_options: "(rw,no_root_squash,crossmnt,async,insecure_locks,sec=sys)"
-nfs_consul_service: true
-nfs_bind_target: "/exports"
+nfs_cluster_list: "{% for server in groups['all']%}{{ hostvars[server]['ansible_default_ipv4']['address'] }}(rw,no_root_squash,async,insecure_locks,sec=sys) {%endfor%}"
 nfs_exports:
-  - "{{ nas_bind_target }} *(fsid=0,insecure,no_subtree_check)"
-  - "{{ nas_bind_target }}/nomad {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
-  - "{{ nas_bind_target }}/download {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
-  - "{{ nas_bind_target }}/music {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
-  - "{{ nas_bind_target }}/media {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
-  - "{{ nas_bind_target }}/photo {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
-  - "{{ nas_bind_target }}/homes {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
-  - "{{ nas_bind_target }}/ebook {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
+  - "/var/local/volume1/nomad {{nfs_cluster_list}}"
+  - "/var/local/volume1/music {{nfs_cluster_list}}"
+  - "/var/local/volume1/media {{nfs_cluster_list}}"
+  - "/var/local/volume1/photo {{nfs_cluster_list}}"
+  - "/var/local/volume1/ebook {{nfs_cluster_list}}"
+  - "/var/local/volume1/git {{nfs_cluster_list}}"
+  - "/var/local/volume1/archMirror {{nfs_cluster_list}}"
+  - "/var/local/volume1/homes/admin {{nfs_cluster_list}}"
+  - "/var/local/volume1/CardDav {{nfs_cluster_list}}"

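The '-' side nfs_cluster_list template is easier to read rendered; a hedged sketch for one hypothetical host at 192.168.1.40 (hosts without an ansible_default_ipv4 fact are skipped by the "is defined" guard, and nfs_options is appended per address):

    # nfs_cluster_list ->
    #   " 192.168.1.40(rw,no_root_squash,crossmnt,async,insecure_locks,sec=sys) "
    # so an export entry such as
    #   "{{ nas_bind_target }}/music {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
    # renders roughly to this /etc/exports line:
    #   /exports/music 192.168.1.40(rw,no_root_squash,crossmnt,async,insecure_locks,sec=sys) 172.17.0.0/16(rw,no_root_squash,crossmnt,async,insecure_locks,sec=sys)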
@@ -1,25 +0,0 @@
-samba_passdb_backend: tdbsam
-samba_shares_root: /exports
-samba_shares:
-  - name: media
-    comment: "media"
-    write_list: "@NAS_media"
-    browseable: true
-  - name: ebook
-    comment: "ebook"
-    write_list: "@NAS_ebook"
-    browseable: true
-  - name: music
-    comment: "music"
-    write_list: "@NAS_music"
-    browseable: true
-  - name: photo
-    comment: "photo"
-    write_list: "@NAS_photo"
-    browseable: true
-  - name: download
-    comment: "downlaod"
-    write_list: "@NAS_download"
-    browseable: true
-samba_load_homes: True
-samba_homes_include: samba_homes_include.conf

@@ -4,7 +4,7 @@ systemd_mounts:
     mount: /mnt/diskstation/nomad
     type: nfs
     options:
-      - "vers=4"
+      - " "
     automount: "{{ env_automount }}"
     enabled: true
   hetzner_storage:
@@ -13,8 +13,8 @@ systemd_mounts:
     type: cifs
     options:
       - credentials=/etc/creds/hetzner_credentials
-      - uid=100001
-      - gid=10
+      - uid= 100001
+      - gid= 10
       - vers=3.0
       - mfsymlinks
     automount: "{{ env_automount }}"

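The hetzner_storage CIFS mount reads /etc/creds/hetzner_credentials; mount.cifs expects that file in key=value form. A sketch with placeholder values (the real account is not part of this diff):

    # /etc/creds/hetzner_credentials
    username=uXXXXXX
    password=<storage-box password>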
@@ -4,4 +4,4 @@ system_arch_local_mirror: "https://arch.{{domain.name}}/repo/archlinux_$arch"
 system_sudoers_group: "serverAdmin"
 system_ipV6_disable: True
 system_ip_unprivileged_port_start: 0
-wireguard_mtu: 1420
+nas_ip: "{{ hostvars[groups['NAS'][0]]['ansible_facts']['default_ipv4']['address']|default('192.168.1.10')}}"

@@ -1,8 +1,4 @@
 docker_daemon_config:
   dns:
     - 172.17.0.1
-    - 192.168.1.6
+    - 192.168.1.5
-  mtu: 1420
-  insecure-registries:
-    - 192.168.1.0/24
-    - 192.168.121.0/24

@@ -2,7 +2,6 @@ nomad_docker_allow_caps:
   - NET_ADMIN
   - NET_BROADCAST
   - NET_RAW
-nomad_allow_privileged: True
 nomad_vault_enabled: true
 nomad_vault_address: "http://active.vault.service.{{consul_domain}}:8200"
 nomad_vault_role: "nomad-cluster"

@@ -0,0 +1,42 @@
+consul_client_addr: "0.0.0.0"
+consul_datacenter: "homelab"
+consul_backup_location: "/mnt/diskstation/git/backup/consul"
+consul_ansible_group: all
+consul_bootstrap_expect: 3
+nomad_docker_allow_caps:
+  - NET_ADMIN
+  - NET_BROADCAST
+  - NET_RAW
+nomad_vault_enabled: true
+nomad_vault_address: "http://active.vault.service.consul:8200"
+nomad_vault_role: "nomad-cluster"
+nomad_vault_token: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:nomad_vault_token') }}"
+nomad_bootstrap_expect: 3
+notification_mail: "{{inventory_hostname}}@{{ domain_name }}"
+msmtp_mailhub: smtp.{{ domain_name }}
+msmtp_auth_user: "{{ user.mail }}"
+msmtp_auth_pass: "{{ lookup('hashi_vault','secret=secrets/data/ansible/other:email') }}"
+system_user:
+  - name: drone-deploy
+    home: /home/drone-deploy
+    shell: /bin/bash
+    privatekey:
+      - keyname: id_gitea
+        key: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/privatekey:gitea')}}"
+    authorized_keys:
+      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDUaK+pQlosmopbZfucll9UdqDOTaODOBwoxRwkJEk1i drone@oscar
+  - name: ansible
+    home: /home/ansible
+    shell: /bin/bash
+  - name: root
+    home: /root
+    privatekey:
+      - keyname: id_gitea
+        key: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/privatekey:gitea')}}"

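Several values above come from hashi_vault lookups, which run on the Ansible controller and need to reach Vault with a valid token. A hedged sketch of the usual prerequisites (address and token path illustrative):

    # on the controller, before running the playbook:
    #   export VAULT_ADDR=http://active.vault.service.consul:8200
    #   export VAULT_TOKEN=$(cat ~/vaultUnseal/production/rootkey)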
@@ -1,5 +1,5 @@
 sssd_configure: true
 # sssd_configure is False by default - by default nothing is done by this role.
-ldap_search_base: "dc=ducamps,dc=eu"
-ldap_uri: "ldaps://ldaps.service.consul"
-ldap_sudo_search_base: "ou=sudoers,dc=ducamps,dc=eu"
+ldap_search_base: "dc=ducamps,dc=win"
+ldap_uri: "ldaps://ldap.ducamps.eu"
+ldap_sudo_search_base: "ou=sudoers,dc=ducamps,dc=win"

@@ -39,4 +39,4 @@ user_custom_host:
     user: "git"
     keyfile: "~/.ssh/id_gitea"
-user_config_repo: "ssh://git@git.ducamps.eu:2222/vincent/conf2.git"
+user_config_repo: "ssh://git@git.{{ domain.name }}:2222/vincent/conf2.git"

@@ -1,12 +1,11 @@
 $ANSIBLE_VAULT;1.1;AES256
-(11 lines of AES256 ciphertext)
+(10 lines of re-encrypted AES256 ciphertext)

@@ -1,10 +1,42 @@
 systemd_mounts:
+  diskstation_git:
+    share: "{{ nas_ip }}:{{ env_default_nfs_path }}//git"
+    mount: /mnt/diskstation/git
+    type: nfs
+    options:
+      - " "
+    automount: "{{ env_automount }}"
+    enabled: true
+  diskstation_CardDav:
+    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/CardDav"
+    mount: /mnt/diskstation/CardDav
+    type: nfs
+    options:
+      - " "
+    automount: "{{ env_automount }}"
+    enabled: true
+  backup_disk:
+    share: /dev/sdb1
+    mount: /mnt/backup
+    type: ntfs-3g
+    options:
+      - " "
+    automount: "{{ env_automount }}"
+    enabled: "{%if inventory_hostname in groups['staging'] %} false {% else %} true {% endif %}"
+  diskstation_home:
+    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/homes/admin"
+    mount: /mnt/diskstation/home
+    type: nfs
+    options:
+      - " "
+    automount: "{{ env_automount }}"
+    enabled: true
   diskstation_photo:
     share: "{{ nas_ip }}:{{ env_default_nfs_path }}/photo"
     mount: /mnt/diskstation/photo
     type: nfs
     options:
-      - "vers=4"
+      - " "
     automount: "{{ env_automount }}"
     enabled: true
   diskstation_music:
@@ -12,7 +44,7 @@ systemd_mounts:
     mount: /mnt/diskstation/music
     type: nfs
     options:
-      - "vers=4"
+      - " "
     automount: "{{ env_automount }}"
     enabled: true
   diskstation_media:
@@ -20,16 +52,23 @@ systemd_mounts:
     mount: /mnt/diskstation/media
     type: nfs
     options:
-      - "vers=4"
+      - " "
     automount: "{{ env_automount }}"
     enabled: true
   diskstation_ebook:
     share: "{{ nas_ip }}:{{ env_default_nfs_path }}/ebook"
     mount: /mnt/diskstation/ebook
     type: nfs
     options:
-      - "vers=4"
+      - " "
     automount: "{{ env_automount }}"
     enabled: true
+  diskstation_archMirror:
+    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/archMirror"
+    mount: /mnt/diskstation/archMirror
+    type: nfs
+    options:
+      - " "
+    automount: "{{ env_automount }}"
+    enabled: true
   diskstation_nomad:
@@ -40,11 +79,3 @@ systemd_mounts:
       - " "
     automount: "{{ env_automount }}"
     enabled: true
-  diskstation_download:
-    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/download"
-    mount: /mnt/diskstation/download
-    type: nfs
-    options:
-      - "vers=4"
-    automount: "{{ env_automount }}"
-    enabled: true

@@ -24,10 +24,6 @@ postgresql_databases:
     owner: pdns-auth
   - name: pdns-admin
     owner: pdns-admin
-  - name: mealie
-    owner: mealie
-  - name: immich
-    owner: immich
 postgresql_hba_entries:
   - {type: local, database: all, user: postgres, auth_method: peer}
@@ -36,3 +32,5 @@ postgresql_hba_entries:
   - {type: host, database: all, user: all, address: '::1/128', auth_method: md5}
   - {type: host, database: all, user: all, address: '::0/128', auth_method: md5}
   - {type: host, database: all, user: all, address: '0.0.0.0/0', auth_method: md5}
+  - {type: host, database: replication, user: repli, address: 192.168.1.42/32, auth_method: md5}
+  - {type: host, database: replication, user: repli, address: 192.168.1.40/32, auth_method: md5}

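The two added replication entries render to standard pg_hba.conf lines (columns: TYPE DATABASE USER ADDRESS METHOD):

    host  replication  repli  192.168.1.42/32  md5
    host  replication  repli  192.168.1.40/32  md5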
@@ -1,54 +1,45 @@
 $ANSIBLE_VAULT;1.1;AES256
-(53 lines of AES256 ciphertext)
+(44 lines of re-encrypted AES256 ciphertext)

@@ -3,7 +3,7 @@ dhcpd_lease_time: '72'
 dhcpd_domain_name: "lan.{{ domain.name }}"
 dhcpd_nameservers:
   - '192.168.1.4'
-  - '192.168.1.40'
+  - '192.168.1.41'
 dhcpd_zones:
   - zone: "lan.{{ domain.name }}."
@@ -41,10 +41,17 @@ dhcpd_hosts:
   - hostname: 'oscar'
     address: '192.168.1.40'
-    ethernet: '68:1D:EF:3C:F0:44'
+    ethernet: '7C:83:34:B3:49:9A'
   - hostname: 'bleys'
     address: '192.168.1.42'
     ethernet: '68:1d:ef:2b:3d:24'
+  - hostname: 'VMAS-HML'
+    address: '192.168.1.50'
+    ethernet: '52:54:00:02:74:ed'
+  - hostname: 'VMAS-BUILD'
+    address: '192.168.1.53'
+    ethernet: '52:54:13:1e:93'
   - hostname: 'xiaomi-chambre-gateway'
@@ -62,7 +69,4 @@ dhcpd_hosts:
   - hostname: 'shelly-chambre-ventilo'
     address: '192.168.1.65'
     ethernet: 'e0:98:06:97:78:0b'
-  - hostname: 'shelly-Bureau-chauffeau'
-    address: '192.168.1.66'
-    ethernet: '8c:aa:b5:42:b9:b9'

@@ -1,2 +1,3 @@
 nomad_datacenter: homelab
+nomad_allow_privileged: True
 system_wol_enable: True

@@ -7,7 +7,6 @@ nomad_client_meta:
   - name: "env"
     value: "production"
 vault_unseal_keys_dir_output: "~/vaultUnseal/production"
-env_default_nfs_path: ""
+env_default_nfs_path: "/volume2"
 env_media_nfs_path: "/volume1"
 env_automount: true
-nas_ip: "192.168.1.43"

@@ -1,5 +1,5 @@
 domain:
-  name: ducamps.dev
+  name: ducamps-dev.eu
 #systemd_mounts: []
 #systemd_mounts_enabled: []
 consul_bootstrap_expect: 2
@@ -14,8 +14,6 @@ hosts_entries:
   - ip: "{{ hostvars['nas-dev']['ansible_default_ipv4']['address'] }}"
     name: diskstation.ducamps.eu
-env_default_nfs_path: ""
-env_automount: true
-nas_ip: "nfs.service.consul"
+env_default_nfs_path: "/var/local/volume1"
+env_media_nfs_path: "{{ env_default_nfs_path }}"
+env_automount: false

@@ -1,10 +1,6 @@
 ---
 ansible_host: "192.168.1.42"
 ansible_python_interpreter: "/usr/bin/python3"
-default_interface: "enp2s0"
-consul_iface: "{{ default_interface}}"
-vault_iface: "{{ default_interface}}"
-nfs_iface: "{{ default_interface}}"
 wireguard_address: "10.0.0.7/24"
 wireguard_byhost_allowed_ips:
   merlin: 10.0.0.7,192.168.1.42,192.168.1.0/24
@@ -15,13 +11,13 @@ wireguard_endpoint: ""
 wireguard_postup:
   - iptables -A FORWARD -i wg0 -j ACCEPT
   - iptables -A FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o {{default_interface}} -j MASQUERADE
+  - iptables -t nat -A POSTROUTING -o enp2s0 -j MASQUERADE
   - sysctl -w net.ipv4.ip_forward=1
 wireguard_postdown:
   - iptables -D FORWARD -i wg0 -j ACCEPT
   - iptables -D FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o {default_interface} -j MASQUERADE
+  - iptables -t nat -D POSTROUTING -o enp2s0 -j MASQUERADE
   - sysctl -w net.ipv4.ip_forward=0
 partition_table:

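A hedged sketch of the wg0 interface section these vars should render to, assuming the wireguard role emits one PostUp/PostDown line per list item (peer sections omitted, interface name per the '+' side):

    [Interface]
    Address = 10.0.0.7/24
    PostUp = iptables -A FORWARD -i wg0 -j ACCEPT
    PostUp = iptables -A FORWARD -o wg0 -j ACCEPT
    PostUp = iptables -t nat -A POSTROUTING -o enp2s0 -j MASQUERADE
    PostUp = sysctl -w net.ipv4.ip_forward=1
    PostDown = iptables -D FORWARD -i wg0 -j ACCEPT
    PostDown = iptables -D FORWARD -o wg0 -j ACCEPT
    PostDown = iptables -t nat -D POSTROUTING -o enp2s0 -j MASQUERADE
    PostDown = sysctl -w net.ipv4.ip_forward=0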
@@ -1,23 +1,22 @@
 ---
 ansible_host: 10.0.0.1
-#ansible_host: 135.181.150.203
-default_interface: "eth0"
 wireguard_address: "10.0.0.1/24"
 wireguard_endpoint: "135.181.150.203"
 wireguard_persistent_keepalive: "20"
-wireguard_allowed_ips: 10.0.0.1
+wireguard_allowed_ips: "10.0.0.1/32,10.0.0.3/32,10.0.0.5/32"
 wireguard_postup:
   - iptables -A FORWARD -o %i -j ACCEPT
   - iptables -A FORWARD -i %i -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
+  - iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
   - sysctl -w net.ipv4.ip_forward=1
   - resolvectl dns %i 192.168.1.4 192.168.1.41; resolvectl domain %i '~ducamps.win' '~ducamps.eu' '~{{ consul_domain }}'
 wireguard_postdown:
   - iptables -D FORWARD -i %i -j ACCEPT
   - iptables -D FORWARD -o %i -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
+  - iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
   - sysctl -w net.ipv4.ip_forward=0
 wireguard_unmanaged_peers:

@@ -1,10 +1,6 @@
 ---
 ansible_host: "192.168.1.41"
 ansible_python_interpreter: "/usr/bin/python3"
-default_interface: "enu1u1"
-consul_iface: "{{ default_interface }}"
-vault_iface: "{{ default_interface }}"
 wireguard_address: "10.0.0.6/24"
 wireguard_byhost_allowed_ips:
   merlin: 10.0.0.6,192.168.1.41
@@ -15,10 +11,10 @@ wireguard_endpoint: ""
 wireguard_postup:
   - iptables -A FORWARD -i wg0 -j ACCEPT
   - iptables -A FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
+  - iptables -t nat -A POSTROUTING -o enu1u1 -j MASQUERADE
 wireguard_postdown:
   - iptables -D FORWARD -i wg0 -j ACCEPT
   - iptables -D FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
+  - iptables -t nat -D POSTROUTING -o enu1u1 -j MASQUERADE

@@ -1,8 +1,4 @@
 ---
-default_interface: eth0
-vault_iface: "{{ default_interface}}"
-ansible_host: gerard-dev.lan.ducamps.dev
 wireguard_address: "10.0.1.6/24"
 perrsistent_keepalive: "20"
 wireguard_endpoint: ""
@@ -10,10 +6,10 @@ wireguard_endpoint: ""
 wireguard_postup:
   - iptables -A FORWARD -i wg0 -j ACCEPT
   - iptables -A FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
+  - iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
 wireguard_postdown:
   - iptables -D FORWARD -i wg0 -j ACCEPT
   - iptables -D FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o {{ default_interface}} -j MASQUERADE
+  - iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE

@@ -1,39 +1,31 @@
 ---
 ansible_host: 10.0.0.4
-#ansible_host: 65.21.2.14
-default_interface: "ens3"
-nfs_iface: "wg0"
 wireguard_address: "10.0.0.4/24"
-wireguard_endpoint: "65.21.2.14"
-wireguard_persistent_keepalive: "20"
-wireguard_byhost_allowed_ips:
-  oscar: "0.0.0.0/0"
-  bleys: "0.0.0.0/0"
-wireguard_allowed_ips: "10.0.0.4/32,10.0.0.3,10.0.0.5"
+wireguard_endpoint: "95.216.217.5"
+wireguard_persistent_keepalive: "30"
+wireguard_allowed_ips: "10.0.0.4/32,10.0.0.3/32,10.0.0.5/32"
 wireguard_postup:
   - iptables -A FORWARD -o %i -j ACCEPT
   - iptables -A FORWARD -i %i -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
-  - sysctl -w net.ipv4.ip_forward=1
-  - resolvectl dns %i 192.168.1.4 192.168.1.41; resolvectl domain %i '~ducamps.win' '~ducamps.eu' '~{{ consul_domain }}'
+  - iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
 wireguard_postdown:
   - iptables -D FORWARD -i %i -j ACCEPT
   - iptables -D FORWARD -o %i -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
-  - sysctl -w net.ipv4.ip_forward=0
+  - iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
 wireguard_unmanaged_peers:
   phone:
-    public_key: IYKgrQ2VJUbOnupSqedOfIilsbmBBABZUTRF9ZoTrkc=
+    public_key: ioG35kDFTtip+Acfq+je9qDHYbZij+J6+Pg3T6Z4N0w=
     allowed_ips: 10.0.0.3/32
     persistent_keepalive: 0
   zen:
     public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
     allowed_ips: 10.0.0.5/32
     persistent_keepalive: 0
-wireguard_dns: "192.168.1.4,192.168.1.41"
+wireguard_dns: "192.168.1.41,192.168.1.4"
 consul_client_addr: "127.0.0.1 10.0.0.4"
 consul_bind_address: "10.0.0.4"
 consul_ui: True
@@ -43,8 +35,7 @@ nomad_host_networks:
   - name: "private"
     interface: wg0
   - name: "public"
-    interface: ens3
+    interface: eth0
   - name: "default"
     interface: wg0
 vault_listener_address: 10.0.0.4
-nomad_plugins_podman: True

@@ -1,8 +1,4 @@
 ---
-ansible_host: merlin-dev.lan.ducamps.dev
-default_interface: eth0
-vault_iface: "{{ default_interface}}"
 wireguard_address: "10.0.1.4/24"
 wireguard_endpoint: "{{ ansible_default_ipv4.address }}"
 wireguard_persistent_keepalive: "30"
@@ -10,12 +6,12 @@ wireguard_persistent_keepalive: "30"
 wireguard_postup:
   - iptables -A FORWARD -o %i -j ACCEPT
   - iptables -A FORWARD -i %i -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
+  - iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
 wireguard_postdown:
   - iptables -D FORWARD -i %i -j ACCEPT
   - iptables -D FORWARD -o %i -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
+  - iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
 wireguard_unmanaged_peers:
   phone:

ansible/host_vars/nas (new file): 16 lines

@@ -0,0 +1,16 @@
+---
+wireguard_address: "10.0.1.8/24"
+perrsistent_keepalive: "30"
+wireguard_endpoint: ""
+wireguard_byhost_allowed_ips:
+  merlin: 10.0.0.8,192.168.1.10
+  corwin: 10.0.0.8,192.168.1.10
+wireguard_postup:
+  - iptables -A FORWARD -i wg0 -j ACCEPT
+  - iptables -A FORWARD -o wg0 -j ACCEPT
+  - iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
+wireguard_postdown:
+  - iptables -D FORWARD -i wg0 -j ACCEPT
+  - iptables -D FORWARD -o wg0 -j ACCEPT
+  - iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE

@@ -1,7 +1,4 @@
 ---
-ansible_host: nas-dev.lan.ducamps.dev
-default_interface: eth0
-vault_iface: "{{ default_interface}}"
 wireguard_address: "10.0.1.8/24"
 perrsistent_keepalive: "30"
 wireguard_endpoint: ""
@@ -9,9 +6,9 @@ wireguard_endpoint: ""
 wireguard_postup:
   - iptables -A FORWARD -i wg0 -j ACCEPT
   - iptables -A FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
+  - iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
 wireguard_postdown:
   - iptables -D FORWARD -i wg0 -j ACCEPT
   - iptables -D FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
+  - iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE

@@ -1,19 +0,0 @@
----
-wireguard_address: "10.0.0.8/24"
-default_interface: "enp2s0"
-consul_iface: "{{ default_interface}}"
-vault_iface: "{{ default_interface}}"
-perrsistent_keepalive: "30"
-wireguard_endpoint: ""
-wireguard_byhost_allowed_ips:
-  merlin: 10.0.0.8,192.168.1.43
-  corwin: 10.0.0.8,192.168.1.43
-wireguard_postup:
-  - iptables -A FORWARD -i wg0 -j ACCEPT
-  - iptables -A FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
-wireguard_postdown:
-  - iptables -D FORWARD -i wg0 -j ACCEPT
-  - iptables -D FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE

@@ -1,9 +1,4 @@
 ---
-default_interface: "enp1s0"
-consul_iface: "{{ default_interface}}"
-vault_iface: "{{ default_interface}}"
-nfs_iface: "{{ default_interface}}"
-nomad_client_cpu_total_compute: 8000
 wireguard_address: "10.0.0.2/24"
 wireguard_byhost_allowed_ips:
   merlin: 10.0.0.2,192.168.1.40
@@ -14,12 +9,12 @@ wireguard_endpoint: ""
 wireguard_postup:
   - iptables -A FORWARD -i wg0 -j ACCEPT
   - iptables -A FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
+  - iptables -t nat -A POSTROUTING -o enp2s0 -j MASQUERADE
 wireguard_postdown:
   - iptables -D FORWARD -i wg0 -j ACCEPT
   - iptables -D FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
+  - iptables -t nat -D POSTROUTING -o enp2s0 -j MASQUERADE
 partition_table:
   - device: "/dev/sda"

@@ -1,7 +1,4 @@
 ---
-ansible_host: oscar-dev.lan.ducamps.dev
-default_interface: eth0
-vault_iface: "{{ default_interface}}"
 wireguard_address: "10.0.1.2/24"
 perrsistent_keepalive: "30"
 wireguard_endpoint: ""
@@ -9,9 +6,9 @@ wireguard_endpoint: ""
 wireguard_postup:
   - iptables -A FORWARD -i wg0 -j ACCEPT
   - iptables -A FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
+  - iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
 wireguard_postdown:
   - iptables -D FORWARD -i wg0 -j ACCEPT
   - iptables -D FORWARD -o wg0 -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
+  - iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE

@@ -5,11 +5,7 @@ requirements:
 deploy_production:
 	ansible-playbook site.yml -i production -u ansible
-deploy_production_wiregard:
-	ansible-playbook playbooks/wireguard.yml -i production -u ansible
 deploy_staging:
-	ansible-playbook playbooks/wireguard.yml -i staging -u ansible
 	ansible-playbook site.yml -i staging -u ansible

@@ -1,26 +1,14 @@
 ---
-- name: Consul install
-  hosts: all
-  roles:
-    - role: ansible-consul
-      become: true
 - name: Vault install
   hosts: homelab
   roles:
     - role: ansible-hashicorp-vault
       become: true
   post_tasks:
-    - name: Stat root file
-      ansible.builtin.stat:
-        path: "{{ vault_unseal_keys_dir_output }}/rootkey"
-      register: rootkey_exist
-      delegate_to: localhost
     - name: Reading root contents
       ansible.builtin.command: cat "{{ vault_unseal_keys_dir_output }}/rootkey"
       register: root_token
      delegate_to: localhost
-      when: rootkey_exist.stat.exists
       changed_when: false
     - name: debug
       ansible.builtin.debug:
@@ -32,7 +20,7 @@
       period: 72h
       no_parent: true
       token: "{{ root_token.stdout }}"
-      url: "http://active.vault.service.consul:8200"
+      url: http://{{ ansible_default_ipv4.address }}:8200
       retries: 4
       run_once: true
       delegate_to: localhost
@@ -44,11 +32,13 @@
       nomad_vault_token: "{{ nomad_token_data.login.auth.client_token }}"
     when: nomad_token_data.login is defined
-- name: nomad
+- name: Hashicorp stack
   hosts: all
   vars:
     unseal_keys_dir_output: ~/vaultunseal
   roles:
+    - role: ansible-consul
+      become: true
     - role: ansible-nomad
       become: true
     - role: docker

@@ -1,6 +1,6 @@
 ---
 - hosts: all
-  gather_facts: false
   become: true
+  gather_facts: false
   roles:
     - ansible_bootstrap

@@ -14,15 +14,3 @@
     - docker
   become: true
   become_user: '{{ user.name }}'
-- hosts: all
-  roles:
-    - role: user_config
-      vars:
-        user_config_username: "{{ user.name }}"
-      become_user: "{{ user.name }}"
-      become: true
-    - role: user_config
-      vars:
-        user_config_username: root
-      become: true

@@ -1,54 +1,16 @@
 ---
-- name: Database playbook
-  hosts: database
+- hosts: database
   vars:
     # certbot_force: true
-  pre_tasks:
-    - name: Install Pg vertors (immich)
-      aur:
-        name: pgvecto.rs-bin
-        state: present
-      become: true
-      become_user: aur_builder
-    - name: Add database member to pg_hba replication
-      ansible.builtin.set_fact:
-        postgresql_hba_entries: "{{ postgresql_hba_entries + [\
-          {'type':'host', \
-          'database': 'replication',\
-          'user':'repli',\
-          'address':hostvars[item]['ansible_'+hostvars[item]['default_interface']]['ipv4']['address']+'/32',\
-          'auth_method':'trust'}] }}"
-      loop: '{{ groups.database }}'
   roles:
     - role: ansible-role-postgresql
       become: true
   tasks:
-    - name: Launch replication
-      ansible.builtin.command: pg_basebackup -D /var/lib/postgres/data -h {{groups["database_active"]|first}} -U repli -Fp -Xs -P -R -w
-      args:
-        creates: /var/lib/postgres/data/postgresql.conf
-      become: true
-      become_user: postgres
-      when: inventory_hostname in groups["database_standby"]
-    - name: Ensure PostgreSQL is started and enabled on boot.
-      ansible.builtin.service:
-        name: '{{ postgresql_daemon }}'
-        state: '{{ postgresql_service_state }}'
-        enabled: '{{ postgresql_service_enabled }}'
-      become: true
-    - name: Set Postgress shared libraries
-      community.postgresql.postgresql_set:
-        name: shared_preload_libraries
-        value: vectors.so
-      become: true
-      become_user: postgres
-      when: inventory_hostname in groups["database_active"]
-      notify: Restart postgresql
-    - name: Set Postgress shared libraries
-      community.postgresql.postgresql_set:
-        name: search_path
-        value: '$user, public, vectors'
-      become: true
-      become_user: postgres
-      when: inventory_hostname in groups["database_active"]
+    - name: add pg_read_all_data to dump
+      community.postgresql.postgresql_membership:
+        target_roles:
+          - dump
+        groups:
+          - pg_read_all_data
+      become: true
+      become_user: "{{ postgresql_user }}"

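Not part of the commit, but a hedged way to verify the standby attached after the pg_basebackup task, using community.postgresql (module and pg_stat_replication columns are real; placement and task name illustrative):

    - name: Check replication state on the primary
      community.postgresql.postgresql_query:
        query: SELECT client_addr, state FROM pg_stat_replication;
      register: repl_state
      become: true
      become_user: postgres
      when: inventory_hostname in groups["database_active"]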
@@ -1,28 +1,10 @@
 ---
-- name: gather all
-  hosts: all
-- name: NAS playbook
-  hosts: NAS
+- hosts: NAS
   vars:
     # certbot_force: true
-  pre_tasks:
-    - name: include task NasBind
-      ansible.builtin.include_tasks:
-        file: tasks/NasBind.yml
-      loop: "{{ nas_bind_source }}"
-    - name: create nomad folder
-      ansible.builtin.file:
-        path: "{{ nas_bind_target }}/nomad/{{ item.name }}"
-        owner: "{{ item.owner|default('root') }}"
-        state: directory
-      become: true
-      loop: "{{ NAS_nomad_folder }}"
   roles:
-    - role: ansible-role-nut
-      become: true
-    - role: ansible-role-nfs
-      become: true
-    - role: ansible-role-pureftpd
-      become: true
-    - role: vladgh.samba.server
-      become: true
-      #- samba
+    - role: ansible-role-nfs
+      become: true
+    - role: ansible-role-vsftpd
+      become: true

@@ -1,18 +0,0 @@
-- name: Ensure base NFS directory exist
-  ansible.builtin.file:
-    path: "{{ item.dest }}"
-    state: directory
-  become: true
-- name: Ensure source NFS directory exist
-  ansible.builtin.file:
-    path: "{{ item.source }}"
-    state: directory
-  become: true
-- name: Bind NAS export
-  ansible.posix.mount:
-    path: "{{ item.dest }}"
-    src: "{{ item.source }}"
-    opts: bind
-    fstype: none
-    state: mounted
-  become: true

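Each loop item of the removed NasBind.yml produces a bind mount that ansible.posix.mount (state: mounted) also records in fstab; the nomad entry from the removed defaults, for example, is equivalent to:

    /data/data1/nomad  /exports/nomad  none  bind  0  0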
@@ -1 +0,0 @@
-path = /exports/homes/%S

@@ -0,0 +1,12 @@
+---
+- hosts: all
+  roles:
+    - role: user_config
+      vars:
+        user_config_username: "{{ user.name }}"
+      become_user: "{{ user.name }}"
+      become: true
+    - role: user_config
+      vars:
+        user_config_username: root
+      become: true

@@ -1,8 +1,8 @@
 [DNS]
-oscar
+gerard
 
 [dhcp]
-oberon
+gerard
 
 [database_active]
 bleys
@@ -22,11 +22,11 @@ bleys
 production
 
 [NAS]
-oberon
+nas
 
 [cluster]
 oscar
-#gerard
+gerard
 bleys
@@ -35,6 +35,7 @@ NAS
 cluster
 
 [VPS]
+corwin
 merlin
 
 [region:children]
@@ -43,10 +44,8 @@ VPS
 production
 
 [production]
+corwin
 oscar
 merlin
-#gerard
+gerard
 bleys
-oberon
-
-[staging]

@@ -1,5 +1,4 @@
 ---
-roles:
 - src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-arch-provissionning.git
   scm: git
 - src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-postgresql.git
@@ -40,10 +39,6 @@ roles:
   scm: git
 - src: git@github.com:vincentDcmps/ansible-role-nfs.git
   scm: git
-- src: git@github.com:vincentDcmps/ansible-role-nut.git
-  scm: git
-- src: git@git.ducamps.eu:2222/ansible-roles/ansible-role-pureftpd.git
-  scm: git
 - src: https://github.com/PowerDNS/pdns_recursor-ansible.git
-collections:
-  - name: vladgh.samba
+- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-samba.git
+  scm: git

@@ -1,10 +1,12 @@
 ---
 - import_playbook: playbooks/server.yml
+- import_playbook: playbooks/dhcpd.yml
+- import_playbook: playbooks/dns.yml
+- import_playbook: playbooks/HashicorpStack.yml
 - import_playbook: playbooks/nas.yml
 - import_playbook: playbooks/autofs.yml
 - import_playbook: playbooks/sssd.yml
-- import_playbook: playbooks/wireguard.yml
-- import_playbook: playbooks/dhcpd.yml
-- import_playbook: playbooks/dns.yml
-- import_playbook: playbooks/HashicorpStack.yml
 - import_playbook: playbooks/database.yml
 - import_playbook: playbooks/rsyncd.yml
+- import_playbook: playbooks/create_user.yml

@@ -5,7 +5,6 @@ oscar-dev
 oscar-dev
 
 [database_standby]
-gerard-dev
 
 [database:children]
 database_active
@@ -40,5 +39,3 @@ oscar-dev
 gerard-dev
 merlin-dev
 nas-dev
-
-[production]

@@ -100,18 +100,3 @@ agains:
 put one recursor on cluster over authority server and keep the recursor on gerard for better recundancy
 ### Consequences
-## 005 physical Recursor location
-### Status
-done
-### Context
-following NAS migration physical DNS Recursor was install directly on NAS this bring a SPOF when NAS failed Recursor on Nomad cluster are stopped because of volume dependance
-### Decision
-Put physical Recursor on a cluster node like that to have a DNS issue we need to have NAS and this nomad down on same Time

@@ -16,27 +16,11 @@ Storage:
 - hot Data (nomad, document,fresh download file,music?) on SSD cold DATA on HDD (film, serie photo)
 - at least 2 HDD and 2 SSD
 Hardware:
 - network 2.5 gpbs will be good for evolve
-- at least 4go ram (expansive will be appreciable)
+- at least 4go ram
 Software:
 be able to install custom linux distrib
-### Decision
-- Due to form factor/consumption and SSD capability my choise is on ASUSTOR Nimbustor 2 Gen 2 AS5402, he corresponding to need and less expensive than a DIY NAS
-- buy only a new ssd of 2to in more to store system and hot data
-### Cosequence
-need to migrate Data and keep same disk
-- install system
-- copy all data from 2to HDD to SSD then format 2to HDD
-- copy download data to FROM 4 to HDD to SSD
-- copy serie to 2to HDD and copy film on external harddrive

@@ -1,25 +0,0 @@
-# Docker Pull throught
-# 001 architecture consideration
-## Status
-Accepted
-## Context
-docker hub get a pull limit if somebody go wrong on our infrastructure we can get quickyly this limit solution will be to implement a pull throught proxy.
-### Decision
-create two container task to create a dockerhub pull through and a ghcr one
-we can add these registry to traefick to have both under the port 5000 but this will add a traefik dependancy on rebuild
-so to begin we will use one trafick service on two diferent static port
-## Consequences
-- this registry need to be start first on cluster creation
-- need to update all job image with local proxy url

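For reference, the pull-through idea the removed ADR describes maps to the official registry image's proxy mode; a hedged config.yml sketch (port and storage path illustrative):

    version: 0.1
    proxy:
      remoteurl: https://registry-1.docker.io   # upstream to mirror; a second instance would point at https://ghcr.io
    http:
      addr: :5000
    storage:
      filesystem:
        rootdirectory: /var/lib/registry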
@@ -1,8 +0,0 @@
-# Troubleshooting
-## issue with SMTP traefik port
-ensure that no other traefik router (httt or TCP) listening on smtp or
-all entrypoint this can pertuubate smtp TLS connection
-see [https://doc.traefik.io/traefik/routing/routers/#entrypoints_1](here)

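The removed note boils down to: exactly one TCP router should own the SMTP entrypoint. A hedged Traefik dynamic-configuration sketch (router and service names illustrative):

    tcp:
      routers:
        smtp:
          entryPoints:
            - smtp            # only this router listens here; keep it off web/websecure and never on all entrypoints
          rule: "HostSNI(`*`)"
          service: smtp-svc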
@@ -10,15 +10,12 @@ vault-dev:
 		./vault/standalone_vault.sh $(FILE);\
 	fi
-vagranup:
+create-dev:
 	vagrant up
-create-dev: vagranup DNS-stagging
 	make -C ansible deploy_staging
-	make -C terraform deploy_vault env=staging
-	VAULT_TOKEN=$(shell cat ~/vaultUnseal/staging/rootkey) python ./script/generate-vault-secret
-create-dev-base: vagranup DNS-stagging
+create-dev-base:
+	vagrant up
 	make -C ansible deploy_staging_base
@@ -27,13 +24,3 @@ destroy-dev:
 serve:
 	mkdocs serve
-DNS-stagging:
-	$(eval dns := $(shell dig oscar-dev.lan.ducamps.dev +short))
-	$(eval dns1 := $(shell dig nas-dev.lan.ducamps.dev +short))
-	sudo resolvectl dns virbr2 "$(dns)" "$(dns1)";sudo resolvectl domain virbr2 "~consul";sudo systemctl restart systemd-resolved.service
-DNS-production:
-	sudo resolvectl dns virbr2 "";sudo resolvectl domain virbr2 "";sudo systemctl restart systemd-resolved.service

@@ -35,7 +35,7 @@ job "MQTT" {
       ]
     }
     config {
-      image = "docker.service.consul:5000/library/eclipse-mosquitto"
+      image = "eclipse-mosquitto"
       ports = ["mosquittoWS", "mosquittoMQTT"]
       volumes = [
        "/mnt/diskstation/nomad/mosquitto:/mosquitto/data",

@@ -8,11 +8,6 @@ job "alertmanager" {
   vault {
     policies = ["alertmanager"]
   }
-  constraint {
-    attribute = "${node.class}"
-    operator = "set_contains"
-    value = "cluster"
-  }
   group "alertmanager" {
     network {
       mode = "host"
@@ -30,7 +25,7 @@ job "alertmanager" {
        "homer.enable=true",
        "homer.name=AlertManager",
        "homer.service=Monitoring",
-        "homer.logo=http://${NOMAD_ADDR_http}/favicon.ico",
+        "homer.logo=https://camo.githubusercontent.com/13ff7fc7ea6d8a6d98d856da8e3220501b9e6a89620f017d1db039007138e062/687474703a2f2f6465766f70792e696f2f77702d636f6e74656e742f75706c6f6164732f323031392f30322f7a616c2d3230302e706e67",
        "homer.target=_blank",
        "homer.url=http://${NOMAD_ADDR_http}",
@@ -45,7 +40,7 @@ job "alertmanager" {
     }
     config {
-      image = "docker.service.consul:5000/prom/alertmanager"
+      image = "prom/alertmanager"
       args= ["--log.level=debug", "--config.file=/etc/alertmanager/alertmanager.yml"]
       ports = ["http"]
       volumes = [
@@ -58,7 +53,7 @@ job "alertmanager" {
 global:
   smtp_from: alert@ducamps.eu
   smtp_smarthost: mail.ducamps.eu:465
-  smtp_hello: "mail.ducamps.eu"
+  smtp_hello: "mail.ducamps.win"
   smtp_require_tls: false
 {{with secret "secrets/data/nomad/alertmanager/mail"}}
   smtp_auth_username: {{.Data.data.username}}

@@ -1,62 +0,0 @@
-job "actualbudget" {
-  datacenters = ["homelab"]
-  priority = 50
-  type = "service"
-  meta {
-    forcedeploy = "0"
-  }
-  constraint {
-    attribute = "${attr.cpu.arch}"
-    value = "amd64"
-  }
-  constraint {
-    attribute = "${node.class}"
-    operator = "set_contains"
-    value = "cluster"
-  }
-  group "actualbudget"{
-    network {
-      mode = "host"
-      port "http" {
-        to = 5006
-      }
-    }
-    task "actualbudget-server" {
-      driver = "docker"
-      service {
-        name = "actualbudget"
-        port = "http"
-        tags = [
-          "traefik.enable=true",
-          "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`budget.ducamps.eu`)",
-          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=budget.ducamps.eu",
-          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
-          "traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
-          "homer.enable=true",
-          "homer.name=${NOMAD_TASK_NAME}",
-          "homer.service=Application",
-          "homer.target=_blank",
-          "homer.logo=https://budget.ducamps.eu/apple-touch-icon.png",
-          "homer.url=https://budget.ducamps.eu",
-        ]
-      }
-      config {
-        image = "ghcr.service.consul:5000/actualbudget/actual-server:latest"
-        ports = ["http"]
-        volumes = [
-          "/mnt/diskstation/nomad/actualbudget:/data"
-        ]
-      }
-      env {
-      }
-      resources {
-        memory = 300
-      }
-    }
-  }
-}

@@ -1,240 +0,0 @@
-job "borgmatic" {
-  datacenters = ["homelab"]
-  priority = 50
-  type = "service"
-  meta {
-    forcedeploy = "0"
-  }
-  constraint {
-    attribute = "${node.class}"
-    operator = "set_contains"
-    value = "NAS"
-  }
-  group "borgmatic"{
-    vault{
-      policies= ["borgmatic"]
-    }
-    task "borgmatic" {
-      action "manual-backup" {
-        command = "/usr/local/bin/borgmatic"
-        args = ["create",
-          "prune",
-          "--verbosity",
-          "1"
-        ]
-      }
-      action "list-backup" {
-        command = "/usr/local/bin/borgmatic"
-        args = ["rlist"]
-      }
-      driver = "docker"
-      config {
-        image = "ghcr.service.consul:5000/borgmatic-collective/borgmatic"
-        volumes = [
-          "/exports:/exports",
-          "local/borgmatic.d:/etc/borgmatic.d",
-          "secret/id_rsa:/root/.ssh/id_rsa",
-          "secret/known_hosts:/root/.ssh/known_hosts",
-          "/exports/nomad/borgmatic:/root/.cache/borg",
-        ]
-      }
-      env {
-      }
-      template {
-        data= <<EOH
-BORG_RSH="ssh -i /root/.ssh/id_rsa -p 23"
-{{ with secret "secrets/data/nomad/borgmatic"}}
-BORG_PASSPHRASE= {{.Data.data.passphrase}}
-{{end}}
-EOH
-        destination = "secrets/sample.env"
-        env = true
-      }
-      template {
-        data= <<EOH
-0 2 * * * PATH=$PATH:/usr/local/bin /usr/local/bin/borgmatic create prune --verbosity 1
-0 23 1 * * PATH=$PATH:/usr/local/bin /usr/local/bin/borgmatic check
-EOH
-        destination = "local/borgmatic.d/crontab.txt"
-      }
-      template {
-        data= <<EOH
-# List of source directories to backup (required). Globs and
-# tildes are expanded. Do not backslash spaces in path names.
-source_directories:
-  - /exports/ebook
-  - /exports/homes
-  - /exports/music
-  - /exports/nomad
-  - /exports/photo
-repositories:
-  - path: ssh://u304977@u304977.your-storagebox.de/./{{if eq "production" (env "meta.env") }}backup_hamelab{{else}}backup_homelab_dev{{end}}
-    label: {{if eq "production" (env "meta.env") }}backup_hamelab{{else}}backup_homelab_dev{{end}}
-exclude_patterns:
-  - '*/nomad/jellyfin/cache'
-  - '*nomad/loki/'
-  - '*nomad/prometheus'
-  - '*nomad/registry'
-  - '*nomad/pacoloco'
-  - '*nomad/pihole'
-  - '*nomad/jellyfin/*'
-  - '*.log*'
-match_archives: '*'
-archive_name_format: '{{ env "node.datacenter" }}-{now:%Y-%m-%dT%H:%M:%S.%f}'
-extra_borg_options:
-  # Extra command-line options to pass to "borg init".
-  # init: --extra-option
-  # Extra command-line options to pass to "borg prune".
-  # prune: --extra-option
-  # Extra command-line options to pass to "borg compact".
-  # compact: --extra-option
-  # Extra command-line options to pass to "borg create".
-  create: --progress --stats
-  # Extra command-line options to pass to "borg check".
-  # check: --extra-option
-# Keep all archives within this time interval.
-# keep_within: 3H
-# Number of secondly archives to keep.
-# keep_secondly: 60
-# Number of minutely archives to keep.
-# keep_minutely: 60
-# Number of hourly archives to keep.
-# keep_hourly: 24
-# Number of daily archives to keep.
-keep_daily: 7
-# Number of weekly archives to keep.
-keep_weekly: 4
-# Number of monthly archives to keep.
-# keep_monthly: 6
-# Number of yearly archives to keep.
-# keep_yearly: 1
-checks:
-  - name: repository
-  # - archives
-# check_repositories:
-#   - user@backupserver:sourcehostname.borg
-# check_last: 3
-# output:
-#   color: false
-# List of one or more shell commands or scripts to execute
-# before creating a backup, run once per configuration file.
-# before_backup:
-#   - echo "Starting a backup."
-# List of one or more shell commands or scripts to execute
-# before pruning, run once per configuration file.
-# before_prune:
-#   - echo "Starting pruning."
-# List of one or more shell commands or scripts to execute
-# before compaction, run once per configuration file.
-# before_compact:
-#   - echo "Starting compaction."
-# List of one or more shell commands or scripts to execute
-# before consistency checks, run once per configuration file.
-# before_check:
-#   - echo "Starting checks."
-# List of one or more shell commands or scripts to execute
-# before extracting a backup, run once per configuration file.
-# before_extract:
-#   - echo "Starting extracting."
-# List of one or more shell commands or scripts to execute
-# after creating a backup, run once per configuration file.
-# after_backup:
-#   - echo "Finished a backup."
-# List of one or more shell commands or scripts to execute
-# after compaction, run once per configuration file.
-# after_compact:
-#   - echo "Finished compaction."
-# List of one or more shell commands or scripts to execute
-# after pruning, run once per configuration file.
-# after_prune:
-#   - echo "Finished pruning."
-# List of one or more shell commands or scripts to execute
-# after consistency checks, run once per configuration file.
-# after_check:
-#   - echo "Finished checks."
-# List of one or more shell commands or scripts to execute
-# after extracting a backup, run once per configuration file.
-# after_extract:
-#   - echo "Finished extracting."
-# List of one or more shell commands or scripts to execute
-# when an exception occurs during a "prune", "compact",
# "create", or "check" action or an associated before/after
# hook.
# on_error:
# - echo "Error during prune/compact/create/check."
# List of one or more shell commands or scripts to execute
# before running all actions (if one of them is "create").
# These are collected from all configuration files and then
# run once before all of them (prior to all actions).
# before_everything:
# - echo "Starting actions."
# List of one or more shell commands or scripts to execute
# after running all actions (if one of them is "create").
# These are collected from all configuration files and then
# run once after all of them (after any action).
# after_everything:
# - echo "Completed actions."
EOH
destination = "local/borgmatic.d/config.yaml"
}
template {
data= <<EOH
{{ with secret "secrets/data/nomad/borgmatic"}}
{{.Data.data.privatekey}}
{{end}}
EOH
destination = "secret/id_rsa"
perms= "700"
}
template {
data= <<EOH
[u304977.your-storagebox.de]:23 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIICf9svRenC/PLKIL9nk6K/pxQgoiFC41wTNvoIncOxs
[u304977.your-storagebox.de]:23 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA5EB5p/5Hp3hGW1oHok+PIOH9Pbn7cnUiGmUEBrCVjnAw+HrKyN8bYVV0dIGllswYXwkG/+bgiBlE6IVIBAq+JwVWu1Sss3KarHY3OvFJUXZoZyRRg/Gc/+LRCE7lyKpwWQ70dbelGRyyJFH36eNv6ySXoUYtGkwlU5IVaHPApOxe4LHPZa/qhSRbPo2hwoh0orCtgejRebNtW5nlx00DNFgsvn8Svz2cIYLxsPVzKgUxs8Zxsxgn+Q/UvR7uq4AbAhyBMLxv7DjJ1pc7PJocuTno2Rw9uMZi1gkjbnmiOh6TTXIEWbnroyIhwc8555uto9melEUmWNQ+C+PwAK+MPw==
[u304977.your-storagebox.de]:23 ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAGK0po6usux4Qv2d8zKZN1dDvbWjxKkGsx7XwFdSUCnF19Q8psHEUWR7C/LtSQ5crU/g+tQVRBtSgoUcE8T+FWp5wBxKvWG2X9gD+s9/4zRmDeSJR77W6gSA/+hpOZoSE+4KgNdnbYSNtbZH/dN74EG7GLb/gcIpbUUzPNXpfKl7mQitw==
EOH
destination = "secret/known_hosts"
perms="700"
}
resources {
memory = 300
memory_max = 1000
}
}
}
}
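
The crontab template above drives scheduling from inside the container. An alternative sketch using Nomad's own periodic scheduling, assuming the image's entrypoint is overridden to run a single borgmatic pass instead of a cron daemon:

    job "borgmatic-batch" {
      datacenters = ["homelab"]
      type        = "batch"
      periodic {
        cron             = "0 2 * * *"   # same schedule as the crontab above
        prohibit_overlap = true
      }
      # task body identical to the one above, minus the crontab template
    }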

View File

@ -1,146 +0,0 @@
job "immich" {
datacenters = ["homelab"]
priority = 50
type = "service"
meta {
forcedeploy = "0"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
group "immich" {
network {
mode = "host"
port "http" {
to = 3001
}
port "redis" {
to = 6379
}
port "machinelearning" {
to = 3003
}
}
volume "immich-upload" {
type = "csi"
source = "immich-upload"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
volume "immich-cache" {
type = "csi"
source = "immich-cache"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
volume "photo" {
type = "csi"
source = "photo"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
vault {
policies = ["immich"]
}
task "immich-server" {
driver = "docker"
service {
name = "immich"
port = "http"
tags = [
"homer.enable=true",
"homer.name=immich",
"homer.service=Application",
"homer.logo=https://immich.ducamps.eu/favicon-144.png",
"homer.target=_blank",
"homer.url=https://immich.ducamps.eu",
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
]
}
volume_mount {
volume = "immich-upload"
destination = "/usr/src/app/upload"
}
volume_mount {
volume = "photo"
destination = "/photo"
}
config {
image = "ghcr.service.consul:5000/immich-app/immich-server:release"
ports = ["http"]
volumes = [
"/etc/localtime:/etc/localtime"
]
}
template {
data = <<EOH
{{ with secret "secrets/data/database/immich"}}
DB_PASSWORD= {{ .Data.data.password }}
{{end}}
DB_DATABASE_NAME= immich
DB_USERNAME= immich
DB_HOSTNAME= active.db.service.consul
REDIS_HOSTNAME = {{env "NOMAD_IP_redis"}}
REDIS_PORT = {{env "NOMAD_HOST_PORT_redis"}}
IMMICH_MACHINE_LEARNING_URL = http://{{ env "NOMAD_ADDR_machinelearning"}}
EOH
destination = "secrets/immich.env"
env = true
}
resources {
memory = 600
memory_max = 1800
}
}
task "immich-machine-learning" {
driver = "docker"
volume_mount {
volume = "immich-cache"
destination = "/cache"
}
config {
image = "ghcr.service.consul:5000/immich-app/immich-machine-learning:main"
ports = ["machinelearning"]
}
template {
data = <<EOH
{{ with secret "secrets/data/database/immich"}}
DB_PASSWORD= {{ .Data.data.password }}
{{end}}
DB_DATABASE_NAME= immich
DB_USERNAME= immich
DB_HOSTNAME= active.db.service.consul
REDIS_HOSTNAME = {{env "NOMAD_IP_redis"}}
REDIS_PORT = {{env "NOMAD_HOST_PORT_redis"}}
EOH
destination = "secrets/immich.env"
env = true
}
resources {
memory = 200
memory_max = 1800
}
}
task "redis" {
driver = "docker"
config {
image="docker.service.consul:5000/library/redis:6.2-alpine"
ports = ["redis"]
}
resources {
memory = 50
}
}
}
}
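
The three CSI volumes referenced above must exist before the scheduler can place this group. A minimal volume spec sketch for `nomad volume create`, assuming an NFS CSI plugin registered under the id "nfs" (the plugin id and any extra parameters depend on the actual plugin deployment):

    id        = "immich-upload"
    name      = "immich-upload"
    type      = "csi"
    plugin_id = "nfs"
    capability {
      access_mode     = "multi-node-multi-writer"
      attachment_mode = "file-system"
    }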

View File

@ -1 +0,0 @@
../makefile

View File

@ -1,95 +0,0 @@
job "mealie" {
datacenters = ["homelab"]
priority = 50
type = "service"
meta {
forcedeploy = "0"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
group "mealie" {
network {
mode = "host"
port "http" {
to = 9000
}
}
volume "mealie-data" {
type = "csi"
source = "mealie-data"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
vault {
policies = ["mealie"]
}
task "mealie-server" {
driver = "docker"
service {
name = "mealie"
port = "http"
tags = [
"homer.enable=true",
"homer.name=Mealie",
"homer.service=Application",
"homer.subtitle=Mealie",
"homer.logo=https://mealie.ducamps.eu/favicon.ico",
"homer.target=_blank",
"homer.url=https://${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
]
}
config {
image = "ghcr.io/mealie-recipes/mealie"
ports = ["http"]
}
volume_mount {
volume = "mealie-data"
destination = "/app/data"
}
env {
PUID = "1000001"
PGID = "1000001"
TZ = "Europe/Paris"
MAX_WORKERS = 1
WEB_CONCURRENCY = 1
BASE_URL = "https://mealie.ducamps.eu"
OIDC_USER_GROUP = "MealieUsers"
OIDC_ADMIN_GROUP = "MealieAdmins"
OIDC_AUTH_ENABLED = "True"
OIDC_SIGNUP_ENABLED = "true"
OIDC_CONFIGURATION_URL = "https://auth.ducamps.eu/.well-known/openid-configuration"
OIDC_CLIENT_ID = "mealie"
OIDC_AUTO_REDIRECT = "false"
OIDC_PROVIDER_NAME = "authelia"
DB_ENGINE = "postgres"
POSTGRES_USER = "mealie"
POSTGRES_SERVER = "active.db.service.consul"
POSTGRES_PORT = 5432
POSTGRES_DB = "mealie"
LOG_LEVEL = "DEBUG"
}
template {
data = <<EOH
{{ with secret "secrets/data/database/mealie"}}POSTGRES_PASSWORD= "{{ .Data.data.password }}" {{end}}
{{ with secret "secrets/data/authelia/mealie"}}OIDC_CLIENT_SECRET= "{{ .Data.data.password }}" {{end}}
EOH
destination = "secrets/var.env"
env = true
}
resources {
memory = 400
}
}
}
}
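
The mealie service is registered without a health check, so Consul only tracks the allocation's liveness. A hedged sketch of an HTTP check that could be added to the service block (the path is an assumption; substitute any unauthenticated Mealie endpoint):

    service {
      name = "mealie"
      port = "http"
      check {
        type     = "http"
        path     = "/api/app/about"
        interval = "30s"
        timeout  = "5s"
      }
    }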

View File

@ -1,64 +0,0 @@
job "rutorrentlocal" {
datacenters = ["homelab"]
priority = 80
type = "service"
meta {
forcedeploy = "0"
}
constraint {
attribute = "${node.unique.name}"
operator = "set_contains"
value = "oberon"
}
group "bittorent" {
network {
mode = "host"
port "http" {
to = 8080
}
port "torrent" {
static = 6881
}
port "ecoute" {
static = 50000
}
}
task "bittorent" {
driver = "podman"
service {
name = "bittorentlocal"
port = "http"
address_mode= "host"
tags = [
]
}
user = "root"
config {
image = "docker.service.consul:5000/crazymax/rtorrent-rutorrent:edge"
ports = [
"http",
"torrent",
"ecoute"
]
volumes = [
"/exports/nomad/rutorrent/data:/data",
"/exports/nomad/rutorrent/downloads:/downloads"
]
}
env {
PUID = 100001
PGID = 10
UMASK = 002
WEBUI_PORT = "8080"
}
resources {
memory = 650
}
}
}
}
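
Note the two port styles in this group: "to" maps a container port behind a dynamically assigned host port, while "static" pins the host port itself, which is why the peer-facing BitTorrent ports are static and the web UI is not. In sketch form:

    network {
      mode = "host"
      port "http" {
        to = 8080        # random host port forwarded to container port 8080
      }
      port "torrent" {
        static = 6881    # fixed host port, reachable by external peers
      }
    }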

View File

@ -1,89 +0,0 @@
job "vikunja" {
datacenters = ["homelab"]
priority = 70
type = "service"
meta {
forcedeploy = "0"
}
group "vikunja" {
network {
mode = "host"
port "front" {
to = 80
}
port "api" {
to = 3456
}
}
vault {
policies = ["vikunja"]
}
task "api" {
driver = "docker"
service {
name = "vikunja-api"
port = "api"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.entrypoints=web,websecure",
"homer.enable=true",
"homer.name=vikunka",
"homer.service=Application",
"homer.logo=https://${NOMAD_JOB_NAME}.ducamps.eu/images/icons/apple-touch-icon-180x180.png",
"homer.target=_blank",
"homer.url=https://${NOMAD_JOB_NAME}.ducamps.eu",
]
}
config {
image = "docker.service.consul:5000/vikunja/vikunja"
ports = ["api", "front"]
volumes = ["local/config.yml:/etc/vikunja/config.yml"]
}
env {
VIKUNJA_DATABASE_HOST = "active.db.service.consul"
VIKUNJA_DATABASE_TYPE = "postgres"
VIKUNJA_DATABASE_USER = "vikunja"
VIKUNJA_DATABASE_DATABASE = "vikunja"
VIKUNJA_SERVICE_JWTSECRET = uuidv4()
VIKUNJA_SERVICE_FRONTENDURL = "https://${NOMAD_JOB_NAME}.ducamps.eu/"
VIKUNJA_AUTH_LOCAL = "false"
}
template {
data = <<EOH
{{ with secret "secrets/data/database/vikunja"}}
VIKUNJA_DATABASE_PASSWORD= "{{ .Data.data.password }}"
{{end}}
EOH
destination = "secrets/sample.env"
env = true
}
template {
data = <<EOH
auth:
openid:
enabled: true
redirecturl: https://vikunja.ducamps.eu/auth/openid/
providers:
- name: Authelia
authurl: https://auth.ducamps.eu
clientid: vikunja
clientsecret: {{ with secret "secrets/data/authelia/vikunja"}} {{ .Data.data.password }} {{end}}
scope: openid profile email
EOH
destination = "local/config.yml"
}
resources {
memory = 100
}
}
}
}
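
One caveat in the job above: uuidv4() is evaluated each time the job is parsed, so VIKUNJA_SERVICE_JWTSECRET changes on every deployment and invalidates existing sessions. A sketch that pins the secret in Vault instead, assuming a secrets/data/nomad/vikunja entry with a jwtsecret field:

    template {
      data        = <<EOH
    {{ with secret "secrets/data/nomad/vikunja" }}
    VIKUNJA_SERVICE_JWTSECRET={{ .Data.data.jwtsecret }}
    {{ end }}
    EOH
      destination = "secrets/jwt.env"
      env         = true
    }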

View File

@ -6,11 +6,7 @@ job "backup-consul" {
meta { meta {
forcedeploy = "0" forcedeploy = "0"
} }
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
constraint { constraint {
attribute = "${attr.cpu.arch}" attribute = "${attr.cpu.arch}"
value = "amd64" value = "amd64"
@ -26,9 +22,9 @@ job "backup-consul" {
task "consul-backup" { task "consul-backup" {
driver = "docker" driver = "docker"
config { config {
image = "docker.service.consul:5000/ducampsv/docker-consul-backup:latest" image = "ducampsv/docker-consul-backup:latest"
volumes = [ volumes = [
"/mnt/diskstation/nomad/backup/consul:/backup" "/mnt/diskstation/git/backup/consul:/backup"
] ]
} }
resources { resources {

View File

@ -6,11 +6,7 @@ job "backup-postgress" {
meta { meta {
forcedeploy = "0" forcedeploy = "0"
} }
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
constraint { constraint {
attribute = "${attr.cpu.arch}" attribute = "${attr.cpu.arch}"
value = "amd64" value = "amd64"
@ -32,9 +28,9 @@ job "backup-postgress" {
name = "backup-postgress" name = "backup-postgress"
} }
config { config {
image = "docker.service.consul:5000/ducampsv/docker-backup-postgres:latest" image = "ducampsv/docker-backup-postgres:latest"
volumes = [ volumes = [
"/mnt/diskstation/nomad/backup/postgres:/backup" "/mnt/diskstation/git/backup/postgres:/backup"
] ]
} }
template { template {
@ -49,8 +45,7 @@ job "backup-postgress" {
env = true env = true
} }
resources { resources {
memory = 180 memory = 125
memory_max = 400
} }
} }

View File

@ -6,11 +6,7 @@ job "backup-vault" {
meta { meta {
forcedeploy = "0" forcedeploy = "0"
} }
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
constraint { constraint {
attribute = "${attr.cpu.arch}" attribute = "${attr.cpu.arch}"
value = "amd64" value = "amd64"
@ -29,9 +25,9 @@ job "backup-vault" {
task "backup-vault" { task "backup-vault" {
driver = "docker" driver = "docker"
config { config {
image = "docker.service.consul:5000/ducampsv/docker-vault-backup:latest" image = "ducampsv/docker-vault-backup:latest"
volumes = [ volumes = [
"/mnt/diskstation/nomad/backup/vault:/backup" "/mnt/diskstation/git/backup/vault:/backup"
] ]
} }
template { template {

View File

@ -13,7 +13,7 @@ job "batch-rutorrent" {
task "cleanForwardFolder" { task "cleanForwardFolder" {
driver= "docker" driver= "docker"
config { config {
image = "docker.service.consul:5000/library/alpine" image = "alpine"
volumes = [ volumes = [
"/mnt/hetzner/storagebox/file/forward:/file" "/mnt/hetzner/storagebox/file/forward:/file"
] ]

View File

@ -6,11 +6,7 @@ job "batch-seedboxsync" {
meta { meta {
forcedeploy = "0" forcedeploy = "0"
} }
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
constraint { constraint {
attribute = "${attr.cpu.arch}" attribute = "${attr.cpu.arch}"
value = "amd64" value = "amd64"
@ -32,9 +28,9 @@ job "batch-seedboxsync" {
name = "seedboxsync" name = "seedboxsync"
} }
config { config {
image = "docker.service.consul:5000/ducampsv/rsync:latest" image = "ducampsv/rsync:latest"
volumes = [ volumes = [
"/mnt/diskstation/download:/media", "/mnt/diskstation/media/download:/media",
"local/id_rsa:/home/rsyncuser/.ssh/id_rsa" "local/id_rsa:/home/rsyncuser/.ssh/id_rsa"
] ]
command = "rsync" command = "rsync"
@ -74,7 +70,6 @@ job "batch-seedboxsync" {
} }
resources { resources {
memory = 500 memory = 500
memory_max = 1000
} }
} }

View File

@ -1,87 +0,0 @@
job "torrent_automation" {
datacenters = ["homelab"]
priority = 50
type = "service"
meta {
forcedeploy = "0"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "prowlarr"{
network {
mode = "host"
port "prowlarr" {
static = 9696
to = 9696
}
port "flaresolverr" {
static = 8191
to = 8191
}
}
task "flaresolverr" {
driver = "docker"
service {
name = "flaresolverr"
port = "flaresolverr"
}
config {
image = "alexfozor/flaresolverr:pr-1300-experimental"
ports = ["flaresolverr"]
}
env {
}
resources {
memory = 300
memory_max = 500
}
}
task "prowlarr" {
driver = "docker"
service {
name = "prowlarr"
port = "prowlarr"
tags = [
"homer.enable=true",
"homer.name=Prowlarr",
"homer.service=Application",
"homer.logo=http://${NOMAD_ADDR_prowlarr}/Content/Images/logo.png",
"homer.target=_blank",
"homer.url=http://${NOMAD_ADDR_prowlarr}",
]
}
config {
image = "ghcr.io/linuxserver/prowlarr:latest"
ports = ["prowlarr"]
volumes = [
"/mnt/diskstation/nomad/prowlarr:/config"
]
}
env {
PUID=1000001
PGID=1000001
TZ="Europe/Paris"
}
resources {
memory = 150
}
}
}
}

219
nomad-job/borgmatic.nomad Normal file
View File

@ -0,0 +1,219 @@
job "borgmatic" {
datacenters = ["homelab"]
priority = 50
type = "service"
meta {
forcedeploy = "0"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "NAS"
}
group "borgmatic"{
vault{
policies= ["borgmatic"]
}
task "borgmatic" {
driver = "docker"
config {
image = "ghcr.io/borgmatic-collective/borgmatic"
volumes = [
"/var/local/volume1:/var/local/volume1",
"local/borgmatic.d:/etc/borgmatic.d",
"secret/id_rsa:/root/.ssh/id_rsa",
"/mnt/diskstation/nomad/borgmatic:/root/.cache/borg",
]
}
env {
}
template {
data= <<EOH
BORG_RSH="ssh -i /root/.ssh/id_rsa -p 23"
{{ with secret "secrets/data/nomad/borgmatic"}}
BORG_PASSPHRASE= {{.Data.data.passphrase}}
{{end}}
EOH
destination = "secrets/sample.env"
env = true
}
template {
data= <<EOH
0 2 * * * PATH=$PATH:/usr/local/bin /usr/local/bin/borgmatic --create --prune -v 1
0 23 1 * * PATH=$PATH:/usr/local/bin /usr/local/bin/borgmatic --check
EOH
destination = "local/borgmatic.d/crontab.txt"
}
template {
data= <<EOH
location:
# List of source directories to backup (required). Globs and
# tildes are expanded. Do not backslash spaces in path names.
source_directories:
- /volume1/CardDav
- /volume1/ebook
- /volume1/git
- /volume1/homes
- /volume1/hubert
- /volume1/music
- /volume1/nomad
- /volume1/photo
repositories:
- u304977@u304977.your-storagebox.de:{{if eq "production" (env "meta.env") }}backup_hamelab{{else}}backup_homelab_dev{{end}}
exclude_patterns:
- '*/nomad/jellyfin/cache'
- '*/loki/chunks'
# - /home/*/.cache
# - '*/.vim*.tmp'
# - /etc/ssl
# - /home/user/path with spaces
storage:
extra_borg_options:
# Extra command-line options to pass to "borg init".
# init: --extra-option
# Extra command-line options to pass to "borg prune".
# prune: --extra-option
# Extra command-line options to pass to "borg compact".
# compact: --extra-option
# Extra command-line options to pass to "borg create".
create: --progress --stats
# Extra command-line options to pass to "borg check".
# check: --extra-option
retention:
# Keep all archives within this time interval.
# keep_within: 3H
# Number of secondly archives to keep.
# keep_secondly: 60
# Number of minutely archives to keep.
# keep_minutely: 60
# Number of hourly archives to keep.
# keep_hourly: 24
# Number of daily archives to keep.
keep_daily: 7
# Number of weekly archives to keep.
keep_weekly: 4
# Number of monthly archives to keep.
# keep_monthly: 6
# Number of yearly archives to keep.
# keep_yearly: 1
consistency:
checks:
- repository
# - archives
# check_repositories:
# - user@backupserver:sourcehostname.borg
# check_last: 3
# output:
# color: false
# hooks:
# List of one or more shell commands or scripts to execute
# before creating a backup, run once per configuration file.
# before_backup:
# - echo "Starting a backup."
# List of one or more shell commands or scripts to execute
# before pruning, run once per configuration file.
# before_prune:
# - echo "Starting pruning."
# List of one or more shell commands or scripts to execute
# before compaction, run once per configuration file.
# before_compact:
# - echo "Starting compaction."
# List of one or more shell commands or scripts to execute
# before consistency checks, run once per configuration file.
# before_check:
# - echo "Starting checks."
# List of one or more shell commands or scripts to execute
# before extracting a backup, run once per configuration file.
# before_extract:
# - echo "Starting extracting."
# List of one or more shell commands or scripts to execute
# after creating a backup, run once per configuration file.
# after_backup:
# - echo "Finished a backup."
# List of one or more shell commands or scripts to execute
# after compaction, run once per configuration file.
# after_compact:
# - echo "Finished compaction."
# List of one or more shell commands or scripts to execute
# after pruning, run once per configuration file.
# after_prune:
# - echo "Finished pruning."
# List of one or more shell commands or scripts to execute
# after consistency checks, run once per configuration file.
# after_check:
# - echo "Finished checks."
# List of one or more shell commands or scripts to execute
# after extracting a backup, run once per configuration file.
# after_extract:
# - echo "Finished extracting."
# List of one or more shell commands or scripts to execute
# when an exception occurs during a "prune", "compact",
# "create", or "check" action or an associated before/after
# hook.
# on_error:
# - echo "Error during prune/compact/create/check."
# List of one or more shell commands or scripts to execute
# before running all actions (if one of them is "create").
# These are collected from all configuration files and then
# run once before all of them (prior to all actions).
# before_everything:
# - echo "Starting actions."
# List of one or more shell commands or scripts to execute
# after running all actions (if one of them is "create").
# These are collected from all configuration files and then
# run once after all of them (after any action).
# after_everything:
# - echo "Completed actions."
EOH
destination = "local/borgmatic.d/config.yaml"
}
template {
data= <<EOH
{{ with secret "secrets/data/nomad/borgmatic"}}
{{.Data.data.privatekey}}
{{end}}
EOH
destination = "secret/id_rsa"
perms= "700"
}
resources {
memory = 300
}
}
}
}

View File

@ -39,7 +39,7 @@ job "chainetv" {
] ]
} }
config { config {
image = "docker.service.consul:5000/ducampsv/chainetv:latest" image = "ducampsv/chainetv:latest"
ports = ["http"] ports = ["http"]
} }
resources { resources {

View File

@ -27,7 +27,7 @@ job "crowdsec-agent" {
} }
driver = "docker" driver = "docker"
config { config {
image = "docker.service.consul:5000/crowdsecurity/crowdsec" image = "crowdsecurity/crowdsec"
ports = ["metric"] ports = ["metric"]
volumes = [ volumes = [
"/var/run/docker.sock:/var/run/docker.sock", "/var/run/docker.sock:/var/run/docker.sock",

View File

@ -5,15 +5,9 @@ job "crowdsec-api" {
meta { meta {
forcedeploy = "-1" forcedeploy = "-1"
} }
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
vault { vault {
policies = ["crowdsec"] policies = ["crowdsec"]
} }
group "crowdsec-api" { group "crowdsec-api" {
network { network {
mode = "host" mode = "host"
@ -41,11 +35,11 @@ job "crowdsec-api" {
] ]
} }
config { config {
image = "docker.service.consul:5000/crowdsecurity/crowdsec" image = "crowdsecurity/crowdsec"
ports = ["http", "metric"] ports = ["http", "metric"]
volumes = [ volumes = [
"/mnt/diskstation/nomad/crowdsec/db:/var/lib/crowdsec/data", "/mnt/diskstation/nomad/crowdsec/db:/var/lib/crowdsec/data",
"/mnt/diskstation/nomad/crowdsec/data:/etc/crowdsec", "/mnt/diskstation/nomad/crowdsec/data:/etc/crowdsec_data",
] ]
} }

View File

@ -6,11 +6,7 @@ job "dashboard" {
meta { meta {
forcedeploy = "1" forcedeploy = "1"
} }
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "dashboard" { group "dashboard" {
network { network {
mode = "host" mode = "host"
@ -33,7 +29,7 @@ job "dashboard" {
] ]
} }
config { config {
image = "docker.service.consul:5000/b4bz/homer" image = "b4bz/homer"
ports = ["http"] ports = ["http"]
volumes = [ volumes = [
"/mnt/diskstation/nomad/homer:/www/assets" "/mnt/diskstation/nomad/homer:/www/assets"

View File

@ -1,69 +0,0 @@
job "lldap" {
datacenters = ["homelab"]
priority = 50
type = "service"
meta {
forcedeploy = "0"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
group "lldap"{
network {
mode = "host"
port "ldap" {
to = 3890
static = 3890
}
port "http" {
to = 17170
}
}
# vault{
# policies= ["lldap"]
#
# }
service {
name = "lldapHttp"
port = "http"
tags = [
]
}
service {
name = "lldapLDAP"
port = "ldap"
tags = [
]
}
task "lldap" {
driver = "docker"
config {
image = "docker.service.consul:5000/ducampsv/lldap:latest"
ports = ["ldap","http"]
volumes = [
"/mnt/diskstation/nomad/lldap:/data"
]
}
template {
data= <<EOH
UID=1000000
GID=1000
LLDAP_JWT_SECRET=
LLDAP_LDAP_USER_PASS=REPLACE_WITH_PASSWORD
LLDAP_LDAP_BASE_DN=dc=ducamps,dc=eu
EOH
destination = "secrets/env"
env = true
}
resources {
memory = 300
}
}
}
}

View File

@ -1,5 +1,5 @@
job "dockermailserver" { job "dockermailserver" {
datacenters = ["homelab"] datacenters = ["hetzner"]
priority = 90 priority = 90
type = "service" type = "service"
meta { meta {
@ -9,11 +9,7 @@ job "dockermailserver" {
attribute = "${attr.cpu.arch}" attribute = "${attr.cpu.arch}"
value = "amd64" value = "amd64"
} }
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "dockermailserver" { group "dockermailserver" {
network { network {
mode = "host" mode = "host"
@ -119,7 +115,7 @@ job "dockermailserver" {
task "docker-mailserver" { task "docker-mailserver" {
driver = "docker" driver = "docker"
config { config {
image = "ghcr.service.consul:5000/docker-mailserver/docker-mailserver:latest" image = "ghcr.io/docker-mailserver/docker-mailserver:latest"
ports = ["smtp", "esmtp", "imap","rspamd"] ports = ["smtp", "esmtp", "imap","rspamd"]
volumes = [ volumes = [
"/mnt/diskstation/nomad/dms/mail-data:/var/mail", "/mnt/diskstation/nomad/dms/mail-data:/var/mail",
@ -137,7 +133,7 @@ job "dockermailserver" {
env { env {
OVERRIDE_HOSTNAME = "mail.ducamps.eu" OVERRIDE_HOSTNAME = "mail.ducamps.eu"
DMS_VMAIL_UID = 1000000 DMS_VMAIL_UID = 1000000
DMS_VMAIL_GID = 984 DMS_VMAIL_GID = 100
SSL_TYPE= "letsencrypt" SSL_TYPE= "letsencrypt"
LOG_LEVEL="info" LOG_LEVEL="info"
POSTMASTER_ADDRESS="vincent@ducamps.eu" POSTMASTER_ADDRESS="vincent@ducamps.eu"
@ -173,7 +169,7 @@ submissions/inet/smtpd_upstream_proxy_protocol=haproxy
} }
template { template {
data = <<EOH data = <<EOH
haproxy_trusted_networks = 10.0.0.0/24, 127.0.0.0/8, 172.17.0.1, 192.168.1.0/24 haproxy_trusted_networks = 10.0.0.0/24, 127.0.0.0/8, 172.17.0.1
haproxy_timeout = 3 secs haproxy_timeout = 3 secs
service imap-login { service imap-login {
inet_listener imaps { inet_listener imaps {

View File

@ -16,7 +16,7 @@ job "drone-runner" {
task "drone-runner" { task "drone-runner" {
driver = "docker" driver = "docker"
config { config {
image = "docker.service.consul:5000/drone/drone-runner-docker:latest" image = "drone/drone-runner-docker:latest"
volumes = [ volumes = [
"/var/run/docker.sock:/var/run/docker.sock", "/var/run/docker.sock:/var/run/docker.sock",
] ]

View File

@ -45,7 +45,7 @@ job "drone" {
] ]
} }
config { config {
image = "docker.service.consul:5000/drone/drone:latest" image = "drone/drone:latest"
ports = [ ports = [
"http" "http"
] ]

View File

@ -1,6 +1,6 @@
job "filestash" { job "filestash" {
datacenters = ["homelab"] datacenters = ["hetzner"]
priority = 50 priority = 50
type = "service" type = "service"
meta { meta {
@ -10,11 +10,7 @@ job "filestash" {
attribute = "${attr.cpu.arch}" attribute = "${attr.cpu.arch}"
value = "amd64" value = "amd64"
} }
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "filestash" { group "filestash" {
network { network {
@ -48,7 +44,7 @@ job "filestash" {
] ]
} }
config { config {
image = "docker.service.consul:5000/machines/filestash" image = "machines/filestash"
ports = ["http"] ports = ["http"]
volumes = [ volumes = [
"/mnt/diskstation/nomad/filestash:/app/data/state" "/mnt/diskstation/nomad/filestash:/app/data/state"

View File

@ -27,7 +27,7 @@ job "ghostfolio" {
task "redis" { task "redis" {
driver = "docker" driver = "docker"
config { config {
image = "docker.service.consul:5000/library/redis" image = "redis"
ports = ["redis"] ports = ["redis"]
} }
resources { resources {
@ -51,7 +51,7 @@ job "ghostfolio" {
] ]
} }
config { config {
image = "docker.service.consul:5000/ghostfolio/ghostfolio:latest" image = "ghostfolio/ghostfolio:latest"
ports = ["http"] ports = ["http"]
volumes = [ volumes = [
] ]
@ -80,7 +80,6 @@ job "ghostfolio" {
} }
resources { resources {
memory = 400 memory = 400
memory_max = 600
} }
} }

View File

@ -8,11 +8,6 @@ job "git" {
constraint { constraint {
attribute = "${attr.cpu.arch}" attribute = "${attr.cpu.arch}"
value = "amd64" value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
} }
group "gitea" { group "gitea" {
network { network {
@ -59,12 +54,13 @@ job "git" {
] ]
} }
config { config {
image = "docker.service.consul:5000/gitea/gitea:latest" image = "gitea/gitea:latest"
ports = [ ports = [
"http", "http",
"ssh" "ssh"
] ]
volumes = [ volumes = [
"/mnt/diskstation/git:/repo",
"/mnt/diskstation/nomad/gitea:/data" "/mnt/diskstation/nomad/gitea:/data"
] ]
} }
@ -81,14 +77,10 @@ job "git" {
GITEA__database__HOST = "active.db.service.consul" GITEA__database__HOST = "active.db.service.consul"
GITEA__database__NAME = "gitea" GITEA__database__NAME = "gitea"
GITEA__database__USER = "gitea" GITEA__database__USER = "gitea"
GITEA__service__DISABLE_REGISTRATION = "false" GITEA__service__DISABLE_REGISTRATION = "true"
GITEA__service__ALLOW_ONLY_EXTERNAL_REGISTRATION = "true" GITEA__repository__ROOT = "/repo"
GITEA__service__SHOW_REGISTRATION_BUTTON = "false"
GITEA__openid__ENABLE_OPENID_SIGNIN = "false"
GITEA__openid__ENABLE_OPENID_SIGNUP = "true"
GITEA__repository__ROOT = "/data/gitea-repositories"
GITEA__server__APP_DATA_PATH = "/data" GITEA__server__APP_DATA_PATH = "/data"
GITEA__server__LFS_CONTENT_PATH = "/data/lfs" GITEA__server__LFS_CONTENT_PATH = "/repo/LFS"
GITEA__webhook__ALLOWED_HOST_LIST = "drone.ducamps.eu" GITEA__webhook__ALLOWED_HOST_LIST = "drone.ducamps.eu"
GITEA__webhook__DELIVER_TIMEOUT = "30" GITEA__webhook__DELIVER_TIMEOUT = "30"
} }
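
The GITEA__section__KEY variables above follow Gitea's container convention: each one is written into app.ini as KEY under [section]. For example:

    env {
      # becomes, in app.ini:
      #   [database]
      #   HOST = active.db.service.consul
      GITEA__database__HOST = "active.db.service.consul"
    }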

View File

@ -2,17 +2,8 @@ job "grafana" {
datacenters = ["homelab"] datacenters = ["homelab"]
priority = 50 priority = 50
type = "service" type = "service"
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
meta { meta {
forcedeploiement = 2 forcedeploiement = 1
}
vault {
policies = ["grafana"]
} }
group "grafana" { group "grafana" {
network { network {
@ -20,6 +11,7 @@ job "grafana" {
to = 3000 to = 3000
} }
} }
service { service {
name = "grafana" name = "grafana"
port = "http" port = "http"
@ -44,37 +36,13 @@ job "grafana" {
task "dashboard" { task "dashboard" {
driver = "docker" driver = "docker"
config { config {
image = "docker.service.consul:5000/grafana/grafana" image = "grafana/grafana"
ports = ["http"] ports = ["http"]
volumes = [ volumes = [
"local/grafana.ini:/etc/grafana/grafana.ini", "/mnt/diskstation/nomad/grafana/config:/etc/grafana",
"/mnt/diskstation/nomad/grafana/lib:/var/lib/grafana" "/mnt/diskstation/nomad/grafana/lib:/var/lib/grafana"
] ]
} }
template {
data = <<EOH
force_migration=true
[server]
root_url = https://grafana.ducamps.eu
[auth.generic_oauth]
enabled = true
name = Authelia
icon = signin
client_id = grafana
client_secret = {{ with secret "secrets/data/authelia/grafana"}} {{ .Data.data.password }} {{end}}
scopes = openid profile email groups
empty_scopes = false
auth_url = https://auth.ducamps.eu/api/oidc/authorization
token_url = https://auth.ducamps.eu/api/oidc/token
api_url = https://auth.ducamps.eu/api/oidc/userinfo
login_attribute_path = preferred_username
groups_attribute_path = groups
name_attribute_path = name
use_pkce = true
role_attribute_path=contains(groups[*], 'GrafanaAdmins') && 'Admin' || contains(groups[*], 'GrafanaUsers') && 'Viewer'
EOH
destination = "local/grafana.ini"
}
resources { resources {
memory = 250 memory = 250
} }

View File

@ -3,11 +3,6 @@ job "homeassistant" {
datacenters = ["homelab"] datacenters = ["homelab"]
priority = 90 priority = 90
type = "service" type = "service"
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
meta { meta {
forcedeploy = "0" forcedeploy = "0"
} }
@ -57,7 +52,7 @@ job "homeassistant" {
} }
} }
config { config {
image = "docker.service.consul:5000/homeassistant/home-assistant:stable" image = "homeassistant/home-assistant:stable"
ports = ["http", "coap"] ports = ["http", "coap"]
privileged = "true" privileged = "true"
network_mode = "host" network_mode = "host"

View File

@ -2,7 +2,6 @@ job "jellyfin" {
datacenters = ["homelab"] datacenters = ["homelab"]
priority = 30 priority = 30
type = "service" type = "service"
meta { meta {
forcedeploy = "1" forcedeploy = "1"
} }
@ -10,11 +9,6 @@ job "jellyfin" {
attribute = "${attr.cpu.arch}" attribute = "${attr.cpu.arch}"
value = "amd64" value = "amd64"
} }
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "jellyfin-vue" { group "jellyfin-vue" {
network { network {
mode = "host" mode = "host"
@ -43,7 +37,7 @@ job "jellyfin" {
} }
config { config {
image = "ghcr.service.consul:5000/jellyfin/jellyfin-vue:unstable" image = "ghcr.io/jellyfin/jellyfin-vue:unstable"
ports = ["http"] ports = ["http"]
} }
env { env {
@ -88,13 +82,13 @@ job "jellyfin" {
] ]
} }
config { config {
image = "docker.service.consul:5000/jellyfin/jellyfin" image = "jellyfin/jellyfin"
ports = ["http"] ports = ["http"]
volumes = [ volumes = [
"/mnt/diskstation/nomad/jellyfin/config:/config", "/mnt/diskstation/nomad/jellyfin/config:/config",
"/mnt/diskstation/nomad/jellyfin/cache:/cache", "/mnt/diskstation/nomad/jellyfin/cache:/cache",
"/mnt/diskstation/media:/media", "/mnt/diskstation/media/:/media",
"/mnt/diskstation/music:/music", "/mnt/diskstation/music/:/media2"
] ]
devices = [ devices = [
{ {

View File

@ -6,11 +6,7 @@ job "loki" {
meta { meta {
forcedeploy = "0" forcedeploy = "0"
} }
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "loki" { group "loki" {
network { network {
mode = "host" mode = "host"
@ -38,7 +34,7 @@ job "loki" {
} }
} }
config { config {
image = "docker.service.consul:5000/grafana/loki" image = "grafana/loki"
ports = ["http"] ports = ["http"]
args = [ args = [
"-config.file", "-config.file",
@ -53,58 +49,56 @@ job "loki" {
- side (tsdb, schema v13):
auth_enabled: false
server:
  http_listen_port: 3100
common:
  instance_addr: 127.0.0.1
  path_prefix: /loki
  storage:
    filesystem:
      chunks_directory: /loki/chunks
      rules_directory: /loki/rules
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory
schema_config:
  configs:
    - from: "2023-04-08" # <---- A date in the future
      index:
        period: 24h
        prefix: index_
      object_store: filesystem
      schema: v13
      store: tsdb
compactor:
  retention_enabled: true
  working_directory: /loki/tsdb-shipper-compactor
  shared_store: filesystem
limits_config:
  split_queries_by_interval: 24h
  max_query_parallelism: 100
  max_entries_limit_per_query: 10000
  injection_rate_strategy: local
  retention_period: 90d
  reject_old_samples: true
  reject_old_samples_max_age: 168h
query_scheduler:
  max_outstanding_requests_per_tenant: 4096
querier:
  max_concurrent: 4096
frontend:
  max_outstanding_per_tenant: 4096
query_range:
  results_cache:
    cache:
      embedded_cache:
        enabled: true
        max_size_mb: 100
EOH

+ side (boltdb-shipper, schema v11):
auth_enabled: false
server:
  http_listen_port: 3100
ingester:
  lifecycler:
    address: 127.0.0.1
    ring:
      kvstore:
        store: inmemory
      replication_factor: 1
    final_sleep: 0s
  # Any chunk not receiving new logs in this time will be flushed
  chunk_idle_period: 1h
  # All chunks will be flushed when they hit this age, default is 1h
  max_chunk_age: 1h
  # Loki will attempt to build chunks up to 1.5MB, flushing if chunk_idle_period or max_chunk_age is reached first
  chunk_target_size: 1048576
  # Must be greater than index read cache TTL if using an index cache (Default index read cache TTL is 5m)
  chunk_retain_period: 30s
  max_transfer_retries: 0 # Chunk transfers disabled
schema_config:
  configs:
    - from: 2020-10-24
      store: boltdb-shipper
      object_store: filesystem
      schema: v11
      index:
        prefix: index_
        period: 24h
storage_config:
  boltdb_shipper:
    active_index_directory: /loki/boltdb-shipper-active
    cache_location: /loki/boltdb-shipper-cache
    cache_ttl: 24h # Can be increased for faster performance over longer query periods, uses more disk space
    shared_store: filesystem
  filesystem:
    directory: /loki/chunks
compactor:
  working_directory: /tmp/loki/boltdb-shipper-compactor
  shared_store: filesystem
limits_config:
  reject_old_samples: true
  reject_old_samples_max_age: 168h
chunk_store_config:
  max_look_back_period: 0s
table_manager:
  retention_deletes_enabled: false
  retention_period: 0s
EOH
destination = "local/loki/local-config.yaml" destination = "local/loki/local-config.yaml"
} }
resources { resources {
memory = 300 memory = 300
memory_max = 1000
} }
} }

View File

@ -32,7 +32,7 @@ job "node-exporter" {
task "node-exporter" { task "node-exporter" {
driver = "docker" driver = "docker"
config { config {
image = "docker.service.consul:5000/prom/node-exporter" image = "prom/node-exporter"
ports = ["http"] ports = ["http"]
args = [ args = [
"--web.listen-address=:${NOMAD_PORT_http}", "--web.listen-address=:${NOMAD_PORT_http}",

View File

@ -18,12 +18,6 @@ job "sample" {
to = 0000 to = 0000
} }
} }
volume "sample-data" {
type = "csi"
source = "sapmle-data"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
vault{ vault{
policies= ["policy_name"] policies= ["policy_name"]
@ -38,15 +32,10 @@ job "sample" {
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.win`)", "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.win`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.win", "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.win",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver", "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
] ]
} }
volume_mount {
volume = "sample-data"
destination = "/app/data"
}
config { config {
image = "sample" image = "sample"
ports = ["http"] ports = ["http"]

View File

@ -29,11 +29,11 @@ job "nut_exporter" {
task "nut_exporter" { task "nut_exporter" {
driver = "docker" driver = "docker"
config { config {
image = "ghcr.service.consul:5000/druggeri/nut_exporter" image = "ghcr.io/druggeri/nut_exporter"
ports = ["http"] ports = ["http"]
} }
env { env {
NUT_EXPORTER_SERVER= "192.168.1.43" NUT_EXPORTER_SERVER= "192.168.1.10"
NUT_EXPORTER_VARIABLES = "battery.runtime,battery.charge,input.voltage,output.voltage,output.voltage.nominal,ups.load,ups.status,ups.realpower" NUT_EXPORTER_VARIABLES = "battery.runtime,battery.charge,input.voltage,output.voltage,output.voltage.nominal,ups.load,ups.status,ups.realpower"
} }

View File

@ -6,11 +6,7 @@ job "pacoloco" {
meta { meta {
forcedeploy = "0" forcedeploy = "0"
} }
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "pacoloco" { group "pacoloco" {
network { network {
mode = "host" mode = "host"
@ -32,10 +28,10 @@ job "pacoloco" {
] ]
} }
config { config {
image = "docker.service.consul:5000/ducampsv/pacoloco" image = "ducampsv/pacoloco"
ports = ["http"] ports = ["http"]
volumes = [ volumes = [
"/mnt/diskstation/nomad/pacoloco:/var/cache/pacoloco", "/mnt/diskstation/archMirror:/var/cache/pacoloco",
"local/pacoloco.yaml:/etc/pacoloco.yaml" "local/pacoloco.yaml:/etc/pacoloco.yaml"
] ]

View File

@ -6,11 +6,7 @@ job "paperless-ng" {
meta { meta {
forcedeploy = "0" forcedeploy = "0"
} }
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
constraint { constraint {
attribute = "${attr.cpu.arch}" attribute = "${attr.cpu.arch}"
value = "amd64" value = "amd64"
@ -33,7 +29,7 @@ job "paperless-ng" {
task "redis" { task "redis" {
driver = "docker" driver = "docker"
config { config {
image = "docker.service.consul:5000/library/redis" image = "redis"
ports = ["redis"] ports = ["redis"]
} }
resources { resources {
@ -51,7 +47,6 @@ job "paperless-ng" {
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu", "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver", "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure", "traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
"traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia",
"homer.enable=true", "homer.enable=true",
"homer.name=Paperless", "homer.name=Paperless",
"homer.service=Application", "homer.service=Application",
@ -68,7 +63,7 @@ job "paperless-ng" {
} }
} }
config { config {
image = "ghcr.service.consul:5000/paperless-ngx/paperless-ngx" image = "ghcr.io/paperless-ngx/paperless-ngx"
ports = ["http"] ports = ["http"]
volumes = [ volumes = [
"/mnt/diskstation/nomad/paperless-ng/media:/usr/src/paperless/media", "/mnt/diskstation/nomad/paperless-ng/media:/usr/src/paperless/media",
@ -87,9 +82,6 @@ job "paperless-ng" {
PAPERLESS_CONSUMER_POLLING = "60" PAPERLESS_CONSUMER_POLLING = "60"
PAPERLESS_URL = "https://${NOMAD_JOB_NAME}.ducamps.eu" PAPERLESS_URL = "https://${NOMAD_JOB_NAME}.ducamps.eu"
PAPERLESS_ALLOWED_HOSTS = "192.168.1.42,192.168.1.40" PAPERLESS_ALLOWED_HOSTS = "192.168.1.42,192.168.1.40"
PAPERLESS_ENABLE_HTTP_REMOTE_USER = "true"
PAPERLESS_HTTP_REMOTE_USER_HEADER_NAME = "HTTP_REMOTE_USER"
PAPERLESS_LOGOUT_REDIRECT_URL= "https://auth.ducamps.eu/logout"
} }
template { template {
@ -101,7 +93,6 @@ job "paperless-ng" {
} }
resources { resources {
memory = 950 memory = 950
memory_max = 1500
cpu = 2000 cpu = 2000
} }
} }

View File

@ -10,36 +10,34 @@ job "pdns-auth" {
attribute = "${attr.cpu.arch}" attribute = "${attr.cpu.arch}"
value = "amd64" value = "amd64"
} }
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
vault {
policies = ["pdns"]
}
group "pdns-auth" { group "pdns-auth" {
network { network {
port "dns" { port "dns" {
static = 5300 static=5300
} }
port "http" { port "http" {
static = 8081 static = 8081
} }
port "pdnsadmin"{
to = 80
}
} }
task "pdns-auth" { vault {
policies = ["pdns"]
}
task "pdns-auth" {
driver = "docker" driver = "docker"
service { service {
name = "pdns-auth" name = "pdns-auth"
port = "dns" port = "dns"
} }
config { config {
image = "docker.service.consul:5000/powerdns/pdns-auth-master:latest" image = "powerdns/pdns-auth-master:latest"
network_mode = "host" network_mode = "host"
privileged = true privileged=true
cap_add = ["net_bind_service"] cap_add= ["net_bind_service"]
volumes = [ volumes = [
"/mnt/diskstation/nomad/pdns-auth/var:/var/lib/powerdns/", "/mnt/diskstation/nomad/pdns-auth/var:/var/lib/powerdns/",
"local/dnsupdate.conf:/etc/powerdns/pdns.d/dnsupdate.conf", "local/dnsupdate.conf:/etc/powerdns/pdns.d/dnsupdate.conf",
@ -54,20 +52,20 @@ job "pdns-auth" {
PDNS_AUTH_API_KEY="{{.Data.data.API_KEY}}" PDNS_AUTH_API_KEY="{{.Data.data.API_KEY}}"
{{ end }} {{ end }}
EOH EOH
env = true env = true
} }
template { template{
destination = "local/dnsupdate.conf" destination = "local/dnsupdate.conf"
data = <<EOH data = <<EOH
dnsupdate=yes dnsupdate=yes
allow-dnsupdate-from=192.168.1.43/24 allow-dnsupdate-from=192.168.1.41/24
local-address=192.168.1.5 local-address=192.168.1.5
local-port=53 local-port=53
EOH EOH
} }
template { template{
destination = "local/pdns.conf" destination = "local/pdns.conf"
data = <<EOH data = <<EOH
launch=gpgsql launch=gpgsql
gpgsql-host=active.db.service.consul gpgsql-host=active.db.service.consul
gpgsql-port=5432 gpgsql-port=5432
@ -75,8 +73,6 @@ gpgsql-user=pdns-auth
{{ with secret "secrets/data/database/pdns"}} {{ with secret "secrets/data/database/pdns"}}
gpgsql-password={{ .Data.data.pdnsauth }} gpgsql-password={{ .Data.data.pdnsauth }}
{{ end }} {{ end }}
resolver=192.168.1.6
expand-alias=yes
include-dir=/etc/powerdns/pdns.d include-dir=/etc/powerdns/pdns.d
EOH EOH
} }
@ -84,17 +80,56 @@ include-dir=/etc/powerdns/pdns.d
memory = 100 memory = 100
} }
} }
task "pnds-admin" {
service {
name = "pdns-admin"
tags = [
"homer.enable=true",
"homer.name=PDNS-ADMIN",
"homer.service=Application",
"homer.target=_blank",
"homer.url=http://${NOMAD_ADDR_pdnsadmin}",
task "pdns-recursor" { ]
port = "pdnsadmin"
driver = "docker" }
config { driver = "docker"
image = "docker.service.consul:5000/powerdns/pdns-recursor-master:latest" config {
network_mode = "host" image = "powerdnsadmin/pda-legacy:latest"
volumes = [ ports= ["pdnsadmin"]
"local/recursor.conf:/etc/powerdns/recursor.conf", volumes = [
] "/mnt/diskstation/nomad/pdns-admin/:/data/node_module/",
]
} }
template{
destination = "secrets/pdns-admin.env"
env = true
data = <<EOH
{{ with secret "secrets/data/nomad/pdns"}}
SECRET_KEY="{{ .Data.data.SECRET_KEY }}"
GUNICORN_WORKERS=2
{{ end }}
{{ with secret "secrets/data/database/pdns"}}
SQLALCHEMY_DATABASE_URI=postgresql://pdns-admin:{{ .Data.data.pdnsadmin }}@active.db.service.consul/pdns-admin
{{end}}
EOH
}
resources {
cpu = 100
memory = 200
}
}
task "pdns-recursor" {
driver = "docker"
config {
image = "powerdns/pdns-recursor-master:latest"
network_mode = "host"
volumes = [
"local/recursor.conf:/etc/powerdns/recursor.conf",
]
}
template{ template{
destination = "local/recursor.conf" destination = "local/recursor.conf"
data= <<EOH data= <<EOH
@ -103,34 +138,34 @@ dnssec=off
forward-zones=consul=127.0.0.1:8600,ducamps.eu=192.168.1.5,1.168.192.in-addr.arpa=192.168.1.5 forward-zones=consul=127.0.0.1:8600,ducamps.eu=192.168.1.5,1.168.192.in-addr.arpa=192.168.1.5
local-address=192.168.1.6 local-address=192.168.1.6
EOH EOH
- side, after the shared recursor.conf heredoc:
        }
        resources {
          cpu = 50
          memory = 50
        }
      }
      task "keepalived" {
        driver = "docker"

+ side, after the shared recursor.conf heredoc:
        }
        resources {
          cpu = 100
          memory = 50
        }
      }
      task "keepalived" {
        driver = "docker"
lifecycle { lifecycle {
hook = "prestart" hook = "prestart"
sidecar = true sidecar = true
} }
env { env {
KEEPALIVED_ROUTER_ID = "52" KEEPALIVED_ROUTER_ID = "52"
KEEPALIVED_STATE = "MASTER" KEEPALIVED_STATE = "MASTER"
KEEPALIVED_VIRTUAL_IPS = "#PYTHON2BASH:['192.168.1.5','192.168.1.6']" KEEPALIVED_VIRTUAL_IPS = "#PYTHON2BASH:['192.168.1.5','192.168.1.6']"
} }
template { template{
destination = "local/env.yaml" destination = "local/env.yaml"
change_mode = "restart" change_mode = "restart"
env = true env= true
data = <<EOH data = <<EOH
KEEPALIVED_INTERFACE= {{ sockaddr "GetPrivateInterfaces | include \"network\" \"192.168.1.0/24\" | attr \"name\"" }} KEEPALIVED_INTERFACE= {{ sockaddr "GetPrivateInterfaces | include \"network\" \"192.168.1.0/24\" | attr \"name\"" }}
EOH EOH
} }
config { config {
image = "docker.service.consul:5000/osixia/keepalived:2.0.20" image = "osixia/keepalived:2.0.20"
network_mode = "host" network_mode = "host"
cap_add = [ cap_add = [
"NET_ADMIN", "NET_ADMIN",
@ -144,53 +179,4 @@ local-address=192.168.1.6
} }
} }
} }
group "pdns-admin" {
network {
port "pdnsadmin" {
to = 80
}
}
task "pnds-admin" {
service {
name = "pdns-admin"
tags = [
"homer.enable=true",
"homer.name=PDNS-ADMIN",
"homer.service=Application",
"homer.logo=http://${NOMAD_ADDR_pdnsadmin}/static/img/favicon.png",
"homer.target=_blank",
"homer.url=http://${NOMAD_ADDR_pdnsadmin}",
]
port = "pdnsadmin"
}
driver = "docker"
config {
image = "docker.service.consul:5000/powerdnsadmin/pda-legacy:latest"
ports = ["pdnsadmin"]
volumes = [
"/mnt/diskstation/nomad/pdns-admin/:/data/node_module/",
]
}
template {
destination = "secrets/pdns-admin.env"
env = true
data = <<EOH
{{ with secret "secrets/data/nomad/pdns"}}
SECRET_KEY="{{ .Data.data.SECRET_KEY }}"
GUNICORN_WORKERS=2
{{ end }}
{{ with secret "secrets/data/database/pdns"}}
SQLALCHEMY_DATABASE_URI=postgresql://pdns-admin:{{ .Data.data.pdnsadmin }}@active.db.service.consul/pdns-admin
{{end}}
EOH
}
resources {
cpu = 50
memory = 200
}
}
}
} }
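
The sockaddr query in the keepalived template resolves, at render time, to the name of the interface holding an address in 192.168.1.0/24. On a host where that NIC is eth0 (a hypothetical name), the rendered env file is just:

    KEEPALIVED_INTERFACE= eth0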

View File

@ -9,11 +9,6 @@ job "pihole" {
attribute = "${attr.cpu.arch}" attribute = "${attr.cpu.arch}"
value = "amd64" value = "amd64"
} }
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
group "pi-hole" { group "pi-hole" {
network { network {
port "dns" { port "dns" {
@ -43,9 +38,22 @@ job "pihole" {
name = "dns" name = "dns"
port = "dns" port = "dns"
check {
name = "service: dns dig check"
type = "script"
command = "/usr/bin/dig"
args = ["+short", "@192.168.1.4"]
interval = "10s"
timeout = "2s"
check_restart {
limit = 3
grace = "60s"
}
}
} }
config { config {
image = "docker.service.consul:5000/pihole/pihole:2023.10.0" image = "pihole/pihole:2023.10.0"
network_mode = "host" network_mode = "host"
volumes = [ volumes = [
"local/dnsmasq.d/02-localresolver.conf:/etc/dnsmasq.d/02-localresolver.conf", "local/dnsmasq.d/02-localresolver.conf:/etc/dnsmasq.d/02-localresolver.conf",
@ -60,7 +68,7 @@ job "pihole" {
env { env {
TZ = "Europe/Paris" TZ = "Europe/Paris"
DNS1 = "192.168.1.5" DNS1 = "192.168.1.5"
DNS2 = "192.168.1.40" DNS2 = "192.168.1.41"
WEB_PORT = "${NOMAD_PORT_http}" WEB_PORT = "${NOMAD_PORT_http}"
} }
@ -90,7 +98,6 @@ local-ttl=2
} }
resources { resources {
memory = 100 memory = 100
memory_max =200
} }
} }
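
The dig check added above passes no query name, so dig falls back to querying the root zone, which only verifies that the resolver answers at all. A sketch that checks actual resolution instead, assuming a name the Pi-hole instance always serves:

    check {
      name     = "service: dns dig check"
      type     = "script"
      command  = "/usr/bin/dig"
      args     = ["+short", "pi.hole", "@127.0.0.1"]
      interval = "10s"
      timeout  = "2s"
    }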

View File

@ -1,285 +0,0 @@
job "authelia" {
datacenters = ["homelab"]
priority = 80
type = "service"
meta {
forcedeploy = "0"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
group "authelia" {
network {
mode = "host"
port "authelia" {
to = 9091
}
}
volume "authelia-config" {
type = "csi"
source = "authelia-config"
access_mode = "multi-node-multi-writer"
attachment_mode = "file-system"
}
vault {
policies = ["authelia"]
}
task "authelia" {
driver = "docker"
service {
name = "authelia"
port = "authelia"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`auth.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=auth.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
]
}
action "generate-client-secret" {
command = "authelia"
args = ["crypto",
"hash",
"generate",
"pbkdf2",
"--random",
"--random.length",
"72",
"--random.charset",
"rfc3986"
]
}
config {
image = "authelia/authelia"
ports = ["authelia"]
args = [
"--config",
"/local/configuration.yml",
]
}
volume_mount {
volume = "authelia-config"
destination = "/config"
}
env {
AUTHELIA_SESSION_SECRET = uuidv4()
AUTHELIA_IDENTITY_VALIDATION_RESET_PASSWORD_JWT_SECRET = uuidv4()
}
template {
data = <<EOH
---
###############################################################
# Authelia configuration #
###############################################################
server:
address: 'tcp://:9091'
endpoints:
authz:
forward-auth:
implementation: 'ForwardAuth'
legacy:
implementation: 'Legacy'
identity_providers:
oidc:
hmac_secret: {{ with secret "secrets/data/nomad/authelia"}}{{ .Data.data.hmac}}{{end}}
jwks:
- key_id: 'key'
key: |
{{ with secret "secrets/data/nomad/authelia"}}{{ .Data.data.rsakey|indent 8 }}{{end}}
cors:
endpoints:
- userinfo
- authorization
- token
- revocation
- introspection
allowed_origins:
- https://mealie.ducamps.eu
allowed_origins_from_client_redirect_uris: true
clients:
- client_id: 'ttrss'
client_name: 'ttrss'
client_secret: {{ with secret "secrets/data/authelia/ttrss"}} {{ .Data.data.hash }} {{end}}
public: false
scopes:
- openid
- email
- profile
redirect_uris:
- 'https://www.ducamps.eu/tt-rss'
userinfo_signed_response_alg: none
authorization_policy: 'one_factor'
pre_configured_consent_duration: 3M
- client_id: 'mealie'
client_name: 'mealie'
client_secret: {{ with secret "secrets/data/authelia/mealie"}} {{ .Data.data.hash }} {{end}}
public: false
require_pkce: true
pkce_challenge_method: 'S256'
scopes:
- openid
- email
- profile
- groups
redirect_uris:
- 'https://mealie.ducamps.eu/login'
        userinfo_signed_response_alg: none
        authorization_policy: 'one_factor'
        pre_configured_consent_duration: 3M
      - client_id: 'immich'
        client_name: 'immich'
        client_secret: {{ with secret "secrets/data/authelia/immich"}} {{ .Data.data.hash }} {{end}}
        public: false
        authorization_policy: 'one_factor'
        redirect_uris:
          - 'https://immich.ducamps.eu/auth/login'
          - 'https://immich.ducamps.eu/user-settings'
          - 'app.immich:/'
        scopes:
          - 'openid'
          - 'profile'
          - 'email'
        userinfo_signed_response_alg: 'none'
        pre_configured_consent_duration: 3M
      - client_id: 'grafana'
        client_name: 'Grafana'
        client_secret: {{ with secret "secrets/data/authelia/grafana"}} {{ .Data.data.hash }} {{end}}
        public: false
        authorization_policy: 'one_factor'
        require_pkce: true
        pkce_challenge_method: 'S256'
        redirect_uris:
          - 'https://grafana.ducamps.eu/login/generic_oauth'
        scopes:
          - 'openid'
          - 'profile'
          - 'groups'
          - 'email'
        userinfo_signed_response_alg: 'none'
        token_endpoint_auth_method: 'client_secret_basic'
        pre_configured_consent_duration: 3M
      - client_id: 'vikunja'
        client_name: 'vikunja'
        client_secret: {{ with secret "secrets/data/authelia/vikunja"}} {{ .Data.data.hash }} {{end}}
        public: false
        authorization_policy: 'one_factor'
        redirect_uris:
          - 'https://vikunja.ducamps.eu/auth/openid/authelia'
        scopes:
          - 'openid'
          - 'profile'
          - 'email'
        userinfo_signed_response_alg: 'none'
        token_endpoint_auth_method: 'client_secret_basic'
        pre_configured_consent_duration: 3M
      - client_id: 'gitea'
        client_name: 'gitea'
        client_secret: {{ with secret "secrets/data/authelia/gitea"}} {{ .Data.data.hash }} {{end}}
        public: false
        authorization_policy: 'one_factor'
        redirect_uris:
          - 'https://git.ducamps.eu/user/oauth2/authelia/callback'
        scopes:
          - 'openid'
          - 'profile'
          - 'email'
        userinfo_signed_response_alg: 'none'
        token_endpoint_auth_method: 'client_secret_basic'
        pre_configured_consent_duration: 3M
log:
  level: 'trace'
totp:
  issuer: 'authelia.com'
authentication_backend:
  ldap:
    address: 'ldaps://ldap.service.consul'
    implementation: 'custom'
    timeout: '5s'
    start_tls: false
    tls:
      skip_verify: true
      minimum_version: 'TLS1.2'
    base_dn: 'DC=ducamps,DC=eu'
    additional_users_dn: 'OU=users'
    users_filter: '(&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=person))'
    additional_groups_dn: 'OU=groups'
    #groups_filter: '(&(member=UID={input},OU=users,DC=ducamps,DC=eu)(objectClass=groupOfNames))'
    groups_filter: '(&(|{memberof:rdn})(objectClass=groupOfNames))'
    group_search_mode: 'memberof'
    user: 'uid=authelia,ou=serviceAccount,ou=users,dc=ducamps,dc=eu'
    password:{{ with secret "secrets/data/nomad/authelia"}} '{{ .Data.data.ldapPassword }}'{{ end }}
    attributes:
      distinguished_name: ''
      username: 'uid'
      mail: 'mail'
      member_of: 'memberOf'
      group_name: 'cn'
access_control:
  default_policy: 'deny'
  rules:
    # Rules applied to everyone
    - domain: '*.ducamps.eu'
      policy: 'one_factor'
session:
  cookies:
    - name: 'authelia_session'
      domain: 'ducamps.eu' # Should match whatever your root protected domain is
      authelia_url: 'https://auth.ducamps.eu'
      expiration: '12 hour'
      inactivity: '5 minutes'
regulation:
  max_retries: 3
  find_time: '2 minutes'
  ban_time: '5 minutes'
storage:
  {{ with secret "secrets/data/nomad/authelia"}}
  encryption_key: '{{.Data.data.encryptionKeys }}'
  {{end}}
  local:
    path: '/config/db.sqlite3'
notifier:
  disable_startup_check: true
  smtp:
    username: 'authelia@ducamps.eu'
    {{ with secret "secrets/data/nomad/authelia"}}
    password: '{{ .Data.data.mailPassword}}'
    {{end}}
    address: 'submissions://mail.ducamps.eu:465'
    disable_require_tls: true
    sender: 'authelia@ducamps.eu'
    tls:
      server_name: 'mail.ducamps.eu'
      skip_verify: true
EOH
        destination = "local/configuration.yml"
      }
      resources {
        memory = 100
      }
    }
  }
}
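
For a quick check that this provider serves what the clients above (immich, grafana, vikunja, gitea) expect, the standard OIDC discovery document can be fetched from the `authelia_url` configured in the session section. A minimal sketch in Python, assuming only that the job is deployed and the `requests` package is installed:

    # Sketch: verify the Authelia OIDC provider is reachable and exposes
    # the standard endpoints. The URL matches authelia_url above; the
    # discovery path is defined by the OIDC spec, not by this setup.
    import requests

    DISCOVERY_URL = "https://auth.ducamps.eu/.well-known/openid-configuration"

    resp = requests.get(DISCOVERY_URL, timeout=5)
    resp.raise_for_status()
    meta = resp.json()

    for key in ("issuer", "authorization_endpoint", "token_endpoint", "userinfo_endpoint"):
        print(f"{key}: {meta[key]}")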

@@ -1 +0,0 @@
../makefile

@@ -9,11 +9,7 @@ job "prometheus" {
   meta{
     force_deploy= 1
   }
-  constraint {
-    attribute = "${node.class}"
-    operator = "set_contains"
-    value = "cluster"
-  }
   group "prometheus" {
     count = 1
@@ -250,7 +246,7 @@ EOH
       driver = "docker"
       config {
-        image = "docker.service.consul:5000/prom/prometheus:latest"
+        image = "prom/prometheus:latest"
         args = [
           "--config.file=/etc/prometheus/prometheus.yml",
           "--storage.tsdb.path=/prometheus",
@@ -289,7 +285,7 @@ EOH
       }
       resources {
         memory = 350
-        memory_max = 500
       }
     }
   }

@@ -6,11 +6,6 @@ job "radicale" {
   meta {
     forcedeploy = "0"
   }
-  constraint {
-    attribute = "${node.class}"
-    operator = "set_contains"
-    value = "cluster"
-  }
   group "radicale" {
     network {
       mode = "host"
@@ -44,11 +39,11 @@ job "radicale" {
       ]
     }
     config {
-      image = "docker.service.consul:5000/tomsquest/docker-radicale"
+      image = "tomsquest/docker-radicale"
       ports = ["http"]
       volumes = [
         "local/config:/config/config",
-        "/mnt/diskstation/nomad/radicale:/data"
+        "/mnt/diskstation/CardDav:/data"
       ]
     }

@@ -6,6 +6,9 @@ job "torrent" {
   meta {
     forcedeploy = "0"
   }
+  vault {
+    policies= ["torrent"]
+  }
   group "bittorent" {
     network {
       mode = "host"
@@ -23,7 +26,7 @@ job "torrent" {
       }
     }
     task "bittorent" {
-      driver = "docker"
+      driver = "podman"
       service {
         name = "bittorent"
         port = "http"
@@ -33,35 +36,43 @@ job "torrent" {
           "homer.name=torrent",
           "homer.url=https://torrent.ducamps.eu",
           "homer.service=Application",
-          "homer.logo=https://fleet.linuxserver.io/images/linuxserver_rutorrent.png",
+          "homer.logo=https://${NOMAD_JOB_NAME}.ducamps.eu/images/favicon-196x196.png",
           "homer.target=_blank",
           "traefik.enable=true",
           "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
           "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
           "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
           "traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
-          "traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia-basic",
+          "traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=torrentauth",
+          "traefik.http.middlewares.torrentauth.basicauth.users=admin:${ADMIN_HASHED_PWD}"
         ]
       }
+      template {
+        data = <<-EOF
+        ADMIN_HASHED_PWD={{ with secret "secrets/nomad/torrent" }}{{.Data.data.hashed_pwd}}{{ end }}
+        EOF
+        destination = "secrets/env"
+        env = true
+      }
+      user = "root"
       config {
-        ulimit {
-          nofile = "8192:8192"
-        }
-        image = "docker.service.consul:5000/crazymax/rtorrent-rutorrent:edge"
+        image = "docker.io/crazymax/rtorrent-rutorrent:latest"
+        privileged = "true"
         ports = [
           "http",
           "torrent",
           "ecoute"
         ]
         volumes = [
-          "/opt/rutorrentConfig:/data",
+          "/mnt/hetzner/storagebox/rutorrentConfig:/data",
           "/mnt/hetzner/storagebox/file:/downloads"
         ]
       }
       env {
         PUID = 100001
-        PGID = 10
+        PGID = 984
         UMASK = 002
         WEBUI_PORT = "8080"
       }
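
The `torrentauth` middleware above reads an htpasswd-style hash from Vault instead of a clear-text password. A minimal sketch of generating one with the Python `bcrypt` package (the secret path and key name mirror the template above; the password itself is a placeholder):

    # Sketch: produce a bcrypt hash for Traefik's basicauth middleware.
    # Traefik accepts bcrypt htpasswd entries ($2a$/$2b$/$2y$ prefixes).
    import bcrypt

    password = b"changeme"  # placeholder, not a real credential
    hashed = bcrypt.hashpw(password, bcrypt.gensalt()).decode()
    print(hashed)
    # Then store it where the template reads it, for example:
    #   vault kv put secrets/nomad/torrent hashed_pwd='<hash>'
    # Mind the quoting: the '$' characters must reach Nomad intact.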

@@ -10,11 +10,7 @@ job "supysonic" {
     attribute = "${attr.cpu.arch}"
     value = "amd64"
   }
-  constraint {
-    attribute = "${node.class}"
-    operator = "set_contains"
-    value = "cluster"
-  }
   group "supysonic" {
     network {
       mode = "host"
@@ -53,7 +49,7 @@ job "supysonic" {
     task "supysonic-frontend" {
       driver = "docker"
       config {
-        image = "docker.service.consul:5000/library/nginx:alpine"
+        image = "nginx:alpine"
         ports = [
           "http"
         ]
@@ -96,7 +92,7 @@ http {
     task "supysonic-server" {
       driver = "docker"
       config {
-        image = "docker.service.consul:5000/ducampsv/supysonic:latest"
+        image = "ducampsv/supysonic:latest"
         ports = ["fcgi"]
         force_pull = true
         volumes = [
@@ -109,10 +105,10 @@ http {
         SUPYSONIC_DAEMON_ENABLED = "true"
         SUPYSONIC_WEBAPP_LOG_LEVEL = "DEBUG"
         SUPYSONIC_DAEMON_LOG_LEVEL = "INFO"
-        SUPYSONIC_LDAP_SERVER = "LDAPS://ldaps.service.consul"
-        SUPYSONIC_LDAP_BASE_DN = "dc=ducamps,dc=eu"
-        SUPYSONIC_LDAP_USER_FILTER = "(&(memberOf=cn=SupysonicUsers,ou=groups,dc=ducamps,dc=eu))"
-        SUPYSONIC_LDAP_ADMIN_FILTER= "(&(memberOf=cn=SupysonicAdmins,ou=groups,dc=ducamps,dc=eu))"
+        SUPYSONIC_LDAP_SERVER = "LDAP://ldap.ducamps.eu"
+        SUPYSONIC_LDAP_BASE_DN = "dc=ducamps,dc=win"
+        SUPYSONIC_LDAP_USER_FILTER = "(&(memberOf=CN=SupysonicUsers,cn=groups,dc=ducamps,dc=win))"
+        SUPYSONIC_LDAP_ADMIN_FILTER= "(&(memberOf=CN=SupysonicAdmins,cn=groups,dc=ducamps,dc=win))"
       }
       template {

@@ -10,11 +10,7 @@ job "syncthing" {
     attribute = "${attr.cpu.arch}"
     value = "amd64"
   }
-  constraint {
-    attribute = "${node.class}"
-    operator = "set_contains"
-    value = "cluster"
-  }
   group "syncthing" {
     network {
@@ -44,7 +40,7 @@ job "syncthing" {
         ]
       }
       config {
-        image = "docker.service.consul:5000/linuxserver/syncthing"
+        image = "linuxserver/syncthing"
         ports = ["http"]
         volumes = [
           "/mnt/diskstation/nomad/syncthing/config:/config",
@@ -52,11 +48,6 @@ job "syncthing" {
         ]
       }
-      env{
-        PUID = 1000001
-        GUID = 1000001
-      }
       resources {
         memory = 200
       }

@@ -1,26 +0,0 @@
job "csi-nfs-controller" {
datacenters = ["homelab"]
group "controller" {
task "csi-nfs-controller" {
driver = "docker"
config {
image = "registry.k8s.io/sig-storage/nfsplugin:v4.7.0"
args = [
"--v=5",
"--nodeid=${attr.unique.hostname}",
"--endpoint=unix:///csi/csi.sock",
"--drivername=nfs.csi.k8s.io"
]
}
csi_plugin {
id = "nfs"
type = "controller"
mount_dir = "/csi"
}
resources {
memory = 32
cpu = 100
}
}
}
}

@@ -1,29 +0,0 @@
job "csi-nfs-nodes" {
datacenters = ["homelab","hetzner"]
type = "system"
group "csi-nfs-nodes" {
task "plugin" {
driver = "docker"
config {
image = "registry.k8s.io/sig-storage/nfsplugin:v4.7.0"
args = [
"--v=5",
"--nodeid=${attr.unique.hostname}",
"--endpoint=unix:///csi/csi.sock",
"--drivername=nfs.csi.k8s.io"
]
# node plugins must run as privileged jobs because they
# mount disks to the host
privileged = true
}
csi_plugin {
id = "nfs"
type = "node"
mount_dir = "/csi"
}
resources {
memory = 50
}
}
}
}
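
Volumes can only be scheduled against the `nfs` plugin once the controller and node tasks above report healthy. A minimal sketch that polls Nomad's CSI plugin read endpoint (the default address is an assumption; point NOMAD_ADDR at your cluster):

    # Sketch: check CSI plugin health through the Nomad HTTP API.
    # GET /v1/plugin/csi/:id is Nomad's plugin read endpoint; the
    # fields printed below come from its response.
    import os
    import requests

    NOMAD_ADDR = os.environ.get("NOMAD_ADDR", "http://127.0.0.1:4646")

    resp = requests.get(f"{NOMAD_ADDR}/v1/plugin/csi/nfs", timeout=5)
    resp.raise_for_status()
    plugin = resp.json()

    print(f"controllers: {plugin['ControllersHealthy']}/{plugin['ControllersExpected']}")
    print(f"nodes:       {plugin['NodesHealthy']}/{plugin['NodesExpected']}")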

@@ -1 +0,0 @@
../makefile

@@ -1,31 +0,0 @@
dn: cn=module,cn=config
cn: module
objectClass: olcModuleList
olcModuleLoad: memberof
olcModuleLoad: refint
olcModulePath: /opt/bitnami/openldap/lib/openldap

dn: olcOverlay={0}memberof,olcDatabase={2}mdb,cn=config
objectClass: olcConfig
objectClass: olcMemberOf
objectClass: olcOverlayConfig
objectClass: top
olcOverlay: memberof
olcMemberOfDangling: ignore
olcMemberOfRefInt: TRUE
olcMemberOfGroupOC: groupOfNames
olcMemberOfMemberAD: member
olcMemberOfMemberOfAD: memberOf

dn: olcOverlay={1}refint,olcDatabase={2}mdb,cn=config
objectClass: olcConfig
objectClass: olcOverlayConfig
objectClass: olcRefintConfig
objectClass: top
olcOverlay: {1}refint
olcRefintAttribute: memberof
olcRefintAttribute: member
olcRefintAttribute: manager
olcRefintAttribute: owner
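
This overlay is what makes `memberOf` queryable on user entries, which both the Authelia `groups_filter` above and the supysonic LDAP filters depend on. A minimal sketch of verifying it with the Python `ldap3` package (the bind DN follows the tree used in the openldap job below; the password and test uid are placeholders):

    # Sketch: confirm the memberOf overlay populates user entries.
    # ldap3 does not validate server certificates by default, so the
    # job's self-signed certificate is accepted as-is.
    from ldap3 import ALL, Connection, Server

    server = Server("ldaps://ldaps.service.consul", use_ssl=True, get_info=ALL)
    conn = Connection(
        server,
        user="cn=admin,dc=ducamps,dc=eu",
        password="REDACTED",  # placeholder
        auto_bind=True,
    )

    conn.search(
        "ou=users,dc=ducamps,dc=eu",
        "(uid=someuser)",  # placeholder uid
        attributes=["memberOf"],
    )
    for entry in conn.entries:
        print(entry.entry_dn, entry.memberOf)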

@@ -1,194 +0,0 @@
job "openldap" {
datacenters = ["homelab"]
priority = 90
type = "service"
meta {
forcedeploy = "1"
}
constraint {
attribute = "${attr.cpu.arch}"
value = "amd64"
}
constraint {
attribute = "${node.class}"
operator = "set_contains"
value = "cluster"
}
vault {
policies = ["ldap"]
}
group "openldap" {
network {
mode = "host"
port "ldap" {
static = 389
to = 1389
}
port "ldaps" {
static = 636
to = 1636
}
}
task "selfsignedCertificate" {
lifecycle {
hook= "prestart"
sidecar = false
}
driver= "docker"
config{
image= "stakater/ssl-certs-generator"
mount {
type = "bind"
source = "..${NOMAD_ALLOC_DIR}/data"
target = "/certs"
}
}
env {
SSL_DNS="ldaps.service.consul,ldap.service.consul"
}
resources {
memory = 50
}
}
task "openldap" {
driver = "docker"
service {
name = "ldap"
port = "ldap"
tags = [
]
}
service {
name = "ldaps"
port = "ldaps"
tags = [
]
}
config {
image = "bitnami/openldap"
ports = ["ldap", "ldaps"]
volumes = [
"/mnt/diskstation/nomad/openldap:/bitnami/openldap",
]
}
env {
LDAP_ADMIN_USERNAME = "admin"
LDAP_ROOT = "dc=ducamps,dc=eu"
LDAP_EXTRA_SCHEMAS = "cosine, inetorgperson"
LDAP_CUSTOM_SCHEMA_DIR = "/local/schema"
LDAP_CUSTOM_LDIF_DIR = "/local/ldif"
LDAP_CONFIGURE_PPOLICY = "yes"
LDAP_ALLOW_ANON_BINDING = "no"
LDAP_LOGLEVEL = 64
LDAP_ENABLE_TLS = "yes"
LDAP_TLS_CERT_FILE = "${NOMAD_ALLOC_DIR}/data/cert.pem"
LDAP_TLS_KEY_FILE = "${NOMAD_ALLOC_DIR}/data/key.pem"
LDAP_TLS_CA_FILE = "${NOMAD_ALLOC_DIR}/data/ca.pem"
}
template {
data = <<EOH
{{ with secret "secrets/data/nomad/ldap"}}
LDAP_ADMIN_PASSWORD="{{ .Data.data.admin}}"
{{end}}
EOH
env=true
destination= "secrets/env"
}
#memberOf issue
#https://github.com/bitnami/containers/issues/28335
# https://tylersguides.com/guides/openldap-memberof-overlay
template {
data = file("memberofOverlay.ldif")
destination = "local/schema/memberofOverlay.ldif"
}
template {
data = file("smbkrb5pwd.ldif")
destination = "local/smbkrb5pwd.ldif"
}
template {
data = file("rfc2307bis.ldif")
destination = "local/schema/rfc2307bis.ldif"
}
template {
data = file("samba.ldif")
destination = "local/schema/samba.ldif"
}
template {
data = file("tree.ldif")
destination = "local/ldif/tree.ldif"
}
resources {
memory = 150
}
}
}
group ldpp-user-manager{
network{
mode = "host"
port "http" {
to = 80
}
}
task ldap-user-manager {
driver = "docker"
service {
name = "ldap-user-manager"
port = "http"
tags = [
"traefik.enable=true",
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`ldap.ducamps.eu`)",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=ldap.ducamps.eu",
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
]
}
config {
image = "wheelybird/ldap-user-manager"
ports = ["http"]
}
template {
data = <<EOH
SERVER_HOSTNAME="ldap.ducamps.eu"
LDAP_URI="ldaps://ldaps.service.consul"
LDAP_BASE_DN="dc=ducamps,dc=eu"
LDAP_ADMIN_BIND_DN="cn=admin,dc=ducamps,dc=eu"
LDAP_GROUP_MEMBERSHIP_ATTRIBUTE = "member"
{{ with secret "secrets/data/nomad/ldap"}}
LDAP_ADMIN_BIND_PWD="{{ .Data.data.admin}}"
{{end}}
LDAP_IGNORE_CERT_ERRORS="true"
LDAP_REQUIRE_STARTTLS="false"
LDAP_ADMINS_GROUP="LDAP Operators"
LDAP_USER_OU="users"
NO_HTTPS="true"
EMAIL_DOMAIN="ducamps.eu"
DEFAULT_USER_GROUP="users"
DEFAULT_USER_SHELL="/bin/sh"
USERNAME_FORMAT="{first_name}"
LDAP_RFC2307BIS_SCHEMA="TRUE"
USERNAME_REGEX="^[a-zA-Z][a-zA-Z0-9._-]{3,32}$"
LDAP_GROUP_ADDITIONAL_OBJECTCLASSES="groupOfNames,posixGroup,top"
SHOW_POSIX_ATTRIBUTES="TRUE"
EOH
destination = "secrets/env"
env = true
}
resources {
memory = 70
}
}
}
}

Some files were not shown because too many files have changed in this diff.