Compare commits
No commits in common. "master" and "migratevault" have entirely different histories.
master...migratevault
@@ -22,6 +22,13 @@ make create-dev
 
 ## Rebuild
 
+To rebuild from scratch, Ansible needs a Vault server that is up and unsealed.
+You can rebuild a standalone Vault server from a Consul database snapshot with:
+
+```sh
+make vault-dev FILE=./yourconsulsnapshot.snap
+```
+
 ## Architecture
 
 ```mermaid
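The FILE argument above is a raw Consul snapshot. One way to produce it from a running cluster, as a minimal sketch assuming the consul CLI can reach a server member (the output name is illustrative):

```sh
# Export the current Consul state; Vault's data lives in this snapshot
# when Consul is used as the storage backend.
consul snapshot save yourconsulsnapshot.snap

# Optional sanity check before feeding it to `make vault-dev`.
consul snapshot inspect yourconsulsnapshot.snap
```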
Vagrantfile (vendored, 35 lines changed)
@@ -1,10 +1,9 @@
 Vagrant.configure('2') do |config|
   if Vagrant.has_plugin?('vagrant-cachier')
     config.cache.scope = 'machine'
-    config.cache.enable :pacman
   end
   config.vm.provider :libvirt do |libvirt|
-    libvirt.management_network_domain = "lan.ducamps.dev"
+    libvirt.management_network_domain = "ducamps-dev.win"
 
   end
   config.vm.define "oscar-dev" do |c|
@@ -20,20 +19,14 @@ Vagrant.configure('2') do |config|
     # Provider
     c.vm.provider "libvirt" do |libvirt, override|
 
-      libvirt.memory = 2048
+      libvirt.memory = 1024
       libvirt.cpus = 2
     end
-    c.vm.provision "ansible" do |bootstrap|
-      bootstrap.playbook= "ansible/playbooks/bootstrap.yml"
-      bootstrap.galaxy_roles_path= "ansible/roles"
-      bootstrap.limit="oscar-dev"
-      bootstrap.extra_vars = { ansible_python_interpreter:"/usr/bin/python3" }
-    end
   end
 
   config.vm.define "merlin-dev" do |c|
     # Box definition
-    c.vm.box = "archlinux/archlinux"
+    c.vm.box = "generic/rocky9"
     # Config options
     c.vm.synced_folder ".", "/vagrant", disabled: true
     c.ssh.insert_key = true
@@ -43,21 +36,15 @@ Vagrant.configure('2') do |config|
     # Provider
     c.vm.provider "libvirt" do |libvirt, override|
 
-      libvirt.memory = 512
+      libvirt.memory = 1024
       libvirt.cpus = 2
 
     end
-    c.vm.provision "ansible" do |bootstrap|
-      bootstrap.playbook= "ansible/playbooks/bootstrap.yml"
-      bootstrap.galaxy_roles_path= "ansible/roles"
-      bootstrap.limit="merlin-dev"
-      bootstrap.extra_vars = { ansible_python_interpreter:"/usr/bin/python3" }
-    end
   end
 
   config.vm.define "gerard-dev" do |c|
     # Box definition
-    c.vm.box = "archlinux/archlinux"
+    c.vm.box = "debian/bookworm64"
     # Config options
 
     c.vm.synced_folder ".", "/vagrant", disabled: true
@@ -67,15 +54,9 @@ Vagrant.configure('2') do |config|
     # instance_raw_config_args
     # Provider
     c.vm.provider "libvirt" do |libvirt, override|
-      libvirt.memory = 2048
+      libvirt.memory = 1024
       libvirt.cpus = 2
     end
-    c.vm.provision "ansible" do |bootstrap|
-      bootstrap.playbook= "ansible/playbooks/bootstrap.yml"
-      bootstrap.galaxy_roles_path= "ansible/roles"
-      bootstrap.limit="gerard-dev"
-      bootstrap.extra_vars = { ansible_python_interpreter:"/usr/bin/python3" }
-    end
   end
 
   config.vm.define "nas-dev" do |c|
@@ -90,14 +71,14 @@ Vagrant.configure('2') do |config|
     # Provider
     c.vm.provider "libvirt" do |libvirt, override|
 
-      libvirt.memory = 2048
+      libvirt.memory = 1024
       libvirt.cpus = 2
     end
 
     c.vm.provision "ansible" do |bootstrap|
       bootstrap.playbook= "ansible/playbooks/bootstrap.yml"
       bootstrap.galaxy_roles_path= "ansible/roles"
-      bootstrap.limit="nas-dev"
+      bootstrap.limit="all"
       bootstrap.extra_vars = { ansible_python_interpreter:"/usr/bin/python3" }
     end
   end
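The net effect of this change is that the per-box Ansible provisioner blocks disappear and a single provisioner on nas-dev runs the bootstrap playbook with bootstrap.limit="all", so one provisioning pass covers every box. A sketch of the resulting workflow, using the box names defined above:

```sh
# Boot the boxes; Ansible fires once, when nas-dev comes up.
vagrant up oscar-dev merlin-dev gerard-dev nas-dev

# Re-run only the provisioning step later, without recreating anything.
vagrant provision nas-dev
```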
@@ -99,7 +99,7 @@ host_key_checking = False
 #sudo_flags = -H -S -n
 
 # SSH timeout
-timeout = 30
+#timeout = 10
 
 # default user to use for playbooks if user is not specified
 # (/usr/bin/ansible will use current user as default)
@@ -136,7 +136,7 @@ timeout = 30
 
 # If set, configures the path to the Vault password file as an alternative to
 # specifying --vault-password-file on the command line.
-vault_password_file = ./misc/vault-keyring-client.sh
+#vault_password_file = /path/to/vault_password_file
 
 # format of string {{ ansible_managed }} available within Jinja2
 # templates indicates to users editing templates files will be replaced.
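With vault_password_file commented out of the config, the password source has to be supplied per invocation instead. A minimal sketch, assuming the keyring client script referenced by the removed line is still in the tree:

```sh
# Equivalent of the removed ansible.cfg setting, passed explicitly.
ansible-playbook ansible/playbooks/bootstrap.yml \
  --vault-password-file ./misc/vault-keyring-client.sh
```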
@@ -1,24 +0,0 @@
-pdns_config:
-  local-address: "127.0.0.1"
-  local-port: "5300"
-  api: yes
-  api-key:
-
-pdns_backends:
-  gsqlite3:
-    dnssec: yes
-    database: "/var/lib/powerdns/powerdns.sqlite"
-pdns_sqlite_databases_locations:
-  - "/var/lib/powerdns/powerdns.sqlite"
-
-pdns_rec_config:
-  forward-zones:
-    - "{{ consul_domain }}=127.0.0.1:8600"
-    - "ducamps.win=192.168.1.10"
-    - "{{ domain.name }}=192.168.1.5"
-    - "lan.{{ domain.name }}=192.168.1.5"
-    - "1.168.192.in-addr.arpa=192.168.1.5:5300"
-
-  local-address: "{{ hostvars[inventory_hostname]['ansible_'+ default_interface].ipv4.address|default(ansible_default_ipv4.address) }}"
-  dnssec: "off"
-
@@ -1,90 +0,0 @@
-NAS_nomad_folder:
-  - name: actualbudget
-  - name: archiso
-    owner: 1000001
-  - name: backup
-    owner: 1000001
-  - name: borgmatic
-  - name: crowdsec
-    owner: 1000001
-  - name: dms
-    owner: 1000001
-  - name: filestash
-    owner: 1000
-  - name: gitea
-    owner: 1000000
-  - name: grafana
-    owner: 472
-  - name: hass
-    owner: 1000001
-  - name: homer
-    owner: 1000001
-  - name: immich/cache
-  - name: immich/upload
-  - name: jellyfin
-    owner: 1000001
-  - name: loki
-    owner: 10001
-  - name: mealie
-    owner: 1000001
-  - name: mosquito
-    owner: 1883
-  - name: pacoloco
-    owner: 1000001
-  - name: pdns-auth
-    owner: 1000001
-  - name: pdns-admin
-    owner: 1000001
-  - name: pihole
-    owner: 999
-  - name: prometheus
-    owner: 65534
-  - name: prowlarr
-    owner: 1000001
-  - name: radicale
-    owner: 1000001
-  - name: openldap
-    owner: 1001
-  - name: registry/ghcr
-  - name: registry/docker
-  - name: syncthing
-    owner: 1000001
-  - name: traefik
-    owner: 1000001
-  - name: tt-rss
-    owner: 1000001
-  - name: vaultwarden
-    owner: 1000001
-  - name: zigbee2mqtt
-    owner: 1000001
-nas_bind_target: "/exports"
-
-nas_bind_source:
-  - dest: "{{ nas_bind_target }}/nomad"
-    source: /data/data1/nomad
-  - dest: "{{ nas_bind_target }}/music"
-    source: /data/data1/music
-  - dest: "{{ nas_bind_target }}/download"
-    source: /data/data1/download
-  - dest: "{{ nas_bind_target }}/media/serie"
-    source: /data/data2/serie
-  - dest: "{{ nas_bind_target }}/media/film"
-    source: /data/data3/film
-  - dest: "{{ nas_bind_target }}/photo"
-    source: /data/data1/photo
-  - dest: "{{ nas_bind_target }}/homes"
-    source: /data/data1/homes
-  - dest: "{{ nas_bind_target }}/ebook"
-    source: /data/data1/ebook
-  - dest: "{{ nas_bind_target }}/media/download/serie"
-    source: /data/data1/download/serie
-  - dest: "{{ nas_bind_target }}/media/download/film"
-    source: /data/data1/download/film
-  - dest: "{{ nas_bind_target }}/music/download/"
-    source: /data/data1/download/music
-
-
-
-
-
-
@@ -1 +0,0 @@
-vsftpd_config: {}
@@ -1,15 +0,0 @@
-nfs_cluster_list: "{% for server in groups['all']%} {% if hostvars[server]['ansible_default_ipv4']['address'] is defined %} {{hostvars[server]['ansible_' + hostvars[server]['nfs_iface']|default('')].ipv4.address|default(hostvars[server]['ansible_default_ipv4']['address'],true)}}{{ nfs_options }} {% endif %} {%endfor%}"
-nfs_options: "(rw,no_root_squash,crossmnt,async,insecure_locks,sec=sys)"
-nfs_consul_service: true
-nfs_bind_target: "/exports"
-
-
-nfs_exports:
-  - "{{ nas_bind_target }} *(fsid=0,insecure,no_subtree_check)"
-  - "{{ nas_bind_target }}/nomad {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
-  - "{{ nas_bind_target }}/download {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
-  - "{{ nas_bind_target }}/music {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
-  - "{{ nas_bind_target }}/media {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
-  - "{{ nas_bind_target }}/photo {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
-  - "{{ nas_bind_target }}/homes {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
-  - "{{ nas_bind_target }}/ebook {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
@@ -1 +0,0 @@
-nomad_node_class: 'NAS'
@@ -1,25 +0,0 @@
-samba_passdb_backend: tdbsam
-samba_shares_root: /exports
-samba_shares:
-  - name: media
-    comment: "media"
-    write_list: "@NAS_media"
-    browseable: true
-  - name: ebook
-    comment: "ebook"
-    write_list: "@NAS_ebook"
-    browseable: true
-  - name: music
-    comment: "music"
-    write_list: "@NAS_music"
-    browseable: true
-  - name: photo
-    comment: "photo"
-    write_list: "@NAS_photo"
-    browseable: true
-  - name: download
-    comment: "download"
-    write_list: "@NAS_download"
-    browseable: true
-samba_load_homes: True
-samba_homes_include: samba_homes_include.conf
@@ -42,4 +42,35 @@ nomad_datacenter: hetzner
 
 consul_server: False
 nomad_server: False
+systemd_mounts:
+  diskstation_nomad:
+    share: diskstation.ducamps.win:/volume2/nomad
+    mount: /mnt/diskstation/nomad
+    type: nfs
+    options:
+      - " "
+    automount: true
+  hetzner_storage:
+    share: //u304977.your-storagebox.de/backup
+    mount: /mnt/hetzner/storagebox
+    type: cifs
+    options:
+      - credentials=/etc/creds/hetzner_credentials
+      - uid= 1024
+      - gid= 10
+      - vers=3.0
+      - mfsymlinks
+    automount: true
+
+credentials_files:
+  1:
+    type: smb
+    path: /etc/creds/hetzner_credentials
+    username: u304977
+    password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/storage:hetzner') }}"
+
+
+
+systemd_mounts_enabled:
+  - diskstation_nomad
+  - hetzner_storage
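Once deployed, each entry becomes a systemd mount unit whose name is derived from the mount path, which gives a quick way to check that the CIFS share actually came up; a sketch (unit naming follows systemd's path escaping):

```sh
# "/mnt/hetzner/storagebox" escapes to "mnt-hetzner-storagebox.mount".
systemctl status mnt-hetzner-storagebox.mount

# Or let systemd compute the unit name.
systemctl status "$(systemd-escape --path --suffix=mount /mnt/hetzner/storagebox)"
```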
@@ -1,28 +0,0 @@
-systemd_mounts:
-  diskstation_nomad:
-    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/nomad"
-    mount: /mnt/diskstation/nomad
-    type: nfs
-    options:
-      - "vers=4"
-    automount: "{{ env_automount }}"
-    enabled: true
-  hetzner_storage:
-    share: //u304977.your-storagebox.de/backup
-    mount: /mnt/hetzner/storagebox
-    type: cifs
-    options:
-      - credentials=/etc/creds/hetzner_credentials
-      - uid=100001
-      - gid=10
-      - vers=3.0
-      - mfsymlinks
-    automount: "{{ env_automount }}"
-    enabled: true
-
-credentials_files:
-  1:
-    type: smb
-    path: /etc/creds/hetzner_credentials
-    username: u304977
-    password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/storage:hetzner') }}"
@@ -1,12 +0,0 @@
-$ANSIBLE_VAULT;1.1;AES256
-31303539336464336239376636623862303066336438383739356163616431643366386565366361
-3264336232303135336334333663326234393832343235640a313638323963666631353836373531
-61636261623662396330653135326238363630363938323166303861313563393063386161393238
-3231336232663533640a333763643864363939336566333731353031313739616633623537386435
-39613934663133613733356433616162363430616439623830663837343530623937656434366663
-33656466396263616132356337326236383761363834663363643163343231366563333865656433
-39316365663734653734363362363539623636666261333534313935343566646166316233623535
-32323831626463656337313266343634303830633936396232663966373264313762346235646665
-61333139363039363436393962666365336334663164306230393433636664623934343039323637
-33383036323233646237343031633030353330633734353232343633623864333834646239346362
-643634303135656333646235343366636361
@@ -1,45 +0,0 @@
-# defaults file for ansible-arch-provissionning
-partition_table:
-  - device: "/dev/sda"
-    label: gpt
-    settings:
-      - number: 1
-        part_end: 64MB
-        flags: [boot, esp]
-        fstype: vfat
-        format: yes
-      - number: 2
-        part_start: 512MB
-        part_end: 1524MB
-        flags: []
-        fstype: swap
-        format: yes
-      - number: 3
-        part_start: 1524MB
-        flags: [lvm]
-        fstype: ext4
-        format: yes
-#- device: "/dev/sdb"
-#settings:
-#- number: 1
-#name: home
-#fstype: ext4
-#format:
-mount_table:
-  - device: "/dev/sda"
-    settings:
-      - number: 3
-        mountpath: /mnt
-        fstype: ext4
-      - number: 1
-        mountpath: /mnt/boot
-        fstype: vfat
-
-#need vfat boot partition with esp label
-provissionning_UEFI_Enable: True
-#sssd_configure: False
-nomad_datacenter: hetzner
-
-consul_server: False
-nomad_server: False
-
@@ -1,7 +1,36 @@
 ansible_python_interpreter: /usr/bin/python3
+user:
+  name: vincent
+  home: /home/vincent
+  uid: 1024
+  mail: vincent@ducamps.win
+  groups:
+    - docker
+  authorized_keys:
+    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINN5V9WPPi2/HwAQuDeaJO3hUPf8HxNMHqVmkf1pDjWg JuiceSSH
+    - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDBrP9akjyailPU9tUMvKrtDsqjI191W1L95u3OFjBqqapXgbDVx1FVtSlIIKcCHZyTII1zgC7woZmNRpmaIJRh6N+VIuRrRs29xx2GUVc4pxflUwwIAK36hgZS3nqmA2biacmPR9HogZLZMcPtZdLhWGlLuUv1cWqbqW7UcDa0lbubCo2v4OQMx/zt37voKAZSkkbH9mVszH6eKxNFy1KXbLYhwXiKfYBnAHbivhiSkZUGV6D4HNj8Jx6IY1YF3bfwMXmt841Q/7OY+t3RTIS8ewvSF+jpQ7GKHBEsZTZUGwIoSyZFFvCgKQVOJu/ZJJS4HNkluilir9Sxtx2LRgy+HHQ251trnsVsJp3ts4uTiMkKJQy1PXy1ZvQXYkip9Af3vlXUMmTyVj8cv+No07G1rZ1pZ3wXKX4RkTsoep5GsYlhyUd7GzsAQQiX9YhYyWDQ6NHBYAGAWbw2BLNxltWa4AyWOa1C8v+1+mRwdvpdMY7powJNCXQaIJmiOZiI/Us= vincent@fixe-pc-2020-03-01
+    - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCYHkEIa38p3e4+m/LScHm8Ei7H2X/pDksjVAzoJ4fHr8oXc6DKkC8SWwMnh3L4WzWBhfTbzwUgFTNpsxhp/UyJf+fdzmzetlbVlYSuA6yWuSmgMeFbXFImhZ+Sn3i59hLeqAAyrkQLjba2waehdEsuOQ/AGoDbMYm38Xf9Wka/1YIeUPE4gLeLvymRnGw7BSug6Unycy52WlFAquollObOvc7tNiX0uLDh81Dp0KZhqWRs75hfmQ9du4g4uNhFLiF11hOGNgj3PWV+nWe8GWNQYVUBChWX1dsP8ct/ahG9IFXSPEaFD1IZeFp29u2ln3mgKkBtcRTRe1e3CLQqiRsUq2aixVFbSgFMFgGSUiNGNqKR4f9DeyJrYBplSj6HXjWoBny4Wm8+yfk8qR2RtQpS6AUu81xtKnXOaj9Q5VZO3kVF0U3EXHAZutTYDj9mDlhLSBS7x7hmrkRBbIy7adSx9Gx5Ck3/RllqG6KD+LdJa4I0pUTRNetpLpYDeZpwjnDP1r7udaSQMyRMH5YKLzhtHqIV/imn9QO4KCxNxTgwxt9ho6HDvlDGERCxm+yeHUu3CPyq2ZGSF5HHsYTGUtYvQw4JfQyw/5DrZ7IIdU1e7ZuaE3h/NvFgKJPVTP52nmUtIW7pIOkHpn9mddjm/oKMayOzMspLn9HLFVbqi7A5Xw== vincent@zen-pc
+  privatekey:
+    - keyname: "id_gitea"
+      key: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/privatekey:gitea')}}"
+
+user_config_repo: "ssh://git@git.{{ domain.name }}:2222/vincent/conf2.git"
+domain:
+  name: ducamps.win
+
 hass_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDfVei9iC/Ra5qmSZcLu8z2CTaXCmfn4JSS4o3eu0HhykdYGSqhBTcUDD3/FhcTPQJVFsu1P4Gwqq1dCE+EvaZZRQaMUqVKUpOliThSG6etbImkvqLQQsC1qt+/NqSvfzu2+28A6+YspzuxsViGo7e3Gg9MdwV3LMGh0mcOr/uXb/HIk18sJg5yQpwMfYTj0Wda90nyegcN3F2iZMeauh/aaFJzWcHNakAAewceDYOErU07NhlZgVA2C8HgkJ8HL7AqIVqt9VOx3xLp91DbKTNXSxvyM0X4NQP24P7ZFxAOk/j0AX3hAWhaNmievCHyBWvQve1VshZXFwEIiuHm8q4GSCxK2r0oQudKdtIuQMfuUALigdiSxo522oEiML/2kSk17WsxZwh7SxfD0DKa82fy9iAwcAluWLwJ+yN3nGnDFF/tHYaamSiowpmTTmQ9ycyIPWPLVZclt3BlEt9WH/FPOdzAyY7YLzW9X6jhsU3QwViyaTRGqAdqzUAiflKCMsNzb5kq0oYsDFC+/eqp1USlgTZDhoKtTKRGEjW2KuUlDsXGBeB6w1D8XZxXJXAaHuMh4oMUgLswjLUdTH3oLnnAvfOrl8O66kTkmcQ8i/kr1wDODMy/oNUzs8q4DeRuhD5dpUiTUGYDTWPYj6m6U/GAEHvN/2YEqSgfVff1iQ4VBw==
 
 system_arch_local_mirror: "https://arch.{{domain.name}}/repo/archlinux_$arch"
 
 
 system_sudoers_group: "serverAdmin"
 system_ipV6_disable: True
-system_ip_unprivileged_port_start: 0
-wireguard_mtu: 1420
+user_custom_host:
+  - host: "git.ducamps.win"
+    user: "git"
+    keyfile: "~/.ssh/id_gitea"
+  - host: "gitlab.com"
+    user: "git"
+    keyfile: "~/.ssh/id_consort"
 
@@ -1,5 +0,0 @@
-consul_client_addr: "0.0.0.0"
-consul_datacenter: "homelab"
-consul_backup_location: "/mnt/diskstation/git/backup/consul"
-consul_ansible_group: all
-consul_systemd_resolved_enable: true
@@ -1,8 +0,0 @@
-docker_daemon_config:
-  dns:
-    - 172.17.0.1
-    - 192.168.1.6
-  mtu: 1420
-  insecure-registries:
-    - 192.168.1.0/24
-    - 192.168.121.0/24
@@ -1,9 +0,0 @@
-nomad_docker_allow_caps:
-  - NET_ADMIN
-  - NET_BROADCAST
-  - NET_RAW
-nomad_allow_privileged: True
-nomad_vault_enabled: true
-nomad_vault_address: "http://active.vault.service.{{consul_domain}}:8200"
-nomad_vault_role: "nomad-cluster"
-nomad_docker_extra_labels: ["job_name", "task_group_name", "task_name", "namespace", "node_name"]
ansible/group_vars/all/server (new file, 42 lines)
@@ -0,0 +1,42 @@
+consul_client_addr: "0.0.0.0"
+consul_datacenter: "homelab"
+consul_backup_location: "/mnt/diskstation/git/backup/consul"
+consul_ansible_group: all
+consul_bootstrap_expect: 3
+nomad_docker_allow_caps:
+  - NET_ADMIN
+  - NET_BROADCAST
+  - NET_RAW
+nomad_vault_enabled: true
+nomad_vault_address: "http://active.vault.service.consul:8200"
+nomad_vault_role: "nomad-cluster"
+nomad_vault_token: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:nomad_vault_token') }}"
+nomad_bootstrap_expect: 3
+notification_mail: "{{inventory_hostname}}@{{ domain.name }}"
+msmtp_mailhub: smtp.{{ domain.name }}
+msmtp_auth_user: "{{ user.mail }}"
+msmtp_auth_pass: "{{ lookup('hashi_vault','secret=secrets/data/ansible/other:email') }}"
+
+system_user:
+  - name: drone-deploy
+    home: /home/drone-deploy
+    shell: /bin/bash
+    privatekey:
+      - keyname: id_gitea
+        key: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/privatekey:gitea')}}"
+
+
+    authorized_keys:
+      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDUaK+pQlosmopbZfucll9UdqDOTaODOBwoxRwkJEk1i drone@oscar
+
+  - name: ansible
+    home: /home/ansible
+    shell: /bin/bash
+
+  - name: root
+    home: /root
+    privatekey:
+      - keyname: id_gitea
+        key: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/privatekey:gitea')}}"
+
+
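Nearly every value in this new file is resolved at run time through the hashi_vault lookup plugin, so the control machine needs a reachable Vault address and a valid token before these vars can even be templated. A minimal sketch of the required environment; the address reuses nomad_vault_address from above, and the token file location is an assumption:

```sh
# Environment read by the hashi_vault lookup plugin.
export VAULT_ADDR="http://active.vault.service.consul:8200"
export VAULT_TOKEN="$(cat ~/.vault-token)"  # token cached by a prior `vault login`

# Smoke test: resolve one of the secrets referenced above.
ansible localhost -m debug \
  -a "msg={{ lookup('hashi_vault', 'secret=secrets/data/ansible/other:email') }}"
```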
@@ -1,5 +1,9 @@
 sssd_configure: true
 # sssd_configure is False by default - by default nothing is done by this role.
-ldap_search_base: "dc=ducamps,dc=eu"
+ldap_search_base: "dc=ducamps,dc=win"
-ldap_uri: "ldaps://ldaps.service.consul"
+ldap_uri: "ldaps://ldap.ducamps.win"
-ldap_sudo_search_base: "ou=sudoers,dc=ducamps,dc=eu"
+ldap_sudo_search_base: "ou=sudoers,dc=ducamps,dc=win"
+ldap_default_bind_dn : "uid=vaultserviceaccount,cn=users,dc=ducamps,dc=win"
+ldap_password : "{{lookup('hashi_vault', 'secret=secrets/data/ansible/other:vaulserviceaccount')}}"
+userPassword: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/user:userPassword')}}"
+
@@ -1,42 +0,0 @@
-user:
-  name: vincent
-  home: /home/vincent
-  uid: 1024
-  mail: vincent@ducamps.eu
-  groups:
-    - docker
-  authorized_keys:
-    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINN5V9WPPi2/HwAQuDeaJO3hUPf8HxNMHqVmkf1pDjWg JuiceSSH
-    - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDBrP9akjyailPU9tUMvKrtDsqjI191W1L95u3OFjBqqapXgbDVx1FVtSlIIKcCHZyTII1zgC7woZmNRpmaIJRh6N+VIuRrRs29xx2GUVc4pxflUwwIAK36hgZS3nqmA2biacmPR9HogZLZMcPtZdLhWGlLuUv1cWqbqW7UcDa0lbubCo2v4OQMx/zt37voKAZSkkbH9mVszH6eKxNFy1KXbLYhwXiKfYBnAHbivhiSkZUGV6D4HNj8Jx6IY1YF3bfwMXmt841Q/7OY+t3RTIS8ewvSF+jpQ7GKHBEsZTZUGwIoSyZFFvCgKQVOJu/ZJJS4HNkluilir9Sxtx2LRgy+HHQ251trnsVsJp3ts4uTiMkKJQy1PXy1ZvQXYkip9Af3vlXUMmTyVj8cv+No07G1rZ1pZ3wXKX4RkTsoep5GsYlhyUd7GzsAQQiX9YhYyWDQ6NHBYAGAWbw2BLNxltWa4AyWOa1C8v+1+mRwdvpdMY7powJNCXQaIJmiOZiI/Us= vincent@fixe-pc-2020-03-01
-    - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCYHkEIa38p3e4+m/LScHm8Ei7H2X/pDksjVAzoJ4fHr8oXc6DKkC8SWwMnh3L4WzWBhfTbzwUgFTNpsxhp/UyJf+fdzmzetlbVlYSuA6yWuSmgMeFbXFImhZ+Sn3i59hLeqAAyrkQLjba2waehdEsuOQ/AGoDbMYm38Xf9Wka/1YIeUPE4gLeLvymRnGw7BSug6Unycy52WlFAquollObOvc7tNiX0uLDh81Dp0KZhqWRs75hfmQ9du4g4uNhFLiF11hOGNgj3PWV+nWe8GWNQYVUBChWX1dsP8ct/ahG9IFXSPEaFD1IZeFp29u2ln3mgKkBtcRTRe1e3CLQqiRsUq2aixVFbSgFMFgGSUiNGNqKR4f9DeyJrYBplSj6HXjWoBny4Wm8+yfk8qR2RtQpS6AUu81xtKnXOaj9Q5VZO3kVF0U3EXHAZutTYDj9mDlhLSBS7x7hmrkRBbIy7adSx9Gx5Ck3/RllqG6KD+LdJa4I0pUTRNetpLpYDeZpwjnDP1r7udaSQMyRMH5YKLzhtHqIV/imn9QO4KCxNxTgwxt9ho6HDvlDGERCxm+yeHUu3CPyq2ZGSF5HHsYTGUtYvQw4JfQyw/5DrZ7IIdU1e7ZuaE3h/NvFgKJPVTP52nmUtIW7pIOkHpn9mddjm/oKMayOzMspLn9HLFVbqi7A5Xw== vincent@zen-pc
-  privatekey:
-    - keyname: "id_gitea"
-      key: "{{lookup('file', '~/.ssh/id_gitea')}}"
-
-
-
-system_user:
-  - name: drone-deploy
-    home: /home/drone-deploy
-    shell: /bin/bash
-    authorized_keys:
-      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDUaK+pQlosmopbZfucll9UdqDOTaODOBwoxRwkJEk1i drone@oscar
-
-  - name: ansible
-    home: /home/ansible
-    shell: /bin/bash
-
-  - name: root
-    home: /root
-    privatekey:
-      - keyname: id_gitea
-        key: "{{lookup('file', '~/.ssh/id_gitea')}}"
-
-
-
-user_custom_host:
-  - host: "git.ducamps.eu"
-    user: "git"
-    keyfile: "~/.ssh/id_gitea"
-
-user_config_repo: "ssh://git@git.ducamps.eu:2222/vincent/conf2.git"
@@ -1 +0,0 @@
-vault_raft_group_name: "homelab"
@@ -1,11 +0,0 @@
-$ANSIBLE_VAULT;1.1;AES256
-39613433313663653039643961643165643632313938626339653365376633613135653436363938
-6331623132366638633665636163336462393333336264320a666466303465663839646435626231
-38396437363034313236383261326637306238616162303131356537393635363939376236386130
-6466353961643233310a306631333664363332336263656638623763393732306361306632386662
-37623934633932653965316532386664353130653830356237313337643266366233346633323265
-37616533303561363864626531396366323565396536383133643539663630636633356238386633
-34383464333363663532643239363438626135336632316135393537643930613532336231633064
-35376561663637623932313365636261306131353233636661313435643563323534623365346436
-65366132333635643832353464323961643466343832376635386531393834336535386364396333
-3932393561646133336437643138373230366266633430663937
@@ -1,12 +0,0 @@
-$ANSIBLE_VAULT;1.1;AES256
-61326233336236343231396231306638373837653661313334313261313539316532373437346132
-3931306637303530373032663236363466383433316161310a396439393564643731656664663639
-32386130663837303663376432633930393663386436666263313939326631616466643237333138
-3365346131636333330a376436323964656563363664336638653564656231636136663635303439
-35346461356337303064623861326331346263373539336335393566623462343464323065366237
-61346637326336613232643462323733366530656439626234663335633965376335623733336162
-37323739376237323534613361333831396531663637666161666366656237353563626164626632
-33326336353663356235373835666166643465666562616663336539316233373430633862613133
-36363831623361393230653161626131353264366634326233363232336635306266376363363739
-66373434343330633337633436316135656533613465613963363931383266323466653762623365
-363332393662393532313063613066653964
@@ -1,14 +0,0 @@
-$ANSIBLE_VAULT;1.1;AES256
-35303137383361396262313561623237626336306366376630663065396664643630383638376436
-3930346265616235383331383735613166383461643233310a663564356266663366633539303630
-37616532393035356133653838323964393464333230313861356465326433353339336435363263
-3162653932646662650a613762393062613433343362633365316434663661306637623363333834
-61303231303362313133346461373738633239613933303564383532353537626538363636306461
-66663330346566356637623036363964396137646435333139323430353639386134396537366334
-39303130386432366335383433626431663034656466626265393863623438366130346562623365
-63653963393663353666313631326131636361333230386461383638333338393137336562323935
-37343034363961306663303232346139356534613837663230393962323333656536303161373939
-65626164336166306264653538313661393934383966303135356161336331623835663235646332
-63343764643861366537383962616230323036326331386333346463353835393762653735353862
-32323839663365353337303363313535633362643231653663393936363539363933636430613832
-32336566633962646463316636346330336265626130373636643335323762363661
@@ -1,14 +0,0 @@
-$ANSIBLE_VAULT;1.1;AES256
-64396261616266633665646330393631316463386334633032353965323964633464333331323334
-6261653930313764313836366531383462313965336231620a656637623439623639383931373361
-37373434636531623563336565356136633031633835633636643436653165386436636564616130
-3763383036343739370a376565343130636631653635616566653531323464343632623566313436
-32396165636333393032636636613030373663393238323964396462323163616162613933626536
-31623931343633346131636563643563393230323839636438373933666137393031326532356535
-32363439306338623533353734613966396362303164616335363535333438326234623161653732
-66613762653966613763623966633939323634346536636334343364306332323563653361346563
-65313433376634363261323934376637646233636233346536316262386634353666376539613235
-63666432396636373139663861393164626165383665663933383734303165623464666630343231
-33323339663138373530396636636333323439616137313434316465633162396237306238343366
-30326162306539396630633738323435323432646338633331626665363838376363343835336534
-3635
@@ -1,50 +0,0 @@
-systemd_mounts:
-  diskstation_photo:
-    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/photo"
-    mount: /mnt/diskstation/photo
-    type: nfs
-    options:
-      - "vers=4"
-    automount: "{{ env_automount }}"
-    enabled: true
-  diskstation_music:
-    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/music"
-    mount: /mnt/diskstation/music
-    type: nfs
-    options:
-      - "vers=4"
-    automount: "{{ env_automount }}"
-    enabled: true
-  diskstation_media:
-    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/media"
-    mount: /mnt/diskstation/media
-    type: nfs
-    options:
-      - "vers=4"
-    automount: "{{ env_automount }}"
-    enabled: true
-
-  diskstation_ebook:
-    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/ebook"
-    mount: /mnt/diskstation/ebook
-    type: nfs
-    options:
-      - "vers=4"
-    automount: "{{ env_automount }}"
-    enabled: true
-  diskstation_nomad:
-    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/nomad"
-    mount: /mnt/diskstation/nomad
-    type: nfs
-    options:
-      - " "
-    automount: "{{ env_automount }}"
-    enabled: true
-  diskstation_download:
-    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/download"
-    mount: /mnt/diskstation/download
-    type: nfs
-    options:
-      - "vers=4"
-    automount: "{{ env_automount }}"
-    enabled: true
@@ -1 +0,0 @@
-nomad_node_class: 'cluster'
ansible/group_vars/database (new file, 50 lines)
@@ -0,0 +1,50 @@
+
+postgresql_users:
+  - name: root
+    role_attr_flags: SUPERUSER
+    password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:root')}}"
+  - name: wikijs
+    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/wikijs:password')}}"
+  - name: ttrss
+    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/ttrss:password')}}"
+  - name: gitea
+    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/gitea:password')}}"
+  - name: supysonic
+    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/supysonic:password')}}"
+  - name: hass
+    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/homeassistant:password')}}"
+  - name: vaultwarden
+    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/vaultwarden:password')}}"
+  - name: drone
+    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/droneci:password')}}"
+  - name: dendrite
+    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/dendrite:password')}}"
+  - name: paperless
+    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/paperless:password')}}"
+  - name: dump
+    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/dump:password')}}"
+  - name: vikunja
+    password: "{{ lookup('hashi_vault', 'secret=secrets/data/database/vikunja:password')}}"
+
+postgresql_databases:
+  - name: wikijs
+    owner: wikijs
+  - name: ttrss
+    owner: ttrss
+  - name: gitea
+    owner: gitea
+  - name: supysonic
+    owner: supysonic
+  - name: hass
+    owner: hass
+  - name: vaultwarden
+    owner: vaultwarden
+  - name: drone
+    owner: drone
+  - name: dendrite
+    owner: dendrite
+  - name: paperless
+    owner: paperless
+  - name: vikunja
+    owner: vikunja
+
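All of these passwords live in Vault rather than in the repo. Note that the lookup strings use the KV v2 API path (secrets/data/...), while the vault CLI drops the data/ segment. A sketch of seeding one of the referenced secrets; the value is obviously illustrative:

```sh
# CLI path omits the "data/" that appears in the hashi_vault lookup.
vault kv put secrets/database/wikijs password='example-only'

# Read it back the way the lookup will resolve it.
vault kv get -field=password secrets/database/wikijs
```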
@@ -1,38 +0,0 @@
-postgres_consul_service: true
-postgres_consul_service_name: db
-
-postgresql_databases:
-  - name: ttrss
-    owner: ttrss
-  - name: gitea
-    owner: gitea
-  - name: supysonic
-    owner: supysonic
-  - name: hass
-    owner: hass
-  - name: vaultwarden
-    owner: vaultwarden
-  - name: drone
-    owner: drone
-  - name: paperless
-    owner: paperless
-  - name: vikunja
-    owner: vikunja
-  - name: ghostfolio
-    owner: ghostfolio
-  - name: pdns-auth
-    owner: pdns-auth
-  - name: pdns-admin
-    owner: pdns-admin
-  - name: mealie
-    owner: mealie
-  - name: immich
-    owner: immich
-
-postgresql_hba_entries:
-  - {type: local, database: all, user: postgres, auth_method: peer}
-  - {type: local, database: all, user: all, auth_method: peer}
-  - {type: host, database: all, user: all, address: '127.0.0.1/32', auth_method: md5}
-  - {type: host, database: all, user: all, address: '::1/128', auth_method: md5}
-  - {type: host, database: all, user: all, address: '::0/128', auth_method: md5}
-  - {type: host, database: all, user: all, address: '0.0.0.0/0', auth_method: md5}
@@ -1,54 +0,0 @@
-$ANSIBLE_VAULT;1.1;AES256
-39363436643831373861376361613830316334613939346338616636393462663033393261633838
-6337336161393063646136613538396366653538656435360a303062636463383739653730346639
-61323634306265613336313634653039313639663836363032353261383566393865613166613032
-3837313634633466610a313062646237396138316361303361663565353862363139343566306539
-38303161303163323265376539323939393938373965353934303535613962653534363362346563
-61643638353138623162353364353736396162613735333063633739346132613161303564356437
-62343535363263646463306466663536613937393463666336396332646533343439613433626566
-38643363343065393165646134343935386461626166316662356365366666363737653336626631
-64643230616431396666666462303366343164323233303139643939346635353730316234386163
-35613235643034643833393233373536383863333763393066373564353535353463363336316335
-63363537643432663266386438316563656663656462333039303861393364333966383430643263
-63356435373064633861343137616637393161383361306135373864386235653034323732316663
-65336465386135663532356433386562666639333464633362663131646237613034646563396133
-33303464633635636233626633353038656230373266666132323561383866343632333561323363
-61346664623338376436373332646232646235323639633262666166346535663238653563363239
-34663365633363313433376333653534333364393635316235333965383262313563373161663065
-36393565396534353235623238303835343334646632306638306332336539616463393966653538
-35336462623031326539633139636533633632623137393463333531663935323765663139306361
-66643434393533313039356434326438626265323066613966323634306632653765363834613034
-30373039336536393865383265643335396232643537343363313338383838383030386665303237
-64363666346535633237353462333232623132353031323231623338356136656261303662656465
-31313039643561623635643435333133663032313964323061393231666336343233363038616231
-36356262326530383233336130326361613431623866633832663361633937646461343731343938
-33306262346463623935663466356264393837626239313739356431653163376563333234346566
-38373663643532313635333131663239383736343930623735323861663037356136353433633865
-63626435613936303661366637623338633961643137613933303735366265663933396130363039
-34396637643638613839306639343765393539653164616536653661373264376436626639316666
-61303835323761643531326438363035343539383464376433363534623934366534373631353364
-61383866323737316430303736366533643939313637393631303833363431613562303639323939
-66313434613963656464383964313734383938353366306462666537653563336465376464303538
-34336531663334303938333739313638636363623562613536333736386137363139653164626261
-62663662316365663563646164303935323866633336633939323837393962393130626330666233
-63663661303565646236623130663034636264353235376561306630376365613966663536303963
-63643161386435633831393334333035653761393863373731616239313235383033633439376166
-39613762376162386231633938393036633461303732323337656430373430636435313337303365
-37646461336339623339316663616636373036656564383462356562306465623762653162633963
-35636466386138333564666564323034393162633965386133643235303938616439333130353637
-61343536323034366464653138353665326436396133313432666563353335383733363335613562
-61646365346665383866623364396138323666326338313530353663323938613362653038313339
-32613663616535313661386538366330373364366637386634633437646362383764346263636434
-35616166393065343038643861636333373738363335353164326435303961326662356230323262
-35656531653535643630376330393731643532353132366662636664626132646632306361323035
-31373136616435336362633439356339336466313337623538383763386132396135653864386638
-31393864363466653137643565306462616238333435343036613331653866393532313861376331
-33646636623666343439616332386363373664346164313963623861393134666463383366633539
-35313761333564303635656364303566643436393130356163623137313530653539656537653139
-38336636623732313630303933303962303561376436623737633139643564343166326335386639
-31373437336139326562613339393235393065396538333566323864643639303132313733396132
-35613532396363326166313061353136373965303964623534653634613639303764393038333037
-63656131616463663565653134363336326139303736313138366262616338643339316231663631
-30656132386462393433313261313466303239346138623433643634616465656139343764353338
-62616139613731363665333438383861623837643432643134626461643631323034383262656439
-33653563323434343964633236353434643739333863636630636363633639373630
@@ -1 +0,0 @@
-postgres_consul_tag: "active"
@@ -1 +0,0 @@
-postgres_consul_tag: "standby"
@@ -1,24 +1,28 @@
 dhcpd_authoritative: True
 dhcpd_lease_time: '72'
-dhcpd_domain_name: "lan.{{ domain.name }}"
+dhcpd_domain_name: "{{ domain.name }}"
 dhcpd_nameservers:
   - '192.168.1.4'
-  - '192.168.1.40'
+  - '192.168.1.10'
+dhcpd_keys:
+  - key: dhcp
+    algorithm: HMAC-MD5
+    secret: "{{ lookup('hashi_vault','secret=secrets/data/ansible/other:dhcpd_key') }}"
+
 dhcpd_zones:
-  - zone: "lan.{{ domain.name }}."
+  - zone: "{{ domain.name }}."
-    primary: "192.168.1.5"
+    primary: "192.168.1.10"
-    key: "dhcpdupdate"
+    key: "dhcp"
   - zone: "1.168.192.in-addr.arpa."
-    primary: "192.168.1.5"
+    primary: "192.168.1.10"
-    key: "dhcpdupdate"
+    key: "dhcp"
 
 dhcpd_options: |
   ddns-updates on;
   ddns-update-style interim;
   ignore client-updates;
   update-static-leases on;
-  ddns-domainname "lan.{{ domain.name }}.";
+  ddns-domainname "ducamps.win.";
   ddns-rev-domainname "in-addr.arpa.";
 
 
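The new dhcpd_keys entry wires dhcpd's dynamic DNS updates to a TSIG key whose secret is fetched from Vault. One way to generate such a secret, assuming BIND's tsig-keygen is available; HMAC-MD5 matches the algorithm above but is legacy, so stronger algorithms are preferable where both ends support them:

```sh
# Prints a key clause; the secret "..." value is what goes into Vault.
tsig-keygen -a hmac-md5 dhcp
```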
@@ -41,10 +45,17 @@ dhcpd_hosts:
 
   - hostname: 'oscar'
     address: '192.168.1.40'
-    ethernet: '68:1D:EF:3C:F0:44'
+    ethernet: '7C:83:34:B3:49:9A'
   - hostname: 'bleys'
    address: '192.168.1.42'
     ethernet: '68:1d:ef:2b:3d:24'
+  - hostname: 'VMAS-HML'
+    address: '192.168.1.50'
+    ethernet: '52:54:00:02:74:ed'
+
+  - hostname: 'VMAS-BUILD'
+    address: '192.168.1.53'
+    ethernet: '52:54:13:1e:93'
+
+
   - hostname: 'xiaomi-chambre-gateway'
@@ -62,7 +73,4 @@ dhcpd_hosts:
   - hostname: 'shelly-chambre-ventilo'
     address: '192.168.1.65'
     ethernet: 'e0:98:06:97:78:0b'
-  - hostname: 'shelly-Bureau-chauffeau'
-    address: '192.168.1.66'
-    ethernet: '8c:aa:b5:42:b9:b9'
-
@@ -1,14 +0,0 @@
-$ANSIBLE_VAULT;1.1;AES256
-65303666336535386536653939626336646338623431353161636565393532623264316534326539
-6265393839323438376666393030383839326239323261660a333132613538306137383332336538
-38323830353062366133643734303138343939323135333532333666653039326437316361353463
-6665393263376132620a346239386437326462363565636335303766306638393331656664376665
-63373131373039653065633861626263646635323634333538343163346239633937303761366362
-31376438363731613666393531656232653033336332653261313866396434616461303831353336
-38663965636536313932346133363733636636643938366364366435366237316435643062336231
-34343931653963613431336465653036616431323263613731393963656637303561366461663038
-31336131346266393035343135323131636435333865323733386439363763376638383337613530
-34356331356361636665383933633130343564373739343630663835313164326565393439306163
-31386538633033333961386534323234653833323537356565616436346462613333663139623035
-30636265313230383162633466373937353262383965313631326336666133653331366230653961
-6131
@@ -1,2 +1,3 @@
 nomad_datacenter: homelab
+nomad_allow_privileged: True
 system_wol_enable: True
ansible/group_vars/homelab/mount (new file, 83 lines)
@@ -0,0 +1,83 @@
+systemd_mounts:
+  diskstation_git:
+    share: diskstation.ducamps.win:/volume2/git
+    mount: /mnt/diskstation/git
+    type: nfs
+    options:
+      - " "
+    automount: true
+  diskstation_CardDav:
+    share: diskstation.ducamps.win:/volume2/CardDav
+    mount: /mnt/diskstation/CardDav
+    type: nfs
+    options:
+      - " "
+    automount: true
+  backup_disk:
+    share: /dev/sdb1
+    mount: /mnt/backup
+    type: ntfs-3g
+    options:
+      - " "
+    automount: true
+  diskstation_home:
+    share: diskstation.ducamps.win:/volume2/homes/admin
+    mount: /mnt/diskstation/home
+    type: nfs
+    options:
+      - " "
+    automount: true
+  diskstation_photo:
+    share: diskstation.ducamps.win:/volume2/photo
+    mount: /mnt/diskstation/photo
+    type: nfs
+    options:
+      - " "
+    automount: true
+  diskstation_music:
+    share: diskstation.ducamps.win:/volume2/music
+    mount: /mnt/diskstation/music
+    type: nfs
+    options:
+      - " "
+    automount: true
+  diskstation_media:
+    share: diskstation.ducamps.win:/volume1/media
+    mount: /mnt/diskstation/media
+    type: nfs
+    options:
+      - " "
+    automount: true
+  diskstation_ebook:
+    share: diskstation.ducamps.win:/volume2/ebook
+    mount: /mnt/diskstation/ebook
+    type: nfs
+    options:
+      - " "
+    automount: true
+  diskstation_archMirror:
+    share: diskstation.ducamps.win:/volume2/archMirror
+    mount: /mnt/diskstation/archMirror
+    type: nfs
+    options:
+      - " "
+    automount: true
+  diskstation_nomad:
+    share: diskstation.ducamps.win:/volume2/nomad
+    mount: /mnt/diskstation/nomad
+    type: nfs
+    options:
+      - " "
+    automount: true
+
+systemd_mounts_enabled:
+  - diskstation_git
+  - diskstation_music
+  - backup_disk
+  - diskstation_photo
+  - diskstation_home
+  - diskstation_CardDav
+  - diskstation_media
+  - diskstation_ebook
+  - diskstation_archMirror
+  - diskstation_nomad
@@ -1,13 +0,0 @@
-domain:
-  name: ducamps.eu
-consul_bootstrap_expect: 3
-consul_domain: "consul"
-nomad_bootstrap_expect: 3
-nomad_client_meta:
-  - name: "env"
-    value: "production"
-vault_unseal_keys_dir_output: "~/vaultUnseal/production"
-env_default_nfs_path: ""
-env_media_nfs_path: "/volume1"
-env_automount: true
-nas_ip: "192.168.1.43"
@@ -1,21 +1,4 @@
-domain:
-  name: ducamps.dev
-#systemd_mounts: []
-#systemd_mounts_enabled: []
+systemd_mounts: []
+systemd_mounts_enabled: []
 consul_bootstrap_expect: 2
-consul_domain: "consul"
 nomad_bootstrap_expect: 2
-nomad_client_meta:
-  - name: "env"
-    value: "staging"
-
-vault_unseal_keys_dir_output: "~/vaultUnseal/staging"
-hosts_entries:
-  - ip: "{{ hostvars['nas-dev']['ansible_default_ipv4']['address'] }}"
-    name: diskstation.ducamps.eu
-
-env_default_nfs_path: ""
-env_automount: true
-nas_ip: "nfs.service.consul"
-
-
ansible/group_vars/wireguard (new file, empty)
@@ -1,10 +1,6 @@
 ---
 ansible_host: "192.168.1.42"
 ansible_python_interpreter: "/usr/bin/python3"
-default_interface: "enp2s0"
-consul_iface: "{{ default_interface}}"
-vault_iface: "{{ default_interface}}"
-nfs_iface: "{{ default_interface}}"
 wireguard_address: "10.0.0.7/24"
 wireguard_byhost_allowed_ips:
   merlin: 10.0.0.7,192.168.1.42,192.168.1.0/24
@ -15,13 +11,13 @@ wireguard_endpoint: ""
|
|||||||
wireguard_postup:
|
wireguard_postup:
|
||||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
- iptables -A FORWARD -i wg0 -j ACCEPT
|
||||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
- iptables -A FORWARD -o wg0 -j ACCEPT
|
||||||
- iptables -t nat -A POSTROUTING -o {{default_interface}} -j MASQUERADE
|
- iptables -t nat -A POSTROUTING -o enp2s0 -j MASQUERADE
|
||||||
- sysctl -w net.ipv4.ip_forward=1
|
- sysctl -w net.ipv4.ip_forward=1
|
||||||
|
|
||||||
wireguard_postdown:
|
wireguard_postdown:
|
||||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
- iptables -D FORWARD -i wg0 -j ACCEPT
|
||||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
- iptables -D FORWARD -o wg0 -j ACCEPT
|
||||||
- iptables -t nat -D POSTROUTING -o {default_interface} -j MASQUERADE
|
- iptables -t nat -D POSTROUTING -o enp2s0 -j MASQUERADE
|
||||||
- sysctl -w net.ipv4.ip_forward=0
|
- sysctl -w net.ipv4.ip_forward=0
|
||||||
|
|
||||||
partition_table:
|
partition_table:
|
||||||
|
@@ -1,23 +1,22 @@
 ---
-ansible_host: 10.0.0.1
-#ansible_host: 135.181.150.203
-default_interface: "eth0"
+ansible_host: 135.181.150.203
 wireguard_address: "10.0.0.1/24"
 wireguard_endpoint: "135.181.150.203"
 wireguard_persistent_keepalive: "20"
-wireguard_allowed_ips: 10.0.0.1
+wireguard_allowed_ips: "10.0.0.1/32,10.0.0.3/32,10.0.0.5/32"
 
 wireguard_postup:
   - iptables -A FORWARD -o %i -j ACCEPT
   - iptables -A FORWARD -i %i -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
+  - iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
   - sysctl -w net.ipv4.ip_forward=1
-  - resolvectl dns %i 192.168.1.4 192.168.1.41; resolvectl domain %i '~ducamps.win' '~ducamps.eu' '~{{ consul_domain }}'
+  - resolvectl dns %i 192.168.1.4 192.168.1.10; resolvectl domain %i '~ducamps.win' '~consul'
 
 wireguard_postdown:
   - iptables -D FORWARD -i %i -j ACCEPT
   - iptables -D FORWARD -o %i -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
+  - iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
   - sysctl -w net.ipv4.ip_forward=0
 
 wireguard_unmanaged_peers:
@@ -29,7 +28,7 @@ wireguard_unmanaged_peers:
     public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
     allowed_ips: 10.0.0.5/32
     persistent_keepalive: 0
-wireguard_dns: "192.168.1.4,192.168.1.41"
+wireguard_dns: "192.168.1.4,192.168.1.10"
 consul_client_addr: "127.0.0.1 10.0.0.1"
 consul_bind_address: "10.0.0.1"
 consul_ui: True
@@ -42,6 +41,5 @@ nomad_host_networks:
     interface: eth0
   - name: "default"
     interface: wg0
-nomad_client_network_interface : "wg0"
 vault_listener_address: 10.0.0.1
 nomad_plugins_podman: True
@@ -1,10 +1,6 @@
 ---
 ansible_host: "192.168.1.41"
 ansible_python_interpreter: "/usr/bin/python3"
-default_interface: "enu1u1"
-consul_iface: "{{ default_interface }}"
-vault_iface: "{{ default_interface }}"
-
 wireguard_address: "10.0.0.6/24"
 wireguard_byhost_allowed_ips:
   merlin: 10.0.0.6,192.168.1.41
@ -15,10 +11,10 @@ wireguard_endpoint: ""
|
|||||||
wireguard_postup:
|
wireguard_postup:
|
||||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
- iptables -A FORWARD -i wg0 -j ACCEPT
|
||||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
- iptables -A FORWARD -o wg0 -j ACCEPT
|
||||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
- iptables -t nat -A POSTROUTING -o enu1u1 -j MASQUERADE
|
||||||
|
|
||||||
wireguard_postdown:
|
wireguard_postdown:
|
||||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
- iptables -D FORWARD -i wg0 -j ACCEPT
|
||||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
- iptables -D FORWARD -o wg0 -j ACCEPT
|
||||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
- iptables -t nat -D POSTROUTING -o enu1u1 -j MASQUERADE
|
||||||
|
|
||||||
|
@@ -1,8 +1,4 @@
 ---
-
-default_interface: eth0
-vault_iface: "{{ default_interface}}"
-ansible_host: gerard-dev.lan.ducamps.dev
 wireguard_address: "10.0.1.6/24"
 perrsistent_keepalive: "20"
 wireguard_endpoint: ""
@ -10,10 +6,10 @@ wireguard_endpoint: ""
|
|||||||
wireguard_postup:
|
wireguard_postup:
|
||||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
- iptables -A FORWARD -i wg0 -j ACCEPT
|
||||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
- iptables -A FORWARD -o wg0 -j ACCEPT
|
||||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
|
||||||
|
|
||||||
wireguard_postdown:
|
wireguard_postdown:
|
||||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
- iptables -D FORWARD -i wg0 -j ACCEPT
|
||||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
- iptables -D FORWARD -o wg0 -j ACCEPT
|
||||||
- iptables -t nat -D POSTROUTING -o {{ default_interface}} -j MASQUERADE
|
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
|
||||||
|
|
||||||
|
@@ -1,39 +1,31 @@
 ---
 ansible_host: 10.0.0.4
-#ansible_host: 65.21.2.14
-default_interface: "ens3"
-nfs_iface: "wg0"
 wireguard_address: "10.0.0.4/24"
-wireguard_endpoint: "65.21.2.14"
+wireguard_endpoint: "95.216.217.5"
-wireguard_persistent_keepalive: "20"
+wireguard_persistent_keepalive: "30"
-wireguard_byhost_allowed_ips:
-  oscar: "0.0.0.0/0"
-  bleys: "0.0.0.0/0"
-wireguard_allowed_ips: "10.0.0.4/32,10.0.0.3,10.0.0.5"
+wireguard_allowed_ips: "10.0.0.4/32,10.0.0.3/32,10.0.0.5/32"
 
 wireguard_postup:
   - iptables -A FORWARD -o %i -j ACCEPT
   - iptables -A FORWARD -i %i -j ACCEPT
-  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
+  - iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
-  - sysctl -w net.ipv4.ip_forward=1
-  - resolvectl dns %i 192.168.1.4 192.168.1.41; resolvectl domain %i '~ducamps.win' '~ducamps.eu' '~{{ consul_domain }}'
 
 wireguard_postdown:
   - iptables -D FORWARD -i %i -j ACCEPT
   - iptables -D FORWARD -o %i -j ACCEPT
-  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
+  - iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
-  - sysctl -w net.ipv4.ip_forward=0
 
 wireguard_unmanaged_peers:
   phone:
-    public_key: IYKgrQ2VJUbOnupSqedOfIilsbmBBABZUTRF9ZoTrkc=
+    public_key: ioG35kDFTtip+Acfq+je9qDHYbZij+J6+Pg3T6Z4N0w=
     allowed_ips: 10.0.0.3/32
     persistent_keepalive: 0
   zen:
     public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
     allowed_ips: 10.0.0.5/32
     persistent_keepalive: 0
-wireguard_dns: "192.168.1.4,192.168.1.41"
+wireguard_dns: "192.168.1.40,192.168.1.10"
 consul_client_addr: "127.0.0.1 10.0.0.4"
 consul_bind_address: "10.0.0.4"
 consul_ui: True
@ -43,8 +35,7 @@ nomad_host_networks:
|
|||||||
- name: "private"
|
- name: "private"
|
||||||
interface: wg0
|
interface: wg0
|
||||||
- name: "public"
|
- name: "public"
|
||||||
interface: ens3
|
interface: eth0
|
||||||
- name: "default"
|
- name: "default"
|
||||||
interface: wg0
|
interface: wg0
|
||||||
vault_listener_address: 10.0.0.4
|
vault_listener_address: 10.0.0.4
|
||||||
nomad_plugins_podman: True
|
|
||||||
|
@ -1,8 +1,4 @@
|
|||||||
---
|
---
|
||||||
|
|
||||||
ansible_host: merlin-dev.lan.ducamps.dev
|
|
||||||
default_interface: eth0
|
|
||||||
vault_iface: "{{ default_interface}}"
|
|
||||||
wireguard_address: "10.0.1.4/24"
|
wireguard_address: "10.0.1.4/24"
|
||||||
wireguard_endpoint: "{{ ansible_default_ipv4.address }}"
|
wireguard_endpoint: "{{ ansible_default_ipv4.address }}"
|
||||||
wireguard_persistent_keepalive: "30"
|
wireguard_persistent_keepalive: "30"
|
||||||
@ -10,12 +6,12 @@ wireguard_persistent_keepalive: "30"
|
|||||||
wireguard_postup:
|
wireguard_postup:
|
||||||
- iptables -A FORWARD -o %i -j ACCEPT
|
- iptables -A FORWARD -o %i -j ACCEPT
|
||||||
- iptables -A FORWARD -i %i -j ACCEPT
|
- iptables -A FORWARD -i %i -j ACCEPT
|
||||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
|
||||||
|
|
||||||
wireguard_postdown:
|
wireguard_postdown:
|
||||||
- iptables -D FORWARD -i %i -j ACCEPT
|
- iptables -D FORWARD -i %i -j ACCEPT
|
||||||
- iptables -D FORWARD -o %i -j ACCEPT
|
- iptables -D FORWARD -o %i -j ACCEPT
|
||||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
|
||||||
|
|
||||||
wireguard_unmanaged_peers:
|
wireguard_unmanaged_peers:
|
||||||
phone:
|
phone:
|
||||||
|
@ -1,17 +0,0 @@
|
|||||||
---
|
|
||||||
ansible_host: nas-dev.lan.ducamps.dev
|
|
||||||
default_interface: eth0
|
|
||||||
vault_iface: "{{ default_interface}}"
|
|
||||||
wireguard_address: "10.0.1.8/24"
|
|
||||||
perrsistent_keepalive: "30"
|
|
||||||
wireguard_endpoint: ""
|
|
||||||
|
|
||||||
wireguard_postup:
|
|
||||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
|
||||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
|
||||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
|
||||||
|
|
||||||
wireguard_postdown:
|
|
||||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
|
||||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
|
||||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
|
@ -1,19 +0,0 @@
|
|||||||
---
|
|
||||||
wireguard_address: "10.0.0.8/24"
|
|
||||||
default_interface: "enp2s0"
|
|
||||||
consul_iface: "{{ default_interface}}"
|
|
||||||
vault_iface: "{{ default_interface}}"
|
|
||||||
perrsistent_keepalive: "30"
|
|
||||||
wireguard_endpoint: ""
|
|
||||||
wireguard_byhost_allowed_ips:
|
|
||||||
merlin: 10.0.0.8,192.168.1.43
|
|
||||||
corwin: 10.0.0.8,192.168.1.43
|
|
||||||
wireguard_postup:
|
|
||||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
|
||||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
|
||||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
|
||||||
|
|
||||||
wireguard_postdown:
|
|
||||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
|
||||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
|
||||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
|
@ -1,9 +1,4 @@
|
|||||||
---
|
---
|
||||||
default_interface: "enp1s0"
|
|
||||||
consul_iface: "{{ default_interface}}"
|
|
||||||
vault_iface: "{{ default_interface}}"
|
|
||||||
nfs_iface: "{{ default_interface}}"
|
|
||||||
nomad_client_cpu_total_compute: 8000
|
|
||||||
wireguard_address: "10.0.0.2/24"
|
wireguard_address: "10.0.0.2/24"
|
||||||
wireguard_byhost_allowed_ips:
|
wireguard_byhost_allowed_ips:
|
||||||
merlin: 10.0.0.2,192.168.1.40
|
merlin: 10.0.0.2,192.168.1.40
|
||||||
@ -14,13 +9,18 @@ wireguard_endpoint: ""
|
|||||||
wireguard_postup:
|
wireguard_postup:
|
||||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
- iptables -A FORWARD -i wg0 -j ACCEPT
|
||||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
- iptables -A FORWARD -o wg0 -j ACCEPT
|
||||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
- iptables -t nat -A POSTROUTING -o enp2s0 -j MASQUERADE
|
||||||
|
|
||||||
wireguard_postdown:
|
wireguard_postdown:
|
||||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
- iptables -D FORWARD -i wg0 -j ACCEPT
|
||||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
- iptables -D FORWARD -o wg0 -j ACCEPT
|
||||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
- iptables -t nat -D POSTROUTING -o enp2s0 -j MASQUERADE
|
||||||
|
consul_snapshot: True
|
||||||
|
|
||||||
|
vault_snapshot: true
|
||||||
|
vault_backup_location: "/mnt/diskstation/git/backup/vault"
|
||||||
|
vault_roleID: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:vault-snapshot_approle') }}"
|
||||||
|
vault_secretID: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:vault-snapshot_secretID') }}"
|
||||||
partition_table:
|
partition_table:
|
||||||
- device: "/dev/sda"
|
- device: "/dev/sda"
|
||||||
label: gpt
|
label: gpt
|
||||||
|
@ -1,7 +1,4 @@
|
|||||||
---
|
---
|
||||||
ansible_host: oscar-dev.lan.ducamps.dev
|
|
||||||
default_interface: eth0
|
|
||||||
vault_iface: "{{ default_interface}}"
|
|
||||||
wireguard_address: "10.0.1.2/24"
|
wireguard_address: "10.0.1.2/24"
|
||||||
perrsistent_keepalive: "30"
|
perrsistent_keepalive: "30"
|
||||||
wireguard_endpoint: ""
|
wireguard_endpoint: ""
|
||||||
@ -9,9 +6,14 @@ wireguard_endpoint: ""
|
|||||||
wireguard_postup:
|
wireguard_postup:
|
||||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
- iptables -A FORWARD -i wg0 -j ACCEPT
|
||||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
- iptables -A FORWARD -o wg0 -j ACCEPT
|
||||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
|
||||||
|
|
||||||
wireguard_postdown:
|
wireguard_postdown:
|
||||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
- iptables -D FORWARD -i wg0 -j ACCEPT
|
||||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
- iptables -D FORWARD -o wg0 -j ACCEPT
|
||||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
|
||||||
|
consul_snapshot: True
|
||||||
|
vault_snapshot: True
|
||||||
|
vault_backup_location: "/mnt/diskstation/git/backup/vault"
|
||||||
|
vault_roleID: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:vault-snapshot_approle') }}"
|
||||||
|
vault_secretID: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:vault-snapshot_secretID') }}"
|
||||||
|
@ -2,24 +2,12 @@
|
|||||||
requirements:
|
requirements:
|
||||||
ansible-galaxy install -g -r roles/requirements.yml
|
ansible-galaxy install -g -r roles/requirements.yml
|
||||||
|
|
||||||
deploy_production:
|
deploy_production: generate-token
|
||||||
ansible-playbook site.yml -i production -u ansible
|
ansible-playbook site.yml -i production -u ansible
|
||||||
|
|
||||||
deploy_production_wiregard:
|
deploy_staging: generate-token
|
||||||
ansible-playbook playbooks/wireguard.yml -i production -u ansible
|
|
||||||
|
|
||||||
deploy_staging:
|
|
||||||
ansible-playbook playbooks/wireguard.yml -i staging -u ansible
|
|
||||||
ansible-playbook site.yml -i staging -u ansible
|
ansible-playbook site.yml -i staging -u ansible
|
||||||
|
|
||||||
|
generate-token:
|
||||||
deploy_staging_base:
|
export VAULT_TOKEN=`vault token create -policy=ansible -field="token" -period 6h`
|
||||||
ansible-playbook playbooks/sssd.yml -i staging -u ansible
|
|
||||||
ansible-playbook playbooks/wireguard.yml -i staging -u ansible
|
|
||||||
ansible-playbook playbooks/server.yml -i staging -u ansible
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
view-allvault:
|
|
||||||
ansible-vault view `git grep -l "ANSIBLE_VAULT;1.1;AES256$$"`
|
|
||||||
|
|
||||||
|
@ -1,9 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
|
|
||||||
readonly vault_password_file_encrypted="$(dirname $0)/vault-password.gpg"
|
|
||||||
|
|
||||||
# flock used to work around "gpg: decryption failed: No secret key" in tf-stage2
|
|
||||||
# would otherwise need 'auto-expand-secmem' (https://dev.gnupg.org/T3530#106174)
|
|
||||||
flock "$vault_password_file_encrypted" \
|
|
||||||
gpg --batch --decrypt --quiet "$vault_password_file_encrypted"
|
|
||||||
|
|
Binary file not shown.
@ -1,45 +0,0 @@
|
|||||||
---
|
|
||||||
prerun: false
|
|
||||||
dependency:
|
|
||||||
name: galaxy
|
|
||||||
enabled: false
|
|
||||||
driver:
|
|
||||||
name: vagrant
|
|
||||||
provider:
|
|
||||||
name: libvirt
|
|
||||||
default_box: archlinux/archlinux
|
|
||||||
platforms:
|
|
||||||
- name: oscar-dev
|
|
||||||
cpu: 1
|
|
||||||
memory: 1024
|
|
||||||
box: archlinux/archlinux
|
|
||||||
- name: merlin-dev
|
|
||||||
cpu: 1
|
|
||||||
memory: 1024
|
|
||||||
box: generic/rocky9
|
|
||||||
- name: gerard-dev
|
|
||||||
cpu: 1
|
|
||||||
memory: 1024
|
|
||||||
box: debian/bookworm64
|
|
||||||
- name: nas-dev
|
|
||||||
cpu: 1
|
|
||||||
memory: 1024
|
|
||||||
box: archlinux/archlinux
|
|
||||||
provisioner:
|
|
||||||
name: ansible
|
|
||||||
connection_options:
|
|
||||||
ansible_ssh_user: vagrant
|
|
||||||
ansible_become: true
|
|
||||||
env:
|
|
||||||
ANSIBLE_CONFIG: ../../ansible.cfg
|
|
||||||
ANSIBLE_ROLES_PATH: "../../roles"
|
|
||||||
log: true
|
|
||||||
lint:
|
|
||||||
name: ansible-lint
|
|
||||||
inventory:
|
|
||||||
host_vars: []
|
|
||||||
links:
|
|
||||||
group_vars: ../../group_vars
|
|
||||||
hosts: ../../staging
|
|
||||||
verifier:
|
|
||||||
name: ansible
|
|
@ -1,54 +1,12 @@
|
|||||||
---
|
---
|
||||||
- name: Consul install
|
- hosts: all
|
||||||
hosts: all
|
name: Hashicorp stack
|
||||||
roles:
|
|
||||||
- role: ansible-consul
|
|
||||||
become: true
|
|
||||||
|
|
||||||
- name: Vault install
|
|
||||||
hosts: homelab
|
|
||||||
roles:
|
roles:
|
||||||
- role: ansible-hashicorp-vault
|
- role: ansible-hashicorp-vault
|
||||||
|
when: inventory_hostname not in groups['VPS']
|
||||||
|
become: true
|
||||||
|
- role: ansible-consul
|
||||||
become: true
|
become: true
|
||||||
post_tasks:
|
|
||||||
- name: Stat root file
|
|
||||||
ansible.builtin.stat:
|
|
||||||
path: "{{ vault_unseal_keys_dir_output }}/rootkey"
|
|
||||||
register: rootkey_exist
|
|
||||||
delegate_to: localhost
|
|
||||||
- name: Reading root contents
|
|
||||||
ansible.builtin.command: cat "{{ vault_unseal_keys_dir_output }}/rootkey"
|
|
||||||
register: root_token
|
|
||||||
delegate_to: localhost
|
|
||||||
when: rootkey_exist.stat.exists
|
|
||||||
changed_when: false
|
|
||||||
- name: debug
|
|
||||||
ansible.builtin.debug:
|
|
||||||
var: root_token
|
|
||||||
- name: Generate nomad token
|
|
||||||
community.hashi_vault.vault_token_create:
|
|
||||||
renewable: true
|
|
||||||
policies: "nomad-server-policy"
|
|
||||||
period: 72h
|
|
||||||
no_parent: true
|
|
||||||
token: "{{ root_token.stdout }}"
|
|
||||||
url: "http://active.vault.service.consul:8200"
|
|
||||||
retries: 4
|
|
||||||
run_once: true
|
|
||||||
delegate_to: localhost
|
|
||||||
when: root_token.stdout is defined
|
|
||||||
register: nomad_token_data
|
|
||||||
|
|
||||||
- name: Gather nomad token
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
nomad_vault_token: "{{ nomad_token_data.login.auth.client_token }}"
|
|
||||||
when: nomad_token_data.login is defined
|
|
||||||
|
|
||||||
- name: nomad
|
|
||||||
hosts: all
|
|
||||||
vars:
|
|
||||||
unseal_keys_dir_output: ~/vaultunseal
|
|
||||||
roles:
|
|
||||||
- role: ansible-nomad
|
- role: ansible-nomad
|
||||||
become: true
|
become: true
|
||||||
- role: docker
|
- role: docker
|
||||||
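For reference, the post_tasks removed above minted the Nomad server token from the Vault root token; a rough CLI sketch of the same operation (policy name, period and URL are taken from the deleted playbook, everything else is assumed):

```sh
# hedged CLI equivalent of the removed "Generate nomad token" post_task;
# assumes an admin token is already exported in VAULT_TOKEN
export VAULT_ADDR=http://active.vault.service.consul:8200
vault token create -policy=nomad-server-policy -period=72h -orphan -field=token
```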
|
@ -1,9 +0,0 @@
|
|||||||
---
|
|
||||||
- hosts:
|
|
||||||
- homelab
|
|
||||||
- VPS
|
|
||||||
- NAS
|
|
||||||
vars:
|
|
||||||
# certbot_force: true
|
|
||||||
roles:
|
|
||||||
- autofs
|
|
@ -1,6 +1,5 @@
|
|||||||
---
|
---
|
||||||
- hosts: all
|
- hosts: all
|
||||||
gather_facts: false
|
|
||||||
become: true
|
become: true
|
||||||
roles:
|
roles:
|
||||||
- ansible_bootstrap
|
- ansible_bootstrap
|
||||||
|
@ -1,54 +1,16 @@
|
|||||||
---
|
---
|
||||||
- name: Database playbook
|
- hosts: database
|
||||||
hosts: database
|
|
||||||
vars:
|
vars:
|
||||||
# certbot_force: true
|
# certbot_force: true
|
||||||
pre_tasks:
|
|
||||||
- name: Install pgvecto.rs (immich)
|
|
||||||
aur:
|
|
||||||
name: pgvecto.rs-bin
|
|
||||||
state: present
|
|
||||||
become: true
|
|
||||||
become_user: aur_builder
|
|
||||||
- name: Add database member to pg_hba replication
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
postgresql_hba_entries: "{{ postgresql_hba_entries + [\
|
|
||||||
{'type':'host', \
|
|
||||||
'database': 'replication',\
|
|
||||||
'user':'repli',\
|
|
||||||
'address':hostvars[item]['ansible_'+hostvars[item]['default_interface']]['ipv4']['address']+'/32',\
|
|
||||||
'auth_method':'trust'}] }}"
|
|
||||||
loop: '{{ groups.database }}'
|
|
||||||
roles:
|
roles:
|
||||||
- role: ansible-role-postgresql
|
- role: ansible-role-postgresql
|
||||||
become: true
|
become: true
|
||||||
tasks:
|
tasks:
|
||||||
- name: Launch replication
|
- name: add pg_read_all_data to dump
|
||||||
ansible.builtin.command: pg_basebackup -D /var/lib/postgres/data -h {{groups["database_active"]|first}} -U repli -Fp -Xs -P -R -w
|
community.postgresql.postgresql_membership:
|
||||||
args:
|
target_roles:
|
||||||
creates: /var/lib/postgres/data/postgresql.conf
|
- dump
|
||||||
|
groups:
|
||||||
|
- pg_read_all_data
|
||||||
become: true
|
become: true
|
||||||
become_user: postgres
|
become_user: "{{ postgresql_user }}"
|
||||||
when: inventory_hostname in groups["database_standby"]
|
|
||||||
- name: Ensure PostgreSQL is started and enabled on boot.
|
|
||||||
ansible.builtin.service:
|
|
||||||
name: '{{ postgresql_daemon }}'
|
|
||||||
state: '{{ postgresql_service_state }}'
|
|
||||||
enabled: '{{ postgresql_service_enabled }}'
|
|
||||||
become: true
|
|
||||||
|
|
||||||
- name: Set Postgress shared libraries
|
|
||||||
community.postgresql.postgresql_set:
|
|
||||||
name: shared_preload_libraries
|
|
||||||
value: vectors.so
|
|
||||||
become: true
|
|
||||||
become_user: postgres
|
|
||||||
when: inventory_hostname in groups["database_active"]
|
|
||||||
notify: Restart postgresql
|
|
||||||
- name: Set Postgress shared libraries
|
|
||||||
community.postgresql.postgresql_set:
|
|
||||||
name: search_path
|
|
||||||
value: '$user, public, vectors'
|
|
||||||
become: true
|
|
||||||
become_user: postgres
|
|
||||||
when: inventory_hostname in groups["database_active"]
|
|
||||||
|
@ -1,6 +0,0 @@
|
|||||||
---
|
|
||||||
- name: DNS playbook
|
|
||||||
hosts: DNS
|
|
||||||
roles:
|
|
||||||
- role: pdns_recursor-ansible
|
|
||||||
become: true
|
|
@ -1,28 +0,0 @@
|
|||||||
---
|
|
||||||
- name: gather all
|
|
||||||
hosts: all
|
|
||||||
- name: NAS playbook
|
|
||||||
hosts: NAS
|
|
||||||
vars:
|
|
||||||
# certbot_force: true
|
|
||||||
pre_tasks:
|
|
||||||
- name: include task NasBind
|
|
||||||
ansible.builtin.include_tasks:
|
|
||||||
file: tasks/NasBind.yml
|
|
||||||
loop: "{{ nas_bind_source }}"
|
|
||||||
- name: create nomad folder
|
|
||||||
ansible.builtin.file:
|
|
||||||
path: "{{ nas_bind_target }}/nomad/{{ item.name }}"
|
|
||||||
owner: "{{ item.owner|default('root') }}"
|
|
||||||
state: directory
|
|
||||||
become: true
|
|
||||||
loop: "{{ NAS_nomad_folder }}"
|
|
||||||
roles:
|
|
||||||
- role: ansible-role-nut
|
|
||||||
become: true
|
|
||||||
- role: ansible-role-nfs
|
|
||||||
become: true
|
|
||||||
- role: ansible-role-pureftpd
|
|
||||||
become: true
|
|
||||||
- role: vladgh.samba.server
|
|
||||||
become: true
|
|
@ -2,7 +2,6 @@
|
|||||||
- hosts:
|
- hosts:
|
||||||
- homelab
|
- homelab
|
||||||
- VPS
|
- VPS
|
||||||
- NAS
|
|
||||||
vars:
|
vars:
|
||||||
# certbot_force: true
|
# certbot_force: true
|
||||||
tasks:
|
tasks:
|
||||||
@ -23,4 +22,7 @@
|
|||||||
loop_var: create
|
loop_var: create
|
||||||
roles:
|
roles:
|
||||||
- system
|
- system
|
||||||
|
- autofs
|
||||||
|
- role: msmtp
|
||||||
|
when: ansible_os_family != "RedHat"
|
||||||
- cronie
|
- cronie
|
||||||
|
@ -1,18 +0,0 @@
|
|||||||
- name: Ensure base NFS directory exist
|
|
||||||
ansible.builtin.file:
|
|
||||||
path: "{{ item.dest }}"
|
|
||||||
state: directory
|
|
||||||
become: true
|
|
||||||
- name: Ensure source NFS directory exist
|
|
||||||
ansible.builtin.file:
|
|
||||||
path: "{{ item.source }}"
|
|
||||||
state: directory
|
|
||||||
become: true
|
|
||||||
- name: Bind NAS export
|
|
||||||
ansible.posix.mount:
|
|
||||||
path: "{{ item.dest }}"
|
|
||||||
src: "{{ item.source }}"
|
|
||||||
opts: bind
|
|
||||||
fstype: none
|
|
||||||
state: mounted
|
|
||||||
become: true
|
|
@ -1 +0,0 @@
|
|||||||
path = /exports/homes/%S
|
|
@ -14,13 +14,10 @@
|
|||||||
- docker
|
- docker
|
||||||
become: true
|
become: true
|
||||||
become_user: '{{ user.name }}'
|
become_user: '{{ user.name }}'
|
||||||
|
|
||||||
- hosts: all
|
|
||||||
roles:
|
|
||||||
- role: user_config
|
- role: user_config
|
||||||
vars:
|
vars:
|
||||||
user_config_username: "{{ user.name }}"
|
user_config_username: '{{ user.name }}'
|
||||||
become_user: "{{ user.name }}"
|
become_user: '{{ user.name }}'
|
||||||
become: true
|
become: true
|
||||||
- role: user_config
|
- role: user_config
|
||||||
vars:
|
vars:
|
@ -1,52 +1,27 @@
|
|||||||
[DNS]
|
[homelab]
|
||||||
oscar
|
oscar
|
||||||
|
bleys
|
||||||
|
gerard
|
||||||
|
|
||||||
|
[VPS]
|
||||||
|
corwin
|
||||||
|
merlin
|
||||||
|
|
||||||
|
|
||||||
[dhcp]
|
[dhcp]
|
||||||
oberon
|
gerard
|
||||||
|
|
||||||
[database_active]
|
[wireguard]
|
||||||
|
corwin
|
||||||
|
oscar
|
||||||
|
merlin
|
||||||
|
gerard
|
||||||
bleys
|
bleys
|
||||||
|
|
||||||
[database_standby]
|
[database]
|
||||||
oscar
|
oscar
|
||||||
|
bleys
|
||||||
[database:children]
|
|
||||||
database_active
|
|
||||||
database_standby
|
|
||||||
|
|
||||||
[rsyncd]
|
[rsyncd]
|
||||||
oscar
|
oscar
|
||||||
bleys
|
bleys
|
||||||
|
|
||||||
[wireguard:children]
|
|
||||||
production
|
|
||||||
|
|
||||||
[NAS]
|
|
||||||
oberon
|
|
||||||
|
|
||||||
[cluster]
|
|
||||||
oscar
|
|
||||||
#gerard
|
|
||||||
bleys
|
|
||||||
|
|
||||||
|
|
||||||
[homelab:children]
|
|
||||||
NAS
|
|
||||||
cluster
|
|
||||||
|
|
||||||
[VPS]
|
|
||||||
merlin
|
|
||||||
|
|
||||||
[region:children]
|
|
||||||
homelab
|
|
||||||
VPS
|
|
||||||
production
|
|
||||||
|
|
||||||
[production]
|
|
||||||
oscar
|
|
||||||
merlin
|
|
||||||
#gerard
|
|
||||||
bleys
|
|
||||||
oberon
|
|
||||||
|
|
||||||
[staging]
|
|
||||||
|
@ -1,12 +1,16 @@
|
|||||||
---
|
---
|
||||||
- hosts: all
|
- hosts: all
|
||||||
remote_user: root
|
remote_user: root
|
||||||
|
vars:
|
||||||
|
provissionning_default_root: "{{ lookup('hashi_vault','secret=secrets/data/ansible/user:root') }}"
|
||||||
roles:
|
roles:
|
||||||
- ansible-arch-provissionning
|
- ansible-arch-provissionning
|
||||||
|
|
||||||
- hosts: all
|
- hosts: all
|
||||||
remote_user: root
|
remote_user: root
|
||||||
roles:
|
vars:
|
||||||
|
ansible_password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/user:root') }}"
|
||||||
|
roles:
|
||||||
- ansible_bootstrap
|
- ansible_bootstrap
|
||||||
|
|
||||||
# - remote_user: "{{ user.name }}"
|
# - remote_user: "{{ user.name }}"
|
||||||
|
@ -1,49 +1,41 @@
|
|||||||
---
|
---
|
||||||
roles:
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-arch-provissionning.git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-arch-provissionning.git
|
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-postgresql.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-role-postgresql.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-sssd
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-role-sssd
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible_bootstrap.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible_bootstrap.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/autofs.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/autofs.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/cronie.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/cronie.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/docker.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/docker.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/hass-client-control.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/hass-client-control.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/msmtp.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/msmtp.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/rsyncd.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/rsyncd.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/system.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/system.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/user_config.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/user_config.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: git@github.com:vincentDcmps/ansible-role-wireguard.git
|
- src: git@github.com:vincentDcmps/ansible-role-wireguard.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-consul.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-consul.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-hashicorp-vault.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-hashicorp-vault.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-nomad.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-nomad.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/mpd.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/mpd.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-dhcpd.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-dhcpd.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-user.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-user.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: git@github.com:vincentDcmps/ansible-role-nfs.git
|
- src: git@github.com:vincentDcmps/ansible-role-nfs.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: git@github.com:vincentDcmps/ansible-role-nut.git
|
|
||||||
scm: git
|
|
||||||
- src: git@git.ducamps.eu:2222/ansible-roles/ansible-role-pureftpd.git
|
|
||||||
scm: git
|
|
||||||
- src: https://github.com/PowerDNS/pdns_recursor-ansible.git
|
|
||||||
collections:
|
|
||||||
- name: vladgh.samba
|
|
||||||
|
@ -1,10 +1,10 @@
|
|||||||
---
|
---
|
||||||
- import_playbook: playbooks/server.yml
|
|
||||||
- import_playbook: playbooks/dhcpd.yml
|
|
||||||
- import_playbook: playbooks/dns.yml
|
|
||||||
- import_playbook: playbooks/HashicorpStack.yml
|
|
||||||
- import_playbook: playbooks/nas.yml
|
|
||||||
- import_playbook: playbooks/autofs.yml
|
|
||||||
- import_playbook: playbooks/sssd.yml
|
- import_playbook: playbooks/sssd.yml
|
||||||
|
- import_playbook: playbooks/wireguard.yml
|
||||||
|
- import_playbook: playbooks/server.yml
|
||||||
|
- import_playbook: playbooks/HashicorpStack.yml
|
||||||
- import_playbook: playbooks/database.yml
|
- import_playbook: playbooks/database.yml
|
||||||
- import_playbook: playbooks/rsyncd.yml
|
- import_playbook: playbooks/rsyncd.yml
|
||||||
|
- import_playbook: playbooks/music-player.yml
|
||||||
|
- import_playbook: playbooks/dhcpd.yml
|
||||||
|
- import_playbook: playbooks/user_config.yml
|
||||||
|
@ -1,44 +1,18 @@
|
|||||||
[DNS]
|
[homelab]
|
||||||
oscar-dev
|
|
||||||
|
|
||||||
[database_active]
|
|
||||||
oscar-dev
|
|
||||||
|
|
||||||
[database_standby]
|
|
||||||
gerard-dev
|
|
||||||
|
|
||||||
[database:children]
|
|
||||||
database_active
|
|
||||||
database_standby
|
|
||||||
|
|
||||||
[wireguard:children]
|
|
||||||
staging
|
|
||||||
|
|
||||||
[NAS]
|
|
||||||
nas-dev
|
|
||||||
|
|
||||||
[cluster]
|
|
||||||
oscar-dev
|
oscar-dev
|
||||||
gerard-dev
|
gerard-dev
|
||||||
|
|
||||||
[homelab:children]
|
|
||||||
NAS
|
|
||||||
cluster
|
|
||||||
|
|
||||||
[VPS]
|
[VPS]
|
||||||
merlin-dev
|
merlin-dev
|
||||||
|
|
||||||
[region:children]
|
[database]
|
||||||
homelab
|
oscar-dev
|
||||||
VPS
|
|
||||||
|
[wireguard:children]
|
||||||
staging
|
staging
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
[staging]
|
[staging]
|
||||||
oscar-dev
|
oscar-dev
|
||||||
gerard-dev
|
gerard-dev
|
||||||
merlin-dev
|
merlin-dev
|
||||||
nas-dev
|
|
||||||
|
|
||||||
[production]
|
|
||||||
|
@ -6,16 +6,15 @@
|
|||||||
"tags": [
|
"tags": [
|
||||||
"homer.enable=true",
|
"homer.enable=true",
|
||||||
"homer.name=Diskstation",
|
"homer.name=Diskstation",
|
||||||
"homer.url=https://syno.ducamps.eu",
|
"homer.url=https://syno.ducamps.win",
|
||||||
"homer.logo=https://syno.ducamps.eu/webman/resources/images/icon_dsm_96.png",
|
"homer.logo=https://syno.ducamps.win/webman/resources/images/icon_dsm_96.png",
|
||||||
"homer.service=Application",
|
"homer.service=Application",
|
||||||
"homer.target=_blank",
|
"homer.target=_blank",
|
||||||
|
|
||||||
"traefik.enable=true",
|
"traefik.enable=true",
|
||||||
"traefik.http.routers.syno.rule=Host(`syno.ducamps.eu`)",
|
"traefik.http.routers.syno.rule=Host(`syno.ducamps.win`)",
|
||||||
"traefik.http.routers.syno.tls.domains[0].sans=syno.ducamps.eu",
|
"traefik.http.routers.syno.tls.domains[0].sans=syno.ducamps.win",
|
||||||
"traefik.http.routers.syno.tls.certresolver=myresolver",
|
"traefik.http.routers.syno.tls.certresolver=myresolver"
|
||||||
"traefik.http.routers.syno.entrypoints=web,websecure"
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2,6 +2,7 @@
|
|||||||
|
|
||||||
## Status
|
## Status
|
||||||
|
|
||||||
|
|
||||||
## Context
|
## Context
|
||||||
|
|
||||||
Currently the Vault backend is stored in Consul KV
|
Currently the Vault backend is stored in Consul KV
|
||||||
@ -15,9 +16,7 @@ migrate to vault integrated storage
|
|||||||
## Consequences
|
## Consequences
|
||||||
|
|
||||||
to do:
|
to do:
|
||||||
|
- migration plan https://developer.hashicorp.com/vault/tutorials/raft/raft-migration
|
||||||
- [migration plan](https://developer.hashicorp.com/vault/tutorials/raft/raft-migration)
|
|
||||||
|
|
||||||
1. switch oscar, gerard and bleys to integrated storage; merlin will stay on Consul storage during the operation, until decommissioning
|
1. switch oscar, gerard and bleys to integrated storage; merlin will stay on Consul storage during the operation, until decommissioning
|
||||||
2. stop the vault service on oscar
|
2. stop the vault service on oscar
|
||||||
3. run the migration command
|
3. run the migration command
|
||||||
@ -25,4 +24,4 @@ to do:
|
|||||||
5. decommission vault on merlin
|
5. decommission vault on merlin
|
||||||
6. adapt the backup job
|
6. adapt the backup job
|
||||||
|
|
||||||
- [backup](https://developer.hashicorp.com/vault/tutorials/standard-procedures/sop-backup)
|
- backup https://developer.hashicorp.com/vault/tutorials/standard-procedures/sop-backup
|
||||||
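A minimal sketch of steps 2-3 and of the adapted backup, following the two linked tutorials; the Consul address, Raft path and cluster_addr below are assumptions, not values from this repo:

```sh
# stop vault on the node being migrated (step 2), then run the migration (step 3)
systemctl stop vault

cat > migrate.hcl <<'EOF'
storage_source "consul" {
  address = "127.0.0.1:8500"
  path    = "vault/"
}

storage_destination "raft" {
  path = "/opt/vault/data"
}

cluster_addr = "https://10.0.0.1:8201"
EOF

vault operator migrate -config=migrate.hcl

# step 6: with integrated storage, the backup job becomes a raft snapshot
vault operator raft snapshot save vault-backup.snap
```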
|
@ -1,54 +0,0 @@
|
|||||||
# 003-mailserver
|
|
||||||
|
|
||||||
## Status
|
|
||||||
|
|
||||||
done
|
|
||||||
|
|
||||||
## Context
|
|
||||||
|
|
||||||
Gandi free email will become a paid service in 2 months.
|
|
||||||
|
|
||||||
Given this, it is worth studying a self-hosted mail solution.
|
|
||||||
|
|
||||||
### Domain name
|
|
||||||
|
|
||||||
should I take advantage of this to change the domain name?
|
|
||||||
|
|
||||||
Pros:
|
|
||||||
|
|
||||||
- easier to test
|
|
||||||
- could redirect the old domain name to the new one until the Gandi domain expires (2026)
|
|
||||||
- get a more "normal" extension
|
|
||||||
|
|
||||||
Cons:
|
|
||||||
|
|
||||||
- need to progressively update every personal account
|
|
||||||
|
|
||||||
### Container location
|
|
||||||
|
|
||||||
on hetzner:
|
|
||||||
|
|
||||||
- need to increase memory
|
|
||||||
|
|
||||||
on homelab:
|
|
||||||
|
|
||||||
- need to redirect all server mail flows to Hetzner to be sure that mail is sent from the Hetzner IP (we control the PTR on this IP)
|
|
||||||
- Hetzner would also become a SPOF
|
|
||||||
|
|
||||||
### Software choice
|
|
||||||
|
|
||||||
The mail server will run in the Nomad cluster.
|
|
||||||
|
|
||||||
docker-mailserver -> 1 container
|
|
||||||
mailu
|
|
||||||
|
|
||||||
## Decision
|
|
||||||
|
|
||||||
we will switch to a new domain name registered on https://www.bookmyname.com/: ducamps.eu
|
|
||||||
docker-mailserver will be easier to configure because there is only one container to migrate to Nomad
|
|
||||||
to begin with, the container will be launched on Hetzner
|
|
||||||
|
|
||||||
## Consequences
|
|
||||||
|
|
||||||
- need to buy a new domain name and configure DNS (done)
|
|
||||||
- increase memory on corwin (done)
|
|
@ -1,117 +0,0 @@
|
|||||||
# DNS
|
|
||||||
|
|
||||||
## 001 Recursor out off NAS
|
|
||||||
|
|
||||||
### Status
|
|
||||||
|
|
||||||
done
|
|
||||||
|
|
||||||
### Context
|
|
||||||
|
|
||||||
Currently the main local-domain DNS is located on the NAS.
|
|
||||||
|
|
||||||
goal:
|
|
||||||
|
|
||||||
- avoid a DNS outage in case of NAS reboot (my Synology is 10 years old and a little slow to reboot); moreover, during a NAS reboot we lose the adblock DNS in the Nomad cluster because Nomad depends on the NFS share.
|
|
||||||
- remove the direct redirection to the service.consul DNS and the iptables rule used to redirect port 53 to Consul on gerard; instead, the new DNS can forward directly to an active Consul node on port 8300
|
|
||||||
|
|
||||||
#### DNS software
|
|
||||||
|
|
||||||
needs DHCP dynamic update support
|
|
||||||
must be able to forward a domain to a port other than 53
|
|
||||||
|
|
||||||
### Decision
|
|
||||||
|
|
||||||
we will migrate the main domain DNS from the NAS to gerard (PowerDNS);
|
|
||||||
PowerDNS provides two distinct binaries, one for the authoritative server and another for the recursor;
|
|
||||||
the goal is first to migrate the recursive part from the Synology to a physical service
|
|
||||||
and in a second step to migrate the authoritative server into the Nomad cluster
|
|
||||||
|
|
||||||
### Consequences
|
|
||||||
|
|
||||||
before moving the authoritative server we need to remove the DB DNS dependency (create db Consul services);
|
|
||||||
we need to delete the iptables rule on gerard before deploying
|
|
||||||
|
|
||||||
## 002 Each node queries its own Consul client for Consul DNS
|
|
||||||
|
|
||||||
### Status
|
|
||||||
|
|
||||||
done
|
|
||||||
|
|
||||||
### Context
|
|
||||||
|
|
||||||
to avoid a cluster failure if the DNS recursor goes down,
|
|
||||||
I would like each cluster client to query its own Consul client
|
|
||||||
first to resolve Consul DNS queries
|
|
||||||
|
|
||||||
### Decision
|
|
||||||
|
|
||||||
Implement systemd-resolved on all cluster members and add a DNS redirection
|
|
||||||
|
|
||||||
### Consequences
|
|
||||||
|
|
||||||
we need to modify the Ansible system role to activate systemd-resolved, and the Consul role to configure the redirection; a sketch follows.
|
|
||||||
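A minimal sketch of that redirection, assuming the Consul agent's default DNS port 8600 and a recent systemd that accepts `ip:port` in `DNS=`:

```sh
# drop-in that sends *.consul queries to the local Consul agent
mkdir -p /etc/systemd/resolved.conf.d
cat > /etc/systemd/resolved.conf.d/consul.conf <<'EOF'
[Resolve]
DNS=127.0.0.1:8600
Domains=~consul
EOF
systemctl restart systemd-resolved
```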
|
|
||||||
## 003 Migrate authoritative DNS from NAS to cluster
|
|
||||||
|
|
||||||
### Status
|
|
||||||
|
|
||||||
done
|
|
||||||
|
|
||||||
### Context
|
|
||||||
|
|
||||||
we currently have three authoritative domains on the NAS:
|
|
||||||
|
|
||||||
- ducamps.win
|
|
||||||
- ducamps.eu
|
|
||||||
- lan.ducamps.eu
|
|
||||||
|
|
||||||
we could migrate the authoritative DNS into the cluster;
|
|
||||||
ducamps.win and ducamps.eu are only used for application access, so there is no dependency on the cluster build;
|
|
||||||
we need to study the cluster-build dependency for lan.ducamps.eu -> in any case, for a build from scratch we need to use IPs;
|
|
||||||
we need a keepalive IP and to check for conflicts if stored on the same machine as Pi-hole -> OK, it does not need to listen on 53, it is only queried by the recursor;
|
|
||||||
the authoritative DNS will depend on storage (less problematic than the recursor)
|
|
||||||
|
|
||||||
### Decision
|
|
||||||
|
|
||||||
### Consequences
|
|
||||||
|
|
||||||
## 004 Migrate the recursor into the cluster
|
|
||||||
|
|
||||||
### Status
|
|
||||||
|
|
||||||
done
|
|
||||||
|
|
||||||
### Context
|
|
||||||
|
|
||||||
now that the cluster no longer depends on the recursor (each node queries its own Consul agent for Consul queries),
|
|
||||||
we need to study whether we can migrate the recursor into Nomad without breaking a dependency
|
|
||||||
advantages:
|
|
||||||
|
|
||||||
- the recursor could move to another client in case of failure
|
|
||||||
|
|
||||||
drawbacks:
|
|
||||||
|
|
||||||
- this job needs a keepalive IP like Pi-hole
|
|
||||||
- *we lose the recursor if the Nomad cluster is lost*
|
|
||||||
|
|
||||||
### Decision
|
|
||||||
|
|
||||||
put one recursor in the cluster in addition to the authoritative server, and keep the recursor on gerard for better redundancy
|
|
||||||
|
|
||||||
### Consequences
|
|
||||||
|
|
||||||
|
|
||||||
## 005 Physical recursor location
|
|
||||||
|
|
||||||
### Status
|
|
||||||
|
|
||||||
done
|
|
||||||
|
|
||||||
### Context
|
|
||||||
|
|
||||||
following the NAS migration, the physical DNS recursor was installed directly on the NAS; this brings a SPOF: when the NAS fails, the recursors on the Nomad cluster are stopped because of the volume dependency
|
|
||||||
|
|
||||||
### Decision
|
|
||||||
|
|
||||||
Put the physical recursor on a cluster node; that way, to have a DNS issue we would need both the NAS and that Nomad node down at the same time
|
|
@ -1,42 +0,0 @@
|
|||||||
# NAS
|
|
||||||
|
|
||||||
## 001 New NAS spec
|
|
||||||
|
|
||||||
### Status
|
|
||||||
|
|
||||||
In progress
|
|
||||||
|
|
||||||
### Context
|
|
||||||
|
|
||||||
Storage:
|
|
||||||
|
|
||||||
- The data filesystem will be btrfs.
|
|
||||||
- Study whether to keep the root filesystem on ext4.
|
|
||||||
- Need LVM under btrfs to keep the possibility of adding a cache later (a cache on cold data is useless at the beginning; maybe a write cache in future use); see the sketch after this list.
|
|
||||||
- Hot data (nomad, documents, fresh downloads, music?) on SSD; cold data (films, series, photos) on HDD.
|
|
||||||
- At least 2 HDDs and 2 SSDs.
|
|
||||||
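A hedged sketch of the cache option kept open by putting LVM under btrfs; the volume-group, LV and device names are invented for illustration:

```sh
# create a cache pool on the SSD, then attach it to the cold-data LV
lvcreate --type cache-pool -L 100G -n hotcache data_vg /dev/sdb
lvconvert --type cache --cachepool data_vg/hotcache data_vg/cold
```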
|
|
||||||
|
|
||||||
|
|
||||||
Hardware:
|
|
||||||
|
|
||||||
- a 2.5 Gbps network would be good for future growth
|
|
||||||
- at least 4 GB of RAM (expandable would be appreciated)
|
|
||||||
|
|
||||||
Software:
|
|
||||||
|
|
||||||
must be able to install a custom Linux distribution
|
|
||||||
|
|
||||||
### Decision
|
|
||||||
|
|
||||||
- Due to form factor, power consumption and SSD capability my choice is the ASUSTOR Nimbustor 2 Gen 2 AS5402; it matches the needs and is less expensive than a DIY NAS
|
|
||||||
- only buy one additional 2 TB SSD to store the system and hot data
|
|
||||||
|
|
||||||
### Consequences
|
|
||||||
|
|
||||||
we need to migrate the data while keeping the same disks:
|
|
||||||
|
|
||||||
- install system
|
|
||||||
- copy all data from the 2 TB HDD to the SSD, then format the 2 TB HDD
|
|
||||||
- copy download data from the 4 TB HDD to the SSD
|
|
||||||
- copy series to the 2 TB HDD and copy films to an external hard drive
|
|
@ -1,25 +0,0 @@
|
|||||||
# Docker pull-through
|
|
||||||
|
|
||||||
# 001 Architecture considerations
|
|
||||||
|
|
||||||
## Status
|
|
||||||
|
|
||||||
Accepted
|
|
||||||
|
|
||||||
## Context
|
|
||||||
|
|
||||||
Docker Hub enforces a pull limit; if something goes wrong on our infrastructure we can quickly hit this limit. The solution is to implement a pull-through proxy.
|
|
||||||
|
|
||||||
|
|
||||||
## Decision
|
|
||||||
|
|
||||||
create two container tasks: a Docker Hub pull-through and a GHCR one
|
|
||||||
|
|
||||||
we could add these registries to Traefik to have both under port 5000, but this would add a Traefik dependency on rebuild
|
|
||||||
|
|
||||||
so to begin we will expose each registry as its own service on two different static ports
|
|
||||||
|
|
||||||
## Consequences
|
|
||||||
|
|
||||||
- these registries need to be started first on cluster creation
|
|
||||||
- we need to update all job images with the local proxy URL
|
|
@ -3,34 +3,30 @@
|
|||||||
```mermaid
|
```mermaid
|
||||||
flowchart LR
|
flowchart LR
|
||||||
subgraph External
|
subgraph External
|
||||||
externalRecursor[recursor]
|
recursor
|
||||||
GandiDns[ hetzner ducamps.win]
|
GandiDns[ Gandi ducamps.win]
|
||||||
end
|
end
|
||||||
subgraph Internal
|
subgraph Internal
|
||||||
pihole[pihole]--ducamps.win-->NAS
|
pihole[pihole]----ducamps.win-->NAS
|
||||||
pihole--service.consul-->consul[consul cluster]
|
pihole--service.consul-->consul[consul cluster]
|
||||||
pihole--->recursor
|
|
||||||
recursor--service.consul-->consul
|
|
||||||
DHCP --dynamic update--> NAS
|
DHCP --dynamic update--> NAS
|
||||||
NAS
|
NAS--service.consul-->consul
|
||||||
recursor--ducamps.win-->NAS
|
|
||||||
consul--service.consul--->consul
|
|
||||||
clients--->pihole
|
|
||||||
clients--->recursor
|
|
||||||
end
|
end
|
||||||
pihole --> externalRecursor
|
NAS --> recursor
|
||||||
recursor-->External
|
pihole --> recursor
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Detail
|
## Detail
|
||||||
|
|
||||||
The Pi-hole container in the Nomad cluster is set as the primary DNS and ad blocker; the secondary DNS recursor is located on gerard
|
The Pi-hole container in the Nomad cluster is set as the primary DNS and ad blocker; the secondary DNS is located on the NAS
|
||||||
|
|
||||||
The DNS located on the NAS manages the *ducamps.win* domain on the local network; each recursor forwards requests for *ducamps.win* to this DNS.
|
The DNS located on the NAS manages the *ducamps.win* domain on the local network; Pi-hole forwards requests for *ducamps.win* to this DNS.
|
||||||
|
|
||||||
Each DNS forwards *service.consul* requests to the Consul cluster.
|
Each DNS forwards *service.consul* requests to the Consul cluster. On Pi-hole a template configures each Consul server.
|
||||||
Each Consul node has a redirection in systemd-resolved to its own Consul client
|
|
||||||
|
On the DiskStation every request is forwarded to a single Consul node; this is a point to improve because it leaves a possibility of outage. Due to Synology DNSServer limitations we can only forward to port 53, so on the target Consul node we redirect port 53 to 8300 with iptables rules.
|
||||||
|
|
||||||
a DHCP service is set up to perform dynamic updates on the NAS DNS on lease delivery
|
a DHCP service is set up to perform dynamic updates on the NAS DNS on lease delivery
|
||||||
|
|
||||||
external recursors are configured on Pi-hole (Cloudflare and FDN) in case the local recursors fail
|
external recursors are Cloudflare and FDN
|
||||||
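A hedged way to verify the chain described above; 192.168.1.4 is one of the DNS addresses named in this repo's host_vars, and the two record names are hypothetical:

```sh
# *.service.consul should be answered via the Consul cluster
dig +short @192.168.1.4 vault.service.consul
# *.ducamps.win should be answered via the NAS authoritative DNS
dig +short @192.168.1.4 nas.ducamps.win
```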
|
@ -1,25 +0,0 @@
|
|||||||
# Ansible vault management
|
|
||||||
|
|
||||||
Ansible vault passwords are encrypted with a GPG key stored in ansible/misc;
|
|
||||||
to renew the password follow this workflow:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
# Generate a new password for the default vault
|
|
||||||
pwgen -s 64 default-pw
|
|
||||||
|
|
||||||
# Re-encrypt all default vaults
|
|
||||||
ansible-vault rekey --new-vault-password-file ./default-pw \
|
|
||||||
$(git grep -l 'ANSIBLE_VAULT;1.1;AES256$')
|
|
||||||
|
|
||||||
# Save the new password in encrypted form
|
|
||||||
# (replace "RECIPIENT" with your email)
|
|
||||||
gpg -r RECIPIENT -o misc/vault-password.gpg -e default-pw
|
|
||||||
|
|
||||||
# Ensure the new password is usable
|
|
||||||
ansible-vault view misc/vaults/vault_hcloud.yml
|
|
||||||
|
|
||||||
# Remove the unencrypted password file
|
|
||||||
rm default-pw
|
|
||||||
```
|
|
||||||
|
|
||||||
the script `vault-keyring-client.sh` is set in ansible.cfg as `vault_password_file` to decrypt the GPG file
|
|
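The matching ansible.cfg entry and a quick check that decryption works; the exact path of the script is an assumption:

```sh
# ansible.cfg (sketch):
#   [defaults]
#   vault_password_file = ./misc/vault-keyring-client.sh
# should decrypt via GPG without prompting for a password:
ansible-vault view misc/vaults/vault_hcloud.yml
```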
@ -1,8 +0,0 @@
|
|||||||
# Troubleshooting
|
|
||||||
|
|
||||||
## Issue with SMTP Traefik port
|
|
||||||
|
|
||||||
ensure that no other Traefik router (HTTP or TCP) is listening on the smtp entrypoint or
|
|
||||||
on all entrypoints; this can disturb the SMTP TLS connection;
|
|
||||||
see [here](https://doc.traefik.io/traefik/routing/routers/#entrypoints_1)
|
|
||||||
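A hedged way to audit which routers claim which entrypoints, assuming the Traefik API/dashboard is enabled and reachable on port 8080:

```sh
# list HTTP and TCP routers with their entrypoints
curl -s http://traefik.service.consul:8080/api/http/routers | jq '.[] | {name, entryPoints}'
curl -s http://traefik.service.consul:8080/api/tcp/routers  | jq '.[] | {name, entryPoints}'
```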
|
|
23
infra/.terraform.lock.hcl
Normal file
23
infra/.terraform.lock.hcl
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
# This file is maintained automatically by "terraform init".
|
||||||
|
# Manual edits may be lost in future updates.
|
||||||
|
|
||||||
|
provider "registry.terraform.io/hetznercloud/hcloud" {
|
||||||
|
version = "1.35.2"
|
||||||
|
hashes = [
|
||||||
|
"h1:a/DH+2jHvgikSDajup5feRZRUwNw8OT9NBPKezjgM5g=",
|
||||||
|
"zh:1a7cb8f9cbd51b62bdbb4f36cdb070dd99059d86115c4777193e0f8536798d4d",
|
||||||
|
"zh:29c104aae7f7a4e1a4aea32febc9caa2d7d86589cd9d01d5b93dbe2cb0a73220",
|
||||||
|
"zh:29f082195d8f4e4cfb4050fae2ed62ed5616659c6dfaa7b5f1eb42d94d130864",
|
||||||
|
"zh:3cfe3876763659e27696adcb945e6da2dc2ec014ff8a2e8f0f3e610e3bfd9b73",
|
||||||
|
"zh:3d967f4b1aef78ffce389dd32cdea4b558ef826cec96ceb4bdafde4bb4a9b655",
|
||||||
|
"zh:3e160f581f7912c2053f86d6d8a3e3470fcf1fe8228b59ac216a7e40a1dd444c",
|
||||||
|
"zh:5138022c8b4c8a572e8097749241d929a96d3522e67ce25f86bb9fd51c4b343c",
|
||||||
|
"zh:5783febc4d8ac4b7fdb49607cab92ad13509d87ad4ca1999067ac3d20e815d12",
|
||||||
|
"zh:7f8ce9268d48beb5fa0103a8510d4fe644aaac6cd328fc4441dd37e8bdbfadab",
|
||||||
|
"zh:8ab6aea82657fd6f97d79b41e6cd129a33a47ce727a7d0b52205590fa3785ce1",
|
||||||
|
"zh:9e4bebe3bbee7875dc2e3ceca3cf0fec3254a8b481c7b96ba9a5d65647ea9092",
|
||||||
|
"zh:af2a912db9a6fce844ac8c0e695a5d92a5625f2df126129940051a6b1021443d",
|
||||||
|
"zh:bfe86d80e55f44a99dbbdca9d1caf0c837fe21d91e78674ee36263b7de71fd38",
|
||||||
|
"zh:d9538a361bd8979c4a87273a82fc5dec7110f3aa7ec69fffb8c70fe8937bc1f4",
|
||||||
|
]
|
||||||
|
}
|
@ -1,24 +1,6 @@
|
|||||||
resource "hcloud_firewall" "prod" {
|
resource "hcloud_firewall" "prod" {
|
||||||
name= "prod"
|
name= "prod"
|
||||||
|
|
||||||
rule {
|
rule {
|
||||||
direction ="in"
|
|
||||||
protocol = "icmp"
|
|
||||||
source_ips = [
|
|
||||||
"0.0.0.0/0",
|
|
||||||
"::/0"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
rule {
|
|
||||||
direction ="in"
|
|
||||||
protocol = "udp"
|
|
||||||
port = "51820"
|
|
||||||
source_ips = [
|
|
||||||
"0.0.0.0/0",
|
|
||||||
"::/0"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
rule {
|
|
||||||
direction ="in"
|
direction ="in"
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
port = "80"
|
port = "80"
|
||||||
@ -36,11 +18,28 @@ resource "hcloud_firewall" "prod" {
|
|||||||
"::/0"
|
"::/0"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
# torrent UDP port
|
||||||
|
rule {
|
||||||
|
direction ="in"
|
||||||
|
protocol = "udp"
|
||||||
|
port = "6881"
|
||||||
|
source_ips = [
|
||||||
|
"0.0.0.0/0",
|
||||||
|
"::/0"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
# wireguard port
|
||||||
|
rule {
|
||||||
|
direction ="in"
|
||||||
|
protocol = "udp"
|
||||||
|
port = "51820"
|
||||||
|
source_ips = [
|
||||||
|
"0.0.0.0/0",
|
||||||
|
"::/0"
|
||||||
|
]
|
||||||
|
|
||||||
|
}
|
||||||
resource "hcloud_firewall" "torrent" {
|
# torrent listen port
|
||||||
name = "torrent"
|
|
||||||
rule {
|
rule {
|
||||||
direction ="in"
|
direction ="in"
|
||||||
protocol = "tcp"
|
protocol = "tcp"
|
||||||
@ -51,10 +50,9 @@ resource "hcloud_firewall" "torrent" {
|
|||||||
]
|
]
|
||||||
|
|
||||||
}
|
}
|
||||||
rule {
|
rule {
|
||||||
direction ="in"
|
direction ="in"
|
||||||
protocol = "udp"
|
protocol = "icmp"
|
||||||
port = "6881"
|
|
||||||
source_ips = [
|
source_ips = [
|
||||||
"0.0.0.0/0",
|
"0.0.0.0/0",
|
||||||
"::/0"
|
"::/0"
|
||||||
@ -87,34 +85,3 @@ resource "hcloud_firewall" "Gitea_SSH" {
|
|||||||
]
|
]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
resource "hcloud_firewall" "mail" {
|
|
||||||
name= "mail"
|
|
||||||
rule {
|
|
||||||
direction ="in"
|
|
||||||
protocol = "tcp"
|
|
||||||
port="25"
|
|
||||||
source_ips = [
|
|
||||||
"0.0.0.0/0",
|
|
||||||
"::/0"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
rule {
|
|
||||||
direction ="in"
|
|
||||||
protocol = "tcp"
|
|
||||||
port="993"
|
|
||||||
source_ips = [
|
|
||||||
"0.0.0.0/0",
|
|
||||||
"::/0"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
rule {
|
|
||||||
direction ="in"
|
|
||||||
protocol = "tcp"
|
|
||||||
port="465"
|
|
||||||
source_ips = [
|
|
||||||
"0.0.0.0/0",
|
|
||||||
"::/0"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
@ -1,13 +1,13 @@
|
|||||||
output "homelab_servers_status" {
|
output "homelab_servers_status" {
|
||||||
value = {
|
value = {
|
||||||
for server in hcloud_server.merlin :
|
for server in hcloud_server.HomeLab2 :
|
||||||
server.name => server.status
|
server.name => server.status
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
output "homelab_servers_ips" {
|
output "homelab_servers_ips" {
|
||||||
value = {
|
value = {
|
||||||
for server in hcloud_server.merlin :
|
for server in hcloud_server.HomeLab2 :
|
||||||
server.name => server.ipv4_address
|
server.name => server.ipv4_address
|
||||||
}
|
}
|
||||||
}
|
}
|
@ -1,12 +1,8 @@
|
|||||||
terraform {
|
terraform {
|
||||||
|
|
||||||
required_providers {
|
required_providers {
|
||||||
hcloud = {
|
hcloud = {
|
||||||
source = "hetznercloud/hcloud"
|
source = "hetznercloud/hcloud"
|
||||||
}
|
}
|
||||||
hetznerdns = {
|
|
||||||
source="timohirt/hetznerdns"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
backend "consul" {
|
backend "consul" {
|
||||||
path = "terraform/infra"
|
path = "terraform/infra"
|
||||||
@ -17,6 +13,3 @@ terraform {
|
|||||||
provider "hcloud" {
|
provider "hcloud" {
|
||||||
token = var.hcloud_token
|
token = var.hcloud_token
|
||||||
}
|
}
|
||||||
provider "hetznerdns" {
|
|
||||||
apitoken = var.hdns_token
|
|
||||||
}
|
|
17
infra/server.tf
Normal file
17
infra/server.tf
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
resource "hcloud_server" "HomeLab2" {
|
||||||
|
count = var.instances
|
||||||
|
name = "corwin"
|
||||||
|
image = "rocky-9"
|
||||||
|
server_type = var.server_type
|
||||||
|
location = var.location
|
||||||
|
ssh_keys = [hcloud_ssh_key.default.id]
|
||||||
|
firewall_ids = [hcloud_firewall.prod.id,hcloud_firewall.Gitea_SSH.id]
|
||||||
|
labels = {
|
||||||
|
}
|
||||||
|
|
||||||
|
lifecycle {
|
||||||
|
ignore_changes = [
|
||||||
|
ssh_keys,
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
@ -1,26 +1,19 @@
|
|||||||
variable "hcloud_token" {
|
variable "hcloud_token" {
|
||||||
type = string
|
|
||||||
# default = <your-api-token>
|
# default = <your-api-token>
|
||||||
}
|
}
|
||||||
variable "hdns_token" {
|
|
||||||
type=string
|
|
||||||
}
|
|
||||||
variable "location" {
|
variable "location" {
|
||||||
type=string
|
|
||||||
default = "hel1"
|
default = "hel1"
|
||||||
}
|
}
|
||||||
variable "instances" {
|
variable "instances" {
|
||||||
type=number
|
|
||||||
default = "1"
|
default = "1"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "server_type" {
|
variable "server_type" {
|
||||||
type=string
|
default = "cpx11"
|
||||||
default = "cpx21"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "os_type" {
|
variable "os_type" {
|
||||||
type=string
|
default = "rocky-8"
|
||||||
default = "rocky-9"
|
|
||||||
}
|
}
|
||||||
|
|
20
makefile
20
makefile
@ -10,30 +10,12 @@ vault-dev:
|
|||||||
./vault/standalone_vault.sh $(FILE);\
|
./vault/standalone_vault.sh $(FILE);\
|
||||||
fi
|
fi
|
||||||
|
|
||||||
vagranup:
|
create-dev:
|
||||||
vagrant up
|
vagrant up
|
||||||
|
|
||||||
create-dev: vagranup DNS-stagging
|
|
||||||
make -C ansible deploy_staging
|
make -C ansible deploy_staging
|
||||||
make -C terraform deploy_vault env=staging
|
|
||||||
VAULT_TOKEN=$(shell cat ~/vaultUnseal/staging/rootkey) python ./script/generate-vault-secret
|
|
||||||
|
|
||||||
create-dev-base: vagranup DNS-stagging
|
|
||||||
make -C ansible deploy_staging_base
|
|
||||||
|
|
||||||
|
|
||||||
destroy-dev:
|
destroy-dev:
|
||||||
vagrant destroy --force
|
vagrant destroy --force
|
||||||
|
|
||||||
serve:
|
serve:
|
||||||
mkdocs serve
|
mkdocs serve
|
||||||
|
|
||||||
DNS-stagging:
|
|
||||||
$(eval dns := $(shell dig oscar-dev.lan.ducamps.dev +short))
|
|
||||||
$(eval dns1 := $(shell dig nas-dev.lan.ducamps.dev +short))
|
|
||||||
sudo resolvectl dns virbr2 "$(dns)" "$(dns1)";sudo resolvectl domain virbr2 "~consul";sudo systemctl restart systemd-resolved.service
|
|
||||||
|
|
||||||
|
|
||||||
DNS-production:
|
|
||||||
sudo resolvectl dns virbr2 "";sudo resolvectl domain virbr2 "";sudo systemctl restart systemd-resolved.service
|
|
||||||
|
|
||||||
|
@ -35,7 +35,7 @@ job "MQTT" {
|
|||||||
]
|
]
|
||||||
}
|
}
|
||||||
config {
|
config {
|
||||||
image = "docker.service.consul:5000/library/eclipse-mosquitto"
|
image = "eclipse-mosquitto"
|
||||||
ports = ["mosquittoWS", "mosquittoMQTT"]
|
ports = ["mosquittoWS", "mosquittoMQTT"]
|
||||||
volumes = [
|
volumes = [
|
||||||
"/mnt/diskstation/nomad/mosquitto:/mosquitto/data",
|
"/mnt/diskstation/nomad/mosquitto:/mosquitto/data",
|
50
nomad-job/alertmanager.nomad
Normal file
50
nomad-job/alertmanager.nomad
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
|
||||||
|
job "alertmanager" {
|
||||||
|
datacenters = ["homelab"]
|
||||||
|
type = "service"
|
||||||
|
meta {
|
||||||
|
forcedeploy = "0"
|
||||||
|
}
|
||||||
|
|
||||||
|
group "alertmanager" {
|
||||||
|
network {
|
||||||
|
mode = "host"
|
||||||
|
port "http" {
|
||||||
|
static = 9093
|
||||||
|
}
|
||||||
|
}
|
||||||
|
task "alertmanager" {
|
||||||
|
driver = "docker"
|
||||||
|
service {
|
||||||
|
name = "alertmanager"
|
||||||
|
port = "http"
|
||||||
|
tags = [
|
||||||
|
"urlprefix-/alertmanager strip=/alertmanager",
|
||||||
|
"homer.enable=true",
|
||||||
|
"homer.name=AlertManager",
|
||||||
|
"homer.service=Monitoring",
|
||||||
|
"homer.logo=https://camo.githubusercontent.com/13ff7fc7ea6d8a6d98d856da8e3220501b9e6a89620f017d1db039007138e062/687474703a2f2f6465766f70792e696f2f77702d636f6e74656e742f75706c6f6164732f323031392f30322f7a616c2d3230302e706e67",
|
||||||
|
"homer.target=_blank",
|
||||||
|
"homer.url=http://${NOMAD_ADDR_http}",
|
||||||
|
|
||||||
|
]
|
||||||
|
check {
|
||||||
|
name = "alertmanager_ui port alive"
|
||||||
|
type = "http"
|
||||||
|
path = "/-/healthy"
|
||||||
|
interval = "10s"
|
||||||
|
timeout = "2s"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
config {
|
||||||
|
image = "prom/alertmanager"
|
||||||
|
ports = ["http"]
|
||||||
|
|
||||||
|
}
|
||||||
|
resources {
|
||||||
|
memory = 75
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@ -1,62 +0,0 @@
|
|||||||
|
|
||||||
job "actualbudget" {
|
|
||||||
datacenters = ["homelab"]
|
|
||||||
priority = 50
|
|
||||||
type = "service"
|
|
||||||
meta {
|
|
||||||
forcedeploy = "0"
|
|
||||||
}
|
|
||||||
constraint {
|
|
||||||
attribute = "${attr.cpu.arch}"
|
|
||||||
value = "amd64"
|
|
||||||
}
|
|
||||||
constraint {
|
|
||||||
attribute = "${node.class}"
|
|
||||||
operator = "set_contains"
|
|
||||||
value = "cluster"
|
|
||||||
}
|
|
||||||
group "actualbudget"{
|
|
||||||
network {
|
|
||||||
mode = "host"
|
|
||||||
port "http" {
|
|
||||||
to = 5006
|
|
||||||
}
|
|
||||||
}
|
|
||||||
task "actualbudget-server" {
|
|
||||||
driver = "docker"
|
|
||||||
service {
|
|
||||||
name = "actualbudget"
|
|
||||||
port = "http"
|
|
||||||
tags = [
|
|
||||||
"traefik.enable=true",
|
|
||||||
"traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`budget.ducamps.eu`)",
|
|
||||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=budget.ducamps.eu",
|
|
||||||
"traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
|
|
||||||
"traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
|
|
||||||
"homer.enable=true",
|
|
||||||
"homer.name=${NOMAD_TASK_NAME}",
|
|
||||||
"homer.service=Application",
|
|
||||||
"homer.target=_blank",
|
|
||||||
"homer.logo=https://budget.ducamps.eu/apple-touch-icon.png",
|
|
||||||
"homer.url=https://budget.ducamps.eu",
|
|
||||||
|
|
||||||
]
|
|
||||||
}
|
|
||||||
config {
|
|
||||||
image = "ghcr.service.consul:5000/actualbudget/actual-server:latest"
|
|
||||||
ports = ["http"]
|
|
||||||
volumes = [
|
|
||||||
"/mnt/diskstation/nomad/actualbudget:/data"
|
|
||||||
]
|
|
||||||
|
|
||||||
}
|
|
||||||
env {
|
|
||||||
}
|
|
||||||
|
|
||||||
resources {
|
|
||||||
memory = 300
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,240 +0,0 @@
job "borgmatic" {
    datacenters = ["homelab"]
    priority    = 50
    type        = "service"
    meta {
        forcedeploy = "0"
    }
    constraint {
        attribute = "${node.class}"
        operator  = "set_contains"
        value     = "NAS"
    }

    group "borgmatic" {
        vault {
            policies = ["borgmatic"]
        }
        task "borgmatic" {
            action "manual-backup" {
                command = "/usr/local/bin/borgmatic"
                args = [
                    "create",
                    "prune",
                    "--verbosity",
                    "1"
                ]
            }
            action "list-backup" {
                command = "/usr/local/bin/borgmatic"
                args    = ["rlist"]
            }
            driver = "docker"
            config {
                image = "ghcr.service.consul:5000/borgmatic-collective/borgmatic"
                volumes = [
                    "/exports:/exports",
                    "local/borgmatic.d:/etc/borgmatic.d",
                    "secret/id_rsa:/root/.ssh/id_rsa",
                    "secret/known_hosts:/root/.ssh/known_hosts",
                    "/exports/nomad/borgmatic:/root/.cache/borg",
                ]
            }
            env {
            }

            template {
                data        = <<EOH
BORG_RSH="ssh -i /root/.ssh/id_rsa -p 23"
{{ with secret "secrets/data/nomad/borgmatic"}}
BORG_PASSPHRASE= {{.Data.data.passphrase}}
{{end}}
EOH
                destination = "secrets/sample.env"
                env         = true
            }
            template {
                data        = <<EOH
0 2 * * * PATH=$PATH:/usr/local/bin /usr/local/bin/borgmatic create prune --verbosity 1
0 23 1 * * PATH=$PATH:/usr/local/bin /usr/local/bin/borgmatic check
EOH
                destination = "local/borgmatic.d/crontab.txt"
            }
            template {
                data        = <<EOH
# List of source directories to backup (required). Globs and
# tildes are expanded. Do not backslash spaces in path names.
source_directories:
    - /exports/ebook
    - /exports/homes
    - /exports/music
    - /exports/nomad
    - /exports/photo

repositories:
    - path: ssh://u304977@u304977.your-storagebox.de/./{{if eq "production" (env "meta.env") }}backup_hamelab{{else}}backup_homelab_dev{{end}}
      label: {{if eq "production" (env "meta.env") }}backup_hamelab{{else}}backup_homelab_dev{{end}}

exclude_patterns:
    - '*/nomad/jellyfin/cache'
    - '*nomad/loki/'
    - '*nomad/prometheus'
    - '*nomad/registry'
    - '*nomad/pacoloco'
    - '*nomad/pihole'
    - '*nomad/jellyfin/*'
    - '*.log*'

match_archives: '*'
archive_name_format: '{{ env "node.datacenter" }}-{now:%Y-%m-%dT%H:%M:%S.%f}'
extra_borg_options:
    # Extra command-line options to pass to "borg init".
    # init: --extra-option

    # Extra command-line options to pass to "borg prune".
    # prune: --extra-option

    # Extra command-line options to pass to "borg compact".
    # compact: --extra-option

    # Extra command-line options to pass to "borg create".
    create: --progress --stats

    # Extra command-line options to pass to "borg check".
    # check: --extra-option

# Keep all archives within this time interval.
# keep_within: 3H

# Number of secondly archives to keep.
# keep_secondly: 60

# Number of minutely archives to keep.
# keep_minutely: 60

# Number of hourly archives to keep.
# keep_hourly: 24

# Number of daily archives to keep.
keep_daily: 7

# Number of weekly archives to keep.
keep_weekly: 4

# Number of monthly archives to keep.
# keep_monthly: 6

# Number of yearly archives to keep.
# keep_yearly: 1

checks:
    - name: repository
#    - archives
# check_repositories:
#    - user@backupserver:sourcehostname.borg
# check_last: 3
# output:
#    color: false

# List of one or more shell commands or scripts to execute
# before creating a backup, run once per configuration file.
# before_backup:
#    - echo "Starting a backup."

# List of one or more shell commands or scripts to execute
# before pruning, run once per configuration file.
# before_prune:
#    - echo "Starting pruning."

# List of one or more shell commands or scripts to execute
# before compaction, run once per configuration file.
# before_compact:
#    - echo "Starting compaction."

# List of one or more shell commands or scripts to execute
# before consistency checks, run once per configuration file.
# before_check:
#    - echo "Starting checks."

# List of one or more shell commands or scripts to execute
# before extracting a backup, run once per configuration file.
# before_extract:
#    - echo "Starting extracting."

# List of one or more shell commands or scripts to execute
# after creating a backup, run once per configuration file.
# after_backup:
#    - echo "Finished a backup."

# List of one or more shell commands or scripts to execute
# after compaction, run once per configuration file.
# after_compact:
#    - echo "Finished compaction."

# List of one or more shell commands or scripts to execute
# after pruning, run once per configuration file.
# after_prune:
#    - echo "Finished pruning."

# List of one or more shell commands or scripts to execute
# after consistency checks, run once per configuration file.
# after_check:
#    - echo "Finished checks."

# List of one or more shell commands or scripts to execute
# after extracting a backup, run once per configuration file.
# after_extract:
#    - echo "Finished extracting."

# List of one or more shell commands or scripts to execute
# when an exception occurs during a "prune", "compact",
# "create", or "check" action or an associated before/after
# hook.
# on_error:
#    - echo "Error during prune/compact/create/check."

# List of one or more shell commands or scripts to execute
# before running all actions (if one of them is "create").
# These are collected from all configuration files and then
# run once before all of them (prior to all actions).
# before_everything:
#    - echo "Starting actions."

# List of one or more shell commands or scripts to execute
# after running all actions (if one of them is "create").
# These are collected from all configuration files and then
# run once after all of them (after any action).
# after_everything:
#    - echo "Completed actions."
EOH
                destination = "local/borgmatic.d/config.yaml"
            }
            template {
                data        = <<EOH
{{ with secret "secrets/data/nomad/borgmatic"}}
{{.Data.data.privatekey}}
{{end}}
EOH
                destination = "secret/id_rsa"
                perms       = "700"
            }
            template {
                data        = <<EOH
[u304977.your-storagebox.de]:23 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIICf9svRenC/PLKIL9nk6K/pxQgoiFC41wTNvoIncOxs
[u304977.your-storagebox.de]:23 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA5EB5p/5Hp3hGW1oHok+PIOH9Pbn7cnUiGmUEBrCVjnAw+HrKyN8bYVV0dIGllswYXwkG/+bgiBlE6IVIBAq+JwVWu1Sss3KarHY3OvFJUXZoZyRRg/Gc/+LRCE7lyKpwWQ70dbelGRyyJFH36eNv6ySXoUYtGkwlU5IVaHPApOxe4LHPZa/qhSRbPo2hwoh0orCtgejRebNtW5nlx00DNFgsvn8Svz2cIYLxsPVzKgUxs8Zxsxgn+Q/UvR7uq4AbAhyBMLxv7DjJ1pc7PJocuTno2Rw9uMZi1gkjbnmiOh6TTXIEWbnroyIhwc8555uto9melEUmWNQ+C+PwAK+MPw==
[u304977.your-storagebox.de]:23 ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAGK0po6usux4Qv2d8zKZN1dDvbWjxKkGsx7XwFdSUCnF19Q8psHEUWR7C/LtSQ5crU/g+tQVRBtSgoUcE8T+FWp5wBxKvWG2X9gD+s9/4zRmDeSJR77W6gSA/+hpOZoSE+4KgNdnbYSNtbZH/dN74EG7GLb/gcIpbUUzPNXpfKl7mQitw==
EOH
                destination = "secret/known_hosts"
                perms       = "700"
            }
            resources {
                memory     = 300
                memory_max = 1000
            }
        }
    }
}
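The two `action` blocks above are Nomad job actions; once an allocation is running they can be fired ad hoc from the CLI instead of waiting for the crontab. A sketch, assuming Nomad ≥ 1.7 (where job actions were introduced):

```sh
# Run an ad-hoc backup inside the running borgmatic allocation
nomad action -job borgmatic -group borgmatic -task borgmatic manual-backup
# List the archives in the remote repository
nomad action -job borgmatic -group borgmatic -task borgmatic list-backup
```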
@ -1,194 +0,0 @@
job "dockermailserver" {
    datacenters = ["homelab"]
    priority    = 90
    type        = "service"
    meta {
        forcedeploy = "0"
    }
    constraint {
        attribute = "${attr.cpu.arch}"
        value     = "amd64"
    }
    constraint {
        attribute = "${node.class}"
        operator  = "set_contains"
        value     = "cluster"
    }
    group "dockermailserver" {
        network {
            mode = "host"
            port "smtp" {
                to = 25
            }
            port "imap" {
                to = 10993
            }
            port "esmtp" {
                to = 465
            }
            port "rspamd" {
                to = 11334
            }
        }
        service {
            name = "smtp"
            port = "smtp"
            tags = [
                "traefik.enable=true",
                "traefik.tcp.routers.smtp.service=smtp",
                "traefik.tcp.routers.smtp.entrypoints=smtp",
                "traefik.tcp.routers.smtp.rule=HostSNI(`*`)",
                "traefik.tcp.services.smtp.loadbalancer.proxyProtocol.version=1",
            ]
            check {
                name     = "smtp_probe"
                type     = "tcp"
                interval = "20s"
                timeout  = "2s"
            }
        }
        service {
            name = "esmtp"
            port = "esmtp"
            tags = [
                "traefik.enable=true",
                "traefik.tcp.routers.esmtp.service=esmtp",
                "traefik.tcp.routers.esmtp.entrypoints=esmtp",
                "traefik.tcp.routers.esmtp.rule=HostSNI(`*`)",
                "traefik.tcp.routers.esmtp.tls.passthrough=true",
                "traefik.tcp.services.esmtp.loadbalancer.proxyProtocol.version=1",
            ]
            check {
                name     = "esmtp_probe"
                type     = "tcp"
                interval = "20s"
                timeout  = "2s"
            }
        }
        service {
            name = "imap"
            port = "imap"
            tags = [
                "traefik.enable=true",
                "traefik.tcp.routers.imap.service=imap",
                "traefik.tcp.routers.imap.entrypoints=imap",
                "traefik.tcp.routers.imap.rule=HostSNI(`*`)",
                "traefik.tcp.routers.imap.tls.passthrough=true",
                "traefik.tcp.services.imap.loadbalancer.proxyProtocol.version=2",
            ]
            check {
                name     = "imap_probe"
                type     = "tcp"
                interval = "20s"
                timeout  = "2s"
            }
        }
        service {
            name = "certmail"
            tags = [
                "traefik.enable=true",
                "traefik.http.routers.certmail.entrypoints=web,websecure",
                "traefik.http.routers.certmail.tls.domains[0].sans=mail.ducamps.eu",
                "traefik.http.routers.certmail.tls.certresolver=myresolver",
            ]
        }
        service {
            name = "rspamdUI"
            port = "rspamd"
            tags = [
                "homer.enable=true",
                "homer.name=RSPAMD",
                "homer.service=Application",
                "homer.logo=http://${NOMAD_ADDR_rspamd}/img/rspamd_logo_navbar.png",
                "homer.target=_blank",
                "homer.url=http://${NOMAD_ADDR_rspamd}/",
            ]
            check {
                name     = "rspamd_probe"
                type     = "http"
                path     = "/"
                interval = "60s"
                timeout  = "2s"
            }
        }

        # vault {
        #     policies = ["policy_name"]
        # }
        task "docker-mailserver" {
            driver = "docker"
            config {
                image = "ghcr.service.consul:5000/docker-mailserver/docker-mailserver:latest"
                ports = ["smtp", "esmtp", "imap", "rspamd"]
                volumes = [
                    "/mnt/diskstation/nomad/dms/mail-data:/var/mail",
                    "/mnt/diskstation/nomad/dms/mail-state:/var/mail-state",
                    "/mnt/diskstation/nomad/dms/mail-logs:/var/log/mail",
                    "/mnt/diskstation/nomad/dms/config:/tmp/docker-mailserver",
                    "/etc/localtime:/etc/localtime",
                    "local/postfix-main.cf:/tmp/docker-mailserver/postfix-main.cf",
                    "local/postfix-master.cf:/tmp/docker-mailserver/postfix-master.cf",
                    "local/dovecot.cf:/tmp/docker-mailserver/dovecot.cf",
                    "/mnt/diskstation/nomad/traefik/acme.json:/etc/letsencrypt/acme.json"
                ]
            }

            env {
                OVERRIDE_HOSTNAME          = "mail.ducamps.eu"
                DMS_VMAIL_UID              = 1000000
                DMS_VMAIL_GID              = 984
                SSL_TYPE                   = "letsencrypt"
                LOG_LEVEL                  = "info"
                POSTMASTER_ADDRESS         = "vincent@ducamps.eu"
                ENABLE_RSPAMD              = 1
                ENABLE_OPENDKIM            = 0
                ENABLE_OPENDMARC           = 0
                ENABLE_POLICYD_SPF         = 0
                ENABLE_UPDATE_CHECK        = 0
                UPDATE_CHECK_INTERVAL      = "1d"
                RSPAMD_CHECK_AUTHENTICATED = 0
            }
            template {
                data        = <<EOH
EOH
                destination = "secrets/config"
                env         = true
            }

            template {
                data        = <<EOH
postscreen_upstream_proxy_protocol = haproxy
EOH
                destination = "local/postfix-main.cf"
            }
            template {
                data        = <<EOH
submission/inet/smtpd_upstream_proxy_protocol=haproxy
submissions/inet/smtpd_upstream_proxy_protocol=haproxy
EOH
                destination = "local/postfix-master.cf"
            }
            template {
                data        = <<EOH
haproxy_trusted_networks = 10.0.0.0/24, 127.0.0.0/8, 172.17.0.1, 192.168.1.0/24
haproxy_timeout = 3 secs
service imap-login {
  inet_listener imaps {
    haproxy = yes
    ssl = yes
    port = 10993
  }
}
EOH
                destination = "local/dovecot.cf"
            }
            resources {
                memory = 1000
            }
        }
    }
}
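Every mail-facing service here sits behind Traefik TCP routers with PROXY protocol enabled, so connecting straight to the container ports would be rejected by postscreen/dovecot; smoke tests should go through Traefik. A sketch, assuming the `esmtp` and `imap` entrypoints (defined in the Traefik config, not shown here) map to the usual 465 and 993:

```sh
# Submission over implicit TLS (Traefik passes the TLS session through)
openssl s_client -connect mail.ducamps.eu:465 -quiet
# IMAPS with SNI
openssl s_client -connect mail.ducamps.eu:993 -servername mail.ducamps.eu -quiet
```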
@ -1,88 +0,0 @@
job "ghostfolio" {
    datacenters = ["homelab"]
    priority    = 50
    type        = "service"
    meta {
        forcedeploy = "0"
    }
    constraint {
        attribute = "${attr.cpu.arch}"
        value     = "amd64"
    }

    group "main" {
        network {
            mode = "host"
            port "http" {
            }
            port "redis" {
                to = 6379
            }
        }
        vault {
            policies = ["ghostfolio"]
        }
        task "redis" {
            driver = "docker"
            config {
                image = "docker.service.consul:5000/library/redis"
                ports = ["redis"]
            }
            resources {
                memory = 50
            }
        }
        task "server" {
            driver = "docker"
            service {
                name = "${NOMAD_JOB_NAME}"
                port = "http"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
                ]
            }
            config {
                image   = "docker.service.consul:5000/ghostfolio/ghostfolio:latest"
                ports   = ["http"]
                volumes = [
                ]
            }
            env {
                NODE_ENV       = "production"
                REDIS_HOST     = "${NOMAD_IP_redis}"
                REDIS_PORT     = "${NOMAD_HOST_PORT_redis}"
                PORT           = "${NOMAD_PORT_http}"
                JWT_SECRET_KEY = uuidv4()
            }

            template {
                data        = <<EOH
{{ with secret "secrets/data/database/ghostfolio"}}
DATABASE_URL = postgresql://ghostfolio:{{.Data.data.password}}@active.db.service.consul/ghostfolio?connect_timeout=300&sslmode=prefer
{{end}}
{{ with secret "secrets/data/nomad/ghostfolio"}}
ACCESS_TOKEN_SALT = {{.Data.data.token}}
{{end}}
EOH
                destination = "secrets/ghostfolio.env"
                env         = true
            }
            resources {
                memory     = 400
                memory_max = 600
            }
        }
    }
}
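`JWT_SECRET_KEY = uuidv4()` is an HCL2 function call, so it is evaluated once when the job is parsed at submission time and stored with the job: every re-run of `nomad job run` rotates the secret and invalidates existing sessions. The stored value can be checked after submission; a sketch:

```sh
# The job is persisted with the function already evaluated
nomad job inspect ghostfolio | grep JWT_SECRET_KEY
```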
@ -1,146 +0,0 @@
job "immich" {
    datacenters = ["homelab"]
    priority    = 50
    type        = "service"
    meta {
        forcedeploy = "0"
    }
    constraint {
        attribute = "${attr.cpu.arch}"
        value     = "amd64"
    }

    group "immich" {
        network {
            mode = "host"
            port "http" {
                to = 3001
            }
            port "redis" {
                to = 6379
            }
            port "machinelearning" {
                to = 3003
            }
        }
        volume "immich-upload" {
            type            = "csi"
            source          = "immich-upload"
            access_mode     = "multi-node-multi-writer"
            attachment_mode = "file-system"
        }
        volume "immich-cache" {
            type            = "csi"
            source          = "immich-cache"
            access_mode     = "multi-node-multi-writer"
            attachment_mode = "file-system"
        }
        volume "photo" {
            type            = "csi"
            source          = "photo"
            access_mode     = "multi-node-multi-writer"
            attachment_mode = "file-system"
        }
        vault {
            policies = ["immich"]
        }
        task "immich-server" {
            driver = "docker"
            service {
                name = "immich"
                port = "http"
                tags = [
                    "homer.enable=true",
                    "homer.name=immich",
                    "homer.service=Application",
                    "homer.logo=https://immich.ducamps.eu/favicon-144.png",
                    "homer.target=_blank",
                    "homer.url=https://immich.ducamps.eu",
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
                ]
            }
            volume_mount {
                volume      = "immich-upload"
                destination = "/usr/src/app/upload"
            }
            volume_mount {
                volume      = "photo"
                destination = "/photo"
            }
            config {
                image   = "ghcr.service.consul:5000/immich-app/immich-server:release"
                ports   = ["http"]
                volumes = [
                    "/etc/localtime:/etc/localtime"
                ]
            }

            template {
                data        = <<EOH
{{ with secret "secrets/data/database/immich"}}
DB_PASSWORD= {{ .Data.data.password }}
{{end}}
DB_DATABASE_NAME= immich
DB_USERNAME= immich
DB_HOSTNAME= active.db.service.consul
REDIS_HOSTNAME = {{env "NOMAD_IP_redis"}}
REDIS_PORT = {{env "NOMAD_HOST_PORT_redis"}}
IMMICH_MACHINE_LEARNING_URL = http://{{ env "NOMAD_ADDR_machinelearning"}}
EOH
                destination = "secrets/immich.env"
                env         = true
            }
            resources {
                memory     = 600
                memory_max = 1800
            }
        }

        task "immich-machine-learning" {
            driver = "docker"
            volume_mount {
                volume      = "immich-cache"
                destination = "/cache"
            }
            config {
                image = "ghcr.service.consul:5000/immich-app/immich-machine-learning:main"
                ports = ["machinelearning"]
            }

            template {
                data        = <<EOH
{{ with secret "secrets/data/database/immich"}}
DB_PASSWORD= {{ .Data.data.password }}
{{end}}
DB_DATABASE_NAME= immich
DB_USERNAME= immich
DB_HOSTNAME= active.db.service.consul
REDIS_HOSTNAME = {{env "NOMAD_IP_redis"}}
REDIS_PORT = {{env "NOMAD_HOST_PORT_redis"}}
EOH
                destination = "secrets/immich.env"
                env         = true
            }
            resources {
                memory     = 200
                memory_max = 1800
            }
        }

        task "redis" {
            driver = "docker"
            config {
                image = "docker.service.consul:5000/library/redis:6.2-alpine"
                ports = ["redis"]
            }
            resources {
                memory = 50
            }
        }
    }
}
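The three `volume` stanzas reference CSI volumes that must already be registered with the cluster before the job can be placed. A minimal registration sketch for one of them; the `plugin_id` and any driver-specific context are hypothetical and depend on the CSI plugin actually deployed:

```sh
cat > immich-upload.volume.hcl <<'EOF'
# Hypothetical volume spec: plugin_id depends on the CSI driver in use
id        = "immich-upload"
name      = "immich-upload"
type      = "csi"
plugin_id = "nfs"

capability {
  access_mode     = "multi-node-multi-writer"
  attachment_mode = "file-system"
}
EOF
nomad volume register immich-upload.volume.hcl
```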
@ -1,119 +0,0 @@
job "jellyfin" {
    datacenters = ["homelab"]
    priority    = 30
    type        = "service"

    meta {
        forcedeploy = "1"
    }
    constraint {
        attribute = "${attr.cpu.arch}"
        value     = "amd64"
    }
    constraint {
        attribute = "${node.class}"
        operator  = "set_contains"
        value     = "cluster"
    }
    group "jellyfin-vue" {
        network {
            mode = "host"
            port "http" {
                to = 80
            }
        }
        task "jellyfin-vue" {
            driver = "docker"
            service {
                name = "jellyfin-vue"
                port = "http"
                tags = [
                    "homer.enable=true",
                    "homer.name=${NOMAD_TASK_NAME}",
                    "homer.service=Application",
                    "homer.target=_blank",
                    "homer.logo=https://${NOMAD_TASK_NAME}.ducamps.eu/icon.png",
                    "homer.url=https://${NOMAD_TASK_NAME}.ducamps.eu",
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_TASK_NAME}.ducamps.eu`)",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_TASK_NAME}.ducamps.eu",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=myresolver",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.entrypoints=web,websecure",
                ]
            }
            config {
                image = "ghcr.service.consul:5000/jellyfin/jellyfin-vue:unstable"
                ports = ["http"]
            }
            env {
                DEFAULT_SERVERS = "${NOMAD_TASK_NAME}.ducamps.eu"
            }

            resources {
                memory = 50
                cpu    = 100
            }
        }
    }
    group "jellyfin" {
        network {
            mode = "host"
            port "http" {
                to = 8096
            }
        }

        task "jellyfin" {
            driver = "docker"
            service {
                name = "jellyfin"
                port = "http"
                tags = [
                    "homer.enable=true",
                    "homer.name=jellyfin",
                    "homer.service=Application",
                    "homer.target=_blank",
                    "homer.logo=https://${NOMAD_JOB_NAME}.ducamps.eu/web/assets/img/banner-light.png",
                    "homer.url=https://${NOMAD_JOB_NAME}.ducamps.eu",
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
                ]
            }
            config {
                image = "docker.service.consul:5000/jellyfin/jellyfin"
                ports = ["http"]
                volumes = [
                    "/mnt/diskstation/nomad/jellyfin/config:/config",
                    "/mnt/diskstation/nomad/jellyfin/cache:/cache",
                    "/mnt/diskstation/media:/media",
                    "/mnt/diskstation/music:/music",
                ]
                devices = [
                    {
                        host_path      = "/dev/dri/renderD128"
                        container_path = "/dev/dri/renderD128"
                    },
                    {
                        host_path      = "/dev/dri/card0"
                        container_path = "/dev/dri/card0"
                    }
                ]
            }
            resources {
                memory     = 2000
                memory_max = 4000
                cpu        = 3000
            }
        }
    }
}
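The `devices` list passes the host's DRI nodes into the container for hardware-accelerated transcoding, which implicitly restricts placement to clients that actually have them. A quick pre-check on a candidate node (device numbering can differ per machine):

```sh
# Confirm the DRI nodes referenced by the job exist on this client
ls -l /dev/dri/renderD128 /dev/dri/card0
```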
@ -1 +0,0 @@
../makefile
@ -1,95 +0,0 @@
job "mealie" {
    datacenters = ["homelab"]
    priority    = 50
    type        = "service"
    meta {
        forcedeploy = "0"
    }
    constraint {
        attribute = "${attr.cpu.arch}"
        value     = "amd64"
    }

    group "mealie" {
        network {
            mode = "host"
            port "http" {
                to = 9000
            }
        }
        volume "mealie-data" {
            type            = "csi"
            source          = "mealie-data"
            access_mode     = "multi-node-multi-writer"
            attachment_mode = "file-system"
        }
        vault {
            policies = ["mealie"]
        }
        task "mealie-server" {
            driver = "docker"
            service {
                name = "mealie"
                port = "http"
                tags = [
                    "homer.enable=true",
                    "homer.name=Mealie",
                    "homer.service=Application",
                    "homer.subtitle=Mealie",
                    "homer.logo=https://mealie.ducamps.eu/favicon.ico",
                    "homer.target=_blank",
                    "homer.url=https://${NOMAD_JOB_NAME}.ducamps.eu",
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
                ]
            }
            config {
                image = "ghcr.io/mealie-recipes/mealie"
                ports = ["http"]
            }
            volume_mount {
                volume      = "mealie-data"
                destination = "/app/data"
            }
            env {
                PUID                   = "1000001"
                PGID                   = "1000001"
                TZ                     = "Europe/Paris"
                MAX_WORKERS            = 1
                WEB_CONCURRENCY        = 1
                BASE_URL               = "https://mealie.ducamps.eu"
                OIDC_USER_GROUP        = "MealieUsers"
                OIDC_ADMIN_GROUP       = "MealieAdmins"
                OIDC_AUTH_ENABLED      = "True"
                OIDC_SIGNUP_ENABLED    = "true"
                OIDC_CONFIGURATION_URL = "https://auth.ducamps.eu/.well-known/openid-configuration"
                OIDC_CLIENT_ID         = "mealie"
                OIDC_AUTO_REDIRECT     = "false"
                OIDC_PROVIDER_NAME     = "authelia"
                DB_ENGINE              = "postgres"
                POSTGRES_USER          = "mealie"
                POSTGRES_SERVER        = "active.db.service.consul"
                POSTGRES_PORT          = 5432
                POSTGRES_DB            = "mealie"
                LOG_LEVEL              = "DEBUG"
            }
            template {
                data        = <<EOH
{{ with secret "secrets/data/database/mealie"}}POSTGRES_PASSWORD= "{{ .Data.data.password }}" {{end}}
{{ with secret "secrets/data/authelia/mealie"}}OIDC_CLIENT_SECRET= "{{ .Data.data.password }}" {{end}}
EOH
                destination = "secrets/var.env"
                env         = true
            }
            resources {
                memory = 400
            }
        }
    }
}
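Vault only injects the `mealie` password; the role and database themselves are assumed to pre-exist on the Postgres server. A hypothetical one-time bootstrap, run as a superuser (the password here is a placeholder for whatever is stored in Vault):

```sh
psql -h active.db.service.consul -U postgres <<'SQL'
-- placeholder password: keep it in sync with secrets/data/database/mealie
CREATE ROLE mealie LOGIN PASSWORD 'change-me';
CREATE DATABASE mealie OWNER mealie;
SQL
```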
@ -1,64 +0,0 @@
job "rutorrentlocal" {
    datacenters = ["homelab"]
    priority    = 80
    type        = "service"
    meta {
        forcedeploy = "0"
    }
    constraint {
        attribute = "${node.unique.name}"
        operator  = "set_contains"
        value     = "oberon"
    }
    group "bittorent" {
        network {
            mode = "host"
            port "http" {
                to = 8080
            }
            port "torrent" {
                static = 6881
            }
            port "ecoute" {
                static = 50000
            }
        }
        task "bittorent" {
            driver = "podman"
            service {
                name         = "bittorentlocal"
                port         = "http"
                address_mode = "host"
                tags = [
                ]
            }
            user = "root"
            config {
                image = "docker.service.consul:5000/crazymax/rtorrent-rutorrent:edge"
                ports = [
                    "http",
                    "torrent",
                    "ecoute"
                ]
                volumes = [
                    "/exports/nomad/rutorrent/data:/data",
                    "/exports/nomad/rutorrent/downloads:/downloads"
                ]
            }
            env {
                PUID       = 100001
                PGID       = 10
                UMASK      = 002
                WEBUI_PORT = "8080"
            }

            resources {
                memory = 650
            }
        }
    }
}
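This is the only job using the community `podman` task driver rather than `docker`, and the `${node.unique.name}` constraint pins it to `oberon`, where that driver plugin is expected to be installed. A quick health check on the node (driver status appears in the verbose output):

```sh
# "podman" should be listed as detected and healthy under Driver Status
nomad node status -self -verbose
```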
@ -1,89 +0,0 @@
job "vikunja" {
    datacenters = ["homelab"]
    priority    = 70
    type        = "service"
    meta {
        forcedeploy = "0"
    }

    group "vikunja" {
        network {
            mode = "host"
            port "front" {
                to = 80
            }
            port "api" {
                to = 3456
            }
        }
        vault {
            policies = ["vikunja"]
        }
        task "api" {
            driver = "docker"
            service {
                name = "vikunja-api"
                port = "api"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.eu`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.eu",
                    "traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.tls.certresolver=myresolver",
                    "traefik.http.routers.${NOMAD_JOB_NAME}-${NOMAD_TASK_NAME}.entrypoints=web,websecure",
                    "homer.enable=true",
                    "homer.name=vikunka",
                    "homer.service=Application",
                    "homer.logo=https://${NOMAD_JOB_NAME}.ducamps.eu/images/icons/apple-touch-icon-180x180.png",
                    "homer.target=_blank",
                    "homer.url=https://${NOMAD_JOB_NAME}.ducamps.eu",
                ]
            }
            config {
                image   = "docker.service.consul:5000/vikunja/vikunja"
                ports   = ["api", "front"]
                volumes = ["local/config.yml:/etc/vikunja/config.yml"]
            }
            env {
                VIKUNJA_DATABASE_HOST       = "active.db.service.consul"
                VIKUNJA_DATABASE_TYPE       = "postgres"
                VIKUNJA_DATABASE_USER       = "vikunja"
                VIKUNJA_DATABASE_DATABASE   = "vikunja"
                VIKUNJA_SERVICE_JWTSECRET   = uuidv4()
                VIKUNJA_SERVICE_FRONTENDURL = "https://${NOMAD_JOB_NAME}.ducamps.eu/"
                VIKUNJA_AUTH_LOCAL          = false
            }

            template {
                data        = <<EOH
{{ with secret "secrets/data/database/vikunja"}}
VIKUNJA_DATABASE_PASSWORD= "{{ .Data.data.password }}"
{{end}}
EOH
                destination = "secrets/sample.env"
                env         = true
            }
            template {
                data        = <<EOH
auth:
  openid:
    enabled: true
    redirecturl: https://vikunja.ducamps.eu/auth/openid/
    providers:
      - name: Authelia
        authurl: https://auth.ducamps.eu
        clientid: vikunja
        clientsecret: {{ with secret "secrets/data/authelia/vikunja"}} {{ .Data.data.password }} {{end}}
        scope: openid profile email
EOH
                destination = "local/config.yml"
            }
            resources {
                memory = 100
            }
        }
    }
}
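Since the OpenID settings are rendered from Vault into `local/config.yml` at runtime, the easiest sanity check is to read the rendered file out of a live allocation. A sketch; replace the placeholder with an allocation ID from `nomad job status vikunja`:

```sh
# Paths are relative to the allocation working directory: <task>/local/...
nomad alloc fs <alloc-id> api/local/config.yml
```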
@ -6,17 +6,13 @@ job "backup-postgress" {
    meta {
        forcedeploy = "0"
    }
-   constraint {
-       attribute = "${node.class}"
-       operator  = "set_contains"
-       value     = "cluster"
-   }
    constraint {
        attribute = "${attr.cpu.arch}"
        value     = "amd64"
    }
    periodic {
-       crons            = ["0 3 * * *"]
+       cron             = "0 3 * * *"
        prohibit_overlap = true
    }
    group "backup-postgress" {
@ -32,9 +28,9 @@ job "backup-postgress" {
            name = "backup-postgress"
        }
        config {
-           image = "docker.service.consul:5000/ducampsv/docker-backup-postgres:latest"
+           image = "ducampsv/docker-backup-postgres:latest"
            volumes = [
-               "/mnt/diskstation/nomad/backup/postgres:/backup"
+               "/mnt/diskstation/git/backup/postgres:/backup"
            ]
        }
        template {
@ -42,15 +38,14 @@ job "backup-postgress" {
{{ with secret "secrets/data/database/dump"}}
PGUSER = "dump"
PGPASSWORD = "{{ .Data.data.password }}"
-PGHOST = "active.db.service.consul"
+PGHOST = "db1.ducamps.win"
{{end}}
EOH
            destination = "secrets/secrets.env"
            env         = true
        }
        resources {
-           memory     = 180
-           memory_max = 400
+           memory = 100
        }
    }
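The `periodic` stanza is the interesting change here: `master` uses the newer `crons` list attribute, while `migratevault` still carries the singular `cron` string that recent Nomad releases deprecate. Either way, the job can be exercised without waiting for 03:00; a sketch:

```sh
# Trigger an immediate run of the periodic batch job
nomad job periodic force backup-postgress
```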
@ -1,40 +0,0 @@
job "backup-consul" {
    datacenters = ["homelab"]
    priority    = 50
    type        = "batch"
    meta {
        forcedeploy = "0"
    }
    constraint {
        attribute = "${node.class}"
        operator  = "set_contains"
        value     = "cluster"
    }
    constraint {
        attribute = "${attr.cpu.arch}"
        value     = "amd64"
    }
    periodic {
        crons            = ["30 3 * * *"]
        prohibit_overlap = true
    }
    group "backup-consul" {
        network {
            mode = "host"
        }
        task "consul-backup" {
            driver = "docker"
            config {
                image = "docker.service.consul:5000/ducampsv/docker-consul-backup:latest"
                volumes = [
                    "/mnt/diskstation/nomad/backup/consul:/backup"
                ]
            }
            resources {
                memory = 100
            }
        }
    }
}
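The backup image presumably wraps Consul's stock snapshot workflow; the equivalent manual commands are useful when an out-of-band snapshot is needed (assuming a reachable agent and, if ACLs are enabled, a token in `CONSUL_HTTP_TOKEN`):

```sh
# Take and verify a point-in-time snapshot of the Consul raft state
consul snapshot save backup.snap
consul snapshot inspect backup.snap
```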
@ -1,53 +0,0 @@
job "backup-vault" {
    datacenters = ["homelab"]
    priority    = 50
    type        = "batch"
    meta {
        forcedeploy = "0"
    }
    constraint {
        attribute = "${node.class}"
        operator  = "set_contains"
        value     = "cluster"
    }
    constraint {
        attribute = "${attr.cpu.arch}"
        value     = "amd64"
    }
    periodic {
        crons            = ["30 3 * * *"]
        prohibit_overlap = true
    }
    group "backup-vault" {
        network {
            mode = "host"
        }
        vault {
            policies = ["vault-backup"]
        }
        task "backup-vault" {
            driver = "docker"
            config {
                image = "docker.service.consul:5000/ducampsv/docker-vault-backup:latest"
                volumes = [
                    "/mnt/diskstation/nomad/backup/vault:/backup"
                ]
            }
            template {
                data        = <<EOH
{{ with secret "secrets/data/nomad/vault-backup"}}
VAULT_APPROLEID = "{{ .Data.data.VAULT_APPROLEID }}"
VAULT_SECRETID = "{{ .Data.data.VAULT_SECRETID }}"
{{end}}
EOH
                destination = "secrets/secrets.env"
                env         = true
            }
            resources {
                memory = 100
            }
        }
    }
}
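The container logs in with the AppRole credentials injected above before reading anything from Vault; the manual equivalent of that first step looks like this (a sketch, assuming `VAULT_ADDR` points at the server and both variables are exported):

```sh
# Exchange the AppRole credentials for a short-lived Vault token
vault write -field=token auth/approle/login \
    role_id="$VAULT_APPROLEID" secret_id="$VAULT_SECRETID"
```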
Some files were not shown because too many files have changed in this diff.