Compare commits
No commits in common. "master" and "retour" have entirely different histories.
66 .drone.yml
@@ -1,66 +0,0 @@
---
kind: pipeline
type: docker
name: lint

steps:
  - name: yaml linting
    image: pipelinecomponents/yamllint
    commands:
      - yamllint .
  - name: markdown linting
    image: 06kellyjac/markdownlint-cli
    commands:
      - markdownlint . --config .markdownlint.yaml

---
kind: pipeline
type: docker
name: test build

steps:
  - name: build
    image: squidfunk/mkdocs-material
    commands:
      - mkdocs build --clean --strict --verbose --site-dir build
trigger:
  event:
    exclude:
      - push
---
kind: pipeline
type: docker
name: deploy

steps:
  - name: build
    image: squidfunk/mkdocs-material
    commands:
      - mkdocs build --clean --strict --verbose --site-dir homelab

  - name: deploy
    image: appleboy/drone-scp
    when:
      status:
        - success
    settings:
      host: www.service.consul
      user: drone-deploy
      overwrite: true
      key:
        from_secret: dronePrivateKey
      target: /srv/http
      source: homelab

trigger:
  branch:
    - master
  event:
    - push

---
kind: secret
name: dronePrivateKey
get:
  path: secrets/data/droneci/keyRSA
  name: dronePrivateKey
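Note (not part of the diff): the removed CI definition can be sanity-checked without a Drone server by using the standalone Drone CLI, assuming it and Docker are installed locally; a minimal sketch:

```sh
# Sketch only - the drone CLI is an external tool, not something shipped in this repository.
drone lint .drone.yml            # validate the pipeline syntax
drone exec --pipeline=lint       # run the "lint" pipeline locally inside Docker
```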
3 .gitignore vendored
@@ -35,7 +35,4 @@ override.tf.json
 # Ignore CLI configuration files
 .terraformrc
 terraform.rc
-site
 
-
-.vagrant
@@ -1,10 +0,0 @@
---
# Default state for all rules
default: true
MD009:
  strict: false

MD013: false
MD033: false
MD024: false
MD041: false
33 .yamllint
@@ -1,33 +0,0 @@
---
# Based on ansible-lint config
extends: default

rules:
  braces:
    max-spaces-inside: 1
    level: error
  brackets:
    max-spaces-inside: 1
    level: error
  colons:
    max-spaces-after: -1
    level: error
  commas:
    max-spaces-after: -1
    level: error
  comments: disable
  comments-indentation: disable
  document-start: disable
  empty-lines:
    max: 3
    level: error
  hyphens:
    level: error
  indentation: disable
  key-duplicates: enable
  line-length: disable
  new-line-at-end-of-file: disable
  new-lines:
    type: unix
  trailing-spaces: disable
  truthy: disable
48 README.md
@@ -1,48 +0,0 @@
# Homelab

This repository contains my homelab Infrastructure as Code.

This homelab is built on the HashiCorp software stack:

- Nomad
- Consul
- Vault

## Dev

The dev stack is built on Vagrant boxes with the libvirt provider.

It currently needs the production Vault and LDAP to be up in order to be provisioned correctly.

To launch dev stack provisioning:

```sh
make create-dev
```

## Rebuild

## Architecture

```mermaid
flowchart LR
    subgraph Home
        bleys[bleys]
        oscar[oscar]
        gerard[gerard]
        LAN
        NAS
    end
    subgraph Cloud
        corwin[corwin]
    end
    LAN -- main road --- oscar
    LAN --- bleys
    LAN --- gerard
    LAN --- NAS
    bleys <--wireguard--> corwin
    oscar <--wireguard--> corwin
    gerard <--wireguard--> corwin
    corwin <--> internet

```
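Aside (the Makefile behind `make create-dev` is not shown in this compare): it presumably wraps the usual Vagrant/libvirt workflow for the machines defined in the Vagrantfile further down, roughly:

```sh
# Assumed equivalent manual steps; machine names come from the Vagrantfile in this diff.
vagrant up --provider=libvirt oscar-dev gerard-dev nas-dev
vagrant provision oscar-dev      # re-run only the Ansible bootstrap playbook
```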
11 Readme.md Normal file
@@ -0,0 +1,11 @@
# homelab

## rebuild

To rebuild from scratch, Ansible needs a Vault server that is up and unsealed.
You can rebuild a standalone Vault server from a Consul database snapshot with:

```
make vault-dev FILE=./yourconsulsnaphot.snap
```
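Aside (the `vault-dev` Make target is not shown here, so these steps are an assumption): rebuilding a throwaway Vault from a Consul snapshot typically looks like:

```sh
# Hypothetical sketch of what `make vault-dev FILE=...` likely automates.
consul agent -dev &                                # disposable local Consul server
consul snapshot restore ./yourconsulsnaphot.snap   # load the saved Consul state
vault server -config=vault.hcl &                   # vault.hcl is a placeholder config pointing at that Consul
vault operator unseal                              # repeat with enough key shares before running Ansible
```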
105 Vagrantfile vendored
@@ -1,105 +0,0 @@
Vagrant.configure('2') do |config|
  if Vagrant.has_plugin?('vagrant-cachier')
    config.cache.scope = 'machine'
    config.cache.enable :pacman
  end
  config.vm.provider :libvirt do |libvirt|
    libvirt.management_network_domain = "lan.ducamps.dev"

  end
  config.vm.define "oscar-dev" do |c|
    # Box definition
    c.vm.box = "archlinux/archlinux"
    # Config options
    c.vm.synced_folder ".", "/vagrant", disabled: true
    c.ssh.insert_key = true
    c.vm.hostname = "oscar-dev"
    # Network

    # instance_raw_config_args
    # Provider
    c.vm.provider "libvirt" do |libvirt, override|

      libvirt.memory = 2048
      libvirt.cpus = 2
    end
    c.vm.provision "ansible" do |bootstrap|
      bootstrap.playbook = "ansible/playbooks/bootstrap.yml"
      bootstrap.galaxy_roles_path = "ansible/roles"
      bootstrap.limit = "oscar-dev"
      bootstrap.extra_vars = { ansible_python_interpreter: "/usr/bin/python3" }
    end
  end

  config.vm.define "merlin-dev" do |c|
    # Box definition
    c.vm.box = "archlinux/archlinux"
    # Config options
    c.vm.synced_folder ".", "/vagrant", disabled: true
    c.ssh.insert_key = true
    c.vm.hostname = "merlin-dev"
    # Network
    # instance_raw_config_args
    # Provider
    c.vm.provider "libvirt" do |libvirt, override|

      libvirt.memory = 512
      libvirt.cpus = 2

    end
    c.vm.provision "ansible" do |bootstrap|
      bootstrap.playbook = "ansible/playbooks/bootstrap.yml"
      bootstrap.galaxy_roles_path = "ansible/roles"
      bootstrap.limit = "merlin-dev"
      bootstrap.extra_vars = { ansible_python_interpreter: "/usr/bin/python3" }
    end
  end

  config.vm.define "gerard-dev" do |c|
    # Box definition
    c.vm.box = "archlinux/archlinux"
    # Config options

    c.vm.synced_folder ".", "/vagrant", disabled: true
    c.ssh.insert_key = true
    c.vm.hostname = "gerard-dev"
    # Network
    # instance_raw_config_args
    # Provider
    c.vm.provider "libvirt" do |libvirt, override|
      libvirt.memory = 2048
      libvirt.cpus = 2
    end
    c.vm.provision "ansible" do |bootstrap|
      bootstrap.playbook = "ansible/playbooks/bootstrap.yml"
      bootstrap.galaxy_roles_path = "ansible/roles"
      bootstrap.limit = "gerard-dev"
      bootstrap.extra_vars = { ansible_python_interpreter: "/usr/bin/python3" }
    end
  end

  config.vm.define "nas-dev" do |c|
    # Box definition
    c.vm.box = "archlinux/archlinux"
    # Config options
    c.vm.synced_folder ".", "/vagrant", disabled: true
    c.ssh.insert_key = true
    c.vm.hostname = "nas-dev"
    # Network
    # instance_raw_config_args
    # Provider
    c.vm.provider "libvirt" do |libvirt, override|

      libvirt.memory = 2048
      libvirt.cpus = 2
    end

    c.vm.provision "ansible" do |bootstrap|
      bootstrap.playbook = "ansible/playbooks/bootstrap.yml"
      bootstrap.galaxy_roles_path = "ansible/roles"
      bootstrap.limit = "nas-dev"
      bootstrap.extra_vars = { ansible_python_interpreter: "/usr/bin/python3" }
    end
  end

end
@@ -1,2 +0,0 @@
skip_list:
  - 'fcqn-builtins'
@@ -99,7 +99,7 @@ host_key_checking = False
 #sudo_flags = -H -S -n
 
 # SSH timeout
-timeout = 30
+#timeout = 10
 
 # default user to use for playbooks if user is not specified
 # (/usr/bin/ansible will use current user as default)
@@ -136,7 +136,7 @@ timeout = 30
 
 # If set, configures the path to the Vault password file as an alternative to
 # specifying --vault-password-file on the command line.
-vault_password_file = ./misc/vault-keyring-client.sh
+#vault_password_file = /path/to/vault_password_file
 
 # format of string {{ ansible_managed }} available within Jinja2
 # templates indicates to users editing templates files will be replaced.
@@ -275,7 +275,7 @@ retry_files_enabled = False
 # turn this on to have behaviour more like Ansible prior to 2.1.x. See
 # https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user
 # for more secure ways to fix this than enabling this option.
-allow_world_readable_tmpfiles = True
+#allow_world_readable_tmpfiles = False
 
 # controls the compression level of variables sent to
 # worker processes. At the default of 0, no compression
@@ -1,24 +0,0 @@
pdns_config:
  local-address: "127.0.0.1"
  local-port: "5300"
  api: yes
  api-key:

pdns_backends:
  gsqlite3:
    dnssec: yes
    database: "/var/lib/powerdns/powerdns.sqlite"
pdns_sqlite_databases_locations:
  - "/var/lib/powerdns/powerdns.sqlite"

pdns_rec_config:
  forward-zones:
    - "{{ consul_domain }}=127.0.0.1:8600"
    - "ducamps.win=192.168.1.10"
    - "{{ domain.name }}=192.168.1.5"
    - "lan.{{ domain.name }}=192.168.1.5"
    - "1.168.192.in-addr.arpa=192.168.1.5:5300"

  local-address: "{{ hostvars[inventory_hostname]['ansible_'+ default_interface].ipv4.address|default(ansible_default_ipv4.address) }}"
  dnssec: "off"
@@ -1,90 +0,0 @@
NAS_nomad_folder:
  - name: actualbudget
  - name: archiso
    owner: 1000001
  - name: backup
    owner: 1000001
  - name: borgmatic
  - name: crowdsec
    owner: 1000001
  - name: dms
    owner: 1000001
  - name: filestash
    owner: 1000
  - name: gitea
    owner: 1000000
  - name: grafana
    owner: 472
  - name: hass
    owner: 1000001
  - name: homer
    owner: 1000001
  - name: immich/cache
  - name: immich/upload
  - name: jellyfin
    owner: 1000001
  - name: loki
    owner: 10001
  - name: mealie
    owner: 1000001
  - name: mosquito
    owner: 1883
  - name: pacoloco
    owner: 1000001
  - name: pdns-auth
    owner: 1000001
  - name: pdns-admin
    owner: 1000001
  - name: pihole
    owner: 999
  - name: prometheus
    owner: 65534
  - name: prowlarr
    owner: 1000001
  - name: radicale
    owner: 1000001
  - name: openldap
    owner: 1001
  - name: registry/ghcr
  - name: registry/docker
  - name: syncthing
    owner: 1000001
  - name: traefik
    owner: 1000001
  - name: tt-rss
    owner: 1000001
  - name: vaultwarden
    owner: 1000001
  - name: zigbee2mqtt
    owner: 1000001
nas_bind_target: "/exports"

nas_bind_source:
  - dest: "{{ nas_bind_target }}/nomad"
    source: /data/data1/nomad
  - dest: "{{ nas_bind_target }}/music"
    source: /data/data1/music
  - dest: "{{ nas_bind_target }}/download"
    source: /data/data1/download
  - dest: "{{ nas_bind_target }}/media/serie"
    source: /data/data2/serie
  - dest: "{{ nas_bind_target }}/media/film"
    source: /data/data3/film
  - dest: "{{ nas_bind_target }}/photo"
    source: /data/data1/photo
  - dest: "{{ nas_bind_target }}/homes"
    source: /data/data1/homes
  - dest: "{{ nas_bind_target }}/ebook"
    source: /data/data1/ebook
  - dest: "{{ nas_bind_target }}/media/download/serie"
    source: /data/data1/download/serie
  - dest: "{{ nas_bind_target }}/media/download/film"
    source: /data/data1/download/film
  - dest: "{{ nas_bind_target }}/music/download/"
    source: /data/data1/download/music
@@ -1 +0,0 @@
vsftpd_config: {}
@@ -1,15 +0,0 @@
nfs_cluster_list: "{% for server in groups['all']%} {% if hostvars[server]['ansible_default_ipv4']['address'] is defined %} {{hostvars[server]['ansible_' + hostvars[server]['nfs_iface']|default('')].ipv4.address|default(hostvars[server]['ansible_default_ipv4']['address'],true)}}{{ nfs_options }} {% endif %} {%endfor%}"
nfs_options: "(rw,no_root_squash,crossmnt,async,insecure_locks,sec=sys)"
nfs_consul_service: true
nfs_bind_target: "/exports"


nfs_exports:
  - "{{ nas_bind_target }} *(fsid=0,insecure,no_subtree_check)"
  - "{{ nas_bind_target }}/nomad {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/download {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/music {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/media {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/photo {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/homes {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/ebook {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
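Aside: the `nfs_cluster_list` template above is easiest to check by rendering it against a real inventory, e.g. with the stock `debug` module (the inventory path and the `NAS` group name are assumptions here):

```sh
# Renders the export list exactly as Ansible would compute it for the NAS group.
ansible NAS -i ansible/hosts.yml -m debug -a "var=nfs_exports"
```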
@@ -1 +0,0 @@
nomad_node_class: 'NAS'
@@ -1,25 +0,0 @@
samba_passdb_backend: tdbsam
samba_shares_root: /exports
samba_shares:
  - name: media
    comment: "media"
    write_list: "@NAS_media"
    browseable: true
  - name: ebook
    comment: "ebook"
    write_list: "@NAS_ebook"
    browseable: true
  - name: music
    comment: "music"
    write_list: "@NAS_music"
    browseable: true
  - name: photo
    comment: "photo"
    write_list: "@NAS_photo"
    browseable: true
  - name: download
    comment: "downlaod"
    write_list: "@NAS_download"
    browseable: true
samba_load_homes: True
samba_homes_include: samba_homes_include.conf
24 ansible/group_vars/VMServer Normal file
@@ -0,0 +1,24 @@
system_upgrade: true
nginx_error_log: "/var/log/nginx/error.log debug"

hosts_entries:
  - name: ducamps.win
    ip: 127.0.0.1
    aliases:
      - arch.ducamps.win
      - www.ducamps.win
      - file.ducamps.win
      - supysonic.ducamps.win
      - syno.ducamps.win
      - vault.ducamps.win
      - ww.ducamps.win
      - hass.ducamps.win
      - git.ducamps.win

consul_bootstrap_expect: 1
nomad_bootstrap_expect: 1
nomad_datacenter: hml
consul_server: False
nomad_server: False
consul_retry_join_force:
  - 192.168.1.40
99
ansible/group_vars/VPS
Normal file
99
ansible/group_vars/VPS
Normal file
@ -0,0 +1,99 @@
|
|||||||
|
# defaults file for ansible-arch-provissionning
|
||||||
|
partition_table:
|
||||||
|
- device: "/dev/sda"
|
||||||
|
label: gpt
|
||||||
|
settings:
|
||||||
|
- number: 1
|
||||||
|
part_end: 64MB
|
||||||
|
flags: [boot, esp]
|
||||||
|
fstype: vfat
|
||||||
|
format: yes
|
||||||
|
- number: 2
|
||||||
|
part_start: 512MB
|
||||||
|
part_end: 1524MB
|
||||||
|
flags: []
|
||||||
|
fstype: swap
|
||||||
|
format: yes
|
||||||
|
- number: 3
|
||||||
|
part_start: 1524MB
|
||||||
|
flags: [lvm]
|
||||||
|
fstype: ext4
|
||||||
|
format: yes
|
||||||
|
#- device: "/dev/sdb"
|
||||||
|
#settings:
|
||||||
|
#- number: 1
|
||||||
|
#name: home
|
||||||
|
#fstype: ext4
|
||||||
|
#format:
|
||||||
|
mount_table:
|
||||||
|
- device: "/dev/sda"
|
||||||
|
settings:
|
||||||
|
- number: 3
|
||||||
|
mountpath: /mnt
|
||||||
|
fstype: ext4
|
||||||
|
- number: 1
|
||||||
|
mountpath: /mnt/boot
|
||||||
|
fstype: vfat
|
||||||
|
|
||||||
|
#need vfat boot partition with esp label
|
||||||
|
provissionning_UEFI_Enable: True
|
||||||
|
sssd_configure: False
|
||||||
|
nomad_datacenter: hetzner
|
||||||
|
|
||||||
|
systemd_mounts:
|
||||||
|
diskstation_nomad:
|
||||||
|
share: diskstation.ducamps.win:/volume2/nomad
|
||||||
|
mount: /mnt/diskstation/nomad
|
||||||
|
type: nfs
|
||||||
|
options:
|
||||||
|
- " "
|
||||||
|
automount: true
|
||||||
|
hetzner_storage:
|
||||||
|
share: //u304977.your-storagebox.de/backup
|
||||||
|
mount: /mnt/hetzner/storagebox
|
||||||
|
type: cifs
|
||||||
|
options:
|
||||||
|
- credentials=/etc/creds/hetzner_credentials
|
||||||
|
- uid= 1024
|
||||||
|
- gid= 10
|
||||||
|
- vers=3.0
|
||||||
|
- mfsymlinks
|
||||||
|
automount: true
|
||||||
|
diskstation_git:
|
||||||
|
share: diskstation.ducamps.win:/volume2/git
|
||||||
|
mount: /mnt/diskstation/git
|
||||||
|
type: nfs
|
||||||
|
options:
|
||||||
|
- " "
|
||||||
|
automount: true
|
||||||
|
diskstation_CardDav:
|
||||||
|
share: diskstation.ducamps.win:/volume2/CardDav
|
||||||
|
mount: /mnt/diskstation/CardDav
|
||||||
|
type: nfs
|
||||||
|
options:
|
||||||
|
- " "
|
||||||
|
automount: true
|
||||||
|
diskstation_music:
|
||||||
|
share: diskstation.ducamps.win:/volume2/music
|
||||||
|
mount: /mnt/diskstation/music
|
||||||
|
type: nfs
|
||||||
|
options:
|
||||||
|
- " "
|
||||||
|
automount: true
|
||||||
|
|
||||||
|
|
||||||
|
credentials_files:
|
||||||
|
1:
|
||||||
|
type: smb
|
||||||
|
path: /etc/creds/hetzner_credentials
|
||||||
|
username: u304977
|
||||||
|
password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/storage:hetzner') }}"
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
systemd_mounts_enabled:
|
||||||
|
- diskstation_nomad
|
||||||
|
- hetzner_storage
|
||||||
|
- diskstation_git
|
||||||
|
- diskstation_music
|
||||||
|
- diskstation_CardDav
|
@ -1,45 +0,0 @@
|
|||||||
# defaults file for ansible-arch-provissionning
|
|
||||||
partition_table:
|
|
||||||
- device: "/dev/sda"
|
|
||||||
label: gpt
|
|
||||||
settings:
|
|
||||||
- number: 1
|
|
||||||
part_end: 64MB
|
|
||||||
flags: [boot, esp]
|
|
||||||
fstype: vfat
|
|
||||||
format: yes
|
|
||||||
- number: 2
|
|
||||||
part_start: 512MB
|
|
||||||
part_end: 1524MB
|
|
||||||
flags: []
|
|
||||||
fstype: swap
|
|
||||||
format: yes
|
|
||||||
- number: 3
|
|
||||||
part_start: 1524MB
|
|
||||||
flags: [lvm]
|
|
||||||
fstype: ext4
|
|
||||||
format: yes
|
|
||||||
#- device: "/dev/sdb"
|
|
||||||
#settings:
|
|
||||||
#- number: 1
|
|
||||||
#name: home
|
|
||||||
#fstype: ext4
|
|
||||||
#format:
|
|
||||||
mount_table:
|
|
||||||
- device: "/dev/sda"
|
|
||||||
settings:
|
|
||||||
- number: 3
|
|
||||||
mountpath: /mnt
|
|
||||||
fstype: ext4
|
|
||||||
- number: 1
|
|
||||||
mountpath: /mnt/boot
|
|
||||||
fstype: vfat
|
|
||||||
|
|
||||||
#need vfat boot partition with esp label
|
|
||||||
provissionning_UEFI_Enable: True
|
|
||||||
#sssd_configure: False
|
|
||||||
nomad_datacenter: hetzner
|
|
||||||
|
|
||||||
consul_server: False
|
|
||||||
nomad_server: False
|
|
||||||
|
|
@ -1,28 +0,0 @@
|
|||||||
systemd_mounts:
|
|
||||||
diskstation_nomad:
|
|
||||||
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/nomad"
|
|
||||||
mount: /mnt/diskstation/nomad
|
|
||||||
type: nfs
|
|
||||||
options:
|
|
||||||
- "vers=4"
|
|
||||||
automount: "{{ env_automount }}"
|
|
||||||
enabled: true
|
|
||||||
hetzner_storage:
|
|
||||||
share: //u304977.your-storagebox.de/backup
|
|
||||||
mount: /mnt/hetzner/storagebox
|
|
||||||
type: cifs
|
|
||||||
options:
|
|
||||||
- credentials=/etc/creds/hetzner_credentials
|
|
||||||
- uid=100001
|
|
||||||
- gid=10
|
|
||||||
- vers=3.0
|
|
||||||
- mfsymlinks
|
|
||||||
automount: "{{ env_automount }}"
|
|
||||||
enabled: true
|
|
||||||
|
|
||||||
credentials_files:
|
|
||||||
1:
|
|
||||||
type: smb
|
|
||||||
path: /etc/creds/hetzner_credentials
|
|
||||||
username: u304977
|
|
||||||
password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/storage:hetzner') }}"
|
|
@ -1,12 +0,0 @@
|
|||||||
$ANSIBLE_VAULT;1.1;AES256
|
|
||||||
31303539336464336239376636623862303066336438383739356163616431643366386565366361
|
|
||||||
3264336232303135336334333663326234393832343235640a313638323963666631353836373531
|
|
||||||
61636261623662396330653135326238363630363938323166303861313563393063386161393238
|
|
||||||
3231336232663533640a333763643864363939336566333731353031313739616633623537386435
|
|
||||||
39613934663133613733356433616162363430616439623830663837343530623937656434366663
|
|
||||||
33656466396263616132356337326236383761363834663363643163343231366563333865656433
|
|
||||||
39316365663734653734363362363539623636666261333534313935343566646166316233623535
|
|
||||||
32323831626463656337313266343634303830633936396232663966373264313762346235646665
|
|
||||||
61333139363039363436393962666365336334663164306230393433636664623934343039323637
|
|
||||||
33383036323233646237343031633030353330633734353232343633623864333834646239346362
|
|
||||||
643634303135656333646235343366636361
|
|
@ -1,45 +0,0 @@
|
|||||||
# defaults file for ansible-arch-provissionning
|
|
||||||
partition_table:
|
|
||||||
- device: "/dev/sda"
|
|
||||||
label: gpt
|
|
||||||
settings:
|
|
||||||
- number: 1
|
|
||||||
part_end: 64MB
|
|
||||||
flags: [boot, esp]
|
|
||||||
fstype: vfat
|
|
||||||
format: yes
|
|
||||||
- number: 2
|
|
||||||
part_start: 512MB
|
|
||||||
part_end: 1524MB
|
|
||||||
flags: []
|
|
||||||
fstype: swap
|
|
||||||
format: yes
|
|
||||||
- number: 3
|
|
||||||
part_start: 1524MB
|
|
||||||
flags: [lvm]
|
|
||||||
fstype: ext4
|
|
||||||
format: yes
|
|
||||||
#- device: "/dev/sdb"
|
|
||||||
#settings:
|
|
||||||
#- number: 1
|
|
||||||
#name: home
|
|
||||||
#fstype: ext4
|
|
||||||
#format:
|
|
||||||
mount_table:
|
|
||||||
- device: "/dev/sda"
|
|
||||||
settings:
|
|
||||||
- number: 3
|
|
||||||
mountpath: /mnt
|
|
||||||
fstype: ext4
|
|
||||||
- number: 1
|
|
||||||
mountpath: /mnt/boot
|
|
||||||
fstype: vfat
|
|
||||||
|
|
||||||
#need vfat boot partition with esp label
|
|
||||||
provissionning_UEFI_Enable: True
|
|
||||||
#sssd_configure: False
|
|
||||||
nomad_datacenter: hetzner
|
|
||||||
|
|
||||||
consul_server: False
|
|
||||||
nomad_server: False
|
|
||||||
|
|
@ -1,7 +1,20 @@
|
|||||||
ansible_python_interpreter: /usr/bin/python3
|
##ansible_python_interpreter: /usr/bin/python2
|
||||||
|
user:
|
||||||
|
name: vincent
|
||||||
|
uid: 1024
|
||||||
|
mail: vincent@ducamps.win
|
||||||
|
|
||||||
|
domain:
|
||||||
|
name: ducamps.win
|
||||||
|
|
||||||
hass_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDfVei9iC/Ra5qmSZcLu8z2CTaXCmfn4JSS4o3eu0HhykdYGSqhBTcUDD3/FhcTPQJVFsu1P4Gwqq1dCE+EvaZZRQaMUqVKUpOliThSG6etbImkvqLQQsC1qt+/NqSvfzu2+28A6+YspzuxsViGo7e3Gg9MdwV3LMGh0mcOr/uXb/HIk18sJg5yQpwMfYTj0Wda90nyegcN3F2iZMeauh/aaFJzWcHNakAAewceDYOErU07NhlZgVA2C8HgkJ8HL7AqIVqt9VOx3xLp91DbKTNXSxvyM0X4NQP24P7ZFxAOk/j0AX3hAWhaNmievCHyBWvQve1VshZXFwEIiuHm8q4GSCxK2r0oQudKdtIuQMfuUALigdiSxo522oEiML/2kSk17WsxZwh7SxfD0DKa82fy9iAwcAluWLwJ+yN3nGnDFF/tHYaamSiowpmTTmQ9ycyIPWPLVZclt3BlEt9WH/FPOdzAyY7YLzW9X6jhsU3QwViyaTRGqAdqzUAiflKCMsNzb5kq0oYsDFC+/eqp1USlgTZDhoKtTKRGEjW2KuUlDsXGBeB6w1D8XZxXJXAaHuMh4oMUgLswjLUdTH3oLnnAvfOrl8O66kTkmcQ8i/kr1wDODMy/oNUzs8q4DeRuhD5dpUiTUGYDTWPYj6m6U/GAEHvN/2YEqSgfVff1iQ4VBw==
|
hass_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDfVei9iC/Ra5qmSZcLu8z2CTaXCmfn4JSS4o3eu0HhykdYGSqhBTcUDD3/FhcTPQJVFsu1P4Gwqq1dCE+EvaZZRQaMUqVKUpOliThSG6etbImkvqLQQsC1qt+/NqSvfzu2+28A6+YspzuxsViGo7e3Gg9MdwV3LMGh0mcOr/uXb/HIk18sJg5yQpwMfYTj0Wda90nyegcN3F2iZMeauh/aaFJzWcHNakAAewceDYOErU07NhlZgVA2C8HgkJ8HL7AqIVqt9VOx3xLp91DbKTNXSxvyM0X4NQP24P7ZFxAOk/j0AX3hAWhaNmievCHyBWvQve1VshZXFwEIiuHm8q4GSCxK2r0oQudKdtIuQMfuUALigdiSxo522oEiML/2kSk17WsxZwh7SxfD0DKa82fy9iAwcAluWLwJ+yN3nGnDFF/tHYaamSiowpmTTmQ9ycyIPWPLVZclt3BlEt9WH/FPOdzAyY7YLzW9X6jhsU3QwViyaTRGqAdqzUAiflKCMsNzb5kq0oYsDFC+/eqp1USlgTZDhoKtTKRGEjW2KuUlDsXGBeB6w1D8XZxXJXAaHuMh4oMUgLswjLUdTH3oLnnAvfOrl8O66kTkmcQ8i/kr1wDODMy/oNUzs8q4DeRuhD5dpUiTUGYDTWPYj6m6U/GAEHvN/2YEqSgfVff1iQ4VBw==
|
||||||
system_arch_local_mirror: "https://arch.{{domain.name}}/repo/archlinux_$arch"
|
|
||||||
system_sudoers_group: "serverAdmin"
|
system_arch_local_mirror: "https://arch.{{domain.name}}"
|
||||||
system_ipV6_disable: True
|
|
||||||
system_ip_unprivileged_port_start: 0
|
privatekeytodeploy:
|
||||||
wireguard_mtu: 1420
|
- user: "{{user.name}}"
|
||||||
|
keyfile: "/home/{{user.name}}/.ssh/id_gitea"
|
||||||
|
privatekey: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/privatekey:gitea')}}"
|
||||||
|
- user: root
|
||||||
|
keyfile: /root/.ssh/id_gitea
|
||||||
|
privatekey: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/privatekey:gitea')}}"
|
||||||
|
@@ -1,5 +0,0 @@
consul_client_addr: "0.0.0.0"
consul_datacenter: "homelab"
consul_backup_location: "/mnt/diskstation/git/backup/consul"
consul_ansible_group: all
consul_systemd_resolved_enable: true
@@ -1,8 +0,0 @@
docker_daemon_config:
  dns:
    - 172.17.0.1
    - 192.168.1.6
  mtu: 1420
  insecure-registries:
    - 192.168.1.0/24
    - 192.168.121.0/24
@@ -1,9 +0,0 @@
nomad_docker_allow_caps:
  - NET_ADMIN
  - NET_BROADCAST
  - NET_RAW
nomad_allow_privileged: True
nomad_vault_enabled: true
nomad_vault_address: "http://active.vault.service.{{consul_domain}}:8200"
nomad_vault_role: "nomad-cluster"
nomad_docker_extra_labels: ["job_name", "task_group_name", "task_name", "namespace", "node_name"]
37
ansible/group_vars/all/server
Normal file
37
ansible/group_vars/all/server
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
consul_client_addr: "0.0.0.0"
|
||||||
|
consul_datacenter: "homelab"
|
||||||
|
consul_backup_location: "/mnt/diskstation/git/backup/consul"
|
||||||
|
consul_ansible_group: all
|
||||||
|
consul_bootstrap_expect: 2
|
||||||
|
nomad_vault_enabled: true
|
||||||
|
nomad_vault_address: "http://active.vault.service.consul:8200"
|
||||||
|
nomad_vault_role: "nomad-cluster"
|
||||||
|
nomad_vault_token: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:nomad_vault_token') }}"
|
||||||
|
nomad_bootstrap_expect: 2
|
||||||
|
notification_mail: "{{inventory_hostname}}@{{ domain.name }}"
|
||||||
|
msmtp_mailhub: smtp.{{ domain.name }}
|
||||||
|
msmtp_auth_user: "{{ user.mail }}"
|
||||||
|
msmtp_auth_pass: "{{ lookup('hashi_vault','secret=secrets/data/ansible/other:email') }}"
|
||||||
|
|
||||||
|
docker_users: "{{user.name}}"
|
||||||
|
|
||||||
|
system_user:
|
||||||
|
- name: drone-deploy
|
||||||
|
home: /home/drone-deploy
|
||||||
|
shell: /bin/bash
|
||||||
|
|
||||||
|
keystodeploy:
|
||||||
|
- name: juicessh with password
|
||||||
|
user: "{{user.name}}"
|
||||||
|
sshkey: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINN5V9WPPi2/HwAQuDeaJO3hUPf8HxNMHqVmkf1pDjWg JuiceSSH
|
||||||
|
- name: fixe-pc new
|
||||||
|
user: "{{user.name}}"
|
||||||
|
sshkey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDBrP9akjyailPU9tUMvKrtDsqjI191W1L95u3OFjBqqapXgbDVx1FVtSlIIKcCHZyTII1zgC7woZmNRpmaIJRh6N+VIuRrRs29xx2GUVc4pxflUwwIAK36hgZS3nqmA2biacmPR9HogZLZMcPtZdLhWGlLuUv1cWqbqW7UcDa0lbubCo2v4OQMx/zt37voKAZSkkbH9mVszH6eKxNFy1KXbLYhwXiKfYBnAHbivhiSkZUGV6D4HNj8Jx6IY1YF3bfwMXmt841Q/7OY+t3RTIS8ewvSF+jpQ7GKHBEsZTZUGwIoSyZFFvCgKQVOJu/ZJJS4HNkluilir9Sxtx2LRgy+HHQ251trnsVsJp3ts4uTiMkKJQy1PXy1ZvQXYkip9Af3vlXUMmTyVj8cv+No07G1rZ1pZ3wXKX4RkTsoep5GsYlhyUd7GzsAQQiX9YhYyWDQ6NHBYAGAWbw2BLNxltWa4AyWOa1C8v+1+mRwdvpdMY7powJNCXQaIJmiOZiI/Us= vincent@fixe-pc-2020-03-01
|
||||||
|
- name: zen-pc
|
||||||
|
user: "{{user.name}}"
|
||||||
|
sshkey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCYHkEIa38p3e4+m/LScHm8Ei7H2X/pDksjVAzoJ4fHr8oXc6DKkC8SWwMnh3L4WzWBhfTbzwUgFTNpsxhp/UyJf+fdzmzetlbVlYSuA6yWuSmgMeFbXFImhZ+Sn3i59hLeqAAyrkQLjba2waehdEsuOQ/AGoDbMYm38Xf9Wka/1YIeUPE4gLeLvymRnGw7BSug6Unycy52WlFAquollObOvc7tNiX0uLDh81Dp0KZhqWRs75hfmQ9du4g4uNhFLiF11hOGNgj3PWV+nWe8GWNQYVUBChWX1dsP8ct/ahG9IFXSPEaFD1IZeFp29u2ln3mgKkBtcRTRe1e3CLQqiRsUq2aixVFbSgFMFgGSUiNGNqKR4f9DeyJrYBplSj6HXjWoBny4Wm8+yfk8qR2RtQpS6AUu81xtKnXOaj9Q5VZO3kVF0U3EXHAZutTYDj9mDlhLSBS7x7hmrkRBbIy7adSx9Gx5Ck3/RllqG6KD+LdJa4I0pUTRNetpLpYDeZpwjnDP1r7udaSQMyRMH5YKLzhtHqIV/imn9QO4KCxNxTgwxt9ho6HDvlDGERCxm+yeHUu3CPyq2ZGSF5HHsYTGUtYvQw4JfQyw/5DrZ7IIdU1e7ZuaE3h/NvFgKJPVTP52nmUtIW7pIOkHpn9mddjm/oKMayOzMspLn9HLFVbqi7A5Xw== vincent@zen-pc
|
||||||
|
- name: drone
|
||||||
|
user: drone-deploy
|
||||||
|
sshkey: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDUaK+pQlosmopbZfucll9UdqDOTaODOBwoxRwkJEk1i drone@oscar
|
||||||
|
|
||||||
|
|
@@ -1,5 +1,9 @@
 sssd_configure: true
 # sssd_configure is False by default - by default nothing is done by this role.
-ldap_search_base: "dc=ducamps,dc=eu"
-ldap_uri: "ldaps://ldaps.service.consul"
-ldap_sudo_search_base: "ou=sudoers,dc=ducamps,dc=eu"
+ldap_search_base: "dc=ducamps,dc=win"
+ldap_uri: "ldaps://ldap.ducamps.win"
+ldap_sudo_search_base: "ou=sudoers,dc=ducamps,dc=win"
+ldap_default_bind_dn : "uid=vaultserviceaccount,cn=users,dc=ducamps,dc=win"
+ldap_password : "{{lookup('hashi_vault', 'secret=secrets/data/ansible/other:vaulserviceaccount')}}"
+
+
@ -1,42 +0,0 @@
|
|||||||
user:
|
|
||||||
name: vincent
|
|
||||||
home: /home/vincent
|
|
||||||
uid: 1024
|
|
||||||
mail: vincent@ducamps.eu
|
|
||||||
groups:
|
|
||||||
- docker
|
|
||||||
authorized_keys:
|
|
||||||
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINN5V9WPPi2/HwAQuDeaJO3hUPf8HxNMHqVmkf1pDjWg JuiceSSH
|
|
||||||
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDBrP9akjyailPU9tUMvKrtDsqjI191W1L95u3OFjBqqapXgbDVx1FVtSlIIKcCHZyTII1zgC7woZmNRpmaIJRh6N+VIuRrRs29xx2GUVc4pxflUwwIAK36hgZS3nqmA2biacmPR9HogZLZMcPtZdLhWGlLuUv1cWqbqW7UcDa0lbubCo2v4OQMx/zt37voKAZSkkbH9mVszH6eKxNFy1KXbLYhwXiKfYBnAHbivhiSkZUGV6D4HNj8Jx6IY1YF3bfwMXmt841Q/7OY+t3RTIS8ewvSF+jpQ7GKHBEsZTZUGwIoSyZFFvCgKQVOJu/ZJJS4HNkluilir9Sxtx2LRgy+HHQ251trnsVsJp3ts4uTiMkKJQy1PXy1ZvQXYkip9Af3vlXUMmTyVj8cv+No07G1rZ1pZ3wXKX4RkTsoep5GsYlhyUd7GzsAQQiX9YhYyWDQ6NHBYAGAWbw2BLNxltWa4AyWOa1C8v+1+mRwdvpdMY7powJNCXQaIJmiOZiI/Us= vincent@fixe-pc-2020-03-01
|
|
||||||
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCYHkEIa38p3e4+m/LScHm8Ei7H2X/pDksjVAzoJ4fHr8oXc6DKkC8SWwMnh3L4WzWBhfTbzwUgFTNpsxhp/UyJf+fdzmzetlbVlYSuA6yWuSmgMeFbXFImhZ+Sn3i59hLeqAAyrkQLjba2waehdEsuOQ/AGoDbMYm38Xf9Wka/1YIeUPE4gLeLvymRnGw7BSug6Unycy52WlFAquollObOvc7tNiX0uLDh81Dp0KZhqWRs75hfmQ9du4g4uNhFLiF11hOGNgj3PWV+nWe8GWNQYVUBChWX1dsP8ct/ahG9IFXSPEaFD1IZeFp29u2ln3mgKkBtcRTRe1e3CLQqiRsUq2aixVFbSgFMFgGSUiNGNqKR4f9DeyJrYBplSj6HXjWoBny4Wm8+yfk8qR2RtQpS6AUu81xtKnXOaj9Q5VZO3kVF0U3EXHAZutTYDj9mDlhLSBS7x7hmrkRBbIy7adSx9Gx5Ck3/RllqG6KD+LdJa4I0pUTRNetpLpYDeZpwjnDP1r7udaSQMyRMH5YKLzhtHqIV/imn9QO4KCxNxTgwxt9ho6HDvlDGERCxm+yeHUu3CPyq2ZGSF5HHsYTGUtYvQw4JfQyw/5DrZ7IIdU1e7ZuaE3h/NvFgKJPVTP52nmUtIW7pIOkHpn9mddjm/oKMayOzMspLn9HLFVbqi7A5Xw== vincent@zen-pc
|
|
||||||
privatekey:
|
|
||||||
- keyname: "id_gitea"
|
|
||||||
key: "{{lookup('file', '~/.ssh/id_gitea')}}"
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
system_user:
|
|
||||||
- name: drone-deploy
|
|
||||||
home: /home/drone-deploy
|
|
||||||
shell: /bin/bash
|
|
||||||
authorized_keys:
|
|
||||||
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDUaK+pQlosmopbZfucll9UdqDOTaODOBwoxRwkJEk1i drone@oscar
|
|
||||||
|
|
||||||
- name: ansible
|
|
||||||
home: /home/ansible
|
|
||||||
shell: /bin/bash
|
|
||||||
|
|
||||||
- name: root
|
|
||||||
home: /root
|
|
||||||
privatekey:
|
|
||||||
- keyname: id_gitea
|
|
||||||
key: "{{lookup('file', '~/.ssh/id_gitea')}}"
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
user_custom_host:
|
|
||||||
- host: "git.ducamps.eu"
|
|
||||||
user: "git"
|
|
||||||
keyfile: "~/.ssh/id_gitea"
|
|
||||||
|
|
||||||
user_config_repo: "ssh://git@git.ducamps.eu:2222/vincent/conf2.git"
|
|
@@ -1 +0,0 @@
vault_raft_group_name: "homelab"
@ -1,11 +0,0 @@
|
|||||||
$ANSIBLE_VAULT;1.1;AES256
|
|
||||||
39613433313663653039643961643165643632313938626339653365376633613135653436363938
|
|
||||||
6331623132366638633665636163336462393333336264320a666466303465663839646435626231
|
|
||||||
38396437363034313236383261326637306238616162303131356537393635363939376236386130
|
|
||||||
6466353961643233310a306631333664363332336263656638623763393732306361306632386662
|
|
||||||
37623934633932653965316532386664353130653830356237313337643266366233346633323265
|
|
||||||
37616533303561363864626531396366323565396536383133643539663630636633356238386633
|
|
||||||
34383464333363663532643239363438626135336632316135393537643930613532336231633064
|
|
||||||
35376561663637623932313365636261306131353233636661313435643563323534623365346436
|
|
||||||
65366132333635643832353464323961643466343832376635386531393834336535386364396333
|
|
||||||
3932393561646133336437643138373230366266633430663937
|
|
@ -1,12 +0,0 @@
|
|||||||
$ANSIBLE_VAULT;1.1;AES256
|
|
||||||
61326233336236343231396231306638373837653661313334313261313539316532373437346132
|
|
||||||
3931306637303530373032663236363466383433316161310a396439393564643731656664663639
|
|
||||||
32386130663837303663376432633930393663386436666263313939326631616466643237333138
|
|
||||||
3365346131636333330a376436323964656563363664336638653564656231636136663635303439
|
|
||||||
35346461356337303064623861326331346263373539336335393566623462343464323065366237
|
|
||||||
61346637326336613232643462323733366530656439626234663335633965376335623733336162
|
|
||||||
37323739376237323534613361333831396531663637666161666366656237353563626164626632
|
|
||||||
33326336353663356235373835666166643465666562616663336539316233373430633862613133
|
|
||||||
36363831623361393230653161626131353264366634326233363232336635306266376363363739
|
|
||||||
66373434343330633337633436316135656533613465613963363931383266323466653762623365
|
|
||||||
363332393662393532313063613066653964
|
|
@ -1,14 +0,0 @@
|
|||||||
$ANSIBLE_VAULT;1.1;AES256
|
|
||||||
35303137383361396262313561623237626336306366376630663065396664643630383638376436
|
|
||||||
3930346265616235383331383735613166383461643233310a663564356266663366633539303630
|
|
||||||
37616532393035356133653838323964393464333230313861356465326433353339336435363263
|
|
||||||
3162653932646662650a613762393062613433343362633365316434663661306637623363333834
|
|
||||||
61303231303362313133346461373738633239613933303564383532353537626538363636306461
|
|
||||||
66663330346566356637623036363964396137646435333139323430353639386134396537366334
|
|
||||||
39303130386432366335383433626431663034656466626265393863623438366130346562623365
|
|
||||||
63653963393663353666313631326131636361333230386461383638333338393137336562323935
|
|
||||||
37343034363961306663303232346139356534613837663230393962323333656536303161373939
|
|
||||||
65626164336166306264653538313661393934383966303135356161336331623835663235646332
|
|
||||||
63343764643861366537383962616230323036326331386333346463353835393762653735353862
|
|
||||||
32323839663365353337303363313535633362643231653663393936363539363933636430613832
|
|
||||||
32336566633962646463316636346330336265626130373636643335323762363661
|
|
@ -1,14 +0,0 @@
|
|||||||
$ANSIBLE_VAULT;1.1;AES256
|
|
||||||
64396261616266633665646330393631316463386334633032353965323964633464333331323334
|
|
||||||
6261653930313764313836366531383462313965336231620a656637623439623639383931373361
|
|
||||||
37373434636531623563336565356136633031633835633636643436653165386436636564616130
|
|
||||||
3763383036343739370a376565343130636631653635616566653531323464343632623566313436
|
|
||||||
32396165636333393032636636613030373663393238323964396462323163616162613933626536
|
|
||||||
31623931343633346131636563643563393230323839636438373933666137393031326532356535
|
|
||||||
32363439306338623533353734613966396362303164616335363535333438326234623161653732
|
|
||||||
66613762653966613763623966633939323634346536636334343364306332323563653361346563
|
|
||||||
65313433376634363261323934376637646233636233346536316262386634353666376539613235
|
|
||||||
63666432396636373139663861393164626165383665663933383734303165623464666630343231
|
|
||||||
33323339663138373530396636636333323439616137313434316465633162396237306238343366
|
|
||||||
30326162306539396630633738323435323432646338633331626665363838376363343835336534
|
|
||||||
3635
|
|
@ -1,50 +0,0 @@
|
|||||||
systemd_mounts:
|
|
||||||
diskstation_photo:
|
|
||||||
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/photo"
|
|
||||||
mount: /mnt/diskstation/photo
|
|
||||||
type: nfs
|
|
||||||
options:
|
|
||||||
- "vers=4"
|
|
||||||
automount: "{{ env_automount }}"
|
|
||||||
enabled: true
|
|
||||||
diskstation_music:
|
|
||||||
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/music"
|
|
||||||
mount: /mnt/diskstation/music
|
|
||||||
type: nfs
|
|
||||||
options:
|
|
||||||
- "vers=4"
|
|
||||||
automount: "{{ env_automount }}"
|
|
||||||
enabled: true
|
|
||||||
diskstation_media:
|
|
||||||
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/media"
|
|
||||||
mount: /mnt/diskstation/media
|
|
||||||
type: nfs
|
|
||||||
options:
|
|
||||||
- "vers=4"
|
|
||||||
automount: "{{ env_automount }}"
|
|
||||||
enabled: true
|
|
||||||
|
|
||||||
diskstation_ebook:
|
|
||||||
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/ebook"
|
|
||||||
mount: /mnt/diskstation/ebook
|
|
||||||
type: nfs
|
|
||||||
options:
|
|
||||||
- "vers=4"
|
|
||||||
automount: "{{ env_automount }}"
|
|
||||||
enabled: true
|
|
||||||
diskstation_nomad:
|
|
||||||
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/nomad"
|
|
||||||
mount: /mnt/diskstation/nomad
|
|
||||||
type: nfs
|
|
||||||
options:
|
|
||||||
- " "
|
|
||||||
automount: "{{ env_automount }}"
|
|
||||||
enabled: true
|
|
||||||
diskstation_download:
|
|
||||||
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/download"
|
|
||||||
mount: /mnt/diskstation/download
|
|
||||||
type: nfs
|
|
||||||
options:
|
|
||||||
- "vers=4"
|
|
||||||
automount: "{{ env_automount }}"
|
|
||||||
enabled: true
|
|
@@ -1 +0,0 @@
nomad_node_class: 'cluster'
47 ansible/group_vars/database Normal file
@@ -0,0 +1,47 @@

postgresql_users:
  - name: root
    role_attr_flags: SUPERUSER
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:root')}}"
  - name: wikijs
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:wikijs')}}"
  - name: ttrss
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:ttrss')}}"
  - name: gitea
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:gitea')}}"
  - name: supysonic
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:supysonic')}}"
  - name: hass
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:hass')}}"
  - name: nextcloud
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:nextcloud')}}"
  - name: vaultwarden
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:vaultwarden')}}"
  - name: drone
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:drone')}}"
  - name: dendrite
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:dendrite')}}"
  - name: paperless
    password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:paperless')}}"

postgresql_databases:
  - name: wikijs
    owner: wikijs
  - name: ttrss
    owner: ttrss
  - name: gitea
    owner: gitea
  - name: supysonic
    owner: supysonic
  - name: hass
    owner: hass
  - name: nextcloud
    owner: nextcloud
  - name: vaultwarden
    owner: vaultwarden
  - name: drone
    owner: drone
  - name: dendrite
    owner: dendrite
  - name: paperless
    owner: paperless
@@ -1,38 +0,0 @@
postgres_consul_service: true
postgres_consul_service_name: db

postgresql_databases:
  - name: ttrss
    owner: ttrss
  - name: gitea
    owner: gitea
  - name: supysonic
    owner: supysonic
  - name: hass
    owner: hass
  - name: vaultwarden
    owner: vaultwarden
  - name: drone
    owner: drone
  - name: paperless
    owner: paperless
  - name: vikunja
    owner: vikunja
  - name: ghostfolio
    owner: ghostfolio
  - name: pdns-auth
    owner: pdns-auth
  - name: pdns-admin
    owner: pdns-admin
  - name: mealie
    owner: mealie
  - name: immich
    owner: immich

postgresql_hba_entries:
  - {type: local, database: all, user: postgres, auth_method: peer}
  - {type: local, database: all, user: all, auth_method: peer}
  - {type: host, database: all, user: all, address: '127.0.0.1/32', auth_method: md5}
  - {type: host, database: all, user: all, address: '::1/128', auth_method: md5}
  - {type: host, database: all, user: all, address: '::0/128', auth_method: md5}
  - {type: host, database: all, user: all, address: '0.0.0.0/0', auth_method: md5}
@ -1,54 +0,0 @@
|
|||||||
$ANSIBLE_VAULT;1.1;AES256
|
|
||||||
39363436643831373861376361613830316334613939346338616636393462663033393261633838
|
|
||||||
6337336161393063646136613538396366653538656435360a303062636463383739653730346639
|
|
||||||
61323634306265613336313634653039313639663836363032353261383566393865613166613032
|
|
||||||
3837313634633466610a313062646237396138316361303361663565353862363139343566306539
|
|
||||||
38303161303163323265376539323939393938373965353934303535613962653534363362346563
|
|
||||||
61643638353138623162353364353736396162613735333063633739346132613161303564356437
|
|
||||||
62343535363263646463306466663536613937393463666336396332646533343439613433626566
|
|
||||||
38643363343065393165646134343935386461626166316662356365366666363737653336626631
|
|
||||||
64643230616431396666666462303366343164323233303139643939346635353730316234386163
|
|
||||||
35613235643034643833393233373536383863333763393066373564353535353463363336316335
|
|
||||||
63363537643432663266386438316563656663656462333039303861393364333966383430643263
|
|
||||||
63356435373064633861343137616637393161383361306135373864386235653034323732316663
|
|
||||||
65336465386135663532356433386562666639333464633362663131646237613034646563396133
|
|
||||||
33303464633635636233626633353038656230373266666132323561383866343632333561323363
|
|
||||||
61346664623338376436373332646232646235323639633262666166346535663238653563363239
|
|
||||||
34663365633363313433376333653534333364393635316235333965383262313563373161663065
|
|
||||||
36393565396534353235623238303835343334646632306638306332336539616463393966653538
|
|
||||||
35336462623031326539633139636533633632623137393463333531663935323765663139306361
|
|
||||||
66643434393533313039356434326438626265323066613966323634306632653765363834613034
|
|
||||||
30373039336536393865383265643335396232643537343363313338383838383030386665303237
|
|
||||||
64363666346535633237353462333232623132353031323231623338356136656261303662656465
|
|
||||||
31313039643561623635643435333133663032313964323061393231666336343233363038616231
|
|
||||||
36356262326530383233336130326361613431623866633832663361633937646461343731343938
|
|
||||||
33306262346463623935663466356264393837626239313739356431653163376563333234346566
|
|
||||||
38373663643532313635333131663239383736343930623735323861663037356136353433633865
|
|
||||||
63626435613936303661366637623338633961643137613933303735366265663933396130363039
|
|
||||||
34396637643638613839306639343765393539653164616536653661373264376436626639316666
|
|
||||||
61303835323761643531326438363035343539383464376433363534623934366534373631353364
|
|
||||||
61383866323737316430303736366533643939313637393631303833363431613562303639323939
|
|
||||||
66313434613963656464383964313734383938353366306462666537653563336465376464303538
|
|
||||||
34336531663334303938333739313638636363623562613536333736386137363139653164626261
|
|
||||||
62663662316365663563646164303935323866633336633939323837393962393130626330666233
|
|
||||||
63663661303565646236623130663034636264353235376561306630376365613966663536303963
|
|
||||||
63643161386435633831393334333035653761393863373731616239313235383033633439376166
|
|
||||||
39613762376162386231633938393036633461303732323337656430373430636435313337303365
|
|
||||||
37646461336339623339316663616636373036656564383462356562306465623762653162633963
|
|
||||||
35636466386138333564666564323034393162633965386133643235303938616439333130353637
|
|
||||||
61343536323034366464653138353665326436396133313432666563353335383733363335613562
|
|
||||||
61646365346665383866623364396138323666326338313530353663323938613362653038313339
|
|
||||||
32613663616535313661386538366330373364366637386634633437646362383764346263636434
|
|
||||||
35616166393065343038643861636333373738363335353164326435303961326662356230323262
|
|
||||||
35656531653535643630376330393731643532353132366662636664626132646632306361323035
|
|
||||||
31373136616435336362633439356339336466313337623538383763386132396135653864386638
|
|
||||||
31393864363466653137643565306462616238333435343036613331653866393532313861376331
|
|
||||||
33646636623666343439616332386363373664346164313963623861393134666463383366633539
|
|
||||||
35313761333564303635656364303566643436393130356163623137313530653539656537653139
|
|
||||||
38336636623732313630303933303962303561376436623737633139643564343166326335386639
|
|
||||||
31373437336139326562613339393235393065396538333566323864643639303132313733396132
|
|
||||||
35613532396363326166313061353136373965303964623534653634613639303764393038333037
|
|
||||||
63656131616463663565653134363336326139303736313138366262616338643339316231663631
|
|
||||||
30656132386462393433313261313466303239346138623433643634616465656139343764353338
|
|
||||||
62616139613731363665333438383861623837643432643134626461643631323034383262656439
|
|
||||||
33653563323434343964633236353434643739333863636630636363633639373630
|
|
@@ -1 +0,0 @@
postgres_consul_tag: "active"
@@ -1 +0,0 @@
postgres_consul_tag: "standby"
152
ansible/group_vars/dhcp
Normal file
152
ansible/group_vars/dhcp
Normal file
@ -0,0 +1,152 @@
|
|||||||
|
dhcpd_authoritative: True
|
||||||
|
dhcpd_lease_time: '72'
|
||||||
|
dhcpd_domain_name: "{{ domain.name }}"
|
||||||
|
dhcpd_nameservers:
|
||||||
|
- '192.168.1.40'
|
||||||
|
- '192.168.1.10'
|
||||||
|
dhcpd_keys:
|
||||||
|
- key: dhcp
|
||||||
|
algorithm: HMAC-MD5
|
||||||
|
secret: "{{ lookup('hashi_vault','secret=secrets/data/ansible/other:dhcpd_key') }}"
|
||||||
|
|
||||||
|
dhcpd_zones:
|
||||||
|
- zone: "{{ domain.name }}."
|
||||||
|
primary: "192.168.1.10"
|
||||||
|
key: "dhcp"
|
||||||
|
- zone: "1.168.192.in-addr.arpa."
|
||||||
|
primary: "192.168.1.10"
|
||||||
|
key: "dhcp"
|
||||||
|
|
||||||
|
dhcpd_options: |
|
||||||
|
ddns-updates on;
|
||||||
|
ddns-update-style interim;
|
||||||
|
ignore client-updates;
|
||||||
|
update-static-leases on;
|
||||||
|
ddns-domainname "ducamps.win.";
|
||||||
|
ddns-rev-domainname "in-addr.arpa.";
|
||||||
|
|
||||||
|
|
||||||
|
dhcpd_subnets:
|
||||||
|
- subnet: '192.168.1.0'
|
||||||
|
netmask: '255.255.255.0'
|
||||||
|
options: |
|
||||||
|
option routers 192.168.1.1;
|
||||||
|
pools:
|
||||||
|
- range: '192.168.1.100 192.168.1.140'
|
||||||
|
|
||||||
|
dhcpd_hosts:
|
||||||
|
- hostname: 'zen-pc'
|
||||||
|
address: '192.168.1.14'
|
||||||
|
ethernet: 'f0:d5:bf:f4:ce:d7'
|
||||||
|
|
||||||
|
- hostname: 'fixe-pc'
|
||||||
|
address: '192.168.1.15'
|
||||||
|
ethernet: 'ee:35:20:fc:7b:04'
|
||||||
|
|
||||||
|
- hostname: 'oscar'
|
||||||
|
address: '192.168.1.40'
|
||||||
|
ethernet: '7C:83:34:B3:49:9A'
|
||||||
|
|
||||||
|
- hostname: 'VMAS-HML'
|
||||||
|
address: '192.168.1.50'
|
||||||
|
ethernet: '52:54:00:02:74:ed'
|
||||||
|
|
||||||
|
- hostname: 'VMAS-BUILD'
|
||||||
|
address: '192.168.1.53'
|
||||||
|
ethernet: '52:54:13:1e:93'
|
||||||
|
|
||||||
|
|
||||||
|
- hostname: 'xiaomi-chambre-gateway'
|
||||||
|
address: '192.168.1.61'
|
||||||
|
ethernet: '04:cf:8c:9c:f7:f0'
|
||||||
|
- hostname: 'xiaomi-ampoule-chambre'
|
||||||
|
address: '192.168.1.62'
|
||||||
|
ethernet: '44:23:7c:88:1f:ea'
|
||||||
|
- hostname: 'shelly-chambre-ecran'
|
||||||
|
address: '192.168.1.63'
|
||||||
|
ethernet: 'b4:e6:2d:7a:ea:77'
|
||||||
|
- hostname: 'shelly-salon-cadre'
|
||||||
|
address: '192.168.1.64'
|
||||||
|
ethernet: 'b4:e6:2d:7a:e6:1e'
|
||||||
|
- hostname: 'shelly-chambre-ventilo'
|
||||||
|
address: '192.168.1.65'
|
||||||
|
ethernet: 'e0:98:06:97:78:0b'
|
||||||
|
|
||||||
|
keystodeploy:
|
||||||
|
- name: juicessh with password
|
||||||
|
user: "{{user.name}}"
|
||||||
|
sshkey: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINN5V9WPPi2/HwAQuDeaJO3hUPf8HxNMHqVmkf1pDjWg JuiceSSH
|
||||||
|
- name: fixe-pc new
|
||||||
|
user: "{{user.name}}"
|
||||||
|
sshkey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDBrP9akjyailPU9tUMvKrtDsqjI191W1L95u3OFjBqqapXgbDVx1FVtSlIIKcCHZyTII1zgC7woZmNRpmaIJRh6N+VIuRrRs29xx2GUVc4pxflUwwIAK36hgZS3nqmA2biacmPR9HogZLZMcPtZdLhWGlLuUv1cWqbqW7UcDa0lbubCo2v4OQMx/zt37voKAZSkkbH9mVszH6eKxNFy1KXbLYhwXiKfYBnAHbivhiSkZUGV6D4HNj8Jx6IY1YF3bfwMXmt841Q/7OY+t3RTIS8ewvSF+jpQ7GKHBEsZTZUGwIoSyZFFvCgKQVOJu/ZJJS4HNkluilir9Sxtx2LRgy+HHQ251trnsVsJp3ts4uTiMkKJQy1PXy1ZvQXYkip9Af3vlXUMmTyVj8cv+No07G1rZ1pZ3wXKX4RkTsoep5GsYlhyUd7GzsAQQiX9YhYyWDQ6NHBYAGAWbw2BLNxltWa4AyWOa1C8v+1+mRwdvpdMY7powJNCXQaIJmiOZiI/Us= vincent@fixe-pc-2020-03-01
|
||||||
|
- name: zen-pc
|
||||||
|
user: "{{user.name}}"
|
||||||
|
sshkey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCYHkEIa38p3e4+m/LScHm8Ei7H2X/pDksjVAzoJ4fHr8oXc6DKkC8SWwMnh3L4WzWBhfTbzwUgFTNpsxhp/UyJf+fdzmzetlbVlYSuA6yWuSmgMeFbXFImhZ+Sn3i59hLeqAAyrkQLjba2waehdEsuOQ/AGoDbMYm38Xf9Wka/1YIeUPE4gLeLvymRnGw7BSug6Unycy52WlFAquollObOvc7tNiX0uLDh81Dp0KZhqWRs75hfmQ9du4g4uNhFLiF11hOGNgj3PWV+nWe8GWNQYVUBChWX1dsP8ct/ahG9IFXSPEaFD1IZeFp29u2ln3mgKkBtcRTRe1e3CLQqiRsUq2aixVFbSgFMFgGSUiNGNqKR4f9DeyJrYBplSj6HXjWoBny4Wm8+yfk8qR2RtQpS6AUu81xtKnXOaj9Q5VZO3kVF0U3EXHAZutTYDj9mDlhLSBS7x7hmrkRBbIy7adSx9Gx5Ck3/RllqG6KD+LdJa4I0pUTRNetpLpYDeZpwjnDP1r7udaSQMyRMH5YKLzhtHqIV/imn9QO4KCxNxTgwxt9ho6HDvlDGERCxm+yeHUu3CPyq2ZGSF5HHsYTGUtYvQw4JfQyw/5DrZ7IIdU1e7ZuaE3h/NvFgKJPVTP52nmUtIW7pIOkHpn9mddjm/oKMayOzMspLn9HLFVbqi7A5Xw== vincent@zen-pc
|
||||||
|
|
||||||
|
nomad_datacenter: homelab
|
||||||
|
|
||||||
|
|
||||||
|
systemd_mounts:
|
||||||
|
diskstation_nomad:
|
||||||
|
share: diskstation.ducamps.win:/volume2/nomad
|
||||||
|
mount: /mnt/diskstation/nomad
|
||||||
|
type: nfs
|
||||||
|
options:
|
||||||
|
- " "
|
||||||
|
automount: true
|
||||||
|
diskstation_git:
|
||||||
|
share: diskstation.ducamps.win:/volume2/git
|
||||||
|
mount: /mnt/diskstation/git
|
||||||
|
type: nfs
|
||||||
|
options:
|
||||||
|
- " "
|
||||||
|
automount: true
|
||||||
|
diskstation_music:
|
||||||
|
share: diskstation.ducamps.win:/volume2/music
|
||||||
|
mount: /mnt/diskstation/music
|
||||||
|
type: nfs
|
||||||
|
options:
|
||||||
|
- " "
|
||||||
|
automount: true
|
||||||
|
diskstation_nextcloud:
|
||||||
|
share: //diskstation.ducamps.win/nextcloud
|
||||||
|
mount: /mnt/diskstation/nextcloud
|
||||||
|
type: cifs
|
||||||
|
options:
|
||||||
|
- credentials=/etc/creds/.diskstation_credentials
|
||||||
|
- uid=33
|
||||||
|
- gid=33
|
||||||
|
- vers=3.0
|
||||||
|
- dir_mode=0770
|
||||||
|
- _netdev
|
||||||
|
automount: true
|
||||||
|
diskstation_CardDav:
|
||||||
|
share: diskstation.ducamps.win:/volume2/CardDav
|
||||||
|
mount: /mnt/diskstation/CardDav
|
||||||
|
type: nfs
|
||||||
|
options:
|
||||||
|
- " "
|
||||||
|
automount: true
|
||||||
|
diskstation_archMirror:
|
||||||
|
share: diskstation.ducamps.win:/volume2/archMirror
|
||||||
|
mount: /mnt/diskstation/archMirror
|
||||||
|
type: nfs
|
||||||
|
options:
|
||||||
|
- " "
|
||||||
|
automount: true
|
||||||
|
|
||||||
|
credentials_files:
|
||||||
|
1:
|
||||||
|
type: smb
|
||||||
|
path: /etc/creds/.diskstation_credentials
|
||||||
|
username: admin
|
||||||
|
password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/storage:diskstation_admin') }}"
|
||||||
|
|
||||||
|
|
||||||
|
systemd_mounts_enabled:
|
||||||
|
- diskstation_nomad
|
||||||
|
- diskstation_git
|
||||||
|
- diskstation_music
|
||||||
|
- diskstation_nextcloud
|
||||||
|
- diskstation_CardDav
|
||||||
|
- diskstation_archMirror
|

@ -1,68 +0,0 @@
dhcpd_authoritative: True
dhcpd_lease_time: '72'
dhcpd_domain_name: "lan.{{ domain.name }}"
dhcpd_nameservers:
  - '192.168.1.4'
  - '192.168.1.40'

dhcpd_zones:
  - zone: "lan.{{ domain.name }}."
    primary: "192.168.1.5"
    key: "dhcpdupdate"
  - zone: "1.168.192.in-addr.arpa."
    primary: "192.168.1.5"
    key: "dhcpdupdate"

dhcpd_options: |
  ddns-updates on;
  ddns-update-style interim;
  ignore client-updates;
  update-static-leases on;
  ddns-domainname "lan.{{ domain.name }}.";
  ddns-rev-domainname "in-addr.arpa.";

dhcpd_subnets:
  - subnet: '192.168.1.0'
    netmask: '255.255.255.0'
    options: |
      option routers 192.168.1.1;
    pools:
      - range: '192.168.1.100 192.168.1.140'

dhcpd_hosts:
  - hostname: 'zen-pc'
    address: '192.168.1.14'
    ethernet: 'f0:d5:bf:f4:ce:d7'
  - hostname: 'fixe-pc'
    address: '192.168.1.15'
    ethernet: 'ee:35:20:fc:7b:04'
  - hostname: 'oscar'
    address: '192.168.1.40'
    ethernet: '68:1D:EF:3C:F0:44'
  - hostname: 'bleys'
    address: '192.168.1.42'
    ethernet: '68:1d:ef:2b:3d:24'

  - hostname: 'xiaomi-chambre-gateway'
    address: '192.168.1.61'
    ethernet: '04:cf:8c:9c:f7:f0'
  - hostname: 'xiaomi-ampoule-chambre'
    address: '192.168.1.62'
    ethernet: '44:23:7c:88:1f:ea'
  - hostname: 'shelly-chambre-ecran'
    address: '192.168.1.63'
    ethernet: 'b4:e6:2d:7a:ea:77'
  - hostname: 'shelly-salon-cadre'
    address: '192.168.1.64'
    ethernet: 'b4:e6:2d:7a:e6:1e'
  - hostname: 'shelly-chambre-ventilo'
    address: '192.168.1.65'
    ethernet: 'e0:98:06:97:78:0b'
  - hostname: 'shelly-Bureau-chauffeau'
    address: '192.168.1.66'
    ethernet: '8c:aa:b5:42:b9:b9'
@ -1,14 +0,0 @@
|
|||||||
$ANSIBLE_VAULT;1.1;AES256
|
|
||||||
65303666336535386536653939626336646338623431353161636565393532623264316534326539
|
|
||||||
6265393839323438376666393030383839326239323261660a333132613538306137383332336538
|
|
||||||
38323830353062366133643734303138343939323135333532333666653039326437316361353463
|
|
||||||
6665393263376132620a346239386437326462363565636335303766306638393331656664376665
|
|
||||||
63373131373039653065633861626263646635323634333538343163346239633937303761366362
|
|
||||||
31376438363731613666393531656232653033336332653261313866396434616461303831353336
|
|
||||||
38663965636536313932346133363733636636643938366364366435366237316435643062336231
|
|
||||||
34343931653963613431336465653036616431323263613731393963656637303561366461663038
|
|
||||||
31336131346266393035343135323131636435333865323733386439363763376638383337613530
|
|
||||||
34356331356361636665383933633130343564373739343630663835313164326565393439306163
|
|
||||||
31386538633033333961386534323234653833323537356565616436346462613333663139623035
|
|
||||||
30636265313230383162633466373937353262383965313631326336666133653331366230653961
|
|
||||||
6131
|
|

@ -1,2 +1,10 @@
chisel_server: true
chisel_server_port: 9090
chisel_server_backend: https://www.{{domain.name}}
chisel_server_auth:
  user: chisel
  pass: "{{ lookup('hashi_vault','secret=secrets/data/ansible/other:chisel_pass') }}"
arch_mirror_location: "/mnt/diskstation/archMirror"

nomad_datacenter: homelab
system_wol_enable: True
nomad_allow_privileged: True
92 ansible/group_vars/homelab/mount Normal file

@ -0,0 +1,92 @@
systemd_mounts:
  diskstation_git:
    share: diskstation.ducamps.win:/volume2/git
    mount: /mnt/diskstation/git
    type: nfs
    options:
      - " "
    automount: true
  diskstation_CardDav:
    share: diskstation.ducamps.win:/volume2/CardDav
    mount: /mnt/diskstation/CardDav
    type: nfs
    options:
      - " "
    automount: true
  backup_disk:
    share: /dev/sdb1
    mount: /mnt/backup
    type: ntfs-3g
    options:
      - "uid=1024
      - guid=100
      - vers=3.0"
    automount: true
  diskstation_home:
    share: diskstation.ducamps.win:/volume2/homes/admin
    mount: /mnt/diskstation/home
    type: nfs
    options:
      - " "
    automount: true
  diskstation_photo:
    share: diskstation.ducamps.win:/volume2/photo
    mount: /mnt/diskstation/photo
    type: nfs
    options:
      - " "
    automount: true
  diskstation_music:
    share: diskstation.ducamps.win:/volume2/music
    mount: /mnt/diskstation/music
    type: nfs
    options:
      - " "
    automount: true
  diskstation_media:
    share: diskstation.ducamps.win:/volume1/media
    mount: /mnt/diskstation/media
    type: nfs
    options:
      - " "
    automount: true
  diskstation_ebook:
    share: diskstation.ducamps.win:/volume2/ebook
    mount: /mnt/diskstation/ebook
    type: nfs
    options:
      - " "
    automount: true
  diskstation_archMirror:
    share: diskstation.ducamps.win:/volume2/archMirror
    mount: /mnt/diskstation/archMirror
    type: nfs
    options:
      - " "
    automount: true
  diskstation_nomad:
    share: diskstation.ducamps.win:/volume2/nomad
    mount: /mnt/diskstation/nomad
    type: nfs
    options:
      - " "
    automount: true

systemd_mounts_enabled:
  - diskstation_git
  - diskstation_music
  - backup_disk
  - diskstation_photo
  - diskstation_home
  - diskstation_CardDav
  - diskstation_media
  - diskstation_ebook
  - diskstation_archMirror
  - diskstation_nomad

credentials_files:
  1:
    type: smb
    path: /etc/creds/.diskstation_credentials
    username: admin
    password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/storage:diskstation_admin') }}"

@ -1,13 +0,0 @@
domain:
  name: ducamps.eu
consul_bootstrap_expect: 3
consul_domain: "consul"
nomad_bootstrap_expect: 3
nomad_client_meta:
  - name: "env"
    value: "production"
vault_unseal_keys_dir_output: "~/vaultUnseal/production"
env_default_nfs_path: ""
env_media_nfs_path: "/volume1"
env_automount: true
nas_ip: "192.168.1.43"

@ -1,21 +0,0 @@
domain:
  name: ducamps.dev
#systemd_mounts: []
#systemd_mounts_enabled: []
consul_bootstrap_expect: 2
consul_domain: "consul"
nomad_bootstrap_expect: 2
nomad_client_meta:
  - name: "env"
    value: "staging"

vault_unseal_keys_dir_output: "~/vaultUnseal/staging"
hosts_entries:
  - ip: "{{ hostvars['nas-dev']['ansible_default_ipv4']['address'] }}"
    name: diskstation.ducamps.eu

env_default_nfs_path: ""
env_automount: true
nas_ip: "nfs.service.consul"

0 ansible/group_vars/wireguard Normal file

2 ansible/host_vars/VMAS-BUILD Normal file
@ -0,0 +1,2 @@

chainetv_repo_branch: dev

2 ansible/host_vars/VMAS-HML Normal file
@ -0,0 +1,2 @@

chainetv_repo_branch: master

2 ansible/host_vars/VMDR Normal file
@ -0,0 +1,2 @@

wireguard_address: "10.0.0.100/24"
@ -1,65 +0,0 @@
|
|||||||
---
|
|
||||||
ansible_host: "192.168.1.42"
|
|
||||||
ansible_python_interpreter: "/usr/bin/python3"
|
|
||||||
default_interface: "enp2s0"
|
|
||||||
consul_iface: "{{ default_interface}}"
|
|
||||||
vault_iface: "{{ default_interface}}"
|
|
||||||
nfs_iface: "{{ default_interface}}"
|
|
||||||
wireguard_address: "10.0.0.7/24"
|
|
||||||
wireguard_byhost_allowed_ips:
|
|
||||||
merlin: 10.0.0.7,192.168.1.42,192.168.1.0/24
|
|
||||||
corwin: 10.0.0.7,192.168.1.42,192.168.1.0/24
|
|
||||||
perrsistent_keepalive: "20"
|
|
||||||
wireguard_endpoint: ""
|
|
||||||
|
|
||||||
wireguard_postup:
|
|
||||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
|
||||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
|
||||||
- iptables -t nat -A POSTROUTING -o {{default_interface}} -j MASQUERADE
|
|
||||||
- sysctl -w net.ipv4.ip_forward=1
|
|
||||||
|
|
||||||
wireguard_postdown:
|
|
||||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
|
||||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
|
||||||
- iptables -t nat -D POSTROUTING -o {default_interface} -j MASQUERADE
|
|
||||||
- sysctl -w net.ipv4.ip_forward=0
|
|
||||||
|
|
||||||
partition_table:
|
|
||||||
- device: "/dev/sda"
|
|
||||||
label: gpt
|
|
||||||
settings:
|
|
||||||
- number: 1
|
|
||||||
part_end: 300MB
|
|
||||||
flags: [boot, esp]
|
|
||||||
fstype: vfat
|
|
||||||
format: yes
|
|
||||||
- number: 2
|
|
||||||
part_start: 512MB
|
|
||||||
part_end: 1524MB
|
|
||||||
flags: []
|
|
||||||
fstype: swap
|
|
||||||
format: yes
|
|
||||||
- number: 3
|
|
||||||
part_start: 1524MB
|
|
||||||
flags: [lvm]
|
|
||||||
fstype: ext4
|
|
||||||
format: yes
|
|
||||||
#- device: "/dev/sdb"
|
|
||||||
#settings:
|
|
||||||
#- number: 1
|
|
||||||
#name: home
|
|
||||||
#fstype: ext4
|
|
||||||
#format:
|
|
||||||
mount_table:
|
|
||||||
- device: "/dev/sda"
|
|
||||||
settings:
|
|
||||||
- number: 3
|
|
||||||
mountpath: /mnt
|
|
||||||
fstype: ext4
|
|
||||||
- number: 1
|
|
||||||
mountpath: /mnt/boot
|
|
||||||
fstype: vfat
|
|
||||||
|
|
||||||
#need vfat boot partition with esp label
|
|
||||||
provissionning_UEFI_Enable: True
|
|
||||||
|
|
@ -1,35 +1,30 @@
|
|||||||
---
|
---
|
||||||
ansible_host: 10.0.0.1
|
ansible_host: 65.108.221.233
|
||||||
#ansible_host: 135.181.150.203
|
|
||||||
default_interface: "eth0"
|
|
||||||
wireguard_address: "10.0.0.1/24"
|
wireguard_address: "10.0.0.1/24"
|
||||||
wireguard_endpoint: "135.181.150.203"
|
wireguard_endpoint: "65.108.221.233"
|
||||||
wireguard_persistent_keepalive: "20"
|
wireguard_persistent_keepalive: "30"
|
||||||
wireguard_allowed_ips: 10.0.0.1
|
wireguard_allowed_ips: "10.0.0.1/32"
|
||||||
|
|
||||||
wireguard_postup:
|
wireguard_postup:
|
||||||
- iptables -A FORWARD -o %i -j ACCEPT
|
- iptables -A FORWARD -o %i -j ACCEPT
|
||||||
- iptables -A FORWARD -i %i -j ACCEPT
|
- iptables -A FORWARD -i %i -j ACCEPT
|
||||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
- iptables -t nat -A POSTROUTING -o enp1s0 -j MASQUERADE
|
||||||
- sysctl -w net.ipv4.ip_forward=1
|
|
||||||
- resolvectl dns %i 192.168.1.4 192.168.1.41; resolvectl domain %i '~ducamps.win' '~ducamps.eu' '~{{ consul_domain }}'
|
|
||||||
|
|
||||||
wireguard_postdown:
|
wireguard_postdown:
|
||||||
- iptables -D FORWARD -i %i -j ACCEPT
|
- iptables -D FORWARD -i %i -j ACCEPT
|
||||||
- iptables -D FORWARD -o %i -j ACCEPT
|
- iptables -D FORWARD -o %i -j ACCEPT
|
||||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
- iptables -t nat -D POSTROUTING -o enp1s0 -j MASQUERADE
|
||||||
- sysctl -w net.ipv4.ip_forward=0
|
|
||||||
|
|
||||||
wireguard_unmanaged_peers:
|
wireguard_unmanaged_peers:
|
||||||
phone:
|
phone:
|
||||||
public_key: IYKgrQ2VJUbOnupSqedOfIilsbmBBABZUTRF9ZoTrkc=
|
public_key: ioG35kDFTtip+Acfq+je9qDHYbZij+J6+Pg3T6Z4N0w=
|
||||||
allowed_ips: 10.0.0.3/32
|
allowed_ips: 10.0.0.3/32
|
||||||
persistent_keepalive: 0
|
persistent_keepalive: 0
|
||||||
zen:
|
zen:
|
||||||
public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
|
public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
|
||||||
allowed_ips: 10.0.0.5/32
|
allowed_ips: 10.0.0.5/32
|
||||||
persistent_keepalive: 0
|
persistent_keepalive: 0
|
||||||
wireguard_dns: "192.168.1.4,192.168.1.41"
|
|
||||||
consul_client_addr: "127.0.0.1 10.0.0.1"
|
consul_client_addr: "127.0.0.1 10.0.0.1"
|
||||||
consul_bind_address: "10.0.0.1"
|
consul_bind_address: "10.0.0.1"
|
||||||
consul_ui: True
|
consul_ui: True
|
||||||
@ -39,9 +34,7 @@ nomad_host_networks:
|
|||||||
- name: "private"
|
- name: "private"
|
||||||
interface: wg0
|
interface: wg0
|
||||||
- name: "public"
|
- name: "public"
|
||||||
interface: eth0
|
interface: enp1s0
|
||||||
- name: "default"
|
- name: "default"
|
||||||
interface: wg0
|
interface: wg0
|
||||||
nomad_client_network_interface : "wg0"
|
|
||||||
vault_listener_address: 10.0.0.1
|
vault_listener_address: 10.0.0.1
|
||||||
nomad_plugins_podman: True
|
|
||||||
|
@ -1,24 +1,18 @@
|
|||||||
---
|
---
|
||||||
ansible_host: "192.168.1.41"
|
ansible_host: "192.168.1.41"
|
||||||
ansible_python_interpreter: "/usr/bin/python3"
|
ansible_python_interpreter: "/usr/bin/python3"
|
||||||
default_interface: "enu1u1"
|
wireguard_address: "10.0.0.5/24"
|
||||||
consul_iface: "{{ default_interface }}"
|
wireguard_allowed_ips: "10.0.0.5/32,192.168.1.0/24"
|
||||||
vault_iface: "{{ default_interface }}"
|
perrsistent_keepalive: "30"
|
||||||
|
|
||||||
wireguard_address: "10.0.0.6/24"
|
|
||||||
wireguard_byhost_allowed_ips:
|
|
||||||
merlin: 10.0.0.6,192.168.1.41
|
|
||||||
corwin: 10.0.0.6,192.168.1.41
|
|
||||||
perrsistent_keepalive: "20"
|
|
||||||
wireguard_endpoint: ""
|
wireguard_endpoint: ""
|
||||||
|
|
||||||
wireguard_postup:
|
wireguard_postup:
|
||||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
- iptables -A FORWARD -i wg0 -j ACCEPT
|
||||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
- iptables -A FORWARD -o wg0 -j ACCEPT
|
||||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
- iptables -t nat -A POSTROUTING -o eno1 -j MASQUERADE
|
||||||
|
|
||||||
wireguard_postdown:
|
wireguard_postdown:
|
||||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
- iptables -D FORWARD -i wg0 -j ACCEPT
|
||||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
- iptables -D FORWARD -o wg0 -j ACCEPT
|
||||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
- iptables -t nat -D POSTROUTING -o eno1 -j MASQUERADE
|
||||||
|
|
||||||
|
@ -1,19 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
default_interface: eth0
|
|
||||||
vault_iface: "{{ default_interface}}"
|
|
||||||
ansible_host: gerard-dev.lan.ducamps.dev
|
|
||||||
wireguard_address: "10.0.1.6/24"
|
|
||||||
perrsistent_keepalive: "20"
|
|
||||||
wireguard_endpoint: ""
|
|
||||||
|
|
||||||
wireguard_postup:
|
|
||||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
|
||||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
|
||||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
|
||||||
|
|
||||||
wireguard_postdown:
|
|
||||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
|
||||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
|
||||||
- iptables -t nat -D POSTROUTING -o {{ default_interface}} -j MASQUERADE
|
|
||||||
|
|
@ -1,39 +1,31 @@
|
|||||||
---
|
---
|
||||||
ansible_host: 10.0.0.4
|
ansible_host: 65.109.13.133
|
||||||
#ansible_host: 65.21.2.14
|
|
||||||
default_interface: "ens3"
|
|
||||||
nfs_iface: "wg0"
|
|
||||||
wireguard_address: "10.0.0.4/24"
|
wireguard_address: "10.0.0.4/24"
|
||||||
wireguard_endpoint: "65.21.2.14"
|
wireguard_endpoint: "65.109.13.133"
|
||||||
wireguard_persistent_keepalive: "20"
|
wireguard_persistent_keepalive: "30"
|
||||||
wireguard_byhost_allowed_ips:
|
wireguard_allowed_ips: "10.0.0.4/32,10.0.0.3/32,10.0.0.5/32"
|
||||||
oscar: "0.0.0.0/0"
|
|
||||||
bleys: "0.0.0.0/0"
|
|
||||||
wireguard_allowed_ips: "10.0.0.4/32,10.0.0.3,10.0.0.5"
|
|
||||||
|
|
||||||
wireguard_postup:
|
wireguard_postup:
|
||||||
- iptables -A FORWARD -o %i -j ACCEPT
|
- iptables -A FORWARD -o %i -j ACCEPT
|
||||||
- iptables -A FORWARD -i %i -j ACCEPT
|
- iptables -A FORWARD -i %i -j ACCEPT
|
||||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
|
||||||
- sysctl -w net.ipv4.ip_forward=1
|
|
||||||
- resolvectl dns %i 192.168.1.4 192.168.1.41; resolvectl domain %i '~ducamps.win' '~ducamps.eu' '~{{ consul_domain }}'
|
|
||||||
|
|
||||||
wireguard_postdown:
|
wireguard_postdown:
|
||||||
- iptables -D FORWARD -i %i -j ACCEPT
|
- iptables -D FORWARD -i %i -j ACCEPT
|
||||||
- iptables -D FORWARD -o %i -j ACCEPT
|
- iptables -D FORWARD -o %i -j ACCEPT
|
||||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
|
||||||
- sysctl -w net.ipv4.ip_forward=0
|
|
||||||
|
|
||||||
wireguard_unmanaged_peers:
|
wireguard_unmanaged_peers:
|
||||||
phone:
|
phone:
|
||||||
public_key: IYKgrQ2VJUbOnupSqedOfIilsbmBBABZUTRF9ZoTrkc=
|
public_key: ioG35kDFTtip+Acfq+je9qDHYbZij+J6+Pg3T6Z4N0w=
|
||||||
allowed_ips: 10.0.0.3/32
|
allowed_ips: 10.0.0.3/32
|
||||||
persistent_keepalive: 0
|
persistent_keepalive: 0
|
||||||
zen:
|
zen:
|
||||||
public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
|
public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
|
||||||
allowed_ips: 10.0.0.5/32
|
allowed_ips: 10.0.0.5/32
|
||||||
persistent_keepalive: 0
|
persistent_keepalive: 0
|
||||||
wireguard_dns: "192.168.1.4,192.168.1.41"
|
wireguard_dns: "192.168.1.41,192.168.1.10"
|
||||||
consul_client_addr: "127.0.0.1 10.0.0.4"
|
consul_client_addr: "127.0.0.1 10.0.0.4"
|
||||||
consul_bind_address: "10.0.0.4"
|
consul_bind_address: "10.0.0.4"
|
||||||
consul_ui: True
|
consul_ui: True
|
||||||
@ -43,8 +35,7 @@ nomad_host_networks:
|
|||||||
- name: "private"
|
- name: "private"
|
||||||
interface: wg0
|
interface: wg0
|
||||||
- name: "public"
|
- name: "public"
|
||||||
interface: ens3
|
interface: eth0
|
||||||
- name: "default"
|
- name: "default"
|
||||||
interface: wg0
|
interface: wg0
|
||||||
vault_listener_address: 10.0.0.4
|
vault_listener_address: 10.0.0.4
|
||||||
nomad_plugins_podman: True
|
|
||||||
|
@ -1,41 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
ansible_host: merlin-dev.lan.ducamps.dev
|
|
||||||
default_interface: eth0
|
|
||||||
vault_iface: "{{ default_interface}}"
|
|
||||||
wireguard_address: "10.0.1.4/24"
|
|
||||||
wireguard_endpoint: "{{ ansible_default_ipv4.address }}"
|
|
||||||
wireguard_persistent_keepalive: "30"
|
|
||||||
|
|
||||||
wireguard_postup:
|
|
||||||
- iptables -A FORWARD -o %i -j ACCEPT
|
|
||||||
- iptables -A FORWARD -i %i -j ACCEPT
|
|
||||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
|
||||||
|
|
||||||
wireguard_postdown:
|
|
||||||
- iptables -D FORWARD -i %i -j ACCEPT
|
|
||||||
- iptables -D FORWARD -o %i -j ACCEPT
|
|
||||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
|
||||||
|
|
||||||
wireguard_unmanaged_peers:
|
|
||||||
phone:
|
|
||||||
public_key: ioG35kDFTtip+Acfq+je9qDHYbZij+J6+Pg3T6Z4N0w=
|
|
||||||
allowed_ips: 10.0.1.3/32
|
|
||||||
persistent_keepalive: 0
|
|
||||||
zen:
|
|
||||||
public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
|
|
||||||
allowed_ips: 10.0.1.5/32
|
|
||||||
persistent_keepalive: 0
|
|
||||||
consul_client_addr: "127.0.0.1 10.0.1.4"
|
|
||||||
consul_bind_address: "10.0.1.4"
|
|
||||||
consul_ui: True
|
|
||||||
consul_iface: "wg0"
|
|
||||||
nomad_bind_addr: "10.0.1.4"
|
|
||||||
nomad_host_networks:
|
|
||||||
- name: "private"
|
|
||||||
interface: wg0
|
|
||||||
- name: "public"
|
|
||||||
interface: eth0
|
|
||||||
- name: "default"
|
|
||||||
interface: wg0
|
|
||||||
vault_listener_address: 10.0.1.4
|
|
@ -1,17 +0,0 @@
|
|||||||
---
|
|
||||||
ansible_host: nas-dev.lan.ducamps.dev
|
|
||||||
default_interface: eth0
|
|
||||||
vault_iface: "{{ default_interface}}"
|
|
||||||
wireguard_address: "10.0.1.8/24"
|
|
||||||
perrsistent_keepalive: "30"
|
|
||||||
wireguard_endpoint: ""
|
|
||||||
|
|
||||||
wireguard_postup:
|
|
||||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
|
||||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
|
||||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
|
||||||
|
|
||||||
wireguard_postdown:
|
|
||||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
|
||||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
|
||||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
|
@ -1,19 +0,0 @@
|
|||||||
---
|
|
||||||
wireguard_address: "10.0.0.8/24"
|
|
||||||
default_interface: "enp2s0"
|
|
||||||
consul_iface: "{{ default_interface}}"
|
|
||||||
vault_iface: "{{ default_interface}}"
|
|
||||||
perrsistent_keepalive: "30"
|
|
||||||
wireguard_endpoint: ""
|
|
||||||
wireguard_byhost_allowed_ips:
|
|
||||||
merlin: 10.0.0.8,192.168.1.43
|
|
||||||
corwin: 10.0.0.8,192.168.1.43
|
|
||||||
wireguard_postup:
|
|
||||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
|
||||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
|
||||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
|
||||||
|
|
||||||
wireguard_postdown:
|
|
||||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
|
||||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
|
||||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
|
@ -1,25 +1,19 @@
|
|||||||
---
|
---
|
||||||
default_interface: "enp1s0"
|
|
||||||
consul_iface: "{{ default_interface}}"
|
|
||||||
vault_iface: "{{ default_interface}}"
|
|
||||||
nfs_iface: "{{ default_interface}}"
|
|
||||||
nomad_client_cpu_total_compute: 8000
|
|
||||||
wireguard_address: "10.0.0.2/24"
|
wireguard_address: "10.0.0.2/24"
|
||||||
wireguard_byhost_allowed_ips:
|
wireguard_allowed_ips: "10.0.0.2/32,192.168.1.0/24"
|
||||||
merlin: 10.0.0.2,192.168.1.40
|
|
||||||
corwin: 10.0.0.2,192.168.1.40
|
|
||||||
perrsistent_keepalive: "30"
|
perrsistent_keepalive: "30"
|
||||||
wireguard_endpoint: ""
|
wireguard_endpoint: ""
|
||||||
|
|
||||||
wireguard_postup:
|
wireguard_postup:
|
||||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
- iptables -A FORWARD -i wg0 -j ACCEPT
|
||||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
- iptables -A FORWARD -o wg0 -j ACCEPT
|
||||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
- iptables -t nat -A POSTROUTING -o eno1 -j MASQUERADE
|
||||||
|
|
||||||
wireguard_postdown:
|
wireguard_postdown:
|
||||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
- iptables -D FORWARD -i wg0 -j ACCEPT
|
||||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
- iptables -D FORWARD -o wg0 -j ACCEPT
|
||||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
- iptables -t nat -D POSTROUTING -o eno1 -j MASQUERADE
|
||||||
|
consul_snapshot: True
|
||||||
|
|
||||||
partition_table:
|
partition_table:
|
||||||
- device: "/dev/sda"
|
- device: "/dev/sda"
|
||||||
|
@ -1,17 +0,0 @@
|
|||||||
---
|
|
||||||
ansible_host: oscar-dev.lan.ducamps.dev
|
|
||||||
default_interface: eth0
|
|
||||||
vault_iface: "{{ default_interface}}"
|
|
||||||
wireguard_address: "10.0.1.2/24"
|
|
||||||
perrsistent_keepalive: "30"
|
|
||||||
wireguard_endpoint: ""
|
|
||||||
|
|
||||||
wireguard_postup:
|
|
||||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
|
||||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
|
||||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
|
||||||
|
|
||||||
wireguard_postdown:
|
|
||||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
|
||||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
|
||||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
|
@ -1,25 +1,12 @@
|
|||||||
|
|
||||||
requirements:
|
requirements:
|
||||||
ansible-galaxy install -g -r roles/requirements.yml
|
ansible-galaxy install -g -f -r roles/requirements.yml
|
||||||
|
|
||||||
deploy_production:
|
deploy_production:
|
||||||
ansible-playbook site.yml -i production -u ansible
|
ansible-playbook site.yml -i production
|
||||||
|
|
||||||
deploy_production_wiregard:
|
|
||||||
ansible-playbook playbooks/wireguard.yml -i production -u ansible
|
|
||||||
|
|
||||||
deploy_staging:
|
deploy_staging:
|
||||||
ansible-playbook playbooks/wireguard.yml -i staging -u ansible
|
ansible-playbook site.yml -i staging
|
||||||
ansible-playbook site.yml -i staging -u ansible
|
|
||||||
|
|
||||||
|
|
||||||
deploy_staging_base:
|
|
||||||
ansible-playbook playbooks/sssd.yml -i staging -u ansible
|
|
||||||
ansible-playbook playbooks/wireguard.yml -i staging -u ansible
|
|
||||||
ansible-playbook playbooks/server.yml -i staging -u ansible
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
view-allvault:
|
|
||||||
ansible-vault view `git grep -l "ANSIBLE_VAULT;1.1;AES256$$"`
|
|
||||||
|
|
||||||
|
generate-token:
|
||||||
|
@echo export VAULT_TOKEN=`vault token create -policy=ansible -field="token" -period 6h`
|
||||||
|
@ -1,9 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
|
|
||||||
readonly vault_password_file_encrypted="$(dirname $0)/vault-password.gpg"
|
|
||||||
|
|
||||||
# flock used to work around "gpg: decryption failed: No secret key" in tf-stage2
|
|
||||||
# would otherwise need 'auto-expand-secmem' (https://dev.gnupg.org/T3530#106174)
|
|
||||||
flock "$vault_password_file_encrypted" \
|
|
||||||
gpg --batch --decrypt --quiet "$vault_password_file_encrypted"
|
|
||||||
|
|
Binary file not shown.
@ -1,45 +0,0 @@
|
|||||||
---
|
|
||||||
prerun: false
|
|
||||||
dependency:
|
|
||||||
name: galaxy
|
|
||||||
enabled: false
|
|
||||||
driver:
|
|
||||||
name: vagrant
|
|
||||||
provider:
|
|
||||||
name: libvirt
|
|
||||||
default_box: archlinux/archlinux
|
|
||||||
platforms:
|
|
||||||
- name: oscar-dev
|
|
||||||
cpu: 1
|
|
||||||
memory: 1024
|
|
||||||
box: archlinux/archlinux
|
|
||||||
- name: merlin-dev
|
|
||||||
cpu: 1
|
|
||||||
memory: 1024
|
|
||||||
box: generic/rocky9
|
|
||||||
- name: gerard-dev
|
|
||||||
cpu: 1
|
|
||||||
memory: 1024
|
|
||||||
box: debian/bookworm64
|
|
||||||
- name: nas-dev
|
|
||||||
cpu: 1
|
|
||||||
memory: 1024
|
|
||||||
box: archlinux/archlinux
|
|
||||||
provisioner:
|
|
||||||
name: ansible
|
|
||||||
connection_options:
|
|
||||||
ansible_ssh_user: vagrant
|
|
||||||
ansible_become: true
|
|
||||||
env:
|
|
||||||
ANSIBLE_CONFIG: ../../ansible.cfg
|
|
||||||
ANSIBLE_ROLES_PATH: "../../roles"
|
|
||||||
log: true
|
|
||||||
lint:
|
|
||||||
name: ansible-lint
|
|
||||||
inventory:
|
|
||||||
host_vars: []
|
|
||||||
links:
|
|
||||||
group_vars: ../../group_vars
|
|
||||||
hosts: ../../staging
|
|
||||||
verifier:
|
|
||||||
name: ansible
|
|
@ -1,54 +1,10 @@
|
|||||||
---
|
- hosts: all
|
||||||
- name: Consul install
|
|
||||||
hosts: all
|
|
||||||
roles:
|
roles:
|
||||||
- role: ansible-consul
|
- role: ansible-consul
|
||||||
become: true
|
become: true
|
||||||
|
|
||||||
- name: Vault install
|
|
||||||
hosts: homelab
|
|
||||||
roles:
|
|
||||||
- role: ansible-hashicorp-vault
|
- role: ansible-hashicorp-vault
|
||||||
|
when: ansible_architecture == 'x86_64'
|
||||||
become: true
|
become: true
|
||||||
post_tasks:
|
|
||||||
- name: Stat root file
|
|
||||||
ansible.builtin.stat:
|
|
||||||
path: "{{ vault_unseal_keys_dir_output }}/rootkey"
|
|
||||||
register: rootkey_exist
|
|
||||||
delegate_to: localhost
|
|
||||||
- name: Reading root contents
|
|
||||||
ansible.builtin.command: cat "{{ vault_unseal_keys_dir_output }}/rootkey"
|
|
||||||
register: root_token
|
|
||||||
delegate_to: localhost
|
|
||||||
when: rootkey_exist.stat.exists
|
|
||||||
changed_when: false
|
|
||||||
- name: debug
|
|
||||||
ansible.builtin.debug:
|
|
||||||
var: root_token
|
|
||||||
- name: Generate nomad token
|
|
||||||
community.hashi_vault.vault_token_create:
|
|
||||||
renewable: true
|
|
||||||
policies: "nomad-server-policy"
|
|
||||||
period: 72h
|
|
||||||
no_parent: true
|
|
||||||
token: "{{ root_token.stdout }}"
|
|
||||||
url: "http://active.vault.service.consul:8200"
|
|
||||||
retries: 4
|
|
||||||
run_once: true
|
|
||||||
delegate_to: localhost
|
|
||||||
when: root_token.stdout is defined
|
|
||||||
register: nomad_token_data
|
|
||||||
|
|
||||||
- name: Gather nomad token
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
nomad_vault_token: "{{ nomad_token_data.login.auth.client_token }}"
|
|
||||||
when: nomad_token_data.login is defined
|
|
||||||
|
|
||||||
- name: nomad
|
|
||||||
hosts: all
|
|
||||||
vars:
|
|
||||||
unseal_keys_dir_output: ~/vaultunseal
|
|
||||||
roles:
|
|
||||||
- role: ansible-nomad
|
- role: ansible-nomad
|
||||||
become: true
|
become: true
|
||||||
- role: docker
|
- role: docker
|
||||||
|
@ -1,9 +0,0 @@
|
|||||||
---
|
|
||||||
- hosts:
|
|
||||||
- homelab
|
|
||||||
- VPS
|
|
||||||
- NAS
|
|
||||||
vars:
|
|
||||||
# certbot_force: true
|
|
||||||
roles:
|
|
||||||
- autofs
|
|
@ -1,6 +1,7 @@
|
|||||||
---
|
---
|
||||||
- hosts: all
|
- hosts: all
|
||||||
gather_facts: false
|
remote_user: root
|
||||||
become: true
|
vars:
|
||||||
|
ansible_password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/user:root') }}"
|
||||||
roles:
|
roles:
|
||||||
- ansible_bootstrap
|
- ansible_bootstrap
|
||||||
|
@ -1,28 +0,0 @@
|
|||||||
---
|
|
||||||
- hosts: all
|
|
||||||
roles:
|
|
||||||
- role: ansible-user
|
|
||||||
vars:
|
|
||||||
user_name: '{{ user.name }}'
|
|
||||||
user_ldap: '{{ sssd_configure}}'
|
|
||||||
user_password: '{{ userPassword }}'
|
|
||||||
user_authorized_key: '{{ user.authorized_keys}}'
|
|
||||||
user_privatekey: '{{ user.privatekey}}'
|
|
||||||
user_shell: '/bin/zsh'
|
|
||||||
user_uid: '{{ user.uid }}'
|
|
||||||
user_groups:
|
|
||||||
- docker
|
|
||||||
become: true
|
|
||||||
become_user: '{{ user.name }}'
|
|
||||||
|
|
||||||
- hosts: all
|
|
||||||
roles:
|
|
||||||
- role: user_config
|
|
||||||
vars:
|
|
||||||
user_config_username: "{{ user.name }}"
|
|
||||||
become_user: "{{ user.name }}"
|
|
||||||
become: true
|
|
||||||
- role: user_config
|
|
||||||
vars:
|
|
||||||
user_config_username: root
|
|
||||||
become: true
|
|
@ -1,54 +1,7 @@
|
|||||||
---
|
---
|
||||||
- name: Database playbook
|
- hosts: database
|
||||||
hosts: database
|
|
||||||
vars:
|
vars:
|
||||||
# certbot_force: true
|
# certbot_force: true
|
||||||
pre_tasks:
|
|
||||||
- name: Install Pg vertors (immich)
|
|
||||||
aur:
|
|
||||||
name: pgvecto.rs-bin
|
|
||||||
state: present
|
|
||||||
become: true
|
|
||||||
become_user: aur_builder
|
|
||||||
- name: Add database member to pg_hba replication
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
postgresql_hba_entries: "{{ postgresql_hba_entries + [\
|
|
||||||
{'type':'host', \
|
|
||||||
'database': 'replication',\
|
|
||||||
'user':'repli',\
|
|
||||||
'address':hostvars[item]['ansible_'+hostvars[item]['default_interface']]['ipv4']['address']+'/32',\
|
|
||||||
'auth_method':'trust'}] }}"
|
|
||||||
loop: '{{ groups.database }}'
|
|
||||||
roles:
|
roles:
|
||||||
- role: ansible-role-postgresql
|
- role: ansible-role-postgresql
|
||||||
become: true
|
become: true
|
||||||
tasks:
|
|
||||||
- name: Launch replication
|
|
||||||
ansible.builtin.command: pg_basebackup -D /var/lib/postgres/data -h {{groups["database_active"]|first}} -U repli -Fp -Xs -P -R -w
|
|
||||||
args:
|
|
||||||
creates: /var/lib/postgres/data/postgresql.conf
|
|
||||||
become: true
|
|
||||||
become_user: postgres
|
|
||||||
when: inventory_hostname in groups["database_standby"]
|
|
||||||
- name: Ensure PostgreSQL is started and enabled on boot.
|
|
||||||
ansible.builtin.service:
|
|
||||||
name: '{{ postgresql_daemon }}'
|
|
||||||
state: '{{ postgresql_service_state }}'
|
|
||||||
enabled: '{{ postgresql_service_enabled }}'
|
|
||||||
become: true
|
|
||||||
|
|
||||||
- name: Set Postgress shared libraries
|
|
||||||
community.postgresql.postgresql_set:
|
|
||||||
name: shared_preload_libraries
|
|
||||||
value: vectors.so
|
|
||||||
become: true
|
|
||||||
become_user: postgres
|
|
||||||
when: inventory_hostname in groups["database_active"]
|
|
||||||
notify: Restart postgresql
|
|
||||||
- name: Set Postgress shared libraries
|
|
||||||
community.postgresql.postgresql_set:
|
|
||||||
name: search_path
|
|
||||||
value: '$user, public, vectors'
|
|
||||||
become: true
|
|
||||||
become_user: postgres
|
|
||||||
when: inventory_hostname in groups["database_active"]
|
|
||||||
|
@ -1,6 +0,0 @@
|
|||||||
---
|
|
||||||
- name: DNS playbook
|
|
||||||
hosts: DNS
|
|
||||||
roles:
|
|
||||||
- role: pdns_recursor-ansible
|
|
||||||
become: true
|
|
@ -5,3 +5,4 @@
|
|||||||
- cronie
|
- cronie
|
||||||
- hass-client-control
|
- hass-client-control
|
||||||
- mpd
|
- mpd
|
||||||
|
|
||||||
|
@ -1,28 +0,0 @@
|
|||||||
---
|
|
||||||
- name: gather all
|
|
||||||
hosts: all
|
|
||||||
- name: NAS playbook
|
|
||||||
hosts: NAS
|
|
||||||
vars:
|
|
||||||
# certbot_force: true
|
|
||||||
pre_tasks:
|
|
||||||
- name: include task NasBind
|
|
||||||
ansible.builtin.include_tasks:
|
|
||||||
file: tasks/NasBind.yml
|
|
||||||
loop: "{{ nas_bind_source }}"
|
|
||||||
- name: create nomad folder
|
|
||||||
ansible.builtin.file:
|
|
||||||
path: "{{ nas_bind_target }}/nomad/{{ item.name }}"
|
|
||||||
owner: "{{ item.owner|default('root') }}"
|
|
||||||
state: directory
|
|
||||||
become: true
|
|
||||||
loop: "{{ NAS_nomad_folder }}"
|
|
||||||
roles:
|
|
||||||
- role: ansible-role-nut
|
|
||||||
become: true
|
|
||||||
- role: ansible-role-nfs
|
|
||||||
become: true
|
|
||||||
- role: ansible-role-pureftpd
|
|
||||||
become: true
|
|
||||||
- role: vladgh.samba.server
|
|
||||||
become: true
|
|
@ -2,25 +2,10 @@
|
|||||||
- hosts:
|
- hosts:
|
||||||
- homelab
|
- homelab
|
||||||
- VPS
|
- VPS
|
||||||
- NAS
|
|
||||||
vars:
|
vars:
|
||||||
# certbot_force: true
|
# certbot_force: true
|
||||||
tasks:
|
|
||||||
- name: Create user
|
|
||||||
ansible.builtin.include_role:
|
|
||||||
name: "ansible-user"
|
|
||||||
apply:
|
|
||||||
become: true
|
|
||||||
vars:
|
|
||||||
user_name: "{{ create.name }}"
|
|
||||||
user_home: "{{ create.home }}"
|
|
||||||
user_groups: "{{ create.groups|default('') }}"
|
|
||||||
user_shell: "{{ create.shell|default('') }}"
|
|
||||||
user_authorized_key: "{{ create.authorized_keys|default([]) }}"
|
|
||||||
user_privatekey: "{{ create.privatekey|default([])}}"
|
|
||||||
loop: "{{system_user}}"
|
|
||||||
loop_control:
|
|
||||||
loop_var: create
|
|
||||||
roles:
|
roles:
|
||||||
- system
|
- system
|
||||||
|
- autofs
|
||||||
|
- msmtp
|
||||||
- cronie
|
- cronie
|
||||||
|
@ -1,5 +0,0 @@
|
|||||||
---
|
|
||||||
- hosts: all
|
|
||||||
roles:
|
|
||||||
- role: ansible-role-sssd
|
|
||||||
become: true
|
|
@ -1,18 +0,0 @@
|
|||||||
- name: Ensure base NFS directory exist
|
|
||||||
ansible.builtin.file:
|
|
||||||
path: "{{ item.dest }}"
|
|
||||||
state: directory
|
|
||||||
become: true
|
|
||||||
- name: Ensure source NFS directory exist
|
|
||||||
ansible.builtin.file:
|
|
||||||
path: "{{ item.source }}"
|
|
||||||
state: directory
|
|
||||||
become: true
|
|
||||||
- name: Bind NAS export
|
|
||||||
ansible.posix.mount:
|
|
||||||
path: "{{ item.dest }}"
|
|
||||||
src: "{{ item.source }}"
|
|
||||||
opts: bind
|
|
||||||
fstype: none
|
|
||||||
state: mounted
|
|
||||||
become: true
|
|
@ -1 +0,0 @@
|
|||||||
path = /exports/homes/%S
|
|
4
ansible/playbooks/user_config.yml
Normal file
4
ansible/playbooks/user_config.yml
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
- hosts: all
|
||||||
|
vars:
|
||||||
|
roles:
|
||||||
|
- user_config
|
@ -2,4 +2,4 @@
|
|||||||
- hosts: wireguard
|
- hosts: wireguard
|
||||||
roles:
|
roles:
|
||||||
- role: ansible-role-wireguard
|
- role: ansible-role-wireguard
|
||||||
become: true
|
become: True
|
||||||
|
@ -1,52 +1,24 @@
|
|||||||
[DNS]
|
[homelab]
|
||||||
oscar
|
oscar
|
||||||
|
gerard
|
||||||
|
|
||||||
|
[VPS]
|
||||||
|
corwin
|
||||||
|
merlin
|
||||||
|
|
||||||
|
|
||||||
[dhcp]
|
[dhcp]
|
||||||
oberon
|
gerard
|
||||||
|
|
||||||
[database_active]
|
[wireguard]
|
||||||
bleys
|
corwin
|
||||||
|
|
||||||
[database_standby]
|
|
||||||
oscar
|
oscar
|
||||||
|
merlin
|
||||||
|
gerard
|
||||||
|
|
||||||
[database:children]
|
[database]
|
||||||
database_active
|
oscar
|
||||||
database_standby
|
merlin
|
||||||
|
|
||||||
[rsyncd]
|
[rsyncd]
|
||||||
oscar
|
oscar
|
||||||
bleys
|
|
||||||
|
|
||||||
[wireguard:children]
|
|
||||||
production
|
|
||||||
|
|
||||||
[NAS]
|
|
||||||
oberon
|
|
||||||
|
|
||||||
[cluster]
|
|
||||||
oscar
|
|
||||||
#gerard
|
|
||||||
bleys
|
|
||||||
|
|
||||||
|
|
||||||
[homelab:children]
|
|
||||||
NAS
|
|
||||||
cluster
|
|
||||||
|
|
||||||
[VPS]
|
|
||||||
merlin
|
|
||||||
|
|
||||||
[region:children]
|
|
||||||
homelab
|
|
||||||
VPS
|
|
||||||
production
|
|
||||||
|
|
||||||
[production]
|
|
||||||
oscar
|
|
||||||
merlin
|
|
||||||
#gerard
|
|
||||||
bleys
|
|
||||||
oberon
|
|
||||||
|
|
||||||
[staging]
|
|
||||||
|
@ -6,8 +6,10 @@
|
|||||||
|
|
||||||
- hosts: all
|
- hosts: all
|
||||||
remote_user: root
|
remote_user: root
|
||||||
|
vars:
|
||||||
|
ansible_password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/user:root') }}"
|
||||||
roles:
|
roles:
|
||||||
- ansible_bootstrap
|
- ansible_bootstrap
|
||||||
|
|
||||||
# - remote_user: "{{ user.name }}"
|
- remote_user: "{{ user.name }}"
|
||||||
# import_playbook: site.yml
|
import_playbook: site.yml
|
||||||
|
@ -1,49 +1,37 @@
|
|||||||
---
|
---
|
||||||
roles:
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-arch-provissionning.git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-arch-provissionning.git
|
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-postgresql.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-role-postgresql.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-sssd
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-role-sssd
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible_bootstrap.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible_bootstrap.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/autofs.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/autofs.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/cronie.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/cronie.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/docker.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/docker.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/hass-client-control.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/hass-client-control.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/msmtp.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/msmtp.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/rsyncd.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/rsyncd.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/system.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/system.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/user_config.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/user_config.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: git@github.com:vincentDcmps/ansible-role-wireguard.git
|
- src: https://github.com/githubixx/ansible-role-wireguard.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-consul.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-consul.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-hashicorp-vault.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-hashicorp-vault.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-nomad.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-nomad.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/mpd.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/mpd.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-dhcpd.git
|
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-dhcpd.git
|
||||||
scm: git
|
scm: git
|
||||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-user.git
|
|
||||||
scm: git
|
|
||||||
- src: git@github.com:vincentDcmps/ansible-role-nfs.git
|
|
||||||
scm: git
|
|
||||||
- src: git@github.com:vincentDcmps/ansible-role-nut.git
|
|
||||||
scm: git
|
|
||||||
- src: git@git.ducamps.eu:2222/ansible-roles/ansible-role-pureftpd.git
|
|
||||||
scm: git
|
|
||||||
- src: https://github.com/PowerDNS/pdns_recursor-ansible.git
|
|
||||||
collections:
|
|
||||||
- name: vladgh.samba
|
|
||||||
|
@ -1,10 +1,9 @@
|
|||||||
---
|
---
|
||||||
- import_playbook: playbooks/server.yml
|
- import_playbook: playbooks/server.yml
|
||||||
- import_playbook: playbooks/dhcpd.yml
|
- import_playbook: playbooks/wireguard.yml
|
||||||
- import_playbook: playbooks/dns.yml
|
|
||||||
- import_playbook: playbooks/HashicorpStack.yml
|
- import_playbook: playbooks/HashicorpStack.yml
|
||||||
- import_playbook: playbooks/nas.yml
|
|
||||||
- import_playbook: playbooks/autofs.yml
|
|
||||||
- import_playbook: playbooks/sssd.yml
|
|
||||||
- import_playbook: playbooks/database.yml
|
- import_playbook: playbooks/database.yml
|
||||||
- import_playbook: playbooks/rsyncd.yml
|
- import_playbook: playbooks/rsyncd.yml
|
||||||
|
- import_playbook: playbooks/music-player.yml
|
||||||
|
- import_playbook: playbooks/dhcpd.yml
|
||||||
|
- import_playbook: playbooks/user_config.yml
|
||||||
|
@ -1,44 +1,13 @@
|
|||||||
[DNS]
|
|
||||||
oscar-dev
|
|
||||||
|
|
||||||
[database_active]
|
|
||||||
oscar-dev
|
|
||||||
|
|
||||||
[database_standby]
|
|
||||||
gerard-dev
|
|
||||||
|
|
||||||
[database:children]
|
|
||||||
database_active
|
|
||||||
database_standby
|
|
||||||
|
|
||||||
[wireguard:children]
|
|
||||||
staging
|
|
||||||
|
|
||||||
[NAS]
|
|
||||||
nas-dev
|
|
||||||
|
|
||||||
[cluster]
|
|
||||||
oscar-dev
|
|
||||||
gerard-dev
|
|
||||||
|
|
||||||
[homelab:children]
|
|
||||||
NAS
|
|
||||||
cluster
|
|
||||||
|
|
||||||
[VPS]
|
[VPS]
|
||||||
merlin-dev
|
VMDR
|
||||||
|
|
||||||
[region:children]
|
[dhcp]
|
||||||
homelab
|
VMAS-BUILD
|
||||||
VPS
|
|
||||||
staging
|
[VMServer]
|
||||||
|
VMAS-HML
|
||||||
|
|
||||||
|
|
||||||
|
[wireguard]
|
||||||
|
VMDR
|
||||||
|
|
||||||
[staging]
|
|
||||||
oscar-dev
|
|
||||||
gerard-dev
|
|
||||||
merlin-dev
|
|
||||||
nas-dev
|
|
||||||
|
|
||||||
[production]
|
|
||||||
|
@ -6,16 +6,15 @@
"tags": [
"homer.enable=true",
"homer.name=Diskstation",
"homer.url=https://syno.ducamps.eu",
"homer.url=https://syno.ducamps.win",
"homer.logo=https://syno.ducamps.eu/webman/resources/images/icon_dsm_96.png",
"homer.logo=https://syno.ducamps.win/webman/resources/images/icon_dsm_96.png",
"homer.service=Application",
"homer.target=_blank",

"traefik.enable=true",
"traefik.http.routers.syno.rule=Host(`syno.ducamps.eu`)",
"traefik.http.routers.syno.rule=Host(`syno.ducamps.win`)",
"traefik.http.routers.syno.tls.domains[0].sans=syno.ducamps.eu",
"traefik.http.routers.syno.tls.domains[0].sans=syno.ducamps.win",
"traefik.http.routers.syno.tls.certresolver=myresolver",
"traefik.http.routers.syno.tls.certresolver=myresolver"
"traefik.http.routers.syno.entrypoints=web,websecure"
]
}
}

@ -1,35 +0,0 @@
# 001 Development environment

## Status

Accepted

## Context

We need to create a virtual cluster to run tests without impacting production.

### Virtualisation or Container

Virtualisation provides better isolation but requires more resources.
Containers allow creating more instances while consuming fewer resources than virtual machines.

### Creation Wrapper

Vagrant is good at managing virtual machines but few LXC boxes are available; Vagrant can be used with configuration managers other than Ansible.
Molecule can manage LXD through the molecule-lxd plugin; Molecule is an Ansible-only solution.

## Decision

We will use containers instead of VMs for the resource consumption advantage.

The Molecule wrapper will be used because all our configuration is already provided by Ansible, and we have a better choice of containers with Molecule than with Vagrant.

25/08/2023

Some issues were met with LXC (shared kernel, privileges, plugin not maintained).
I have increased the RAM on my computer, so I can switch to virtual machines for the dev environment.
Instead of building the Vagrant VMs in a Molecule playbook, we only use a Vagrantfile to avoid too many overlays to maintain.

## Consequences

Migrate the Molecule provisioning to a dedicated Vagrantfile.

@ -1,28 +0,0 @@
# 002-Vault-Backend

## Status

## Context

Currently the Vault backend is stored in the Consul KV.
Hashicorp's recommendation is to use integrated storage from the Vault cluster.
This would remove the Consul dependency on rebuild.

## Decision

Migrate to Vault integrated storage.

## Consequences

To do:

- [migration plan](https://developer.hashicorp.com/vault/tutorials/raft/raft-migration)

1. Switch oscar, gerard and bleys to integrated storage; merlin will stay on Consul storage during the operation, until decommissioning.
2. Stop the Vault service on oscar.
3. Run the migration command (see the sketch below).
4. Join the other nodes to the cluster.
5. Decommission Vault on merlin.
6. Adapt the backup job.

- [backup](https://developer.hashicorp.com/vault/tutorials/standard-procedures/sop-backup)
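As a rough illustration of step 3, here is a minimal sketch of the Vault storage migration, assuming the command is run on the node whose Vault service was just stopped; the Consul address, KV path and Raft data directory below are placeholders to adapt to the real cluster:

```sh
# Hypothetical migration config: adapt addresses and paths to the real setup.
cat > migrate.hcl <<'EOF'
storage_source "consul" {
  address = "127.0.0.1:8500"
  path    = "vault/"
}

storage_destination "raft" {
  path = "/opt/vault/data"
}

cluster_addr = "https://127.0.0.1:8201"
EOF

# One-shot copy of the data from the Consul KV into integrated (Raft) storage.
vault operator migrate -config=migrate.hcl
```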

@ -1,54 +0,0 @@
# 003-mailserver

## Status

done

## Context

Gandi free email will become a paid service in 2 months.

In this situation it is interesting to study a self-hosted mail solution.

### domain name

Do I take advantage of this to change the domain name?

Pros:

- could test more easily
- could redirect the old domain name to the new one until the end of the Gandi domain (2026)
- get a more "normal" extension

Cons:

- need to progressively update every personal account

### Container localisation

On hetzner:

- need to increase memory

On the homelab:

- need to redirect all server mail flows to hetzner to be sure that mail is sent with the hetzner IP (we control the PTR on this IP)
- hetzner would then also be a SPOF

### software choice

The mail server will run in the Nomad cluster.

docker-mailserver -> 1 container
mailu

## Decision

We will switch to another domain name on "https://www.bookmyname.com/": ducamps.eu.
docker-mailserver will be easier to configure because there is only one container to migrate to Nomad.
To begin, the container will be launched on hetzner.

## Consequences

- need to buy a new domain name and configure DNS (done)
- improve memory on corwin (done)

@ -1,117 +0,0 @@
# DNS

## 001 Recursor out of NAS

### Status

done

### Context

Currently the main local domain DNS is located on the NAS.

Goals:

- Avoid a DNS outage in case of a NAS reboot (my Synology is 10 years old and a little slow to reboot); moreover, during a NAS reboot we lose the adblock DNS in the Nomad cluster because Nomad depends on the NFS share.
- Remove the direct redirection to the service.consul DNS and the iptables rule used to redirect port 53 to Consul on gerard; instead the new DNS could forward directly to an active Consul node on port 8300.

#### DNS software

Needs DHCP dynamic update.
Must be able to forward a domain to a port other than 53.

### Decision

We will migrate the main domain DNS from the NAS to gerard (PowerDNS).
PowerDNS provides two distinct binaries: one for the authoritative server and one for the recursor.
The goal is to first migrate the recursive part from the Synology to a physical service, and in a second step migrate the authoritative server into the Nomad cluster.

### Consequences

Before moving the authoritative server we need to remove the DB DNS dependency (create db Consul services).
The iptables rule on gerard must be deleted before deployment.

## 002 each node requests its own consul client for consul DNS queries

### Status

done

### Context

To avoid a cluster failure in case the DNS recursor goes down,
I would like each cluster client to query its own Consul client
first to resolve Consul DNS queries.

### Decision

Implement systemd-resolved on all cluster members and add a DNS redirection.

### Consequences

Need to modify the Ansible system role to activate systemd-resolved and the Consul role to configure the redirection (see the sketch below).
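As a hedged illustration of that redirection (not the exact content of the Ansible roles), a systemd-resolved drop-in like the one below forwards only the `consul` domain to the local Consul agent's DNS interface; the file name and the 8600 port are assumptions to check against the actual role defaults:

```sh
# Assumed drop-in; DNS= with a port requires a recent systemd (>= 246).
sudo mkdir -p /etc/systemd/resolved.conf.d
cat <<'EOF' | sudo tee /etc/systemd/resolved.conf.d/consul.conf
[Resolve]
DNS=127.0.0.1:8600
Domains=~consul
EOF
sudo systemctl restart systemd-resolved

# Quick check: resolve a Consul service through the local stub resolver.
resolvectl query active.vault.service.consul
```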
## 003 migrate authoritative DNS from NAS to cluster

### Status

done

### Context

We currently have three authoritative domains on the NAS:

- ducamps.win
- ducamps.eu
- lan.ducamps.eu

We could migrate the authoritative DNS into the cluster.
ducamps.win and ducamps.eu are only used for application access, so there is no dependency on the cluster build.
We need to study the cluster build dependency for lan.ducamps.eu: in any case, a build from scratch will need to use IPs.
We need a keepalive IP and to check there is no conflict if it is stored on the same machine as pihole: OK, it doesn't need to listen on 53, it is only queried by the recursor.
The authoritative DNS will depend on storage (less problematic than the recursor).

### Decision

### Consequences

## 004 migrate recursor into cluster

### Status

done

### Context

Now that the cluster doesn't depend on the recursor (each node queries its own Consul agent for Consul queries),
we need to study whether we can migrate the recursor into Nomad without breaking dependencies.

Advantages:

- the recursor could move to another client in case of failure

Against:

- this job needs a keepalive IP like pihole
- *we lose the recursor if the Nomad cluster is lost*

### Decision

Put one recursor on the cluster in addition to the authoritative server, and keep the recursor on gerard for better redundancy.

### Consequences

## 005 physical Recursor location

### Status

done

### Context

Following the NAS migration, the physical DNS recursor was installed directly on the NAS; this brings a SPOF: when the NAS fails, the recursors on the Nomad cluster are stopped as well because of the volume dependency.

### Decision

Put the physical recursor on a cluster node; that way, to have a DNS issue we would need the NAS and this Nomad node down at the same time.

@ -1,42 +0,0 @@
# NAS

## 001 New NAS spec

### Status

In progress

### Context

Storage:

- The data filesystem will be btrfs.
- Study whether to keep the root filesystem in ext4.
- Need to use LVM with btrfs to keep the possibility of adding a cache later (a cache on cold data is useless; at the beginning, maybe a write cache for future use).
- Hot data (nomad, documents, freshly downloaded files, music?) on SSD, cold data on HDD (films, series, photos).
- At least 2 HDDs and 2 SSDs.

Hardware:

- 2.5 Gbps network would be good for evolution.
- At least 4 GB RAM (expandable would be appreciated).

Software:

Must be able to install a custom Linux distribution.

### Decision

- Due to form factor/consumption and SSD capability, my choice is the ASUSTOR Nimbustor 2 Gen 2 AS5402; it matches the needs and is less expensive than a DIY NAS.
- Buy only one additional 2 TB SSD to store the system and hot data.

### Consequences

Need to migrate the data and keep the same disks:

- install the system
- copy all data from the 2 TB HDD to the SSD, then format the 2 TB HDD
- copy download data from the 4 TB HDD to the SSD
- copy series to the 2 TB HDD and copy films to an external hard drive

@ -1,25 +0,0 @@
# Docker Pull through

## 001 architecture consideration

### Status

Accepted

### Context

Docker Hub enforces a pull limit; if something goes wrong on our infrastructure we can quickly hit this limit. The solution is to implement a pull-through proxy.

### Decision

Create two container tasks: a Docker Hub pull-through and a ghcr one (see the sketch below).

We could add these registries to Traefik to have both under port 5000, but this would add a Traefik dependency on rebuild.

So to begin we will use one Traefik service on two different static ports.
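As a minimal sketch of what such a pull-through cache looks like at the registry level (shown here with plain `docker run` rather than the actual Nomad jobs; the ports and file names are assumptions):

```sh
# Docker Hub pull-through cache listening on an assumed static port 5000.
cat > dockerhub-proxy.yml <<'EOF'
version: 0.1
proxy:
  remoteurl: https://registry-1.docker.io
storage:
  filesystem:
    rootdirectory: /var/lib/registry
http:
  addr: :5000
EOF

docker run -d --name dockerhub-proxy \
  -p 5000:5000 \
  -v "$PWD/dockerhub-proxy.yml:/etc/docker/registry/config.yml" \
  registry:2

# The ghcr pull-through would be a second instance of the same image,
# pointed at https://ghcr.io and listening on another static port (e.g. 5001).
```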
### Consequences

- these registries need to be started first on cluster creation
- need to update all job images with the local proxy URL

@ -1,36 +0,0 @@
# Architecture DNS

```mermaid
flowchart LR
subgraph External
externalRecursor[recursor]
GandiDns[ hetzner ducamps.win]
end
subgraph Internal
pihole[pihole]--ducamps.win-->NAS
pihole--service.consul-->consul[consul cluster]
pihole--->recursor
recursor--service.consul-->consul
DHCP --dynamic update--> NAS
NAS
recursor--ducamps.win-->NAS
consul--service.consul--->consul
clients--->pihole
clients--->recursor
end
pihole --> externalRecursor
recursor-->External
```

## Detail

The pihole container in the Nomad cluster is set as primary DNS and ad blocker; the secondary DNS recursor is located on gerard.

The DNS located on the NAS manages the *ducamps.win* domain on the local network; each recursor forwards *ducamps.win* requests to this DNS.

Each DNS forwards *service.consul* requests to the Consul cluster.
Each Consul node has a consul redirection in systemd-resolved to its own Consul client.

A DHCP service is set up to perform dynamic updates on the NAS DNS at lease delivery.

External recursors (Cloudflare and FDN) are set on pihole in case of recursor failure.

@ -1,11 +0,0 @@
# Add a new job

## Create Nomad job

## Add secret to vault

## Add a new policy to Vault terraform

## Add Database creation in ansible variable (if needed)

## Create CNAME in local DNS and External if needed
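As a hedged illustration of the "Add secret to vault" step (the job name and keys below are hypothetical; the matching policy itself stays managed in the Vault Terraform as described above):

```sh
# Store the new job's secrets in the KV v2 mount already used by the cluster ("secrets").
vault kv put secrets/myjob/config \
  db_password="$(pwgen -s 32 1)" \
  api_key="changeme"

# Check the secret back before wiring it into the Nomad job template.
vault kv get secrets/myjob/config
```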
@ -1,25 +0,0 @@
|
|||||||
# ansible vault management
|
|
||||||
|
|
||||||
ansible password are encoded with a gpg key store in ansible/misc
|
|
||||||
to renew password follow this workflown
|
|
||||||
|
|
||||||
```sh
# Generate a new password for the default vault
pwgen -s 64 default-pw

# Re-encrypt all default vaults
ansible-vault rekey --new-vault-password-file ./default-pw \
    $(git grep -l 'ANSIBLE_VAULT;1.1;AES256$')

# Save the new password in encrypted form
# (replace "RECIPIENT" with your email)
gpg -r RECIPIENT -o misc/vault--password.gpg -e default-pw

# Ensure the new password is usable
ansible-vault view misc/vaults/vault_hcloud.yml

# Remove the unencrypted password file
rm default-pw
```
The script `vault-keyring-client.sh` is set in ansible.cfg as `vault_password_file` to decrypt the GPG file.
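A minimal sketch of what such a `vault_password_file` script does, assuming it simply decrypts the GPG file and prints the password on stdout (the real `vault-keyring-client.sh` may differ):

```sh
#!/bin/sh
# Hypothetical sketch: decrypt the GPG-encrypted vault password and print it,
# which is what ansible-vault expects from a vault_password_file script.
exec gpg --quiet --batch --decrypt "$(dirname "$0")/vault--password.gpg"
```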
@ -1,8 +0,0 @@
# Troubleshooting

## Issue with the SMTP Traefik port

Ensure that no other Traefik router (HTTP or TCP) is listening on the smtp entrypoint or on all entrypoints; this can disturb the SMTP TLS connection.
See [here](https://doc.traefik.io/traefik/routing/routers/#entrypoints_1).
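One way to spot an offending router is to list routers and their entrypoints through the Traefik API (assuming the API/dashboard is enabled; the host below is a placeholder):

```sh
# List TCP and HTTP routers with their entrypoints and look for anything
# unexpectedly bound to the smtp entrypoint or to all entrypoints.
curl -s http://traefik.service.consul:8080/api/tcp/routers | jq '.[] | {name, entryPoints, rule}'
curl -s http://traefik.service.consul:8080/api/http/routers | jq '.[] | {name, entryPoints, rule}'
```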
@ -1,23 +0,0 @@
# How to bootstrap the dev env

## Prerequisite

The dev environment is managed by a molecule job that launches containers via LXD. You need the following software to launch it:

- an LXD server up on your local machine
- molecule: `pip install molecule`
- the molecule LXD plugin: `pip install molecule-lxd`

## Provisioning

You can launch `make create-dev` at the project root.

Molecule will create 3 containers on different distributions:

- archlinux
- rockylinux 9
- debian 11

To bootstrap the containers (base account, sudo configuration), the [ansible_bootstrap](https://git.ducamps.win/ansible-roles/ansible_bootstrap) role is applied.

The converge step calls the [site.yml](https://git.ducamps.win/vincent/homelab/src/commit/c5ff235b9768d91b240ec97e7ff8e2ad5a9602ca/ansible/site.yml) playbook to provision the cluster (see the molecule sketch below).
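For reference, `make create-dev` presumably wraps molecule commands along these lines (a sketch of typical molecule usage, not the actual Makefile target):

```sh
# run from the directory that holds the molecule scenario (path is an assumption)
molecule create      # create the three LXD containers
molecule converge    # apply the bootstrap role and the site.yml playbook
molecule list        # check instance state
molecule destroy     # tear the containers down when finished
```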
@ -1,3 +0,0 @@
--8<--
README.md
--8<--
23
infra/.terraform.lock.hcl
Normal file
@ -0,0 +1,23 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.

provider "registry.terraform.io/hetznercloud/hcloud" {
  version = "1.33.2"
  hashes = [
    "h1:3Hx8p9LbcnHfBhy3nT7+unlc5rwkiSZjLt9SVQOSpB8=",
    "zh:0a5d0f332d7dfe77fa27301094af98a185aabfb9f56d71b81936e03211e4d66f",
    "zh:0e047859ee7296f335881933ccf8ce8c07aa47bef56d5449a81b85a2d9dac93a",
    "zh:1d3d0896f518df9e245c3207ed231e528f5dcfe628508e7c3ceba4a2bfefaa7a",
    "zh:1d7a31c8c490512896ce327ab220e950f1a2e30ee83cc2e58e69bbbfbbb87e72",
    "zh:67cbb2492683cb22f6c54f26bee72aec140c8dd2d0881b2815d2ef80959fc751",
    "zh:771062815e662979204ac2dc91c34c893f27670d67e02370e48124483d3c9838",
    "zh:957ebb146898cd059c0cc8b4c32e574b61041d8b6a11cd854b3cc1d3baaeb3a9",
    "zh:95dbd8634000b979213cb97b5d869cad78299ac994d0665d150c8dafc1390429",
    "zh:a21b22b2e9d835e1b8b3b7e0b41a4d199171d62e9e9be78c444c700e96b31316",
    "zh:aead1ba50640a51f20d574374f2c6065d9bfa4eea5ef044d1475873c33e58239",
    "zh:cefabd0a78af40ea5cd08e1ca436c753df9b1c6496eb27281b755a2de1f167ab",
    "zh:d98cffc5206b9a7550a23e13031a6f53566bd1ed3bf65314bc55ef12404d49ce",
    "zh:dddaaf95b6aba701153659feff12c7bce6acc78362cb5ff8321a1a1cbf780cd9",
    "zh:fd662b483250326a1bfbe5684c22c5083955a43e0773347eea35cd4c2cfe700e",
  ]
}
@ -1,24 +1,6 @@
 resource "hcloud_firewall" "prod" {
     name= "prod"
-
-    rule {
-        direction ="in"
-        protocol = "icmp"
-        source_ips = [
-            "0.0.0.0/0",
-            "::/0"
-        ]
-    }
-    rule {
-        direction ="in"
-        protocol = "udp"
-        port = "51820"
-        source_ips = [
-            "0.0.0.0/0",
-            "::/0"
-        ]
-    }
     rule {
         direction ="in"
         protocol = "tcp"
         port = "80"
@ -36,11 +18,28 @@ resource "hcloud_firewall" "prod" {
             "::/0"
         ]
     }
-}
-
-resource "hcloud_firewall" "torrent" {
-    name = "torrent"
+    # torrent UDH port
+    rule {
+        direction ="in"
+        protocol = "udp"
+        port = "6881"
+        source_ips = [
+            "0.0.0.0/0",
+            "::/0"
+        ]
+    }
+    # wireguard port
+    rule {
+        direction ="in"
+        protocol = "udp"
+        port = "51820"
+        source_ips = [
+            "0.0.0.0/0",
+            "::/0"
+        ]
+
+    }
+    # torrent listen port
     rule {
         direction ="in"
         protocol = "tcp"
@ -51,10 +50,9 @@ resource "hcloud_firewall" "torrent" {
         ]

     }
     rule {
         direction ="in"
-        protocol = "udp"
-        port = "6881"
+        protocol = "icmp"
         source_ips = [
             "0.0.0.0/0",
             "::/0"
@ -74,47 +72,3 @@ resource "hcloud_firewall" "ssh" {
         ]
     }
 }
-
-resource "hcloud_firewall" "Gitea_SSH" {
-    name= "Gitea SSH"
-    rule {
-        direction ="in"
-        protocol = "tcp"
-        port="2222"
-        source_ips = [
-            "0.0.0.0/0",
-            "::/0"
-        ]
-    }
-}
-resource "hcloud_firewall" "mail" {
-    name= "mail"
-    rule {
-        direction ="in"
-        protocol = "tcp"
-        port="25"
-        source_ips = [
-            "0.0.0.0/0",
-            "::/0"
-        ]
-    }
-    rule {
-        direction ="in"
-        protocol = "tcp"
-        port="993"
-        source_ips = [
-            "0.0.0.0/0",
-            "::/0"
-        ]
-    }
-    rule {
-        direction ="in"
-        protocol = "tcp"
-        port="465"
-        source_ips = [
-            "0.0.0.0/0",
-            "::/0"
-        ]
-    }
-
-}
@ -1,13 +1,13 @@
 output "homelab_servers_status" {
   value = {
-    for server in hcloud_server.merlin :
+    for server in hcloud_server.HomeLab :
     server.name => server.status
   }
 }

 output "homelab_servers_ips" {
   value = {
-    for server in hcloud_server.merlin :
+    for server in hcloud_server.HomeLab :
     server.name => server.ipv4_address
   }
 }
Some files were not shown because too many files have changed in this diff.