Compare commits
451 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
90dd0ecd9a | ||
|
4f6743db5f | ||
|
2452a2ad44 | ||
|
5e2bb57914 | ||
|
3eb2dbfa08 | ||
|
1ea094aa6e | ||
|
c1e48d4ace | ||
|
b2710aab2f | ||
|
c000933f66 | ||
|
7948773757 | ||
|
3d90a1f6d7 | ||
|
1f29007172 | ||
|
af58866882 | ||
|
374a62c304 | ||
|
9451443266 | ||
|
dacd187f7b | ||
|
e48a879c43 | ||
|
6ea5de0315 | ||
|
984b712c78 | ||
|
293fddd81c | ||
|
0952c4bf42 | ||
|
3228054172 | ||
|
ee7cd0c12e | ||
|
22a60b42d4 | ||
|
d578fefbce | ||
|
cae4ceb623 | ||
|
ddc4320fe9 | ||
|
d1b475d651 | ||
|
d817f3a7f8 | ||
|
18a78f6fd2 | ||
|
f22e3406be | ||
|
1520ec0dcc | ||
|
275435664c | ||
|
f9ff70a9d9 | ||
|
8915ff52dd | ||
|
74794f866a | ||
|
7244ceb5b1 | ||
|
49a8a427f7 | ||
|
f4f77fc55a | ||
|
351d7c287f | ||
|
598896ad5f | ||
|
6e00668840 | ||
|
24eb640c60 | ||
|
9b6ed6cc6e | ||
|
2f1de5dcd5 | ||
|
78692be3fd | ||
|
272efbb844 | ||
|
c9f4656470 | ||
|
6e679c82a0 | ||
|
9d0c513787 | ||
|
69a2ad4efd | ||
|
2f6c814fb1 | ||
|
ab3c42cf8b | ||
|
992937c011 | ||
|
5fe61223c3 | ||
|
452ab3611a | ||
|
1ee5e21f84 | ||
|
92befa7ea4 | ||
|
4be6af919d | ||
|
77e7cd4f88 | ||
|
fe9bc8dbab | ||
|
60cfe75e47 | ||
|
4fcf862279 | ||
|
98c1d63962 | ||
|
0b067cabca | ||
|
4ef30222f7 | ||
|
117e9397a3 | ||
|
0b25eb194e | ||
|
74dc3a0c89 | ||
|
9bc0e24357 | ||
|
e0f9190b76 | ||
|
f0676ec3f7 | ||
|
8b895fee06 | ||
|
aeed90ea34 | ||
|
a89109e1ff | ||
|
d748beb6a4 | ||
|
3a80c47b56 | ||
|
c75e9e707a | ||
|
4926b4eb06 | ||
|
0ebd087544 | ||
|
b7dc26cc27 | ||
|
012c448c73 | ||
|
1b79fe4cb0 | ||
|
6848ffa05b | ||
|
aec7230f11 | ||
|
da3b290d4a | ||
|
5718968407 | ||
|
0db8555fe8 | ||
|
2fee8293dc | ||
|
3dae6adb33 | ||
|
f207be7d7d | ||
|
f32c0d1e40 | ||
|
d37fe78e39 | ||
|
586e6101ca | ||
|
e470b204a5 | ||
|
c4d10aacfe | ||
|
e10830e028 | ||
|
c37083b5c9 | ||
|
c7e6270c3a | ||
|
625bda7fda | ||
|
d1cc5ff299 | ||
|
0a57c5659c | ||
|
7191cb7216 | ||
|
b3488061da | ||
|
c08032052d | ||
|
25780828cc | ||
|
46b4a51935 | ||
|
993753f284 | ||
|
5188d865d8 | ||
|
2a731201a1 | ||
|
70e0d6011b | ||
|
2c0da4bd15 | ||
|
547ce05466 | ||
|
bfb3ec3d34 | ||
|
9756939f8e | ||
|
f420f17929 | ||
|
2bae64c40b | ||
|
c8f7d7f8c3 | ||
|
2632c6d2b0 | ||
|
f61008b570 | ||
|
73df5fa582 | ||
|
e3d76630c3 | ||
|
41b1a71c76 | ||
|
e9ad317436 | ||
|
2db6061516 | ||
|
3367c78314 | ||
|
08ea604028 | ||
|
29ab70a1d5 | ||
|
e083f4da7a | ||
|
2ea4992f57 | ||
|
49de33bbdb | ||
|
2b678b7786 | ||
|
fc2dcd7b33 | ||
|
29d70cac0e | ||
|
4117bd80c5 | ||
|
da6f04e42e | ||
|
13bda4cd34 | ||
|
63cd352fff | ||
|
a65e3484b5 | ||
|
2b9e034232 | ||
|
527d2f2345 | ||
|
2da18e9c12 | ||
|
49f639cb15 | ||
|
abc88f0074 | ||
|
394dbaf6cb | ||
|
78762b477e | ||
|
2c00b9be59 | ||
|
acc6cdc5fa | ||
|
43b6cf9158 | ||
|
015a89b27e | ||
|
68434f3e92 | ||
|
fe6d1c5e26 | ||
|
f8bc026165 | ||
|
80f489422a | ||
|
4207b1fc75 | ||
|
ea30fce975 | ||
|
5b23006e97 | ||
|
9370a92518 | ||
|
9fcf2d78e6 | ||
|
f82c99c2ba | ||
|
cecad8b785 | ||
|
28fc2bf6a7 | ||
|
a0214d0d74 | ||
|
9812376a1d | ||
|
6ddcc4736e | ||
|
11fe5fb5dc | ||
|
ec2ecd08cd | ||
|
40ce7c1550 | ||
|
64346cc63b | ||
|
ffd597f710 | ||
|
c4f1423501 | ||
|
5a8c4519a6 | ||
|
908495bce3 | ||
|
8ca6413b02 | ||
|
8008295780 | ||
|
05930da661 | ||
|
5d966908c5 | ||
|
c7a6ed5392 | ||
|
f3469bd612 | ||
|
33b4fc6ad5 | ||
|
351bef555c | ||
|
6db6b28706 | ||
|
8081e89176 | ||
|
3628139699 | ||
|
f0dd3e8f33 | ||
|
0b78cbe0e3 | ||
|
da1686cdea | ||
|
5939ff8057 | ||
|
d15939640f | ||
|
47761bf90e | ||
|
2fc86fc14f | ||
|
49d2ce491f | ||
|
1992f75888 | ||
|
a0179b829d | ||
|
2cad7575d1 | ||
|
9f5c738317 | ||
|
f2c7e9a95a | ||
|
4f1646afc2 | ||
|
ba4647379e | ||
|
58f89756d3 | ||
|
a60a1bc578 | ||
|
9578b25804 | ||
|
f2bc16cbe0 | ||
|
70eec26d0a | ||
|
98f1e34d04 | ||
|
9e4348065e | ||
|
f17a946d81 | ||
|
b494eaf358 | ||
|
5d3432ff45 | ||
|
5685458fbf | ||
|
674813e2e4 | ||
|
3944d444aa | ||
|
9a0aa359a5 | ||
|
4e9155e0db | ||
|
b54420c0d9 | ||
|
db8b2c3b1e | ||
|
bed1a666da | ||
|
9d44ad59c7 | ||
|
c8a1ba34f3 | ||
|
b1afa5a801 | ||
|
4cd583622b | ||
|
8718bfe051 | ||
|
594ffcad44 | ||
|
14b1ac38e2 | ||
|
521ea28229 | ||
|
85d9dfa7d7 | ||
|
61d182dfe6 | ||
|
ecc4e1dbb9 | ||
|
439611990e | ||
|
ef927ee761 | ||
|
3770c41d03 | ||
|
50d43dd44c | ||
|
1accb487e6 | ||
|
9965a58e47 | ||
|
b972781036 | ||
|
0e4d6c30d1 | ||
|
cf53b72179 | ||
|
a99d4534c6 | ||
|
38ea6d811e | ||
|
202fdf176e | ||
|
dc7d2134bf | ||
|
aef03b0e13 | ||
|
d5ad4a239c | ||
|
42cce82722 | ||
|
276fa3c7ec | ||
|
7a433c2492 | ||
|
6f55907bb3 | ||
|
bfa620f178 | ||
|
1fbf3a9407 | ||
|
a8ed6daf77 | ||
|
ae52d90998 | ||
|
32b5b30760 | ||
|
24ab28b538 | ||
|
6b0b4ff807 | ||
|
bf88a6e74f | ||
|
7e1771d998 | ||
|
4a3e6b3450 | ||
|
18dccdd54c | ||
|
196d1b1759 | ||
|
6149b95b4e | ||
|
cbb2ba178b | ||
|
5253490f65 | ||
|
6d2c5f57a5 | ||
|
211a2adc5c | ||
|
bfd67fdf46 | ||
|
a8637576eb | ||
|
614e237d45 | ||
|
b44c620f95 | ||
|
2c1f3629c5 | ||
|
4a987e6446 | ||
|
db04c1b678 | ||
|
eadf067157 | ||
|
b4d1c7ffb9 | ||
|
fb5f6978ac | ||
|
6705e06541 | ||
|
0d2e2a3d52 | ||
|
cd35d16f0f | ||
|
15e1c5c018 | ||
|
9f853d91f5 | ||
|
a99cc3a76b | ||
|
c2737f6771 | ||
|
5a92d6b37a | ||
|
55801ac7e1 | ||
|
4fa4b83484 | ||
|
a9da5949e2 | ||
|
7fb16ee116 | ||
|
b4e76f9325 | ||
|
989453a16a | ||
|
29a6f1ae1a | ||
|
7929ae75e7 | ||
|
54d298dbcf | ||
|
27847f256b | ||
|
42dbb13323 | ||
|
295e45e5f8 | ||
|
0951fbb6c7 | ||
|
1c4ae9b1e2 | ||
|
a47ee8f846 | ||
|
ddf50d4837 | ||
|
efa707dea0 | ||
|
0d983dd085 | ||
|
1606797e71 | ||
|
8e30abd428 | ||
|
d72f6d540e | ||
|
353bb70e85 | ||
|
9e11793375 | ||
|
5f4c8aafbf | ||
|
8bfb3a1361 | ||
|
071ac98956 | ||
|
3ac0213417 | ||
|
0538343169 | ||
|
3487f79ec2 | ||
|
d9b6525812 | ||
|
1bee6ee326 | ||
|
85106ce630 | ||
|
88d6055da9 | ||
|
2470faf2c7 | ||
|
d3a1b4178c | ||
|
c254799d4a | ||
|
c13a264105 | ||
|
f7d77d61cc | ||
|
76dfa1c0de | ||
|
ebfcc02ae5 | ||
|
d8ec201e92 | ||
|
cba82f9183 | ||
|
dfc5eb566b | ||
|
f8a19d3e65 | ||
|
b00763ddce | ||
|
5337092bee | ||
|
825f93dd7f | ||
|
e8ef99aaa9 | ||
|
34083cbed9 | ||
|
7b9c34c567 | ||
|
b8b2db7632 | ||
|
ab74166a49 | ||
|
feec56e12d | ||
|
af6f627250 | ||
|
45707d5b2b | ||
|
53eaf5254d | ||
|
bcddfe7dd3 | ||
|
5f105ae8e9 | ||
|
69c5e14b47 | ||
|
8ddc3113f4 | ||
|
905d8fecd5 | ||
|
0f15912367 | ||
|
93edd48b81 | ||
|
18e4596e72 | ||
|
e8477f77ae | ||
|
6e966077a1 | ||
|
d9719a0077 | ||
|
0ecb686bfc | ||
|
510e1f14cb | ||
|
545d426bd3 | ||
|
fc7407300b | ||
|
5d55feab96 | ||
|
1e42376ed4 | ||
|
2c770c0163 | ||
|
a7b590626f | ||
|
83bd59ef03 | ||
|
af70c6d368 | ||
|
0b082b7377 | ||
|
af9309621c | ||
|
633cc6bbd5 | ||
|
06b9151c77 | ||
|
37e755978f | ||
|
c86a618ee1 | ||
|
c2eacbd13f | ||
|
93399e0b8f | ||
|
b55a41a338 | ||
|
6cac635294 | ||
|
c412854050 | ||
|
50935ae052 | ||
|
658de35b0d | ||
|
0596832337 | ||
|
1880303b43 | ||
|
eeff68a02c | ||
|
edfa7eacd8 | ||
|
0fd5535833 | ||
|
6d41655e0a | ||
|
05d0055210 | ||
|
550c0b8ec0 | ||
|
b33438b434 | ||
|
58d55cb486 | ||
|
c3bbaf6cfc | ||
|
4ed00fdb74 | ||
|
88cf62f45e | ||
|
a68543aeca | ||
|
aed8122aba | ||
|
9fe27b845c | ||
|
515e14367b | ||
|
d8e2e5b822 | ||
|
15f4dd762b | ||
|
902671515f | ||
|
3ce2f9327a | ||
|
97ec6e30c2 | ||
|
10af2e4848 | ||
|
1046d63037 | ||
|
7bd66ecdad | ||
|
cd66acfa7e | ||
|
c5ff235b97 | ||
|
d65eb1a6f3 | ||
|
4b2fc3b11d | ||
|
bb1bb51b4a | ||
|
a3abcb41a3 | ||
|
732d4b458d | ||
|
9c02f03cac | ||
|
0d25b5d03d | ||
|
262c97168b | ||
|
e9361a6c90 | ||
|
a0c899eb13 | ||
|
57ecdbadc2 | ||
|
ec24a076f9 | ||
|
ed4752f059 | ||
|
91b23e0c0b | ||
|
a87120ac1f | ||
|
5b09a5806e | ||
|
4151190d71 | ||
|
03950f61ef | ||
|
7954e0ca79 | ||
|
89cec17224 | ||
|
f79f326479 | ||
|
72a0539844 | ||
|
66b6c1c0d5 | ||
|
5e7bd9eb06 | ||
|
85a02032f8 | ||
|
6882646740 | ||
|
f8a0ec9a49 | ||
|
c174d8fb72 | ||
|
a30d9d112d | ||
|
2221784cb1 | ||
|
65d34f708b | ||
|
9b0b4954b2 | ||
|
7f9c734981 | ||
|
6ee2ec0ecd | ||
|
ff9f930747 | ||
|
29c395395e | ||
|
24ae45a3fd | ||
|
15dc6226c5 | ||
|
237262d7d1 | ||
|
3db0616a17 | ||
|
17a019d1c2 | ||
|
4dc30ddf20 | ||
|
688c4166cc | ||
|
74d6ef2fd6 | ||
|
eb6b24b9a1 | ||
|
5c66a08c90 | ||
|
2516e2fede | ||
|
41e1968c12 | ||
|
685f8b60db | ||
|
20721ff87e | ||
|
34544dda96 | ||
|
6189c3c40c |
66
.drone.yml
Executable file
66
.drone.yml
Executable file
@ -0,0 +1,66 @@
|
||||
---
|
||||
kind: pipeline
|
||||
type: docker
|
||||
name: lint
|
||||
|
||||
steps:
|
||||
- name: yaml linting
|
||||
image: pipelinecomponents/yamllint
|
||||
commands:
|
||||
- yamllint .
|
||||
- name: markdown linting
|
||||
image: 06kellyjac/markdownlint-cli
|
||||
commands:
|
||||
- markdownlint . --config .markdownlint.yaml
|
||||
|
||||
---
|
||||
kind: pipeline
|
||||
type: docker
|
||||
name: test build
|
||||
|
||||
steps:
|
||||
- name: build
|
||||
image: squidfunk/mkdocs-material
|
||||
commands:
|
||||
- mkdocs build --clean --strict --verbose --site-dir build
|
||||
trigger:
|
||||
event:
|
||||
exclude:
|
||||
- push
|
||||
---
|
||||
kind: pipeline
|
||||
type: docker
|
||||
name: deploy
|
||||
|
||||
steps:
|
||||
- name: build
|
||||
image: squidfunk/mkdocs-material
|
||||
commands:
|
||||
- mkdocs build --clean --strict --verbose --site-dir homelab
|
||||
|
||||
- name: deploy
|
||||
image: appleboy/drone-scp
|
||||
when:
|
||||
status:
|
||||
- success
|
||||
settings:
|
||||
host: www.service.consul
|
||||
user: drone-deploy
|
||||
overwrite: true
|
||||
key:
|
||||
from_secret: dronePrivateKey
|
||||
target: /srv/http
|
||||
source: homelab
|
||||
|
||||
trigger:
|
||||
branch:
|
||||
- master
|
||||
event:
|
||||
- push
|
||||
|
||||
---
|
||||
kind: secret
|
||||
name: dronePrivateKey
|
||||
get:
|
||||
path: secrets/data/droneci/keyRSA
|
||||
name: dronePrivateKey
|
3
.gitignore
vendored
3
.gitignore
vendored
@ -35,4 +35,7 @@ override.tf.json
|
||||
# Ignore CLI configuration files
|
||||
.terraformrc
|
||||
terraform.rc
|
||||
site
|
||||
|
||||
|
||||
.vagrant
|
||||
|
10
.markdownlint.yaml
Executable file
10
.markdownlint.yaml
Executable file
@ -0,0 +1,10 @@
|
||||
---
|
||||
# Default state for all rules
|
||||
default: true
|
||||
MD009:
|
||||
strict: false
|
||||
|
||||
MD013: false
|
||||
MD033: false
|
||||
MD024: false
|
||||
MD041: false
|
33
.yamllint
Normal file
33
.yamllint
Normal file
@ -0,0 +1,33 @@
|
||||
---
|
||||
# Based on ansible-lint config
|
||||
extends: default
|
||||
|
||||
rules:
|
||||
braces:
|
||||
max-spaces-inside: 1
|
||||
level: error
|
||||
brackets:
|
||||
max-spaces-inside: 1
|
||||
level: error
|
||||
colons:
|
||||
max-spaces-after: -1
|
||||
level: error
|
||||
commas:
|
||||
max-spaces-after: -1
|
||||
level: error
|
||||
comments: disable
|
||||
comments-indentation: disable
|
||||
document-start: disable
|
||||
empty-lines:
|
||||
max: 3
|
||||
level: error
|
||||
hyphens:
|
||||
level: error
|
||||
indentation: disable
|
||||
key-duplicates: enable
|
||||
line-length: disable
|
||||
new-line-at-end-of-file: disable
|
||||
new-lines:
|
||||
type: unix
|
||||
trailing-spaces: disable
|
||||
truthy: disable
|
48
README.md
Normal file
48
README.md
Normal file
@ -0,0 +1,48 @@
|
||||
# Homelab
|
||||
|
||||
This repository contain my homelab Infrastructure As Code
|
||||
|
||||
this Homelab is build over Hashicorp software stack:
|
||||
|
||||
- Nomad
|
||||
- Consul
|
||||
- Vault
|
||||
|
||||
## Dev
|
||||
|
||||
dev stack is build over vagrant box with libvirt provider
|
||||
|
||||
curently need to have vault and ldap production up to be correctly provision
|
||||
|
||||
to launch dev stack provissionning :
|
||||
|
||||
```sh
|
||||
make create-dev
|
||||
```
|
||||
|
||||
## Rebuild
|
||||
|
||||
## Architecture
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
subgraph Home
|
||||
bleys[bleys]
|
||||
oscar[oscar]
|
||||
gerard[gerard]
|
||||
LAN
|
||||
NAS
|
||||
end
|
||||
subgraph Cloud
|
||||
corwin[corwin]
|
||||
end
|
||||
LAN--main road--ooscar
|
||||
LAN --- bleys
|
||||
LAN --- gerard
|
||||
LAN --- NAS
|
||||
bleys <--wireguard--> corwin
|
||||
oscar <--wiregard--> corwin
|
||||
gerard <--wiregard--> corwin
|
||||
corwin <--> internet
|
||||
|
||||
```
|
11
Readme.md
11
Readme.md
@ -1,11 +0,0 @@
|
||||
# homelab
|
||||
|
||||
|
||||
## rebuild
|
||||
to rebuild from scratch ansible need a vault server up and unseal
|
||||
you can rebuild a standalone vault srver with a consul database snaphot with
|
||||
|
||||
```
|
||||
make vault-dev FILE=./yourconsulsnaphot.snap
|
||||
```
|
||||
|
105
Vagrantfile
vendored
Normal file
105
Vagrantfile
vendored
Normal file
@ -0,0 +1,105 @@
|
||||
Vagrant.configure('2') do |config|
|
||||
if Vagrant.has_plugin?('vagrant-cachier')
|
||||
config.cache.scope = 'machine'
|
||||
config.cache.enable :pacman
|
||||
end
|
||||
config.vm.provider :libvirt do |libvirt|
|
||||
libvirt.management_network_domain = "lan.ducamps.dev"
|
||||
|
||||
end
|
||||
config.vm.define "oscar-dev" do |c|
|
||||
# Box definition
|
||||
c.vm.box = "archlinux/archlinux"
|
||||
# Config options
|
||||
c.vm.synced_folder ".", "/vagrant", disabled: true
|
||||
c.ssh.insert_key = true
|
||||
c.vm.hostname = "oscar-dev"
|
||||
# Network
|
||||
|
||||
# instance_raw_config_args
|
||||
# Provider
|
||||
c.vm.provider "libvirt" do |libvirt, override|
|
||||
|
||||
libvirt.memory = 2048
|
||||
libvirt.cpus = 2
|
||||
end
|
||||
c.vm.provision "ansible" do |bootstrap|
|
||||
bootstrap.playbook= "ansible/playbooks/bootstrap.yml"
|
||||
bootstrap.galaxy_roles_path= "ansible/roles"
|
||||
bootstrap.limit="oscar-dev"
|
||||
bootstrap.extra_vars = { ansible_python_interpreter:"/usr/bin/python3" }
|
||||
end
|
||||
end
|
||||
|
||||
config.vm.define "merlin-dev" do |c|
|
||||
# Box definition
|
||||
c.vm.box = "archlinux/archlinux"
|
||||
# Config options
|
||||
c.vm.synced_folder ".", "/vagrant", disabled: true
|
||||
c.ssh.insert_key = true
|
||||
c.vm.hostname = "merlin-dev"
|
||||
# Network
|
||||
# instance_raw_config_args
|
||||
# Provider
|
||||
c.vm.provider "libvirt" do |libvirt, override|
|
||||
|
||||
libvirt.memory = 512
|
||||
libvirt.cpus = 2
|
||||
|
||||
end
|
||||
c.vm.provision "ansible" do |bootstrap|
|
||||
bootstrap.playbook= "ansible/playbooks/bootstrap.yml"
|
||||
bootstrap.galaxy_roles_path= "ansible/roles"
|
||||
bootstrap.limit="merlin-dev"
|
||||
bootstrap.extra_vars = { ansible_python_interpreter:"/usr/bin/python3" }
|
||||
end
|
||||
end
|
||||
|
||||
config.vm.define "gerard-dev" do |c|
|
||||
# Box definition
|
||||
c.vm.box = "archlinux/archlinux"
|
||||
# Config options
|
||||
|
||||
c.vm.synced_folder ".", "/vagrant", disabled: true
|
||||
c.ssh.insert_key = true
|
||||
c.vm.hostname = "gerard-dev"
|
||||
# Network
|
||||
# instance_raw_config_args
|
||||
# Provider
|
||||
c.vm.provider "libvirt" do |libvirt, override|
|
||||
libvirt.memory = 2048
|
||||
libvirt.cpus = 2
|
||||
end
|
||||
c.vm.provision "ansible" do |bootstrap|
|
||||
bootstrap.playbook= "ansible/playbooks/bootstrap.yml"
|
||||
bootstrap.galaxy_roles_path= "ansible/roles"
|
||||
bootstrap.limit="gerard-dev"
|
||||
bootstrap.extra_vars = { ansible_python_interpreter:"/usr/bin/python3" }
|
||||
end
|
||||
end
|
||||
|
||||
config.vm.define "nas-dev" do |c|
|
||||
# Box definition
|
||||
c.vm.box = "archlinux/archlinux"
|
||||
# Config options
|
||||
c.vm.synced_folder ".", "/vagrant", disabled: true
|
||||
c.ssh.insert_key = true
|
||||
c.vm.hostname = "nas-dev"
|
||||
# Network
|
||||
# instance_raw_config_args
|
||||
# Provider
|
||||
c.vm.provider "libvirt" do |libvirt, override|
|
||||
|
||||
libvirt.memory = 2048
|
||||
libvirt.cpus = 2
|
||||
end
|
||||
|
||||
c.vm.provision "ansible" do |bootstrap|
|
||||
bootstrap.playbook= "ansible/playbooks/bootstrap.yml"
|
||||
bootstrap.galaxy_roles_path= "ansible/roles"
|
||||
bootstrap.limit="nas-dev"
|
||||
bootstrap.extra_vars = { ansible_python_interpreter:"/usr/bin/python3" }
|
||||
end
|
||||
end
|
||||
|
||||
end
|
2
ansible/.ansible-lint
Normal file
2
ansible/.ansible-lint
Normal file
@ -0,0 +1,2 @@
|
||||
skip_list:
|
||||
- 'fcqn-builtins'
|
@ -99,7 +99,7 @@ host_key_checking = False
|
||||
#sudo_flags = -H -S -n
|
||||
|
||||
# SSH timeout
|
||||
#timeout = 10
|
||||
timeout = 30
|
||||
|
||||
# default user to use for playbooks if user is not specified
|
||||
# (/usr/bin/ansible will use current user as default)
|
||||
@ -136,7 +136,7 @@ host_key_checking = False
|
||||
|
||||
# If set, configures the path to the Vault password file as an alternative to
|
||||
# specifying --vault-password-file on the command line.
|
||||
#vault_password_file = /path/to/vault_password_file
|
||||
vault_password_file = ./misc/vault-keyring-client.sh
|
||||
|
||||
# format of string {{ ansible_managed }} available within Jinja2
|
||||
# templates indicates to users editing templates files will be replaced.
|
||||
@ -275,7 +275,7 @@ retry_files_enabled = False
|
||||
# turn this on to have behaviour more like Ansible prior to 2.1.x. See
|
||||
# https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user
|
||||
# for more secure ways to fix this than enabling this option.
|
||||
#allow_world_readable_tmpfiles = False
|
||||
allow_world_readable_tmpfiles = True
|
||||
|
||||
# controls the compression level of variables sent to
|
||||
# worker processes. At the default of 0, no compression
|
||||
|
24
ansible/group_vars/DNS
Normal file
24
ansible/group_vars/DNS
Normal file
@ -0,0 +1,24 @@
|
||||
pdns_config:
|
||||
local-address: "127.0.0.1"
|
||||
local-port: "5300"
|
||||
api: yes
|
||||
api-key:
|
||||
|
||||
pdns_backends:
|
||||
gsqlite3:
|
||||
dnssec: yes
|
||||
database: "/var/lib/powerdns/powerdns.sqlite"
|
||||
pdns_sqlite_databases_locations:
|
||||
- "/var/lib/powerdns/powerdns.sqlite"
|
||||
|
||||
pdns_rec_config:
|
||||
forward-zones:
|
||||
- "{{ consul_domain }}=127.0.0.1:8600"
|
||||
- "ducamps.win=192.168.1.10"
|
||||
- "{{ domain.name }}=192.168.1.5"
|
||||
- "lan.{{ domain.name }}=192.168.1.5"
|
||||
- "1.168.192.in-addr.arpa=192.168.1.5:5300"
|
||||
|
||||
local-address: "{{ hostvars[inventory_hostname]['ansible_'+ default_interface].ipv4.address|default(ansible_default_ipv4.address) }}"
|
||||
dnssec: "off"
|
||||
|
90
ansible/group_vars/NAS/NAS
Normal file
90
ansible/group_vars/NAS/NAS
Normal file
@ -0,0 +1,90 @@
|
||||
NAS_nomad_folder:
|
||||
- name: actualbudget
|
||||
- name: archiso
|
||||
owner: 1000001
|
||||
- name: backup
|
||||
owner: 1000001
|
||||
- name: borgmatic
|
||||
- name: crowdsec
|
||||
owner: 1000001
|
||||
- name: dms
|
||||
owner: 1000001
|
||||
- name: filestash
|
||||
owner: 1000
|
||||
- name: gitea
|
||||
owner: 1000000
|
||||
- name: grafana
|
||||
owner: 472
|
||||
- name: hass
|
||||
owner: 1000001
|
||||
- name: homer
|
||||
owner: 1000001
|
||||
- name: immich/cache
|
||||
- name: immich/upload
|
||||
- name: jellyfin
|
||||
owner: 1000001
|
||||
- name: loki
|
||||
owner: 10001
|
||||
- name: mealie
|
||||
owner: 1000001
|
||||
- name: mosquito
|
||||
owner: 1883
|
||||
- name: pacoloco
|
||||
owner: 1000001
|
||||
- name: pdns-auth
|
||||
owner: 1000001
|
||||
- name: pdns-admin
|
||||
owner: 1000001
|
||||
- name: pihole
|
||||
owner: 999
|
||||
- name: prometheus
|
||||
owner: 65534
|
||||
- name: prowlarr
|
||||
owner: 1000001
|
||||
- name: radicale
|
||||
owner: 1000001
|
||||
- name: openldap
|
||||
owner: 1001
|
||||
- name: registry/ghcr
|
||||
- name: registry/docker
|
||||
- name: syncthing
|
||||
owner: 1000001
|
||||
- name: traefik
|
||||
owner: 1000001
|
||||
- name: tt-rss
|
||||
owner: 1000001
|
||||
- name: vaultwarden
|
||||
owner: 1000001
|
||||
- name: zigbee2mqtt
|
||||
owner: 1000001
|
||||
nas_bind_target: "/exports"
|
||||
|
||||
nas_bind_source:
|
||||
- dest: "{{ nas_bind_target }}/nomad"
|
||||
source: /data/data1/nomad
|
||||
- dest: "{{ nas_bind_target }}/music"
|
||||
source: /data/data1/music
|
||||
- dest: "{{ nas_bind_target }}/download"
|
||||
source: /data/data1/download
|
||||
- dest: "{{ nas_bind_target }}/media/serie"
|
||||
source: /data/data2/serie
|
||||
- dest: "{{ nas_bind_target }}/media/film"
|
||||
source: /data/data3/film
|
||||
- dest: "{{ nas_bind_target }}/photo"
|
||||
source: /data/data1/photo
|
||||
- dest: "{{ nas_bind_target }}/homes"
|
||||
source: /data/data1/homes
|
||||
- dest: "{{ nas_bind_target }}/ebook"
|
||||
source: /data/data1/ebook
|
||||
- dest: "{{ nas_bind_target }}/media/download/serie"
|
||||
source: /data/data1/download/serie
|
||||
- dest: "{{ nas_bind_target }}/media/download/film"
|
||||
source: /data/data1/download/film
|
||||
- dest: "{{ nas_bind_target }}/music/download/"
|
||||
source: /data/data1/download/music
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
1
ansible/group_vars/NAS/ftp
Normal file
1
ansible/group_vars/NAS/ftp
Normal file
@ -0,0 +1 @@
|
||||
vsftpd_config: {}
|
15
ansible/group_vars/NAS/nfs
Normal file
15
ansible/group_vars/NAS/nfs
Normal file
@ -0,0 +1,15 @@
|
||||
nfs_cluster_list: "{% for server in groups['all']%} {% if hostvars[server]['ansible_default_ipv4']['address'] is defined %} {{hostvars[server]['ansible_' + hostvars[server]['nfs_iface']|default('')].ipv4.address|default(hostvars[server]['ansible_default_ipv4']['address'],true)}}{{ nfs_options }} {% endif %} {%endfor%}"
|
||||
nfs_options: "(rw,no_root_squash,crossmnt,async,insecure_locks,sec=sys)"
|
||||
nfs_consul_service: true
|
||||
nfs_bind_target: "/exports"
|
||||
|
||||
|
||||
nfs_exports:
|
||||
- "{{ nas_bind_target }} *(fsid=0,insecure,no_subtree_check)"
|
||||
- "{{ nas_bind_target }}/nomad {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
|
||||
- "{{ nas_bind_target }}/download {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
|
||||
- "{{ nas_bind_target }}/music {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
|
||||
- "{{ nas_bind_target }}/media {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
|
||||
- "{{ nas_bind_target }}/photo {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
|
||||
- "{{ nas_bind_target }}/homes {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
|
||||
- "{{ nas_bind_target }}/ebook {{nfs_cluster_list}} 172.17.0.0/16{{ nfs_options }}"
|
1
ansible/group_vars/NAS/nomad
Normal file
1
ansible/group_vars/NAS/nomad
Normal file
@ -0,0 +1 @@
|
||||
nomad_node_class: 'NAS'
|
25
ansible/group_vars/NAS/samba
Normal file
25
ansible/group_vars/NAS/samba
Normal file
@ -0,0 +1,25 @@
|
||||
samba_passdb_backend: tdbsam
|
||||
samba_shares_root: /exports
|
||||
samba_shares:
|
||||
- name: media
|
||||
comment: "media"
|
||||
write_list: "@NAS_media"
|
||||
browseable: true
|
||||
- name: ebook
|
||||
comment: "ebook"
|
||||
write_list: "@NAS_ebook"
|
||||
browseable: true
|
||||
- name: music
|
||||
comment: "music"
|
||||
write_list: "@NAS_music"
|
||||
browseable: true
|
||||
- name: photo
|
||||
comment: "photo"
|
||||
write_list: "@NAS_photo"
|
||||
browseable: true
|
||||
- name: download
|
||||
comment: "downlaod"
|
||||
write_list: "@NAS_download"
|
||||
browseable: true
|
||||
samba_load_homes: True
|
||||
samba_homes_include: samba_homes_include.conf
|
@ -1,24 +0,0 @@
|
||||
system_upgrade: true
|
||||
nginx_error_log: "/var/log/nginx/error.log debug"
|
||||
|
||||
hosts_entries:
|
||||
- name: ducamps.win
|
||||
ip: 127.0.0.1
|
||||
aliases:
|
||||
- arch.ducamps.win
|
||||
- www.ducamps.win
|
||||
- file.ducamps.win
|
||||
- supysonic.ducamps.win
|
||||
- syno.ducamps.win
|
||||
- vault.ducamps.win
|
||||
- ww.ducamps.win
|
||||
- hass.ducamps.win
|
||||
- git.ducamps.win
|
||||
|
||||
consul_bootstrap_expect: 1
|
||||
nomad_bootstrap_expect: 1
|
||||
nomad_datacenter: hml
|
||||
consul_server: False
|
||||
nomad_server: False
|
||||
consul_retry_join_force:
|
||||
- 192.168.1.40
|
@ -1,99 +0,0 @@
|
||||
# defaults file for ansible-arch-provissionning
|
||||
partition_table:
|
||||
- device: "/dev/sda"
|
||||
label: gpt
|
||||
settings:
|
||||
- number: 1
|
||||
part_end: 64MB
|
||||
flags: [boot, esp]
|
||||
fstype: vfat
|
||||
format: yes
|
||||
- number: 2
|
||||
part_start: 512MB
|
||||
part_end: 1524MB
|
||||
flags: []
|
||||
fstype: swap
|
||||
format: yes
|
||||
- number: 3
|
||||
part_start: 1524MB
|
||||
flags: [lvm]
|
||||
fstype: ext4
|
||||
format: yes
|
||||
#- device: "/dev/sdb"
|
||||
#settings:
|
||||
#- number: 1
|
||||
#name: home
|
||||
#fstype: ext4
|
||||
#format:
|
||||
mount_table:
|
||||
- device: "/dev/sda"
|
||||
settings:
|
||||
- number: 3
|
||||
mountpath: /mnt
|
||||
fstype: ext4
|
||||
- number: 1
|
||||
mountpath: /mnt/boot
|
||||
fstype: vfat
|
||||
|
||||
#need vfat boot partition with esp label
|
||||
provissionning_UEFI_Enable: True
|
||||
sssd_configure: False
|
||||
nomad_datacenter: hetzner
|
||||
|
||||
systemd_mounts:
|
||||
diskstation_nomad:
|
||||
share: diskstation.ducamps.win:/volume2/nomad
|
||||
mount: /mnt/diskstation/nomad
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
hetzner_storage:
|
||||
share: //u304977.your-storagebox.de/backup
|
||||
mount: /mnt/hetzner/storagebox
|
||||
type: cifs
|
||||
options:
|
||||
- credentials=/etc/creds/hetzner_credentials
|
||||
- uid= 1024
|
||||
- gid= 10
|
||||
- vers=3.0
|
||||
- mfsymlinks
|
||||
automount: true
|
||||
diskstation_git:
|
||||
share: diskstation.ducamps.win:/volume2/git
|
||||
mount: /mnt/diskstation/git
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
diskstation_CardDav:
|
||||
share: diskstation.ducamps.win:/volume2/CardDav
|
||||
mount: /mnt/diskstation/CardDav
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
diskstation_music:
|
||||
share: diskstation.ducamps.win:/volume2/music
|
||||
mount: /mnt/diskstation/music
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
|
||||
|
||||
credentials_files:
|
||||
1:
|
||||
type: smb
|
||||
path: /etc/creds/hetzner_credentials
|
||||
username: u304977
|
||||
password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/storage:hetzner') }}"
|
||||
|
||||
|
||||
|
||||
systemd_mounts_enabled:
|
||||
- diskstation_nomad
|
||||
- hetzner_storage
|
||||
- diskstation_git
|
||||
- diskstation_music
|
||||
- diskstation_CardDav
|
45
ansible/group_vars/VPS/VPS
Normal file
45
ansible/group_vars/VPS/VPS
Normal file
@ -0,0 +1,45 @@
|
||||
# defaults file for ansible-arch-provissionning
|
||||
partition_table:
|
||||
- device: "/dev/sda"
|
||||
label: gpt
|
||||
settings:
|
||||
- number: 1
|
||||
part_end: 64MB
|
||||
flags: [boot, esp]
|
||||
fstype: vfat
|
||||
format: yes
|
||||
- number: 2
|
||||
part_start: 512MB
|
||||
part_end: 1524MB
|
||||
flags: []
|
||||
fstype: swap
|
||||
format: yes
|
||||
- number: 3
|
||||
part_start: 1524MB
|
||||
flags: [lvm]
|
||||
fstype: ext4
|
||||
format: yes
|
||||
#- device: "/dev/sdb"
|
||||
#settings:
|
||||
#- number: 1
|
||||
#name: home
|
||||
#fstype: ext4
|
||||
#format:
|
||||
mount_table:
|
||||
- device: "/dev/sda"
|
||||
settings:
|
||||
- number: 3
|
||||
mountpath: /mnt
|
||||
fstype: ext4
|
||||
- number: 1
|
||||
mountpath: /mnt/boot
|
||||
fstype: vfat
|
||||
|
||||
#need vfat boot partition with esp label
|
||||
provissionning_UEFI_Enable: True
|
||||
#sssd_configure: False
|
||||
nomad_datacenter: hetzner
|
||||
|
||||
consul_server: False
|
||||
nomad_server: False
|
||||
|
28
ansible/group_vars/VPS/mount
Normal file
28
ansible/group_vars/VPS/mount
Normal file
@ -0,0 +1,28 @@
|
||||
systemd_mounts:
|
||||
diskstation_nomad:
|
||||
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/nomad"
|
||||
mount: /mnt/diskstation/nomad
|
||||
type: nfs
|
||||
options:
|
||||
- "vers=4"
|
||||
automount: "{{ env_automount }}"
|
||||
enabled: true
|
||||
hetzner_storage:
|
||||
share: //u304977.your-storagebox.de/backup
|
||||
mount: /mnt/hetzner/storagebox
|
||||
type: cifs
|
||||
options:
|
||||
- credentials=/etc/creds/hetzner_credentials
|
||||
- uid=100001
|
||||
- gid=10
|
||||
- vers=3.0
|
||||
- mfsymlinks
|
||||
automount: "{{ env_automount }}"
|
||||
enabled: true
|
||||
|
||||
credentials_files:
|
||||
1:
|
||||
type: smb
|
||||
path: /etc/creds/hetzner_credentials
|
||||
username: u304977
|
||||
password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/storage:hetzner') }}"
|
12
ansible/group_vars/VPS/vault_mount
Normal file
12
ansible/group_vars/VPS/vault_mount
Normal file
@ -0,0 +1,12 @@
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
31303539336464336239376636623862303066336438383739356163616431643366386565366361
|
||||
3264336232303135336334333663326234393832343235640a313638323963666631353836373531
|
||||
61636261623662396330653135326238363630363938323166303861313563393063386161393238
|
||||
3231336232663533640a333763643864363939336566333731353031313739616633623537386435
|
||||
39613934663133613733356433616162363430616439623830663837343530623937656434366663
|
||||
33656466396263616132356337326236383761363834663363643163343231366563333865656433
|
||||
39316365663734653734363362363539623636666261333534313935343566646166316233623535
|
||||
32323831626463656337313266343634303830633936396232663966373264313762346235646665
|
||||
61333139363039363436393962666365336334663164306230393433636664623934343039323637
|
||||
33383036323233646237343031633030353330633734353232343633623864333834646239346362
|
||||
643634303135656333646235343366636361
|
45
ansible/group_vars/VPS/vps
Normal file
45
ansible/group_vars/VPS/vps
Normal file
@ -0,0 +1,45 @@
|
||||
# defaults file for ansible-arch-provissionning
|
||||
partition_table:
|
||||
- device: "/dev/sda"
|
||||
label: gpt
|
||||
settings:
|
||||
- number: 1
|
||||
part_end: 64MB
|
||||
flags: [boot, esp]
|
||||
fstype: vfat
|
||||
format: yes
|
||||
- number: 2
|
||||
part_start: 512MB
|
||||
part_end: 1524MB
|
||||
flags: []
|
||||
fstype: swap
|
||||
format: yes
|
||||
- number: 3
|
||||
part_start: 1524MB
|
||||
flags: [lvm]
|
||||
fstype: ext4
|
||||
format: yes
|
||||
#- device: "/dev/sdb"
|
||||
#settings:
|
||||
#- number: 1
|
||||
#name: home
|
||||
#fstype: ext4
|
||||
#format:
|
||||
mount_table:
|
||||
- device: "/dev/sda"
|
||||
settings:
|
||||
- number: 3
|
||||
mountpath: /mnt
|
||||
fstype: ext4
|
||||
- number: 1
|
||||
mountpath: /mnt/boot
|
||||
fstype: vfat
|
||||
|
||||
#need vfat boot partition with esp label
|
||||
provissionning_UEFI_Enable: True
|
||||
#sssd_configure: False
|
||||
nomad_datacenter: hetzner
|
||||
|
||||
consul_server: False
|
||||
nomad_server: False
|
||||
|
@ -1,20 +1,7 @@
|
||||
##ansible_python_interpreter: /usr/bin/python2
|
||||
user:
|
||||
name: vincent
|
||||
uid: 1024
|
||||
mail: vincent@ducamps.win
|
||||
|
||||
domain:
|
||||
name: ducamps.win
|
||||
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
hass_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDfVei9iC/Ra5qmSZcLu8z2CTaXCmfn4JSS4o3eu0HhykdYGSqhBTcUDD3/FhcTPQJVFsu1P4Gwqq1dCE+EvaZZRQaMUqVKUpOliThSG6etbImkvqLQQsC1qt+/NqSvfzu2+28A6+YspzuxsViGo7e3Gg9MdwV3LMGh0mcOr/uXb/HIk18sJg5yQpwMfYTj0Wda90nyegcN3F2iZMeauh/aaFJzWcHNakAAewceDYOErU07NhlZgVA2C8HgkJ8HL7AqIVqt9VOx3xLp91DbKTNXSxvyM0X4NQP24P7ZFxAOk/j0AX3hAWhaNmievCHyBWvQve1VshZXFwEIiuHm8q4GSCxK2r0oQudKdtIuQMfuUALigdiSxo522oEiML/2kSk17WsxZwh7SxfD0DKa82fy9iAwcAluWLwJ+yN3nGnDFF/tHYaamSiowpmTTmQ9ycyIPWPLVZclt3BlEt9WH/FPOdzAyY7YLzW9X6jhsU3QwViyaTRGqAdqzUAiflKCMsNzb5kq0oYsDFC+/eqp1USlgTZDhoKtTKRGEjW2KuUlDsXGBeB6w1D8XZxXJXAaHuMh4oMUgLswjLUdTH3oLnnAvfOrl8O66kTkmcQ8i/kr1wDODMy/oNUzs8q4DeRuhD5dpUiTUGYDTWPYj6m6U/GAEHvN/2YEqSgfVff1iQ4VBw==
|
||||
|
||||
system_arch_local_mirror: "https://arch.{{domain.name}}"
|
||||
|
||||
privatekeytodeploy:
|
||||
- user: "{{user.name}}"
|
||||
keyfile: "/home/{{user.name}}/.ssh/id_gitea"
|
||||
privatekey: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/privatekey:gitea')}}"
|
||||
- user: root
|
||||
keyfile: /root/.ssh/id_gitea
|
||||
privatekey: "{{lookup('hashi_vault', 'secret=secrets/data/ansible/privatekey:gitea')}}"
|
||||
system_arch_local_mirror: "https://arch.{{domain.name}}/repo/archlinux_$arch"
|
||||
system_sudoers_group: "serverAdmin"
|
||||
system_ipV6_disable: True
|
||||
system_ip_unprivileged_port_start: 0
|
||||
wireguard_mtu: 1420
|
||||
|
5
ansible/group_vars/all/consul
Normal file
5
ansible/group_vars/all/consul
Normal file
@ -0,0 +1,5 @@
|
||||
consul_client_addr: "0.0.0.0"
|
||||
consul_datacenter: "homelab"
|
||||
consul_backup_location: "/mnt/diskstation/git/backup/consul"
|
||||
consul_ansible_group: all
|
||||
consul_systemd_resolved_enable: true
|
8
ansible/group_vars/all/docker
Normal file
8
ansible/group_vars/all/docker
Normal file
@ -0,0 +1,8 @@
|
||||
docker_daemon_config:
|
||||
dns:
|
||||
- 172.17.0.1
|
||||
- 192.168.1.6
|
||||
mtu: 1420
|
||||
insecure-registries:
|
||||
- 192.168.1.0/24
|
||||
- 192.168.121.0/24
|
9
ansible/group_vars/all/nomad
Normal file
9
ansible/group_vars/all/nomad
Normal file
@ -0,0 +1,9 @@
|
||||
nomad_docker_allow_caps:
|
||||
- NET_ADMIN
|
||||
- NET_BROADCAST
|
||||
- NET_RAW
|
||||
nomad_allow_privileged: True
|
||||
nomad_vault_enabled: true
|
||||
nomad_vault_address: "http://active.vault.service.{{consul_domain}}:8200"
|
||||
nomad_vault_role: "nomad-cluster"
|
||||
nomad_docker_extra_labels: ["job_name", "task_group_name", "task_name", "namespace", "node_name"]
|
@ -1,37 +0,0 @@
|
||||
consul_client_addr: "0.0.0.0"
|
||||
consul_datacenter: "homelab"
|
||||
consul_backup_location: "/mnt/diskstation/git/backup/consul"
|
||||
consul_ansible_group: all
|
||||
consul_bootstrap_expect: 2
|
||||
nomad_vault_enabled: true
|
||||
nomad_vault_address: "http://active.vault.service.consul:8200"
|
||||
nomad_vault_role: "nomad-cluster"
|
||||
nomad_vault_token: "{{ lookup('hashi_vault','secret=secrets/data/ansible/hashistack:nomad_vault_token') }}"
|
||||
nomad_bootstrap_expect: 2
|
||||
notification_mail: "{{inventory_hostname}}@{{ domain.name }}"
|
||||
msmtp_mailhub: smtp.{{ domain.name }}
|
||||
msmtp_auth_user: "{{ user.mail }}"
|
||||
msmtp_auth_pass: "{{ lookup('hashi_vault','secret=secrets/data/ansible/other:email') }}"
|
||||
|
||||
docker_users: "{{user.name}}"
|
||||
|
||||
system_user:
|
||||
- name: drone-deploy
|
||||
home: /home/drone-deploy
|
||||
shell: /bin/bash
|
||||
|
||||
keystodeploy:
|
||||
- name: juicessh with password
|
||||
user: "{{user.name}}"
|
||||
sshkey: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINN5V9WPPi2/HwAQuDeaJO3hUPf8HxNMHqVmkf1pDjWg JuiceSSH
|
||||
- name: fixe-pc new
|
||||
user: "{{user.name}}"
|
||||
sshkey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDBrP9akjyailPU9tUMvKrtDsqjI191W1L95u3OFjBqqapXgbDVx1FVtSlIIKcCHZyTII1zgC7woZmNRpmaIJRh6N+VIuRrRs29xx2GUVc4pxflUwwIAK36hgZS3nqmA2biacmPR9HogZLZMcPtZdLhWGlLuUv1cWqbqW7UcDa0lbubCo2v4OQMx/zt37voKAZSkkbH9mVszH6eKxNFy1KXbLYhwXiKfYBnAHbivhiSkZUGV6D4HNj8Jx6IY1YF3bfwMXmt841Q/7OY+t3RTIS8ewvSF+jpQ7GKHBEsZTZUGwIoSyZFFvCgKQVOJu/ZJJS4HNkluilir9Sxtx2LRgy+HHQ251trnsVsJp3ts4uTiMkKJQy1PXy1ZvQXYkip9Af3vlXUMmTyVj8cv+No07G1rZ1pZ3wXKX4RkTsoep5GsYlhyUd7GzsAQQiX9YhYyWDQ6NHBYAGAWbw2BLNxltWa4AyWOa1C8v+1+mRwdvpdMY7powJNCXQaIJmiOZiI/Us= vincent@fixe-pc-2020-03-01
|
||||
- name: zen-pc
|
||||
user: "{{user.name}}"
|
||||
sshkey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCYHkEIa38p3e4+m/LScHm8Ei7H2X/pDksjVAzoJ4fHr8oXc6DKkC8SWwMnh3L4WzWBhfTbzwUgFTNpsxhp/UyJf+fdzmzetlbVlYSuA6yWuSmgMeFbXFImhZ+Sn3i59hLeqAAyrkQLjba2waehdEsuOQ/AGoDbMYm38Xf9Wka/1YIeUPE4gLeLvymRnGw7BSug6Unycy52WlFAquollObOvc7tNiX0uLDh81Dp0KZhqWRs75hfmQ9du4g4uNhFLiF11hOGNgj3PWV+nWe8GWNQYVUBChWX1dsP8ct/ahG9IFXSPEaFD1IZeFp29u2ln3mgKkBtcRTRe1e3CLQqiRsUq2aixVFbSgFMFgGSUiNGNqKR4f9DeyJrYBplSj6HXjWoBny4Wm8+yfk8qR2RtQpS6AUu81xtKnXOaj9Q5VZO3kVF0U3EXHAZutTYDj9mDlhLSBS7x7hmrkRBbIy7adSx9Gx5Ck3/RllqG6KD+LdJa4I0pUTRNetpLpYDeZpwjnDP1r7udaSQMyRMH5YKLzhtHqIV/imn9QO4KCxNxTgwxt9ho6HDvlDGERCxm+yeHUu3CPyq2ZGSF5HHsYTGUtYvQw4JfQyw/5DrZ7IIdU1e7ZuaE3h/NvFgKJPVTP52nmUtIW7pIOkHpn9mddjm/oKMayOzMspLn9HLFVbqi7A5Xw== vincent@zen-pc
|
||||
- name: drone
|
||||
user: drone-deploy
|
||||
sshkey: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDUaK+pQlosmopbZfucll9UdqDOTaODOBwoxRwkJEk1i drone@oscar
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
sssd_configure: true
|
||||
# sssd_configure is False by default - by default nothing is done by this role.
|
||||
ldap_search_base: "dc=ducamps,dc=win"
|
||||
ldap_uri: "ldaps://ldap.ducamps.win"
|
||||
ldap_sudo_search_base: "ou=sudoers,dc=ducamps,dc=win"
|
||||
ldap_search_base: "dc=ducamps,dc=eu"
|
||||
ldap_uri: "ldaps://ldaps.service.consul"
|
||||
ldap_sudo_search_base: "ou=sudoers,dc=ducamps,dc=eu"
|
||||
|
42
ansible/group_vars/all/users
Normal file
42
ansible/group_vars/all/users
Normal file
@ -0,0 +1,42 @@
|
||||
user:
|
||||
name: vincent
|
||||
home: /home/vincent
|
||||
uid: 1024
|
||||
mail: vincent@ducamps.eu
|
||||
groups:
|
||||
- docker
|
||||
authorized_keys:
|
||||
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINN5V9WPPi2/HwAQuDeaJO3hUPf8HxNMHqVmkf1pDjWg JuiceSSH
|
||||
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDBrP9akjyailPU9tUMvKrtDsqjI191W1L95u3OFjBqqapXgbDVx1FVtSlIIKcCHZyTII1zgC7woZmNRpmaIJRh6N+VIuRrRs29xx2GUVc4pxflUwwIAK36hgZS3nqmA2biacmPR9HogZLZMcPtZdLhWGlLuUv1cWqbqW7UcDa0lbubCo2v4OQMx/zt37voKAZSkkbH9mVszH6eKxNFy1KXbLYhwXiKfYBnAHbivhiSkZUGV6D4HNj8Jx6IY1YF3bfwMXmt841Q/7OY+t3RTIS8ewvSF+jpQ7GKHBEsZTZUGwIoSyZFFvCgKQVOJu/ZJJS4HNkluilir9Sxtx2LRgy+HHQ251trnsVsJp3ts4uTiMkKJQy1PXy1ZvQXYkip9Af3vlXUMmTyVj8cv+No07G1rZ1pZ3wXKX4RkTsoep5GsYlhyUd7GzsAQQiX9YhYyWDQ6NHBYAGAWbw2BLNxltWa4AyWOa1C8v+1+mRwdvpdMY7powJNCXQaIJmiOZiI/Us= vincent@fixe-pc-2020-03-01
|
||||
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCYHkEIa38p3e4+m/LScHm8Ei7H2X/pDksjVAzoJ4fHr8oXc6DKkC8SWwMnh3L4WzWBhfTbzwUgFTNpsxhp/UyJf+fdzmzetlbVlYSuA6yWuSmgMeFbXFImhZ+Sn3i59hLeqAAyrkQLjba2waehdEsuOQ/AGoDbMYm38Xf9Wka/1YIeUPE4gLeLvymRnGw7BSug6Unycy52WlFAquollObOvc7tNiX0uLDh81Dp0KZhqWRs75hfmQ9du4g4uNhFLiF11hOGNgj3PWV+nWe8GWNQYVUBChWX1dsP8ct/ahG9IFXSPEaFD1IZeFp29u2ln3mgKkBtcRTRe1e3CLQqiRsUq2aixVFbSgFMFgGSUiNGNqKR4f9DeyJrYBplSj6HXjWoBny4Wm8+yfk8qR2RtQpS6AUu81xtKnXOaj9Q5VZO3kVF0U3EXHAZutTYDj9mDlhLSBS7x7hmrkRBbIy7adSx9Gx5Ck3/RllqG6KD+LdJa4I0pUTRNetpLpYDeZpwjnDP1r7udaSQMyRMH5YKLzhtHqIV/imn9QO4KCxNxTgwxt9ho6HDvlDGERCxm+yeHUu3CPyq2ZGSF5HHsYTGUtYvQw4JfQyw/5DrZ7IIdU1e7ZuaE3h/NvFgKJPVTP52nmUtIW7pIOkHpn9mddjm/oKMayOzMspLn9HLFVbqi7A5Xw== vincent@zen-pc
|
||||
privatekey:
|
||||
- keyname: "id_gitea"
|
||||
key: "{{lookup('file', '~/.ssh/id_gitea')}}"
|
||||
|
||||
|
||||
|
||||
system_user:
|
||||
- name: drone-deploy
|
||||
home: /home/drone-deploy
|
||||
shell: /bin/bash
|
||||
authorized_keys:
|
||||
- ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDUaK+pQlosmopbZfucll9UdqDOTaODOBwoxRwkJEk1i drone@oscar
|
||||
|
||||
- name: ansible
|
||||
home: /home/ansible
|
||||
shell: /bin/bash
|
||||
|
||||
- name: root
|
||||
home: /root
|
||||
privatekey:
|
||||
- keyname: id_gitea
|
||||
key: "{{lookup('file', '~/.ssh/id_gitea')}}"
|
||||
|
||||
|
||||
|
||||
user_custom_host:
|
||||
- host: "git.ducamps.eu"
|
||||
user: "git"
|
||||
keyfile: "~/.ssh/id_gitea"
|
||||
|
||||
user_config_repo: "ssh://git@git.ducamps.eu:2222/vincent/conf2.git"
|
1
ansible/group_vars/all/vault
Normal file
1
ansible/group_vars/all/vault
Normal file
@ -0,0 +1 @@
|
||||
vault_raft_group_name: "homelab"
|
11
ansible/group_vars/all/vault_nomad
Normal file
11
ansible/group_vars/all/vault_nomad
Normal file
@ -0,0 +1,11 @@
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
39613433313663653039643961643165643632313938626339653365376633613135653436363938
|
||||
6331623132366638633665636163336462393333336264320a666466303465663839646435626231
|
||||
38396437363034313236383261326637306238616162303131356537393635363939376236386130
|
||||
6466353961643233310a306631333664363332336263656638623763393732306361306632386662
|
||||
37623934633932653965316532386664353130653830356237313337643266366233346633323265
|
||||
37616533303561363864626531396366323565396536383133643539663630636633356238386633
|
||||
34383464333363663532643239363438626135336632316135393537643930613532336231633064
|
||||
35376561663637623932313365636261306131353233636661313435643563323534623365346436
|
||||
65366132333635643832353464323961643466343832376635386531393834336535386364396333
|
||||
3932393561646133336437643138373230366266633430663937
|
12
ansible/group_vars/all/vault_sssd
Normal file
12
ansible/group_vars/all/vault_sssd
Normal file
@ -0,0 +1,12 @@
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
61326233336236343231396231306638373837653661313334313261313539316532373437346132
|
||||
3931306637303530373032663236363466383433316161310a396439393564643731656664663639
|
||||
32386130663837303663376432633930393663386436666263313939326631616466643237333138
|
||||
3365346131636333330a376436323964656563363664336638653564656231636136663635303439
|
||||
35346461356337303064623861326331346263373539336335393566623462343464323065366237
|
||||
61346637326336613232643462323733366530656439626234663335633965376335623733336162
|
||||
37323739376237323534613361333831396531663637666161666366656237353563626164626632
|
||||
33326336353663356235373835666166643465666562616663336539316233373430633862613133
|
||||
36363831623361393230653161626131353264366634326233363232336635306266376363363739
|
||||
66373434343330633337633436316135656533613465613963363931383266323466653762623365
|
||||
363332393662393532313063613066653964
|
14
ansible/group_vars/all/vault_users
Normal file
14
ansible/group_vars/all/vault_users
Normal file
@ -0,0 +1,14 @@
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
35303137383361396262313561623237626336306366376630663065396664643630383638376436
|
||||
3930346265616235383331383735613166383461643233310a663564356266663366633539303630
|
||||
37616532393035356133653838323964393464333230313861356465326433353339336435363263
|
||||
3162653932646662650a613762393062613433343362633365316434663661306637623363333834
|
||||
61303231303362313133346461373738633239613933303564383532353537626538363636306461
|
||||
66663330346566356637623036363964396137646435333139323430353639386134396537366334
|
||||
39303130386432366335383433626431663034656466626265393863623438366130346562623365
|
||||
63653963393663353666313631326131636361333230386461383638333338393137336562323935
|
||||
37343034363961306663303232346139356534613837663230393962323333656536303161373939
|
||||
65626164336166306264653538313661393934383966303135356161336331623835663235646332
|
||||
63343764643861366537383962616230323036326331386333346463353835393762653735353862
|
||||
32323839663365353337303363313535633362643231653663393936363539363933636430613832
|
||||
32336566633962646463316636346330336265626130373636643335323762363661
|
14
ansible/group_vars/all/vault_vault
Normal file
14
ansible/group_vars/all/vault_vault
Normal file
@ -0,0 +1,14 @@
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
64396261616266633665646330393631316463386334633032353965323964633464333331323334
|
||||
6261653930313764313836366531383462313965336231620a656637623439623639383931373361
|
||||
37373434636531623563336565356136633031633835633636643436653165386436636564616130
|
||||
3763383036343739370a376565343130636631653635616566653531323464343632623566313436
|
||||
32396165636333393032636636613030373663393238323964396462323163616162613933626536
|
||||
31623931343633346131636563643563393230323839636438373933666137393031326532356535
|
||||
32363439306338623533353734613966396362303164616335363535333438326234623161653732
|
||||
66613762653966613763623966633939323634346536636334343364306332323563653361346563
|
||||
65313433376634363261323934376637646233636233346536316262386634353666376539613235
|
||||
63666432396636373139663861393164626165383665663933383734303165623464666630343231
|
||||
33323339663138373530396636636333323439616137313434316465633162396237306238343366
|
||||
30326162306539396630633738323435323432646338633331626665363838376363343835336534
|
||||
3635
|
50
ansible/group_vars/cluster/mount
Normal file
50
ansible/group_vars/cluster/mount
Normal file
@ -0,0 +1,50 @@
|
||||
systemd_mounts:
|
||||
diskstation_photo:
|
||||
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/photo"
|
||||
mount: /mnt/diskstation/photo
|
||||
type: nfs
|
||||
options:
|
||||
- "vers=4"
|
||||
automount: "{{ env_automount }}"
|
||||
enabled: true
|
||||
diskstation_music:
|
||||
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/music"
|
||||
mount: /mnt/diskstation/music
|
||||
type: nfs
|
||||
options:
|
||||
- "vers=4"
|
||||
automount: "{{ env_automount }}"
|
||||
enabled: true
|
||||
diskstation_media:
|
||||
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/media"
|
||||
mount: /mnt/diskstation/media
|
||||
type: nfs
|
||||
options:
|
||||
- "vers=4"
|
||||
automount: "{{ env_automount }}"
|
||||
enabled: true
|
||||
|
||||
diskstation_ebook:
|
||||
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/ebook"
|
||||
mount: /mnt/diskstation/ebook
|
||||
type: nfs
|
||||
options:
|
||||
- "vers=4"
|
||||
automount: "{{ env_automount }}"
|
||||
enabled: true
|
||||
diskstation_nomad:
|
||||
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/nomad"
|
||||
mount: /mnt/diskstation/nomad
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: "{{ env_automount }}"
|
||||
enabled: true
|
||||
diskstation_download:
|
||||
share: "{{ nas_ip }}:{{ env_default_nfs_path }}/download"
|
||||
mount: /mnt/diskstation/download
|
||||
type: nfs
|
||||
options:
|
||||
- "vers=4"
|
||||
automount: "{{ env_automount }}"
|
||||
enabled: true
|
1
ansible/group_vars/cluster/nomad
Normal file
1
ansible/group_vars/cluster/nomad
Normal file
@ -0,0 +1 @@
|
||||
nomad_node_class: 'cluster'
|
@ -1,47 +0,0 @@
|
||||
|
||||
postgresql_users:
|
||||
- name: root
|
||||
role_attr_flags: SUPERUSER
|
||||
password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:root')}}"
|
||||
- name: wikijs
|
||||
password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:wikijs')}}"
|
||||
- name: ttrss
|
||||
password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:ttrss')}}"
|
||||
- name: gitea
|
||||
password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:gitea')}}"
|
||||
- name: supysonic
|
||||
password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:supysonic')}}"
|
||||
- name: hass
|
||||
password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:hass')}}"
|
||||
- name: nextcloud
|
||||
password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:nextcloud')}}"
|
||||
- name: vaultwarden
|
||||
password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:vaultwarden')}}"
|
||||
- name: drone
|
||||
password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:drone')}}"
|
||||
- name: dendrite
|
||||
password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:dendrite')}}"
|
||||
- name: paperless
|
||||
password: "{{ lookup('hashi_vault', 'secret=secrets/data/ansible/database:paperless')}}"
|
||||
|
||||
postgresql_databases:
|
||||
- name: wikijs
|
||||
owner: wikijs
|
||||
- name: ttrss
|
||||
owner: ttrss
|
||||
- name: gitea
|
||||
owner: gitea
|
||||
- name: supysonic
|
||||
owner: supysonic
|
||||
- name: hass
|
||||
owner: hass
|
||||
- name: nextcloud
|
||||
owner: nextcloud
|
||||
- name: vaultwarden
|
||||
owner: vaultwarden
|
||||
- name: drone
|
||||
owner: drone
|
||||
- name: dendrite
|
||||
owner: dendrite
|
||||
- name: paperless
|
||||
owner: paperless
|
38
ansible/group_vars/database/database
Normal file
38
ansible/group_vars/database/database
Normal file
@ -0,0 +1,38 @@
|
||||
postgres_consul_service: true
|
||||
postgres_consul_service_name: db
|
||||
|
||||
postgresql_databases:
|
||||
- name: ttrss
|
||||
owner: ttrss
|
||||
- name: gitea
|
||||
owner: gitea
|
||||
- name: supysonic
|
||||
owner: supysonic
|
||||
- name: hass
|
||||
owner: hass
|
||||
- name: vaultwarden
|
||||
owner: vaultwarden
|
||||
- name: drone
|
||||
owner: drone
|
||||
- name: paperless
|
||||
owner: paperless
|
||||
- name: vikunja
|
||||
owner: vikunja
|
||||
- name: ghostfolio
|
||||
owner: ghostfolio
|
||||
- name: pdns-auth
|
||||
owner: pdns-auth
|
||||
- name: pdns-admin
|
||||
owner: pdns-admin
|
||||
- name: mealie
|
||||
owner: mealie
|
||||
- name: immich
|
||||
owner: immich
|
||||
|
||||
postgresql_hba_entries:
|
||||
- {type: local, database: all, user: postgres, auth_method: peer}
|
||||
- {type: local, database: all, user: all, auth_method: peer}
|
||||
- {type: host, database: all, user: all, address: '127.0.0.1/32', auth_method: md5}
|
||||
- {type: host, database: all, user: all, address: '::1/128', auth_method: md5}
|
||||
- {type: host, database: all, user: all, address: '::0/128', auth_method: md5}
|
||||
- {type: host, database: all, user: all, address: '0.0.0.0/0', auth_method: md5}
|
54
ansible/group_vars/database/vault_database
Normal file
54
ansible/group_vars/database/vault_database
Normal file
@ -0,0 +1,54 @@
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
39363436643831373861376361613830316334613939346338616636393462663033393261633838
|
||||
6337336161393063646136613538396366653538656435360a303062636463383739653730346639
|
||||
61323634306265613336313634653039313639663836363032353261383566393865613166613032
|
||||
3837313634633466610a313062646237396138316361303361663565353862363139343566306539
|
||||
38303161303163323265376539323939393938373965353934303535613962653534363362346563
|
||||
61643638353138623162353364353736396162613735333063633739346132613161303564356437
|
||||
62343535363263646463306466663536613937393463666336396332646533343439613433626566
|
||||
38643363343065393165646134343935386461626166316662356365366666363737653336626631
|
||||
64643230616431396666666462303366343164323233303139643939346635353730316234386163
|
||||
35613235643034643833393233373536383863333763393066373564353535353463363336316335
|
||||
63363537643432663266386438316563656663656462333039303861393364333966383430643263
|
||||
63356435373064633861343137616637393161383361306135373864386235653034323732316663
|
||||
65336465386135663532356433386562666639333464633362663131646237613034646563396133
|
||||
33303464633635636233626633353038656230373266666132323561383866343632333561323363
|
||||
61346664623338376436373332646232646235323639633262666166346535663238653563363239
|
||||
34663365633363313433376333653534333364393635316235333965383262313563373161663065
|
||||
36393565396534353235623238303835343334646632306638306332336539616463393966653538
|
||||
35336462623031326539633139636533633632623137393463333531663935323765663139306361
|
||||
66643434393533313039356434326438626265323066613966323634306632653765363834613034
|
||||
30373039336536393865383265643335396232643537343363313338383838383030386665303237
|
||||
64363666346535633237353462333232623132353031323231623338356136656261303662656465
|
||||
31313039643561623635643435333133663032313964323061393231666336343233363038616231
|
||||
36356262326530383233336130326361613431623866633832663361633937646461343731343938
|
||||
33306262346463623935663466356264393837626239313739356431653163376563333234346566
|
||||
38373663643532313635333131663239383736343930623735323861663037356136353433633865
|
||||
63626435613936303661366637623338633961643137613933303735366265663933396130363039
|
||||
34396637643638613839306639343765393539653164616536653661373264376436626639316666
|
||||
61303835323761643531326438363035343539383464376433363534623934366534373631353364
|
||||
61383866323737316430303736366533643939313637393631303833363431613562303639323939
|
||||
66313434613963656464383964313734383938353366306462666537653563336465376464303538
|
||||
34336531663334303938333739313638636363623562613536333736386137363139653164626261
|
||||
62663662316365663563646164303935323866633336633939323837393962393130626330666233
|
||||
63663661303565646236623130663034636264353235376561306630376365613966663536303963
|
||||
63643161386435633831393334333035653761393863373731616239313235383033633439376166
|
||||
39613762376162386231633938393036633461303732323337656430373430636435313337303365
|
||||
37646461336339623339316663616636373036656564383462356562306465623762653162633963
|
||||
35636466386138333564666564323034393162633965386133643235303938616439333130353637
|
||||
61343536323034366464653138353665326436396133313432666563353335383733363335613562
|
||||
61646365346665383866623364396138323666326338313530353663323938613362653038313339
|
||||
32613663616535313661386538366330373364366637386634633437646362383764346263636434
|
||||
35616166393065343038643861636333373738363335353164326435303961326662356230323262
|
||||
35656531653535643630376330393731643532353132366662636664626132646632306361323035
|
||||
31373136616435336362633439356339336466313337623538383763386132396135653864386638
|
||||
31393864363466653137643565306462616238333435343036613331653866393532313861376331
|
||||
33646636623666343439616332386363373664346164313963623861393134666463383366633539
|
||||
35313761333564303635656364303566643436393130356163623137313530653539656537653139
|
||||
38336636623732313630303933303962303561376436623737633139643564343166326335386639
|
||||
31373437336139326562613339393235393065396538333566323864643639303132313733396132
|
||||
35613532396363326166313061353136373965303964623534653634613639303764393038333037
|
||||
63656131616463663565653134363336326139303736313138366262616338643339316231663631
|
||||
30656132386462393433313261313466303239346138623433643634616465656139343764353338
|
||||
62616139613731363665333438383861623837643432643134626461643631323034383262656439
|
||||
33653563323434343964633236353434643739333863636630636363633639373630
|
1
ansible/group_vars/database_active
Normal file
1
ansible/group_vars/database_active
Normal file
@ -0,0 +1 @@
|
||||
postgres_consul_tag: "active"
|
1
ansible/group_vars/database_standby
Normal file
1
ansible/group_vars/database_standby
Normal file
@ -0,0 +1 @@
|
||||
postgres_consul_tag: "standby"
|
@ -1,152 +0,0 @@
|
||||
dhcpd_authoritative: True
|
||||
dhcpd_lease_time: '72'
|
||||
dhcpd_domain_name: "{{ domain.name }}"
|
||||
dhcpd_nameservers:
|
||||
- '192.168.1.40'
|
||||
- '192.168.1.10'
|
||||
dhcpd_keys:
|
||||
- key: dhcp
|
||||
algorithm: HMAC-MD5
|
||||
secret: "{{ lookup('hashi_vault','secret=secrets/data/ansible/other:dhcpd_key') }}"
|
||||
|
||||
dhcpd_zones:
|
||||
- zone: "{{ domain.name }}."
|
||||
primary: "192.168.1.10"
|
||||
key: "dhcp"
|
||||
- zone: "1.168.192.in-addr.arpa."
|
||||
primary: "192.168.1.10"
|
||||
key: "dhcp"
|
||||
|
||||
dhcpd_options: |
|
||||
ddns-updates on;
|
||||
ddns-update-style interim;
|
||||
ignore client-updates;
|
||||
update-static-leases on;
|
||||
ddns-domainname "ducamps.win.";
|
||||
ddns-rev-domainname "in-addr.arpa.";
|
||||
|
||||
|
||||
dhcpd_subnets:
|
||||
- subnet: '192.168.1.0'
|
||||
netmask: '255.255.255.0'
|
||||
options: |
|
||||
option routers 192.168.1.1;
|
||||
pools:
|
||||
- range: '192.168.1.100 192.168.1.140'
|
||||
|
||||
dhcpd_hosts:
|
||||
- hostname: 'zen-pc'
|
||||
address: '192.168.1.14'
|
||||
ethernet: 'f0:d5:bf:f4:ce:d7'
|
||||
|
||||
- hostname: 'fixe-pc'
|
||||
address: '192.168.1.15'
|
||||
ethernet: 'ee:35:20:fc:7b:04'
|
||||
|
||||
- hostname: 'oscar'
|
||||
address: '192.168.1.40'
|
||||
ethernet: '84:39:be:12:05:69'
|
||||
|
||||
- hostname: 'VMAS-HML'
|
||||
address: '192.168.1.50'
|
||||
ethernet: '52:54:00:02:74:ed'
|
||||
|
||||
- hostname: 'VMAS-BUILD'
|
||||
address: '192.168.1.53'
|
||||
ethernet: '52:54:13:1e:93'
|
||||
|
||||
|
||||
- hostname: 'xiaomi-chambre-gateway'
|
||||
address: '192.168.1.61'
|
||||
ethernet: '04:cf:8c:9c:f7:f0'
|
||||
- hostname: 'xiaomi-ampoule-chambre'
|
||||
address: '192.168.1.62'
|
||||
ethernet: '44:23:7c:88:1f:ea'
|
||||
- hostname: 'shelly-chambre-ecran'
|
||||
address: '192.168.1.63'
|
||||
ethernet: 'b4:e6:2d:7a:ea:77'
|
||||
- hostname: 'shelly-salon-cadre'
|
||||
address: '192.168.1.64'
|
||||
ethernet: 'b4:e6:2d:7a:e6:1e'
|
||||
- hostname: 'shelly-chambre-ventilo'
|
||||
address: '192.168.1.65'
|
||||
ethernet: 'e0:98:06:97:78:0b'
|
||||
|
||||
keystodeploy:
|
||||
- name: juicessh with password
|
||||
user: "{{user.name}}"
|
||||
sshkey: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINN5V9WPPi2/HwAQuDeaJO3hUPf8HxNMHqVmkf1pDjWg JuiceSSH
|
||||
- name: fixe-pc new
|
||||
user: "{{user.name}}"
|
||||
sshkey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDBrP9akjyailPU9tUMvKrtDsqjI191W1L95u3OFjBqqapXgbDVx1FVtSlIIKcCHZyTII1zgC7woZmNRpmaIJRh6N+VIuRrRs29xx2GUVc4pxflUwwIAK36hgZS3nqmA2biacmPR9HogZLZMcPtZdLhWGlLuUv1cWqbqW7UcDa0lbubCo2v4OQMx/zt37voKAZSkkbH9mVszH6eKxNFy1KXbLYhwXiKfYBnAHbivhiSkZUGV6D4HNj8Jx6IY1YF3bfwMXmt841Q/7OY+t3RTIS8ewvSF+jpQ7GKHBEsZTZUGwIoSyZFFvCgKQVOJu/ZJJS4HNkluilir9Sxtx2LRgy+HHQ251trnsVsJp3ts4uTiMkKJQy1PXy1ZvQXYkip9Af3vlXUMmTyVj8cv+No07G1rZ1pZ3wXKX4RkTsoep5GsYlhyUd7GzsAQQiX9YhYyWDQ6NHBYAGAWbw2BLNxltWa4AyWOa1C8v+1+mRwdvpdMY7powJNCXQaIJmiOZiI/Us= vincent@fixe-pc-2020-03-01
|
||||
- name: zen-pc
|
||||
user: "{{user.name}}"
|
||||
sshkey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCYHkEIa38p3e4+m/LScHm8Ei7H2X/pDksjVAzoJ4fHr8oXc6DKkC8SWwMnh3L4WzWBhfTbzwUgFTNpsxhp/UyJf+fdzmzetlbVlYSuA6yWuSmgMeFbXFImhZ+Sn3i59hLeqAAyrkQLjba2waehdEsuOQ/AGoDbMYm38Xf9Wka/1YIeUPE4gLeLvymRnGw7BSug6Unycy52WlFAquollObOvc7tNiX0uLDh81Dp0KZhqWRs75hfmQ9du4g4uNhFLiF11hOGNgj3PWV+nWe8GWNQYVUBChWX1dsP8ct/ahG9IFXSPEaFD1IZeFp29u2ln3mgKkBtcRTRe1e3CLQqiRsUq2aixVFbSgFMFgGSUiNGNqKR4f9DeyJrYBplSj6HXjWoBny4Wm8+yfk8qR2RtQpS6AUu81xtKnXOaj9Q5VZO3kVF0U3EXHAZutTYDj9mDlhLSBS7x7hmrkRBbIy7adSx9Gx5Ck3/RllqG6KD+LdJa4I0pUTRNetpLpYDeZpwjnDP1r7udaSQMyRMH5YKLzhtHqIV/imn9QO4KCxNxTgwxt9ho6HDvlDGERCxm+yeHUu3CPyq2ZGSF5HHsYTGUtYvQw4JfQyw/5DrZ7IIdU1e7ZuaE3h/NvFgKJPVTP52nmUtIW7pIOkHpn9mddjm/oKMayOzMspLn9HLFVbqi7A5Xw== vincent@zen-pc
|
||||
|
||||
nomad_datacenter: homelab
|
||||
|
||||
|
||||
systemd_mounts:
|
||||
diskstation_nomad:
|
||||
share: diskstation.ducamps.win:/volume2/nomad
|
||||
mount: /mnt/diskstation/nomad
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
diskstation_git:
|
||||
share: diskstation.ducamps.win:/volume2/git
|
||||
mount: /mnt/diskstation/git
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
diskstation_music:
|
||||
share: diskstation.ducamps.win:/volume2/music
|
||||
mount: /mnt/diskstation/music
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
diskstation_nextcloud:
|
||||
share: //diskstation.ducamps.win/nextcloud
|
||||
mount: /mnt/diskstation/nextcloud
|
||||
type: cifs
|
||||
options:
|
||||
- credentials=/etc/creds/.diskstation_credentials
|
||||
- uid=33
|
||||
- gid=33
|
||||
- vers=3.0
|
||||
- dir_mode=0770
|
||||
- _netdev
|
||||
automount: true
|
||||
diskstation_CardDav:
|
||||
share: diskstation.ducamps.win:/volume2/CardDav
|
||||
mount: /mnt/diskstation/CardDav
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
diskstation_archMirror:
|
||||
share: diskstation.ducamps.win:/volume2/archMirror
|
||||
mount: /mnt/diskstation/archMirror
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
|
||||
credentials_files:
|
||||
1:
|
||||
type: smb
|
||||
path: /etc/creds/.diskstation_credentials
|
||||
username: admin
|
||||
password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/storage:diskstation_admin') }}"
|
||||
|
||||
|
||||
systemd_mounts_enabled:
|
||||
- diskstation_nomad
|
||||
- diskstation_git
|
||||
- diskstation_music
|
||||
- diskstation_nextcloud
|
||||
- diskstation_CardDav
|
||||
- diskstation_archMirror
|
68
ansible/group_vars/dhcp/dhcp
Normal file
68
ansible/group_vars/dhcp/dhcp
Normal file
@ -0,0 +1,68 @@
|
||||
dhcpd_authoritative: True
|
||||
dhcpd_lease_time: '72'
|
||||
dhcpd_domain_name: "lan.{{ domain.name }}"
|
||||
dhcpd_nameservers:
|
||||
- '192.168.1.4'
|
||||
- '192.168.1.40'
|
||||
|
||||
dhcpd_zones:
|
||||
- zone: "lan.{{ domain.name }}."
|
||||
primary: "192.168.1.5"
|
||||
key: "dhcpdupdate"
|
||||
- zone: "1.168.192.in-addr.arpa."
|
||||
primary: "192.168.1.5"
|
||||
key: "dhcpdupdate"
|
||||
|
||||
dhcpd_options: |
|
||||
ddns-updates on;
|
||||
ddns-update-style interim;
|
||||
ignore client-updates;
|
||||
update-static-leases on;
|
||||
ddns-domainname "lan.{{ domain.name }}.";
|
||||
ddns-rev-domainname "in-addr.arpa.";
|
||||
|
||||
|
||||
dhcpd_subnets:
|
||||
- subnet: '192.168.1.0'
|
||||
netmask: '255.255.255.0'
|
||||
options: |
|
||||
option routers 192.168.1.1;
|
||||
pools:
|
||||
- range: '192.168.1.100 192.168.1.140'
|
||||
|
||||
dhcpd_hosts:
|
||||
- hostname: 'zen-pc'
|
||||
address: '192.168.1.14'
|
||||
ethernet: 'f0:d5:bf:f4:ce:d7'
|
||||
|
||||
- hostname: 'fixe-pc'
|
||||
address: '192.168.1.15'
|
||||
ethernet: 'ee:35:20:fc:7b:04'
|
||||
|
||||
- hostname: 'oscar'
|
||||
address: '192.168.1.40'
|
||||
ethernet: '68:1D:EF:3C:F0:44'
|
||||
- hostname: 'bleys'
|
||||
address: '192.168.1.42'
|
||||
ethernet: '68:1d:ef:2b:3d:24'
|
||||
|
||||
|
||||
- hostname: 'xiaomi-chambre-gateway'
|
||||
address: '192.168.1.61'
|
||||
ethernet: '04:cf:8c:9c:f7:f0'
|
||||
- hostname: 'xiaomi-ampoule-chambre'
|
||||
address: '192.168.1.62'
|
||||
ethernet: '44:23:7c:88:1f:ea'
|
||||
- hostname: 'shelly-chambre-ecran'
|
||||
address: '192.168.1.63'
|
||||
ethernet: 'b4:e6:2d:7a:ea:77'
|
||||
- hostname: 'shelly-salon-cadre'
|
||||
address: '192.168.1.64'
|
||||
ethernet: 'b4:e6:2d:7a:e6:1e'
|
||||
- hostname: 'shelly-chambre-ventilo'
|
||||
address: '192.168.1.65'
|
||||
ethernet: 'e0:98:06:97:78:0b'
|
||||
- hostname: 'shelly-Bureau-chauffeau'
|
||||
address: '192.168.1.66'
|
||||
ethernet: '8c:aa:b5:42:b9:b9'
|
||||
|
14
ansible/group_vars/dhcp/vault_dhcp
Normal file
14
ansible/group_vars/dhcp/vault_dhcp
Normal file
@ -0,0 +1,14 @@
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
65303666336535386536653939626336646338623431353161636565393532623264316534326539
|
||||
6265393839323438376666393030383839326239323261660a333132613538306137383332336538
|
||||
38323830353062366133643734303138343939323135333532333666653039326437316361353463
|
||||
6665393263376132620a346239386437326462363565636335303766306638393331656664376665
|
||||
63373131373039653065633861626263646635323634333538343163346239633937303761366362
|
||||
31376438363731613666393531656232653033336332653261313866396434616461303831353336
|
||||
38663965636536313932346133363733636636643938366364366435366237316435643062336231
|
||||
34343931653963613431336465653036616431323263613731393963656637303561366461663038
|
||||
31336131346266393035343135323131636435333865323733386439363763376638383337613530
|
||||
34356331356361636665383933633130343564373739343630663835313164326565393439306163
|
||||
31386538633033333961386534323234653833323537356565616436346462613333663139623035
|
||||
30636265313230383162633466373937353262383965313631326336666133653331366230653961
|
||||
6131
|
@ -1,10 +1,2 @@
|
||||
chisel_server: true
|
||||
chisel_server_port: 9090
|
||||
chisel_server_backend: https://www.{{domain.name}}
|
||||
chisel_server_auth:
|
||||
user: chisel
|
||||
pass: "{{ lookup('hashi_vault','secret=secrets/data/ansible/other:chisel_pass') }}"
|
||||
arch_mirror_location: "/mnt/diskstation/archMirror"
|
||||
|
||||
nomad_datacenter: homelab
|
||||
nomad_allow_privileged: True
|
||||
system_wol_enable: True
|
||||
|
@ -1,92 +0,0 @@
|
||||
systemd_mounts:
|
||||
diskstation_git:
|
||||
share: diskstation.ducamps.win:/volume2/git
|
||||
mount: /mnt/diskstation/git
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
diskstation_CardDav:
|
||||
share: diskstation.ducamps.win:/volume2/CardDav
|
||||
mount: /mnt/diskstation/CardDav
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
backup_disk:
|
||||
share: /dev/sdb1
|
||||
mount: /mnt/backup
|
||||
type: ntfs-3g
|
||||
options:
|
||||
- "uid=1024
|
||||
- guid=100
|
||||
- vers=3.0"
|
||||
automount: true
|
||||
diskstation_home:
|
||||
share: diskstation.ducamps.win:/volume2/homes/admin
|
||||
mount: /mnt/diskstation/home
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
diskstation_photo:
|
||||
share: diskstation.ducamps.win:/volume2/photo
|
||||
mount: /mnt/diskstation/photo
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
diskstation_music:
|
||||
share: diskstation.ducamps.win:/volume2/music
|
||||
mount: /mnt/diskstation/music
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
diskstation_media:
|
||||
share: diskstation.ducamps.win:/volume1/media
|
||||
mount: /mnt/diskstation/media
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
diskstation_ebook:
|
||||
share: diskstation.ducamps.win:/volume2/ebook
|
||||
mount: /mnt/diskstation/ebook
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
diskstation_archMirror:
|
||||
share: diskstation.ducamps.win:/volume2/archMirror
|
||||
mount: /mnt/diskstation/archMirror
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
diskstation_nomad:
|
||||
share: diskstation.ducamps.win:/volume2/nomad
|
||||
mount: /mnt/diskstation/nomad
|
||||
type: nfs
|
||||
options:
|
||||
- " "
|
||||
automount: true
|
||||
|
||||
systemd_mounts_enabled:
|
||||
- diskstation_git
|
||||
- diskstation_music
|
||||
- backup_disk
|
||||
- diskstation_photo
|
||||
- diskstation_home
|
||||
- diskstation_CardDav
|
||||
- diskstation_media
|
||||
- diskstation_ebook
|
||||
- diskstation_archMirror
|
||||
- diskstation_nomad
|
||||
|
||||
credentials_files:
|
||||
1:
|
||||
type: smb
|
||||
path: /etc/creds/.diskstation_credentials
|
||||
username: admin
|
||||
password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/storage:diskstation_admin') }}"
|
13
ansible/group_vars/production
Normal file
13
ansible/group_vars/production
Normal file
@ -0,0 +1,13 @@
|
||||
domain:
|
||||
name: ducamps.eu
|
||||
consul_bootstrap_expect: 3
|
||||
consul_domain: "consul"
|
||||
nomad_bootstrap_expect: 3
|
||||
nomad_client_meta:
|
||||
- name: "env"
|
||||
value: "production"
|
||||
vault_unseal_keys_dir_output: "~/vaultUnseal/production"
|
||||
env_default_nfs_path: ""
|
||||
env_media_nfs_path: "/volume1"
|
||||
env_automount: true
|
||||
nas_ip: "192.168.1.43"
|
21
ansible/group_vars/staging
Normal file
21
ansible/group_vars/staging
Normal file
@ -0,0 +1,21 @@
|
||||
domain:
|
||||
name: ducamps.dev
|
||||
#systemd_mounts: []
|
||||
#systemd_mounts_enabled: []
|
||||
consul_bootstrap_expect: 2
|
||||
consul_domain: "consul"
|
||||
nomad_bootstrap_expect: 2
|
||||
nomad_client_meta:
|
||||
- name: "env"
|
||||
value: "staging"
|
||||
|
||||
vault_unseal_keys_dir_output: "~/vaultUnseal/staging"
|
||||
hosts_entries:
|
||||
- ip: "{{ hostvars['nas-dev']['ansible_default_ipv4']['address'] }}"
|
||||
name: diskstation.ducamps.eu
|
||||
|
||||
env_default_nfs_path: ""
|
||||
env_automount: true
|
||||
nas_ip: "nfs.service.consul"
|
||||
|
||||
|
@ -1,2 +0,0 @@
|
||||
|
||||
chainetv_repo_branch: dev
|
@ -1,2 +0,0 @@
|
||||
|
||||
chainetv_repo_branch: master
|
@ -1,2 +0,0 @@
|
||||
|
||||
wireguard_address: "10.0.0.100/24"
|
65
ansible/host_vars/bleys
Normal file
65
ansible/host_vars/bleys
Normal file
@ -0,0 +1,65 @@
|
||||
---
|
||||
ansible_host: "192.168.1.42"
|
||||
ansible_python_interpreter: "/usr/bin/python3"
|
||||
default_interface: "enp2s0"
|
||||
consul_iface: "{{ default_interface}}"
|
||||
vault_iface: "{{ default_interface}}"
|
||||
nfs_iface: "{{ default_interface}}"
|
||||
wireguard_address: "10.0.0.7/24"
|
||||
wireguard_byhost_allowed_ips:
|
||||
merlin: 10.0.0.7,192.168.1.42,192.168.1.0/24
|
||||
corwin: 10.0.0.7,192.168.1.42,192.168.1.0/24
|
||||
perrsistent_keepalive: "20"
|
||||
wireguard_endpoint: ""
|
||||
|
||||
wireguard_postup:
|
||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
||||
- iptables -t nat -A POSTROUTING -o {{default_interface}} -j MASQUERADE
|
||||
- sysctl -w net.ipv4.ip_forward=1
|
||||
|
||||
wireguard_postdown:
|
||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
||||
- iptables -t nat -D POSTROUTING -o {default_interface} -j MASQUERADE
|
||||
- sysctl -w net.ipv4.ip_forward=0
|
||||
|
||||
partition_table:
|
||||
- device: "/dev/sda"
|
||||
label: gpt
|
||||
settings:
|
||||
- number: 1
|
||||
part_end: 300MB
|
||||
flags: [boot, esp]
|
||||
fstype: vfat
|
||||
format: yes
|
||||
- number: 2
|
||||
part_start: 512MB
|
||||
part_end: 1524MB
|
||||
flags: []
|
||||
fstype: swap
|
||||
format: yes
|
||||
- number: 3
|
||||
part_start: 1524MB
|
||||
flags: [lvm]
|
||||
fstype: ext4
|
||||
format: yes
|
||||
#- device: "/dev/sdb"
|
||||
#settings:
|
||||
#- number: 1
|
||||
#name: home
|
||||
#fstype: ext4
|
||||
#format:
|
||||
mount_table:
|
||||
- device: "/dev/sda"
|
||||
settings:
|
||||
- number: 3
|
||||
mountpath: /mnt
|
||||
fstype: ext4
|
||||
- number: 1
|
||||
mountpath: /mnt/boot
|
||||
fstype: vfat
|
||||
|
||||
#need vfat boot partition with esp label
|
||||
provissionning_UEFI_Enable: True
|
||||
|
@ -1,30 +1,35 @@
|
||||
---
|
||||
ansible_host: 65.108.221.233
|
||||
|
||||
ansible_host: 10.0.0.1
|
||||
#ansible_host: 135.181.150.203
|
||||
default_interface: "eth0"
|
||||
wireguard_address: "10.0.0.1/24"
|
||||
wireguard_endpoint: "65.108.221.233"
|
||||
wireguard_persistent_keepalive: "30"
|
||||
wireguard_allowed_ips: "10.0.0.1/32"
|
||||
wireguard_endpoint: "135.181.150.203"
|
||||
wireguard_persistent_keepalive: "20"
|
||||
wireguard_allowed_ips: 10.0.0.1
|
||||
|
||||
wireguard_postup:
|
||||
- iptables -A FORWARD -o %i -j ACCEPT
|
||||
- iptables -A FORWARD -i %i -j ACCEPT
|
||||
- iptables -t nat -A POSTROUTING -o enp1s0 -j MASQUERADE
|
||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
||||
- sysctl -w net.ipv4.ip_forward=1
|
||||
- resolvectl dns %i 192.168.1.4 192.168.1.41; resolvectl domain %i '~ducamps.win' '~ducamps.eu' '~{{ consul_domain }}'
|
||||
|
||||
wireguard_postdown:
|
||||
- iptables -D FORWARD -i %i -j ACCEPT
|
||||
- iptables -D FORWARD -o %i -j ACCEPT
|
||||
- iptables -t nat -D POSTROUTING -o enp1s0 -j MASQUERADE
|
||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
||||
- sysctl -w net.ipv4.ip_forward=0
|
||||
|
||||
wireguard_unmanaged_peers:
|
||||
phone:
|
||||
public_key: ioG35kDFTtip+Acfq+je9qDHYbZij+J6+Pg3T6Z4N0w=
|
||||
public_key: IYKgrQ2VJUbOnupSqedOfIilsbmBBABZUTRF9ZoTrkc=
|
||||
allowed_ips: 10.0.0.3/32
|
||||
persistent_keepalive: 0
|
||||
zen:
|
||||
public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
|
||||
allowed_ips: 10.0.0.5/32
|
||||
persistent_keepalive: 0
|
||||
wireguard_dns: "192.168.1.4,192.168.1.41"
|
||||
consul_client_addr: "127.0.0.1 10.0.0.1"
|
||||
consul_bind_address: "10.0.0.1"
|
||||
consul_ui: True
|
||||
@ -34,7 +39,9 @@ nomad_host_networks:
|
||||
- name: "private"
|
||||
interface: wg0
|
||||
- name: "public"
|
||||
interface: enp1s0
|
||||
interface: eth0
|
||||
- name: "default"
|
||||
interface: wg0
|
||||
nomad_client_network_interface : "wg0"
|
||||
vault_listener_address: 10.0.0.1
|
||||
nomad_plugins_podman: True
|
||||
|
@ -1,18 +1,24 @@
|
||||
---
|
||||
ansible_host: "192.168.1.41"
|
||||
ansible_python_interpreter: "/usr/bin/python3"
|
||||
wireguard_address: "10.0.0.5/24"
|
||||
wireguard_allowed_ips: "10.0.0.5/32,192.168.1.0/24"
|
||||
perrsistent_keepalive: "30"
|
||||
default_interface: "enu1u1"
|
||||
consul_iface: "{{ default_interface }}"
|
||||
vault_iface: "{{ default_interface }}"
|
||||
|
||||
wireguard_address: "10.0.0.6/24"
|
||||
wireguard_byhost_allowed_ips:
|
||||
merlin: 10.0.0.6,192.168.1.41
|
||||
corwin: 10.0.0.6,192.168.1.41
|
||||
perrsistent_keepalive: "20"
|
||||
wireguard_endpoint: ""
|
||||
|
||||
wireguard_postup:
|
||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
||||
- iptables -t nat -A POSTROUTING -o eno1 -j MASQUERADE
|
||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
||||
|
||||
wireguard_postdown:
|
||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
||||
- iptables -t nat -D POSTROUTING -o eno1 -j MASQUERADE
|
||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
||||
|
||||
|
19
ansible/host_vars/gerard-dev
Normal file
19
ansible/host_vars/gerard-dev
Normal file
@ -0,0 +1,19 @@
|
||||
---
|
||||
|
||||
default_interface: eth0
|
||||
vault_iface: "{{ default_interface}}"
|
||||
ansible_host: gerard-dev.lan.ducamps.dev
|
||||
wireguard_address: "10.0.1.6/24"
|
||||
perrsistent_keepalive: "20"
|
||||
wireguard_endpoint: ""
|
||||
|
||||
wireguard_postup:
|
||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
||||
|
||||
wireguard_postdown:
|
||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
||||
- iptables -t nat -D POSTROUTING -o {{ default_interface}} -j MASQUERADE
|
||||
|
@ -1,31 +1,39 @@
|
||||
---
|
||||
ansible_host: 65.109.13.133
|
||||
|
||||
ansible_host: 10.0.0.4
|
||||
#ansible_host: 65.21.2.14
|
||||
default_interface: "ens3"
|
||||
nfs_iface: "wg0"
|
||||
wireguard_address: "10.0.0.4/24"
|
||||
wireguard_endpoint: "65.109.13.133"
|
||||
wireguard_persistent_keepalive: "30"
|
||||
wireguard_allowed_ips: "10.0.0.4/32,10.0.0.3/32,10.0.0.5/32"
|
||||
wireguard_endpoint: "65.21.2.14"
|
||||
wireguard_persistent_keepalive: "20"
|
||||
wireguard_byhost_allowed_ips:
|
||||
oscar: "0.0.0.0/0"
|
||||
bleys: "0.0.0.0/0"
|
||||
wireguard_allowed_ips: "10.0.0.4/32,10.0.0.3,10.0.0.5"
|
||||
|
||||
wireguard_postup:
|
||||
- iptables -A FORWARD -o %i -j ACCEPT
|
||||
- iptables -A FORWARD -i %i -j ACCEPT
|
||||
- iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
|
||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
||||
- sysctl -w net.ipv4.ip_forward=1
|
||||
- resolvectl dns %i 192.168.1.4 192.168.1.41; resolvectl domain %i '~ducamps.win' '~ducamps.eu' '~{{ consul_domain }}'
|
||||
|
||||
wireguard_postdown:
|
||||
- iptables -D FORWARD -i %i -j ACCEPT
|
||||
- iptables -D FORWARD -o %i -j ACCEPT
|
||||
- iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
|
||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
||||
- sysctl -w net.ipv4.ip_forward=0
|
||||
|
||||
wireguard_unmanaged_peers:
|
||||
phone:
|
||||
public_key: ioG35kDFTtip+Acfq+je9qDHYbZij+J6+Pg3T6Z4N0w=
|
||||
public_key: IYKgrQ2VJUbOnupSqedOfIilsbmBBABZUTRF9ZoTrkc=
|
||||
allowed_ips: 10.0.0.3/32
|
||||
persistent_keepalive: 0
|
||||
zen:
|
||||
public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
|
||||
allowed_ips: 10.0.0.5/32
|
||||
persistent_keepalive: 0
|
||||
wireguard_dns: "192.168.1.41,192.168.1.10"
|
||||
wireguard_dns: "192.168.1.4,192.168.1.41"
|
||||
consul_client_addr: "127.0.0.1 10.0.0.4"
|
||||
consul_bind_address: "10.0.0.4"
|
||||
consul_ui: True
|
||||
@ -35,7 +43,8 @@ nomad_host_networks:
|
||||
- name: "private"
|
||||
interface: wg0
|
||||
- name: "public"
|
||||
interface: eth0
|
||||
interface: ens3
|
||||
- name: "default"
|
||||
interface: wg0
|
||||
vault_listener_address: 10.0.0.4
|
||||
nomad_plugins_podman: True
|
||||
|
41
ansible/host_vars/merlin-dev
Normal file
41
ansible/host_vars/merlin-dev
Normal file
@ -0,0 +1,41 @@
|
||||
---
|
||||
|
||||
ansible_host: merlin-dev.lan.ducamps.dev
|
||||
default_interface: eth0
|
||||
vault_iface: "{{ default_interface}}"
|
||||
wireguard_address: "10.0.1.4/24"
|
||||
wireguard_endpoint: "{{ ansible_default_ipv4.address }}"
|
||||
wireguard_persistent_keepalive: "30"
|
||||
|
||||
wireguard_postup:
|
||||
- iptables -A FORWARD -o %i -j ACCEPT
|
||||
- iptables -A FORWARD -i %i -j ACCEPT
|
||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
||||
|
||||
wireguard_postdown:
|
||||
- iptables -D FORWARD -i %i -j ACCEPT
|
||||
- iptables -D FORWARD -o %i -j ACCEPT
|
||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
||||
|
||||
wireguard_unmanaged_peers:
|
||||
phone:
|
||||
public_key: ioG35kDFTtip+Acfq+je9qDHYbZij+J6+Pg3T6Z4N0w=
|
||||
allowed_ips: 10.0.1.3/32
|
||||
persistent_keepalive: 0
|
||||
zen:
|
||||
public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
|
||||
allowed_ips: 10.0.1.5/32
|
||||
persistent_keepalive: 0
|
||||
consul_client_addr: "127.0.0.1 10.0.1.4"
|
||||
consul_bind_address: "10.0.1.4"
|
||||
consul_ui: True
|
||||
consul_iface: "wg0"
|
||||
nomad_bind_addr: "10.0.1.4"
|
||||
nomad_host_networks:
|
||||
- name: "private"
|
||||
interface: wg0
|
||||
- name: "public"
|
||||
interface: eth0
|
||||
- name: "default"
|
||||
interface: wg0
|
||||
vault_listener_address: 10.0.1.4
|
17
ansible/host_vars/nas-dev
Normal file
17
ansible/host_vars/nas-dev
Normal file
@ -0,0 +1,17 @@
|
||||
---
|
||||
ansible_host: nas-dev.lan.ducamps.dev
|
||||
default_interface: eth0
|
||||
vault_iface: "{{ default_interface}}"
|
||||
wireguard_address: "10.0.1.8/24"
|
||||
perrsistent_keepalive: "30"
|
||||
wireguard_endpoint: ""
|
||||
|
||||
wireguard_postup:
|
||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
||||
|
||||
wireguard_postdown:
|
||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
19
ansible/host_vars/oberon
Normal file
19
ansible/host_vars/oberon
Normal file
@ -0,0 +1,19 @@
|
||||
---
|
||||
wireguard_address: "10.0.0.8/24"
|
||||
default_interface: "enp2s0"
|
||||
consul_iface: "{{ default_interface}}"
|
||||
vault_iface: "{{ default_interface}}"
|
||||
perrsistent_keepalive: "30"
|
||||
wireguard_endpoint: ""
|
||||
wireguard_byhost_allowed_ips:
|
||||
merlin: 10.0.0.8,192.168.1.43
|
||||
corwin: 10.0.0.8,192.168.1.43
|
||||
wireguard_postup:
|
||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
||||
|
||||
wireguard_postdown:
|
||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
@ -1,16 +1,62 @@
|
||||
---
|
||||
default_interface: "enp1s0"
|
||||
consul_iface: "{{ default_interface}}"
|
||||
vault_iface: "{{ default_interface}}"
|
||||
nfs_iface: "{{ default_interface}}"
|
||||
nomad_client_cpu_total_compute: 8000
|
||||
wireguard_address: "10.0.0.2/24"
|
||||
wireguard_allowed_ips: "10.0.0.2/32,192.168.1.0/24"
|
||||
wireguard_byhost_allowed_ips:
|
||||
merlin: 10.0.0.2,192.168.1.40
|
||||
corwin: 10.0.0.2,192.168.1.40
|
||||
perrsistent_keepalive: "30"
|
||||
wireguard_endpoint: ""
|
||||
|
||||
wireguard_postup:
|
||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
||||
- iptables -t nat -A POSTROUTING -o eno1 -j MASQUERADE
|
||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
||||
|
||||
wireguard_postdown:
|
||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
||||
- iptables -t nat -D POSTROUTING -o eno1 -j MASQUERADE
|
||||
consul_snapshot: True
|
||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
||||
|
||||
partition_table:
|
||||
- device: "/dev/sda"
|
||||
label: gpt
|
||||
settings:
|
||||
- number: 1
|
||||
part_end: 300MB
|
||||
flags: [boot, esp]
|
||||
fstype: vfat
|
||||
format: yes
|
||||
- number: 2
|
||||
part_start: 512MB
|
||||
part_end: 1524MB
|
||||
flags: []
|
||||
fstype: swap
|
||||
format: yes
|
||||
- number: 3
|
||||
part_start: 1524MB
|
||||
flags: [lvm]
|
||||
fstype: ext4
|
||||
format: yes
|
||||
#- device: "/dev/sdb"
|
||||
#settings:
|
||||
#- number: 1
|
||||
#name: home
|
||||
#fstype: ext4
|
||||
#format:
|
||||
mount_table:
|
||||
- device: "/dev/sda"
|
||||
settings:
|
||||
- number: 3
|
||||
mountpath: /mnt
|
||||
fstype: ext4
|
||||
- number: 1
|
||||
mountpath: /mnt/boot
|
||||
fstype: vfat
|
||||
|
||||
#need vfat boot partition with esp label
|
||||
provissionning_UEFI_Enable: True
|
||||
|
||||
|
17
ansible/host_vars/oscar-dev
Normal file
17
ansible/host_vars/oscar-dev
Normal file
@ -0,0 +1,17 @@
|
||||
---
|
||||
ansible_host: oscar-dev.lan.ducamps.dev
|
||||
default_interface: eth0
|
||||
vault_iface: "{{ default_interface}}"
|
||||
wireguard_address: "10.0.1.2/24"
|
||||
perrsistent_keepalive: "30"
|
||||
wireguard_endpoint: ""
|
||||
|
||||
wireguard_postup:
|
||||
- iptables -A FORWARD -i wg0 -j ACCEPT
|
||||
- iptables -A FORWARD -o wg0 -j ACCEPT
|
||||
- iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
||||
|
||||
wireguard_postdown:
|
||||
- iptables -D FORWARD -i wg0 -j ACCEPT
|
||||
- iptables -D FORWARD -o wg0 -j ACCEPT
|
||||
- iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
|
@ -1,12 +1,25 @@
|
||||
|
||||
requirements:
|
||||
ansible-galaxy install -g -f -r roles/requirements.yml
|
||||
ansible-galaxy install -g -r roles/requirements.yml
|
||||
|
||||
deploy_production:
|
||||
ansible-playbook site.yml -i production
|
||||
deploy_production:
|
||||
ansible-playbook site.yml -i production -u ansible
|
||||
|
||||
deploy_staging:
|
||||
ansible-playbook site.yml -i staging
|
||||
deploy_production_wiregard:
|
||||
ansible-playbook playbooks/wireguard.yml -i production -u ansible
|
||||
|
||||
deploy_staging:
|
||||
ansible-playbook playbooks/wireguard.yml -i staging -u ansible
|
||||
ansible-playbook site.yml -i staging -u ansible
|
||||
|
||||
|
||||
deploy_staging_base:
|
||||
ansible-playbook playbooks/sssd.yml -i staging -u ansible
|
||||
ansible-playbook playbooks/wireguard.yml -i staging -u ansible
|
||||
ansible-playbook playbooks/server.yml -i staging -u ansible
|
||||
|
||||
|
||||
|
||||
view-allvault:
|
||||
ansible-vault view `git grep -l "ANSIBLE_VAULT;1.1;AES256$$"`
|
||||
|
||||
generate-token:
|
||||
@echo export VAULT_TOKEN=`vault token create -policy=ansible -field="token" -period 6h`
|
||||
|
9
ansible/misc/vault-keyring-client.sh
Executable file
9
ansible/misc/vault-keyring-client.sh
Executable file
@ -0,0 +1,9 @@
|
||||
#!/bin/sh
|
||||
|
||||
readonly vault_password_file_encrypted="$(dirname $0)/vault-password.gpg"
|
||||
|
||||
# flock used to work around "gpg: decryption failed: No secret key" in tf-stage2
|
||||
# would otherwise need 'auto-expand-secmem' (https://dev.gnupg.org/T3530#106174)
|
||||
flock "$vault_password_file_encrypted" \
|
||||
gpg --batch --decrypt --quiet "$vault_password_file_encrypted"
|
||||
|
BIN
ansible/misc/vault-password.gpg
Normal file
BIN
ansible/misc/vault-password.gpg
Normal file
Binary file not shown.
45
ansible/molecule/default/molecule.yml
Normal file
45
ansible/molecule/default/molecule.yml
Normal file
@ -0,0 +1,45 @@
|
||||
---
|
||||
prerun: false
|
||||
dependency:
|
||||
name: galaxy
|
||||
enabled: false
|
||||
driver:
|
||||
name: vagrant
|
||||
provider:
|
||||
name: libvirt
|
||||
default_box: archlinux/archlinux
|
||||
platforms:
|
||||
- name: oscar-dev
|
||||
cpu: 1
|
||||
memory: 1024
|
||||
box: archlinux/archlinux
|
||||
- name: merlin-dev
|
||||
cpu: 1
|
||||
memory: 1024
|
||||
box: generic/rocky9
|
||||
- name: gerard-dev
|
||||
cpu: 1
|
||||
memory: 1024
|
||||
box: debian/bookworm64
|
||||
- name: nas-dev
|
||||
cpu: 1
|
||||
memory: 1024
|
||||
box: archlinux/archlinux
|
||||
provisioner:
|
||||
name: ansible
|
||||
connection_options:
|
||||
ansible_ssh_user: vagrant
|
||||
ansible_become: true
|
||||
env:
|
||||
ANSIBLE_CONFIG: ../../ansible.cfg
|
||||
ANSIBLE_ROLES_PATH: "../../roles"
|
||||
log: true
|
||||
lint:
|
||||
name: ansible-lint
|
||||
inventory:
|
||||
host_vars: []
|
||||
links:
|
||||
group_vars: ../../group_vars
|
||||
hosts: ../../staging
|
||||
verifier:
|
||||
name: ansible
|
@ -1,10 +1,54 @@
|
||||
- hosts: all
|
||||
---
|
||||
- name: Consul install
|
||||
hosts: all
|
||||
roles:
|
||||
- role: ansible-consul
|
||||
become: true
|
||||
|
||||
- name: Vault install
|
||||
hosts: homelab
|
||||
roles:
|
||||
- role: ansible-hashicorp-vault
|
||||
when: ansible_architecture == 'x86_64'
|
||||
become: true
|
||||
post_tasks:
|
||||
- name: Stat root file
|
||||
ansible.builtin.stat:
|
||||
path: "{{ vault_unseal_keys_dir_output }}/rootkey"
|
||||
register: rootkey_exist
|
||||
delegate_to: localhost
|
||||
- name: Reading root contents
|
||||
ansible.builtin.command: cat "{{ vault_unseal_keys_dir_output }}/rootkey"
|
||||
register: root_token
|
||||
delegate_to: localhost
|
||||
when: rootkey_exist.stat.exists
|
||||
changed_when: false
|
||||
- name: debug
|
||||
ansible.builtin.debug:
|
||||
var: root_token
|
||||
- name: Generate nomad token
|
||||
community.hashi_vault.vault_token_create:
|
||||
renewable: true
|
||||
policies: "nomad-server-policy"
|
||||
period: 72h
|
||||
no_parent: true
|
||||
token: "{{ root_token.stdout }}"
|
||||
url: "http://active.vault.service.consul:8200"
|
||||
retries: 4
|
||||
run_once: true
|
||||
delegate_to: localhost
|
||||
when: root_token.stdout is defined
|
||||
register: nomad_token_data
|
||||
|
||||
- name: Gather nomad token
|
||||
ansible.builtin.set_fact:
|
||||
nomad_vault_token: "{{ nomad_token_data.login.auth.client_token }}"
|
||||
when: nomad_token_data.login is defined
|
||||
|
||||
- name: nomad
|
||||
hosts: all
|
||||
vars:
|
||||
unseal_keys_dir_output: ~/vaultunseal
|
||||
roles:
|
||||
- role: ansible-nomad
|
||||
become: true
|
||||
- role: docker
|
||||
|
9
ansible/playbooks/autofs.yml
Normal file
9
ansible/playbooks/autofs.yml
Normal file
@ -0,0 +1,9 @@
|
||||
---
|
||||
- hosts:
|
||||
- homelab
|
||||
- VPS
|
||||
- NAS
|
||||
vars:
|
||||
# certbot_force: true
|
||||
roles:
|
||||
- autofs
|
@ -1,4 +1,6 @@
|
||||
---
|
||||
- hosts: all
|
||||
gather_facts: false
|
||||
become: true
|
||||
roles:
|
||||
- ansible_bootstrap
|
||||
|
28
ansible/playbooks/create_user.yml
Normal file
28
ansible/playbooks/create_user.yml
Normal file
@ -0,0 +1,28 @@
|
||||
---
|
||||
- hosts: all
|
||||
roles:
|
||||
- role: ansible-user
|
||||
vars:
|
||||
user_name: '{{ user.name }}'
|
||||
user_ldap: '{{ sssd_configure}}'
|
||||
user_password: '{{ userPassword }}'
|
||||
user_authorized_key: '{{ user.authorized_keys}}'
|
||||
user_privatekey: '{{ user.privatekey}}'
|
||||
user_shell: '/bin/zsh'
|
||||
user_uid: '{{ user.uid }}'
|
||||
user_groups:
|
||||
- docker
|
||||
become: true
|
||||
become_user: '{{ user.name }}'
|
||||
|
||||
- hosts: all
|
||||
roles:
|
||||
- role: user_config
|
||||
vars:
|
||||
user_config_username: "{{ user.name }}"
|
||||
become_user: "{{ user.name }}"
|
||||
become: true
|
||||
- role: user_config
|
||||
vars:
|
||||
user_config_username: root
|
||||
become: true
|
@ -1,7 +1,54 @@
|
||||
---
|
||||
- hosts: database
|
||||
- name: Database playbook
|
||||
hosts: database
|
||||
vars:
|
||||
# certbot_force: true
|
||||
pre_tasks:
|
||||
- name: Install Pg vertors (immich)
|
||||
aur:
|
||||
name: pgvecto.rs-bin
|
||||
state: present
|
||||
become: true
|
||||
become_user: aur_builder
|
||||
- name: Add database member to pg_hba replication
|
||||
ansible.builtin.set_fact:
|
||||
postgresql_hba_entries: "{{ postgresql_hba_entries + [\
|
||||
{'type':'host', \
|
||||
'database': 'replication',\
|
||||
'user':'repli',\
|
||||
'address':hostvars[item]['ansible_'+hostvars[item]['default_interface']]['ipv4']['address']+'/32',\
|
||||
'auth_method':'trust'}] }}"
|
||||
loop: '{{ groups.database }}'
|
||||
roles:
|
||||
- role: ansible-role-postgresql
|
||||
become: true
|
||||
tasks:
|
||||
- name: Launch replication
|
||||
ansible.builtin.command: pg_basebackup -D /var/lib/postgres/data -h {{groups["database_active"]|first}} -U repli -Fp -Xs -P -R -w
|
||||
args:
|
||||
creates: /var/lib/postgres/data/postgresql.conf
|
||||
become: true
|
||||
become_user: postgres
|
||||
when: inventory_hostname in groups["database_standby"]
|
||||
- name: Ensure PostgreSQL is started and enabled on boot.
|
||||
ansible.builtin.service:
|
||||
name: '{{ postgresql_daemon }}'
|
||||
state: '{{ postgresql_service_state }}'
|
||||
enabled: '{{ postgresql_service_enabled }}'
|
||||
become: true
|
||||
|
||||
- name: Set Postgress shared libraries
|
||||
community.postgresql.postgresql_set:
|
||||
name: shared_preload_libraries
|
||||
value: vectors.so
|
||||
become: true
|
||||
become_user: postgres
|
||||
when: inventory_hostname in groups["database_active"]
|
||||
notify: Restart postgresql
|
||||
- name: Set Postgress shared libraries
|
||||
community.postgresql.postgresql_set:
|
||||
name: search_path
|
||||
value: '$user, public, vectors'
|
||||
become: true
|
||||
become_user: postgres
|
||||
when: inventory_hostname in groups["database_active"]
|
||||
|
6
ansible/playbooks/dns.yml
Normal file
6
ansible/playbooks/dns.yml
Normal file
@ -0,0 +1,6 @@
|
||||
---
|
||||
- name: DNS playbook
|
||||
hosts: DNS
|
||||
roles:
|
||||
- role: pdns_recursor-ansible
|
||||
become: true
|
@ -5,4 +5,3 @@
|
||||
- cronie
|
||||
- hass-client-control
|
||||
- mpd
|
||||
|
||||
|
28
ansible/playbooks/nas.yml
Normal file
28
ansible/playbooks/nas.yml
Normal file
@ -0,0 +1,28 @@
|
||||
---
|
||||
- name: gather all
|
||||
hosts: all
|
||||
- name: NAS playbook
|
||||
hosts: NAS
|
||||
vars:
|
||||
# certbot_force: true
|
||||
pre_tasks:
|
||||
- name: include task NasBind
|
||||
ansible.builtin.include_tasks:
|
||||
file: tasks/NasBind.yml
|
||||
loop: "{{ nas_bind_source }}"
|
||||
- name: create nomad folder
|
||||
ansible.builtin.file:
|
||||
path: "{{ nas_bind_target }}/nomad/{{ item.name }}"
|
||||
owner: "{{ item.owner|default('root') }}"
|
||||
state: directory
|
||||
become: true
|
||||
loop: "{{ NAS_nomad_folder }}"
|
||||
roles:
|
||||
- role: ansible-role-nut
|
||||
become: true
|
||||
- role: ansible-role-nfs
|
||||
become: true
|
||||
- role: ansible-role-pureftpd
|
||||
become: true
|
||||
- role: vladgh.samba.server
|
||||
become: true
|
@ -1,11 +1,26 @@
|
||||
---
|
||||
- hosts:
|
||||
- hosts:
|
||||
- homelab
|
||||
- VPS
|
||||
- NAS
|
||||
vars:
|
||||
# certbot_force: true
|
||||
tasks:
|
||||
- name: Create user
|
||||
ansible.builtin.include_role:
|
||||
name: "ansible-user"
|
||||
apply:
|
||||
become: true
|
||||
vars:
|
||||
user_name: "{{ create.name }}"
|
||||
user_home: "{{ create.home }}"
|
||||
user_groups: "{{ create.groups|default('') }}"
|
||||
user_shell: "{{ create.shell|default('') }}"
|
||||
user_authorized_key: "{{ create.authorized_keys|default([]) }}"
|
||||
user_privatekey: "{{ create.privatekey|default([])}}"
|
||||
loop: "{{system_user}}"
|
||||
loop_control:
|
||||
loop_var: create
|
||||
roles:
|
||||
- system
|
||||
- autofs
|
||||
- msmtp
|
||||
- cronie
|
||||
|
5
ansible/playbooks/sssd.yml
Normal file
5
ansible/playbooks/sssd.yml
Normal file
@ -0,0 +1,5 @@
|
||||
---
|
||||
- hosts: all
|
||||
roles:
|
||||
- role: ansible-role-sssd
|
||||
become: true
|
18
ansible/playbooks/tasks/NasBind.yml
Normal file
18
ansible/playbooks/tasks/NasBind.yml
Normal file
@ -0,0 +1,18 @@
|
||||
- name: Ensure base NFS directory exist
|
||||
ansible.builtin.file:
|
||||
path: "{{ item.dest }}"
|
||||
state: directory
|
||||
become: true
|
||||
- name: Ensure source NFS directory exist
|
||||
ansible.builtin.file:
|
||||
path: "{{ item.source }}"
|
||||
state: directory
|
||||
become: true
|
||||
- name: Bind NAS export
|
||||
ansible.posix.mount:
|
||||
path: "{{ item.dest }}"
|
||||
src: "{{ item.source }}"
|
||||
opts: bind
|
||||
fstype: none
|
||||
state: mounted
|
||||
become: true
|
1
ansible/playbooks/templates/samba_homes_include.conf
Normal file
1
ansible/playbooks/templates/samba_homes_include.conf
Normal file
@ -0,0 +1 @@
|
||||
path = /exports/homes/%S
|
@ -1,4 +0,0 @@
|
||||
- hosts: all
|
||||
vars:
|
||||
roles:
|
||||
- user_config
|
@ -2,4 +2,4 @@
|
||||
- hosts: wireguard
|
||||
roles:
|
||||
- role: ansible-role-wireguard
|
||||
become: True
|
||||
become: true
|
||||
|
@ -1,24 +1,52 @@
|
||||
[homelab]
|
||||
[DNS]
|
||||
oscar
|
||||
gerard
|
||||
|
||||
[VPS]
|
||||
corwin
|
||||
merlin
|
||||
|
||||
|
||||
[dhcp]
|
||||
gerard
|
||||
oberon
|
||||
|
||||
[wireguard]
|
||||
corwin
|
||||
oscar
|
||||
merlin
|
||||
gerard
|
||||
[database_active]
|
||||
bleys
|
||||
|
||||
[database]
|
||||
[database_standby]
|
||||
oscar
|
||||
merlin
|
||||
|
||||
[database:children]
|
||||
database_active
|
||||
database_standby
|
||||
|
||||
[rsyncd]
|
||||
oscar
|
||||
bleys
|
||||
|
||||
[wireguard:children]
|
||||
production
|
||||
|
||||
[NAS]
|
||||
oberon
|
||||
|
||||
[cluster]
|
||||
oscar
|
||||
#gerard
|
||||
bleys
|
||||
|
||||
|
||||
[homelab:children]
|
||||
NAS
|
||||
cluster
|
||||
|
||||
[VPS]
|
||||
merlin
|
||||
|
||||
[region:children]
|
||||
homelab
|
||||
VPS
|
||||
production
|
||||
|
||||
[production]
|
||||
oscar
|
||||
merlin
|
||||
#gerard
|
||||
bleys
|
||||
oberon
|
||||
|
||||
[staging]
|
||||
|
@ -6,10 +6,8 @@
|
||||
|
||||
- hosts: all
|
||||
remote_user: root
|
||||
vars:
|
||||
ansible_password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/user:root') }}"
|
||||
roles:
|
||||
- ansible_bootstrap
|
||||
|
||||
- remote_user: "{{ user.name }}"
|
||||
import_playbook: site.yml
|
||||
# - remote_user: "{{ user.name }}"
|
||||
# import_playbook: site.yml
|
||||
|
@ -1,37 +1,49 @@
|
||||
---
|
||||
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-arch-provissionning.git
|
||||
roles:
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-arch-provissionning.git
|
||||
scm: git
|
||||
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-role-postgresql.git
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-postgresql.git
|
||||
scm: git
|
||||
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-role-sssd
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-sssd
|
||||
scm: git
|
||||
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible_bootstrap.git
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible_bootstrap.git
|
||||
scm: git
|
||||
- src: ssh://git@git.ducamps.win:2222/ansible-roles/autofs.git
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/autofs.git
|
||||
scm: git
|
||||
- src: ssh://git@git.ducamps.win:2222/ansible-roles/cronie.git
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/cronie.git
|
||||
scm: git
|
||||
- src: ssh://git@git.ducamps.win:2222/ansible-roles/docker.git
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/docker.git
|
||||
scm: git
|
||||
- src: ssh://git@git.ducamps.win:2222/ansible-roles/hass-client-control.git
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/hass-client-control.git
|
||||
scm: git
|
||||
- src: ssh://git@git.ducamps.win:2222/ansible-roles/msmtp.git
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/msmtp.git
|
||||
scm: git
|
||||
- src: ssh://git@git.ducamps.win:2222/ansible-roles/rsyncd.git
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/rsyncd.git
|
||||
scm: git
|
||||
- src: ssh://git@git.ducamps.win:2222/ansible-roles/system.git
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/system.git
|
||||
scm: git
|
||||
- src: ssh://git@git.ducamps.win:2222/ansible-roles/user_config.git
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/user_config.git
|
||||
scm: git
|
||||
- src: https://github.com/githubixx/ansible-role-wireguard.git
|
||||
- src: git@github.com:vincentDcmps/ansible-role-wireguard.git
|
||||
scm: git
|
||||
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-consul.git
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-consul.git
|
||||
scm: git
|
||||
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-hashicorp-vault.git
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-hashicorp-vault.git
|
||||
scm: git
|
||||
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-nomad.git
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-nomad.git
|
||||
scm: git
|
||||
- src: ssh://git@git.ducamps.win:2222/ansible-roles/mpd.git
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/mpd.git
|
||||
scm: git
|
||||
- src: ssh://git@git.ducamps.win:2222/ansible-roles/ansible-dhcpd.git
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-dhcpd.git
|
||||
scm: git
|
||||
- src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-user.git
|
||||
scm: git
|
||||
- src: git@github.com:vincentDcmps/ansible-role-nfs.git
|
||||
scm: git
|
||||
- src: git@github.com:vincentDcmps/ansible-role-nut.git
|
||||
scm: git
|
||||
- src: git@git.ducamps.eu:2222/ansible-roles/ansible-role-pureftpd.git
|
||||
scm: git
|
||||
- src: https://github.com/PowerDNS/pdns_recursor-ansible.git
|
||||
collections:
|
||||
- name: vladgh.samba
|
||||
|
@ -1,9 +1,10 @@
|
||||
---
|
||||
- import_playbook: playbooks/server.yml
|
||||
- import_playbook: playbooks/wireguard.yml
|
||||
- import_playbook: playbooks/dhcpd.yml
|
||||
- import_playbook: playbooks/dns.yml
|
||||
- import_playbook: playbooks/HashicorpStack.yml
|
||||
- import_playbook: playbooks/nas.yml
|
||||
- import_playbook: playbooks/autofs.yml
|
||||
- import_playbook: playbooks/sssd.yml
|
||||
- import_playbook: playbooks/database.yml
|
||||
- import_playbook: playbooks/rsyncd.yml
|
||||
- import_playbook: playbooks/music-player.yml
|
||||
- import_playbook: playbooks/dhcpd.yml
|
||||
- import_playbook: playbooks/user_config.yml
|
||||
|
@ -1,13 +1,44 @@
|
||||
[DNS]
|
||||
oscar-dev
|
||||
|
||||
[database_active]
|
||||
oscar-dev
|
||||
|
||||
[database_standby]
|
||||
gerard-dev
|
||||
|
||||
[database:children]
|
||||
database_active
|
||||
database_standby
|
||||
|
||||
[wireguard:children]
|
||||
staging
|
||||
|
||||
[NAS]
|
||||
nas-dev
|
||||
|
||||
[cluster]
|
||||
oscar-dev
|
||||
gerard-dev
|
||||
|
||||
[homelab:children]
|
||||
NAS
|
||||
cluster
|
||||
|
||||
[VPS]
|
||||
VMDR
|
||||
merlin-dev
|
||||
|
||||
[dhcp]
|
||||
VMAS-BUILD
|
||||
|
||||
[VMServer]
|
||||
VMAS-HML
|
||||
[region:children]
|
||||
homelab
|
||||
VPS
|
||||
staging
|
||||
|
||||
|
||||
[wireguard]
|
||||
VMDR
|
||||
|
||||
[staging]
|
||||
oscar-dev
|
||||
gerard-dev
|
||||
merlin-dev
|
||||
nas-dev
|
||||
|
||||
[production]
|
||||
|
@ -6,15 +6,16 @@
|
||||
"tags": [
|
||||
"homer.enable=true",
|
||||
"homer.name=Diskstation",
|
||||
"homer.url=https://syno.ducamps.win",
|
||||
"homer.logo=https://syno.ducamps.win/webman/resources/images/icon_dsm_96.png",
|
||||
"homer.url=https://syno.ducamps.eu",
|
||||
"homer.logo=https://syno.ducamps.eu/webman/resources/images/icon_dsm_96.png",
|
||||
"homer.service=Application",
|
||||
"homer.target=_blank",
|
||||
|
||||
"traefik.enable=true",
|
||||
"traefik.http.routers.syno.rule=Host(`syno.ducamps.win`)",
|
||||
"traefik.http.routers.syno.tls.domains[0].sans=syno.ducamps.win",
|
||||
"traefik.http.routers.syno.tls.certresolver=myresolver"
|
||||
"traefik.http.routers.syno.rule=Host(`syno.ducamps.eu`)",
|
||||
"traefik.http.routers.syno.tls.domains[0].sans=syno.ducamps.eu",
|
||||
"traefik.http.routers.syno.tls.certresolver=myresolver",
|
||||
"traefik.http.routers.syno.entrypoints=web,websecure"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
35
docs/ADR/001-Development-environment.md
Normal file
35
docs/ADR/001-Development-environment.md
Normal file
@ -0,0 +1,35 @@
|
||||
# 001 Development environment
|
||||
|
||||
## Status
|
||||
|
||||
Accepted
|
||||
|
||||
## Context
|
||||
|
||||
we need to create a virtual cluster to do test without impact on production.
|
||||
|
||||
### Virtualisation or Container
|
||||
|
||||
Virtualisation provides better isolation but more resources are needed.
|
||||
Containers make it possible to create more instances without consuming as many resources as virtual machines.
|
||||
|
||||
### Creation Wrapper
|
||||
|
||||
Vagrant is good to manage virtual machines but not many LXC boxes are available; Vagrant can be used with configuration managers other than Ansible.
|
||||
Molecule can manage LXC containers with the molecule-LXD plugin; Molecule is an Ansible-exclusive solution.
|
||||
|
||||
## Decision
|
||||
|
||||
we will use containers instead of VMs for the resource consumption advantage.
|
||||
|
||||
The Molecule wrapper will be used because all our configuration is already provided by Ansible, and we have a better choice of containers with Molecule than with Vagrant.
|
||||
|
||||
25/08/2023
|
||||
|
||||
some issues were met with LXC (shared kernel, privileges, plugin not maintained)
|
||||
I have increased the RAM on my computer so I can switch to virtual machines for the dev environment
|
||||
instead of building Vagrant VMs in a Molecule playbook, we only use a Vagrantfile to avoid too many overlays to maintain.
|
||||
|
||||
## Consequences
|
||||
|
||||
migrate Molecule provisioning to a dedicated Vagrantfile
|
28
docs/ADR/002-Vault-Backend.md
Normal file
28
docs/ADR/002-Vault-Backend.md
Normal file
@ -0,0 +1,28 @@
|
||||
# 002-Vault-Backend
|
||||
|
||||
## Status
|
||||
|
||||
## Context
|
||||
|
||||
Currently vault Backend is onboard in Consul KV
|
||||
The Hashicorp recommendation is to use integrated storage from the Vault cluster
|
||||
This could remove the Consul dependency on rebuild
|
||||
|
||||
## Decision
|
||||
|
||||
migrate to vault integrated storage
|
||||
|
||||
## Consequences
|
||||
|
||||
to do:
|
||||
|
||||
- [migration plan]("https://developer.hashicorp.com/vault/tutorials/raft/raft-migration")
|
||||
|
||||
1. basculer oscar,gerard et bleys and itegrated storage merlin restera en storage consul pendant l'opé avant décom
|
||||
2. stoper le service vault sur oscar
|
||||
3. lancer la commande de migration
|
||||
4. joindre les autre node au cluster
|
||||
5. décom vault sur merlin
|
||||
6. adapter job backup
|
||||
|
||||
- [backup]("https://developer.hashicorp.com/vault/tutorials/standard-procedures/sop-backup")
|
54
docs/ADR/003-mailserver.md
Normal file
54
docs/ADR/003-mailserver.md
Normal file
@ -0,0 +1,54 @@
|
||||
# 003-mailserver
|
||||
|
||||
## Status
|
||||
|
||||
done
|
||||
|
||||
## Context
|
||||
|
||||
Gandi free email will become a paid service in 2 months.
|
||||
|
||||
In this condition it will be interesting to study selfhosted mail solution.
|
||||
|
||||
### domain name
|
||||
|
||||
do I take advantage of this to change domaine name:
|
||||
|
||||
Pro:
|
||||
|
||||
- could test more easy
|
||||
- could redirect the old domain name to the new one until the end of the Gandi domain (2026)
|
||||
- get a more "normal" extension
|
||||
|
||||
con:
|
||||
|
||||
- need to progressively update every personal account
|
||||
|
||||
### Container localisation
|
||||
|
||||
on hetzner:
|
||||
|
||||
- need to increase memory
|
||||
|
||||
on homelab:
|
||||
|
||||
- need to redirect all server traffic to hetzner to be sure that mail will be sent with the hetzner IP (control PTR on this IP)
|
||||
- hetzner will be too a SPOF
|
||||
|
||||
### software choose
|
||||
|
||||
mail server will run in nomad cluster.
|
||||
|
||||
docker-mailserver -> 1 container
|
||||
mailu
|
||||
|
||||
## Decision
|
||||
|
||||
we will switch to another domain name on "https://www.bookmyname.com/": "ducamps.eu"
|
||||
docker-mailserver will be easier to configure because there is only one container to migrate to nomad
|
||||
for begining container will be launch on hetzner
|
||||
|
||||
## Consequences
|
||||
|
||||
- need to buy a new domaine name and configure DNS (done)
|
||||
- inprove memory on corwin (done)
|
117
docs/ADR/004-DNS.md
Normal file
117
docs/ADR/004-DNS.md
Normal file
@ -0,0 +1,117 @@
|
||||
# DNS
|
||||
|
||||
## 001 Recursor out off NAS
|
||||
|
||||
### Status
|
||||
|
||||
done
|
||||
|
||||
### Context
|
||||
|
||||
curently main local domain DNS is located on NAS.
|
||||
|
||||
goal:
|
||||
|
||||
- avoid a DNS outage in case of NAS reboot (my Synology is 10 years old and is a little slow to reboot); moreover, during a NAS reboot we lose the adblock DNS in the nomad cluster because nomad depends on the NFS share.
|
||||
- remove the direct redirection to service.consul DNS and the IPTABLE rule use to redirect port 53 on consul on gerard instead new DNS could be forward directly to an active consul node on port 8300
|
||||
|
||||
#### DNS software
|
||||
|
||||
need DHCP Dynamic update
|
||||
could redirect domain on other port than port 53
|
||||
|
||||
### Decision
|
||||
|
||||
we will migrate Main Domain DNS from NAS to gerard (powerDNS)
|
||||
powerDNS provides two distinct binaries: one for the authority server and another for the recursor
|
||||
the goal is to first migrate the recursive part from Synology to a physical service
|
||||
and in a second step migrate the authority server into the nomad cluster
|
||||
|
||||
### Consequences
|
||||
|
||||
before to move authority server need to remove DB dns dependance (create db consul services)
|
||||
need to delete the iptable rule on gerard before deploy
|
||||
|
||||
## 002 each node request self consul client for consul dns query
|
||||
|
||||
### Status
|
||||
|
||||
done
|
||||
|
||||
### Context
|
||||
|
||||
to avoid a cluster failure in case the DNS recursor is down.
|
||||
I would like that each cluster client request their own consul client
|
||||
first to resolve consul DNS query
|
||||
|
||||
### Decision
|
||||
|
||||
Implement sytemd-resolved on all cluster member and add a DNS redirection
|
||||
|
||||
### Consequences
|
||||
|
||||
need to modify the ansible system role for systemd-resolved activation, and the consul role to configure the redirection
|
||||
|
||||
## 003 migrate authority DNS from NAS to cluster
|
||||
|
||||
### Status
|
||||
|
||||
done
|
||||
|
||||
### Context
|
||||
|
||||
we have curently three authority domain on NAS:
|
||||
|
||||
- ducamps.win
|
||||
- ducamps.eu
|
||||
- lan.ducamps.eu
|
||||
|
||||
we could migrate authority DNS in cluster
|
||||
ducamps.win and ducamps.eu are only use for application access so no dependence with cluster build
|
||||
need to study cluster build dependance for lan.ducamps.eu-> in every case in case of build from scratch need to use IP
|
||||
need keepalive IP and check if no conflict if store on same machine than pihole->ok don't need to listen on 53 only request by recursor
|
||||
DNS authority will dependant to storage (less problematic than recursor)
|
||||
|
||||
### Decision
|
||||
|
||||
### Consequences
|
||||
|
||||
## 004 migrate recurson in cluster
|
||||
|
||||
### Status
|
||||
|
||||
done
|
||||
|
||||
### Context
|
||||
|
||||
now that the cluster doesn't depend on the recursor, because each node requests its own consul agent for consul queries,
|
||||
we need to study if we can migrate the recursor into nomad without breaking the dependency
|
||||
advantage:
|
||||
|
||||
- recursor could change client in case of faillure
|
||||
|
||||
agains:
|
||||
|
||||
- this job need a keepalive IP like pihole
|
||||
- *loss recursor if lost nomad cluster*
|
||||
|
||||
### Decision
|
||||
|
||||
put one recursor on the cluster alongside the authority server and keep the recursor on gerard for better redundancy
|
||||
|
||||
### Consequences
|
||||
|
||||
|
||||
## 005 physical Recursor location
|
||||
|
||||
### Status
|
||||
|
||||
done
|
||||
|
||||
### Context
|
||||
|
||||
following the NAS migration, the physical DNS recursor was installed directly on the NAS; this brings a SPOF: when the NAS fails, the recursors on the Nomad cluster are stopped because of the volume dependency
|
||||
|
||||
### Decision
|
||||
|
||||
Put the physical recursor on a cluster node; that way, to have a DNS issue we would need both the NAS and this nomad node down at the same time
|
42
docs/ADR/005-NAS.md
Normal file
42
docs/ADR/005-NAS.md
Normal file
@ -0,0 +1,42 @@
|
||||
# NAS
|
||||
|
||||
## 001 New Nas spec
|
||||
|
||||
### Status
|
||||
|
||||
In progress
|
||||
|
||||
### Context
|
||||
|
||||
Storage:
|
||||
|
||||
- Data filesytem will be in btrfs.
|
||||
- Study if keep root filesystem in EXT4.
|
||||
- Need to use LVM over btrfs added posibility to add cache later (cache on cold data useless on beginning maybe write cache in future use).
|
||||
- hot Data (nomad, document,fresh download file,music?) on SSD cold DATA on HDD (film, serie photo)
|
||||
- at least 2 HDD and 2 SSD
|
||||
|
||||
|
||||
|
||||
Hardware:
|
||||
|
||||
- network 2.5 gpbs will be good for evolve
|
||||
- at least 4go ram (expansive will be appreciable)
|
||||
|
||||
Software:
|
||||
|
||||
be able to install custom linux distrib
|
||||
|
||||
### Decision
|
||||
|
||||
- Due to form factor/consumption and SSD capability, my choice is the ASUSTOR Nimbustor 2 Gen 2 AS5402; it corresponds to the need and is less expensive than a DIY NAS
|
||||
- buy only a new ssd of 2to in more to store system and hot data
|
||||
|
||||
### Consequence
|
||||
|
||||
need to migrate Data and keep same disk
|
||||
|
||||
- install system
|
||||
- copy all data from 2to HDD to SSD then format 2to HDD
|
||||
- copy download data to FROM 4 to HDD to SSD
|
||||
- copy serie to 2to HDD and copy film on external harddrive
|
25
docs/ADR/006-Docker-pull-through
Normal file
25
docs/ADR/006-Docker-pull-through
Normal file
@ -0,0 +1,25 @@
|
||||
# Docker Pull throught
|
||||
|
||||
# 001 architecture consideration
|
||||
|
||||
## Status
|
||||
|
||||
Accepted
|
||||
|
||||
## Context
|
||||
|
||||
docker hub has a pull limit; if something goes wrong on our infrastructure we can quickly hit this limit. The solution is to implement a pull-through proxy.
|
||||
|
||||
|
||||
### Decision
|
||||
|
||||
create two container task to create a dockerhub pull through and a ghcr one
|
||||
|
||||
we can add these registries to traefik to have both under port 5000, but this would add a traefik dependency on rebuild
|
||||
|
||||
so to begin we will use one traefik service on two different static ports
|
||||
|
||||
## Consequences
|
||||
|
||||
- this registry need to be start first on cluster creation
|
||||
- need to update all job image with local proxy url
|
36
docs/Concepts/DNS.md
Normal file
36
docs/Concepts/DNS.md
Normal file
@ -0,0 +1,36 @@
|
||||
# Architecture DNS
|
||||
|
||||
```mermaid
|
||||
flowchart LR
|
||||
subgraph External
|
||||
externalRecursor[recursor]
|
||||
GandiDns[ hetzner ducamps.win]
|
||||
end
|
||||
subgraph Internal
|
||||
pihole[pihole]--ducamps.win-->NAS
|
||||
pihole--service.consul-->consul[consul cluster]
|
||||
pihole--->recursor
|
||||
recursor--service.consul-->consul
|
||||
DHCP --dynamic update--> NAS
|
||||
NAS
|
||||
recursor--ducamps.win-->NAS
|
||||
consul--service.consul--->consul
|
||||
clients--->pihole
|
||||
clients--->recursor
|
||||
end
|
||||
pihole --> externalRecursor
|
||||
recursor-->External
|
||||
```
|
||||
|
||||
## Detail
|
||||
|
||||
Pihole container in nomad cluster is set as primary DNS as add blocker secondary DNS recursore is locate on gerard
|
||||
|
||||
DNS locate on NAS manage domain *ducamps.win* on local network each recursor forward each request on *ducamps.win* to this DNS.
|
||||
|
||||
Each DNS forward *service.consul* request to the consul cluster.
|
||||
Each consul node have a consul redirection in systemd-resolved to theire own consul client
|
||||
|
||||
a DHCP service is set to do dynamic update on NAS DNS on lease delivery
|
||||
|
||||
external recursor are set on pihole on cloudflare and FDN in case of recursors faillure
|
11
docs/How-to/Add a new job.md
Normal file
11
docs/How-to/Add a new job.md
Normal file
@ -0,0 +1,11 @@
|
||||
# Add a new job
|
||||
|
||||
## Create Nomad job
|
||||
|
||||
## Add secret to vault
|
||||
|
||||
## Add a new policy to Vault terraform
|
||||
|
||||
## Add Database creation in ansible variable (if neeeded)
|
||||
|
||||
## Create CNAME in local DNS and External if needed
|
25
docs/How-to/ansible_vault.md
Normal file
25
docs/How-to/ansible_vault.md
Normal file
@ -0,0 +1,25 @@
|
||||
# ansible vault management
|
||||
|
||||
ansible password are encoded with a gpg key store in ansible/misc
|
||||
to renew the password, follow this workflow
|
||||
|
||||
```sh
|
||||
# Generate a new password for the default vault
|
||||
pwgen -s 64 default-pw
|
||||
|
||||
# Re-encrypt all default vaults
|
||||
ansible-vault rekey --new-vault-password-file ./default-pw \
|
||||
$(git grep -l 'ANSIBLE_VAULT;1.1;AES256$')
|
||||
|
||||
# Save the new password in encrypted form
|
||||
# (replace "RECIPIENT" with your email)
|
||||
gpg -r RECIPIENT -o misc/vault--password.gpg -e default-pw
|
||||
|
||||
# Ensure the new password is usable
|
||||
ansible-vault view misc/vaults/vault_hcloud.yml
|
||||
|
||||
# Remove the unencrypted password file
|
||||
rm new-default-pw
|
||||
```
|
||||
|
||||
script `vault-keyring-client.sh` is set in ansible.cfg as vault_password_file to decrypt the gpg file
|
8
docs/How-to/troubleshoot.md
Normal file
8
docs/How-to/troubleshoot.md
Normal file
@ -0,0 +1,8 @@
|
||||
# Troubleshooting
|
||||
|
||||
## issue with SMTP traefik port
|
||||
|
||||
ensure that no other traefik router (HTTP or TCP) is listening on smtp or
|
||||
all entrypoints; this can perturb the smtp TLS connection
|
||||
see [https://doc.traefik.io/traefik/routing/routers/#entrypoints_1](here)
|
||||
|
23
docs/Installation/Bootstrap-dev.md
Normal file
23
docs/Installation/Bootstrap-dev.md
Normal file
@ -0,0 +1,23 @@
|
||||
# How to Bootstrap dev env
|
||||
|
||||
## prerequisite
|
||||
|
||||
the dev environment is managed by a molecule job which launches containers via LXD; you need the following software to launch it:
|
||||
|
||||
- LXD server up on your local machine
|
||||
- molecule install ```pip install molecule```
|
||||
- molecule-LXD plugins ```pip install molecule-lxd```
|
||||
|
||||
## provisioning
|
||||
|
||||
you can launch ```make create-dev``` on root project
|
||||
|
||||
molecule will create 3 container on different distribution
|
||||
|
||||
- archlinux
|
||||
- rockylinux 9
|
||||
- debian 11
|
||||
|
||||
To bootstrap the container (base account, sudo configuration) role [ansible_bootstrap](https://git.ducamps.win/ansible-roles/ansible_bootstrap) will be apply
|
||||
|
||||
Converge step call playbook [site.yml](https://git.ducamps.win/vincent/homelab/src/commit/c5ff235b9768d91b240ec97e7ff8e2ad5a9602ca/ansible/site.yml) to provission the cluster
|
3
docs/index.md
Normal file
3
docs/index.md
Normal file
@ -0,0 +1,3 @@
|
||||
--8<--
|
||||
README.md
|
||||
--8<--
|
@ -1,23 +0,0 @@
|
||||
# This file is maintained automatically by "terraform init".
|
||||
# Manual edits may be lost in future updates.
|
||||
|
||||
provider "registry.terraform.io/hetznercloud/hcloud" {
|
||||
version = "1.33.2"
|
||||
hashes = [
|
||||
"h1:3Hx8p9LbcnHfBhy3nT7+unlc5rwkiSZjLt9SVQOSpB8=",
|
||||
"zh:0a5d0f332d7dfe77fa27301094af98a185aabfb9f56d71b81936e03211e4d66f",
|
||||
"zh:0e047859ee7296f335881933ccf8ce8c07aa47bef56d5449a81b85a2d9dac93a",
|
||||
"zh:1d3d0896f518df9e245c3207ed231e528f5dcfe628508e7c3ceba4a2bfefaa7a",
|
||||
"zh:1d7a31c8c490512896ce327ab220e950f1a2e30ee83cc2e58e69bbbfbbb87e72",
|
||||
"zh:67cbb2492683cb22f6c54f26bee72aec140c8dd2d0881b2815d2ef80959fc751",
|
||||
"zh:771062815e662979204ac2dc91c34c893f27670d67e02370e48124483d3c9838",
|
||||
"zh:957ebb146898cd059c0cc8b4c32e574b61041d8b6a11cd854b3cc1d3baaeb3a9",
|
||||
"zh:95dbd8634000b979213cb97b5d869cad78299ac994d0665d150c8dafc1390429",
|
||||
"zh:a21b22b2e9d835e1b8b3b7e0b41a4d199171d62e9e9be78c444c700e96b31316",
|
||||
"zh:aead1ba50640a51f20d574374f2c6065d9bfa4eea5ef044d1475873c33e58239",
|
||||
"zh:cefabd0a78af40ea5cd08e1ca436c753df9b1c6496eb27281b755a2de1f167ab",
|
||||
"zh:d98cffc5206b9a7550a23e13031a6f53566bd1ed3bf65314bc55ef12404d49ce",
|
||||
"zh:dddaaf95b6aba701153659feff12c7bce6acc78362cb5ff8321a1a1cbf780cd9",
|
||||
"zh:fd662b483250326a1bfbe5684c22c5083955a43e0773347eea35cd4c2cfe700e",
|
||||
]
|
||||
}
|
@ -1,24 +0,0 @@
|
||||
resource "hcloud_server" "HomeLab" {
|
||||
count = var.instances
|
||||
name = "merlin"
|
||||
image = var.os_type
|
||||
server_type = var.server_type
|
||||
location = var.location
|
||||
ssh_keys = [hcloud_ssh_key.default.id]
|
||||
firewall_ids = [hcloud_firewall.prod.id]
|
||||
labels = {
|
||||
}
|
||||
|
||||
}
|
||||
resource "hcloud_server" "HomeLab2" {
|
||||
count = var.instances
|
||||
name = "corwin"
|
||||
image = "rocky-9"
|
||||
server_type = var.server_type
|
||||
location = var.location
|
||||
ssh_keys = [hcloud_ssh_key.default.id]
|
||||
firewall_ids = [hcloud_firewall.prod.id]
|
||||
labels = {
|
||||
}
|
||||
|
||||
}
|
28
makefile
28
makefile
@ -9,3 +9,31 @@ vault-dev:
|
||||
else \
|
||||
./vault/standalone_vault.sh $(FILE);\
|
||||
fi
|
||||
|
||||
vagranup:
|
||||
vagrant up
|
||||
|
||||
create-dev: vagranup DNS-stagging
|
||||
make -C ansible deploy_staging
|
||||
make -C terraform deploy_vault env=staging
|
||||
VAULT_TOKEN=$(shell cat ~/vaultUnseal/staging/rootkey) python ./script/generate-vault-secret
|
||||
|
||||
create-dev-base: vagranup DNS-stagging
|
||||
make -C ansible deploy_staging_base
|
||||
|
||||
|
||||
destroy-dev:
|
||||
vagrant destroy --force
|
||||
|
||||
serve:
|
||||
mkdocs serve
|
||||
|
||||
DNS-stagging:
|
||||
$(eval dns := $(shell dig oscar-dev.lan.ducamps.dev +short))
|
||||
$(eval dns1 := $(shell dig nas-dev.lan.ducamps.dev +short))
|
||||
sudo resolvectl dns virbr2 "$(dns)" "$(dns1)";sudo resolvectl domain virbr2 "~consul";sudo systemctl restart systemd-resolved.service
|
||||
|
||||
|
||||
DNS-production:
|
||||
sudo resolvectl dns virbr2 "";sudo resolvectl domain virbr2 "";sudo systemctl restart systemd-resolved.service
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user