Compare commits
No commits in common. "master" and "nextcloud" have entirely different histories.
.drone.yml
@@ -1,66 +0,0 @@
---
kind: pipeline
type: docker
name: lint

steps:
  - name: yaml linting
    image: pipelinecomponents/yamllint
    commands:
      - yamllint .
  - name: markdown linting
    image: 06kellyjac/markdownlint-cli
    commands:
      - markdownlint . --config .markdownlint.yaml

---
kind: pipeline
type: docker
name: test build

steps:
  - name: build
    image: squidfunk/mkdocs-material
    commands:
      - mkdocs build --clean --strict --verbose --site-dir build

trigger:
  event:
    exclude:
      - push

---
kind: pipeline
type: docker
name: deploy

steps:
  - name: build
    image: squidfunk/mkdocs-material
    commands:
      - mkdocs build --clean --strict --verbose --site-dir homelab

  - name: deploy
    image: appleboy/drone-scp
    when:
      status:
        - success
    settings:
      host: www.service.consul
      user: drone-deploy
      overwrite: true
      key:
        from_secret: dronePrivateKey
      target: /srv/http
      source: homelab

trigger:
  branch:
    - master
  event:
    - push

---
kind: secret
name: dronePrivateKey
get:
  path: secrets/data/droneci/keyRSA
  name: dronePrivateKey
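Before pushing, the lint pipeline above can usually be exercised locally; this is a convenience sketch assuming a recent drone-cli and a local Docker daemon (the CLI itself is not part of this repository):

```sh
# run only the "lint" pipeline of .drone.yml locally (assumes drone-cli + Docker)
drone exec --pipeline lint .drone.yml
```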
.gitignore
@@ -1,41 +0,0 @@
nohup.out

# terraform gitignore
# Local .terraform directories
**/.terraform/*

# .tfstate files
*.tfstate
*.tfstate.*

# Crash log files
crash.log
crash.*.log

# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
*.tfvars
*.tfvars.json

# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json

# Include override files you do wish to add to version control using negated pattern
# !example_override.tf

# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*

# Ignore CLI configuration files
.terraformrc
terraform.rc
site

.vagrant
.markdownlint.yaml
@@ -1,10 +0,0 @@
---
# Default state for all rules
default: true
MD009:
  strict: false

MD013: false
MD033: false
MD024: false
MD041: false
.yamllint
@@ -1,33 +0,0 @@
---
# Based on ansible-lint config
extends: default

rules:
  braces:
    max-spaces-inside: 1
    level: error
  brackets:
    max-spaces-inside: 1
    level: error
  colons:
    max-spaces-after: -1
    level: error
  commas:
    max-spaces-after: -1
    level: error
  comments: disable
  comments-indentation: disable
  document-start: disable
  empty-lines:
    max: 3
    level: error
  hyphens:
    level: error
  indentation: disable
  key-duplicates: enable
  line-length: disable
  new-line-at-end-of-file: disable
  new-lines:
    type: unix
  trailing-spaces: disable
  truthy: disable
README.md
@@ -1,48 +0,0 @@
# Homelab

This repository contains my homelab Infrastructure as Code.

This homelab is built on the HashiCorp software stack:

- Nomad
- Consul
- Vault

## Dev

The dev stack is built on Vagrant boxes using the libvirt provider.

It currently needs the production Vault and LDAP to be up in order to be provisioned correctly.

To launch the dev stack provisioning:

```sh
make create-dev
```
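The `Makefile` itself is not part of this diff, so the exact recipe behind `create-dev` is an assumption; based on the Vagrantfile below it presumably boils down to:

```sh
# hypothetical expansion of `make create-dev` (the Makefile is not shown here)
vagrant up --provider=libvirt   # boots oscar-dev, merlin-dev, gerard-dev and nas-dev
# each box is then provisioned by ansible/playbooks/bootstrap.yml (see Vagrantfile)
```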
## Rebuild

## Architecture

```mermaid
flowchart LR
    subgraph Home
        bleys[bleys]
        oscar[oscar]
        gerard[gerard]
        LAN
        NAS
    end
    subgraph Cloud
        corwin[corwin]
    end
    LAN -- main road --o oscar
    LAN --- bleys
    LAN --- gerard
    LAN --- NAS
    bleys <--wireguard--> corwin
    oscar <--wireguard--> corwin
    gerard <--wireguard--> corwin
    corwin <--> internet
```
Vagrantfile
@@ -1,105 +0,0 @@
Vagrant.configure('2') do |config|
  if Vagrant.has_plugin?('vagrant-cachier')
    config.cache.scope = 'machine'
    config.cache.enable :pacman
  end
  config.vm.provider :libvirt do |libvirt|
    libvirt.management_network_domain = "lan.ducamps.dev"
  end

  config.vm.define "oscar-dev" do |c|
    # Box definition
    c.vm.box = "archlinux/archlinux"
    # Config options
    c.vm.synced_folder ".", "/vagrant", disabled: true
    c.ssh.insert_key = true
    c.vm.hostname = "oscar-dev"
    # Network
    # instance_raw_config_args
    # Provider
    c.vm.provider "libvirt" do |libvirt, override|
      libvirt.memory = 2048
      libvirt.cpus = 2
    end
    c.vm.provision "ansible" do |bootstrap|
      bootstrap.playbook = "ansible/playbooks/bootstrap.yml"
      bootstrap.galaxy_roles_path = "ansible/roles"
      bootstrap.limit = "oscar-dev"
      bootstrap.extra_vars = { ansible_python_interpreter: "/usr/bin/python3" }
    end
  end

  config.vm.define "merlin-dev" do |c|
    # Box definition
    c.vm.box = "archlinux/archlinux"
    # Config options
    c.vm.synced_folder ".", "/vagrant", disabled: true
    c.ssh.insert_key = true
    c.vm.hostname = "merlin-dev"
    # Network
    # instance_raw_config_args
    # Provider
    c.vm.provider "libvirt" do |libvirt, override|
      libvirt.memory = 512
      libvirt.cpus = 2
    end
    c.vm.provision "ansible" do |bootstrap|
      bootstrap.playbook = "ansible/playbooks/bootstrap.yml"
      bootstrap.galaxy_roles_path = "ansible/roles"
      bootstrap.limit = "merlin-dev"
      bootstrap.extra_vars = { ansible_python_interpreter: "/usr/bin/python3" }
    end
  end

  config.vm.define "gerard-dev" do |c|
    # Box definition
    c.vm.box = "archlinux/archlinux"
    # Config options
    c.vm.synced_folder ".", "/vagrant", disabled: true
    c.ssh.insert_key = true
    c.vm.hostname = "gerard-dev"
    # Network
    # instance_raw_config_args
    # Provider
    c.vm.provider "libvirt" do |libvirt, override|
      libvirt.memory = 2048
      libvirt.cpus = 2
    end
    c.vm.provision "ansible" do |bootstrap|
      bootstrap.playbook = "ansible/playbooks/bootstrap.yml"
      bootstrap.galaxy_roles_path = "ansible/roles"
      bootstrap.limit = "gerard-dev"
      bootstrap.extra_vars = { ansible_python_interpreter: "/usr/bin/python3" }
    end
  end

  config.vm.define "nas-dev" do |c|
    # Box definition
    c.vm.box = "archlinux/archlinux"
    # Config options
    c.vm.synced_folder ".", "/vagrant", disabled: true
    c.ssh.insert_key = true
    c.vm.hostname = "nas-dev"
    # Network
    # instance_raw_config_args
    # Provider
    c.vm.provider "libvirt" do |libvirt, override|
      libvirt.memory = 2048
      libvirt.cpus = 2
    end
    c.vm.provision "ansible" do |bootstrap|
      bootstrap.playbook = "ansible/playbooks/bootstrap.yml"
      bootstrap.galaxy_roles_path = "ansible/roles"
      bootstrap.limit = "nas-dev"
      bootstrap.extra_vars = { ansible_python_interpreter: "/usr/bin/python3" }
    end
  end
end
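For reference, the usual per-box workflow against this Vagrantfile is the standard Vagrant CLI (nothing repo-specific assumed):

```sh
vagrant up oscar-dev --provider=libvirt   # create the box and run the Ansible bootstrap
vagrant provision oscar-dev               # re-run ansible/playbooks/bootstrap.yml only
vagrant destroy -f oscar-dev              # tear the box down
```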
.ansible-lint
@@ -1,2 +0,0 @@
skip_list:
  - 'fqcn-builtins'
ansible.cfg
@@ -1,495 +0,0 @@
# config file for ansible -- https://ansible.com/
# ===============================================

# nearly all parameters can be overridden in ansible-playbook
# or with command line flags. ansible will read ANSIBLE_CONFIG,
# ansible.cfg in the current working directory, .ansible.cfg in
# the home directory or /etc/ansible/ansible.cfg, whichever it
# finds first

[defaults]
# some basic default values...

#inventory = /etc/ansible/hosts
#library = /usr/share/my_modules/
#module_utils = /usr/share/my_module_utils/
#remote_tmp = ~/.ansible/tmp
#local_tmp = ~/.ansible/tmp
#plugin_filters_cfg = /etc/ansible/plugin_filters.yml
#forks = 5
#poll_interval = 15
#sudo_user = root
#ask_sudo_pass = True
#ask_pass = True
#transport = smart
#remote_port = 22
#module_lang = C
#module_set_locale = False

# plays will gather facts by default, which contain information about
# the remote system.
#
# smart - gather by default, but don't regather if already gathered
# implicit - gather by default, turn off with gather_facts: False
# explicit - do not gather by default, must say gather_facts: True
#gathering = implicit

# This only affects the gathering done by a play's gather_facts directive,
# by default gathering retrieves all facts subsets
# all - gather all subsets
# network - gather min and network facts
# hardware - gather hardware facts (longest facts to retrieve)
# virtual - gather min and virtual facts
# facter - import facts from facter
# ohai - import facts from ohai
# You can combine them using comma (ex: network,virtual)
# You can negate them using ! (ex: !hardware,!facter,!ohai)
# A minimal set of facts is always gathered.
#gather_subset = all

# some hardware related facts are collected
# with a maximum timeout of 10 seconds. This
# option lets you increase or decrease that
# timeout to something more suitable for the
# environment.
# gather_timeout = 10

# Ansible facts are available inside the ansible_facts.* dictionary
# namespace. This setting maintains the behaviour which was the default prior
# to 2.5, duplicating these variables into the main namespace, each with a
# prefix of 'ansible_'.
# This variable is set to True by default for backwards compatibility. It
# will be changed to a default of 'False' in a future release.
# ansible_facts.
# inject_facts_as_vars = True

# additional paths to search for roles in, colon separated
roles_path = roles

# uncomment this to disable SSH key host checking
host_key_checking = False

# change the default callback, you can only have one 'stdout' type enabled at a time.
#stdout_callback = skippy


## Ansible ships with some plugins that require whitelisting,
## this is done to avoid running all of a type by default.
## These setting lists those that you want enabled for your system.
## Custom plugins should not need this unless plugin author specifies it.

# enable callback plugins, they can output to stdout but cannot be 'stdout' type.
#callback_whitelist = timer, mail

# Determine whether includes in tasks and handlers are "static" by
# default. As of 2.0, includes are dynamic by default. Setting these
# values to True will make includes behave more like they did in the
# 1.x versions.
#task_includes_static = False
#handler_includes_static = False

# Controls if a missing handler for a notification event is an error or a warning
#error_on_missing_handler = True

# change this for alternative sudo implementations
#sudo_exe = sudo

# What flags to pass to sudo
# WARNING: leaving out the defaults might create unexpected behaviours
#sudo_flags = -H -S -n

# SSH timeout
timeout = 30

# default user to use for playbooks if user is not specified
# (/usr/bin/ansible will use current user as default)
#remote_user = root

# logging is off by default unless this path is defined
# if so defined, consider logrotate
#log_path = /var/log/ansible.log

# default module name for /usr/bin/ansible
#module_name = command

# use this shell for commands executed under sudo
# you may need to change this to bin/bash in rare instances
# if sudo is constrained
#executable = /bin/sh

# if inventory variables overlap, does the higher precedence one win
# or are hash values merged together? The default is 'replace' but
# this can also be set to 'merge'.
#hash_behaviour = merge

# by default, variables from roles will be visible in the global variable
# scope. To prevent this, the following option can be enabled, and only
# tasks and handlers within the role will see the variables there
#private_role_vars = yes

# list any Jinja2 extensions to enable here:
#jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n

# if set, always use this private key file for authentication, same as
# if passing --private-key to ansible or ansible-playbook
#private_key_file = /path/to/file

# If set, configures the path to the Vault password file as an alternative to
# specifying --vault-password-file on the command line.
vault_password_file = ./misc/vault-keyring-client.sh

# format of string {{ ansible_managed }} available within Jinja2
# templates indicates to users editing templates files will be replaced.
# replacing {file}, {host} and {uid} and strftime codes with proper values.
#ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
# {file}, {host}, {uid}, and the timestamp can all interfere with idempotence
# in some situations so the default is a static string:
#ansible_managed = Ansible managed

# by default, ansible-playbook will display "Skipping [host]" if it determines a task
# should not be run on a host. Set this to "False" if you don't want to see these "Skipping"
# messages. NOTE: the task header will still be shown regardless of whether or not the
# task is skipped.
#display_skipped_hosts = True

# by default, if a task in a playbook does not include a name: field then
# ansible-playbook will construct a header that includes the task's action but
# not the task's args. This is a security feature because ansible cannot know
# if the *module* considers an argument to be no_log at the time that the
# header is printed. If your environment doesn't have a problem securing
# stdout from ansible-playbook (or you have manually specified no_log in your
# playbook on all of the tasks where you have secret information) then you can
# safely set this to True to get more informative messages.
#display_args_to_stdout = False

# by default (as of 1.3), Ansible will raise errors when attempting to dereference
# Jinja2 variables that are not set in templates or action lines. Uncomment this line
# to revert the behavior to pre-1.3.
#error_on_undefined_vars = False

# by default (as of 1.6), Ansible may display warnings based on the configuration of the
# system running ansible itself. This may include warnings about 3rd party packages or
# other conditions that should be resolved if possible.
# to disable these warnings, set the following value to False:
#system_warnings = True

# by default (as of 1.4), Ansible may display deprecation warnings for language
# features that should no longer be used and will be removed in future versions.
# to disable these warnings, set the following value to False:
#deprecation_warnings = True

# (as of 1.8), Ansible can optionally warn when usage of the shell and
# command module appear to be simplified by using a default Ansible module
# instead. These warnings can be silenced by adjusting the following
# setting or adding warn=yes or warn=no to the end of the command line
# parameter string. This will for example suggest using the git module
# instead of shelling out to the git command.
# command_warnings = False

# set plugin path directories here, separate with colons
#action_plugins = /usr/share/ansible/plugins/action
#cache_plugins = /usr/share/ansible/plugins/cache
#callback_plugins = /usr/share/ansible/plugins/callback
#connection_plugins = /usr/share/ansible/plugins/connection
#lookup_plugins = /usr/share/ansible/plugins/lookup
#inventory_plugins = /usr/share/ansible/plugins/inventory
#vars_plugins = /usr/share/ansible/plugins/vars
#filter_plugins = /usr/share/ansible/plugins/filter
#test_plugins = /usr/share/ansible/plugins/test
#terminal_plugins = /usr/share/ansible/plugins/terminal
#strategy_plugins = /usr/share/ansible/plugins/strategy

# by default, ansible will use the 'linear' strategy but you may want to try
# another one
#strategy = free

# by default callbacks are not loaded for /bin/ansible, enable this if you
# want, for example, a notification or logging callback to also apply to
# /bin/ansible runs
#bin_ansible_callbacks = False

# don't like cows? that's unfortunate.
# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1
#nocows = 1

# set which cowsay stencil you'd like to use by default. When set to 'random',
# a random stencil will be selected for each task. The selection will be filtered
# against the `cow_whitelist` option below.
#cow_selection = default
#cow_selection = random

# when using the 'random' option for cowsay, stencils will be restricted to this list.
# it should be formatted as a comma-separated list with no spaces between names.
# NOTE: line continuations here are for formatting purposes only, as the INI parser
# in python does not support them.
#cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\
#              hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\
#              stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www

# don't like colors either?
# set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1
#nocolor = 1

# if set to a persistent type (not 'memory', for example 'redis') fact values
# from previous runs in Ansible will be stored. This may be useful when
# wanting to use, for example, IP information from one group of servers
# without having to talk to them in the same playbook run to get their
# current IP information.
#fact_caching = memory

#This option tells Ansible where to cache facts. The value is plugin dependent.
#For the jsonfile plugin, it should be a path to a local directory.
#For the redis plugin, the value is a host:port:database triplet: fact_caching_connection = localhost:6379:0

#fact_caching_connection=/tmp

# retry files
# When a playbook fails by default a .retry file will be created in ~/
# You can disable this feature by setting retry_files_enabled to False
# and you can change the location of the files by setting retry_files_save_path

retry_files_enabled = False
#retry_files_save_path = ~/.ansible-retry

# squash actions
# Ansible can optimise actions that call modules with list parameters
# when looping. Instead of calling the module once per with_ item, the
# module is called once with all items at once. Currently this only works
# under limited circumstances, and only with parameters named 'name'.
#squash_actions = apk,apt,dnf,homebrew,pacman,pkgng,yum,zypper

# prevents logging of task data, off by default
#no_log = False

# prevents logging of tasks, but only on the targets, data is still logged on the master/controller
#no_target_syslog = False

# controls whether Ansible will raise an error or warning if a task has no
# choice but to create world readable temporary files to execute a module on
# the remote machine. This option is False by default for security. Users may
# turn this on to have behaviour more like Ansible prior to 2.1.x. See
# https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user
# for more secure ways to fix this than enabling this option.
allow_world_readable_tmpfiles = True

# controls the compression level of variables sent to
# worker processes. At the default of 0, no compression
# is used. This value must be an integer from 0 to 9.
#var_compression_level = 9

# controls what compression method is used for new-style ansible modules when
# they are sent to the remote system. The compression types depend on having
# support compiled into both the controller's python and the client's python.
# The names should match with the python Zipfile compression types:
# * ZIP_STORED (no compression. available everywhere)
# * ZIP_DEFLATED (uses zlib, the default)
# These values may be set per host via the ansible_module_compression inventory
# variable
#module_compression = 'ZIP_DEFLATED'

# This controls the cutoff point (in bytes) on --diff for files
# set to 0 for unlimited (RAM may suffer!).
#max_diff_size = 1048576

# This controls how ansible handles multiple --tags and --skip-tags arguments
# on the CLI. If this is True then multiple arguments are merged together. If
# it is False, then the last specified argument is used and the others are ignored.
# This option will be removed in 2.8.
#merge_multiple_cli_flags = True

# Controls showing custom stats at the end, off by default
#show_custom_stats = True

# Controls which files to ignore when using a directory as inventory with
# possibly multiple sources (both static and dynamic)
#inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo

# This family of modules use an alternative execution path optimized for network appliances
# only update this setting if you know how this works, otherwise it can break module execution
#network_group_modules=eos, nxos, ios, iosxr, junos, vyos

# When enabled, this option allows lookups (via variables like {{lookup('foo')}} or when used as
# a loop with `with_foo`) to return data that is not marked "unsafe". This means the data may contain
# jinja2 templating language which will be run through the templating engine.
# ENABLING THIS COULD BE A SECURITY RISK
#allow_unsafe_lookups = False

# set default errors for all plays
#any_errors_fatal = False

[inventory]
# enable inventory plugins, default: 'host_list', 'script', 'yaml', 'ini', 'auto'
#enable_plugins = host_list, virtualbox, yaml, constructed

# ignore these extensions when parsing a directory as inventory source
#ignore_extensions = .pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, ~, .orig, .ini, .cfg, .retry

# ignore files matching these patterns when parsing a directory as inventory source
#ignore_patterns=

# If 'true' unparsed inventory sources become fatal errors, they are warnings otherwise.
#unparsed_is_failed=False

[privilege_escalation]
#become=True
#become_method=sudo
#become_user=root
#become_ask_pass=False

[paramiko_connection]

# uncomment this line to cause the paramiko connection plugin to not record new host
# keys encountered. Increases performance on new host additions. Setting works independently of the
# host key checking setting above.
#record_host_keys=False

# by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this
# line to disable this behaviour.
#pty=False

# paramiko will default to looking for SSH keys initially when trying to
# authenticate to remote devices. This is a problem for some network devices
# that close the connection after a key failure. Uncomment this line to
# disable the Paramiko look for keys function
#look_for_keys = False

# When using persistent connections with Paramiko, the connection runs in a
# background process. If the host doesn't already have a valid SSH key, by
# default Ansible will prompt to add the host key. This will cause connections
# running in background processes to fail. Uncomment this line to have
# Paramiko automatically add host keys.
#host_key_auto_add = True

[ssh_connection]

# ssh arguments to use
# Leaving off ControlPersist will result in poor performance, so use
# paramiko on older platforms rather than removing it, -C controls compression use
ssh_args = -o ForwardAgent=yes -C -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no

# The base directory for the ControlPath sockets.
# This is the "%(directory)s" in the control_path option
#
# Example:
# control_path_dir = /tmp/.ansible/cp
#control_path_dir = ~/.ansible/cp

# The path to use for the ControlPath sockets. This defaults to a hashed string of the hostname,
# port and username (empty string in the config). The hash mitigates a common problem users
# found with long hostnames and the conventional %(directory)s/ansible-ssh-%%h-%%p-%%r format.
# In those cases, a "too long for Unix domain socket" ssh error would occur.
#
# Example:
# control_path = %(directory)s/%%h-%%r
#control_path =

# Enabling pipelining reduces the number of SSH operations required to
# execute a module on the remote server. This can result in a significant
# performance improvement when enabled, however when using "sudo:" you must
# first disable 'requiretty' in /etc/sudoers
#
# By default, this option is disabled to preserve compatibility with
# sudoers configurations that have requiretty (the default on many distros).
#
#pipelining = False

# Control the mechanism for transferring files (old)
# * smart = try sftp and then try scp [default]
# * True = use scp only
# * False = use sftp only
#scp_if_ssh = smart

# Control the mechanism for transferring files (new)
# If set, this will override the scp_if_ssh option
# * sftp = use sftp to transfer files
# * scp = use scp to transfer files
# * piped = use 'dd' over SSH to transfer files
# * smart = try sftp, scp, and piped, in that order [default]
#transfer_method = smart

# if False, sftp will not use batch mode to transfer files. This may cause some
# types of file transfer failures impossible to catch however, and should
# only be disabled if your sftp version has problems with batch mode
#sftp_batch_mode = False

# The -tt argument is passed to ssh when pipelining is not enabled because sudo
# requires a tty by default.
#use_tty = True

# Number of times to retry an SSH connection to a host, in case of UNREACHABLE.
# For each retry attempt, there is an exponential backoff,
# so after the first attempt there is 1s wait, then 2s, 4s etc. up to 30s (max).
#retries = 3

[persistent_connection]

# Configures the persistent connection timeout value in seconds. This value is
# how long the persistent connection will remain idle before it is destroyed.
# If the connection doesn't receive a request before the timeout value
# expires, the connection is shutdown. The default value is 30 seconds.
#connect_timeout = 30

# Configures the persistent connection retry timeout. This value configures
# the retry timeout that ansible-connection will wait to connect
# to the local domain socket. This value must be larger than the
# ssh timeout (timeout) and less than persistent connection idle timeout (connect_timeout).
# The default value is 15 seconds.
#connect_retry_timeout = 15

# The command timeout value defines the amount of time to wait for a command
# or RPC call before timing out. The value for the command timeout must
# be less than the value of the persistent connection idle timeout (connect_timeout)
# The default value is 10 seconds.
#command_timeout = 10

[accelerate]
#accelerate_port = 5099
#accelerate_timeout = 30
#accelerate_connect_timeout = 5.0

# The daemon timeout is measured in minutes. This time is measured
# from the last activity to the accelerate daemon.
#accelerate_daemon_timeout = 30

# If set to yes, accelerate_multi_key will allow multiple
# private keys to be uploaded to it, though each user must
# have access to the system via SSH to add a new key. The default
# is "no".
#accelerate_multi_key = yes

[selinux]
# file systems that require special treatment when dealing with security context
# the default behaviour that copies the existing context or uses the user default
# needs to be changed to use the file system dependent context.
#special_context_filesystems=nfs,vboxsf,fuse,ramfs,9p

# Set this to yes to allow libvirt_lxc connections to work without SELinux.
#libvirt_lxc_noseclabel = yes

[colors]
#highlight = white
#verbose = blue
#warn = bright purple
#error = red
#debug = dark gray
#deprecate = purple
#skip = cyan
#unreachable = red
#ok = green
#changed = yellow
#diff_add = green
#diff_remove = red
#diff_lines = cyan

[diff]
# Always print diff when running ( same as always running with -D/--diff )
# always = no

# Set how many context lines to show in diff
# context = 3
@@ -1,24 +0,0 @@
pdns_config:
  local-address: "127.0.0.1"
  local-port: "5300"
  api: yes
  api-key:

pdns_backends:
  gsqlite3:
    dnssec: yes
    database: "/var/lib/powerdns/powerdns.sqlite"
pdns_sqlite_databases_locations:
  - "/var/lib/powerdns/powerdns.sqlite"

pdns_rec_config:
  forward-zones:
    - "{{ consul_domain }}=127.0.0.1:8600"
    - "ducamps.win=192.168.1.10"
    - "{{ domain.name }}=192.168.1.5"
    - "lan.{{ domain.name }}=192.168.1.5"
    - "1.168.192.in-addr.arpa=192.168.1.5:5300"

  local-address: "{{ hostvars[inventory_hostname]['ansible_' + default_interface].ipv4.address|default(ansible_default_ipv4.address) }}"
  dnssec: "off"
@@ -1,90 +0,0 @@
NAS_nomad_folder:
  - name: actualbudget
  - name: archiso
    owner: 1000001
  - name: backup
    owner: 1000001
  - name: borgmatic
  - name: crowdsec
    owner: 1000001
  - name: dms
    owner: 1000001
  - name: filestash
    owner: 1000
  - name: gitea
    owner: 1000000
  - name: grafana
    owner: 472
  - name: hass
    owner: 1000001
  - name: homer
    owner: 1000001
  - name: immich/cache
  - name: immich/upload
  - name: jellyfin
    owner: 1000001
  - name: loki
    owner: 10001
  - name: mealie
    owner: 1000001
  - name: mosquito
    owner: 1883
  - name: pacoloco
    owner: 1000001
  - name: pdns-auth
    owner: 1000001
  - name: pdns-admin
    owner: 1000001
  - name: pihole
    owner: 999
  - name: prometheus
    owner: 65534
  - name: prowlarr
    owner: 1000001
  - name: radicale
    owner: 1000001
  - name: openldap
    owner: 1001
  - name: registry/ghcr
  - name: registry/docker
  - name: syncthing
    owner: 1000001
  - name: traefik
    owner: 1000001
  - name: tt-rss
    owner: 1000001
  - name: vaultwarden
    owner: 1000001
  - name: zigbee2mqtt
    owner: 1000001
nas_bind_target: "/exports"

nas_bind_source:
  - dest: "{{ nas_bind_target }}/nomad"
    source: /data/data1/nomad
  - dest: "{{ nas_bind_target }}/music"
    source: /data/data1/music
  - dest: "{{ nas_bind_target }}/download"
    source: /data/data1/download
  - dest: "{{ nas_bind_target }}/media/serie"
    source: /data/data2/serie
  - dest: "{{ nas_bind_target }}/media/film"
    source: /data/data3/film
  - dest: "{{ nas_bind_target }}/photo"
    source: /data/data1/photo
  - dest: "{{ nas_bind_target }}/homes"
    source: /data/data1/homes
  - dest: "{{ nas_bind_target }}/ebook"
    source: /data/data1/ebook
  - dest: "{{ nas_bind_target }}/media/download/serie"
    source: /data/data1/download/serie
  - dest: "{{ nas_bind_target }}/media/download/film"
    source: /data/data1/download/film
  - dest: "{{ nas_bind_target }}/music/download/"
    source: /data/data1/download/music
@@ -1 +0,0 @@
vsftpd_config: {}
@@ -1,15 +0,0 @@
nfs_cluster_list: "{% for server in groups['all'] %} {% if hostvars[server]['ansible_default_ipv4']['address'] is defined %} {{ hostvars[server]['ansible_' + hostvars[server]['nfs_iface']|default('')].ipv4.address|default(hostvars[server]['ansible_default_ipv4']['address'], true) }}{{ nfs_options }} {% endif %} {% endfor %}"
nfs_options: "(rw,no_root_squash,crossmnt,async,insecure_locks,sec=sys)"
nfs_consul_service: true
nfs_bind_target: "/exports"

nfs_exports:
  - "{{ nas_bind_target }} *(fsid=0,insecure,no_subtree_check)"
  - "{{ nas_bind_target }}/nomad {{ nfs_cluster_list }} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/download {{ nfs_cluster_list }} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/music {{ nfs_cluster_list }} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/media {{ nfs_cluster_list }} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/photo {{ nfs_cluster_list }} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/homes {{ nfs_cluster_list }} 172.17.0.0/16{{ nfs_options }}"
  - "{{ nas_bind_target }}/ebook {{ nfs_cluster_list }} 172.17.0.0/16{{ nfs_options }}"
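`nfs_cluster_list` renders one `IP(options)` entry per inventory host that has a default IPv4 address, preferring the address of the host's `nfs_iface` interface when that variable is set. As a hypothetical illustration only (the addresses below are made up), one resulting `/etc/exports` line would look like:

```sh
# illustrative output; 192.168.1.40/41 are placeholder cluster members
$ cat /etc/exports
/exports *(fsid=0,insecure,no_subtree_check)
/exports/nomad 192.168.1.40(rw,no_root_squash,crossmnt,async,insecure_locks,sec=sys) 192.168.1.41(rw,no_root_squash,crossmnt,async,insecure_locks,sec=sys) 172.17.0.0/16(rw,no_root_squash,crossmnt,async,insecure_locks,sec=sys)
```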
@@ -1 +0,0 @@
nomad_node_class: 'NAS'
@@ -1,25 +0,0 @@
samba_passdb_backend: tdbsam
samba_shares_root: /exports
samba_shares:
  - name: media
    comment: "media"
    write_list: "@NAS_media"
    browseable: true
  - name: ebook
    comment: "ebook"
    write_list: "@NAS_ebook"
    browseable: true
  - name: music
    comment: "music"
    write_list: "@NAS_music"
    browseable: true
  - name: photo
    comment: "photo"
    write_list: "@NAS_photo"
    browseable: true
  - name: download
    comment: "download"
    write_list: "@NAS_download"
    browseable: true
samba_load_homes: True
samba_homes_include: samba_homes_include.conf
@@ -1,45 +0,0 @@
# defaults file for ansible-arch-provissionning
partition_table:
  - device: "/dev/sda"
    label: gpt
    settings:
      - number: 1
        part_end: 64MB
        flags: [boot, esp]
        fstype: vfat
        format: yes
      - number: 2
        part_start: 512MB
        part_end: 1524MB
        flags: []
        fstype: swap
        format: yes
      - number: 3
        part_start: 1524MB
        flags: [lvm]
        fstype: ext4
        format: yes
#  - device: "/dev/sdb"
#    settings:
#      - number: 1
#        name: home
#        fstype: ext4
#        format:
mount_table:
  - device: "/dev/sda"
    settings:
      - number: 3
        mountpath: /mnt
        fstype: ext4
      - number: 1
        mountpath: /mnt/boot
        fstype: vfat

# need vfat boot partition with esp label
provissionning_UEFI_Enable: True
#sssd_configure: False
nomad_datacenter: hetzner

consul_server: False
nomad_server: False
@@ -1,28 +0,0 @@
systemd_mounts:
  diskstation_nomad:
    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/nomad"
    mount: /mnt/diskstation/nomad
    type: nfs
    options:
      - "vers=4"
    automount: "{{ env_automount }}"
    enabled: true
  hetzner_storage:
    share: //u304977.your-storagebox.de/backup
    mount: /mnt/hetzner/storagebox
    type: cifs
    options:
      - credentials=/etc/creds/hetzner_credentials
      - uid=100001
      - gid=10
      - vers=3.0
      - mfsymlinks
    automount: "{{ env_automount }}"
    enabled: true

credentials_files:
  1:
    type: smb
    path: /etc/creds/hetzner_credentials
    username: u304977
    password: "{{ lookup('hashi_vault','secret=secrets/data/ansible/storage:hetzner') }}"
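The `hashi_vault` lookup above fetches the CIFS password from Vault when the play runs; with no explicit auth arguments it falls back to the standard Vault environment variables, so the controller presumably needs something like the following (the address mirrors `nomad_vault_address` elsewhere in this diff; the token line is an assumption about how auth is supplied):

```sh
export VAULT_ADDR="http://active.vault.service.consul:8200"
export VAULT_TOKEN="..."   # or any other auth method supported by the hashi_vault lookup
```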
@@ -1,12 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
31303539336464336239376636623862303066336438383739356163616431643366386565366361
3264336232303135336334333663326234393832343235640a313638323963666631353836373531
61636261623662396330653135326238363630363938323166303861313563393063386161393238
3231336232663533640a333763643864363939336566333731353031313739616633623537386435
39613934663133613733356433616162363430616439623830663837343530623937656434366663
33656466396263616132356337326236383761363834663363643163343231366563333865656433
39316365663734653734363362363539623636666261333534313935343566646166316233623535
32323831626463656337313266343634303830633936396232663966373264313762346235646665
61333139363039363436393962666365336334663164306230393433636664623934343039323637
33383036323233646237343031633030353330633734353232343633623864333834646239346362
643634303135656333646235343366636361
@@ -1,45 +0,0 @@
# defaults file for ansible-arch-provissionning
partition_table:
  - device: "/dev/sda"
    label: gpt
    settings:
      - number: 1
        part_end: 64MB
        flags: [boot, esp]
        fstype: vfat
        format: yes
      - number: 2
        part_start: 512MB
        part_end: 1524MB
        flags: []
        fstype: swap
        format: yes
      - number: 3
        part_start: 1524MB
        flags: [lvm]
        fstype: ext4
        format: yes
#  - device: "/dev/sdb"
#    settings:
#      - number: 1
#        name: home
#        fstype: ext4
#        format:
mount_table:
  - device: "/dev/sda"
    settings:
      - number: 3
        mountpath: /mnt
        fstype: ext4
      - number: 1
        mountpath: /mnt/boot
        fstype: vfat

# need vfat boot partition with esp label
provissionning_UEFI_Enable: True
#sssd_configure: False
nomad_datacenter: hetzner

consul_server: False
nomad_server: False
@@ -1,7 +0,0 @@
ansible_python_interpreter: /usr/bin/python3
hass_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDfVei9iC/Ra5qmSZcLu8z2CTaXCmfn4JSS4o3eu0HhykdYGSqhBTcUDD3/FhcTPQJVFsu1P4Gwqq1dCE+EvaZZRQaMUqVKUpOliThSG6etbImkvqLQQsC1qt+/NqSvfzu2+28A6+YspzuxsViGo7e3Gg9MdwV3LMGh0mcOr/uXb/HIk18sJg5yQpwMfYTj0Wda90nyegcN3F2iZMeauh/aaFJzWcHNakAAewceDYOErU07NhlZgVA2C8HgkJ8HL7AqIVqt9VOx3xLp91DbKTNXSxvyM0X4NQP24P7ZFxAOk/j0AX3hAWhaNmievCHyBWvQve1VshZXFwEIiuHm8q4GSCxK2r0oQudKdtIuQMfuUALigdiSxo522oEiML/2kSk17WsxZwh7SxfD0DKa82fy9iAwcAluWLwJ+yN3nGnDFF/tHYaamSiowpmTTmQ9ycyIPWPLVZclt3BlEt9WH/FPOdzAyY7YLzW9X6jhsU3QwViyaTRGqAdqzUAiflKCMsNzb5kq0oYsDFC+/eqp1USlgTZDhoKtTKRGEjW2KuUlDsXGBeB6w1D8XZxXJXAaHuMh4oMUgLswjLUdTH3oLnnAvfOrl8O66kTkmcQ8i/kr1wDODMy/oNUzs8q4DeRuhD5dpUiTUGYDTWPYj6m6U/GAEHvN/2YEqSgfVff1iQ4VBw==
system_arch_local_mirror: "https://arch.{{ domain.name }}/repo/archlinux_$arch"
system_sudoers_group: "serverAdmin"
system_ipV6_disable: True
system_ip_unprivileged_port_start: 0
wireguard_mtu: 1420
@@ -1,5 +0,0 @@
consul_client_addr: "0.0.0.0"
consul_datacenter: "homelab"
consul_backup_location: "/mnt/diskstation/git/backup/consul"
consul_ansible_group: all
consul_systemd_resolved_enable: true
@@ -1,8 +0,0 @@
docker_daemon_config:
  dns:
    - 172.17.0.1
    - 192.168.1.6
  mtu: 1420
  insecure-registries:
    - 192.168.1.0/24
    - 192.168.121.0/24
@@ -1,9 +0,0 @@
nomad_docker_allow_caps:
  - NET_ADMIN
  - NET_BROADCAST
  - NET_RAW
nomad_allow_privileged: True
nomad_vault_enabled: true
nomad_vault_address: "http://active.vault.service.{{ consul_domain }}:8200"
nomad_vault_role: "nomad-cluster"
nomad_docker_extra_labels: ["job_name", "task_group_name", "task_name", "namespace", "node_name"]
@@ -1,5 +0,0 @@
sssd_configure: true
# sssd_configure is False by default - by default nothing is done by this role.
ldap_search_base: "dc=ducamps,dc=eu"
ldap_uri: "ldaps://ldaps.service.consul"
ldap_sudo_search_base: "ou=sudoers,dc=ducamps,dc=eu"
@@ -1,42 +0,0 @@
user:
  name: vincent
  home: /home/vincent
  uid: 1024
  mail: vincent@ducamps.eu
  groups:
    - docker
  authorized_keys:
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINN5V9WPPi2/HwAQuDeaJO3hUPf8HxNMHqVmkf1pDjWg JuiceSSH
    - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDBrP9akjyailPU9tUMvKrtDsqjI191W1L95u3OFjBqqapXgbDVx1FVtSlIIKcCHZyTII1zgC7woZmNRpmaIJRh6N+VIuRrRs29xx2GUVc4pxflUwwIAK36hgZS3nqmA2biacmPR9HogZLZMcPtZdLhWGlLuUv1cWqbqW7UcDa0lbubCo2v4OQMx/zt37voKAZSkkbH9mVszH6eKxNFy1KXbLYhwXiKfYBnAHbivhiSkZUGV6D4HNj8Jx6IY1YF3bfwMXmt841Q/7OY+t3RTIS8ewvSF+jpQ7GKHBEsZTZUGwIoSyZFFvCgKQVOJu/ZJJS4HNkluilir9Sxtx2LRgy+HHQ251trnsVsJp3ts4uTiMkKJQy1PXy1ZvQXYkip9Af3vlXUMmTyVj8cv+No07G1rZ1pZ3wXKX4RkTsoep5GsYlhyUd7GzsAQQiX9YhYyWDQ6NHBYAGAWbw2BLNxltWa4AyWOa1C8v+1+mRwdvpdMY7powJNCXQaIJmiOZiI/Us= vincent@fixe-pc-2020-03-01
    - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCYHkEIa38p3e4+m/LScHm8Ei7H2X/pDksjVAzoJ4fHr8oXc6DKkC8SWwMnh3L4WzWBhfTbzwUgFTNpsxhp/UyJf+fdzmzetlbVlYSuA6yWuSmgMeFbXFImhZ+Sn3i59hLeqAAyrkQLjba2waehdEsuOQ/AGoDbMYm38Xf9Wka/1YIeUPE4gLeLvymRnGw7BSug6Unycy52WlFAquollObOvc7tNiX0uLDh81Dp0KZhqWRs75hfmQ9du4g4uNhFLiF11hOGNgj3PWV+nWe8GWNQYVUBChWX1dsP8ct/ahG9IFXSPEaFD1IZeFp29u2ln3mgKkBtcRTRe1e3CLQqiRsUq2aixVFbSgFMFgGSUiNGNqKR4f9DeyJrYBplSj6HXjWoBny4Wm8+yfk8qR2RtQpS6AUu81xtKnXOaj9Q5VZO3kVF0U3EXHAZutTYDj9mDlhLSBS7x7hmrkRBbIy7adSx9Gx5Ck3/RllqG6KD+LdJa4I0pUTRNetpLpYDeZpwjnDP1r7udaSQMyRMH5YKLzhtHqIV/imn9QO4KCxNxTgwxt9ho6HDvlDGERCxm+yeHUu3CPyq2ZGSF5HHsYTGUtYvQw4JfQyw/5DrZ7IIdU1e7ZuaE3h/NvFgKJPVTP52nmUtIW7pIOkHpn9mddjm/oKMayOzMspLn9HLFVbqi7A5Xw== vincent@zen-pc
  privatekey:
    - keyname: "id_gitea"
      key: "{{ lookup('file', '~/.ssh/id_gitea') }}"

system_user:
  - name: drone-deploy
    home: /home/drone-deploy
    shell: /bin/bash
    authorized_keys:
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDUaK+pQlosmopbZfucll9UdqDOTaODOBwoxRwkJEk1i drone@oscar
  - name: ansible
    home: /home/ansible
    shell: /bin/bash
  - name: root
    home: /root
    privatekey:
      - keyname: id_gitea
        key: "{{ lookup('file', '~/.ssh/id_gitea') }}"

user_custom_host:
  - host: "git.ducamps.eu"
    user: "git"
    keyfile: "~/.ssh/id_gitea"

user_config_repo: "ssh://git@git.ducamps.eu:2222/vincent/conf2.git"
@@ -1 +0,0 @@
vault_raft_group_name: "homelab"
@@ -1,11 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
39613433313663653039643961643165643632313938626339653365376633613135653436363938
6331623132366638633665636163336462393333336264320a666466303465663839646435626231
38396437363034313236383261326637306238616162303131356537393635363939376236386130
6466353961643233310a306631333664363332336263656638623763393732306361306632386662
37623934633932653965316532386664353130653830356237313337643266366233346633323265
37616533303561363864626531396366323565396536383133643539663630636633356238386633
34383464333363663532643239363438626135336632316135393537643930613532336231633064
35376561663637623932313365636261306131353233636661313435643563323534623365346436
65366132333635643832353464323961643466343832376635386531393834336535386364396333
3932393561646133336437643138373230366266633430663937
@@ -1,12 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
61326233336236343231396231306638373837653661313334313261313539316532373437346132
3931306637303530373032663236363466383433316161310a396439393564643731656664663639
32386130663837303663376432633930393663386436666263313939326631616466643237333138
3365346131636333330a376436323964656563363664336638653564656231636136663635303439
35346461356337303064623861326331346263373539336335393566623462343464323065366237
61346637326336613232643462323733366530656439626234663335633965376335623733336162
37323739376237323534613361333831396531663637666161666366656237353563626164626632
33326336353663356235373835666166643465666562616663336539316233373430633862613133
36363831623361393230653161626131353264366634326233363232336635306266376363363739
66373434343330633337633436316135656533613465613963363931383266323466653762623365
363332393662393532313063613066653964
@@ -1,14 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
35303137383361396262313561623237626336306366376630663065396664643630383638376436
3930346265616235383331383735613166383461643233310a663564356266663366633539303630
37616532393035356133653838323964393464333230313861356465326433353339336435363263
3162653932646662650a613762393062613433343362633365316434663661306637623363333834
61303231303362313133346461373738633239613933303564383532353537626538363636306461
66663330346566356637623036363964396137646435333139323430353639386134396537366334
39303130386432366335383433626431663034656466626265393863623438366130346562623365
63653963393663353666313631326131636361333230386461383638333338393137336562323935
37343034363961306663303232346139356534613837663230393962323333656536303161373939
65626164336166306264653538313661393934383966303135356161336331623835663235646332
63343764643861366537383962616230323036326331386333346463353835393762653735353862
32323839663365353337303363313535633362643231653663393936363539363933636430613832
32336566633962646463316636346330336265626130373636643335323762363661
@@ -1,14 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
64396261616266633665646330393631316463386334633032353965323964633464333331323334
6261653930313764313836366531383462313965336231620a656637623439623639383931373361
37373434636531623563336565356136633031633835633636643436653165386436636564616130
3763383036343739370a376565343130636631653635616566653531323464343632623566313436
32396165636333393032636636613030373663393238323964396462323163616162613933626536
31623931343633346131636563643563393230323839636438373933666137393031326532356535
32363439306338623533353734613966396362303164616335363535333438326234623161653732
66613762653966613763623966633939323634346536636334343364306332323563653361346563
65313433376634363261323934376637646233636233346536316262386634353666376539613235
63666432396636373139663861393164626165383665663933383734303165623464666630343231
33323339663138373530396636636333323439616137313434316465633162396237306238343366
30326162306539396630633738323435323432646338633331626665363838376363343835336534
3635
@@ -1,50 +0,0 @@
systemd_mounts:
  diskstation_photo:
    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/photo"
    mount: /mnt/diskstation/photo
    type: nfs
    options:
      - "vers=4"
    automount: "{{ env_automount }}"
    enabled: true
  diskstation_music:
    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/music"
    mount: /mnt/diskstation/music
    type: nfs
    options:
      - "vers=4"
    automount: "{{ env_automount }}"
    enabled: true
  diskstation_media:
    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/media"
    mount: /mnt/diskstation/media
    type: nfs
    options:
      - "vers=4"
    automount: "{{ env_automount }}"
    enabled: true

  diskstation_ebook:
    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/ebook"
    mount: /mnt/diskstation/ebook
    type: nfs
    options:
      - "vers=4"
    automount: "{{ env_automount }}"
    enabled: true
  diskstation_nomad:
    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/nomad"
    mount: /mnt/diskstation/nomad
    type: nfs
    options:
      - " "
    automount: "{{ env_automount }}"
    enabled: true
  diskstation_download:
    share: "{{ nas_ip }}:{{ env_default_nfs_path }}/download"
    mount: /mnt/diskstation/download
    type: nfs
    options:
      - "vers=4"
    automount: "{{ env_automount }}"
    enabled: true
@@ -1 +0,0 @@
nomad_node_class: 'cluster'
@@ -1,38 +0,0 @@
postgres_consul_service: true
postgres_consul_service_name: db

postgresql_databases:
  - name: ttrss
    owner: ttrss
  - name: gitea
    owner: gitea
  - name: supysonic
    owner: supysonic
  - name: hass
    owner: hass
  - name: vaultwarden
    owner: vaultwarden
  - name: drone
    owner: drone
  - name: paperless
    owner: paperless
  - name: vikunja
    owner: vikunja
  - name: ghostfolio
    owner: ghostfolio
  - name: pdns-auth
    owner: pdns-auth
  - name: pdns-admin
    owner: pdns-admin
  - name: mealie
    owner: mealie
  - name: immich
    owner: immich

postgresql_hba_entries:
  - {type: local, database: all, user: postgres, auth_method: peer}
  - {type: local, database: all, user: all, auth_method: peer}
  - {type: host, database: all, user: all, address: '127.0.0.1/32', auth_method: md5}
  - {type: host, database: all, user: all, address: '::1/128', auth_method: md5}
  - {type: host, database: all, user: all, address: '::0/128', auth_method: md5}
  - {type: host, database: all, user: all, address: '0.0.0.0/0', auth_method: md5}
@ -1,54 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
39363436643831373861376361613830316334613939346338616636393462663033393261633838
6337336161393063646136613538396366653538656435360a303062636463383739653730346639
61323634306265613336313634653039313639663836363032353261383566393865613166613032
3837313634633466610a313062646237396138316361303361663565353862363139343566306539
38303161303163323265376539323939393938373965353934303535613962653534363362346563
61643638353138623162353364353736396162613735333063633739346132613161303564356437
62343535363263646463306466663536613937393463666336396332646533343439613433626566
38643363343065393165646134343935386461626166316662356365366666363737653336626631
64643230616431396666666462303366343164323233303139643939346635353730316234386163
35613235643034643833393233373536383863333763393066373564353535353463363336316335
63363537643432663266386438316563656663656462333039303861393364333966383430643263
63356435373064633861343137616637393161383361306135373864386235653034323732316663
65336465386135663532356433386562666639333464633362663131646237613034646563396133
33303464633635636233626633353038656230373266666132323561383866343632333561323363
61346664623338376436373332646232646235323639633262666166346535663238653563363239
34663365633363313433376333653534333364393635316235333965383262313563373161663065
36393565396534353235623238303835343334646632306638306332336539616463393966653538
35336462623031326539633139636533633632623137393463333531663935323765663139306361
66643434393533313039356434326438626265323066613966323634306632653765363834613034
30373039336536393865383265643335396232643537343363313338383838383030386665303237
64363666346535633237353462333232623132353031323231623338356136656261303662656465
31313039643561623635643435333133663032313964323061393231666336343233363038616231
36356262326530383233336130326361613431623866633832663361633937646461343731343938
33306262346463623935663466356264393837626239313739356431653163376563333234346566
38373663643532313635333131663239383736343930623735323861663037356136353433633865
63626435613936303661366637623338633961643137613933303735366265663933396130363039
34396637643638613839306639343765393539653164616536653661373264376436626639316666
61303835323761643531326438363035343539383464376433363534623934366534373631353364
61383866323737316430303736366533643939313637393631303833363431613562303639323939
66313434613963656464383964313734383938353366306462666537653563336465376464303538
34336531663334303938333739313638636363623562613536333736386137363139653164626261
62663662316365663563646164303935323866633336633939323837393962393130626330666233
63663661303565646236623130663034636264353235376561306630376365613966663536303963
63643161386435633831393334333035653761393863373731616239313235383033633439376166
39613762376162386231633938393036633461303732323337656430373430636435313337303365
37646461336339623339316663616636373036656564383462356562306465623762653162633963
35636466386138333564666564323034393162633965386133643235303938616439333130353637
61343536323034366464653138353665326436396133313432666563353335383733363335613562
61646365346665383866623364396138323666326338313530353663323938613362653038313339
32613663616535313661386538366330373364366637386634633437646362383764346263636434
35616166393065343038643861636333373738363335353164326435303961326662356230323262
35656531653535643630376330393731643532353132366662636664626132646632306361323035
31373136616435336362633439356339336466313337623538383763386132396135653864386638
31393864363466653137643565306462616238333435343036613331653866393532313861376331
33646636623666343439616332386363373664346164313963623861393134666463383366633539
35313761333564303635656364303566643436393130356163623137313530653539656537653139
38336636623732313630303933303962303561376436623737633139643564343166326335386639
31373437336139326562613339393235393065396538333566323864643639303132313733396132
35613532396363326166313061353136373965303964623534653634613639303764393038333037
63656131616463663565653134363336326139303736313138366262616338643339316231663631
30656132386462393433313261313466303239346138623433643634616465656139343764353338
62616139613731363665333438383861623837643432643134626461643631323034383262656439
33653563323434343964633236353434643739333863636630636363633639373630
@ -1 +0,0 @@
postgres_consul_tag: "active"
@ -1 +0,0 @@
postgres_consul_tag: "standby"
@ -1,68 +0,0 @@
dhcpd_authoritative: True
dhcpd_lease_time: '72'
dhcpd_domain_name: "lan.{{ domain.name }}"
dhcpd_nameservers:
  - '192.168.1.4'
  - '192.168.1.40'

dhcpd_zones:
  - zone: "lan.{{ domain.name }}."
    primary: "192.168.1.5"
    key: "dhcpdupdate"
  - zone: "1.168.192.in-addr.arpa."
    primary: "192.168.1.5"
    key: "dhcpdupdate"

dhcpd_options: |
  ddns-updates on;
  ddns-update-style interim;
  ignore client-updates;
  update-static-leases on;
  ddns-domainname "lan.{{ domain.name }}.";
  ddns-rev-domainname "in-addr.arpa.";

dhcpd_subnets:
  - subnet: '192.168.1.0'
    netmask: '255.255.255.0'
    options: |
      option routers 192.168.1.1;
    pools:
      - range: '192.168.1.100 192.168.1.140'

dhcpd_hosts:
  - hostname: 'zen-pc'
    address: '192.168.1.14'
    ethernet: 'f0:d5:bf:f4:ce:d7'

  - hostname: 'fixe-pc'
    address: '192.168.1.15'
    ethernet: 'ee:35:20:fc:7b:04'

  - hostname: 'oscar'
    address: '192.168.1.40'
    ethernet: '68:1D:EF:3C:F0:44'
  - hostname: 'bleys'
    address: '192.168.1.42'
    ethernet: '68:1d:ef:2b:3d:24'

  - hostname: 'xiaomi-chambre-gateway'
    address: '192.168.1.61'
    ethernet: '04:cf:8c:9c:f7:f0'
  - hostname: 'xiaomi-ampoule-chambre'
    address: '192.168.1.62'
    ethernet: '44:23:7c:88:1f:ea'
  - hostname: 'shelly-chambre-ecran'
    address: '192.168.1.63'
    ethernet: 'b4:e6:2d:7a:ea:77'
  - hostname: 'shelly-salon-cadre'
    address: '192.168.1.64'
    ethernet: 'b4:e6:2d:7a:e6:1e'
  - hostname: 'shelly-chambre-ventilo'
    address: '192.168.1.65'
    ethernet: 'e0:98:06:97:78:0b'
  - hostname: 'shelly-Bureau-chauffeau'
    address: '192.168.1.66'
    ethernet: '8c:aa:b5:42:b9:b9'
@ -1,14 +0,0 @@
$ANSIBLE_VAULT;1.1;AES256
65303666336535386536653939626336646338623431353161636565393532623264316534326539
6265393839323438376666393030383839326239323261660a333132613538306137383332336538
38323830353062366133643734303138343939323135333532333666653039326437316361353463
6665393263376132620a346239386437326462363565636335303766306638393331656664376665
63373131373039653065633861626263646635323634333538343163346239633937303761366362
31376438363731613666393531656232653033336332653261313866396434616461303831353336
38663965636536313932346133363733636636643938366364366435366237316435643062336231
34343931653963613431336465653036616431323263613731393963656637303561366461663038
31336131346266393035343135323131636435333865323733386439363763376638383337613530
34356331356361636665383933633130343564373739343630663835313164326565393439306163
31386538633033333961386534323234653833323537356565616436346462613333663139623035
30636265313230383162633466373937353262383965313631326336666133653331366230653961
6131
@ -1,2 +0,0 @@
nomad_datacenter: homelab
system_wol_enable: True
@ -1,13 +0,0 @@
domain:
  name: ducamps.eu
consul_bootstrap_expect: 3
consul_domain: "consul"
nomad_bootstrap_expect: 3
nomad_client_meta:
  - name: "env"
    value: "production"
vault_unseal_keys_dir_output: "~/vaultUnseal/production"
env_default_nfs_path: ""
env_media_nfs_path: "/volume1"
env_automount: true
nas_ip: "192.168.1.43"
@ -1,9 +0,0 @@
rsynclocations:
  - name: backup nas
    location: /mnt/backup
    readonly: "no"

rsynchostalloawed: 192.168.1.10
@ -1,21 +0,0 @@
domain:
  name: ducamps.dev
#systemd_mounts: []
#systemd_mounts_enabled: []
consul_bootstrap_expect: 2
consul_domain: "consul"
nomad_bootstrap_expect: 2
nomad_client_meta:
  - name: "env"
    value: "staging"

vault_unseal_keys_dir_output: "~/vaultUnseal/staging"
hosts_entries:
  - ip: "{{ hostvars['nas-dev']['ansible_default_ipv4']['address'] }}"
    name: diskstation.ducamps.eu

env_default_nfs_path: ""
env_automount: true
nas_ip: "nfs.service.consul"
@ -1,65 +0,0 @@
---
ansible_host: "192.168.1.42"
ansible_python_interpreter: "/usr/bin/python3"
default_interface: "enp2s0"
consul_iface: "{{ default_interface }}"
vault_iface: "{{ default_interface }}"
nfs_iface: "{{ default_interface }}"
wireguard_address: "10.0.0.7/24"
wireguard_byhost_allowed_ips:
  merlin: 10.0.0.7,192.168.1.42,192.168.1.0/24
  corwin: 10.0.0.7,192.168.1.42,192.168.1.0/24
perrsistent_keepalive: "20"
wireguard_endpoint: ""

wireguard_postup:
  - iptables -A FORWARD -i wg0 -j ACCEPT
  - iptables -A FORWARD -o wg0 -j ACCEPT
  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
  - sysctl -w net.ipv4.ip_forward=1

wireguard_postdown:
  - iptables -D FORWARD -i wg0 -j ACCEPT
  - iptables -D FORWARD -o wg0 -j ACCEPT
  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
  - sysctl -w net.ipv4.ip_forward=0

partition_table:
  - device: "/dev/sda"
    label: gpt
    settings:
      - number: 1
        part_end: 300MB
        flags: [boot, esp]
        fstype: vfat
        format: yes
      - number: 2
        part_start: 512MB
        part_end: 1524MB
        flags: []
        fstype: swap
        format: yes
      - number: 3
        part_start: 1524MB
        flags: [lvm]
        fstype: ext4
        format: yes
#  - device: "/dev/sdb"
#    settings:
#      - number: 1
#        name: home
#        fstype: ext4
#        format:
mount_table:
  - device: "/dev/sda"
    settings:
      - number: 3
        mountpath: /mnt
        fstype: ext4
      - number: 1
        mountpath: /mnt/boot
        fstype: vfat

#need vfat boot partition with esp label
provissionning_UEFI_Enable: True
@ -1,47 +0,0 @@
---
ansible_host: 10.0.0.1
#ansible_host: 135.181.150.203
default_interface: "eth0"
wireguard_address: "10.0.0.1/24"
wireguard_endpoint: "135.181.150.203"
wireguard_persistent_keepalive: "20"
wireguard_allowed_ips: 10.0.0.1

wireguard_postup:
  - iptables -A FORWARD -o %i -j ACCEPT
  - iptables -A FORWARD -i %i -j ACCEPT
  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
  - sysctl -w net.ipv4.ip_forward=1
  - resolvectl dns %i 192.168.1.4 192.168.1.41; resolvectl domain %i '~ducamps.win' '~ducamps.eu' '~{{ consul_domain }}'

wireguard_postdown:
  - iptables -D FORWARD -i %i -j ACCEPT
  - iptables -D FORWARD -o %i -j ACCEPT
  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
  - sysctl -w net.ipv4.ip_forward=0

wireguard_unmanaged_peers:
  phone:
    public_key: IYKgrQ2VJUbOnupSqedOfIilsbmBBABZUTRF9ZoTrkc=
    allowed_ips: 10.0.0.3/32
    persistent_keepalive: 0
  zen:
    public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
    allowed_ips: 10.0.0.5/32
    persistent_keepalive: 0
wireguard_dns: "192.168.1.4,192.168.1.41"
consul_client_addr: "127.0.0.1 10.0.0.1"
consul_bind_address: "10.0.0.1"
consul_ui: True
consul_iface: "wg0"
nomad_bind_addr: "10.0.0.1"
nomad_host_networks:
  - name: "private"
    interface: wg0
  - name: "public"
    interface: eth0
  - name: "default"
    interface: wg0
nomad_client_network_interface: "wg0"
vault_listener_address: 10.0.0.1
nomad_plugins_podman: True
@ -1,24 +0,0 @@
---
ansible_host: "192.168.1.41"
ansible_python_interpreter: "/usr/bin/python3"
default_interface: "enu1u1"
consul_iface: "{{ default_interface }}"
vault_iface: "{{ default_interface }}"

wireguard_address: "10.0.0.6/24"
wireguard_byhost_allowed_ips:
  merlin: 10.0.0.6,192.168.1.41
  corwin: 10.0.0.6,192.168.1.41
perrsistent_keepalive: "20"
wireguard_endpoint: ""

wireguard_postup:
  - iptables -A FORWARD -i wg0 -j ACCEPT
  - iptables -A FORWARD -o wg0 -j ACCEPT
  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE

wireguard_postdown:
  - iptables -D FORWARD -i wg0 -j ACCEPT
  - iptables -D FORWARD -o wg0 -j ACCEPT
  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
@ -1,19 +0,0 @@
---

default_interface: eth0
vault_iface: "{{ default_interface }}"
ansible_host: gerard-dev.lan.ducamps.dev
wireguard_address: "10.0.1.6/24"
perrsistent_keepalive: "20"
wireguard_endpoint: ""

wireguard_postup:
  - iptables -A FORWARD -i wg0 -j ACCEPT
  - iptables -A FORWARD -o wg0 -j ACCEPT
  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE

wireguard_postdown:
  - iptables -D FORWARD -i wg0 -j ACCEPT
  - iptables -D FORWARD -o wg0 -j ACCEPT
  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
@ -1,50 +0,0 @@
---
ansible_host: 10.0.0.4
#ansible_host: 65.21.2.14
default_interface: "ens3"
nfs_iface: "wg0"
wireguard_address: "10.0.0.4/24"
wireguard_endpoint: "65.21.2.14"
wireguard_persistent_keepalive: "20"
wireguard_byhost_allowed_ips:
  oscar: "0.0.0.0/0"
  bleys: "0.0.0.0/0"
wireguard_allowed_ips: "10.0.0.4/32,10.0.0.3,10.0.0.5"

wireguard_postup:
  - iptables -A FORWARD -o %i -j ACCEPT
  - iptables -A FORWARD -i %i -j ACCEPT
  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE
  - sysctl -w net.ipv4.ip_forward=1
  - resolvectl dns %i 192.168.1.4 192.168.1.41; resolvectl domain %i '~ducamps.win' '~ducamps.eu' '~{{ consul_domain }}'

wireguard_postdown:
  - iptables -D FORWARD -i %i -j ACCEPT
  - iptables -D FORWARD -o %i -j ACCEPT
  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
  - sysctl -w net.ipv4.ip_forward=0

wireguard_unmanaged_peers:
  phone:
    public_key: IYKgrQ2VJUbOnupSqedOfIilsbmBBABZUTRF9ZoTrkc=
    allowed_ips: 10.0.0.3/32
    persistent_keepalive: 0
  zen:
    public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
    allowed_ips: 10.0.0.5/32
    persistent_keepalive: 0
wireguard_dns: "192.168.1.4,192.168.1.41"
consul_client_addr: "127.0.0.1 10.0.0.4"
consul_bind_address: "10.0.0.4"
consul_ui: True
consul_iface: "wg0"
nomad_bind_addr: "10.0.0.4"
nomad_host_networks:
  - name: "private"
    interface: wg0
  - name: "public"
    interface: ens3
  - name: "default"
    interface: wg0
vault_listener_address: 10.0.0.4
nomad_plugins_podman: True
@ -1,41 +0,0 @@
---

ansible_host: merlin-dev.lan.ducamps.dev
default_interface: eth0
vault_iface: "{{ default_interface }}"
wireguard_address: "10.0.1.4/24"
wireguard_endpoint: "{{ ansible_default_ipv4.address }}"
wireguard_persistent_keepalive: "30"

wireguard_postup:
  - iptables -A FORWARD -o %i -j ACCEPT
  - iptables -A FORWARD -i %i -j ACCEPT
  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE

wireguard_postdown:
  - iptables -D FORWARD -i %i -j ACCEPT
  - iptables -D FORWARD -o %i -j ACCEPT
  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE

wireguard_unmanaged_peers:
  phone:
    public_key: ioG35kDFTtip+Acfq+je9qDHYbZij+J6+Pg3T6Z4N0w=
    allowed_ips: 10.0.1.3/32
    persistent_keepalive: 0
  zen:
    public_key: rYYljQw8InmM95pxCP9KyZ8R+kcicgnjr6E9qtkI1Ag=
    allowed_ips: 10.0.1.5/32
    persistent_keepalive: 0
consul_client_addr: "127.0.0.1 10.0.1.4"
consul_bind_address: "10.0.1.4"
consul_ui: True
consul_iface: "wg0"
nomad_bind_addr: "10.0.1.4"
nomad_host_networks:
  - name: "private"
    interface: wg0
  - name: "public"
    interface: eth0
  - name: "default"
    interface: wg0
vault_listener_address: 10.0.1.4
@ -1,17 +0,0 @@
---
ansible_host: nas-dev.lan.ducamps.dev
default_interface: eth0
vault_iface: "{{ default_interface }}"
wireguard_address: "10.0.1.8/24"
perrsistent_keepalive: "30"
wireguard_endpoint: ""

wireguard_postup:
  - iptables -A FORWARD -i wg0 -j ACCEPT
  - iptables -A FORWARD -o wg0 -j ACCEPT
  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE

wireguard_postdown:
  - iptables -D FORWARD -i wg0 -j ACCEPT
  - iptables -D FORWARD -o wg0 -j ACCEPT
  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
@ -1,19 +0,0 @@
---
wireguard_address: "10.0.0.8/24"
default_interface: "enp2s0"
consul_iface: "{{ default_interface }}"
vault_iface: "{{ default_interface }}"
perrsistent_keepalive: "30"
wireguard_endpoint: ""
wireguard_byhost_allowed_ips:
  merlin: 10.0.0.8,192.168.1.43
  corwin: 10.0.0.8,192.168.1.43
wireguard_postup:
  - iptables -A FORWARD -i wg0 -j ACCEPT
  - iptables -A FORWARD -o wg0 -j ACCEPT
  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE

wireguard_postdown:
  - iptables -D FORWARD -i wg0 -j ACCEPT
  - iptables -D FORWARD -o wg0 -j ACCEPT
  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
@ -1,62 +0,0 @@
---
default_interface: "enp1s0"
consul_iface: "{{ default_interface }}"
vault_iface: "{{ default_interface }}"
nfs_iface: "{{ default_interface }}"
nomad_client_cpu_total_compute: 8000
wireguard_address: "10.0.0.2/24"
wireguard_byhost_allowed_ips:
  merlin: 10.0.0.2,192.168.1.40
  corwin: 10.0.0.2,192.168.1.40
perrsistent_keepalive: "30"
wireguard_endpoint: ""

wireguard_postup:
  - iptables -A FORWARD -i wg0 -j ACCEPT
  - iptables -A FORWARD -o wg0 -j ACCEPT
  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE

wireguard_postdown:
  - iptables -D FORWARD -i wg0 -j ACCEPT
  - iptables -D FORWARD -o wg0 -j ACCEPT
  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE

partition_table:
  - device: "/dev/sda"
    label: gpt
    settings:
      - number: 1
        part_end: 300MB
        flags: [boot, esp]
        fstype: vfat
        format: yes
      - number: 2
        part_start: 512MB
        part_end: 1524MB
        flags: []
        fstype: swap
        format: yes
      - number: 3
        part_start: 1524MB
        flags: [lvm]
        fstype: ext4
        format: yes
#  - device: "/dev/sdb"
#    settings:
#      - number: 1
#        name: home
#        fstype: ext4
#        format:
mount_table:
  - device: "/dev/sda"
    settings:
      - number: 3
        mountpath: /mnt
        fstype: ext4
      - number: 1
        mountpath: /mnt/boot
        fstype: vfat

#need vfat boot partition with esp label
provissionning_UEFI_Enable: True
@ -1,17 +0,0 @@
---
ansible_host: oscar-dev.lan.ducamps.dev
default_interface: eth0
vault_iface: "{{ default_interface }}"
wireguard_address: "10.0.1.2/24"
perrsistent_keepalive: "30"
wireguard_endpoint: ""

wireguard_postup:
  - iptables -A FORWARD -i wg0 -j ACCEPT
  - iptables -A FORWARD -o wg0 -j ACCEPT
  - iptables -t nat -A POSTROUTING -o {{ default_interface }} -j MASQUERADE

wireguard_postdown:
  - iptables -D FORWARD -i wg0 -j ACCEPT
  - iptables -D FORWARD -o wg0 -j ACCEPT
  - iptables -t nat -D POSTROUTING -o {{ default_interface }} -j MASQUERADE
@ -1,25 +0,0 @@
requirements:
	ansible-galaxy install -g -r roles/requirements.yml

deploy_production:
	ansible-playbook site.yml -i production -u ansible

deploy_production_wiregard:
	ansible-playbook playbooks/wireguard.yml -i production -u ansible

deploy_staging:
	ansible-playbook playbooks/wireguard.yml -i staging -u ansible
	ansible-playbook site.yml -i staging -u ansible

deploy_staging_base:
	ansible-playbook playbooks/sssd.yml -i staging -u ansible
	ansible-playbook playbooks/wireguard.yml -i staging -u ansible
	ansible-playbook playbooks/server.yml -i staging -u ansible

view-allvault:
	ansible-vault view `git grep -l "ANSIBLE_VAULT;1.1;AES256$$"`
@ -1,9 +0,0 @@
#!/bin/sh

readonly vault_password_file_encrypted="$(dirname $0)/vault-password.gpg"

# flock used to work around "gpg: decryption failed: No secret key" in tf-stage2
# would otherwise need 'auto-expand-secmem' (https://dev.gnupg.org/T3530#106174)
flock "$vault_password_file_encrypted" \
    gpg --batch --decrypt --quiet "$vault_password_file_encrypted"
Binary file not shown.
@ -1,45 +0,0 @@
---
prerun: false
dependency:
  name: galaxy
  enabled: false
driver:
  name: vagrant
  provider:
    name: libvirt
  default_box: archlinux/archlinux
platforms:
  - name: oscar-dev
    cpu: 1
    memory: 1024
    box: archlinux/archlinux
  - name: merlin-dev
    cpu: 1
    memory: 1024
    box: generic/rocky9
  - name: gerard-dev
    cpu: 1
    memory: 1024
    box: debian/bookworm64
  - name: nas-dev
    cpu: 1
    memory: 1024
    box: archlinux/archlinux
provisioner:
  name: ansible
  connection_options:
    ansible_ssh_user: vagrant
    ansible_become: true
  env:
    ANSIBLE_CONFIG: ../../ansible.cfg
    ANSIBLE_ROLES_PATH: "../../roles"
  log: true
  lint:
    name: ansible-lint
  inventory:
    host_vars: []
    links:
      group_vars: ../../group_vars
      hosts: ../../staging
verifier:
  name: ansible
@ -1,55 +0,0 @@
---
- name: Consul install
  hosts: all
  roles:
    - role: ansible-consul
      become: true

- name: Vault install
  hosts: homelab
  roles:
    - role: ansible-hashicorp-vault
      become: true
  post_tasks:
    - name: Stat root file
      ansible.builtin.stat:
        path: "{{ vault_unseal_keys_dir_output }}/rootkey"
      register: rootkey_exist
      delegate_to: localhost
    - name: Reading root contents
      ansible.builtin.command: cat "{{ vault_unseal_keys_dir_output }}/rootkey"
      register: root_token
      delegate_to: localhost
      when: rootkey_exist.stat.exists
      changed_when: false
    - name: debug
      ansible.builtin.debug:
        var: root_token
    - name: Generate nomad token
      community.hashi_vault.vault_token_create:
        renewable: true
        policies: "nomad-server-policy"
        period: 72h
        no_parent: true
        token: "{{ root_token.stdout }}"
        url: "http://active.vault.service.consul:8200"
        retries: 4
      run_once: true
      delegate_to: localhost
      when: root_token.stdout is defined
      register: nomad_token_data

    - name: Gather nomad token
      ansible.builtin.set_fact:
        nomad_vault_token: "{{ nomad_token_data.login.auth.client_token }}"
      when: nomad_token_data.login is defined

- name: nomad
  hosts: all
  vars:
    unseal_keys_dir_output: ~/vaultunseal
  roles:
    - role: ansible-nomad
      become: true
    - role: docker
      become: true
@ -1,9 +0,0 @@
---
- hosts:
    - homelab
    - VPS
    - NAS
  vars:
    # certbot_force: true
  roles:
    - autofs
@ -1,6 +0,0 @@
---
- hosts: all
  gather_facts: false
  become: true
  roles:
    - ansible_bootstrap
@ -1,28 +0,0 @@
---
- hosts: all
  roles:
    - role: ansible-user
      vars:
        user_name: '{{ user.name }}'
        user_ldap: '{{ sssd_configure }}'
        user_password: '{{ userPassword }}'
        user_authorized_key: '{{ user.authorized_keys }}'
        user_privatekey: '{{ user.privatekey }}'
        user_shell: '/bin/zsh'
        user_uid: '{{ user.uid }}'
        user_groups:
          - docker
      become: true
      become_user: '{{ user.name }}'

- hosts: all
  roles:
    - role: user_config
      vars:
        user_config_username: "{{ user.name }}"
      become_user: "{{ user.name }}"
      become: true
    - role: user_config
      vars:
        user_config_username: root
      become: true
@ -1,54 +0,0 @@
---
- name: Database playbook
  hosts: database
  vars:
    # certbot_force: true
  pre_tasks:
    - name: Install Pg vectors (immich)
      aur:
        name: pgvecto.rs-bin
        state: present
      become: true
      become_user: aur_builder
    - name: Add database member to pg_hba replication
      ansible.builtin.set_fact:
        postgresql_hba_entries: "{{ postgresql_hba_entries + [\
          {'type':'host', \
          'database': 'replication',\
          'user':'repli',\
          'address':hostvars[item]['ansible_'+hostvars[item]['default_interface']]['ipv4']['address']+'/32',\
          'auth_method':'trust'}] }}"
      loop: '{{ groups.database }}'
  roles:
    - role: ansible-role-postgresql
      become: true
  tasks:
    - name: Launch replication
      ansible.builtin.command: pg_basebackup -D /var/lib/postgres/data -h {{ groups["database_active"]|first }} -U repli -Fp -Xs -P -R -w
      args:
        creates: /var/lib/postgres/data/postgresql.conf
      become: true
      become_user: postgres
      when: inventory_hostname in groups["database_standby"]
    - name: Ensure PostgreSQL is started and enabled on boot.
      ansible.builtin.service:
        name: '{{ postgresql_daemon }}'
        state: '{{ postgresql_service_state }}'
        enabled: '{{ postgresql_service_enabled }}'
      become: true

    - name: Set Postgres shared libraries
      community.postgresql.postgresql_set:
        name: shared_preload_libraries
        value: vectors.so
      become: true
      become_user: postgres
      when: inventory_hostname in groups["database_active"]
      notify: Restart postgresql
    - name: Set Postgres search_path
      community.postgresql.postgresql_set:
        name: search_path
        value: '$user, public, vectors'
      become: true
      become_user: postgres
      when: inventory_hostname in groups["database_active"]
@ -1,7 +0,0 @@
---
- hosts: dhcp
  vars:

  roles:
    - role: ansible-dhcpd
      become: true
@ -1,6 +0,0 @@
---
- name: DNS playbook
  hosts: DNS
  roles:
    - role: pdns_recursor-ansible
      become: true
@ -1,7 +0,0 @@
---
- hosts: music-player
  roles:
    - user_config
    - cronie
    - hass-client-control
    - mpd
@ -1,28 +0,0 @@
---
- name: gather all
  hosts: all
- name: NAS playbook
  hosts: NAS
  vars:
    # certbot_force: true
  pre_tasks:
    - name: include task NasBind
      ansible.builtin.include_tasks:
        file: tasks/NasBind.yml
      loop: "{{ nas_bind_source }}"
    - name: create nomad folder
      ansible.builtin.file:
        path: "{{ nas_bind_target }}/nomad/{{ item.name }}"
        owner: "{{ item.owner|default('root') }}"
        state: directory
      become: true
      loop: "{{ NAS_nomad_folder }}"
  roles:
    - role: ansible-role-nut
      become: true
    - role: ansible-role-nfs
      become: true
    - role: ansible-role-pureftpd
      become: true
    - role: vladgh.samba.server
      become: true
@ -1,7 +0,0 @@
---
- hosts: rsyncd
  vars:
    # certbot_force: true
  roles:
    - role: rsyncd
      become: true
@ -1,26 +0,0 @@
---
- hosts:
    - homelab
    - VPS
    - NAS
  vars:
    # certbot_force: true
  tasks:
    - name: Create user
      ansible.builtin.include_role:
        name: "ansible-user"
        apply:
          become: true
      vars:
        user_name: "{{ create.name }}"
        user_home: "{{ create.home }}"
        user_groups: "{{ create.groups|default('') }}"
        user_shell: "{{ create.shell|default('') }}"
        user_authorized_key: "{{ create.authorized_keys|default([]) }}"
        user_privatekey: "{{ create.privatekey|default([]) }}"
      loop: "{{ system_user }}"
      loop_control:
        loop_var: create
  roles:
    - system
    - cronie
@ -1,5 +0,0 @@
---
- hosts: all
  roles:
    - role: ansible-role-sssd
      become: true
@ -1,18 +0,0 @@
- name: Ensure base NFS directory exist
  ansible.builtin.file:
    path: "{{ item.dest }}"
    state: directory
  become: true
- name: Ensure source NFS directory exist
  ansible.builtin.file:
    path: "{{ item.source }}"
    state: directory
  become: true
- name: Bind NAS export
  ansible.posix.mount:
    path: "{{ item.dest }}"
    src: "{{ item.source }}"
    opts: bind
    fstype: none
    state: mounted
  become: true
@ -1 +0,0 @@
path = /exports/homes/%S
@ -1,5 +0,0 @@
---
- hosts: wireguard
  roles:
    - role: ansible-role-wireguard
      become: true
@ -1,52 +0,0 @@
[DNS]
oscar

[dhcp]
oberon

[database_active]
bleys

[database_standby]
oscar

[database:children]
database_active
database_standby

[rsyncd]
oscar
bleys

[wireguard:children]
production

[NAS]
oberon

[cluster]
oscar
#gerard
bleys

[homelab:children]
NAS
cluster

[VPS]
merlin

[region:children]
homelab
VPS
production

[production]
oscar
merlin
#gerard
bleys
oberon

[staging]
@ -1,13 +0,0 @@
---
- hosts: all
  remote_user: root
  roles:
    - ansible-arch-provissionning

- hosts: all
  remote_user: root
  roles:
    - ansible_bootstrap

# - remote_user: "{{ user.name }}"
#   import_playbook: site.yml
12
ansible/roles/.gitignore
vendored
@ -1,12 +0,0 @@
# Ignore everything in roles dir
/*
# Except:
# the .gitignore file
!.gitignore
# the requirements file
!requirements.yml
# Readme if you have one
!README.md
# and any specific role we want to version locally
!locally-versioned-role*/
@ -1,49 +0,0 @@
---
roles:
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-arch-provissionning.git
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-postgresql.git
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-sssd
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible_bootstrap.git
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/autofs.git
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/cronie.git
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/docker.git
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/hass-client-control.git
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/msmtp.git
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/rsyncd.git
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/system.git
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/user_config.git
    scm: git
  - src: git@github.com:vincentDcmps/ansible-role-wireguard.git
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-consul.git
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-hashicorp-vault.git
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-nomad.git
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/mpd.git
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-dhcpd.git
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-user.git
    scm: git
  - src: git@github.com:vincentDcmps/ansible-role-nfs.git
    scm: git
  - src: git@github.com:vincentDcmps/ansible-role-nut.git
    scm: git
  - src: ssh://git@git.ducamps.eu:2222/ansible-roles/ansible-role-pureftpd.git
    scm: git
  - src: https://github.com/PowerDNS/pdns_recursor-ansible.git
collections:
  - name: vladgh.samba
@ -1,10 +0,0 @@
---
- import_playbook: playbooks/server.yml
- import_playbook: playbooks/dhcpd.yml
- import_playbook: playbooks/dns.yml
- import_playbook: playbooks/HashicorpStack.yml
- import_playbook: playbooks/nas.yml
- import_playbook: playbooks/autofs.yml
- import_playbook: playbooks/sssd.yml
- import_playbook: playbooks/database.yml
- import_playbook: playbooks/rsyncd.yml
@ -1,44 +0,0 @@
[DNS]
oscar-dev

[database_active]
oscar-dev

[database_standby]
gerard-dev

[database:children]
database_active
database_standby

[wireguard:children]
staging

[NAS]
nas-dev

[cluster]
oscar-dev
gerard-dev

[homelab:children]
NAS
cluster

[VPS]
merlin-dev

[region:children]
homelab
VPS
staging

[staging]
oscar-dev
gerard-dev
merlin-dev
nas-dev

[production]
@ -1,19 +0,0 @@
{
  "Service": {
    "Name": "file",
    "address": "192.168.1.10",
    "port": 7000,
    "tags": [
      "homer.enable=true",
      "homer.name=FileStation",
      "homer.url=https://file.ducamps.win",
      "homer.logo=https://file.ducamps.win/webman/modules/FileBrowser/images/1x/file_browser_64.png",
      "homer.service=Application",
      "homer.target=_blank",
      "traefik.enable=true",
      "traefik.http.routers.file.rule=Host(`file.ducamps.win`)",
      "traefik.http.routers.file.tls.domains[0].sans=file.ducamps.win",
      "traefik.http.routers.file.tls.certresolver=myresolver"
    ]
  }
}
@ -1,21 +0,0 @@
{
  "Service": {
    "Name": "syno",
    "address": "192.168.1.10",
    "port": 5000,
    "tags": [
      "homer.enable=true",
      "homer.name=Diskstation",
      "homer.url=https://syno.ducamps.eu",
      "homer.logo=https://syno.ducamps.eu/webman/resources/images/icon_dsm_96.png",
      "homer.service=Application",
      "homer.target=_blank",
      "traefik.enable=true",
      "traefik.http.routers.syno.rule=Host(`syno.ducamps.eu`)",
      "traefik.http.routers.syno.tls.domains[0].sans=syno.ducamps.eu",
      "traefik.http.routers.syno.tls.certresolver=myresolver",
      "traefik.http.routers.syno.entrypoints=web,websecure"
    ]
  }
}
@ -1,35 +0,0 @@
# 001 Development environment

## Status

Accepted

## Context

We need to create a virtual cluster to run tests without impacting production.

### Virtualisation or container

Virtualisation provides better isolation but needs more resources.
Containers allow creating more instances without consuming as many resources as virtual machines.

### Creation wrapper

Vagrant is good at managing virtual machines, but few LXC boxes are available, and Vagrant can be used with configuration managers other than Ansible.
Molecule can manage containers with the molecule-LXD plugin; Molecule is an Ansible-exclusive solution.

## Decision

We will use containers instead of VMs for the resource-consumption advantage.

The Molecule wrapper will be used because all our configuration is already provided by Ansible, and Molecule offers a better choice of containers than Vagrant.

25/08/2023

Some issues were met with LXC (shared kernel, privileges, plugin not maintained).
I have increased the RAM on my computer, so I can switch to virtual machines for the dev env.
Instead of building Vagrant VMs in a Molecule playbook, we only use a Vagrantfile, to avoid too many overlays to maintain.

## Consequences

Migrate Molecule provisioning to a dedicated Vagrantfile (see the sketch below).
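A minimal sketch of the resulting workflow, assuming the Vagrantfile sits at the repo root with the libvirt provider and reusing the `deploy_staging` Makefile target shown elsewhere in this diff; the exact sequence is illustrative:

```sh
# Bring the dev VMs up from the dedicated Vagrantfile
vagrant up --provider=libvirt
# Provision them with the same playbooks as production, against the staging inventory
make deploy_staging
# Tear everything down when done
vagrant destroy -f
```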
@ -1,28 +0,0 @@
# 002-Vault-Backend

## Status

## Context

Currently the Vault backend lives in the Consul KV store.
The Hashicorp recommendation is to use integrated storage from the Vault cluster.
This would remove the Consul dependency on rebuild.

## Decision

Migrate to Vault integrated storage.

## Consequences

To do:

- [migration plan]("https://developer.hashicorp.com/vault/tutorials/raft/raft-migration")

1. switch oscar, gerard and bleys to integrated storage; merlin will stay on Consul storage during the operation, until decommissioning
2. stop the vault service on oscar
3. run the migration command (see the sketch below)
4. join the other nodes to the cluster
5. decommission vault on merlin
6. adapt the backup job

- [backup]("https://developer.hashicorp.com/vault/tutorials/standard-procedures/sop-backup")
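A hedged sketch of step 3, following the linked Hashicorp tutorial; addresses and the raft data path are placeholders, not values taken from this repo:

```sh
# migrate.hcl - Consul KV source, integrated-storage (raft) destination
cat > migrate.hcl <<'EOF'
storage_source "consul" {
  address = "127.0.0.1:8500"
  path    = "vault/"
}
storage_destination "raft" {
  path = "/opt/vault/data"
}
cluster_addr = "https://127.0.0.1:8201"
EOF
# Run with the Vault service stopped on the node being migrated
vault operator migrate -config=migrate.hcl
```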
@ -1,54 +0,0 @@
# 003-mailserver

## Status

done

## Context

Gandi free email will become a paid service in 2 months.

Under these conditions it is interesting to study a self-hosted mail solution.

### domain name

Do I take advantage of this to change domain name?

Pros:

- easier to test
- could redirect the old domain name to the new one until the end of the Gandi domain (2026)
- get a more "normal" extension

Cons:

- need to progressively update every personal account

### Container localisation

on hetzner:

- need to increase memory

on homelab:

- need to redirect all server mail flows to hetzner, to be sure that mail will be sent with the hetzner IP (control PTR on this IP)
- hetzner will be a SPOF too

### software choice

The mail server will run in the Nomad cluster.

- docker-mailserver -> 1 container
- mailu

## Decision

We will switch to another domain name on "https://www.bookmyname.com/": "ducamps.eu".
docker-mailserver will be easier to configure because there is only one container to migrate to Nomad.
To begin with, the container will be launched on hetzner.

## Consequences

- need to buy a new domain name and configure DNS (done)
- increase memory on corwin (done)
@ -1,117 +0,0 @@
# DNS

## 001 Recursor out of NAS

### Status

done

### Context

Currently the main local domain DNS is located on the NAS.

Goals:

- avoid a DNS outage in case of a NAS reboot (my Synology is 10 years old and a little slow to reboot); moreover, during a NAS reboot we lose the adblock DNS in the Nomad cluster because Nomad depends on the NFS share.
- remove the direct redirection to the service.consul DNS and the iptables rule used to redirect port 53 to Consul on gerard; instead the new DNS can forward directly to an active Consul node on port 8300.

#### DNS software

Needs DHCP dynamic update.
Must be able to forward a domain to a port other than 53.

### Decision

We will migrate the main domain DNS from the NAS to gerard (PowerDNS).
PowerDNS provides two distinct binaries: one for the authoritative server, another for the recursor.
The goal is to first migrate the recursive part from the Synology to a physical service,
and in a second step migrate the authoritative server into the Nomad cluster.

### Consequences

Before moving the authoritative server we need to remove the DB DNS dependency (create db Consul services).
Need to delete the iptables rule on gerard before deploying.

## 002 each node requests its own Consul client for Consul DNS queries

### Status

done

### Context

To avoid a cluster failure in case the DNS recursor goes down,
I would like each cluster client to query its own Consul client
first to resolve Consul DNS queries.

### Decision

Implement systemd-resolved on all cluster members and add a DNS redirection (see the sketch below).

### Consequences

Need to modify the Ansible system role for systemd-resolved activation and the consul role to configure the redirection.
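A minimal sketch of such a redirection, assuming a systemd-resolved version (>= 246) that accepts `ip:port` DNS entries and a local Consul agent answering DNS on 127.0.0.1:8600; the drop-in path is illustrative:

```sh
# Route *.consul lookups to the local Consul agent, leave everything else unchanged
sudo mkdir -p /etc/systemd/resolved.conf.d
sudo tee /etc/systemd/resolved.conf.d/consul.conf <<'EOF' >/dev/null
[Resolve]
DNS=127.0.0.1:8600
Domains=~consul
EOF
sudo systemctl restart systemd-resolved
resolvectl query active.vault.service.consul   # quick check
```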
## 003 migrate authority DNS from NAS to cluster

### Status

done

### Context

We currently have three authoritative domains on the NAS:

- ducamps.win
- ducamps.eu
- lan.ducamps.eu

We could migrate the authoritative DNS into the cluster.
ducamps.win and ducamps.eu are only used for application access, so there is no dependency on the cluster build.
Need to study the cluster-build dependency for lan.ducamps.eu -> in any case, when building from scratch we need to use IPs.
Need a keepalive IP, and check there is no conflict if stored on the same machine as pihole -> ok, it doesn't need to listen on 53, it is only queried by the recursor.
The authoritative DNS will depend on storage (less problematic than the recursor).

### Decision

### Consequences

## 004 migrate recursor into cluster

### Status

done

### Context

Now that the cluster doesn't depend on the recursor, because each node queries its own Consul agent for Consul lookups,
we need to study whether we can migrate the recursor into Nomad without breaking a dependency.

Advantages:

- the recursor can change node in case of failure

Against:

- this job needs a keepalive IP like pihole
- *loss of the recursor if the Nomad cluster is lost*

### Decision

Put one recursor on the cluster besides the authoritative server and keep the recursor on gerard for better redundancy.

### Consequences

## 005 physical recursor location

### Status

done

### Context

Following the NAS migration, the physical DNS recursor was installed directly on the NAS. This creates a SPOF: when the NAS fails, the recursors on the Nomad cluster stop because of the volume dependency.

### Decision

Put the physical recursor on a cluster node; that way, to get a DNS issue, the NAS and that node would need to be down at the same time.
@ -1,42 +0,0 @@
# NAS

## 001 New NAS spec

### Status

In progress

### Context

Storage:

- Data filesystem will be in btrfs.
- Study whether to keep the root filesystem in ext4.
- Need to put btrfs over LVM, adding the possibility of a cache later (a cache on cold data is useless at the beginning; maybe a write cache in future use).
- Hot data (nomad, documents, fresh download files, music?) on SSD; cold data (films, series, photos) on HDD.
- At least 2 HDD and 2 SSD.

Hardware:

- a 2.5 Gbps network would be good for evolving
- at least 4 GB RAM (expandable will be appreciated)

Software:

- be able to install a custom Linux distribution

### Decision

- Due to form factor/consumption and SSD capability, my choice is the ASUSTOR Nimbustor 2 Gen 2 AS5402; it matches the needs and is less expensive than a DIY NAS.
- Buy only one additional 2 TB SSD to store the system and hot data.

### Consequences

Need to migrate data and keep the same disks:

- install the system
- copy all data from the 2 TB HDD to the SSD, then format the 2 TB HDD
- copy download data from the 4 TB HDD to the SSD
- copy series to the 2 TB HDD and copy films to an external hard drive
@ -1,25 +0,0 @@
# Docker pull-through

# 001 architecture consideration

## Status

Accepted

## Context

Docker Hub enforces a pull limit; if something goes wrong on our infrastructure we can hit this limit quickly. The solution is to implement a pull-through proxy.

### Decision

Create two container tasks: a Docker Hub pull-through and a GHCR one.

We could add these registries to Traefik to have both behind port 5000, but this would add a Traefik dependency on rebuild,

so to begin we will use one Traefik service on two different static ports.

## Consequences

- these registries need to be started first on cluster creation
- need to update all job images with the local proxy URL (see the sketch below)
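A hedged sketch of such a pull-through registry using the stock `registry:2` image's proxy mode; the ports and the GHCR variant mirror the decision above, but the actual Nomad wiring in this repo is not shown here:

```sh
# Docker Hub mirror on :5000
docker run -d --name registry-dockerhub -p 5000:5000 \
  -e REGISTRY_PROXY_REMOTEURL=https://registry-1.docker.io \
  registry:2
# GHCR mirror on :5001
docker run -d --name registry-ghcr -p 5001:5000 \
  -e REGISTRY_PROXY_REMOTEURL=https://ghcr.io \
  registry:2
# Jobs then reference images through the proxy, e.g.:
#   image = "<proxy-host>:5000/library/traefik:latest"
```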
@ -1,36 +0,0 @@
# Architecture DNS

```mermaid
flowchart LR
    subgraph External
        externalRecursor[recursor]
        GandiDns[ hetzner ducamps.win]
    end
    subgraph Internal
        pihole[pihole]--ducamps.win-->NAS
        pihole--service.consul-->consul[consul cluster]
        pihole--->recursor
        recursor--service.consul-->consul
        DHCP --dynamic update--> NAS
        NAS
        recursor--ducamps.win-->NAS
        consul--service.consul--->consul
        clients--->pihole
        clients--->recursor
    end
    pihole --> externalRecursor
    recursor-->External
```

## Detail

The pihole container in the Nomad cluster is set as primary DNS and ad blocker; the secondary DNS recursor is located on gerard.

The DNS located on the NAS manages the *ducamps.win* domain on the local network; each recursor forwards every request for *ducamps.win* to this DNS.

Each DNS forwards *service.consul* requests to the Consul cluster.
Each Consul node has a consul redirection in systemd-resolved to its own Consul client.

A DHCP service performs dynamic updates on the NAS DNS on lease delivery.

External recursors (Cloudflare and FDN) are set on pihole in case of recursor failure.
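A few illustrative checks of those forwarding paths with `dig`; the resolver addresses are taken from this diff's inventory and may differ in practice:

```sh
# Consul service resolution through the primary DNS / recursor path
dig @192.168.1.4 active.vault.service.consul +short
# Local zone served by the NAS authoritative DNS (via the recursor)
dig @192.168.1.4 oscar.lan.ducamps.eu +short
# Direct query against a Consul agent's DNS port
dig @127.0.0.1 -p 8600 db.service.consul +short
```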
@ -1,11 +0,0 @@
# Add a new job

## Create Nomad job

## Add secret to vault

## Add a new policy to Vault terraform

## Add Database creation in ansible variable (if needed)

## Create CNAME in local DNS and External if needed
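The headings above are an outline; a hedged sketch of the first two steps for a hypothetical `myapp` job (the secret path and file names are illustrative, mirroring the drone job later in this diff):

```sh
# Store the job's secrets in Vault (KV under the secrets/ mount)
vault kv put secrets/myapp DB_PASSWORD='changeme'
# Validate and deploy the job on the cluster
nomad job validate myapp.nomad
nomad job run myapp.nomad
```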
@ -1,25 +0,0 @@
# ansible vault management

Ansible vault passwords are encrypted with a GPG key stored in ansible/misc.
To renew a password, follow this workflow:

```sh
# Generate a new password for the default vault
pwgen -s 64 1 > default-pw

# Re-encrypt all default vaults
ansible-vault rekey --new-vault-password-file ./default-pw \
    $(git grep -l 'ANSIBLE_VAULT;1.1;AES256$')

# Save the new password in encrypted form
# (replace "RECIPIENT" with your email)
gpg -r RECIPIENT -o misc/vault--password.gpg -e default-pw

# Ensure the new password is usable
ansible-vault view misc/vaults/vault_hcloud.yml

# Remove the unencrypted password file
rm default-pw
```

The script `vault-keyring-client.sh` is set in ansible.cfg as `vault_password_file` to decrypt the GPG file.
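
The wiring is a single `ansible.cfg` entry (the exact path is an assumption); with it in place, every ansible-vault command decrypts transparently:

```sh
# In ansible.cfg (path is an assumption):
#   [defaults]
#   vault_password_file = misc/vault-keyring-client.sh
ansible-vault view misc/vaults/vault_hcloud.yml
```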
@ -1,8 +0,0 @@
# Troubleshooting

## issue with SMTP traefik port

Ensure that no other Traefik router (HTTP or TCP) is listening on the smtp entrypoint or on all entrypoints; this can disturb the SMTP TLS connection.
See [here](https://doc.traefik.io/traefik/routing/routers/#entrypoints_1).
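
One way to audit which routers are bound where, assuming the Traefik API is enabled and reachable (hostname and port below are assumptions):

```sh
# List every router and the entrypoints it listens on
curl -s http://traefik.service.consul:8080/api/http/routers | jq '.[] | {name, entryPoints}'
curl -s http://traefik.service.consul:8080/api/tcp/routers  | jq '.[] | {name, entryPoints}'
```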
@ -1,23 +0,0 @@
# How to Bootstrap dev env

## prerequisite

The dev environment is managed by a molecule job which launches containers via LXD. You need the following software to launch it:

- an LXD server up on your local machine
- molecule: ```pip install molecule```
- the molecule-LXD plugin: ```pip install molecule-lxd```

## provisioning

You can launch ```make create-dev``` at the project root.

Molecule will create 3 containers on different distributions:

- archlinux
- rockylinux 9
- debian 11

To bootstrap the containers (base account, sudo configuration) the role [ansible_bootstrap](https://git.ducamps.win/ansible-roles/ansible_bootstrap) will be applied.

The converge step calls the playbook [site.yml](https://git.ducamps.win/vincent/homelab/src/commit/c5ff235b9768d91b240ec97e7ff8e2ad5a9602ca/ansible/site.yml) to provision the cluster; the individual molecule steps can also be run by hand, as sketched below.
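
A few standard molecule commands, run from the directory holding the molecule scenario (location is an assumption):

```sh
molecule create                     # only create the LXD containers
molecule converge                   # apply the playbooks to the running containers
molecule login --host <container>   # open a shell in one container
molecule destroy                    # tear everything down
```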
@ -1,3 +0,0 @@
--8<--
README.md
--8<--
88
drone.nomad
Normal file
@ -0,0 +1,88 @@
job "drone" {
  datacenters = ["homelab"]
  type        = "service"
  constraint {
    attribute = "${attr.cpu.arch}"
    value     = "amd64"
  }

  group "droneCI" {
    network {
      mode = "host"
      port "http" {
        to = 80
      }
    }
    vault {
      policies = ["access-tables"]
    }
    task "drone-server" {
      driver = "docker"
      service {
        name = "drone"
        port = "http"
        tags = [
          "traefik.enable=true",
          "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.win`)",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.win",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
          "traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https",
          "traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=httpsRedirect"
        ]
      }
      config {
        image = "drone/drone:latest"
        ports = [
          "http"
        ]
      }
      env {
      }
      template {
        data        = <<EOH
{{ with secret "secrets/data/droneCI"}}
DRONE_GITEA_SERVER="https://git.ducamps.win"
DRONE_GITEA_CLIENT_ID="{{ .Data.data.DRONE_GITEA_CLIENT_ID }}"
DRONE_GITEA_CLIENT_SECRET="{{ .Data.data.DRONE_GITEA_CLIENT_SECRET }}"
DRONE_GITEA_ALWAYS_AUTH="True"
DRONE_USER_CREATE="username:vincent,admin:true"
DRONE_DATABASE_DRIVER="postgres"
DRONE_DATABASE_DATASOURCE="postgres://drone:{{ .Data.data.DRONE_DB_PASSWORD }}@db1.ducamps.win:5432/drone?sslmode=disable"
DRONE_RPC_SECRET="{{ .Data.data.DRONE_RPC_SECRET }}"
DRONE_SERVER_HOST="drone.ducamps.win"
DRONE_SERVER_PROTO="https"
{{end}}
EOH
        destination = "local/drone.env"
        env         = true
      }
    }

    task "drone-runner" {
      driver = "docker"
      config {
        image = "drone/drone-runner-docker:latest"
        volumes = [
          "/var/run/docker.sock:/var/run/docker.sock",
        ]
      }
      env {
      }
      template {
        data        = <<EOH
{{ with secret "secrets/data/droneCI"}}
DRONE_RPC_HOST="drone.ducamps.win"
DRONE_RPC_PROTO="https"
DRONE_RPC_SECRET="{{ .Data.data.DRONE_RPC_SECRET}}"
{{ end }}
EOH
        destination = "local/drone-runner.env"
        env         = true
      }
    }
  }
}
85
gitea.nomad
Normal file
@ -0,0 +1,85 @@
job "git" {
  datacenters = ["homelab"]
  type        = "service"

  group "gitea" {
    network {
      mode = "host"
      port "http" {
        to = 3000
      }
      port "ssh" {
        to = 22
      }
    }
    vault {
      policies = ["access-tables"]
    }
    task "gitea" {
      driver = "docker"
      service {
        name = "gitea"
        port = "http"
        tags = [
          "traefik.enable=true",
          "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.ducamps.win`)",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=${NOMAD_JOB_NAME}.ducamps.win",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
          "traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https",
          "traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=httpsRedirect"
        ]
      }
      service {
        name = "gitea-ssh"
        port = "ssh"
        tags = [
          "traefik.enable=true",
          "traefik.tcp.routers.gitea-ssh.rule=HostSNI(`*`)",
          "traefik.tcp.routers.gitea-ssh.entrypoints=ssh"
        ]
      }
      config {
        image = "gitea/gitea:latest"
        ports = [
          "http",
          "ssh"
        ]
        volumes = [
          "/mnt/diskstation/git:/repo",
          "/mnt/diskstation/nomad/gitea:/data"
        ]
      }
      env {
        USER_UID                             = 1000000
        USER_GID                             = 985
        GITEA__server__DOMAIN                = "git.ducamps.win"
        GITEA__server__ROOT_URL              = "https://git.ducamps.win"
        GITEA__server__SSH_DOMAIN            = "git.ducamps.win"
        GITEA__server__SSH_PORT              = "2222"
        GITEA__server__SSH_LISTEN_PORT       = "2222"
        GITEA__server__START_SSH_SERVER      = "false"
        GITEA__database__DB_TYPE             = "postgres"
        GITEA__database__HOST                = "db1.ducamps.win"
        GITEA__database__NAME                = "gitea"
        GITEA__database__USER                = "gitea"
        GITEA__service__DISABLE_REGISTRATION = "true"
        GITEA__repository__ROOT              = "/repo"
        GITEA__server__APP_DATA_PATH         = "/data"
        GITEA__server__LFS_CONTENT_PATH      = "/repo/LFS"
      }
      template {
        data        = <<EOH
{{ with secret "secrets/data/gitea"}}
GITEA__database__PASSWD = "{{.Data.data.PASSWD}}"
GITEA__security__SECRET_KEY = "{{.Data.data.secret_key}}"
GITEA__oauth2__JWT_SECRET = "{{.Data.data.jwt_secret}}"
GITEA__security__INTERNAL_TOKEN = "{{.Data.data.internal_token}}"
{{end}}
EOH
        destination = "secrets/gitea.env"
        env         = true
      }
    }
  }
}
39
makefile
@ -1,39 +0,0 @@
nomad-dev:
	@read -p 'enter your vault token:' VAULT_TOKEN;\
	nomad agent -dev -bind 0.0.0.0 -dc homelab -vault-address "http://active.vault.service.consul:8200" -vault-create-from-role "nomad-cluster" -vault-enabled -vault-token $$VAULT_TOKEN

vault-dev:
	if [ -z "$(FILE)" ]; then \
		./vault/standalone_vault.sh; \
	else \
		./vault/standalone_vault.sh $(FILE);\
	fi

vagrantup:
	vagrant up

create-dev: vagrantup DNS-staging
	make -C ansible deploy_staging
	make -C terraform deploy_vault env=staging
	VAULT_TOKEN=$(shell cat ~/vaultUnseal/staging/rootkey) python ./script/generate-vault-secret

create-dev-base: vagrantup DNS-staging
	make -C ansible deploy_staging_base

destroy-dev:
	vagrant destroy --force

serve:
	mkdocs serve

DNS-staging:
	$(eval dns := $(shell dig oscar-dev.lan.ducamps.dev +short))
	$(eval dns1 := $(shell dig nas-dev.lan.ducamps.dev +short))
	sudo resolvectl dns virbr2 "$(dns)" "$(dns1)";sudo resolvectl domain virbr2 "~consul";sudo systemctl restart systemd-resolved.service

DNS-production:
	sudo resolvectl dns virbr2 "";sudo resolvectl domain virbr2 "";sudo systemctl restart systemd-resolved.service
31
mkdocs.yml
@ -1,31 +0,0 @@
---
site_name: Homelab
copyright: Copyright © 2021 - 2022 VincentDcmps
repo_url: https://git.ducamps.win/vincent/homelab
site_author: VincentDcmps
site_url: https://www.ducamps.win/homelab
theme:
  name: material
  highlightjs: true
  palette:
    scheme: default
    primary: black
  hljs_languages:
    - yaml
  features:
    - navigation.indexes
    - navigation.instant
    - navigation.sections
    - search.highlight
markdown_extensions:
  - pymdownx.snippets:
      check_paths: true
  - pymdownx.superfences:
      custom_fences:
        - name: mermaid
          class: mermaid
          format: !!python/name:pymdownx.superfences.fence_code_format
extra:
  social:
    - icon: fontawesome/brands/github-alt
      link: https://github.com/vincentDcmps
@ -30,13 +30,9 @@ job "nextcloud" {
        port = "http"
        tags = [
          "traefik.enable=true",
-         "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`file.ducamps.eu`)",
+         "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`file.ducamps.win`)",
-         "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=file.ducamps.eu",
+         "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=file.ducamps.win",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
-         "traefik.http.routers.${NOMAD_JOB_NAME}_insecure.entrypoints=web",
-         "traefik.http.routers.${NOMAD_JOB_NAME}_insecure.rule=Host(`file.ducamps.eu`)",
-         "traefik.http.routers.${NOMAD_JOB_NAME}_insecure.middlewares=httpsRedirect",
-         "traefik.http.middlewares.httpsRedirect.redirectscheme.scheme=https",

        ]
@ -62,8 +58,8 @@ job "nextcloud" {
 POSTGRES_PASSWORD="{{ .Data.data.POSTGRES_PASSWORD }}"
 NEXTCLOUD_ADMIN_USER="vincent"
 NEXTCLOUD_ADMIN_PASSWORD="{{ .Data.data.ADMIN_PASSWORD }}"
-NEXTCLOUD_TRUSTED_DOMAINS="file.ducamps.eu"
-POSTGRES_HOST="active.db.service.consul"
+NEXTCLOUD_TRUSTED_DOMAINS="file.ducamps.win"
+POSTGRES_HOST="db1.ducamps.win"
 {{end}}
 EOH
 destination = "secrets/nextcloud.env"
@ -1,133 +0,0 @@
job "MQTT" {
  datacenters = ["homelab"]
  priority    = 90
  type        = "service"
  meta {
    forcedeploy = "0"
  }
  constraint {
    attribute = "${attr.unique.hostname}"
    value     = "oscar"
  }

  group "MQTT" {
    network {
      mode = "host"
      port "zigbee2mqtt" {
        to = 8090
      }
      port "mosquittoMQTT" {
        static = 1883
        to     = 1883
      }
      port "mosquittoWS" {
        to     = 9001
        static = 9001
      }
    }
    task "mosquitto" {
      driver = "docker"
      service {
        name = "mosquitto"
        port = "mosquittoMQTT"
        tags = [
        ]
      }
      config {
        image = "docker.service.consul:5000/library/eclipse-mosquitto"
        ports = ["mosquittoWS", "mosquittoMQTT"]
        volumes = [
          "/mnt/diskstation/nomad/mosquitto:/mosquitto/data",
          "local/mosquitto.conf:/mosquitto/config/mosquitto.conf"
        ]
      }
      env {
        TZ = "Europe/Paris"
      }
      template {
        data        = <<EOH
persistence false
log_dest stdout
listener 1883
allow_anonymous true
connection_messages true
EOH
        destination = "local/mosquitto.conf"
      }
      resources {
        memory = 100
      }
    }
    task "Zigbee2MQTT" {
      driver = "docker"
      service {
        name = "Zigbee2MQTT"
        port = "zigbee2mqtt"
        tags = [
          "homer.enable=true",
          "homer.name=zigbee.mqtt",
          "homer.service=Application",
          "homer.logo=https://www.zigbee2mqtt.io/logo.png",
          "homer.target=_blank",
          "homer.url=http://${NOMAD_ADDR_zigbee2mqtt}",
        ]
      }
      config {
        image      = "koenkk/zigbee2mqtt"
        privileged = true
        ports      = ["zigbee2mqtt"]
        volumes = [
          "/mnt/diskstation/nomad/zigbee2mqtt:/app/data",
          "local/configuration.yaml:/app/data/configuration.yaml",
          "/run/udev:/run/udev",
          "/dev/ttyACM0:/dev/ttyACM0",
        ]
      }
      env {
        TZ = "Europe/Paris"
      }

      template {
        data        = <<EOH
# MQTT settings
mqtt:
  # MQTT base topic for Zigbee2MQTT MQTT messages
  base_topic: zigbee2mqtt
  # MQTT server URL
  server: 'mqtt://{{env "NOMAD_ADDR_mosquittoMQTT"}}'
  # MQTT server authentication, uncomment if required:
  # user: my_user
  # password: my_password
frontend:
  port: 8090
homeassistant: true
devices:
  '0x00158d00027bf710':
    friendly_name: remote_chambre
  '0x00158d0003fabc52':
    friendly_name: weather_chambre
  '0x00158d0003cd381c':
    friendly_name: weather_exterieur
  '0x00158d00036d5fe8':
    friendly_name: motion_sensor_chambre

# Serial settings
serial:
  # Location of the adapter (see first step of this guide)
  adapter: deconz
  port: /dev/ttyACM0
EOH
        destination = "local/configuration.yaml"
      }
      resources {
        memory = 175
      }
    }
  }
}
@ -1,62 +0,0 @@
job "actualbudget" {
  datacenters = ["homelab"]
  priority    = 50
  type        = "service"
  meta {
    forcedeploy = "0"
  }
  constraint {
    attribute = "${attr.cpu.arch}"
    value     = "amd64"
  }
  constraint {
    attribute = "${node.class}"
    operator  = "set_contains"
    value     = "cluster"
  }
  group "actualbudget" {
    network {
      mode = "host"
      port "http" {
        to = 5006
      }
    }
    task "actualbudget-server" {
      driver = "docker"
      service {
        name = "actualbudget"
        port = "http"
        tags = [
          "traefik.enable=true",
          "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`budget.ducamps.eu`)",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=budget.ducamps.eu",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
          "traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
          "homer.enable=true",
          "homer.name=${NOMAD_TASK_NAME}",
          "homer.service=Application",
          "homer.target=_blank",
          "homer.logo=https://budget.ducamps.eu/apple-touch-icon.png",
          "homer.url=https://budget.ducamps.eu",
        ]
      }
      config {
        image = "ghcr.service.consul:5000/actualbudget/actual-server:latest"
        ports = ["http"]
        volumes = [
          "/mnt/diskstation/nomad/actualbudget:/data"
        ]
      }
      env {
      }

      resources {
        memory = 300
      }
    }
  }
}
@ -1,240 +0,0 @@
job "borgmatic" {
  datacenters = ["homelab"]
  priority    = 50
  type        = "service"
  meta {
    forcedeploy = "0"
  }
  constraint {
    attribute = "${node.class}"
    operator  = "set_contains"
    value     = "NAS"
  }

  group "borgmatic" {
    vault {
      policies = ["borgmatic"]
    }
    task "borgmatic" {
      action "manual-backup" {
        command = "/usr/local/bin/borgmatic"
        args = ["create",
          "prune",
          "--verbosity",
          "1"
        ]
      }
      action "list-backup" {
        command = "/usr/local/bin/borgmatic"
        args    = ["rlist"]
      }
      driver = "docker"
      config {
        image = "ghcr.service.consul:5000/borgmatic-collective/borgmatic"
        volumes = [
          "/exports:/exports",
          "local/borgmatic.d:/etc/borgmatic.d",
          "secret/id_rsa:/root/.ssh/id_rsa",
          "secret/known_hosts:/root/.ssh/known_hosts",
          "/exports/nomad/borgmatic:/root/.cache/borg",
        ]
      }
      env {
      }

      template {
        data        = <<EOH
BORG_RSH="ssh -i /root/.ssh/id_rsa -p 23"
{{ with secret "secrets/data/nomad/borgmatic"}}
BORG_PASSPHRASE={{.Data.data.passphrase}}
{{end}}
EOH
        destination = "secrets/sample.env"
        env         = true
      }
      template {
        data        = <<EOH
0 2 * * * PATH=$PATH:/usr/local/bin /usr/local/bin/borgmatic create prune --verbosity 1
0 23 1 * * PATH=$PATH:/usr/local/bin /usr/local/bin/borgmatic check
EOH
        destination = "local/borgmatic.d/crontab.txt"
      }
      template {
        data        = <<EOH
# List of source directories to backup (required). Globs and
# tildes are expanded. Do not backslash spaces in path names.
source_directories:
    - /exports/ebook
    - /exports/homes
    - /exports/music
    - /exports/nomad
    - /exports/photo

repositories:
    - path: ssh://u304977@u304977.your-storagebox.de/./{{if eq "production" (env "meta.env") }}backup_hamelab{{else}}backup_homelab_dev{{end}}
      label: {{if eq "production" (env "meta.env") }}backup_hamelab{{else}}backup_homelab_dev{{end}}

exclude_patterns:
    - '*/nomad/jellyfin/cache'
    - '*nomad/loki/'
    - '*nomad/prometheus'
    - '*nomad/registry'
    - '*nomad/pacoloco'
    - '*nomad/pihole'
    - '*nomad/jellyfin/*'
    - '*.log*'

match_archives: '*'
archive_name_format: '{{ env "node.datacenter" }}-{now:%Y-%m-%dT%H:%M:%S.%f}'
extra_borg_options:
    # Extra command-line options to pass to "borg init".
    # init: --extra-option

    # Extra command-line options to pass to "borg prune".
    # prune: --extra-option

    # Extra command-line options to pass to "borg compact".
    # compact: --extra-option

    # Extra command-line options to pass to "borg create".
    create: --progress --stats

    # Extra command-line options to pass to "borg check".
    # check: --extra-option

# Keep all archives within this time interval.
# keep_within: 3H

# Number of secondly archives to keep.
# keep_secondly: 60

# Number of minutely archives to keep.
# keep_minutely: 60

# Number of hourly archives to keep.
# keep_hourly: 24

# Number of daily archives to keep.
keep_daily: 7

# Number of weekly archives to keep.
keep_weekly: 4

# Number of monthly archives to keep.
# keep_monthly: 6

# Number of yearly archives to keep.
# keep_yearly: 1

checks:
    - name: repository
#    - archives
# check_repositories:
#    - user@backupserver:sourcehostname.borg
# check_last: 3
# output:
#    color: false

# List of one or more shell commands or scripts to execute
# before creating a backup, run once per configuration file.
# before_backup:
#    - echo "Starting a backup."

# List of one or more shell commands or scripts to execute
# before pruning, run once per configuration file.
# before_prune:
#    - echo "Starting pruning."

# List of one or more shell commands or scripts to execute
# before compaction, run once per configuration file.
# before_compact:
#    - echo "Starting compaction."

# List of one or more shell commands or scripts to execute
# before consistency checks, run once per configuration file.
# before_check:
#    - echo "Starting checks."

# List of one or more shell commands or scripts to execute
# before extracting a backup, run once per configuration file.
# before_extract:
#    - echo "Starting extracting."

# List of one or more shell commands or scripts to execute
# after creating a backup, run once per configuration file.
# after_backup:
#    - echo "Finished a backup."

# List of one or more shell commands or scripts to execute
# after compaction, run once per configuration file.
# after_compact:
#    - echo "Finished compaction."

# List of one or more shell commands or scripts to execute
# after pruning, run once per configuration file.
# after_prune:
#    - echo "Finished pruning."

# List of one or more shell commands or scripts to execute
# after consistency checks, run once per configuration file.
# after_check:
#    - echo "Finished checks."

# List of one or more shell commands or scripts to execute
# after extracting a backup, run once per configuration file.
# after_extract:
#    - echo "Finished extracting."

# List of one or more shell commands or scripts to execute
# when an exception occurs during a "prune", "compact",
# "create", or "check" action or an associated before/after
# hook.
# on_error:
#    - echo "Error during prune/compact/create/check."

# List of one or more shell commands or scripts to execute
# before running all actions (if one of them is "create").
# These are collected from all configuration files and then
# run once before all of them (prior to all actions).
# before_everything:
#    - echo "Starting actions."

# List of one or more shell commands or scripts to execute
# after running all actions (if one of them is "create").
# These are collected from all configuration files and then
# run once after all of them (after any action).
# after_everything:
#    - echo "Completed actions."
EOH
        destination = "local/borgmatic.d/config.yaml"
      }
      template {
        data        = <<EOH
{{ with secret "secrets/data/nomad/borgmatic"}}
{{.Data.data.privatekey}}
{{end}}
EOH
        destination = "secret/id_rsa"
        perms       = "700"
      }
      template {
        data        = <<EOH
[u304977.your-storagebox.de]:23 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIICf9svRenC/PLKIL9nk6K/pxQgoiFC41wTNvoIncOxs
[u304977.your-storagebox.de]:23 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA5EB5p/5Hp3hGW1oHok+PIOH9Pbn7cnUiGmUEBrCVjnAw+HrKyN8bYVV0dIGllswYXwkG/+bgiBlE6IVIBAq+JwVWu1Sss3KarHY3OvFJUXZoZyRRg/Gc/+LRCE7lyKpwWQ70dbelGRyyJFH36eNv6ySXoUYtGkwlU5IVaHPApOxe4LHPZa/qhSRbPo2hwoh0orCtgejRebNtW5nlx00DNFgsvn8Svz2cIYLxsPVzKgUxs8Zxsxgn+Q/UvR7uq4AbAhyBMLxv7DjJ1pc7PJocuTno2Rw9uMZi1gkjbnmiOh6TTXIEWbnroyIhwc8555uto9melEUmWNQ+C+PwAK+MPw==
[u304977.your-storagebox.de]:23 ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAGK0po6usux4Qv2d8zKZN1dDvbWjxKkGsx7XwFdSUCnF19Q8psHEUWR7C/LtSQ5crU/g+tQVRBtSgoUcE8T+FWp5wBxKvWG2X9gD+s9/4zRmDeSJR77W6gSA/+hpOZoSE+4KgNdnbYSNtbZH/dN74EG7GLb/gcIpbUUzPNXpfKl7mQitw==
EOH
        destination = "secret/known_hosts"
        perms       = "700"
      }
      resources {
        memory     = 300
        memory_max = 1000
      }
    }
  }
}
@ -1,51 +0,0 @@
job "chainetv" {
  datacenters = ["homelab"]
  priority    = 30
  type        = "service"
  meta {
    forcedeploy = "2"
  }
  group "chainetv" {
    network {
      mode = "host"
      port "http" {
        to = 5000
      }
    }

    task "chainetv" {
      driver = "docker"
      service {
        name = "chainetv"
        port = "http"
        tags = [
          "homer.enable=true",
          "homer.name=ChaineTV",
          "homer.service=Application",
          "homer.icon=fas fa-tv",
          "homer.target=_blank",
          "homer.url=https://www.ducamps.eu/${NOMAD_JOB_NAME}",

          "traefik.enable=true",
          "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`www.ducamps.eu`)&&PathPrefix(`/chainetv`)",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.domains[0].sans=www.ducamps.eu",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=myresolver",
          "traefik.http.routers.${NOMAD_JOB_NAME}.entrypoints=web,websecure",
          "traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=chainetv,chainetvStrip",
          "traefik.http.middlewares.chainetv.headers.customrequestheaders.X-Script-Name=/chainetv",
          "traefik.http.middlewares.chainetvStrip.stripprefix.prefixes=/chainetv",
        ]
      }
      config {
        image = "docker.service.consul:5000/ducampsv/chainetv:latest"
        ports = ["http"]
      }
      resources {
        memory = 200
      }
    }
  }
}
@ -1,194 +0,0 @@
job "dockermailserver" {
  datacenters = ["homelab"]
  priority    = 90
  type        = "service"
  meta {
    forcedeploy = "0"
  }
  constraint {
    attribute = "${attr.cpu.arch}"
    value     = "amd64"
  }
  constraint {
    attribute = "${node.class}"
    operator  = "set_contains"
    value     = "cluster"
  }
  group "dockermailserver" {
    network {
      mode = "host"
      port "smtp" {
        to = 25
      }
      port "imap" {
        to = 10993
      }
      port "esmtp" {
        to = 465
      }
      port "rspamd" {
        to = 11334
      }
    }
    service {
      name = "smtp"
      port = "smtp"
      tags = [
        "traefik.enable=true",
        "traefik.tcp.routers.smtp.service=smtp",
        "traefik.tcp.routers.smtp.entrypoints=smtp",
        "traefik.tcp.routers.smtp.rule=HostSNI(`*`)",
        "traefik.tcp.services.smtp.loadbalancer.proxyProtocol.version=1",
      ]
      check {
        name     = "smtp_probe"
        type     = "tcp"
        interval = "20s"
        timeout  = "2s"
      }
    }
    service {
      name = "esmtp"
      port = "esmtp"
      tags = [
        "traefik.enable=true",
        "traefik.tcp.routers.esmtp.service=esmtp",
        "traefik.tcp.routers.esmtp.entrypoints=esmtp",
        "traefik.tcp.routers.esmtp.rule=HostSNI(`*`)",
        "traefik.tcp.routers.esmtp.tls.passthrough=true",
        "traefik.tcp.services.esmtp.loadbalancer.proxyProtocol.version=1",
      ]
      check {
        name     = "esmtp_probe"
        type     = "tcp"
        interval = "20s"
        timeout  = "2s"
      }
    }
    service {
      name = "imap"
      port = "imap"
      tags = [
        "traefik.enable=true",
        "traefik.tcp.routers.imap.service=imap",
        "traefik.tcp.routers.imap.entrypoints=imap",
        "traefik.tcp.routers.imap.rule=HostSNI(`*`)",
        "traefik.tcp.routers.imap.tls.passthrough=true",
        "traefik.tcp.services.imap.loadbalancer.proxyProtocol.version=2",
      ]
      check {
        name     = "imap_probe"
        type     = "tcp"
        interval = "20s"
        timeout  = "2s"
      }
    }
    service {
      name = "certmail"
      tags = [
        "traefik.enable=true",
        "traefik.http.routers.certmail.entrypoints=web,websecure",
        "traefik.http.routers.certmail.tls.domains[0].sans=mail.ducamps.eu",
        "traefik.http.routers.certmail.tls.certresolver=myresolver",
      ]
    }
    service {
      name = "rspamdUI"
      port = "rspamd"
      tags = [
        "homer.enable=true",
        "homer.name=RSPAMD",
        "homer.service=Application",
        "homer.logo=http://${NOMAD_ADDR_rspamd}/img/rspamd_logo_navbar.png",
        "homer.target=_blank",
        "homer.url=http://${NOMAD_ADDR_rspamd}/",
      ]
      check {
        name     = "rspamd_probe"
        type     = "http"
        path     = "/"
        interval = "60s"
        timeout  = "2s"
      }
    }

    # vault {
    #   policies = ["policy_name"]
    # }
    task "docker-mailserver" {
      driver = "docker"
      config {
        image = "ghcr.service.consul:5000/docker-mailserver/docker-mailserver:latest"
        ports = ["smtp", "esmtp", "imap", "rspamd"]
        volumes = [
          "/mnt/diskstation/nomad/dms/mail-data:/var/mail",
          "/mnt/diskstation/nomad/dms/mail-state:/var/mail-state",
          "/mnt/diskstation/nomad/dms/mail-logs:/var/log/mail",
          "/mnt/diskstation/nomad/dms/config:/tmp/docker-mailserver",
          "/etc/localtime:/etc/localtime",
          "local/postfix-main.cf:/tmp/docker-mailserver/postfix-main.cf",
          "local/postfix-master.cf:/tmp/docker-mailserver/postfix-master.cf",
          "local/dovecot.cf:/tmp/docker-mailserver/dovecot.cf",
          "/mnt/diskstation/nomad/traefik/acme.json:/etc/letsencrypt/acme.json"
        ]
      }

      env {
        OVERRIDE_HOSTNAME          = "mail.ducamps.eu"
        DMS_VMAIL_UID              = 1000000
        DMS_VMAIL_GID              = 984
        SSL_TYPE                   = "letsencrypt"
        LOG_LEVEL                  = "info"
        POSTMASTER_ADDRESS         = "vincent@ducamps.eu"
        ENABLE_RSPAMD              = 1
        ENABLE_OPENDKIM            = 0
        ENABLE_OPENDMARC           = 0
        ENABLE_POLICYD_SPF         = 0
        ENABLE_UPDATE_CHECK        = 0
        UPDATE_CHECK_INTERVAL      = "1d"
        RSPAMD_CHECK_AUTHENTICATED = 0
      }
      template {
        data        = <<EOH

EOH
        destination = "secrets/config"
        env         = true
      }

      template {
        data        = <<EOH
postscreen_upstream_proxy_protocol = haproxy
EOH
        destination = "local/postfix-main.cf"
      }
      template {
        data        = <<EOH
submission/inet/smtpd_upstream_proxy_protocol=haproxy
submissions/inet/smtpd_upstream_proxy_protocol=haproxy
EOH
        destination = "local/postfix-master.cf"
      }
      template {
        data        = <<EOH
haproxy_trusted_networks = 10.0.0.0/24, 127.0.0.0/8, 172.17.0.1, 192.168.1.0/24
haproxy_timeout = 3 secs
service imap-login {
  inet_listener imaps {
    haproxy = yes
    ssl = yes
    port = 10993
  }
}
EOH
        destination = "local/dovecot.cf"
      }
      resources {
        memory = 1000
      }
    }
  }
}
Some files were not shown because too many files have changed in this diff