Compare commits: transcaffe...main

43 commits
Commit SHA1s:

c847046720
d7b7c59f46
153df81836
7021ed1a89
afe72f554e
c36e95d7eb
97526aec36
fc73fdd1fa
bd43f3963c
1076a9f384
82e69bdda3
b9b5c19d38
b9e4abdf36
aac6891518
31d025ed75
1423d2a243
967ebab4c1
5f4fbd492c
96f5da9bf6
2aaa529585
8941b9357a
04b5837fd8
4837172f64
ab7cca0947
960d95a924
eab7b7e915
e7886d8c98
13d40341a0
12b98487a5
2e6cb0a4d5
52d25942b4
af17bea1e1
52bf02e084
f72dd239f9
8e11c3734b
4a2321cbaa
af0adbcb34
f9bbcd6c71
1778ffac2a
b8585b38b7
f9a0f92e27
048df5e3f6
bee1722cea
README.md (25 changed lines)
@@ -5,6 +5,31 @@
 This ansible collection provides various roles for installing
 and configuring basic system utilities like gnupg, ssh etc
 
+- [`elasticsearch`](roles/elasticsearch/README.md): Deploy [elasticsearch](https://www.docker.elastic.co/r/elasticsearch/elasticsearch-oss),
+  a popular (distributed) search and analytics engine, mostly known by its
+  letter "E" in the ELK stack.
+
+- [`git`](roles/git/README.md): configures git on the target system
+
+- [`gnupg`](roles/gnupg/README.md): configures gnupg on the target system
+
+- [`lego`](roles/lego/README.md): runs [lego (LetsEncrypt Go)](https://github.com/go-acme/lego),
+  an ACME client written in Go, using systemd (timers). Multi-instance capable.
+
+- [`mariadb`](roles/mariadb/README.md): runs [MariaDB Server](https://mariadb.org/), one of the world's most popular open source relational databases
+
+- [`minio`](roles/minio/README.md): Deploy [min.io](https://min.io), an
+  s3-compatible object storage server, using docker containers.
+
+- [`nginx`](roles/nginx/README.md): [nginx](https://www.nginx.com/),
+  an advanced load balancer, webserver and reverse proxy.
+
+- [`restic`](roles/restic/README.md): Manage backups using restic
+  and persist them to a configurable backend.
+
+- [`powerdns_tsig_key`](roles/powerdns_tsig_key/README.md): Simple ansible role
+  for generating TSIG keys in PowerDNS.
+
 ## License
 
 [CNPLv7+](LICENSE.md): Cooperative Nonviolent Public License
galaxy.yml (21 changed lines)
@@ -1,13 +1,22 @@
 namespace: finallycoffee
 name: base
-version: 0.0.1
+version: 0.1.2
 readme: README.md
 authors:
-  - Johanna Dorothea Reichmann <transcaffeine@finally.coffee>
-description: Roles for bootstrapping tools like gpg, ssh and git
-license:
-  - CNPLv7+
+  - transcaffeine <transcaffeine@finally.coffee>
+description: Roles for base services which are common dependencies of other services like databases
+dependencies:
+  "community.docker": "^3.0.0"
+license_file: LICENSE.md
 build_ignore:
   - '*.tar.gz'
 repository: https://git.finally.coffee/finallycoffee/base
-issues: https://git.finally.coffee/finallycoffee/base/issues
+issues: https://codeberg.org/finallycoffee/ansible-collection-base/issues
+tags:
+  - docker
+  - elastic
+  - lego
+  - mariadb
+  - minio
+  - nginx
+  - restic
meta/runtime.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
---

requires_ansible: ">=2.15"
roles/debian-proxmox/README.md (deleted)
@@ -1,20 +0,0 @@
# `debian-proxmox` ansible role

This ansible role can be used to convert a (running and reachable) Debian installation to a Proxmox VE instance.

It automates the instructions from https://pve.proxmox.com/wiki/Install_Proxmox_VE_on_Debian_11_Bullseye.

## Usage

This role will attempt to ensure that `/etc/hosts` is configured correctly for PVE.
The public IP of the server should be given in `debian_proxmox_public_ip` and defaults to
`ansible_facts['eno1'].ipv4.address`.

The hostname and fqdn should be correctly set in `debian_proxmox_hostname` and `debian_proxmox_fqdn`,
and default to `ansible_hostname` and `ansible_fqdn` respectively.

## Packages

It is recommended to remove the `os-prober` package (which would attempt to add VMs as bootable entries
in the bootloader) and the default linux kernel `linux-image-amd64` / `linux-image-5.10*` (and use the
proxmox-provided kernel instead). This role will remove those packages without asking, so be aware.
roles/debian-proxmox/defaults/main.yml (deleted)
@@ -1,43 +0,0 @@
---

debian_proxmox_role_required_packages:
  - python3-apt
  - gpg

debian_proxmox_apt_repo_fingerprint: "28139A2F830BD68478A1A01FDD4BA3917E23BF59"
debian_proxmox_apt_repo_key_url: "https://enterprise.proxmox.com/debian/proxmox-release-{{ ansible_distribution_release }}.gpg"

debian_proxmox_apt_repo_url: "http://download.proxmox.com/debian/pve"
debian_proxmox_apt_repo: "deb [arch=amd64] {{ debian_proxmox_apt_repo_url }} {{ ansible_distribution_release }} pve-no-subscription"

debian_proxmox_apt_packages:
  - "proxmox-ve"
  - "postfix"
  - "open-iscsi"

debian_proxmox_apt_packages_to_remove:
  - "linux-image-amd64"
  - "linux-image-{{ (ansible_facts['kernel'] | split('-') | first | split('.'))[:2] | join('.') }}*"
  - "os-prober"

debian_proxmox_max_reboot_timeout_seconds: 900

debian_proxmox_public_ip: "{{ ansible_facts['eno1'].ipv4.address }}"
debian_proxmox_loopback_ipv4: "{{ ansible_facts['lo'].ipv4.address }}"

debian_proxmox_hostname: "{{ ansible_hostname }}"
debian_proxmox_fqdn: "{{ ansible_fqdn }}"

debian_proxmox_hosts_file:
  - ip: "{{ debian_proxmox_loopback_ipv4 }}"
    fqdn: localhost.localdomain
    aliases:
      - localhost
  - ip: "{{ debian_proxmox_public_ip }}"
    fqdn: "{{ debian_proxmox_fqdn }}"
    aliases:
      - "{{ debian_proxmox_hostname }}"
      - pvelocalhost
  - ip: 127.0.1.1
    fqdn: "{{ debian_proxmox_hostname }}"
    state: absent
roles/debian-proxmox/tasks/main.yml (deleted)
@@ -1,67 +0,0 @@
---

- name: Ensure python3-apt and gpg are available
  apt:
    package: "{{ debian_proxmox_role_required_packages }}"
    state: present

- name: Check if target is Debian before attempting to convert to proxmox
  fail:
    msg: "Target is {{ ansible_distribution }} which is not Debian"
  when: ansible_distribution != 'Debian'

- name: Check if debian version is supported by role
  fail:
    msg: "{{ ansible_distribution }} {{ ansible_distribution_version }} is not supported by the role"
  when: ansible_distribution_version not in debian_proxmox_supported_debian_versions

- name: Ensure /etc/hosts entries are safe for use with proxmox
  lineinfile:
    dest: /etc/hosts
    line: "{{ item.ip }}\t{{ item.fqdn | default('') }}\t{{ item.aliases | default([]) | join('\t') }}"
    regex: "{{ item.ip }}.+"
    state: "{{ item.state | default('present') }}"
  loop: "{{ debian_proxmox_hosts_file }}"

- name: Ensure Proxmox VE apt repository keys are added
  apt_key:
    id: "{{ debian_proxmox_apt_repo_fingerprint }}"
    url: "{{ debian_proxmox_apt_repo_key_url }}"
    state: present

- name: Ensure Proxmox VE apt repository is added
  apt_repository:
    filename: pve-install-repo
    repo: "{{ debian_proxmox_apt_repo }}"
    state: present
  register: proxmox_ve_apt_repo

- name: Ensure APT cache is up to date
  apt:
    update_cache: yes
  when: proxmox_ve_apt_repo.changed

- name: Ensure system is upgraded
  apt:
    upgrade: full

- name: Ensure Proxmox VE packages are installed
  apt:
    package: "{{ debian_proxmox_apt_packages }}"
    state: present
  register: proxmox_ve_installed

- name: Ensure system is rebooted after install of PVE packages
  reboot:
    reboot_timeout: "{{ debian_proxmox_max_reboot_timeout_seconds | int }}"
  when: proxmox_ve_installed.changed

- name: Ensure packages are removed that will conflict with proxmox operation
  apt:
    package: "{{ debian_proxmox_apt_packages_to_remove }}"
    state: absent
  register: proxmox_apt_packages_removed

- name: Ensure grub was updated after the kernel was removed
  command: update-grub
  when: proxmox_apt_packages_removed.changed
roles/debian-proxmox/vars/main.yml (deleted)
@@ -1,4 +0,0 @@
---

debian_proxmox_supported_debian_versions:
  - 11
roles/dns/README.md (new file, 33 lines)
@@ -0,0 +1,33 @@
# `finallycoffee.base.dns` ansible role

Simple role wrapping the
[`famedly.dns.update`](https://github.com/famedly/ansible-collection-dns/blob/main/plugins/modules/update.py)
ansible module.

## Usage

### Example playbook

```yaml
- hosts: "{{ target_hosts }}"
  roles:
    - role: finallycoffee.base.dns
      vars:
        dns_server: "dns.example.org"
        dns_zone: "zone.example.org"
        dns_records: "{{ dns_records }}"
        dns_record_state: exact
        dns_tsig_name: "mykeyname"
        dns_tsig_algo: "hmac-sha256"
        dns_tsig_key: "mykeycontent"
  vars:
    dns_records:
      - type: A
        name: gitea
        content: "127.0.0.1"
      - type: AAAA
        name: gitea
        content: "fe80::1"
      - type: CNAME
        name: "_acme_challenge.gitea"
        content: "delegated-cname.challenge.example.org"
```
roles/dns/defaults/main.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
---
dns_record_state: present
roles/dns/tasks/main.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
---

- name: Ensure DNS records in '{{ dns_zone }}' are up to date
  famedly.dns.update:
    primary_master: "{{ dns_server }}"
    zone: "{{ dns_zone }}"
    tsig_name: "{{ dns_tsig_name }}"
    tsig_algo: "{{ dns_tsig_algo }}"
    tsig_key: "{{ dns_tsig_key }}"
    rr_set: "{{ dns_records }}"
    state: "{{ dns_record_state }}"
roles/elasticsearch/README.md (new file, 22 lines)
@@ -0,0 +1,22 @@
# `finallycoffee.base.elasticsearch`

A simple ansible role which deploys a single-node elastic container to provide
an easy way to do some indexing.

## Usage

By default, `/opt/elasticsearch/data` is used to persist data; it is
customizable using either `elasticsearch_base_path` or `elasticsearch_data_path`.

As elasticsearch can be quite memory-heavy, the maximum amount of allowed RAM
can be configured using `elasticsearch_allocated_ram_mb`, defaulting to 512 (MB).

The cluster name and discovery type can be overridden using
`elasticsearch_config_cluster_name` (default: elastic) and
`elasticsearch_config_discovery_type` (default: single-node), should one
need a multi-node elasticsearch deployment.

By default, no ports or networks are mapped, and explicit mapping using
either ports (`elasticsearch_container_ports`) or networks
(`elasticsearch_container_networks`) is required in order for other services
to use elastic.
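As a usage sketch for the role above: the host pattern, port mapping and RAM size below are illustrative assumptions, not values defined by the role.

```yaml
- hosts: search_hosts  # placeholder host pattern
  become: true
  roles:
    - role: finallycoffee.base.elasticsearch
      vars:
        elasticsearch_allocated_ram_mb: 1024
        # ports are unmapped by default; expose the HTTP API explicitly
        elasticsearch_container_ports:
          - "127.0.0.1:9200:9200"
```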
roles/elasticsearch/defaults/main.yml (new file, 35 lines)
@@ -0,0 +1,35 @@
---

elasticsearch_version: 7.17.7

elasticsearch_base_path: /opt/elasticsearch
elasticsearch_data_path: "{{ elasticsearch_base_path }}/data"

elasticsearch_config_cluster_name: elastic
elasticsearch_config_discovery_type: single-node
elasticsearch_config_boostrap_memory_lock: true
elasticsearch_allocated_ram_mb: 512

elasticsearch_container_image_name: docker.elastic.co/elasticsearch/elasticsearch-oss
elasticsearch_container_image_tag: ~
elasticsearch_container_image: >-
  {{ elasticsearch_container_image_name }}:{{ elasticsearch_container_image_tag | default(elasticsearch_version, true) }}

elasticsearch_container_name: elasticsearch
elasticsearch_container_env:
  "ES_JAVA_OPTS": "-Xms{{ elasticsearch_allocated_ram_mb }}m -Xmx{{ elasticsearch_allocated_ram_mb }}m"
  "cluster.name": "{{ elasticsearch_config_cluster_name }}"
  "discovery.type": "{{ elasticsearch_config_discovery_type }}"
  "bootstrap.memory_lock": "{{ 'true' if elasticsearch_config_boostrap_memory_lock else 'false' }}"
elasticsearch_container_user: ~
elasticsearch_container_ports: ~
elasticsearch_container_labels:
  version: "{{ elasticsearch_version }}"
elasticsearch_container_ulimits:
  # - "memlock:{{ (1.5 * 1024 * elasticsearch_allocated_ram_mb) | int }}:{{ (1.5 * 1024 * elasticsearch_allocated_ram_mb) | int }}"
  - "memlock:-1:-1"
elasticsearch_container_volumes:
  - "{{ elasticsearch_data_path }}:/usr/share/elasticsearch/data:z"
elasticsearch_container_networks: ~
elasticsearch_container_purge_networks: ~
elasticsearch_container_restart_policy: unless-stopped
roles/elasticsearch/tasks/main.yml (new file, 32 lines)
@@ -0,0 +1,32 @@
---

- name: Ensure host directories are present
  file:
    path: "{{ item }}"
    state: directory
    mode: "0777"
  loop:
    - "{{ elasticsearch_base_path }}"
    - "{{ elasticsearch_data_path }}"

- name: Ensure elastic container image is present
  docker_image:
    name: "{{ elasticsearch_container_image }}"
    state: present
    source: pull
    force_source: "{{ elasticsearch_container_image_tag|default(false, true)|bool }}"

- name: Ensure elastic container is running
  docker_container:
    name: "{{ elasticsearch_container_name }}"
    image: "{{ elasticsearch_container_image }}"
    env: "{{ elasticsearch_container_env | default(omit, True) }}"
    user: "{{ elasticsearch_container_user | default(omit, True) }}"
    ports: "{{ elasticsearch_container_ports | default(omit, True) }}"
    labels: "{{ elasticsearch_container_labels | default(omit, True) }}"
    volumes: "{{ elasticsearch_container_volumes }}"
    ulimits: "{{ elasticsearch_container_ulimits }}"
    networks: "{{ elasticsearch_container_networks | default(omit, True) }}"
    purge_networks: "{{ elasticsearch_container_purge_networks | default(omit, True) }}"
    restart_policy: "{{ elasticsearch_container_restart_policy }}"
    state: started
roles/git/README.md (new file, 19 lines)
@@ -0,0 +1,19 @@
# `finallycoffee.base.git` ansible role

This role configures git for the `ansible_user` and can be used
to pre-configure git.

## Examples

```yaml
git_config_user_name: # user name to use for git
git_config_user_email: # email to use for git

git_config_core_editor: vim # editor to use

git_config_credentials:
  - remote_url: https://github.com
    config:
      username: my_github_username
```
roles/gnupg/README.md (new file, 4 lines)
@@ -0,0 +1,4 @@
# `finallycoffee.base.gnupg` ansible role

Configures GnuPG on the target system, including the smart-card daemon (scdaemon),
and can set up gpg-agent as an SSH agent.
roles/lego/README.md (new file, 46 lines)
@@ -0,0 +1,46 @@
# `finallycoffee.base.lego` ansible role

This role can be used to retrieve ACME certificates on the target host. It uses `lego` for that and, with systemd template units, provides an easy way to configure and monitor the status of each certificate.

## Requirements

- `systemd`
- write access to `/tmp` to unpack the lego release tarball during installation
- write access to `/opt/lego` (or whatever `lego_base_path` is set to) for configuration and certificate data
- `become` privileges of the `ansible_user` on the target

## Usage

### Required configuration

- `lego_instance` - used to allow multiple lego jobs to run with systemd template units. Recommended to be set to the CN / first SAN of the certificate.
- `lego_cert_domains` - list of FQDNs to request a certificate for
- `lego_acme_account_email` - when using letsencrypt, a contact email is mandatory

### Proxies / Registries

The role ensures `lego` is downloaded from the GitHub release page. If you are behind a proxy or use a registry like Nexus3, set `lego_release_archive_server`.

### ACME server

By default, the Let's Encrypt staging ACME server is configured. Set `lego_acme_server_url` from `lego_letsencrypt_server_urls.{qa,prod}` or configure your own ACME v2 server directly.

### Certificate

To set which domains to request a certificate for, set them as a list of SANs in `lego_cert_domains`. The default key type is EC256 and can be overridden using `lego_cert_key_type`.

Set the type of challenge in `lego_acme_challenge_type` (to either `http` or `dns`), and `lego_acme_challenge_provider` to, for example, `rfc2136` for DNS challenges using the DNSUPDATE mechanism. If your challenge needs additional data, set that in `lego_command_config` as a dictionary, analogous to `lego_base_command_config` (see [defaults](defaults/main.yml)).

## Trivia

### Architecture

By default, the lego distribution for `linux` on `amd64` is downloaded. If your target needs a different architecture or target OS, adjust this in `lego_os` and `lego_architecture`, cross-checking with the [lego GitHub release page](https://github.com/go-acme/lego/releases/tag/v4.17.4) for upstream availability.

### User management

The role will attempt to create a user and group for each separate lego instance for data isolation (i.e. to avoid leaking a TSIG key from one lego instance to other services). The user and group are of the form `acme-{{ lego_instance }}`. Beware that changing this in `lego_cert_{user,group}` also requires `lego_systemd_{user,group}` to be adjusted!

### Binding to ports < 1024 (HTTP-01 challenge)

Set `lego_binary_allow_net_bind_service: true` to allow the lego binary to bind to ports in the 'privileged' (< 1024) port range.
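A playbook sketch combining the required variables documented above; the domain, email and host pattern are placeholders:

```yaml
- hosts: "{{ target_hosts }}"  # placeholder host pattern
  become: true
  roles:
    - role: finallycoffee.base.lego
      vars:
        lego_instance: "www.example.org"  # CN / first SAN, as recommended above
        lego_cert_domains:
          - "www.example.org"
        lego_acme_account_email: "acme@example.org"
        # switch from the default staging server to production
        lego_acme_server_url: "{{ lego_letsencrypt_server_urls.prod }}"
        lego_acme_challenge_type: "http"
        lego_binary_allow_net_bind_service: true
```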
roles/lego/defaults/main.yml (new file, 71 lines)
@@ -0,0 +1,71 @@
---
lego_user: "lego"
lego_version: "4.19.2"
lego_instance: default
lego_base_path: "/opt/lego"
lego_cert_user: "acme-{{ lego_instance }}"
lego_cert_group: "{{ lego_cert_user }}"
lego_cert_mode: "0640" # rw-r-----
lego_systemd_user: "acme-%i"
lego_systemd_group: "{{ lego_systemd_user }}"
lego_instance_base_path: "{{ lego_base_path }}/instances"
lego_instance_path: "{{ lego_instance_base_path }}/{{ lego_instance }}"

lego_cert_domains: []
lego_cert_key_type: ec256
lego_cert_days_to_renew: 30
lego_acme_account_email: ~
lego_acme_challenge_type: http
lego_acme_challenge_provider: ~
lego_letsencrypt_server_urls:
  qa: "https://acme-staging-v02.api.letsencrypt.org/directory"
  prod: "https://acme-v02.api.letsencrypt.org/directory"
lego_acme_server_url: "{{ lego_letsencrypt_server_urls.qa }}"

lego_base_environment:
  LEGO_CERT_USER: "{{ lego_cert_user }}"
  LEGO_CERT_GROUP: "{{ lego_cert_group }}"
  LEGO_CERT_MODE: "{{ lego_cert_mode }}"
  LEGO_CERT_STORE_PATH: "{{ lego_instance_path }}"
  LEGO_CERT_DAYS_TO_RENEW: "{{ lego_cert_days_to_renew }}"
  LEGO_KEY_TYPE: "{{ lego_cert_key_type }}"
  LEGO_ACME_CHALLENGE_TYPE: "{{ lego_acme_challenge_type }}"
  LEGO_ACME_SERVER: "{{ lego_acme_server_url }}"
  LEGO_COMMAND_ARGS: "{{ lego_command_args }}"

lego_base_command_config:
  server: "{{ lego_acme_server_url }}"
  accept_tos: true
  email: "{{ lego_acme_account_email }}"
  path: "{{ lego_instance_path }}"
  key_type: "{{ lego_cert_key_type }}"

lego_acme_challenge_config: >-
  {{ {lego_acme_challenge_type: lego_acme_challenge_provider} }}

lego_systemd_unit_path: "/etc/systemd/system"
lego_systemd_template_unit_name: "lego@.service"
lego_systemd_template_unit_file: "{{ lego_systemd_template_unit_name }}.j2"
lego_systemd_service_name: "lego@{{ lego_instance }}.service"
lego_systemd_environment: >-
  {{ lego_base_environment | combine(lego_environment | default({})) }}
lego_full_command_config: >-
  {{ lego_base_command_config
     | combine(lego_acme_challenge_config)
     | combine(lego_command_config | default({})) }}

lego_systemd_timer_name: "lego-{{ lego_instance }}.timer"
lego_systemd_timer_template: lego.timer.j2
lego_systemd_timer_calendar: "*-*-* *:00/15:00"

lego_architecture: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"
lego_os: "linux"
lego_binary_allow_net_bind_service: false

lego_release_archive_server: "https://github.com"
lego_release_archive_filename: >-
  lego_v{{ lego_version }}_{{ lego_os }}_{{ lego_architecture }}.tar.gz
lego_release_archive_url: >-
  {{ lego_release_archive_server }}/go-acme/lego/releases/download/v{{ lego_version }}/{{ lego_release_archive_filename }}
lego_release_archive_file_path: "/tmp/{{ lego_release_archive_filename }}"
lego_release_archive_path: "/tmp/lego_v{{ lego_version }}_{{ lego_os }}_{{ lego_architecture }}"
roles/lego/files/lego_run.sh (new file, 22 lines)
@@ -0,0 +1,22 @@
#!/usr/bin/env bash

LEGO_BINARY=$(/usr/bin/env which lego)

# If a fallback port is configured and something already listens on the
# primary HTTP port, switch to the fallback port.
if [[ -n "$LEGO_HTTP_FALLBACK_PORT" ]]; then
    nc -z 127.0.0.1 $LEGO_HTTP_PORT;
    if [[ $? -eq 0 ]]; then
        LEGO_HTTP_PORT=$LEGO_HTTP_FALLBACK_PORT
    fi
fi

LEGO_COMMAND_ARGS_EXPANDED=$(bash -c "echo $LEGO_COMMAND_ARGS") # This is a bit icky

# If certificates already exist in the store, renew them; otherwise obtain them.
FILES_IN_DIR=$(find "$LEGO_CERT_STORE_PATH/certificates" | wc -l)
if [[ $FILES_IN_DIR -gt 2 ]]; then
    $LEGO_BINARY $LEGO_COMMAND_ARGS_EXPANDED renew --days=$LEGO_CERT_DAYS_TO_RENEW
else
    $LEGO_BINARY $LEGO_COMMAND_ARGS_EXPANDED run
fi

# Fix mode and ownership of the resulting certificate files.
ls "$LEGO_CERT_STORE_PATH/certificates" | xargs -I{} -n 1 chmod "$LEGO_CERT_MODE" "$LEGO_CERT_STORE_PATH/certificates/{}"
ls "$LEGO_CERT_STORE_PATH/certificates" | xargs -I{} -n 1 chown "$LEGO_CERT_USER":"$LEGO_CERT_GROUP" "$LEGO_CERT_STORE_PATH/certificates/{}"
roles/lego/handlers/main.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
---
- name: Ensure systemd daemon is reloaded
  ansible.builtin.systemd:
    daemon_reload: true
  listen: systemd_reload
roles/lego/tasks/main.yml (new file, 157 lines)
@@ -0,0 +1,157 @@
---
- name: Ensure lego cert group is created
  ansible.builtin.group:
    name: "{{ lego_cert_group }}"
    state: present
    system: true

- name: Ensure lego cert user is created
  ansible.builtin.user:
    name: "{{ lego_cert_user }}"
    state: present
    system: true
    create_home: false
    groups:
      - "{{ lego_cert_group }}"
    append: true

- name: Ensure lego user is created
  ansible.builtin.user:
    name: "{{ lego_user }}"
    state: present
    system: true
    create_home: false
    groups:
      - "{{ lego_cert_group }}"
    append: true

- name: Ensure lego is installed
  block:
    - name: Check if lego is present
      ansible.builtin.command:
        cmd: which lego
      changed_when: false
      failed_when: false
      register: lego_binary_info

    - name: Download lego release archive
      ansible.builtin.get_url:
        url: "{{ lego_release_archive_url }}"
        url_username: "{{ lego_release_archive_url_username | default(omit) }}"
        url_password: "{{ lego_release_archive_url_password | default(omit) }}"
        dest: "{{ lego_release_archive_file_path }}"
      when: lego_binary_info.rc != 0

    - name: Create folder to uncompress into
      ansible.builtin.file:
        dest: "{{ lego_release_archive_path }}"
        state: directory
      when: lego_binary_info.rc != 0

    - name: Uncompress lego release archive
      ansible.builtin.unarchive:
        src: "{{ lego_release_archive_file_path }}"
        dest: "{{ lego_release_archive_path }}"
        remote_src: true
      when: lego_binary_info.rc != 0

    - name: Ensure lego binary is present in PATH
      ansible.builtin.copy:
        src: "{{ lego_release_archive_path }}/lego"
        dest: "/usr/local/bin/lego"
        mode: "u+rwx,g+rx,o+rx"
        remote_src: true
      when: lego_binary_info.rc != 0

    - name: Ensure lego is allowed to bind to ports < 1024
      community.general.capabilities:
        path: "/usr/local/bin/lego"
        capability: "cap_net_bind_service+ep"
        state: present
      when: lego_binary_allow_net_bind_service

    - name: Ensure intermediate data is gone
      ansible.builtin.file:
        path: "{{ item }}"
        state: absent
      loop:
        - "{{ lego_release_archive_path }}"
        - "{{ lego_release_archive_file_path }}"
      when: lego_binary_info.rc != 0

- name: Ensure lego base path exists
  ansible.builtin.file:
    path: "{{ lego_base_path }}"
    state: directory
    mode: "0755"

- name: Ensure template unit file is present
  ansible.builtin.template:
    src: "{{ lego_systemd_template_unit_file }}"
    dest: "{{ lego_systemd_unit_path }}/{{ lego_systemd_template_unit_name }}"
  notify:
    - systemd_reload

- name: Ensure env file is templated
  ansible.builtin.copy:
    content: |+
      {% for entry in lego_systemd_environment | dict2items %}
      {{ entry.key }}={{ entry.value }}
      {% endfor %}
    dest: "{{ lego_base_path }}/{{ lego_instance }}.conf"

- name: Ensure timer unit is templated
  ansible.builtin.template:
    src: "{{ lego_systemd_timer_template }}"
    dest: "{{ lego_systemd_unit_path }}/{{ lego_systemd_timer_name }}"
  notify:
    - systemd_reload

- name: Ensure handling script is present
  ansible.builtin.copy:
    src: "lego_run.sh"
    dest: "{{ lego_base_path }}/run.sh"
    mode: "0755"

- name: Ensure per-instance base path is created
  ansible.builtin.file:
    path: "{{ lego_instance_path }}"
    state: directory
    owner: "{{ lego_cert_user }}"
    group: "{{ lego_cert_group }}"
    mode: "0755"

- name: Ensure per-instance sub folders are created with correct permissions
  ansible.builtin.file:
    path: "{{ item.path }}"
    state: directory
    owner: "{{ item.owner | default(lego_cert_user) }}"
    group: "{{ item.group | default(lego_cert_group) }}"
    mode: "{{ item.mode }}"
  loop:
    - path: "{{ lego_instance_path }}/secrets"
      mode: "0750"
    - path: "{{ lego_instance_path }}/accounts"
      mode: "0770"
    - path: "{{ lego_instance_path }}/certificates"
      mode: "0775"
  loop_control:
    label: "{{ item.path }}"

- name: Ensure systemd daemon is reloaded
  meta: flush_handlers

- name: Ensure systemd timer is enabled
  ansible.builtin.systemd_service:
    name: "{{ lego_systemd_timer_name }}"
    enabled: true

- name: Ensure systemd timer is started
  ansible.builtin.systemd_service:
    name: "{{ lego_systemd_timer_name }}"
    state: "started"

- name: Ensure systemd service is started once to obtain the certificate
  ansible.builtin.systemd_service:
    name: "{{ lego_systemd_service_name }}"
    state: "started"
roles/lego/templates/lego.timer.j2 (new file, 9 lines)
@@ -0,0 +1,9 @@
[Unit]
Description=Run lego@{{ lego_instance }}.service

[Timer]
OnCalendar={{ lego_systemd_timer_calendar }}
Unit=lego@{{ lego_instance }}.service

[Install]
WantedBy=timers.target
roles/lego/templates/lego@.service.j2 (new file, 14 lines)
@@ -0,0 +1,14 @@
[Unit]
Description=Run lego (letsencrypt client in go)

[Service]
Type=oneshot
EnvironmentFile={{ lego_base_path }}/%i.conf
User={{ lego_systemd_user }}
Group={{ lego_systemd_group }}
ExecStart={{ lego_base_path }}/run.sh
AmbientCapabilities=CAP_NET_BIND_SERVICE

[Install]
WantedBy=basic.target
DefaultInstance=default
roles/lego/vars/main.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
---

lego_domain_command_args: >-
  {% for domain in lego_cert_domains %}
  --domains={{ domain }}
  {%- endfor %}

lego_config_command_args: >-
  {% for key in lego_full_command_config %}
  --{{ key | replace("_", "-") }}
  {%- if lego_full_command_config[key] != None and lego_full_command_config[key] != '' -%}
  ={{ lego_full_command_config[key] }}
  {%- endif -%}
  {%- endfor -%}

lego_command_args: "{{ lego_domain_command_args }} {{ lego_config_command_args }}"
roles/mariadb/README.md (new file, 19 lines)
@@ -0,0 +1,19 @@
# `finallycoffee.base.mariadb` ansible role

This role deploys a MariaDB instance in a docker container.

## Usage

The role expects the following variables to be populated with values and/or secrets:

```yaml
mariadb_root_password: # mariadb root password
mariadb_database: # name of the database to create
mariadb_username: # name of a user to auto-create and assign permissions on the mariadb_database
mariadb_password: # password of the user in mariadb_username
```

## Requirements

- Docker installed
- python-docker present on the target system so ansible can talk to the docker API
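A minimal playbook sketch wiring up the variables above; the host pattern and vault references are placeholders:

```yaml
- hosts: database_hosts  # placeholder host pattern
  become: true
  roles:
    - role: finallycoffee.base.mariadb
      vars:
        mariadb_root_password: "{{ vault_mariadb_root_password }}"  # placeholder secret
        mariadb_database: "myapp"
        mariadb_username: "myapp"
        mariadb_password: "{{ vault_mariadb_password }}"  # placeholder secret
```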
roles/mariadb/defaults/main.yml (new file, 49 lines)
@@ -0,0 +1,49 @@
---
mariadb_version: "10.11.9"
mariadb_base_path: /var/lib/mariadb
mariadb_data_path: >-2
  {{ mariadb_base_path }}/{{ mariadb_version | split('.') | first }}
mariadb_state: present

mariadb_root_password: ~
mariadb_database: ~
mariadb_username: ~
mariadb_password: ~

mariadb_container_base_environment:
  MARIADB_ROOT_PASSWORD: "{{ mariadb_root_password }}"
mariadb_container_extra_environment: {}

mariadb_container_image_registry: docker.io
mariadb_container_image_namespace: ~
mariadb_container_image_name: mariadb
mariadb_container_image_tag: ~
mariadb_container_image: >-2
  {{
    ([
      mariadb_container_image_registry | default([], true),
      mariadb_container_image_namespace | default([], true),
      mariadb_container_image_name,
    ] | flatten | join('/'))
    + ':' + mariadb_container_image_tag | default(mariadb_version, true)
  }}
mariadb_container_image_source: pull
mariadb_container_image_force_source: >-2
  {{ mariadb_container_image_tag | default(false, true) | bool }}

mariadb_container_name: mariadb
mariadb_container_base_volumes:
  - "{{ mariadb_data_path }}:{{ mariadb_container_data_path }}:z"
mariadb_container_extra_volumes: []
mariadb_container_base_labels:
  version: "{{ mariadb_version }}"
mariadb_container_extra_labels: {}
mariadb_container_restart_policy: "unless-stopped"
mariadb_container_environment: >-2
  {{ mariadb_container_base_environment
     | combine(mariadb_container_database_environment
               if (mariadb_database and mariadb_username and mariadb_password)
               else {}, recursive=True)
     | combine(mariadb_container_extra_environment) }}
mariadb_container_state: >-2
  {{ (mariadb_state == 'present') | ternary('started', 'absent') }}
roles/mariadb/tasks/main.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
---
- name: Ensure mariadb container image '{{ mariadb_container_image }}' is {{ mariadb_state }}
  community.docker.docker_image:
    name: "{{ mariadb_container_image }}"
    state: "{{ mariadb_state }}"
    source: "{{ mariadb_container_image_source }}"
    force_source: "{{ mariadb_container_image_force_source }}"

- name: Ensure mariadb container '{{ mariadb_container_name }}' is {{ mariadb_container_state }}
  community.docker.docker_container:
    name: "{{ mariadb_container_name }}"
    image: "{{ mariadb_container_image }}"
    env: "{{ mariadb_container_environment }}"
    ports: "{{ mariadb_container_ports | default(omit, true) }}"
    labels: "{{ mariadb_container_labels | default(omit, true) }}"
    volumes: "{{ mariadb_container_volumes }}"
    networks: "{{ mariadb_container_networks | default(omit, true) }}"
    etc_hosts: "{{ mariadb_container_etc_hosts | default(omit, true) }}"
    purge_networks: "{{ mariadb_container_purge_networks | default(omit, true) }}"
    restart_policy: "{{ mariadb_container_restart_policy }}"
    state: "{{ mariadb_container_state }}"
roles/mariadb/vars/main.yml (new file, 10 lines)
@@ -0,0 +1,10 @@
---

mariadb_container_database_environment:
  MARIADB_DATABASE: "{{ mariadb_database }}"
  MARIADB_USER: "{{ mariadb_username }}"
  MARIADB_PASSWORD: "{{ mariadb_password }}"

mariadb_container_data_path: /var/lib/mysql
mariadb_container_volumes: "{{ mariadb_container_base_volumes + mariadb_container_extra_volumes }}"
mariadb_container_labels: "{{ mariadb_container_base_labels | combine(mariadb_container_extra_labels, recursive=True) }}"
roles/minio/README.md (new file, 29 lines)
@@ -0,0 +1,29 @@
# `finallycoffee.base.minio` ansible role

## Overview

This role deploys a [min.io](https://min.io) server (s3-compatible object storage server)
using the official docker container image.

## Configuration

The role requires setting the password for the `root` user (name can be changed by
setting `minio_root_username`) in `minio_root_password`. That user has full control
over the minio-server instance.

### Useful config hints

Most configuration is done by setting environment variables in
`minio_container_extra_env`, for example:

```yaml
minio_container_extra_env:
  # disable the "console" web browser UI
  MINIO_BROWSER: "off"
  # enable public prometheus metrics on `/minio/v2/metrics/cluster`
  MINIO_PROMETHEUS_AUTH_TYPE: public
```

When serving minio (or any s3-compatible server) on a "subfolder",
see https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTRedirect.html
and https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html
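A usage sketch for the role above; the host pattern, port mappings and vault reference are placeholders:

```yaml
- hosts: storage_hosts  # placeholder host pattern
  become: true
  roles:
    - role: finallycoffee.base.minio
      vars:
        minio_root_password: "{{ vault_minio_root_password }}"  # placeholder secret
        minio_container_ports:
          - "127.0.0.1:9000:9000"  # S3 API (minio_container_listen_port_api)
          - "127.0.0.1:8900:8900"  # console (minio_container_listen_port_console)
```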
roles/minio/defaults/main.yml (new file, 41 lines)
@@ -0,0 +1,41 @@
---

minio_user: ~
minio_data_path: /opt/minio

minio_create_user: false
minio_manage_host_filesystem: false

minio_root_username: root
minio_root_password: ~

minio_container_name: minio
minio_container_image_name: docker.io/minio/minio
minio_container_image_tag: latest
minio_container_image: "{{ minio_container_image_name }}:{{ minio_container_image_tag }}"
minio_container_networks: []
minio_container_ports: []

minio_container_base_volumes:
  - "{{ minio_data_path }}:{{ minio_container_data_path }}:z"
minio_container_extra_volumes: []

minio_container_base_env:
  MINIO_ROOT_USER: "{{ minio_root_username }}"
  MINIO_ROOT_PASSWORD: "{{ minio_root_password }}"
minio_container_extra_env: {}

minio_container_labels: {}

minio_container_command:
  - "server"
  - "{{ minio_container_data_path }}"
  - "--console-address"
  - ":{{ minio_container_listen_port_console }}"
minio_container_restart_policy: "unless-stopped"
minio_container_image_force_source: "{{ (minio_container_image_tag == 'latest')|bool }}"

minio_container_listen_port_api: 9000
minio_container_listen_port_console: 8900

minio_container_data_path: /storage
roles/minio/tasks/main.yml (new file, 37 lines)
@@ -0,0 +1,37 @@
---

- name: Ensure minio run user is present
  user:
    name: "{{ minio_user }}"
    state: present
    system: yes
  when: minio_create_user

- name: Ensure filesystem mounts ({{ minio_data_path }}) for container volumes are present
  file:
    path: "{{ minio_data_path }}"
    state: directory
    owner: "{{ minio_user|default(omit, True) }}"
    group: "{{ minio_user|default(omit, True) }}"
  when: minio_manage_host_filesystem

- name: Ensure container image for minio is present
  community.docker.docker_image:
    name: "{{ minio_container_image }}"
    state: present
    source: pull
    force_source: "{{ minio_container_image_force_source }}"

- name: Ensure container {{ minio_container_name }} is running
  docker_container:
    name: "{{ minio_container_name }}"
    image: "{{ minio_container_image }}"
    volumes: "{{ minio_container_volumes }}"
    env: "{{ minio_container_env }}"
    labels: "{{ minio_container_labels }}"
    networks: "{{ minio_container_networks }}"
    ports: "{{ minio_container_ports }}"
    user: "{{ minio_user|default(omit, True) }}"
    command: "{{ minio_container_command }}"
    restart_policy: "{{ minio_container_restart_policy }}"
    state: started
roles/minio/vars/main.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
---

minio_container_volumes: "{{ minio_container_base_volumes + minio_container_extra_volumes }}"

minio_container_env: "{{ minio_container_base_env | combine(minio_container_extra_env) }}"
roles/nginx/README.md (new file, 33 lines)
@@ -0,0 +1,33 @@
# `finallycoffee.base.nginx` ansible role

## Description

Runs `nginx`, an HTTP reverse proxy, in a docker container.

## Usage

For the role to do anything, `nginx_config` needs to be populated with the configuration for nginx.
An example would be:

```yaml
nginx_config: |+
  server {
    listen 80 default_server;
    server_name my.server.fqdn;
    location / { return 200; }
  }
```

The container is named `nginx` by default; this can be overridden in `nginx_container_name`.
When running this role multiple times, `nginx_base_path` should also be changed for each run,
otherwise the configuration files collide in the filesystem.

For exposing this server to the host and/or internet, the `nginx_container_ports` (port forwarding
from host to container), `nginx_container_networks` (docker networking) or `nginx_container_labels`
(for label-based routing discovery like traefik) can be used. The options correspond to the arguments
of the `community.docker.docker_container` module.

## Deployment methods

Set `nginx_deployment_method` to either `docker` or `podman` to use the respective ansible modules for
creating and managing the container and its image. See all supported methods in `nginx_deployment_methods`.
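For illustration, an invocation that deploys via podman and publishes port 80; the host pattern and server block below are placeholders:

```yaml
- hosts: web_hosts  # placeholder host pattern
  become: true
  roles:
    - role: finallycoffee.base.nginx
      vars:
        nginx_deployment_method: podman
        nginx_container_ports:
          - "80:80"
        nginx_config: |+
          server {
            listen 80 default_server;
            location / { return 200; }
          }
```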
roles/nginx/defaults/main.yml (new file, 37 lines)
@@ -0,0 +1,37 @@
---
nginx_version: "1.27.2"
nginx_flavour: alpine
nginx_base_path: /opt/nginx
nginx_config_file: "{{ nginx_base_path }}/nginx.conf"
nginx_state: present
nginx_deployment_method: docker

nginx_container_name: nginx
nginx_container_image_reference: >-
  {{
    nginx_container_image_repository
    + ':' + (nginx_container_image_tag
             | default(nginx_version
                       + (('-' + nginx_flavour) if nginx_flavour is defined else ''), true))
  }}
nginx_container_image_repository: >-
  {{
    (
      container_registries[nginx_container_image_registry]
      | default(nginx_container_image_registry)
    )
    + '/'
    + nginx_container_image_namespace | default('')
    + nginx_container_image_name
  }}
nginx_container_image_registry: "docker.io"
nginx_container_image_name: "nginx"
nginx_container_image_tag: ~
nginx_container_image_source: pull
nginx_container_state: >-2
  {{ (nginx_state == 'present') | ternary('started', 'absent') }}

nginx_container_restart_policy: "unless-stopped"
nginx_container_volumes:
  - "{{ nginx_config_file }}:/etc/nginx/conf.d/nginx.conf:ro"
roles/nginx/handlers/main.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
---

- name: Ensure nginx container '{{ nginx_container_name }}' is restarted
  community.docker.docker_container:
    name: "{{ nginx_container_name }}"
    state: started
    restart: true
  listen: restart-nginx
roles/nginx/meta/main.yml (new file, 12 lines)
@@ -0,0 +1,12 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
  role_name: nginx
  description: Deploy nginx, a webserver
  galaxy_tags:
    - nginx
    - http
    - webserver
    - docker
    - podman
roles/nginx/tasks/deploy-docker.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
---
- name: Ensure docker container image '{{ nginx_container_image_reference }}' is {{ nginx_state }}
  community.docker.docker_image:
    name: "{{ nginx_container_image_reference }}"
    state: "{{ nginx_state }}"
    source: "{{ nginx_container_image_source }}"
    force_source: >-2
      {{ nginx_container_image_force_source
         | default(nginx_container_image_tag | default(false, true)) }}
  register: nginx_container_image_info
  until: nginx_container_image_info is success
  retries: 5
  delay: 3

- name: Ensure docker container '{{ nginx_container_name }}' is {{ nginx_container_state }}
  community.docker.docker_container:
    name: "{{ nginx_container_name }}"
    image: "{{ nginx_container_image_reference }}"
    env: "{{ nginx_container_env | default(omit, true) }}"
    user: "{{ nginx_container_user | default(omit, true) }}"
    ports: "{{ nginx_container_ports | default(omit, true) }}"
    labels: "{{ nginx_container_labels | default(omit, true) }}"
    volumes: "{{ nginx_container_volumes | default(omit, true) }}"
    etc_hosts: "{{ nginx_container_etc_hosts | default(omit, true) }}"
    networks: "{{ nginx_container_networks | default(omit, true) }}"
    purge_networks: "{{ nginx_container_purge_networks | default(omit, true) }}"
    restart_policy: "{{ nginx_container_restart_policy }}"
    state: "{{ nginx_container_state }}"
roles/nginx/tasks/deploy-podman.yml (new file, 27 lines)
@@ -0,0 +1,27 @@
---
- name: Ensure container image '{{ nginx_container_image_reference }}' is {{ nginx_state }}
  containers.podman.podman_image:
    name: "{{ nginx_container_image_reference }}"
    state: "{{ nginx_state }}"
    pull: "{{ nginx_container_image_source == 'pull' }}"
    force: >-2
      {{ nginx_container_image_force_source
         | default(nginx_container_image_tag | default(false, true)) }}
  register: nginx_container_image_info
  until: nginx_container_image_info is success
  retries: 5
  delay: 3

- name: Ensure container '{{ nginx_container_name }}' is {{ nginx_container_state }}
  containers.podman.podman_container:
    name: "{{ nginx_container_name }}"
    image: "{{ nginx_container_image_reference }}"
    env: "{{ nginx_container_env | default(omit, true) }}"
    user: "{{ nginx_container_user | default(omit, true) }}"
    ports: "{{ nginx_container_ports | default(omit, true) }}"
    labels: "{{ nginx_container_labels | default(omit, true) }}"
    volumes: "{{ nginx_container_volumes | default(omit, true) }}"
    etc_hosts: "{{ nginx_container_etc_hosts | default(omit, true) }}"
    network: "{{ nginx_container_networks | default(omit, true) }}"
    restart_policy: "{{ nginx_container_restart_policy }}"
    state: "{{ nginx_container_state }}"
|
40
roles/nginx/tasks/main.yml
Normal file
40
roles/nginx/tasks/main.yml
Normal file
@ -0,0 +1,40 @@
|
||||
---
|
||||
- name: Check if state is supported
|
||||
ansible.builtin.fail:
|
||||
msg: >-2
|
||||
Unsupported state '{{ nginx_state }}'. Supported
|
||||
states are {{ nginx_states | join(', ') }}.
|
||||
when: nginx_state not in nginx_states
|
||||
|
||||
- name: Check if deployment_method is supported
|
||||
ansible.builtin.fail:
|
||||
msg: >-2
|
||||
Unsupported state '{{ nginx_deployment_method }}'. Supported
|
||||
states are {{ nginx_deployment_methods | join(', ') }}.
|
||||
when: nginx_deployment_method not in nginx_deployment_methods
|
||||
|
||||
- name: Ensure nginx config file is {{ nginx_state }}
|
||||
ansible.builtin.file:
|
||||
path: "{{ nginx_config_file }}"
|
||||
state: "{{ nginx_state }}"
|
||||
when: nginx_state == 'absent'
|
||||
|
||||
- name: Ensure base path '{{ nginx_base_path }}' is {{ nginx_state }}
|
||||
ansible.builtin.file:
|
||||
path: "{{ nginx_base_path }}"
|
||||
mode: "0755"
|
||||
state: >-2
|
||||
{{ (nginx_state == 'present') | ternary('directory', 'absent') }}
|
||||
|
||||
- name: Ensure nginx config file is templated
|
||||
ansible.builtin.copy:
|
||||
dest: "{{ nginx_config_file }}"
|
||||
content: "{{ nginx_config }}"
|
||||
mode: 0640
|
||||
notify:
|
||||
- restart-nginx
|
||||
when: nginx_state == 'present'
|
||||
|
||||
- name: Deploy using {{ nginx_deployment_method }}
|
||||
ansible.builtin.include_tasks:
|
||||
file: "deploy-{{ nginx_deployment_method }}.yml"
|
roles/nginx/vars/main.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
---
nginx_states:
  - present
  - absent
nginx_deployment_methods:
  - docker
  - podman
roles/powerdns_tsig_key/README.md (new file, 25 lines)
@@ -0,0 +1,25 @@
# `finallycoffee.base.powerdns_tsig_key`

Simple ansible role for ensuring a TSIG key is present in a given PowerDNS
instance.

## Usage

The usage example below assumes `powerdns` is running in a container named `powerdns` (as supplied to `powerdns_tsig_key_container_name`).

```yaml
- hosts: "{{ target_hosts }}"
  become: true
  roles:
    - role: finallycoffee.base.powerdns_tsig_key
      vars:
        powerdns_tsig_key_name: "nameofmykey"
        powerdns_tsig_key_path: "/var/lib/myapp/tsig.key"
        powerdns_tsig_key_algo: "hmac-sha512"
        powerdns_tsig_key_path_owner: "myappuser"
        powerdns_tsig_key_path_group: "myappgroup"
        powerdns_tsig_key_container_name: 'powerdns'
```

> [!NOTE]
> Support for non-docker deployments is pending.
roles/powerdns_tsig_key/defaults/main.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
---
powerdns_tsig_key_container_name: powerdns
roles/powerdns_tsig_key/tasks/main.yml (new file, 104 lines)
@@ -0,0 +1,104 @@
---
- name: Ensure unix group '{{ powerdns_tsig_key_path_group }}' exists
  ansible.builtin.group:
    name: "{{ powerdns_tsig_key_path_group }}"
    state: "present"
    system: true
  register: powerdns_tsig_key_path_group_info
  when: powerdns_tsig_key_path_group is defined

- name: Ensure unix user '{{ powerdns_tsig_key_path_owner }}' exists
  ansible.builtin.user:
    name: "{{ powerdns_tsig_key_path_owner }}"
    state: "present"
    system: true
    create_home: false
    groups: "{{ powerdns_tsig_key_path_group is defined | ternary([powerdns_tsig_key_path_group], omit) }}"
    append: "{{ powerdns_tsig_key_path_group is defined | ternary(true, omit) }}"
  register: powerdns_tsig_key_path_owner_info
  when: powerdns_tsig_key_path_owner is defined

- name: Check if TSIG key is already present
  ansible.builtin.stat:
    path: "{{ powerdns_tsig_key_path }}"
  register: powerdns_tsig_key_info

- name: Ensure TSIG key directory is present
  ansible.builtin.file:
    path: "{{ powerdns_tsig_key_path | dirname }}"
    state: directory
    owner: "{{ powerdns_tsig_key_path_owner | default(omit) }}"
    group: "{{ powerdns_tsig_key_path_group | default(omit) }}"
    mode: "u+rwX,g+rX"
    recurse: true

- name: Ensure a TSIG key is configured and persisted
  when: >-
    not powerdns_tsig_key_info.stat.exists
    or powerdns_tsig_key_info.stat.size == 0
  block:
    - name: Ensure TSIG key is not already present
      community.docker.docker_container_exec:
        container: "{{ powerdns_tsig_key_container_name }}"
        command: "pdnsutil list-tsig-keys"
      delegate_to: "{{ powerdns_tsig_key_hostname }}"
      register: powerdns_tsig_key_powerdns_info
      changed_when: false
      check_mode: false
      become: true

    - name: Ensure TSIG key is generated in powerdns
      community.docker.docker_container_exec:
        container: "{{ powerdns_tsig_key_container_name }}"
        command: "pdnsutil generate-tsig-key '{{ powerdns_tsig_key_name }}' '{{ powerdns_tsig_key_algo }}'"
      when: >-
        (powerdns_tsig_key_name ~ '. ' ~ powerdns_tsig_key_algo ~ '. ')
        not in powerdns_tsig_key_powerdns_info.stdout
      delegate_to: "{{ powerdns_tsig_key_hostname }}"
      register: powerdns_tsig_key_powerdns_generated_tsig_key
      throttle: 1
      become: true

    - name: Ensure PowerDNS is restarted
      community.docker.docker_container:
        name: "{{ powerdns_tsig_key_container_name }}"
        state: started
        restart: true
      when: >-
        (powerdns_tsig_key_name ~ '. ' ~ powerdns_tsig_key_algo ~ '. ')
        not in powerdns_tsig_key_powerdns_info.stdout
      delegate_to: "{{ powerdns_tsig_key_hostname }}"
      throttle: 1
      become: true

    - name: Extract TSIG key into variable
      ansible.builtin.set_fact:
        powerdns_tsig_key_key: >-
          {{
            (powerdns_tsig_key_powerdns_generated_tsig_key.stdout | trim | split(' ') | list | last)
            if (powerdns_tsig_key_name ~ '. ' ~ powerdns_tsig_key_algo ~ '. ')
            not in powerdns_tsig_key_powerdns_info.stdout
            else (powerdns_generated_tsig_key | trim | split(' ') | list | last)
          }}
      vars:
        powerdns_generated_tsig_key: >-
          {% for line in powerdns_tsig_key_powerdns_info.stdout_lines %}
          {% if powerdns_tsig_key_name in line %}
          {{ line }}
          {% endif %}
          {% endfor %}

    - name: Ensure TSIG key is persisted into {{ powerdns_tsig_key_path }}
      ansible.builtin.copy:
        content: "{{ powerdns_tsig_key_key }}"
        dest: "{{ powerdns_tsig_key_path }}"
        owner: "{{ powerdns_tsig_key_path_owner | default(omit) }}"
        group: "{{ powerdns_tsig_key_path_group | default(omit) }}"
        mode: "0640"

- name: Ensure TSIG key permissions on {{ powerdns_tsig_key_path }} are correct
  ansible.builtin.file:
    path: "{{ powerdns_tsig_key_path }}"
    owner: "{{ powerdns_tsig_key_path_owner | default(omit) }}"
    group: "{{ powerdns_tsig_key_path_group | default(omit) }}"
    mode: "u+rwX,g+rwX"
77
roles/restic/README.md
Normal file
77
roles/restic/README.md
Normal file
@ -0,0 +1,77 @@
|
||||
# `finallycoffee.base.restic`
|
||||
|
||||
Ansible role for backup up data using `restic`, utilizing `systemd` timers for scheduling.
|
||||
|
||||
## Overview
|
||||
|
||||
As restic encrypts the data before storing it, the `restic_repo_password` needs
|
||||
to be populated with a strong key, and saved accordingly as only this key can
|
||||
be used to decrypt the data for a restore!
|
||||
|
||||
### Backends
|
||||
|
||||
#### S3 Backend
|
||||
|
||||
To use a `s3`-compatible backend like AWS buckets or minio, both `restic_s3_key_id`
|
||||
and `restic_s3_access_key` need to be populated, and the `restic_repo_url` has the
|
||||
format `s3:https://my.s3.endpoint:port/bucket-name`.
|
||||
|
||||
#### SFTP Backend
|
||||
|
||||
Using the `sftp` backend requires the configured `restic_user` to be able to
|
||||
authenticate to the configured SFTP-Server using password-less methods like
|
||||
publickey-authentication. The `restic_repo_url` then follows the format
|
||||
`sftp:{user}@{server}:/my-restic-repository` (or without leading `/` for relative
|
||||
paths to the `{user}`s home directory.
|
||||
|
||||
### Backing up data
|
||||
|
||||
A job name like `$service-postgres` or similar needs to be set in `restic_job_name`,
|
||||
which is used for naming the `systemd` units, their syslog identifiers etc.
|
||||
|
||||
If backing up filesystem locations, the paths need to be specified in
|
||||
`restic_backup_paths` as lists of strings representing absolute filesystem
|
||||
locations.
|
||||
|
||||
If backing up f.ex. database or other data which is generating backups using
|
||||
a command like `pg_dump`, use `restic_backup_stdin_command` (which needs to output
|
||||
to `stdout`) in conjunction with `restic_backup_stdin_command_filename` to name
|
||||
the resulting output (required).
|
||||
|
||||
### Policy
|
||||
|
||||
The backup policy can be adjusted by overriding the `restic_policy_keep_*`
|
||||
variables, with the defaults being:
|
||||
|
||||
```yaml
|
||||
restic_policy_keep_all_within: 1d
|
||||
restic_policy_keep_hourly: 6
|
||||
restic_policy_keep_daily: 2
|
||||
restic_policy_keep_weekly: 7
|
||||
restic_policy_keep_monthly: 4
|
||||
restic_policy_backup_frequency: hourly
|
||||
```
|
||||
|
||||
**Note:** `restic_policy_backup_frequency` must conform to `systemd`s
|
||||
`OnCalendar` syntax, which can be checked using `systemd-analyze calender $x`.
|
||||
|
## Role behaviour

By default, when the systemd unit for a job changes, the job is not immediately
started. This can be overridden using `restic_start_job_on_unit_change: true`,
which will immediately start the backup job if its configuration changed.

The systemd unit runs as `restic_user`, which is root by default, guaranteeing
that filesystem paths are always readable. The `restic_user` can be overridden,
but care needs to be taken to ensure the user has permission to read all the
provided filesystem paths and to execute the backup command, if one is given.

If ansible should create the user, set `restic_create_user` to `true`, which
will attempt to create the `restic_user` as a system user.
### Installing

For Debian and RedHat, the role attempts to install restic using the respective
package manager's ansible module (apt/dnf). For other distributions, the generic
`package` module tries to install `restic_package_name` (default: `restic`),
which can be overridden if needed.
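Putting it all together, a minimal play using this role could look like the
following sketch (the host group, job name, repository and paths are
assumptions):

```yaml
- name: Back up service data using restic
  hosts: backup_targets
  roles:
    - role: finallycoffee.base.restic
      vars:
        restic_job_name: my-service-data
        restic_repo_url: "s3:https://s3.example.org:9000/my-backups"
        restic_repo_password: "{{ vault_restic_repo_password }}"
        restic_s3_key_id: "{{ vault_restic_s3_key_id }}"
        restic_s3_access_key: "{{ vault_restic_s3_access_key }}"
        restic_backup_paths:
          - /var/lib/my-service
```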
60
roles/restic/defaults/main.yml
Normal file
@ -0,0 +1,60 @@
---

restic_repo_url: ~
restic_repo_password: ~
restic_s3_key_id: ~
restic_s3_access_key: ~

restic_backup_paths: []
restic_backup_stdin_command: ~
restic_backup_stdin_command_filename: ~

restic_policy_keep_all_within: 1d
restic_policy_keep_hourly: 12
restic_policy_keep_daily: 7
restic_policy_keep_weekly: 6
restic_policy_keep_monthly: 6
restic_policy_keep_yearly: 5
restic_policy_backup_frequency: hourly

restic_base_environment:
  RESTIC_JOBNAME: "{{ restic_job_name | default('unknown') }}"
  RESTIC_FORGET_KEEP_WITHIN: "{{ restic_policy_keep_all_within }}"
  RESTIC_FORGET_KEEP_HOURLY: "{{ restic_policy_keep_hourly }}"
  RESTIC_FORGET_KEEP_DAILY: "{{ restic_policy_keep_daily }}"
  RESTIC_FORGET_KEEP_WEEKLY: "{{ restic_policy_keep_weekly }}"
  RESTIC_FORGET_KEEP_MONTHLY: "{{ restic_policy_keep_monthly }}"
  RESTIC_FORGET_KEEP_YEARLY: "{{ restic_policy_keep_yearly }}"

restic_s3_environment:
  AWS_ACCESS_KEY_ID: "{{ restic_s3_key_id }}"
  AWS_SECRET_ACCESS_KEY: "{{ restic_s3_access_key }}"

restic_complete_environment: >-
  {{
    restic_base_environment
    | combine((restic_s3_environment
      if (restic_s3_key_id and restic_s3_access_key) else {}) | default({}))
    | combine(restic_environment | default({}))
  }}

restic_policy:
  keep_within: "{{ restic_policy_keep_all_within }}"
  hourly: "{{ restic_policy_keep_hourly }}"
  daily: "{{ restic_policy_keep_daily }}"
  weekly: "{{ restic_policy_keep_weekly }}"
  monthly: "{{ restic_policy_keep_monthly }}"
  yearly: "{{ restic_policy_keep_yearly }}"
  frequency: "{{ restic_policy_backup_frequency }}"

restic_user: root
restic_create_user: false
restic_start_job_on_unit_change: false

restic_job_name: ~
restic_job_description: "Restic backup job for {{ restic_job_name }}"
restic_systemd_unit_naming_scheme: "restic.{{ restic_job_name }}"
restic_systemd_working_directory: /tmp
restic_systemd_syslog_identifier: "restic-{{ restic_job_name }}"

restic_package_name: restic
13
roles/restic/handlers/main.yml
Normal file
@ -0,0 +1,13 @@
---

- name: Ensure systemd daemon is reloaded
  listen: reload-systemd
  ansible.builtin.systemd:
    daemon_reload: true

- name: Ensure systemd service for '{{ restic_job_name }}' is started immediately
  listen: trigger-restic
  ansible.builtin.systemd:
    name: "{{ restic_systemd_unit_naming_scheme }}.service"
    state: started
  when: restic_start_job_on_unit_change
77
roles/restic/tasks/main.yml
Normal file
@ -0,0 +1,77 @@
---

- name: Ensure {{ restic_user }} system user exists
  ansible.builtin.user:
    name: "{{ restic_user }}"
    state: present
    system: true
  when: restic_create_user

- name: Ensure backup_paths and backup_stdin_command are not both populated
  when: restic_backup_paths | length > 0 and restic_backup_stdin_command
  ansible.builtin.fail:
    msg: "Setting both `restic_backup_paths` and `restic_backup_stdin_command` is not supported"

- name: Ensure a filename for stdin_command backup is given
  when: restic_backup_stdin_command and not restic_backup_stdin_command_filename
  ansible.builtin.fail:
    msg: "`restic_backup_stdin_command` was set but no filename for the resulting output was supplied in `restic_backup_stdin_command_filename`"

- name: Ensure backup frequency adheres to systemd's OnCalendar syntax
  ansible.builtin.command:
    cmd: "systemd-analyze calendar {{ restic_policy.frequency }}"
  register: systemd_calendar_parse_res
  failed_when: systemd_calendar_parse_res.rc != 0
  changed_when: false

- name: Ensure restic is installed
  block:
    - name: Ensure restic is installed via apt
      ansible.builtin.apt:
        name: restic
        state: latest
      when: ansible_os_family == 'Debian'
    - name: Ensure restic is installed via dnf
      ansible.builtin.dnf:
        name: restic
        state: latest
      when: ansible_os_family == 'RedHat'
    - name: Ensure restic is installed using the auto-detected package manager
      ansible.builtin.package:
        name: "{{ restic_package_name }}"
        state: present
      when: ansible_os_family not in ['RedHat', 'Debian']

- name: Ensure systemd service file for '{{ restic_job_name }}' is templated
  ansible.builtin.template:
    dest: "/etc/systemd/system/{{ restic_systemd_unit_naming_scheme }}.service"
    src: restic.service.j2
    owner: root
    group: root
    mode: "0640"
  notify:
    - reload-systemd
    - trigger-restic

- name: Ensure systemd timer file for '{{ restic_job_name }}' is templated
  ansible.builtin.template:
    dest: "/etc/systemd/system/{{ restic_systemd_unit_naming_scheme }}.timer"
    src: restic.timer.j2
    owner: root
    group: root
    mode: "0640"
  notify:
    - reload-systemd

- name: Flush handlers to ensure systemd knows about '{{ restic_job_name }}'
  meta: flush_handlers

- name: Ensure systemd timer for '{{ restic_job_name }}' is activated
  ansible.builtin.systemd:
    name: "{{ restic_systemd_unit_naming_scheme }}.timer"
    enabled: true

- name: Ensure systemd timer for '{{ restic_job_name }}' is started
  ansible.builtin.systemd:
    name: "{{ restic_systemd_unit_naming_scheme }}.timer"
    state: started
51
roles/restic/templates/restic.service.j2
Normal file
@ -0,0 +1,51 @@
[Unit]
Description={{ restic_job_description }}

[Service]
Type=simple
User={{ restic_user }}
WorkingDirectory={{ restic_systemd_working_directory }}
SyslogIdentifier={{ restic_systemd_syslog_identifier }}

Environment=RESTIC_REPOSITORY={{ restic_repo_url }}
Environment=RESTIC_PASSWORD={{ restic_repo_password }}
{% for kv in restic_complete_environment | dict2items %}
Environment={{ kv.key }}={{ kv.value }}
{% endfor %}

{% if restic_init | default(true) %}
ExecStartPre=-/bin/sh -c '/usr/bin/restic snapshots || /usr/bin/restic init'
{% endif %}
{% if restic_unlock_before_backup | default(false) %}
ExecStartPre=-/bin/sh -c 'sleep 3 && /usr/bin/restic unlock'
{% endif %}
{% if restic_backup_pre_hook | default(false) %}
ExecStartPre=-{{ restic_backup_pre_hook }}
{% endif %}
{% if restic_backup_stdin_command %}
ExecStart=/bin/sh -c '{{ restic_backup_stdin_command }} | /usr/bin/restic backup \
    --retry-lock {{ restic_retry_lock | default('5m') }} \
    --verbose --stdin \
    --stdin-filename {{ restic_backup_stdin_command_filename }}'
{% else %}
ExecStart=/opt/restic-backup-directories.sh {{ restic_backup_paths | join(' ') }}
{% endif %}
{% if restic_forget_prune | default(true) %}
ExecStartPost=/usr/bin/restic forget --prune \
    --retry-lock {{ restic_retry_lock | default('5m') }} \
    --keep-within={{ restic_policy.keep_within }} \
    --keep-hourly={{ restic_policy.hourly }} \
    --keep-daily={{ restic_policy.daily }} \
    --keep-weekly={{ restic_policy.weekly }} \
    --keep-monthly={{ restic_policy.monthly }} \
    --keep-yearly={{ restic_policy.yearly }}
{% endif %}
{% if restic_list_snapshots | default(true) %}
ExecStartPost=-/usr/bin/restic snapshots --retry-lock {{ restic_retry_lock | default('5m') }}
{% endif %}
{% if restic_backup_post_hook | default(false) %}
ExecStartPost=-{{ restic_backup_post_hook }}
{% endif %}
{% if restic_check | default(true) %}
ExecStartPost=/usr/bin/restic check --retry-lock {{ restic_retry_lock | default('5m') }}
{% endif %}
9
roles/restic/templates/restic.timer.j2
Normal file
@ -0,0 +1,9 @@
[Unit]
Description=Run {{ restic_timer_description | default(restic_job_name) }}

[Timer]
OnCalendar={{ restic_policy.frequency }}
Unit={{ restic_systemd_unit_naming_scheme }}.service

[Install]
WantedBy=timers.target