1
0
forked from finallycoffee/base

Compare commits

..

59 Commits

Author SHA1 Message Date
0771787c98
feat(docker): add ansible role 2025-04-20 21:19:31 +02:00
1024921a74
feat: add user role 2025-04-20 15:36:43 +02:00
62263726fa
update(nginx): bump version to 1.27.5 2025-04-16 16:45:40 +02:00
ae887a1936
update(lego): bump version to 4.23.0 2025-04-16 16:45:09 +02:00
1e57ae1ec0
update(minio): bump container tag to RELEASE.2025-04-08T15-41-24Z 2025-04-14 20:12:27 +02:00
90dae1311f
meta: bump galaxy version to 0.2.1, require community.docker@4.2.0 2025-04-07 17:23:30 +02:00
3733c0b7ab
feat(lego): add auto-update mechanism 2025-04-07 17:23:26 +02:00
877c5a137a
fix(lego): quoting issues leading to wrongful error 2025-04-07 16:29:06 +02:00
11d4b397ef
fix(lego): ensure variables are either defined or have null-check handling 2025-04-06 09:11:28 +02:00
76e89db5c6
refactor(minio): add state and deployment_method parametrization, bump version to RELEASE.2025-04-03T14-56-28Z 2025-04-05 17:34:33 +02:00
4bddc95161
fix(restic): if repository exists but is locked, ensure it is unlocked 2025-04-05 15:39:17 +02:00
ed95d4fd3d
update(nginx): bump version to 1.27.4 2025-03-22 12:25:00 +01:00
788c4bada0
feat(restic): add scripts for directory backup and snapshot metrics generation 2025-03-08 22:47:58 +01:00
7ecf8778ca
feat(lego): add pre- and post-renewal hooks 2025-03-01 22:27:29 +01:00
11fec18afc
update(lego): bump version to 4.22.2 2025-02-18 17:24:12 +01:00
b0ba86f78e
update(lego): bump version to 4.22.1 2025-02-17 17:56:20 +01:00
bd418a6199
update(nginx): bump version to 1.27.3 2025-01-30 15:36:40 +01:00
60b36db8a7
update(lego): bump version to 4.21.0 2024-12-20 20:58:59 +01:00
9a02652d98
meta!: bump galaxy collection to 0.2.0 and drop deprecated roles 2024-12-01 09:46:44 +01:00
2c1b3cb47e
chore(mariadb): role was migrated to finallycoffee.databases collection 2024-12-01 09:37:40 +01:00
114cf13871
chore(elasticsearch): role was migrated to finallycoffee.databases collection 2024-12-01 09:36:27 +01:00
b77c81f754
update(lego): bump version to 4.20.4 2024-11-21 18:39:34 +01:00
bdf1871855
update(lego): bump version to 4.20.2 2024-11-12 17:32:39 +01:00
9454845ea1
meta: bump collection version to 0.1.3 2024-10-29 17:53:42 +01:00
4e8cc9bcf2
meta: deprecate elasticsearch role 2024-10-29 17:53:16 +01:00
3b9d6e19da
meta: deprecate mariadb role 2024-10-29 17:49:00 +01:00
c847046720
refactor(mariadb): add state parameter and split container image arguments 2024-10-27 16:04:07 +01:00
d7b7c59f46
update(lego): bump version to 4.19.2 2024-10-23 20:48:59 +02:00
153df81836
feat(lego): Detect lego_architecture automatically 2024-10-18 14:53:59 +02:00
7021ed1a89
meta: bump collection version to 0.1.2, require community.docker@^3.0.0, take issues on codeberg repo mirror 2024-10-05 10:19:15 +02:00
afe72f554e
chore(nginx): add deployment_method=podman 2024-10-05 10:16:53 +02:00
c36e95d7eb
chore(nginx): add state=absent support 2024-10-05 10:14:35 +02:00
97526aec36
update(nginx): bump version to 1.27.2 2024-10-05 10:01:07 +02:00
fc73fdd1fa
fix(docs): typos and formatting 2024-09-21 11:36:21 +02:00
bd43f3963c
meta: bump collection version to 0.1.1 2024-09-21 11:11:36 +02:00
1076a9f384
update(mariadb): bump version to 10.11.9 2024-09-21 11:10:09 +02:00
82e69bdda3
update(nginx): bump version to 1.26.2 2024-09-21 11:08:57 +02:00
b9b5c19d38
update(lego): bump version to 4.18.0 2024-09-21 11:07:42 +02:00
b9e4abdf36
meta: require ansible >=2.15.0 2024-09-21 11:06:41 +02:00
aac6891518
meta: update collection version to 0.1.0, add community.docker as dependency 2024-09-21 10:58:40 +02:00
31d025ed75
docs: add READMEs to roles powerdns_tsig_key and dns 2024-09-21 10:57:46 +02:00
1423d2a243
feat(restic): add support for additional env vars, optional init and checking and pre-backup hooks 2024-09-18 18:20:12 +02:00
967ebab4c1
feat(lego): Ensure certificates have correct mode and owner 2024-09-11 17:47:49 +02:00
5f4fbd492c
feat(lego): Add cap_net_bind capabilities to systemd unit 2024-09-09 13:14:35 +02:00
96f5da9bf6
feat(roles/lego): Add support for LEGO_HTTP_PORT_FALLBACK 2024-08-04 15:13:59 +02:00
2aaa529585
feat(lego): allow setting capabilites on lego binary for net_bind_service 2024-08-01 19:42:36 +02:00
8941b9357a
update(lego): bump version to 4.17.4 2024-08-01 18:56:41 +02:00
04b5837fd8
chore(lego): add README, extract systemd user pattern into defaults 2024-08-01 18:55:30 +02:00
4837172f64
fix(powerdns_tsig_key): restart powerdns after tsig key creation 2024-07-22 18:17:53 +02:00
ab7cca0947
fix(minio): correct process arguments 2024-07-21 17:11:31 +02:00
960d95a924
feat: add finallycoffee.base.lego role 2024-05-19 20:40:43 +02:00
eab7b7e915
fix(powerdns-tsig-key): fix permissions on files for nicer integration with lego 2024-05-19 20:39:05 +02:00
e7886d8c98
feat(restic): add optional hook and optional unlock 2024-05-05 16:36:30 +02:00
13d40341a0
fix(restic): change systemd service type to simple, remove wanted-by, allow post-start hooks
The old service type oneshot combined with a wanted-by of multi-user.target
can lead to an infite recursion which systemd does not warn about,
causing a service that never activates.
2024-04-14 15:14:02 +02:00
12b98487a5
update(mariadb): bump version to 10.11.6 2024-02-06 12:41:16 +01:00
2e6cb0a4d5
update(mariadb): bump version to 10.6.16 2024-02-06 12:33:19 +01:00
52d25942b4
update(nginx): bump version to 1.25.3 2024-02-06 11:05:13 +01:00
af17bea1e1
feat: add finallycoffee.base.powerdns_tsig_key role 2023-11-07 18:38:16 +01:00
52bf02e084
feat: add finallycoffee.base.dns role 2023-11-07 18:37:58 +01:00
62 changed files with 1090 additions and 410 deletions

View File

@ -5,15 +5,12 @@
This ansible collection provides various roles for installing This ansible collection provides various roles for installing
and configuring basic system utilities like gnupg, ssh etc and configuring basic system utilities like gnupg, ssh etc
- [`elasticsearch`](roles/elasticsearch/README.md): Deploy [elasticsearch](https://www.docker.elastic.co/r/elasticsearch/elasticsearch-oss),
a popular (distributed) search and analytics engine, mostly known by it's
letter "E" in the ELK-stack.
- [`git`](roles/git/README.md): configures git on the target system - [`git`](roles/git/README.md): configures git on the target system
- [`gnupg`](roles/gnupg/README.md): configures gnupg on the target system - [`gnupg`](roles/gnupg/README.md): configures gnupg on the target system
- [`mariadb`](roles/mariadb/README.md): runs [MariaDB Server](https://mariadb.org/), one of the world's most popular open source relational database - [`lego`](roles/lego/README.md): runs [lego (LetsEncrypt Go)](https://github.com/go-acme/lego),
a ACME client written in go, using systemd (timers). Multi-instance capable.
- [`minio`](roles/minio/README.md): Deploy [min.io](https://min.io), an - [`minio`](roles/minio/README.md): Deploy [min.io](https://min.io), an
s3-compatible object storage server, using docker containers. s3-compatible object storage server, using docker containers.
@ -24,6 +21,9 @@ and configuring basic system utilities like gnupg, ssh etc
- [`restic`](roles/restic/README.md): Manage backups using restic - [`restic`](roles/restic/README.md): Manage backups using restic
and persist them to a configurable backend. and persist them to a configurable backend.
- [`powerdns_tsig_key`](roles/powerdns_tsig_key/README.md): Simple ansible role
for generating TSIG keys in PowerDNS.
## License ## License
[CNPLv7+](LICENSE.md): Cooperative Nonviolent Public License [CNPLv7+](LICENSE.md): Cooperative Nonviolent Public License

View File

@ -1,12 +1,21 @@
namespace: finallycoffee namespace: finallycoffee
name: base name: base
version: 0.0.2 version: 0.2.1
readme: README.md readme: README.md
authors: authors:
- transcaffeine <transcaffeine@finally.coffee> - transcaffeine <transcaffeine@finally.coffee>
description: Roles for base services which are common dependencies other services like databases description: Roles for base services which are common dependencies other services like databases
dependencies:
"community.docker": "^4.2.0"
license_file: LICENSE.md license_file: LICENSE.md
build_ignore: build_ignore:
- '*.tar.gz' - '*.tar.gz'
repository: https://git.finally.coffee/finallycoffee/base repository: https://git.finally.coffee/finallycoffee/base
issues: https://git.finally.coffee/finallycoffee/base/issues issues: https://codeberg.org/finallycoffee/ansible-collection-base/issues
# Galaxy search tags for this collection. The previous list contained the
# 'docker' entry twice; duplicate tags add no value and some tooling flags
# duplicated sequence entries.
tags:
  - docker
  - lego
  - minio
  - nginx
  - restic

View File

@ -1,3 +1,3 @@
--- ---
requires_ansible: ">=2.12" requires_ansible: ">=2.15"

6
playbooks/docker.yml Normal file
View File

@ -0,0 +1,6 @@
---
# Playbook: install and configure the docker daemon via the
# finallycoffee.base.docker role.
# Targeting and privilege escalation are overridable at runtime:
#   -e docker_hosts=<pattern>   (default: hosts in the 'docker' group)
#   -e docker_become=true       (default: false)
- name: Install and configure docker daemon
  hosts: "{{ docker_hosts | default('docker', true) }}"
  become: "{{ docker_become | default(false, true) }}"
  roles:
    - role: finallycoffee.base.docker

7
playbooks/user.yml Normal file
View File

@ -0,0 +1,7 @@
---
# Playbook: manage user accounts via the finallycoffee.base.user role.
# Overridable at runtime:
#   -e user_hosts=<pattern>          (default: 'all')
#   -e user_role_become=true         (default: false)
#   -e user_role_gather_facts=true   (default: false — role presumably needs no facts)
- name: Configure user accounts
  hosts: "{{ user_hosts | default('all', true) }}"
  become: "{{ user_role_become | default(false, true) }}"
  gather_facts: "{{ user_role_gather_facts | default(false, true) }}"
  roles:
    - role: finallycoffee.base.user

33
roles/dns/README.md Normal file
View File

@ -0,0 +1,33 @@
# `finallycoffee.base.dns` ansible role
Simple role for wrapping around the
[`famedly.dns.update`](https://github.com/famedly/ansible-collection-dns/blob/main/plugins/modules/update.py)
ansible module.
## Usage
### Example playbook
```yaml
- hosts: "{{ target_hosts }}"
roles:
- role: finallycoffee.base.dns
vars:
dns_server: "dns.example.org"
dns_zone: "zone.example.org"
dns_records: "{{ dns_records }}"
dns_record_state: exact
dns_tsig_name: "mykeyname"
dns_tsig_algo: "hmac-sha256"
dns_tsig_key: "mykeycontent"
vars:
dns_records:
- type: A
name: gitea
content: "127.0.0.1"
- type: AAAA
name: gitea
content: "fe80::1"
- type: CNAME
name: "_acme_challenge.gitea"
content: "delegated-cname.challenge.example.org"
```

View File

@ -0,0 +1,2 @@
---
dns_record_state: present

11
roles/dns/tasks/main.yml Normal file
View File

@ -0,0 +1,11 @@
---
# Thin wrapper around famedly.dns.update: pushes the rr-sets in
# 'dns_records' to the zone's primary master, authenticated with a TSIG key.
- name: Ensure DNS records in '{{ dns_zone }}' are up to date
  famedly.dns.update:
    primary_master: "{{ dns_server }}"  # server that receives the DNS UPDATE
    zone: "{{ dns_zone }}"
    tsig_name: "{{ dns_tsig_name }}"    # TSIG key name
    tsig_algo: "{{ dns_tsig_algo }}"    # e.g. hmac-sha256
    tsig_key: "{{ dns_tsig_key }}"      # secret key material — supply via vault
    rr_set: "{{ dns_records }}"
    state: "{{ dns_record_state }}"     # 'present' (default) or e.g. 'exact'

13
roles/docker/README.md Normal file
View File

@ -0,0 +1,13 @@
# `finallycoffee.base.docker` ansible role
Install and configure the docker daemon.
## Configuration
- `docker_daemon_config` - configuration for the docker daemon
- `docker_remove_legacy_packages` - clean up old versions of docker (see https://docs.docker.com/engine/install/debian/#uninstall-old-versions)
## Plugins
- `docker_plugin_buildx_enable` - enable the buildx plugin
- `docker_plugin_compose_enable` - enable docker compose

View File

@ -0,0 +1,31 @@
---
# APT sources for the upstream docker repository (Debian).
docker_apt_key_url: "https://download.docker.com/linux/debian/gpg"
docker_apt_key_id: "9DC858229FC7DD38854AE2D88D81803C0EBFCD88"
docker_apt_arch: amd64
docker_apt_release_channel: stable
docker_apt_repository_url: "https://download.docker.com/linux/debian"
# Rendered sources.list entry, e.g. "deb [arch=amd64] https://… bookworm stable".
docker_apt_repository: >-2
  deb [arch={{ docker_apt_arch }}] {{ docker_apt_repository_url }} {{ ansible_distribution_release }} {{ docker_apt_release_channel }}
docker_apt_cli_package: "docker-ce-cli"
docker_apt_plugin_buildx_package: "docker-buildx-plugin"
docker_apt_plugin_compose_package: "docker-compose-plugin"
docker_apt_base_packages:
  - "docker-ce"
  - "docker-ce-cli"
  - "containerd.io"
# Final package list: base packages plus the optionally enabled CLI plugins.
docker_apt_packages: >-2
  {{
    docker_apt_base_packages
    + (docker_plugin_buildx_enable | default(false)
      | ternary([ docker_apt_plugin_buildx_package ], []))
    + (docker_plugin_compose_enable | default(false)
      | ternary([ docker_apt_plugin_compose_package ], []))
  }}
# Distro/legacy packages that conflict with docker-ce; removed when
# docker_remove_legacy_packages is true (see tasks/install-debian.yml).
docker_apt_legacy_packages:
  - "docker.io"
  - "docker-compose"
  - "docker-doc"
  - "podman-docker"
  - "containerd"
  - "runc"

View File

@ -0,0 +1,13 @@
---
# 'present' installs/starts docker; 'absent' removes it (see tasks/main.yml).
docker_state: "present"
# Contents of daemon.json, serialized with to_json; empty mapping means
# "do not manage a daemon config".
docker_daemon_config: {}
docker_daemon_config_file: "/etc/docker/daemon.json"
docker_daemon_config_file_mode: "0644"
docker_daemon_config_owner: root
docker_daemon_config_group: "{{ docker_daemon_config_owner }}"
# Optional CLI plugins (buildx, compose v2) — adds the matching apt packages.
docker_plugin_buildx_enable: false
docker_plugin_compose_enable: false
# Uninstall distro-packaged docker variants (docker.io, podman-docker, …).
docker_remove_legacy_packages: true

View File

@ -0,0 +1,5 @@
---
# Derived systemd facts: the service is started and enabled exactly when
# docker_state is 'present', stopped/disabled otherwise.
docker_systemd_service_name: "docker.service"
docker_systemd_service_state: >-2
  {{ (docker_state == 'present') | ternary('started', 'stopped') }}
docker_systemd_service_enabled: "{{ (docker_state == 'present') }}"

View File

@ -0,0 +1,6 @@
---
# Restart the docker daemon; triggered with 'notify: docker-restart'
# (e.g. after daemon.json changed).
- name: Restart docker daemon
  ansible.builtin.systemd_service:
    name: "{{ docker_systemd_service_name }}"
    state: "restarted"
  listen: "docker-restart"

View File

@ -0,0 +1,18 @@
---
# Write the docker daemon configuration (daemon.json) and restart the
# daemon (via the 'docker-restart' handler) when it changed.
- name: Ensure config directory '{{ docker_daemon_config_file | dirname }}' is present
  ansible.builtin.file:
    path: "{{ docker_daemon_config_file | dirname }}"
    state: "directory"
    mode: "0755"
    owner: "{{ docker_daemon_config_owner }}"
    group: "{{ docker_daemon_config_group }}"

- name: Configure docker daemon using '{{ docker_daemon_config_file }}'
  ansible.builtin.copy:
    content: "{{ docker_daemon_config | to_json }}"
    dest: "{{ docker_daemon_config_file }}"
    mode: "{{ docker_daemon_config_file_mode }}"
    owner: "{{ docker_daemon_config_owner }}"
    group: "{{ docker_daemon_config_group }}"
  # Only write a config file when there is actual configuration. The previous
  # condition ('docker_daemon_config | string | length > 0') was always true,
  # because an empty dict stringifies to '{}' (length 2); checking the dict's
  # own length skips the file for an empty config as intended.
  when: docker_daemon_config | length > 0
  notify: docker-restart

View File

@ -0,0 +1,30 @@
---
# Install docker from the upstream apt repository on Debian-family hosts.
# All tasks honour docker_state ('present'/'absent').
- name: Ensure legacy docker packages are removed
  ansible.builtin.apt:
    name: "{{ docker_apt_legacy_packages }}"
    state: absent
  when: docker_remove_legacy_packages

- name: Add apt key for docker repository
  ansible.builtin.apt_key:
    id: "{{ docker_apt_key_id }}"
    url: "{{ docker_apt_key_url }}"
    state: "{{ docker_state }}"

- name: Add apt repository for docker
  ansible.builtin.apt_repository:
    repo: "{{ docker_apt_repository }}"
    state: "{{ docker_state }}"
  register: docker_apt_repository_info

# A freshly added repository requires a cache refresh before its packages
# are resolvable.
- name: Update apt cache if repository was newly added
  ansible.builtin.apt:
    update_cache: true
  when:
    - docker_state == 'present'
    - docker_apt_repository_info.changed

- name: Install apt packages for docker
  ansible.builtin.apt:
    name: "{{ docker_apt_packages }}"
    state: "{{ docker_state }}"

View File

@ -0,0 +1,29 @@
---
# Entry point: validate OS support, dispatch to the per-OS install tasks,
# apply the daemon configuration, and align the systemd service with
# docker_state.
- name: Check if target OS is supported
  ansible.builtin.fail:
    msg: >-2
      OS Family '{{ docker_os_family }}' is not supported!
  when: docker_os_family not in docker_supported_os_families
  vars:
    docker_os_family: "{{ ansible_os_family | lower }}"

# Dispatches to e.g. install-debian.yml based on the (lowercased) OS family.
- name: Ensure docker is {{ docker_state }} on {{ ansible_os_family }}-family
  ansible.builtin.include_tasks:
    file: "install-{{ ansible_os_family | lower }}.yml"

- name: Configure docker daemon
  ansible.builtin.include_tasks:
    file: "configure.yml"
  when: docker_state == 'present'

# Service management only applies on systemd-managed hosts.
- name: Ensure docker daemon is {{ docker_systemd_service_enabled | ternary('enabled', 'disabled') }}
  ansible.builtin.systemd_service:
    name: "{{ docker_systemd_service_name }}"
    enabled: "{{ docker_systemd_service_enabled }}"
  when: ansible_facts['service_mgr'] == 'systemd'

- name: Ensure docker daemon is {{ docker_systemd_service_state }}
  ansible.builtin.systemd_service:
    name: "{{ docker_systemd_service_name }}"
    state: "{{ docker_systemd_service_state }}"
  when: ansible_facts['service_mgr'] == 'systemd'

View File

@ -0,0 +1,3 @@
---
# OS families (lowercased ansible_os_family) this role can install on;
# checked in tasks/main.yml before attempting installation.
docker_supported_os_families:
  - 'debian'

View File

@ -1,22 +0,0 @@
# `finallycoffee.base.elastiscsearch`
A simple ansible role which deploys a single-node elastic container to provide
an easy way to do some indexing.
## Usage
Per default, `/opt/elasticsearch/data` is used to persist data, it is
customizable by using either `elasticsearch_base_path` or `elasticsearch_data_path`.
As elasticsearch be can be quite memory heavy, the maximum amount of allowed RAM
can be configured using `elasticsearch_allocated_ram_mb`, defaulting to 512 (mb).
The cluster name and discovery type can be overridden using
`elasticsearch_config_cluster_name` (default: elastic) and
`elasticsearch_config_discovery_type` (default: single-node), should one
need a multi-node elasticsearch deployment.
Per default, no ports or networks are mapped, and explizit mapping using
either ports (`elasticsearch_container_ports`) or networks
(`elasticsearch_container_networks`) is required in order for other services
to use elastic.

View File

@ -1,35 +0,0 @@
---
elasticsearch_version: 7.17.7
elasticsearch_base_path: /opt/elasticsearch
elasticsearch_data_path: "{{ elasticsearch_base_path }}/data"
elasticsearch_config_cluster_name: elastic
elasticsearch_config_discovery_type: single-node
elasticsearch_config_boostrap_memory_lock: true
elasticsearch_allocated_ram_mb: 512
elasticsearch_container_image_name: docker.elastic.co/elasticsearch/elasticsearch-oss
elasticsearch_container_image_tag: ~
elasticsearch_container_image: >-
{{ elasticsearch_container_image_name }}:{{ elasticsearch_container_image_tag | default(elasticsearch_version, true) }}
elasticsearch_container_name: elasticsearch
elasticsearch_container_env:
"ES_JAVA_OPTS": "-Xms{{ elasticsearch_allocated_ram_mb }}m -Xmx{{ elasticsearch_allocated_ram_mb }}m"
"cluster.name": "{{ elasticsearch_config_cluster_name }}"
"discovery.type": "{{ elasticsearch_config_discovery_type }}"
"bootstrap.memory_lock": "{{ 'true' if elasticsearch_config_boostrap_memory_lock else 'false' }}"
elasticsearch_container_user: ~
elasticsearch_container_ports: ~
elasticsearch_container_labels:
version: "{{ elasticsearch_version }}"
elasticsearch_container_ulimits:
# - "memlock:{{ (1.5 * 1024 * elasticsearch_allocated_ram_mb) | int }}:{{ (1.5 * 1024 * elasticsearch_allocated_ram_mb) | int }}"
- "memlock:-1:-1"
elasticsearch_container_volumes:
- "{{ elasticsearch_data_path }}:/usr/share/elasticsearch/data:z"
elasticsearch_container_networks: ~
elasticsearch_container_purge_networks: ~
elasticsearch_container_restart_policy: unless-stopped

View File

@ -1,32 +0,0 @@
---
- name: Ensure host directories are present
file:
path: "{{ item }}"
state: directory
mode: "0777"
loop:
- "{{ elasticsearch_base_path }}"
- "{{ elasticsearch_data_path }}"
- name: Ensure elastic container image is present
docker_image:
name: "{{ elasticsearch_container_image }}"
state: present
source: pull
force_source: "{{ elasticsearch_container_image_tag|default(false, true)|bool }}"
- name: Ensure elastic container is running
docker_container:
name: "{{ elasticsearch_container_name }}"
image: "{{ elasticsearch_container_image }}"
env: "{{ elasticsearch_container_env | default(omit, True) }}"
user: "{{ elasticsearch_container_user | default(omit, True) }}"
ports: "{{ elasticsearch_container_ports | default(omit, True) }}"
labels: "{{ elasticsearch_container_labels | default(omit, True) }}"
volumes: "{{ elasticsearch_container_volumes }}"
ulimits: "{{ elasticsearch_container_ulimits }}"
networks: "{{ elasticsearch_container_networks | default(omit, True) }}"
purge_networks: "{{ elasticsearch_container_purge_networks | default(omit, True) }}"
restart_policy: "{{ elasticsearch_container_restart_policy }}"
state: started

46
roles/lego/README.md Normal file
View File

@ -0,0 +1,46 @@
# `finallycoffee.base.lego` ansible role
This role can be used to retrieve ACME certificates on the target host. It uses `lego` for that, and with systemd template units provides an easy way to configure and monitor the status for each certificate.
## Requirements
- `systemd`
- write access to /tmp to unpack the lego release tarball during installation
- write access to /opt/lego (or whatever `lego_base_path` is set to) for configuration and certificate data
- `become` privileges of the `ansible_user` on the target
## Usage
### Required configuration
- `lego_instance` - used for allowing multiple lego jobs to run with systemd template units. recommended to be set to the CN / first SAN of the certificate.
- `lego_cert_domains` - list of FQDNs to request a certificate for
- `lego_acme_account_email` - when using letsencrypt, a contact email is mandatory
### Proxies / Registries
The role ensures `lego` is downloaded from the GitHub release page. If you are behind a proxy or use a registry like Nexus3, set `lego_release_archive_server`.
### ACME server
Per default, the Letsencrypt Staging ACME server is configured. Set `lego_acme_server_url` from `lego_letsencrypt_server_urls.{qa,prod}` or configure your own ACME v2 server directly.
### Certificate
To set for which domains to request a certificate for, set them as a list of SANs in `lego_cert_domains`. The default key type is EC256 and can be overridden using `lego_cert_key_type`.
Set the type of challenge in `lego_acme_challenge_type` (to either `http` or `dns`), and `lego_acme_challenge_provider` to, for example, `rfc2136` for DNS challenges using the DNSUPDATE mechanism. If your challenge needs additional data, set that in `lego_command_config` as a dictionary analog to `lego_base_command_config` (see [defaults](defaults/main.yml)).
## Trivia
### Architecture
By default, the lego distribution for `linux` on `amd64` is downloaded. If your target needs a different architecture or target OS, adjust this in `lego_os` and `lego_architecture`, cross-checking with the [lego GitHub release page](https://github.com/go-acme/lego/releases/tag/v4.17.4) for upstream availability.
### User management
The role will attempt to create user+group for each separate lego instance for data isolation (i.e. to avoid leaking a TSIG key from one lego instance to other services). The user and group are of the form `acme-{{ lego_instance }}`. Beware that changing this in `lego_cert_{user,group}` also requires `lego_systemd_{user,group}` to be adjusted!
### Binding to ports < 1024 (HTTP-01 challenge)
Set `lego_binary_allow_net_bind_service: true` to allow the lego binary to bind to ports in the 'privileged' (< 1024) port range.

View File

@ -0,0 +1,71 @@
---
# --- installation & instance identity ---------------------------------------
lego_user: "lego"                # shared system user for the lego installation
lego_version: "4.23.0"
lego_instance: default           # one instance (= one certificate) per systemd template unit
lego_base_path: "/opt/lego"
# --- certificate file ownership ---------------------------------------------
lego_cert_user: "acme-{{ lego_instance }}"
lego_cert_group: "{{ lego_cert_user }}"
lego_cert_mode: "0640" # rw-r-----
# '%i' is expanded to the instance name by systemd at runtime; keep in sync
# with the lego_cert_user/group naming scheme.
lego_systemd_user: "acme-%i"
lego_systemd_group: "{{ lego_systemd_user }}"
lego_instance_base_path: "{{ lego_base_path }}/instances"
lego_instance_path: "{{ lego_instance_base_path }}/{{ lego_instance }}"
# --- certificate request parameters -----------------------------------------
lego_cert_domains: []            # FQDNs (SANs) to request a certificate for
lego_cert_key_type: ec256
lego_cert_days_to_renew: 30
lego_acme_account_email: ~       # mandatory when using letsencrypt
lego_acme_challenge_type: http   # 'http' or 'dns'
lego_acme_challenge_provider: ~  # e.g. 'rfc2136' for DNS challenges
lego_letsencrypt_server_urls:
  qa: "https://acme-staging-v02.api.letsencrypt.org/directory"
  prod: "https://acme-v02.api.letsencrypt.org/directory"
# NOTE: defaults to the letsencrypt *staging* endpoint; switch to
# lego_letsencrypt_server_urls.prod for production certificates.
lego_acme_server_url: "{{ lego_letsencrypt_server_urls.qa }}"
# Environment written to the per-instance .conf and consumed by run.sh.
lego_base_environment:
  LEGO_CERT_USER: "{{ lego_cert_user }}"
  LEGO_CERT_GROUP: "{{ lego_cert_group }}"
  LEGO_CERT_MODE: "{{ lego_cert_mode }}"
  LEGO_CERT_STORE_PATH: "{{ lego_instance_path }}"
  LEGO_CERT_DAYS_TO_RENEW: "{{ lego_cert_days_to_renew }}"
  LEGO_KEY_TYPE: "{{ lego_cert_key_type }}"
  LEGO_ACME_CHALLENGE_TYPE: "{{ lego_acme_challenge_type }}"
  LEGO_ACME_SERVER: "{{ lego_acme_server_url }}"
  LEGO_COMMAND_ARGS: "{{ lego_command_args }}"
# Base CLI config; rendered into --flags by vars/main.yml.
lego_base_command_config:
  server: "{{ lego_acme_server_url }}"
  accept_tos: true
  email: "{{ lego_acme_account_email }}"
  path: "{{ lego_instance_path }}"
  key_type: "{{ lego_cert_key_type }}"
# Maps the challenge type to its provider, e.g. {'dns': 'rfc2136'}.
lego_acme_challenge_config: >-
  {{ {lego_acme_challenge_type: lego_acme_challenge_provider} }}
# --- systemd wiring ----------------------------------------------------------
lego_systemd_unit_path: "/etc/systemd/system"
lego_systemd_template_unit_name: "lego@.service"
lego_systemd_template_unit_file: "{{ lego_systemd_template_unit_name }}.j2"
lego_systemd_service_name: "lego@{{ lego_instance }}.service"
lego_systemd_environment: >-
  {{ lego_base_environment | combine(lego_environment | default({})) }}
# Merge order: base < challenge config < user-supplied lego_command_config.
lego_full_command_config: >-
  {{ lego_base_command_config
  | combine(lego_acme_challenge_config)
  | combine(lego_command_config | default({})) }}
lego_systemd_timer_name: "lego-{{ lego_instance }}.timer"
lego_systemd_timer_template: lego.timer.j2
lego_systemd_timer_calendar: "*-*-* *:00/15:00"  # every 15 minutes
# --- release download --------------------------------------------------------
# Auto-detect arm64 vs amd64; override lego_architecture/lego_os for other targets.
lego_architecture: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"
lego_os: "linux"
lego_binary_allow_net_bind_service: false
# Override when fetching through a proxy or an internal registry (e.g. Nexus3).
lego_release_archive_server: "https://github.com"
lego_release_archive_filename: >-
  lego_v{{ lego_version }}_{{ lego_os }}_{{ lego_architecture }}.tar.gz
lego_release_archive_url: >-
  {{ lego_release_archive_server }}/go-acme/lego/releases/download/v{{ lego_version }}/{{ lego_release_archive_filename }}
lego_release_archive_file_path: "/tmp/{{ lego_release_archive_filename }}"
lego_release_archive_path: "/tmp/lego_v{{ lego_version }}_{{ lego_os }}_{{ lego_architecture }}"

View File

@ -0,0 +1,35 @@
#!/usr/bin/env bash
# Obtain or renew a certificate with lego, then fix ownership/permissions on
# the resulting files. All configuration comes from environment variables
# (rendered by the role into the systemd EnvironmentFile).
set -euo pipefail

LEGO_BINARY=$(/usr/bin/env which lego)

# Optional HTTP-01 port fallback: if something already listens on
# LEGO_HTTP_PORT, switch to LEGO_HTTP_FALLBACK_PORT instead.
if [[ -n "${LEGO_HTTP_FALLBACK_PORT:-}" ]]; then
  if ! nc_binary="$(type -p 'nc')" || [[ -z $nc_binary ]]; then
    echo "nc not found (in PATH), exiting"
    exit 1
  fi
  # Run nc inside the 'if' condition: the previous form
  # ('nc -z …; if [[ $? -eq 0 ]]') aborted the whole script under 'set -e'
  # whenever the port was closed (nc exiting non-zero), so the "port is
  # free" path was unreachable.
  if nc -z 127.0.0.1 "$LEGO_HTTP_PORT"; then
    LEGO_HTTP_PORT=$LEGO_HTTP_FALLBACK_PORT
  fi
fi

if [[ -n "${LEGO_PRE_RENEWAL_HOOK:-}" ]]; then
  $LEGO_PRE_RENEWAL_HOOK
fi

LEGO_COMMAND_ARGS_EXPANDED=$(bash -c "echo $LEGO_COMMAND_ARGS") # This is a bit icky

# More than two files in the store means a certificate already exists:
# renew it; otherwise perform the initial 'run'.
FILES_IN_DIR=$(find "$LEGO_CERT_STORE_PATH/certificates" -type f | wc -l)
if [[ $FILES_IN_DIR -gt 2 ]]; then
  $LEGO_BINARY $LEGO_COMMAND_ARGS_EXPANDED renew --days="$LEGO_CERT_DAYS_TO_RENEW"
else
  $LEGO_BINARY $LEGO_COMMAND_ARGS_EXPANDED run
fi

# Ensure the certificate files carry the configured mode and owner/group.
find "$LEGO_CERT_STORE_PATH/certificates" -type f | xargs -I{} -n 1 chmod "$LEGO_CERT_MODE" "{}"
find "$LEGO_CERT_STORE_PATH/certificates" -type f | xargs -I{} -n 1 chown "${LEGO_CERT_USER}:${LEGO_CERT_GROUP}" "{}"

if [[ -n "${LEGO_POST_RENEWAL_HOOK:-}" ]]; then
  $LEGO_POST_RENEWAL_HOOK
fi

View File

@ -0,0 +1,5 @@
---
# Reload systemd unit definitions after the template/timer units changed;
# triggered with 'notify: systemd_reload'.
- name: Ensure systemd daemon is reloaded
  ansible.builtin.systemd:
    daemon_reload: true
  listen: systemd_reload

165
roles/lego/tasks/main.yml Normal file
View File

@ -0,0 +1,165 @@
---
# Install lego, create per-instance users and paths, and wire up the systemd
# template service + timer for certificate (re)issuance.

# --- users & groups ---------------------------------------------------------
- name: Ensure lego cert group is created
  ansible.builtin.group:
    name: "{{ lego_cert_group }}"
    state: present
    system: true

- name: Ensure lego cert user is created
  ansible.builtin.user:
    name: "{{ lego_cert_user }}"
    state: present
    system: true
    create_home: false
    groups:
      - "{{ lego_cert_group }}"
    append: true

# The shared lego user is also a member of the cert group so both can access
# the instance's certificate data.
- name: Ensure lego user is created
  ansible.builtin.user:
    name: "{{ lego_user }}"
    state: present
    system: true
    create_home: false
    groups:
      - "{{ lego_cert_group }}"
    append: true

# --- binary installation ----------------------------------------------------
# Probe tasks: never fail, never report 'changed', also run in check mode.
- name: Check if lego is present
  ansible.builtin.command:
    cmd: which lego
  changed_when: false
  failed_when: false
  register: lego_binary_info
  check_mode: false

- name: Check which version of lego is present
  ansible.builtin.command:
    cmd: "lego --version"
  changed_when: false
  failed_when: false
  register: lego_binary_version_info
  when: lego_binary_info.rc == 0
  check_mode: false

# (Re)install when lego is missing or reports a different version.
- name: Ensure lego is installed
  when: (lego_binary_info.rc != 0) or (lego_version not in lego_binary_version_info.stdout)
  block:
    - name: Download lego from source
      ansible.builtin.get_url:
        url: "{{ lego_release_archive_url }}"
        url_username: "{{ lego_release_archive_url_username | default(omit) }}"
        url_password: "{{ lego_release_archive_url_password | default(omit) }}"
        dest: "{{ lego_release_archive_file_path }}"
    - name: Create folder to uncompress into
      ansible.builtin.file:
        dest: "{{ lego_release_archive_path }}"
        state: directory
    - name: Uncompress lego source archive
      ansible.builtin.unarchive:
        src: "{{ lego_release_archive_file_path }}"
        dest: "{{ lego_release_archive_path }}"
        remote_src: true
      # In check mode the archive was never downloaded; ignore the failure.
      ignore_errors: "{{ ansible_check_mode }}"
    - name: Ensure lego binary is present in PATH
      ansible.builtin.copy:
        src: "{{ lego_release_archive_path }}/lego"
        dest: "/usr/local/bin/lego"
        mode: "u+rwx,g+rx,o+rx"
        remote_src: true
      ignore_errors: "{{ ansible_check_mode }}"
    # Clean up the downloaded tarball and the unpack directory in /tmp.
    - name: Ensure intermediate data is gone
      ansible.builtin.file:
        path: "{{ item }}"
        state: absent
      loop:
        - "{{ lego_release_archive_path }}"
        - "{{ lego_release_archive_file_path }}"

# Grants the binary CAP_NET_BIND_SERVICE for HTTP-01 on ports < 1024.
- name: Ensure lego is allowed to bind to ports < 1024
  community.general.capabilities:
    path: "/usr/local/bin/lego"
    capability: "cap_net_bind_service+ep"
    state: present
  when: lego_binary_allow_net_bind_service

# --- configuration & systemd units ------------------------------------------
- name: Ensure lego base path exists
  ansible.builtin.file:
    path: "{{ lego_base_path }}"
    state: directory
    mode: "0755"

- name: Ensure template unit file is present
  ansible.builtin.template:
    src: "{{ lego_systemd_template_unit_file }}"
    dest: "{{ lego_systemd_unit_path }}/{{ lego_systemd_template_unit_name }}"
  notify:
    - systemd_reload

# Renders lego_systemd_environment as KEY=VALUE lines (systemd EnvironmentFile).
# NOTE(review): no explicit mode/owner set here — the file may contain secrets
# (e.g. DNS provider credentials via lego_environment); consider a restrictive
# mode — confirm against deployments before changing.
- name: Ensure env file is templated
  ansible.builtin.copy:
    content: |+
      {% for entry in lego_systemd_environment | dict2items %}
      {{ entry.key }}={{ entry.value }}
      {% endfor %}
    dest: "{{ lego_base_path }}/{{ lego_instance }}.conf"

- name: Ensure timer unit is templated
  ansible.builtin.template:
    src: "{{ lego_systemd_timer_template }}"
    dest: "{{ lego_systemd_unit_path }}/{{ lego_systemd_timer_name }}"
  notify:
    - systemd_reload

- name: Ensure handling script is templated
  ansible.builtin.copy:
    src: "lego_run.sh"
    dest: "{{ lego_base_path }}/run.sh"
    mode: "0755"

- name: Ensure per-instance base path is created
  ansible.builtin.file:
    path: "{{ lego_instance_path }}"
    state: directory
    owner: "{{ lego_cert_user }}"
    group: "{{ lego_cert_group }}"
    mode: "0755"

# secrets is group-readable only; accounts/certificates are group-writable so
# the lego service user can store ACME account data and issued certificates.
- name: Ensure per-instance sub folders are created with correct permissions
  ansible.builtin.file:
    path: "{{ item.path }}"
    state: directory
    owner: "{{ item.owner | default(lego_cert_user) }}"
    group: "{{ item.group | default(lego_cert_group) }}"
    mode: "{{ item.mode }}"
  loop:
    - path: "{{ lego_instance_path }}/secrets"
      mode: "0750"
    - path: "{{ lego_instance_path }}/accounts"
      mode: "0770"
    - path: "{{ lego_instance_path }}/certificates"
      mode: "0775"
  loop_control:
    label: "{{ item.path }}"

# Flush the systemd_reload handler now so the units below are known to systemd.
- name: Ensure systemd daemon is reloaded
  meta: flush_handlers

- name: Ensure systemd timer is enabled
  ansible.builtin.systemd_service:
    name: "{{ lego_systemd_timer_name }}"
    enabled: true

- name: Ensure systemd timer is started
  ansible.builtin.systemd_service:
    name: "{{ lego_systemd_timer_name }}"
    state: "started"

# Kick off one immediate run so the certificate exists before the first
# timer tick.
- name: Ensure systemd service is started once to obtain the certificate
  ansible.builtin.systemd_service:
    name: "{{ lego_systemd_service_name }}"
    state: "started"

View File

@ -0,0 +1,9 @@
# Systemd timer for lego instance '{{ lego_instance }}': fires the matching
# lego@ template service on the configured calendar schedule.
[Unit]
Description=Run lego@{{ lego_instance}}.service

[Timer]
OnCalendar={{ lego_systemd_timer_calendar }}
Unit=lego@{{ lego_instance }}.service

[Install]
WantedBy=timers.target

View File

@ -0,0 +1,14 @@
# Systemd template unit: one oneshot lego run per instance (%i).
[Unit]
Description=Run lego (letsencrypt client in go)

[Service]
Type=oneshot
# Per-instance environment rendered by the role to <lego_base_path>/<instance>.conf
EnvironmentFile={{ lego_base_path }}/%i.conf
User={{ lego_systemd_user }}
Group={{ lego_systemd_group }}
ExecStart={{ lego_base_path }}/run.sh
# Allows the run to bind ports < 1024 for the HTTP-01 challenge.
AmbientCapabilities=CAP_NET_BIND_SERVICE

[Install]
WantedBy=basic.target
DefaultInstance=default

16
roles/lego/vars/main.yml Normal file
View File

@ -0,0 +1,16 @@
---
# Emits one '--domains=<fqdn>' flag per SAN in lego_cert_domains.
lego_domain_command_args: >-
  {% for domain in lego_cert_domains %}
  --domains={{ domain }}
  {%- endfor %}
# Renders lego_full_command_config into CLI flags: snake_case keys become
# --kebab-case flags; non-empty, non-null values are appended as '=value'
# (flags with null/empty values are emitted bare).
lego_config_command_args: >-
  {% for key in lego_full_command_config %}
  --{{ key | replace("_", "-") }}
  {%- if lego_full_command_config[key] != None and lego_full_command_config[key] != '' -%}
  ={{ lego_full_command_config[key] }}
  {%- endif -%}
  {%- endfor -%}
# Full argument string consumed by run.sh via the LEGO_COMMAND_ARGS env var.
lego_command_args: "{{ lego_domain_command_args }} {{ lego_config_command_args }}"

View File

@ -1,19 +0,0 @@
# `finallycoffee.base.mariadb` ansible role
This role deploys a MariaDB instance in a docker container.
## Usage
The role expects the following variables to be populated with values and/or secrets:
```yaml
mariadb_root_password: #mariadb root password
mariadb_database: # name of the database to create
mariadb_username: # name of a user to auto-create and assign permission on the mariadb_database
mariadb_password: # password of the user in mariadb_username
```
## Requirements
- Docker installed
- python-docker present on target system for ansible to be able to talk with the docker API.

View File

@ -1,32 +0,0 @@
---
mariadb_version: "10.6.11"
mariadb_base_path: /var/lib/mariadb
mariadb_data_path: "{{ mariadb_base_path }}/{{ mariadb_version }}"
mariadb_root_password: ~
mariadb_database: ~
mariadb_username: ~
mariadb_password: ~
mariadb_container_base_environment:
MARIADB_ROOT_PASSWORD: "{{ mariadb_root_password }}"
mariadb_container_extra_environment: {}
mariadb_container_name: mariadb
mariadb_container_image_name: docker.io/mariadb
mariadb_container_image_tag: ~
mariadb_container_image: "{{ mariadb_container_image_name }}:{{ mariadb_container_image_tag | default(mariadb_version, true) }}"
mariadb_container_base_volumes:
- "{{ mariadb_data_path }}:{{ mariadb_container_data_path }}:z"
mariadb_container_extra_volumes: []
mariadb_container_base_labels:
version: "{{ mariadb_version }}"
mariadb_container_extra_labels: {}
mariadb_container_restart_policy: "unless-stopped"
mariadb_container_environment: >-2
{{ mariadb_container_base_environment
| combine(mariadb_container_database_environment
if (mariadb_database and mariadb_username and mariadb_password)
else {}, recursive=True)
| combine(mariadb_container_extra_environment) }}

View File

@ -1,20 +0,0 @@
---
- name: Ensure mariaDB container image is present on host
community.docker.docker_image:
name: "{{ mariadb_container_image }}"
state: present
source: pull
- name: Ensure mariaDB {{ mariadb_version }} is running as '{{ mariadb_container_name }}'
community.docker.docker_container:
name: "{{ mariadb_container_name }}"
image: "{{ mariadb_container_image }}"
env: "{{ mariadb_container_environment }}"
ports: "{{ mariadb_container_ports }}"
labels: "{{ mariadb_container_labels }}"
volumes: "{{ mariadb_container_volumes }}"
networks: "{{ mariadb_container_networks | default(omit, true) }}"
etc_hosts: "{{ mariadb_container_etc_hosts | default(omit, true) }}"
purge_networks: "{{ mariadb_container_purge_networks | default(omit, true) }}"
restart_policy: "{{ mariadb_container_restart_policy }}"
state: started

View File

@ -1,10 +0,0 @@
---
mariadb_container_database_environment:
MARIADB_DATABASE: "{{ mariadb_database }}"
MARIADB_USER: "{{ mariadb_username }}"
MARIADB_PASSWORD: "{{ mariadb_password }}"
mariadb_container_data_path: /var/lib/mysql
mariadb_container_volumes: "{{ mariadb_container_base_volumes + mariadb_container_extra_volumes }}"
mariadb_container_labels: "{{ mariadb_container_base_labels | combine(mariadb_container_extra_labels, recursive=True) }}"

View File

@ -1,17 +1,7 @@
--- ---
minio_user: ~
minio_data_path: /opt/minio
minio_create_user: false
minio_manage_host_filesystem: false
minio_root_username: root
minio_root_password: ~
minio_container_name: minio minio_container_name: minio
minio_container_image_name: docker.io/minio/minio minio_container_image_name: "docker.io/minio/minio"
minio_container_image_tag: latest minio_container_image_tag: "RELEASE.2025-04-08T15-41-24Z"
minio_container_image: "{{ minio_container_image_name }}:{{ minio_container_image_tag }}" minio_container_image: "{{ minio_container_image_name }}:{{ minio_container_image_tag }}"
minio_container_networks: [] minio_container_networks: []
minio_container_ports: [] minio_container_ports: []
@ -30,9 +20,12 @@ minio_container_labels: {}
minio_container_command: minio_container_command:
- "server" - "server"
- "{{ minio_container_data_path }}" - "{{ minio_container_data_path }}"
- "--console-address \":{{ minio_container_listen_port_console }}\"" - "--console-address"
- ":{{ minio_container_listen_port_console }}"
minio_container_restart_policy: "unless-stopped" minio_container_restart_policy: "unless-stopped"
minio_container_image_force_source: "{{ (minio_container_image_tag == 'latest')|bool }}" minio_container_image_force_source: "{{ (minio_container_image_tag == 'latest')|bool }}"
minio_container_state: >-2
{{ (minio_state == 'present') | ternary('started', 'absent') }}
minio_container_listen_port_api: 9000 minio_container_listen_port_api: 9000
minio_container_listen_port_console: 8900 minio_container_listen_port_console: 8900

View File

@ -0,0 +1,12 @@
---
# Defaults for the minio role.
# Run user for the container; null/empty omits the container 'user' argument
# (see deploy-docker.yml).
minio_user: ~
# Host path backing the container data volume.
minio_data_path: /opt/minio
# When true, create a system user named in minio_user (see tasks/main.yml).
minio_create_user: false
# When true, create minio_data_path on the host filesystem.
minio_manage_host_filesystem: false
minio_root_username: root
# No usable default — supply via playbook/vault.
minio_root_password: ~
# One of minio_states: 'present' or 'absent'.
minio_state: present
# One of minio_deployment_methods (currently only 'docker').
minio_deployment_method: docker

View File

@ -0,0 +1,29 @@
---
- name: Ensure filesystem mounts ({{ minio_data_path }}) for container volumes are present
  ansible.builtin.file:
    path: "{{ minio_data_path }}"
    state: directory
    # 'owner' is the correct parameter name for ansible.builtin.file;
    # 'user' is not a valid option of this module and fails argument
    # validation at runtime.
    owner: "{{ minio_user | default(omit, true) }}"
    group: "{{ minio_user | default(omit, true) }}"
  when: minio_manage_host_filesystem
- name: Ensure container image '{{ minio_container_image }}' is {{ minio_state }}
  community.docker.docker_image:
    name: "{{ minio_container_image }}"
    state: "{{ minio_state }}"
    source: pull
    force_source: "{{ minio_container_image_force_source }}"
- name: Ensure container '{{ minio_container_name }}' is {{ minio_container_state }}
  community.docker.docker_container:
    name: "{{ minio_container_name }}"
    image: "{{ minio_container_image }}"
    volumes: "{{ minio_container_volumes }}"
    env: "{{ minio_container_env }}"
    labels: "{{ minio_container_labels }}"
    networks: "{{ minio_container_networks }}"
    ports: "{{ minio_container_ports }}"
    # docker_container does accept 'user' — only the file task above needed 'owner'.
    user: "{{ minio_user | default(omit, true) }}"
    command: "{{ minio_container_command }}"
    restart_policy: "{{ minio_container_restart_policy }}"
    state: "{{ minio_container_state }}"

View File

@ -1,37 +1,25 @@
--- ---
- name: Ensure 'minio_state' is valid
ansible.builtin.fail:
msg: >-
Unsupported state '{{ minio_state }}'!
Supported states are {{ minio_states | join(', ') }}.
when: minio_state not in minio_states
- name: Ensure minio run user is present - name: Ensure 'minio_deployment_method' is valid
user: ansible.builtin.fail:
msg: >-
Unsupported state '{{ minio_deployment_method }}'!
Supported states are {{ minio_deployment_methods | join(', ') }}.
when: minio_deployment_method not in minio_deployment_methods
- name: Ensure minio run user is {{ minio_state }}
ansible.builtin.user:
name: "{{ minio_user }}" name: "{{ minio_user }}"
state: present state: "{{ minio_state }}"
system: yes system: true
when: minio_create_user when: minio_create_user
- name: Ensure filesystem mounts ({{ minio_data_path }}) for container volumes are present - name: Deploy minio using {{ minio_deployment_method }}
file: ansible.builtin.include_tasks:
path: "{{ minio_data_path }}" file: "deploy-{{ minio_deployment_method }}.yml"
state: directory
user: "{{ minio_user|default(omit, True) }}"
group: "{{ minio_user|default(omit, True) }}"
when: minio_manage_host_filesystem
- name: Ensure container image for minio is present
community.docker.docker_image:
name: "{{ minio_container_image }}"
state: present
source: pull
force_source: "{{ minio_container_image_force_source }}"
- name: Ensure container {{ minio_container_name }} is running
docker_container:
name: "{{ minio_container_name }}"
image: "{{ minio_container_image }}"
volumes: "{{ minio_container_volumes }}"
env: "{{ minio_container_env }}"
labels: "{{ minio_container_labels }}"
networks: "{{ minio_container_networks }}"
ports: "{{ minio_container_ports }}"
user: "{{ minio_user|default(omit, True) }}"
command: "{{ minio_container_command }}"
restart_policy: "{{ minio_container_restart_policy }}"
state: started

View File

@ -1,5 +1,9 @@
--- ---
minio_states:
- present
- absent
minio_deployment_methods:
- docker
minio_container_volumes: "{{ minio_container_base_volumes + minio_container_extra_volumes }}" minio_container_volumes: "{{ minio_container_base_volumes + minio_container_extra_volumes }}"
minio_container_env: "{{ minio_container_base_env | combine(minio_container_extra_env) }}" minio_container_env: "{{ minio_container_base_env | combine(minio_container_extra_env) }}"

View File

@ -26,3 +26,8 @@ For exposing this server to the host and/or internet, the `nginx_container_ports
from host to container), `nginx_container_networks` (docker networking) or `nginx_container_labels` from host to container), `nginx_container_networks` (docker networking) or `nginx_container_labels`
(for label-based routing discovery like traefik) can be used. The options correspond to the arguments (for label-based routing discovery like traefik) can be used. The options correspond to the arguments
of the `community.docker.docker_container` module. of the `community.docker.docker_container` module.
## Deployment methods
Set `nginx_deployment_method` to either `docker` or `podman` to use the respective ansible modules for
creating and managing the container and its image. See all supported methods in `nginx_deployment_methods`.

View File

@ -1,9 +1,10 @@
--- ---
nginx_version: "1.27.5"
nginx_version: "1.25.1"
nginx_flavour: alpine nginx_flavour: alpine
nginx_base_path: /opt/nginx nginx_base_path: /opt/nginx
nginx_config_file: "{{ nginx_base_path }}/nginx.conf" nginx_config_file: "{{ nginx_base_path }}/nginx.conf"
nginx_state: present
nginx_deployment_method: docker
nginx_container_name: nginx nginx_container_name: nginx
nginx_container_image_reference: >- nginx_container_image_reference: >-
@ -26,6 +27,9 @@ nginx_container_image_repository: >-
nginx_container_image_registry: "docker.io" nginx_container_image_registry: "docker.io"
nginx_container_image_name: "nginx" nginx_container_image_name: "nginx"
nginx_container_image_tag: ~ nginx_container_image_tag: ~
nginx_container_image_source: pull
nginx_container_state: >-2
{{ (nginx_state == 'present') | ternary('started', 'absent') }}
nginx_container_restart_policy: "unless-stopped" nginx_container_restart_policy: "unless-stopped"
nginx_container_volumes: nginx_container_volumes:

12
roles/nginx/meta/main.yml Normal file
View File

@ -0,0 +1,12 @@
---
# Ansible Galaxy metadata for the nginx role.
allow_duplicates: true
dependencies: []
galaxy_info:
  role_name: nginx
  description: Deploy nginx, a webserver
  galaxy_tags:
    - nginx
    - http
    - webserver
    - docker
    - podman

View File

@ -0,0 +1,28 @@
---
- name: Ensure docker container image '{{ nginx_container_image_reference }}' is {{ nginx_state }}
  community.docker.docker_image:
    name: "{{ nginx_container_image_reference }}"
    state: "{{ nginx_state }}"
    source: "{{ nginx_container_image_source }}"
    # Force a re-pull when an explicit image tag is pinned (or when the user
    # overrides nginx_container_image_force_source). The expression yields a
    # real boolean; previously the raw tag string (e.g. "1.27.5") was passed,
    # which fails the module's boolean validation of 'force_source'.
    force_source: >-2
      {{ nginx_container_image_force_source
         | default(nginx_container_image_tag | default('', true) | length > 0) }}
  # Retry the pull to ride out transient registry/network failures.
  register: nginx_container_image_info
  until: nginx_container_image_info is success
  retries: 5
  delay: 3
- name: Ensure docker container '{{ nginx_container_name }}' is {{ nginx_container_state }}
  community.docker.docker_container:
    name: "{{ nginx_container_name }}"
    image: "{{ nginx_container_image_reference }}"
    env: "{{ nginx_container_env | default(omit, true) }}"
    user: "{{ nginx_container_user | default(omit, true) }}"
    ports: "{{ nginx_container_ports | default(omit, true) }}"
    labels: "{{ nginx_container_labels | default(omit, true) }}"
    volumes: "{{ nginx_container_volumes | default(omit, true) }}"
    etc_hosts: "{{ nginx_container_etc_hosts | default(omit, true) }}"
    networks: "{{ nginx_container_networks | default(omit, true) }}"
    purge_networks: "{{ nginx_container_purge_networks | default(omit, true) }}"
    restart_policy: "{{ nginx_container_restart_policy }}"
    state: "{{ nginx_container_state }}"

View File

@ -0,0 +1,27 @@
---
- name: Ensure container image '{{ nginx_container_image_reference }}' is {{ nginx_state }}
  containers.podman.podman_image:
    name: "{{ nginx_container_image_reference }}"
    state: "{{ nginx_state }}"
    pull: "{{ nginx_container_image_source == 'pull' }}"
    # Force a re-pull when an explicit image tag is pinned (or when
    # nginx_container_image_force_source is set). The expression yields a
    # real boolean; previously the raw tag string was passed, which fails
    # the module's boolean validation of 'force'.
    force: >-2
      {{ nginx_container_image_force_source
         | default(nginx_container_image_tag | default('', true) | length > 0) }}
  # Retry the pull to ride out transient registry/network failures.
  register: nginx_container_image_info
  until: nginx_container_image_info is success
  retries: 5
  delay: 3
- name: Ensure container '{{ nginx_container_name }}' is {{ nginx_container_state }}
  containers.podman.podman_container:
    name: "{{ nginx_container_name }}"
    image: "{{ nginx_container_image_reference }}"
    env: "{{ nginx_container_env | default(omit, true) }}"
    user: "{{ nginx_container_user | default(omit, true) }}"
    ports: "{{ nginx_container_ports | default(omit, true) }}"
    labels: "{{ nginx_container_labels | default(omit, true) }}"
    volumes: "{{ nginx_container_volumes | default(omit, true) }}"
    etc_hosts: "{{ nginx_container_etc_hosts | default(omit, true) }}"
    # podman_container uses the singular 'network' parameter.
    network: "{{ nginx_container_networks | default(omit, true) }}"
    restart_policy: "{{ nginx_container_restart_policy }}"
    state: "{{ nginx_container_state }}"

View File

@ -1,10 +1,30 @@
--- ---
- name: Check if state is supported
ansible.builtin.fail:
msg: >-2
Unsupported state '{{ nginx_state }}'. Supported
states are {{ nginx_states | join(', ') }}.
when: nginx_state not in nginx_states
- name: Ensure base path '{{ nginx_base_path }}' exists - name: Check if deployment_method is supported
ansible.builtin.fail:
msg: >-2
Unsupported state '{{ nginx_deployment_method }}'. Supported
states are {{ nginx_deployment_methods | join(', ') }}.
when: nginx_deployment_method not in nginx_deployment_methods
- name: Ensure nginx config file is {{ nginx_state }}
ansible.builtin.file:
path: "{{ nginx_config_file }}"
state: "{{ nginx_state }}"
when: nginx_state == 'absent'
- name: Ensure base path '{{ nginx_base_path }}' is {{ nginx_state }}
ansible.builtin.file: ansible.builtin.file:
path: "{{ nginx_base_path }}" path: "{{ nginx_base_path }}"
state: directory mode: "0755"
mode: 0755 state: >-2
{{ (nginx_state == 'present') | ternary('directory', 'absent') }}
- name: Ensure nginx config file is templated - name: Ensure nginx config file is templated
ansible.builtin.copy: ansible.builtin.copy:
@ -13,25 +33,8 @@
mode: 0640 mode: 0640
notify: notify:
- restart-nginx - restart-nginx
when: nginx_state == 'present'
- name: Ensure docker container image is present - name: Deploy using {{ nginx_deployment_method }}
community.docker.docker_image: ansible.builtin.include_tasks:
name: "{{ nginx_container_image_reference }}" file: "deploy-{{ nginx_deployment_method }}.yml"
state: present
source: pull
force_source: "{{ nginx_container_image_tag is defined and nginx_container_image_tag | string != '' }}"
- name: Ensure docker container '{{ nginx_container_name }}' is running
community.docker.docker_container:
name: "{{ nginx_container_name }}"
image: "{{ nginx_container_image_reference }}"
env: "{{ nginx_container_env | default(omit, true) }}"
user: "{{ nginx_container_user | default(omit, true) }}"
ports: "{{ nginx_container_ports | default(omit, true) }}"
labels: "{{ nginx_container_labels | default(omit, true) }}"
volumes: "{{ nginx_container_volumes | default(omit, true) }}"
etc_hosts: "{{ nginx_container_etc_hosts | default(omit, true) }}"
networks: "{{ nginx_container_networks | default(omit, true) }}"
purge_networks: "{{ nginx_container_purge_networks | default(omit, true) }}"
restart_policy: "{{ nginx_container_restart_policy }}"
state: started

View File

@ -0,0 +1,7 @@
---
# Valid values for nginx_state; validated in tasks/main.yml.
nginx_states:
- present
- absent
# Supported values for nginx_deployment_method; each maps to a
# deploy-<method>.yml task file.
nginx_deployment_methods:
- docker
- podman

View File

@ -0,0 +1,25 @@
# `finallycoffee.base.powerdns_tsig_key`
Simple ansible role for ensuring a TSIG key is present in a given PowerDNS-
instance.
## Usage
The usage example below assumes `powerdns` is running in a container named `powerdns` (as supplied to `powerdns_tsig_key_container_name`).
```yaml
- hosts: "{{ target_hosts }}"
become: true
roles:
- role: finallycoffee.base.powerdns_tsig_key
vars:
powerdns_tsig_key_name: "nameofmykey"
powerdns_tsig_key_path: "/var/lib/myapp/tsig.key"
        powerdns_tsig_key_algo: "hmac-sha512"
powerdns_tsig_key_path_owner: "myappuser"
powerdns_tsig_key_path_group: "myappgroup"
powerdns_tsig_key_container_name: 'powerdns'
```
> [!NOTE]
> Support for non-docker deployments is pending.

View File

@ -0,0 +1,2 @@
---
# Name of the docker container running PowerDNS; used as the target for
# 'pdnsutil' invocations via community.docker.docker_container_exec.
powerdns_tsig_key_container_name: powerdns

View File

@ -0,0 +1,104 @@
---
# Ensure a PowerDNS TSIG key exists and is persisted to a file on disk,
# generating it via 'pdnsutil' inside the PowerDNS container when missing.
- name: Ensure unix group '{{ powerdns_tsig_key_path_group }}' exists
  ansible.builtin.group:
    name: "{{ powerdns_tsig_key_path_group }}"
    state: "present"
    system: true
  register: powerdns_tsig_key_path_group_info
  when: powerdns_tsig_key_path_group is defined
- name: Ensure unix user '{{ powerdns_tsig_key_path_owner }}' exists
  ansible.builtin.user:
    name: "{{ powerdns_tsig_key_path_owner }}"
    state: "present"
    system: true
    create_home: false
    # NOTE(review): '<var> is defined | ternary(...)' depends on Jinja2
    # applying the filter to the test result; the explicit
    # '(var is defined) | ternary(...)' form is unambiguous — verify on the
    # Jinja2 version in use.
    groups: "{{ powerdns_tsig_key_path_group is defined | ternary([powerdns_tsig_key_path_group], omit) }}"
    append: "{{ powerdns_tsig_key_path_group is defined | ternary(true, omit) }}"
  register: powerdns_tsig_key_path_owner_info
  when: powerdns_tsig_key_path_owner is defined
- name: Check if TSIG key is already present
  ansible.builtin.stat:
    path: "{{ powerdns_tsig_key_path }}"
  register: powerdns_tsig_key_info
- name: Ensure TSIG key directory is present
  ansible.builtin.file:
    path: "{{ powerdns_tsig_key_path | dirname }}"
    state: directory
    owner: "{{ powerdns_tsig_key_path_owner | default(omit) }}"
    group: "{{ powerdns_tsig_key_path_group | default(omit) }}"
    mode: "u+rwX,g+rX"
    recurse: true
# Only (re)generate when the key file is missing or empty.
- name: Ensure a TSIG key is configured and persisted
  when: >-
    not powerdns_tsig_key_info.stat.exists
    or powerdns_tsig_key_info.stat.size == 0
  block:
    - name: Ensure TSIG key is not already present
      community.docker.docker_container_exec:
        container: "{{ powerdns_tsig_key_container_name }}"
        command: "pdnsutil list-tsig-keys"
      delegate_to: "{{ powerdns_tsig_key_hostname }}"
      register: powerdns_tsig_key_powerdns_info
      changed_when: false
      check_mode: false
      become: true
    # 'pdnsutil list-tsig-keys' output is matched on "<name>. <algo>. ".
    - name: Ensure TSIG key is generated in powerdns
      community.docker.docker_container_exec:
        container: "{{ powerdns_tsig_key_container_name }}"
        command: "pdnsutil generate-tsig-key '{{ powerdns_tsig_key_name }}' '{{ powerdns_tsig_key_algo }}'"
      when: >-
        (powerdns_tsig_key_name ~ '. ' ~ powerdns_tsig_key_algo ~ '. ')
        not in powerdns_tsig_key_powerdns_info.stdout
      delegate_to: "{{ powerdns_tsig_key_hostname }}"
      register: powerdns_tsig_key_powerdns_generated_tsig_key
      throttle: 1
      become: true
    - name: Ensure PowerDNS is restarted
      community.docker.docker_container:
        name: "{{ powerdns_tsig_key_container_name }}"
        state: started
        restart: true
      when: >-
        (powerdns_tsig_key_name ~ '. ' ~ powerdns_tsig_key_algo ~ '. ')
        not in powerdns_tsig_key_powerdns_info.stdout
      delegate_to: "{{ powerdns_tsig_key_hostname }}"
      throttle: 1
      become: true
    # The key is the last whitespace-separated token, either from the
    # freshly generated output or from the matching list-tsig-keys line.
    - name: Extract TSIG key into variable
      ansible.builtin.set_fact:
        powerdns_tsig_key_key: >-
          {{
            (powerdns_tsig_key_powerdns_generated_tsig_key.stdout | trim | split(' ') | list | last)
            if (powerdns_tsig_key_name ~ '. ' ~ powerdns_tsig_key_algo ~ '. ')
            not in powerdns_tsig_key_powerdns_info.stdout
            else (powerdns_generated_tsig_key | trim | split(' ') | list | last)
          }}
      vars:
        powerdns_generated_tsig_key: >-
          {% for line in powerdns_tsig_key_powerdns_info.stdout_lines %}
          {% if powerdns_tsig_key_name in line %}
          {{ line }}
          {% endif %}
          {% endfor %}
    - name: Ensure TSIG key is persisted into {{ powerdns_tsig_key_path }}
      ansible.builtin.copy:
        content: "{{ powerdns_tsig_key_key }}"
        dest: "{{ powerdns_tsig_key_path }}"
        owner: "{{ powerdns_tsig_key_path_owner | default(omit) }}"
        group: "{{ powerdns_tsig_key_path_group | default(omit) }}"
        mode: "0640"
    - name: Ensure TSIG key permissions on {{ powerdns_tsig_key_path }} are correct
      ansible.builtin.file:
        path: "{{ powerdns_tsig_key_path }}"
        owner: "{{ powerdns_tsig_key_path_owner | default(omit) }}"
        group: "{{ powerdns_tsig_key_path_group | default(omit) }}"
        mode: "u+rwX,g+rwX"

View File

@ -1,13 +0,0 @@
---
# `finallycoffee.base.redis` ansible role
Ansible role to deploy redis. Can use systemd or docker, depending on the
value of `redis_deployment_method`. Supports running the role multiple times
by setting `redis_instance` to a unique string to avoid namespace-collisions.
## Configuration
Extra configurations keys for redis can be provided as key-value pairs
in `redis_config`. For all configuration keys, consult the upstream example
redis.conf.

View File

@ -1,51 +0,0 @@
---
redis_instance: ''
redis_version: "7.2"
redis_user: "redis{{ '-' ~ redis_instance }}"
redis_deployment_method: docker
redis_config_file: "/etc/redis/redis{{ '-' ~ redis_instance }}.conf"
redis_data_directory: "/var/lib/redis/"
redis_config_dbfilename: "redis{{ '-' ~ redis_instance }}.rdb"
redis_config_dir: "{{ redis_data_directory }}"
redis_config_bind:
- -::1
- "{{ (redis_deployment_method == 'docker') | ternary('0.0.0.0', '127.0.0.1') }}"
- "{{ (redis_deployment_method == 'docker') | ternary('-::*', '::1') }}"
redis_config_port: "6379"
redis_config_procted_mode: true
#redis_config_maxmemory_bytes: 100mb
#redis_config_maxmemory_policy: noeviction
redis_config_unix_socket: "/run/redis.sock"
redis_config_unix_socket_perm: "700"
redis_container_name: "redis{{ '_' ~ redis_instance }}"
redis_container_image_flavour: alpine
redis_container_image_registry: "docker.io"
redis_container_image_namespace: ~
redis_container_image_name: "redis"
redis_container_image_reference: >-
{{ redis_container_image_repository ~ ':'
~ redis_container_image_tage | default(
redis_version ~ (redis_container_image_flavour | ternary(
'-' ~ redis_container_image_flavour, '')), true) }}
redis_container_image_repository: >-
{{ redis_container_image_registry ~ '/'
~ (redis_container_image_namespace | ternary(redis_container_image_namespace ~ '/'))
~ redis_container_image_name }}
redis_container_ports:
- "127.0.0.1:{{ redis_config_port }}:{{ redis_config_port }}"
- "[i::1]:{{ redis_config_port }}:{{ redis_config_port }}"
redis_container_restart_policy: "unless-stopped"
redis_container_state: "started"
redis_container_base_labels:
version: "{{ redis_version }}"
redis_container_all_labels: >-
{{ redis_container_base_labels | combine(redis_container_labels | default({})) }}
redis_container_base_volumes:
- "{{ redis_config_file }}:/usr/local/etc/redis/redis.conf:ro"
- "{{ redis_data_directory }}:{{ redis_data_directory }}:rw"
redis_container_all_volumes: >-
{{ redis_container_base_volumes + redis_container_volumes | default([]) }}

View File

@ -1,11 +0,0 @@
---
- name: Ensure redis container '{{ redis_container_name }}' is restarted
listen: restart-redis
community.docker.docker_container:
name: "{{ redis_container_image }}"
state: "started"
restart: true
when:
- redis_deployment_method == "docker"
- not redis_container_info.changed

View File

@ -1,50 +0,0 @@
---
- name: Ensure redis user '{{ redis_user }}' is present
ansible.builtin.user:
name: "{{ redis_user }}"
state: "present"
system: true
create_home: false
groups: "{{ redis_user_groups | default(omit) }}"
append: "{{ redis_user_groups is defined | ternary('true', omit) }}"
register: redis_user_info
- name: Ensure redis configuration is written out
ansible.builtin.copy:
content: |+
{% for key, value in redis_config_to_write %}
{{ key }} {{ value }}
{% endfor %}
dest: "{{ redis_config_file }}"
owner: "{{ redis_user_info.uid | default(redis_user) }}"
group: "{{ redis_user_info.group | default(redis_user) }}"
mode: "0640"
notify:
- restart-redis
- name: Ensure container image is present on host
community.docker.docker_image:
name: "{{ redis_container_image_reference }}"
state: "present"
source: "pull"
force_source: "{{ redis_container_image_tag | bool }}"
when: "redis_deployment_method == 'docker'"
- name: Ensure redis container '{{ redis_container_name }}' is '{{ redis_container_state }}'
community.docker.docker_container:
name: "{{ redis_container_name }}"
image: "{{ redis_container_image_reference }}"
env: "{{ redis_container_env | default(omit) }}"
ports: "{{ redis_container_ports | default(omit) }}"
labels: "{{ redis_container_all_labels }}"
volumes: "{{ redis_container_all_volumes }}"
networks: "{{ redis_container_networks | default(omit) }}"
purge_networks: "{{ redis_container_purge_networks | default(omit) }}"
etc_hosts: "{{ redis_container_etc_hosts | default(omit) }}"
memory: "{{ redis_container_memory | default(omit) }}"
memory_swap: "{{ redis_container_memory_swap | default(omit) }}"
restart_policy: "{{ redis_container_restart_policy }}"
state: "{{ redis_container_state }}"
register: redis_container_info
when: "redis_deployment_method == 'docker'"

View File

@ -1,13 +0,0 @@
---
redis_base_config:
dbfilename: "{{ redis_config_dbfilename }}"
dir: "{{ redis_data_directory }}"
bind: "{{ redis_config_bind | join(' ') }}"
port: "{{ redis_config_port }}"
"protected-mode": "{{ redis_config_protected_mode | ternary('yes', 'no') }}"
unixsocket: "{{ redis_config_unix_socket }}"
unixsocketperm: "{{ redis_config_unix_socket_perm }}"
redis_config_to_write: >-
{{ redis_base_config | combine(redis_config | default({}), recursive=True) }}

View File

@ -10,18 +10,41 @@ restic_backup_stdin_command: ~
restic_backup_stdin_command_filename: ~ restic_backup_stdin_command_filename: ~
restic_policy_keep_all_within: 1d restic_policy_keep_all_within: 1d
restic_policy_keep_hourly: 6 restic_policy_keep_hourly: 12
restic_policy_keep_daily: 2 restic_policy_keep_daily: 7
restic_policy_keep_weekly: 7 restic_policy_keep_weekly: 6
restic_policy_keep_monthly: 4 restic_policy_keep_monthly: 6
restic_policy_keep_yearly: 5
restic_policy_backup_frequency: hourly restic_policy_backup_frequency: hourly
restic_base_environment:
RESTIC_JOBNAME: "{{ restic_job_name | default('unknown') }}"
RESTIC_FORGET_KEEP_WITHIN: "{{ restic_policy_keep_all_within }}"
RESTIC_FORGET_KEEP_HOURLY: "{{ restic_policy_keep_hourly }}"
RESTIC_FORGET_KEEP_DAILY: "{{ restic_policy_keep_daily }}"
RESTIC_FORGET_KEEP_WEEKLY: "{{ restic_policy_keep_weekly }}"
RESTIC_FORGET_KEEP_MONTHLY: "{{ restic_policy_keep_monthly }}"
RESTIC_FORGET_KEEP_YEARLY: "{{ restic_policy_keep_yearly }}"
restic_s3_environment:
AWS_ACCESS_KEY_ID: "{{ restic_s3_key_id }}"
AWS_SECRET_ACCESS_KEY: "{{ restic_s3_access_key }}"
restic_complete_environment: >-
{{
restic_base_environment
| combine((restic_s3_environment
if (restic_s3_key_id and restic_s3_access_key) else {}) | default({}))
| combine(restic_environment | default({}))
}}
restic_policy: restic_policy:
keep_within: "{{ restic_policy_keep_all_within }}" keep_within: "{{ restic_policy_keep_all_within }}"
hourly: "{{ restic_policy_keep_hourly }}" hourly: "{{ restic_policy_keep_hourly }}"
daily: "{{ restic_policy_keep_daily }}" daily: "{{ restic_policy_keep_daily }}"
weekly: "{{ restic_policy_keep_weekly }}" weekly: "{{ restic_policy_keep_weekly }}"
monthly: "{{ restic_policy_keep_monthly }}" monthly: "{{ restic_policy_keep_monthly }}"
yearly: "{{ restic_policy_keep_yearly }}"
frequency: "{{ restic_policy_backup_frequency }}" frequency: "{{ restic_policy_backup_frequency }}"
restic_user: root restic_user: root

View File

@ -0,0 +1,37 @@
#!/usr/bin/env bash
# Back up the directories passed as arguments with restic, then apply the
# retention policy, export snapshot metrics and verify the repository.
# Configuration comes from environment variables supplied by the caller
# (RESTIC_REPOSITORY, RESTIC_PASSWORD, RESTIC_JOBNAME, RESTIC_RETRY_LOCK
# and the RESTIC_FORGET_KEEP_* retention knobs).
set -euo pipefail
# Optional caller-provided command to run before the backup starts.
if [[ -n ${RESTIC_PRE_BACKUP_HOOK-} ]]; then
/bin/bash -c "$RESTIC_PRE_BACKUP_HOOK"
fi
echo "List existing snapshots or attempt to initialize/unlock repository"
# If listing fails the repo may not exist yet (-> init); if init also fails
# the repo likely exists but is locked (-> unlock). The || chain keeps
# set -e from aborting mid-probe.
restic snapshots || restic init || restic unlock
sleep 1;
echo "Attempting to remove lock if present"
restic unlock
sleep 2
echo "Start backup on ${@:1}"
restic --verbose --retry-lock=${RESTIC_RETRY_LOCK:-5m} backup "${@:1}"
sleep 2
echo "Forget and prune old snapshots"
# NOTE(review): the fallback keep values below (6/2/7/4) differ from the
# role defaults wired in via the RESTIC_FORGET_KEEP_* environment; the env
# normally wins, but confirm the fallbacks are intentional.
restic forget --prune --retry-lock=${RESTIC_RETRY_LOCK:-5m} \
--keep-within=${RESTIC_FORGET_KEEP_WITHIN:-1d} \
--keep-hourly=${RESTIC_FORGET_KEEP_HOURLY:-6} \
--keep-daily=${RESTIC_FORGET_KEEP_DAILY:-2} \
--keep-weekly=${RESTIC_FORGET_KEEP_WEEKLY:-7} \
--keep-monthly=${RESTIC_FORGET_KEEP_MONTHLY:-4} \
--verbose
sleep 2
echo "Generate snapshot metrics"
# Render snapshot metadata into a node_exporter textfile; the .prom-src
# suffix is presumably post-processed into a .prom file — verify against
# the textfile-collector setup.
restic --json snapshots | /opt/restic-generate-snapshot-metrics.sh \
> /var/lib/node_exporter/restic-snapshots-${RESTIC_JOBNAME:-unknown}.prom-src
sleep 2
echo "Check repository"
restic check

View File

@ -0,0 +1,12 @@
#!/usr/bin/env bash
# Read 'restic --json snapshots' output on stdin and emit one Prometheus
# metric line per snapshot path:
#   restic_snapshots{hostname=...,username=...,short_id=...,paths=...} <unix time>
set -euo pipefail
RESTIC_JSON=$(</dev/stdin)
# Quote the expansion: unquoted, the shell word-splits and glob-expands the
# JSON document before jq sees it, corrupting the input.
# NOTE(review): the 'time' arithmetic assumes a '+HH:MM' offset in the
# snapshot timestamp; 'Z'-suffixed or negative offsets will not parse —
# confirm against the restic version in use.
echo "$RESTIC_JSON" | jq -r '.[]
| {
"hostname": .hostname,
"username": .username,
"short_id": .short_id,
"time": ((((.time | split(".")[0]) + "Z") | fromdate) - (3600 * (.time | split("+")[1] | split(":")[0] | tonumber + 1))),
"paths": .paths[]
} | "restic_snapshots{hostname=\"\(.hostname)\",username=\"\(.username)\",short_id=\"\(.short_id)\",paths=\"\(.paths)\"} \(.time)"'

View File

@ -8,7 +8,7 @@
when: restic_create_user when: restic_create_user
- name: Ensure either backup_paths or backup_stdin_command is populated - name: Ensure either backup_paths or backup_stdin_command is populated
when: restic_backup_paths|length > 0 and restic_backup_stdin_command when: restic_backup_paths|length > 0 and restic_backup_stdin_command and false
fail: fail:
msg: "Setting both `restic_backup_paths` and `restic_backup_stdin_command` is not supported" msg: "Setting both `restic_backup_paths` and `restic_backup_stdin_command` is not supported"

View File

@ -2,27 +2,50 @@
Description={{ restic_job_description }} Description={{ restic_job_description }}
[Service] [Service]
Type=oneshot Type=simple
User={{ restic_user }} User={{ restic_user }}
WorkingDirectory={{ restic_systemd_working_directory }} WorkingDirectory={{ restic_systemd_working_directory }}
SyslogIdentifier={{ restic_systemd_syslog_identifier }} SyslogIdentifier={{ restic_systemd_syslog_identifier }}
Environment=RESTIC_REPOSITORY={{ restic_repo_url }} Environment=RESTIC_REPOSITORY={{ restic_repo_url }}
Environment=RESTIC_PASSWORD={{ restic_repo_password }} Environment=RESTIC_PASSWORD={{ restic_repo_password }}
{% if restic_s3_key_id and restic_s3_access_key %} {% for kv in restic_complete_environment | dict2items %}
Environment=AWS_ACCESS_KEY_ID={{ restic_s3_key_id }} Environment={{ kv.key }}={{ kv.value }}
Environment=AWS_SECRET_ACCESS_KEY={{ restic_s3_access_key }} {% endfor %}
{% endif %}
{% if restic_init | default(true) %}
ExecStartPre=-/bin/sh -c '/usr/bin/restic snapshots || /usr/bin/restic init' ExecStartPre=-/bin/sh -c '/usr/bin/restic snapshots || /usr/bin/restic init'
{% if restic_backup_stdin_command %}
ExecStart=/bin/sh -c '{{ restic_backup_stdin_command }} | /usr/bin/restic backup --verbose --stdin --stdin-filename {{ restic_backup_stdin_command_filename }}'
{% else %}
ExecStart=/usr/bin/restic --verbose backup {{ restic_backup_paths | join(' ') }}
{% endif %} {% endif %}
ExecStartPost=/usr/bin/restic forget --prune --keep-within={{ restic_policy.keep_within }} --keep-hourly={{ restic_policy.hourly }} --keep-daily={{ restic_policy.daily }} --keep-weekly={{ restic_policy.weekly }} --keep-monthly={{ restic_policy.monthly }} {% if restic_unlock_before_backup | default(false) %}
ExecStartPost=-/usr/bin/restic snapshots ExecStartPre=-/bin/sh -c 'sleep 3 && /usr/bin/restic unlock'
ExecStartPost=/usr/bin/restic check {% endif %}
{% if restic_backup_pre_hook | default(false) %}
[Install] ExecStartPre=-{{ restic_backup_pre_hook }}
WantedBy=multi-user.target {% endif %}
{% if restic_backup_stdin_command %}
ExecStart=/bin/sh -c '{{ restic_backup_stdin_command }} | /usr/bin/restic backup \
--retry-lock {{ restic_retry_lock | default('5m') }} \
--verbose --stdin \
--stdin-filename {{ restic_backup_stdin_command_filename }}'
{% else %}
ExecStart=/opt/restic-backup-directories.sh {{ restic_backup_paths | join(' ') }}
{% endif %}
{% if restic_forget_prune | default(true) %}
ExecStartPost=/usr/bin/restic forget --prune \
--retry-lock {{ restic_retry_lock | default('5m') }} \
--keep-within={{ restic_policy.keep_within }} \
--keep-hourly={{ restic_policy.hourly }} \
--keep-daily={{ restic_policy.daily }} \
--keep-weekly={{ restic_policy.weekly }} \
--keep-monthly={{ restic_policy.monthly }} \
--keep-yearly={{ restic_policy.yearly }}
{% endif %}
{% if restic_list_snapshots | default(true) %}
ExecStartPost=-/usr/bin/restic snapshots --retry-lock {{ restic_retry_lock | default('5m') }}
{% endif %}
{% if restic_backup_post_hook | default(false) %}
ExecStartPost=-{{ restic_backup_post_hook }}
{% endif %}
{% if restic_check | default(true) %}
ExecStartPost=/usr/bin/restic check --retry-lock {{ restic_retry_lock | default('5m') }}
{% endif %}

View File

@ -1,9 +1,8 @@
[Unit] [Unit]
Description=Run {{ restic_job_name }} Description=Run {{ restic_timer_description | default(restic_job_name) }}
[Timer] [Timer]
OnCalendar={{ restic_policy.frequency }} OnCalendar={{ restic_policy.frequency }}
Persistent=True
Unit={{ restic_systemd_unit_naming_scheme }}.service Unit={{ restic_systemd_unit_naming_scheme }}.service
[Install] [Install]

23
roles/user/README.md Normal file
View File

@ -0,0 +1,23 @@
# `finallycoffee.base.user` ansible role
Provision and manage user accounts on the remote host. Supports setting user
home, gecos (display name) and shell.
Warning: if a user's home directory already exists and a different `home` is
configured, the role will attempt to move the existing home directory. Set
`move_home: false` on that user's entry to disable this behaviour.
## Examples
```yaml
- hosts: all
roles:
- role: finallycoffee.base.user
vars:
users:
- name: root
- name: alice
- name: bob
state: present
- name: eve
state: absent
```

View File

@ -0,0 +1,2 @@
---
# List of user definitions to manage on the host. Each entry is a mapping
# with at least `name`; see roles/user/README.md for the supported per-user
# keys (e.g. `state`, `home`, `shell`, gecos/display name).
users: []

View File

@ -0,0 +1,41 @@
---
# Tasks for one entry of `users` (passed in as `user` by the calling loop):
# create/remove the account, then manage its SSH authorized keys.
# Note: `default(x, true)` also applies the default when the value is empty,
# not only when the key is undefined.
- name: Ensure user '{{ user.name }}' is {{ user_state }}
  ansible.builtin.user:
    name: "{{ user.name }}"
    state: "{{ user_state }}"
    system: "{{ user.system | default(false, true) }}"
    shell: "{{ user.shell | default(omit, true) }}"
    home: "{{ user.home | default(omit, true) }}"
    create_home: "{{ user.create_home | default(true, true) }}"
    # An existing home is moved when `home` changes unless the user entry
    # sets `move_home: false` (see the role README warning).
    move_home: "{{ user.move_home | default(true, true) }}"
    # Pass a skeleton only when a home is created AND the entry defines one;
    # otherwise the whole block scalar renders to the `omit` placeholder.
    skeleton: >-2
      {{ (user.create_home | default(true, true) and 'skeleton' in user)
      | ternary(user.skeleton | default(''), omit) }}
    # GECOS field: an explicit `comment` wins over the `gecos` alias.
    comment: "{{ user.comment | default(user.gecos | default(omit, true), true) }}"
  vars:
    user_state: "{{ user.state | default('present', false) }}"
# Key handling is skipped for absent users and when no keys are listed.
- name: Ensure SSH authorized keys for '{{ user.name }}' are {{ user_state }}
  vars:
    user_state: "{{ user.state | default('present', false) }}"
  when:
    - user_state == 'present'
    - user.authorized_keys | default([]) | length > 0
  block:
    - name: Ensure .ssh directory for user '{{ user.name }}' exists
      ansible.builtin.file:
        # NOTE(review): falls back to '/home/<name>' when `home` is unset —
        # may not match the actual home of system users; confirm.
        path: "{{ user.home | default('/home/' + user.name) + '/.ssh' }}"
        state: "directory"
        owner: "{{ user.name }}"
        group: "{{ user.name }}"
        mode: "0700"
    - name: Ensure key is up to date
      ansible.posix.authorized_key:
        user: "{{ user.name }}"
        state: "{{ key.state | default('present', true) }}"
        key: "{{ key.type }} {{ key.key }}"
        # NOTE(review): `key.comment` has no default — an authorized_keys
        # entry without `comment` fails here and in the loop label; confirm
        # this is intended.
        comment: "{{ user.name }}-{{ key.comment }}"
      loop: "{{ user.authorized_keys }}"
      loop_control:
        loop_var: key
        label: "{{ user.name }}-{{ key.comment }}"

View File

@ -0,0 +1,8 @@
---
# Entry point of the role: apply the per-user task file once for every
# entry in `users`, exposing each entry to it as the `user` variable.
- name: Ensure users are configured
  ansible.builtin.include_tasks: "configure-user.yml"
  loop: "{{ users }}"
  loop_control:
    # Keep log output readable by labelling each iteration with the name.
    loop_var: user
    label: "{{ user.name }}"