Compare commits

...

48 Commits

SHA1 Message Date
19e41c15ad meta: bump collection version to 0.4.1 2026-01-30 22:54:10 +01:00
b82fb86d83 fix(wg_quick): fix syntax error in Table definition 2026-01-25 14:58:51 +01:00
3c0f9efbb3 feat(wg_quick): allow specifying an optional PresharedKey 2026-01-25 13:57:27 +01:00
acf1e32eca feat(wg_quick): add ansible role and playbook 2026-01-10 21:58:41 +01:00
0b11837c72 update(lego): bump version to 4.31.0 2026-01-08 22:16:25 +01:00
07b9a3f3d2 chore(playbooks/caddy_reverse_proxy): allow specifying full own caddy config 2026-01-03 19:13:46 +01:00
3e43cca13b feat(ntp): add role and playbook 2025-12-30 22:13:13 +01:00
f765345da3 feat(playbooks/docker_shorewall): add playbook 2025-12-29 17:16:09 +01:00
1f63757567 feat(playbooks/caddy_reverse_proxy): allow adding extra reverse proxy configuration 2025-12-29 15:26:33 +01:00
b0796f9812 feat(playbooks/caddy_reverse_proxy): allow custom reverse proxy definitions 2025-12-28 14:05:01 +01:00
044ee49795 feat(caddy_site): add ansible role for configuring sites using caddy 2025-12-25 15:34:14 +01:00
4fc0a671b3 feat(caddy): add ansible role 2025-12-24 22:18:02 +01:00
05ace71da7 update(lego): bump version to 4.30.1 2025-12-19 22:39:06 +01:00
2268d803a7 fix(powerdns_tsig_key): delegation to undefined variable breaks even when skipped 2025-12-14 17:39:36 +01:00
4634749378 update(nginx): bump version to 1.29.4 2025-12-10 18:12:35 +01:00
6688cf4039 update(lego): bump version to 4.29.0 2025-11-30 18:39:27 +01:00
fb639db97c feat(restic): allow delaying timer activation by a random delay in a specified interval 2025-11-29 20:42:11 +01:00
39b7190342 update(minio): bump container image tag to RELEASE.2025-10-15T17-29-55Z 2025-11-07 22:39:21 +01:00
a3ed7081f8 update(lego): bump version to 4.28.1 2025-11-06 19:41:21 +01:00
b564b1579f fix(playbooks/lego_certificate): fix missing trailing dot in record name 2025-11-02 17:27:10 +01:00
52f6f5b45b update(lego): bump version to 4.28.0 2025-11-01 15:30:08 +01:00
d0203a5dd7 update(nginx): bump version to 1.29.3 2025-10-28 16:58:43 +01:00
72bb3fd273 update(lego): bump version to 4.27.0 2025-10-18 22:50:35 +02:00
766957254a update(nginx): bump version to 1.29.2 2025-10-08 15:39:13 +02:00
5e777934ed update(lego): bump version to 4.26.0 2025-09-14 21:30:58 +02:00
b44110704b update(minio): bump container image tag to RELEASE.2025-09-07T16-13-09Z 2025-09-07 21:52:08 +02:00
416044692b meta: bump collection version to 0.4.0, update dependencies 2025-08-13 19:17:28 +02:00
31c68f5e89 update(nginx): bump version to 1.29.1 2025-08-13 18:13:56 +02:00
7b9ac4b0d5 update(minio): bump container image tag to RELEASE.2025-07-23T15-54-02Z 2025-08-08 21:15:31 +02:00
dcab6c5404 update(lego): bump version to 4.25.2 2025-08-06 21:34:03 +02:00
498834eb41 feat: add playbook for obtaining certificate for domains from letsencrypt using RFC2136 2025-08-05 20:26:36 +02:00
c827446a9f update(lego): bump version to 4.25.1 2025-07-22 22:13:52 +02:00
8117dab057 update(lego): bump version to 4.25.0 2025-07-22 22:13:25 +02:00
0aa69e03a9 update(minio): bump container image tag to RELEASE.2025-07-18T21-56-31Z 2025-07-19 21:33:46 +02:00
06cc920f18 update(lego): bump version to 4.24.0 2025-07-08 16:40:07 +02:00
0ce218e538 update(nginx): bump version to 1.29.0 2025-06-25 20:43:00 +02:00
814b5fbf78 update(minio): bump container tag to RELEASE.2025-06-13T11-33-47Z 2025-06-24 17:51:51 +02:00
a01ef18ea9 update(minio): bump container tag to RELEASE.2025-05-24T17-08-30Z 2025-06-24 17:49:45 +02:00
b692f58fd3 fix(openldap): allow properly configuring the cn=config namespace and the config db 2025-05-13 20:46:04 +02:00
0b03640e40 feat: add playbook for managing LDAP directory contents 2025-05-13 19:34:20 +02:00
66f7293710 feat(openldap): add ansible role for deployment 2025-05-06 19:18:01 +02:00
b14f36c7e8 meta: update collection version to 0.3.0 2025-04-27 18:44:40 +02:00
762e2ffc27 feat(mosh): add ansible role 2025-04-27 17:36:49 +02:00
115cfa8236 feat(openssh): add ansible role 2025-04-27 13:59:41 +02:00
e27eb145f1 update(minio): bump container tag to RELEASE.2025-04-22T22-12-26Z 2025-04-25 19:36:15 +02:00
c286e1a6b4 feat(docker_registry): add playbook to manage registry credentials 2025-04-24 16:55:42 +02:00
517a2fe96c update(nginx): bump version to 1.28.0 2025-04-24 15:10:24 +02:00
9d4baad491 fix(lego): only start systemd service if certificates are not present or changes occurred 2025-04-23 15:36:18 +02:00
72 changed files with 1439 additions and 10 deletions

View File

@@ -5,6 +5,8 @@
This ansible collection provides various roles for installing
and configuring basic system utilities like gnupg, ssh etc
- [`caddy`](roles/caddy/README.md): configures and runs caddy
- [`git`](roles/git/README.md): configures git on the target system
- [`gnupg`](roles/gnupg/README.md): configures gnupg on the target system

View File

@@ -1,21 +1,30 @@
namespace: finallycoffee
name: base
version: 0.2.1
version: 0.4.1
readme: README.md
authors:
- transcaffeine <transcaffeine@finally.coffee>
description: Roles for base services which are common dependencies other services like databases
description: >-2
Roles for base services which provide core functionality, like managing packages
and ssh, or which are common dependencies of other services like databases
dependencies:
"community.docker": "^4.2.0"
"community.docker": "^4.7.0"
"community.general": "^11.1.2"
"containers.podman": "^1.17.0"
license_file: LICENSE.md
build_ignore:
- '*.tar.gz'
repository: https://git.finally.coffee/finallycoffee/base
issues: https://codeberg.org/finallycoffee/ansible-collection-base/issues
tags:
- bootstrap
- ssh
- mosh
- docker
- lego
- minio
- nginx
- caddy
- restic
- user_management
- openldap

7
playbooks/caddy.yml Normal file
View File

@@ -0,0 +1,7 @@
---
- name: Install and configure caddy
hosts: "{{ caddy_hosts | default('caddy') }}"
become: "{{ caddy_become | default(false) }}"
gather_facts: "{{ caddy_gather_facts | default(false) }}"
roles:
- role: finallycoffee.base.caddy

View File

@@ -0,0 +1,31 @@
---
- name: Ensure reverse proxy configuration is created
hosts: "{{ target_hosts }}"
become: "{{ target_become | default(false) }}"
gather_facts: "{{ target_gather_facts | default(false) }}"
roles:
- role: finallycoffee.base.caddy_site
vars:
caddy_site_cert_basepath: >-2
{{ caddy_site_tls_store | default('/tls') }}/{{ caddy_site_name }}/certificates/{{ caddy_site_name }}
caddy_site_config: >-2
{{ caddy_site_config_override | default(caddy_site_default_config, true) }}
caddy_site_default_config: |+2
https://{{ caddy_site_name }} {
tls {{ caddy_site_cert_basepath }}.crt {{ caddy_site_cert_basepath }}.key
header {
Strict-Transport-Security "max-age=31536000"
}
encode zstd gzip
{% if caddy_reverse_proxy_template_block | default(true) -%}
reverse_proxy {{ caddy_reverse_proxy_backend_addr }} {
{{ caddy_reverse_proxy_extra_config | default('') | indent(6) }}
{%- if caddy_reverse_proxy_import_proxyheaders | default(true, true) %}
import proxyheaders
{%- endif +%}
}
{%- else -%}
{{- caddy_reverse_proxy_block | default('') | indent(4) }}
{%- endif +%}
}

View File

@@ -0,0 +1,16 @@
---
- name: Manage docker registry credentials
hosts: "{{ docker_hosts | default('docker', true) }}"
become: "{{ docker_become | default(false) }}"
gather_facts: "{{ docker_registry_gather_facts | default(true) }}"
tasks:
- name: Manage docker registry credentials
community.docker.docker_login:
registry_url: "{{ docker_registry.registry }}"
username: "{{ docker_registry.username | default(omit) }}"
password: "{{ docker_registry.password | default(omit) }}"
state: "{{ docker_registry.state | default('present') }}"
loop: "{{ docker_registries | default([], true) }}"
loop_control:
loop_var: "docker_registry"
label: "{{ docker_registry.username }}@{{ docker_registry.registry }}"

View File

@@ -0,0 +1,54 @@
---
- name: Configure shorewall for docker egress
hosts: "{{ docker_shorewall_hosts | default('docker:&shorewall') }}"
become: "{{ docker_shorewall_become | default(true, true) }}"
tasks:
- name: Add docker interface
ansible.builtin.lineinfile:
path: /etc/shorewall/interfaces
regex: "^dock"
line: |
dock docker0 bridge
- name: Add docker routing policy
ansible.builtin.blockinfile:
path: /etc/shorewall/policy
insertbefore: "^# THE FOLLOWING POLICY MUST BE LAST"
content: |
# Docker specific configuration
dock all ACCEPT
- name: Add docker zone
ansible.builtin.lineinfile:
path: /etc/shorewall/zones
regex: "^dock"
line: |
dock ipv4
- name: Add docker egress rules
ansible.builtin.blockinfile:
path: /etc/shorewall/rules
marker: "#{mark} ANSIBLE MANAGED BLOCK - DOCKER EGRESS"
content: |
#
# Docker egress configuration
#
ACCEPT dock all
- name: Add docker dns rules
ansible.builtin.blockinfile:
path: /etc/shorewall/rules
marker: "#{mark} ANSIBLE MANAGED BLOCK - DOCKER DNS"
content: |
#
# Docker dns configuration
#
DNS(ACCEPT) dock all
- name: Enable shorewall docker support
ansible.builtin.lineinfile:
path: /etc/shorewall/shorewall.conf
line: "DOCKER=Yes"
regex: "^DOCKER="
- name: Ensure shorewall reloaded
community.general.systemd_service:
service: "{{ item }}"
state: reloaded
loop:
- shorewall.service
- shorewall6.service

109
playbooks/ldap.yml Normal file
View File

@@ -0,0 +1,109 @@
---
- name: Configure LDAP directory information tree
hosts: "{{ ldap_hosts | default('ldap') }}"
become: "{{ ldap_become | default(false) }}"
gather_facts: "{{ ldap_gather_facts | default(false) }}"
vars:
_state: "{{ ldap_state | default('present') }}"
_ldap_bind_info: &ldap_bind_info
server_uri: "{{ ldap_server_uri }}"
bind_dn: "{{ ldap_bind_dn }}"
bind_pw: "{{ ldap_bind_pw }}"
roles:
# Ensure all defaults from openldap role are in scope
- role: finallycoffee.base.openldap
when: false
tasks:
- name: Ensure org units in '{{ ldap_base_dn }}' are {{ _state }}
community.general.ldap_entry:
<<: *ldap_bind_info
dn: "ou={{ org_unit }},{{ ldap_base_dn }}"
objectClass: "organizationalUnit"
state: "{{ _state }}"
loop: "{{ ldap_org_units | default([], true) }}"
loop_control:
loop_var: org_unit
- name: Ensure admin user is {{ _state }}
community.general.ldap_entry:
<<: *ldap_bind_info
dn: "uid={{ ldap_admin_user_rdn }},{{ ldap_admin_user_base }}"
objectClass: "{{ ldap_admin_user_object_classes }}"
attributes: "{{ ldap_admin_user_attributes }}"
state: "{{ _state }}"
vars:
ldap_admin_user_base: >-2
{{ ldap_admin_user_base_dn | default(ldap_base_dn, true) }}
when: ldap_admin_user_rdn is defined
- name: Ensure admin user attributes are correct
community.general.ldap_attrs:
<<: *ldap_bind_info
dn: "uid={{ ldap_admin_user_rdn }},{{ ldap_admin_user_base }}"
attributes: "{{ ldap_admin_user_attributes }}"
state: "{{ _state }}"
vars:
ldap_admin_user_base: >-2
{{ ldap_admin_user_base_dn | default(ldap_base_dn, true) }}
when:
- ldap_admin_user_rdn is defined
- _state == 'present'
- name: Ensure ldap groups are {{ _state }}
community.general.ldap_entry:
<<: *ldap_bind_info
dn: "{{ _ldap_group_dn }}"
objectClass: "{{ _ldap_group_object_classes }}"
attributes: "{{ _ldap_group_attributes }}"
state: "{{ _state }}"
vars:
_ldap_group_dn: >-2
cn={{ _ldap_group.name }},{{ ldap_group_base_dn }}
_ldap_group_object_classes:
- "groupOfNames"
_ldap_group_attributes:
cn: "{{ _ldap_group.name }}"
member: >-2
{{ _ldap_group.members | default([]) }}
loop: "{{ ldap_groups | default([], true) }}"
loop_control:
loop_var: _ldap_group
label: "{{ _ldap_group.name }}"
when:
- ldap_groups is defined
- ldap_group_base_dn is defined
- name: Ensure service accounts are {{ _state }}
community.general.ldap_entry:
<<: *ldap_bind_info
dn: "{{ _ldap_service_account_dn }}"
objectClass: "{{ _ldap_service_account_object_classes }}"
attributes: "{{ _ldap_service_account_attributes }}"
state: "{{ _state }}"
loop: &ldap_service_account_loop "{{ ldap_service_accounts | default([]) }}"
loop_control: &ldap_service_account_loop_control
loop_var: "_ldap_service_account"
label: "{{ _ldap_service_account.name }}"
vars: &ldap_service_account_vars
_ldap_service_account_dn: >-2
uid={{ _ldap_service_account.name }},{{ ldap_service_account_base_dn }}
_ldap_service_account_object_classes:
- "account"
- "simpleSecurityObject"
_ldap_service_account_attributes:
uid: "{{ _ldap_service_account.name }}"
userPassword: "{{ _ldap_service_account.password }}"
when: &ldap_service_account_when
- ldap_service_accounts is defined
- ldap_service_account_base_dn is defined
- name: Ensure service accounts attributes are correct
community.general.ldap_attrs:
<<: *ldap_bind_info
dn: "{{ _ldap_service_account_dn }}"
attributes: "{{ _ldap_service_account_attributes }}"
state: exact
loop: *ldap_service_account_loop
loop_control: *ldap_service_account_loop_control
vars: *ldap_service_account_vars
when: *ldap_service_account_when

View File

@@ -0,0 +1,85 @@
---
- name: Populate DNS, acquire TSIG key and obtain certificate
hosts: "{{ target_hosts | default('all') }}"
become: "{{ target_become | default(true) }}"
gather_facts: "{{ target_gather_facts | default(false) }}"
pre_tasks:
- name: Build target dns records
ansible.builtin.set_fact:
target_dns_records: "{{ target_dns_records + [ _dns_record ] }}"
vars:
_dns_record:
type: "CNAME"
name: "_acme-challenge.{{ _domain }}."
content: "{{ target_tsig_key_name }}.{{ target_acme_zone }}."
loop: "{{ target_domains }}"
loop_control:
loop_var: "_domain"
- name: Populate dns_server if not given
ansible.builtin.set_fact:
dns_server: "{{ target_dns_server }}"
when: dns_server is not defined
roles:
- role: finallycoffee.base.dns
vars:
dns_records: "{{ target_dns_records + target_dns_additional_records }}"
dns_tsig_name: "{{ target_dns_tsig_key.name }}"
dns_tsig_algo: "{{ target_dns_tsig_key.algorithm }}"
dns_tsig_key: "{{ target_dns_tsig_key.key }}"
delegate_to: localhost
- role: finallycoffee.base.powerdns_tsig_key
vars:
powerdns_tsig_key_algo: "{{ target_powerdns_tsig_key_algo }}"
powerdns_tsig_key_name: "{{ target_tsig_key_name }}"
powerdns_tsig_key_path: "{{ target_tsig_key_path }}"
powerdns_tsig_key_path_owner: "{{ target_acme_user }}"
powerdns_tsig_key_path_group: "{{ target_acme_group }}"
- role: finallycoffee.base.lego
vars:
lego_instance: "{{ target_lego_instance }}"
lego_instance_base_path: "{{ target_lego_instance_base_path }}"
lego_environment: "{{ target_lego_environment }}"
lego_cert_domains: "{{ target_lego_domains }}"
lego_acme_account_email: "{{ target_acme_account_email }}"
lego_acme_challenge_type: "{{ target_lego_acme_challenge_type }}"
lego_acme_challenge_provider: "{{ target_lego_acme_challenge_provider }}"
lego_acme_server_url: "{{ target_lego_acme_server_url }}"
vars:
target_domains: []
target_acme_zone: ~
target_acme_account_email: ~
target_dns_server: ~
target_dns_additional_records: []
target_dns_tsig_key: {}
target_lego_instance: "{{ target_domains | first }}"
target_lego_instance_base_path: "/opt/acme"
target_lego_domains: "{{ target_domains }}"
target_lego_acme_challenge_type: "dns"
target_lego_acme_challenge_provider: "rfc2136"
target_lego_acme_server_url: >-2
{{ lego_letsencrypt_server_urls.prod }}
target_lego_environment:
RFC2136_TSIG_KEY: "{{ target_tsig_key_name }}"
RFC2136_TSIG_SECRET_FILE: "{{ target_tsig_key_path }}"
RFC2136_TSIG_ALGORITHM: "{{ target_powerdns_tsig_key_algo }}"
RFC2136_NAMESERVER: "{{ target_dns_server }}"
RFC2136_DNS_TIMEOUT: 15
RFC2136_TTL: 60
RFC2136_SEQUENCE_INTERVAL: 5
RFC2136_POLLING_INTERVAL: 10
RFC2136_PROPAGATION_TIMEOUT: >-2
{{ (target_lego_domains | length * 120) | int }}
LEGO_EXPERIMENTAL_CNAME_SUPPORT: "true"
target_tsig_key_name: "{{ target_lego_instance | hash('sha1') }}"
target_tsig_key_path: >-2
{{ target_lego_instance_base_path }}/{{ target_lego_instance }}/secrets/rfc2136_tsig.key
target_tsig_key_path_owner:
target_tsig_key_path_group:
target_acme_user: "acme-{{ target_lego_instance }}"
target_acme_user_id: >-2
{{ powerdns_tsig_key_path_owner_info.uid }}
target_acme_group: "acme-{{ target_lego_instance }}"
target_acme_group_id: >-2
{{ powerdns_tsig_key_path_owner_info.gid }}
target_powerdns_tsig_key_algo: "hmac-sha256"
target_dns_records: []

6
playbooks/mosh.yml Normal file
View File

@@ -0,0 +1,6 @@
---
- name: Manage and configure mosh
hosts: "{{ mosh_hosts | default('mosh', true) }}"
become: "{{ mosh_become | default(true) }}"
roles:
- role: finallycoffee.base.mosh

7
playbooks/ntp.yml Normal file
View File

@@ -0,0 +1,7 @@
---
- name: Install and configure network time protocol daemon
hosts: "{{ ntp_hosts | default('ntp') }}"
become: "{{ ntp_become | default(false) }}"
gather_facts: "{{ ntp_gather_facts | default(false) }}"
roles:
- role: finallycoffee.base.ntp

7
playbooks/openldap.yml Normal file
View File

@@ -0,0 +1,7 @@
---
- name: Deploy and configure openLDAP
hosts: "{{ openldap_hosts | default('openldap', true) }}"
become: "{{ openldap_become | default(true) }}"
gather_facts: "{{ openldap_playbook_gather_facts | default(false) }}"
roles:
- role: finallycoffee.base.openldap

7
playbooks/openssh.yml Normal file
View File

@@ -0,0 +1,7 @@
---
- name: Ensure openssh is installed and configured
hosts: "{{ openssh_target | default('openssh') }}"
become: "{{ openssh_become | default(true) }}"
gather_facts: "{{ openssh_gather_facts | default(true) }}"
roles:
- role: finallycoffee.base.openssh

7
playbooks/wg_quick.yml Normal file
View File

@@ -0,0 +1,7 @@
---
- name: Configure wireguard interfaces with wg_quick
hosts: "{{ wg_quick_hosts | default('wg_quick') }}"
become: "{{ wg_quick_become | default(false) }}"
gather_facts: "{{ wg_quick_gather_facts | default(false) }}"
roles:
- role: finallycoffee.base.wg_quick

10
roles/caddy/README.md Normal file
View File

@@ -0,0 +1,10 @@
# `finallycoffee.base.caddy` ansible role
Deploy a (pre-)configured [caddy v2](https://caddyserver.com) web
server / proxy using ansible.
## Configuration
To change the default configuration of reading all files from
`/etc/caddy/sites.d/` (see `caddy_dynamic_configs_dir`), specify
your desired configuration in `caddy_config`.
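A minimal sketch of such an override in inventory or `group_vars` could look like the following (the site block is purely illustrative and not shipped with the role):

```yaml
# Hypothetical group_vars snippet: replaces the role's default Caddyfile content.
caddy_config: |+2
  {
    auto_https disable_redirects
  }
  # Illustrative static site; adjust to your own needs.
  https://example.org {
    respond "Hello from caddy" 200
  }
```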

View File

@@ -0,0 +1,23 @@
---
caddy_config: |+2
{
auto_https disable_redirects
}
(proxyheaders) {
header_up X-Forwarded-Ssl on
header_up Host {host}
header_up X-Real-IP {remote}
header_up X-Forwarded-For {remote}
# header_up X-Forwarded-Port {port}
header_up X-Forwarded-Proto {scheme}
header_up X-Url-Scheme {scheme}
header_up X-Forwarded-Host {host}
}
# Import all configurations
import {{ caddy_dynamic_configs_dir }}/*/Caddyfile
:80 {
redir / https://{host}{uri} 301
}

View File

@@ -0,0 +1,43 @@
---
caddy_container_image_registry: "docker.io"
caddy_container_image_namespace: "library"
caddy_container_image_repository: "caddy"
caddy_container_image_name: >-2
{{ [
caddy_container_image_registry | default([], true),
caddy_container_image_namespace | default([], true),
caddy_container_image_repository
] | flatten | join('/') }}
caddy_container_image_tag: ~
caddy_container_image: >-2
{{ [
caddy_container_image_name,
caddy_container_image_tag | default(caddy_version, true)
] | join(':') }}
caddy_container_image_source: "pull"
caddy_container_image_force_source: >-2
{{ caddy_container_image_tag | ansible.builtin.type_debug != 'NoneType' }}
caddy_container_image_state: "{{ caddy_state }}"
caddy_container_name: "caddy"
caddy_container_env: ~
caddy_container_ports: ~
caddy_container_user: ~
caddy_container_labels: ~
caddy_container_volumes: ~
caddy_container_config_dir: "/etc/caddy"
caddy_container_default_volumes:
- "{{ caddy_config_dir }}:{{ caddy_container_config_dir }}:ro"
- "{{ caddy_dynamic_configs_dir }}:{{ caddy_dynamic_configs_dir }}:ro"
- "{{ caddy_config_internal_dir }}:/config:rw"
- "{{ caddy_state_dir }}:/data:rw"
caddy_container_all_volumes: >-2
{{ caddy_container_default_volumes | default([], true)
+ caddy_container_volumes | default([], true) }}
caddy_container_state: >-2
{{ (caddy_state == 'present') | ternary('started', 'absent') }}
caddy_container_restart_policy: "on-failure"
caddy_container_networks: ~
caddy_container_network_mode: ~
caddy_container_etc_hosts: ~

View File

@@ -0,0 +1,11 @@
---
caddy_user: "caddy"
caddy_version: "2.10.2"
caddy_config_file: "/etc/caddy/Caddyfile"
caddy_config_dir: "{{ caddy_config_file | ansible.builtin.dirname }}"
caddy_config_internal_dir: "{{ caddy_config_dir }}/config"
caddy_dynamic_configs_dir: "{{ caddy_config_dir }}/sites.d"
caddy_state_dir: "/var/lib/caddy"
caddy_state: "present"
caddy_deployment_method: "docker"

View File

@@ -0,0 +1,7 @@
---
caddy_user_state: "{{ caddy_state }}"
caddy_user_system: true
caddy_user_create_home: false
caddy_run_uid: "{{ caddy_user_info.uid | default(caddy_user) }}"
caddy_run_gid: "{{ caddy_user_info.group | default(caddy_user) }}"

13
roles/caddy/meta/main.yml Normal file
View File

@@ -0,0 +1,13 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
role_name: caddy
description: Deploy caddy, a webserver
galaxy_tags:
- caddy
- zerossl
- http
- webserver
- docker
- podman

View File

@@ -0,0 +1,26 @@
---
- name: Ensure container image '{{ caddy_container_image }}' is {{ caddy_container_image_state }}
community.docker.docker_image:
name: "{{ caddy_container_image }}"
state: "{{ caddy_container_image_state }}"
source: "{{ caddy_container_image_source }}"
force_source: "{{ caddy_container_image_force_source }}"
register: caddy_container_image_info
until: caddy_container_image_info is success
retries: 10
delay: 3
- name: Ensure container '{{ caddy_container_name }}' is {{ caddy_container_state }}
community.docker.docker_container:
name: "{{ caddy_container_name }}"
image: "{{ caddy_container_image }}"
state: "{{ caddy_container_state }}"
env: "{{ caddy_container_env | default(omit, true) }}"
user: "{{ caddy_container_user | default(omit, true) }}"
ports: "{{ caddy_container_ports | default(omit, true) }}"
labels: "{{ caddy_container_labels | default(omit, true) }}"
volumes: "{{ caddy_container_all_volumes }}"
networks: "{{ caddy_container_networks | default(omit, true) }}"
etc_hosts: "{{ caddy_container_etc_hosts | default(omit, true) }}"
network_mode: "{{ caddy_container_network_mode | default(omit, true) }}"
restart_policy: "{{ caddy_container_restart_policy }}"

View File

@@ -0,0 +1,52 @@
---
- name: Ensure state '{{ caddy_state }}' is valid
ansible.builtin.fail:
msg: >-2
Unsupported caddy_state '{{ caddy_state }}'.
Supported states are {{ caddy_states | join(', ') }}.
when: caddy_state not in caddy_states
- name: Ensure deployment method '{{ caddy_deployment_method }}' is valid
ansible.builtin.fail:
msg: >-2
Unsupported caddy_deployment_method '{{ caddy_deployment_method }}'.
Supported deployment methods are {{ caddy_deployment_methods | join(', ') }}.
when: caddy_deployment_method not in caddy_deployment_methods
- name: Ensure caddy user '{{ caddy_user }}' is {{ caddy_user_state }}
ansible.builtin.user:
name: "{{ caddy_user }}"
state: "{{ caddy_user_state }}"
system: "{{ caddy_user_system }}"
create_home: "{{ caddy_user_create_home }}"
register: "caddy_user_info"
- name: Ensure base directories are present
ansible.builtin.file:
path: "{{ dir.name }}"
state: "directory"
owner: "{{ dir.owner | default(caddy_run_uid) }}"
group: "{{ dir.group | default(caddy_run_gid) }}"
mode: "{{ dir.mode | default('0750') }}"
when: caddy_state == 'present'
loop:
- name: "{{ caddy_config_dir }}"
- name: "{{ caddy_dynamic_configs_dir }}"
- name: "{{ caddy_config_internal_dir }}"
- name: "{{ caddy_state_dir }}"
loop_control:
loop_var: "dir"
label: "{{ dir.name }}"
- name: Ensure caddy configuration is up to date
ansible.builtin.copy:
dest: "{{ caddy_config_file }}"
content: "{{ caddy_config }}"
owner: "{{ caddy_run_uid }}"
group: "{{ caddy_run_gid }}"
mode: "0640"
when: caddy_state == 'present'
- name: Ensure caddy is deployed using {{ caddy_deployment_method }}
ansible.builtin.include_tasks:
file: "deploy-{{ caddy_deployment_method }}.yml"

View File

@@ -0,0 +1,6 @@
---
caddy_states:
- "present"
- "absent"
caddy_deployment_methods:
- "docker"

View File

@@ -0,0 +1,7 @@
# `finallycoffee.base.caddy_site` ansible role
Provision a single site configuration in caddy.
Set `caddy_site_name` as a unique
site identifier (needs to be a valid filename) and `caddy_site_config`
to contain the actual `Caddyfile` contents.
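A short usage sketch (the host group, site name and backend address are placeholders):

```yaml
- hosts: webproxy  # placeholder host group
  roles:
    - role: finallycoffee.base.caddy_site
      vars:
        caddy_site_name: "example.org"
        caddy_site_config: |+2
          https://example.org {
            reverse_proxy localhost:8080
          }
```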

View File

@@ -0,0 +1,13 @@
---
caddy_site_name: ~
caddy_site_config: ~
caddy_site_state: "present"
caddy_site_configs: "/etc/caddy/sites.d"
caddy_site_config_dir: >-2
{{ caddy_site_configs }}/{{ caddy_site_name }}
caddy_site_config_file: >-2
{{ caddy_site_config_dir }}/Caddyfile
caddy_site_owner: "caddy"
caddy_site_group: "caddy"

View File

@@ -0,0 +1,11 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
role_name: caddy_site
description: Deploy a site's configuration in caddy
galaxy_tags:
- caddy
- zerossl
- http
- webserver

View File

@@ -0,0 +1,26 @@
---
- name: Fail if required variables are not populated
ansible.builtin.fail:
msg: "Either `caddy_site_name` or `caddy_site_config` is not provided"
when: >-2
(caddy_site_name | ansible.builtin.type_debug == 'NoneType')
or
(caddy_site_config | ansible.builtin.type_debug == 'NoneType')
- name: Ensure directory for caddy site config '{{ caddy_site_name }}' is {{ caddy_site_state }}
ansible.builtin.file:
path: "{{ caddy_site_config_dir }}"
state: >-2
{{ (caddy_site_state == 'present') | ternary('directory', 'absent') }}
owner: "{{ caddy_site_owner }}"
group: "{{ caddy_site_group }}"
mode: "0750"
- name: Ensure caddy site configuration is templated
ansible.builtin.copy:
dest: "{{ caddy_site_config_file }}"
content: "{{ caddy_site_config }}"
owner: "{{ caddy_site_owner }}"
group: "{{ caddy_site_group }}"
mode: "0640"
when: caddy_site_state == 'present'

View File

@@ -1,6 +1,6 @@
---
lego_user: "lego"
lego_version: "4.23.0"
lego_version: "4.31.0"
lego_instance: default
lego_base_path: "/opt/lego"
lego_cert_user: "acme-{{ lego_instance }}"

View File

@@ -107,6 +107,7 @@
{{ entry.key }}={{ entry.value }}
{% endfor %}
dest: "{{ lego_base_path }}/{{ lego_instance }}.conf"
register: lego_env_file_info
- name: Ensure timer unit is templated
ansible.builtin.template:
@@ -120,6 +121,7 @@
src: "lego_run.sh"
dest: "{{ lego_base_path }}/run.sh"
mode: "0755"
register: lego_handler_script_info
- name: Ensure per-instance base path is created
ansible.builtin.file:
@@ -159,7 +161,18 @@
name: "{{ lego_systemd_timer_name }}"
state: "started"
- name: Check if certificates are present
ansible.builtin.find:
path: "{{ lego_instance_path }}/certificates"
recurse: false
file_type: "file"
register: lego_certificate_info
- name: Ensure systemd service is started once to obtain the certificate
ansible.builtin.systemd_service:
name: "{{ lego_systemd_service_name }}"
state: "started"
when: >-2
lego_handler_script_info.changed
or lego_env_file_info.changed
or lego_certificate_info.files | default([]) | length == 0

View File

@@ -1,7 +1,7 @@
---
minio_container_name: minio
minio_container_image_name: "docker.io/minio/minio"
minio_container_image_tag: "RELEASE.2025-04-08T15-41-24Z"
minio_container_image_tag: "RELEASE.2025-10-15T17-29-55Z"
minio_container_image: "{{ minio_container_image_name }}:{{ minio_container_image_tag }}"
minio_container_networks: []
minio_container_ports: []

4
roles/mosh/README.md Normal file
View File

@@ -0,0 +1,4 @@
# `finallycoffee.base.mosh`
Installs [`mosh`](https://mosh.org/#), a remote 'mobile shell' which supports
roaming and re-uses SSH for the authentication layer.

View File

@@ -0,0 +1,2 @@
---
mosh_state: present

View File

@@ -0,0 +1,15 @@
---
mosh_debian_packages:
- "mosh"
- "openssh-server"
mosh_fedora_packages:
- "mosh"
- "openssh-server"
mosh_archlinux_packages:
- "mosh"
- "openssh"
mosh_packages:
debian: "{{ mosh_debian_packages }}"
fedora: "{{ mosh_fedora_packages }}"
archlinux: "{{ mosh_archlinux_packages }}"

View File

@@ -0,0 +1,30 @@
---
- name: Ensure mosh is {{ mosh_state }} (dnf)
ansible.builtin.dnf:
name: "{{ mosh_packages[_key] }}"
state: "{{ mosh_state }}"
when:
- ansible_facts['pkg_mgr'] in ['dnf', 'dnf5']
- _key in mosh_packages.keys()
vars:
_key: "{{ ansible_distribution | lower }}"
- name: Ensure mosh is {{ mosh_state }} (apt)
ansible.builtin.apt:
package: "{{ mosh_packages[_key] }}"
state: "{{ mosh_state }}"
when:
- ansible_facts['pkg_mgr'] in ['apt']
- _key in mosh_packages.keys()
vars:
_key: "{{ ansible_distribution | lower }}"
- name: Ensure mosh is {{ mosh_state }} (pacman)
community.general.pacman:
name: "{{ mosh_packages[_key] }}"
state: "{{ mosh_state }}"
when:
- ansible_facts['pkg_mgr'] in ['pacman']
- _key in mosh_packages.keys()
vars:
_key: "{{ ansible_distribution | lower }}"

11
roles/mosh/tasks/main.yml Normal file
View File

@@ -0,0 +1,11 @@
---
- name: Ensure 'mosh_state' is valid
ansible.builtin.fail:
msg: >-2
Invalid state '{{ mosh_state }}' for 'mosh_state'!
Allowed states are {{ mosh_states | join(', ') }}.
when: mosh_state not in mosh_states
- name: Ensure mosh is {{ mosh_state }}
ansible.builtin.include_tasks:
file: "install.yml"

4
roles/mosh/vars/main.yml Normal file
View File

@@ -0,0 +1,4 @@
---
mosh_states:
- "present"
- "absent"

View File

@@ -1,5 +1,5 @@
---
nginx_version: "1.27.5"
nginx_version: "1.29.4"
nginx_flavour: alpine
nginx_base_path: /opt/nginx
nginx_config_file: "{{ nginx_base_path }}/nginx.conf"

4
roles/ntp/README.md Normal file
View File

@@ -0,0 +1,4 @@
# `finallycoffee.base.ntp`
Install `ntp`, configure a timezone by sym-linking `/etc/localtime`
and enable the systemd service.
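A minimal sketch of pulling the role in with a custom timezone (the values shown are examples):

```yaml
- hosts: ntp
  become: true
  roles:
    - role: finallycoffee.base.ntp
      vars:
        ntp_timezone: "Europe/Berlin"  # example value; the role defaults to Europe/Paris
```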

View File

@@ -0,0 +1,14 @@
---
ntp_state: present
ntp_package_name: "ntp"
ntp_timezone: "Europe/Paris"
ntp_systemd_service_name: "ntpd.service"
ntp_systemd_service_state: >-2
{{ (ntp_state == 'present') | ternary('started', 'stopped') }}
ntp_systemd_service_enabled: >-2
{{ (ntp_state == 'present') }}
ntp_etc_localtime_file: "/etc/localtime"
ntp_usr_share_zoneinfo_path: "/usr/share/zoneinfo"

8
roles/ntp/meta/main.yml Normal file
View File

@@ -0,0 +1,8 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
role_name: ntp
description: Install network time protocol daemon
galaxy_tags:
- ntp

28
roles/ntp/tasks/main.yml Normal file
View File

@@ -0,0 +1,28 @@
---
- name: Check if 'ntp_state' is valid
ansible.builtin.fail:
msg: >-2
Invalid state '{{ ntp_state }}'! Valid
states are {{ ntp_states | join(', ') }}.
when: ntp_state not in ntp_states
- name: Ensure system package is {{ ntp_state }}
ansible.builtin.package:
name: "{{ ntp_package_name }}"
state: "{{ ntp_state }}"
- name: Ensure /etc/localtime is symlinked
ansible.builtin.file:
src: "{{ ntp_usr_share_zoneinfo_path }}/{{ ntp_timezone }}"
dest: "{{ ntp_etc_localtime_file }}"
state: "{{ (ntp_state == 'present') | ternary('link', 'absent') }}"
- name: Ensure ntp systemd service is configured
ansible.builtin.systemd:
name: "{{ ntp_systemd_service_name }}"
enabled: "{{ ntp_systemd_service_enabled }}"
- name: Ensure ntp systemd service is {{ ntp_systemd_service_state }}
ansible.builtin.systemd:
name: "{{ ntp_systemd_service_name }}"
state: "{{ ntp_systemd_service_state }}"

4
roles/ntp/vars/main.yml Normal file
View File

@@ -0,0 +1,4 @@
---
ntp_states:
- "present"
- "absent"

3
roles/openldap/README.md Normal file
View File

@@ -0,0 +1,3 @@
# `finallycoffee.base.openldap` ansible role
Deploy and configure [OpenLDAP](https://www.openldap.org/).

View File

@@ -0,0 +1,61 @@
---
openldap_container_name: "openldap"
openldap_container_image_registry: docker.finally.coffee
openldap_container_image_namespace: containers
openldap_container_image_name: "openldap"
openldap_container_image_tag: ~
openldap_container_image_source: "pull"
openldap_container_image_force_source: >-2
{{ openldap_container_image_tag | default(false, true) }}
openldap_container_image_repository: >-2
{{
[
openldap_container_image_registry | default([], true),
openldap_container_image_namespace | default([], true),
openldap_container_image_name
] | flatten | join('/')
}}
openldap_container_image: >-2
{{
[
openldap_container_image_repository,
openldap_container_image_tag
| default(openldap_alpine_package_version, true),
] | join(':')
}}
openldap_container_env: ~
openldap_container_user: ~
openldap_container_ports: ~
openldap_container_labels: ~
openldap_container_volumes: ~
openldap_container_networks: ~
openldap_container_network_mode: ~
openldap_container_dns_servers: ~
openldap_container_etc_hosts: ~
openldap_container_ulimits:
- "nofile:{{ openldap_fd_soft_limit }}:{{ openldap_fd_hard_limit }}"
openldap_container_memory: "256M"
openldap_container_memory_swap: ~
openldap_container_memory_reservation: "128M"
openldap_container_restart_policy: "on-failure"
openldap_container_state: >-2
{{ (openldap_state == 'present') | ternary('started', 'absent') }}
openldap_container_data_path: "{{ openldap_data_path }}"
openldap_container_config_path: "{{ openldap_config_path }}"
openldap_container_socket_path: "{{ openldap_socket_path }}"
openldap_container_base_volumes:
- "{{ openldap_config_path }}:{{ openldap_container_config_path }}:Z"
- "{{ openldap_data_path }}:{{ openldap_container_data_path }}:rw"
- "{{ openldap_socket_path }}:{{ openldap_container_socket_path }}:rw"
openldap_container_all_volumes: >-2
{{ openldap_container_base_volumes | default([], true)
+ openldap_container_volumes | default([], true) }}
openldap_init_container_volumes:
- "{{ [openldap_slapd_path, openldap_slapd_path, 'ro'] | join(':') }}"
openldap_container_healthcheck:
test: >-2
[[ $(netstat -plnte | grep slapd | wc -l) -ge 1 ]]
&& [[ $(ps aux | grep slapd | wc -l) -ge 1 ]]
|| exit 1

View File

@@ -0,0 +1,24 @@
---
openldap_version: "2.6.8"
openldap_alpine_revision: "0"
openldap_alpine_package_version: >-2
v{{ openldap_version }}-r{{ openldap_alpine_revision | string }}
openldap_domain: ~
openldap_organization: ~
openldap_config_path: "/etc/openldap/"
openldap_olc_path: "{{ openldap_config_path }}/{0}config"
openldap_slapd_path: "{{ openldap_config_path }}/slapd.ldif"
openldap_schema_path: "{{ openldap_config_path }}/schema"
openldap_data_path: "/var/lib/openldap"
openldap_socket_path: "/run/openldap"
openldap_socket: "{{ openldap_socket_path }}/slapd.sock"
openldap_socket_url: >-2
ldapi://{{ openldap_socket | urlencode | replace('/', '%2F') }}
openldap_state: "present"
openldap_deployment_method: "docker"
openldap_slapadd_init_command: >-2
slapadd -v -F {{ openldap_olc_path }} -n 0 -l {{ openldap_slapd_path }}

View File

@@ -0,0 +1,65 @@
---
openldap_dn: >-2
dc={{ openldap_domain | regex_replace('\.', ',dc=') }}
openldap_root_username: "admin"
openldap_root_pw: ~
openldap_root_node_object_classes:
- "top"
- "dcObject"
- "organization"
openldap_root_node_dc: "{{ openldap_domain | regex_replace('\\..+', '') }}"
openldap_root_node_o: "{{ openldap_organization | default('not set!', true) }}"
openldap_fd_soft_limit: "8192"
openldap_fd_hard_limit: "8192"
openldap_module_path: "/usr/lib/openldap"
openldap_modules:
- "mdb"
- "hdb"
openldap_core_schema_path: "{{ openldap_schema_path }}/core.ldif"
openldap_enabled_schemas:
- name: "cosine"
- name: "inetorgperson"
openldap_additional_schemas: []
openldap_schemas: >-2
{{ openldap_enabled_schemas + openldap_additional_schemas }}
openldap_config_dn: "cn=config"
openldap_config_db_dn: "olcDatabase={0}config,cn=config"
openldap_config_db_olc_access:
- '{0} to *
by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" manage
by * none'
openldap_config_attributes: {}
openldap_config_db_attributes:
olcAccess: "{{ openldap_config_db_olc_access }}"
openldap_default_indices:
- "objectClass eq"
openldap_indices: []
openldap_default_database_name: "mdb"
openldap_default_database_object_class: "olcMdbConfig"
openldap_default_database_suffix: "{{ openldap_dn }}"
openldap_default_database_root_dn: >-2
cn={{ openldap_root_username }},{{ openldap_default_database_suffix }}
openldap_default_database_root_pw: "{{ openldap_root_pw }}"
openldap_default_database_directory: >-2
{{ openldap_data_path }}/{{ openldap_default_database_name }}
openldap_default_database_indices: >-2
{{ openldap_default_indices + openldap_indices }}
openldap_default_database_config: >-2
olcDatabase={1}{{ openldap_default_database_name }},{{ openldap_config_dn }}
openldap_default_database:
name: "{{ openldap_default_database_name }}"
object_class: "{{ openldap_default_database_object_class }}"
suffix: "{{ openldap_default_database_suffix }}"
root_dn: "{{ openldap_default_database_root_dn }}"
root_pw: "{{ openldap_default_database_root_pw }}"
directory: "{{ openldap_default_database_directory }}"
indices: "{{ openldap_default_database_indices }}"
openldap_default_database_olc_access: "{{ openldap_config_db_olc_access }}"
openldap_databases:
- "{{ openldap_default_database }}"

View File

@@ -0,0 +1,80 @@
---
- name: Ensure config attributes are configured
community.general.ldap_attrs:
dn: "{{ openldap_config_dn }}"
attributes: "{{ { entry.key : entry.value } }}"
state: exact
server_uri: "{{ openldap_socket_url }}"
loop: "{{ openldap_config_attributes | dict2items }}"
loop_control:
loop_var: "entry"
label: "{{ entry.key }}"
- name: Ensure config db attributes are configured
community.general.ldap_attrs:
dn: "{{ openldap_config_db_dn }}"
attributes: "{{ { entry.key : entry.value } }}"
state: exact
server_uri: "{{ openldap_socket_url }}"
loop: "{{ openldap_config_db_attributes | dict2items }}"
loop_control:
loop_var: "entry"
label: "{{ entry.key }}"
- name: Ensure ACLs for default database are configured
community.general.ldap_attrs:
dn: "{{ openldap_default_database_config }}"
attributes:
olcAccess: "{{ openldap_default_database_olc_access }}"
state: "exact"
server_uri: "{{ openldap_socket_url }}"
retries: 3
delay: 3
register: openldap_acl_result
until: openldap_acl_result is succeeded
- name: Ensure LDAP DIT is configured
when:
- openldap_default_database_root_dn is defined
- openldap_default_database_root_pw is defined
vars:
_meta: &openldap_bind_info
bind_dn: "{{ openldap_default_database_root_dn }}"
bind_pw: "{{ openldap_default_database_root_pw }}"
server_uri: "{{ openldap_socket_url }}"
block:
- name: Ensure rootDN + credentials are correct
community.general.ldap_attrs:
dn: "{{ openldap_default_database_config }}"
attributes: "{{ {entry.key: entry.value} }}"
state: "exact"
server_uri: "{{ openldap_socket_url }}"
no_log: "{{ entry.log is defined and not entry.log }}"
loop:
- key: "olcRootDN"
value: "{{ openldap_default_database_root_dn }}"
- key: "olcRootPW"
value: "{{ openldap_default_database_root_pw }}"
log: false
loop_control:
loop_var: "entry"
label: "{{ entry.key }}"
- name: Ensure root node is {{ openldap_state }}
community.general.ldap_entry:
dn: "{{ openldap_dn }}"
objectClass: "{{ openldap_root_node_object_classes }}"
attributes:
dc: "{{ openldap_root_node_dc }}"
o: "{{ openldap_root_node_o }}"
<<: *openldap_bind_info
- name: Ensure root node attributes are up to date
community.general.ldap_attrs:
dn: "{{ openldap_dn }}"
attributes:
dc: "{{ openldap_root_node_dc }}"
o: "{{ openldap_root_node_o }}"
state: exact
<<: *openldap_bind_info
when: openldap_state == 'present'

View File

@@ -0,0 +1,25 @@
---
- name: Ensure container '{{ openldap_container_name }}' is {{ openldap_container_state }}
community.docker.docker_container:
name: "{{ openldap_container_name }}"
image: "{{ openldap_container_image }}"
env: "{{ openldap_container_env | default(omit, true) }}"
user: "{{ openldap_container_user | default(omit, true) }}"
ports: "{{ openldap_container_ports | default(omit, true) }}"
labels: "{{ openldap_container_labels | default(omit, true) }}"
volumes: "{{ openldap_container_all_volumes | default(omit, true) }}"
networks: "{{ openldap_container_networks | default(omit, true) }}"
network_mode: "{{ openldap_container_network_mode | default(omit, true) }}"
dns_servers: "{{ openldap_container_dns_servers | default(omit, true) }}"
etc_hosts: "{{ openldap_container_etc_hosts | default(omit, true) }}"
command: "{{ openldap_container_command | default(omit, true) }}"
ulimits: "{{ openldap_container_ulimits | default(omit, true) }}"
memory: "{{ openldap_container_memory | default(omit, true) }}"
memory_swap: "{{ openldap_container_memory_swap | default(omit, true) }}"
memory_reservation: >-2
{{ openldap_container_memory_reservation | default(omit, true) }}
restart_policy: >-2
{{ openldap_container_restart_policy | default(omit, true) }}
healthcheck: "{{ openldap_container_healthcheck | default(omit, true) }}"
state: "{{ openldap_container_state }}"

View File

@@ -0,0 +1,37 @@
---
- name: Ensure additional schemas are mapped to container
ansible.builtin.set_fact:
openldap_init_container_volumes: >-2
{{ openldap_init_container_volumes + [ schema_mount ] }}
vars:
schema_file: "{{ openldap_schema_path }}/{{ schema.name }}.ldif"
schema_mount: >-2
{{ schema_file }}:{{ schema_file }}:ro
loop: "{{ openldap_additional_schemas }}"
loop_control:
loop_var: "schema"
label: "{{ schema.name }}"
- name: Ensure ldap container is initialized
community.docker.docker_container:
name: "{{ openldap_container_name }}"
image: "{{ openldap_container_image }}"
env: "{{ openldap_container_env | default(omit, true) }}"
user: "{{ openldap_container_user | default(omit, true) }}"
ports: "{{ openldap_container_ports | default(omit, true) }}"
labels: "{{ openldap_container_labels | default(omit, true) }}"
volumes: "{{ openldap_container_merged_volumes | default(omit, true) }}"
networks: "{{ openldap_container_networks | default(omit, true) }}"
network_mode: "{{ openldap_container_network_mode | default(omit, true) }}"
dns_servers: "{{ openldap_container_dns_servers | default(omit, true) }}"
etc_hosts: "{{ openldap_container_etc_hosts | default(omit, true) }}"
memory: "{{ openldap_container_memory | default(omit, true) }}"
memory_swap: "{{ openldap_container_memory_swap | default(omit, true) }}"
memory_reservation: >-2
{{ openldap_container_memory_reservation | default(omit, true) }}
command: "{{ openldap_slapadd_init_command }}"
detach: false
cleanup: true
vars:
openldap_container_merged_volumes: >-2
{{ openldap_container_all_volumes + openldap_init_container_volumes }}

View File

@@ -0,0 +1,47 @@
---
- name: Determine if persisted OLC config exists
ansible.builtin.stat:
path: "{{ openldap_olc_path }}/cn=config"
register: openldap_olc_stat_info
- name: Ensure openldap databases are initialized
when: not openldap_olc_stat_info.stat.exists
block:
- name: Ensure initial slapd.ldif is templated
ansible.builtin.template:
src: "slapd.ldif.j2"
dest: "{{ openldap_slapd_path }}"
mode: "0644"
- name: Ensure additional schemas to install are present
ansible.builtin.copy:
content: "{{ schema.content }}"
dest: "{{ openldap_schema_path }}/{{ schema.name }}.ldif"
mode: "0644"
loop: "{{ openldap_additional_schemas }}"
loop_control:
loop_var: "schema"
label: "{{ schema.name }}"
- name: Ensure db data directory exists
ansible.builtin.file:
path: "{{ openldap_default_database_directory }}"
state: directory
mode: "0750"
- name: Ensure container is initialized using {{ openldap_deployment_method }}
ansible.builtin.include_tasks:
file: "initialize-{{ openldap_deployment_method }}.yml"
rescue:
- name: Ensure temporary schema files are absent
ansible.builtin.file:
path: "{{ openldap_schema_path }}/{{ file.name }}.ldif"
state: absent
loop: >-2
{{ openldap_additional_schemas }}
loop_control:
loop_var: "file"
label: "{{ file.name }}"
ignore_errors: true
- name: Ensure initial slapd.ldif file is absent
ansible.builtin.file:
path: "{{ openldap_slapd_path }}"
state: absent
ignore_errors: true

View File

@@ -0,0 +1,26 @@
---
- name: Check if 'openldap_state' is valid
ansible.builtin.fail:
msg: >-2
Invalid state '{{ openldap_state }}'!
Supported states are {{ openldap_states | join(', ') }}.
when: openldap_state not in openldap_states
- name: Check if 'openldap_deployment_method' is valid
ansible.builtin.fail:
msg: >-2
Invalid deployment method '{{ openldap_deployment_method }}'!
Supported methods are {{ openldap_deployment_methods | join(', ') }}.
when: openldap_deployment_method not in openldap_deployment_methods
- name: Ensure openldap deployment is prepared
ansible.builtin.include_tasks:
file: "prepare-{{ openldap_deployment_method }}.yml"
- name: Ensure openldap is deployed using {{ openldap_deployment_method }}
ansible.builtin.include_tasks:
file: "deploy-{{ openldap_deployment_method }}.yml"
- name: Ensure openldap is configured
ansible.builtin.include_tasks:
file: "configure.yml"

View File

@@ -0,0 +1,7 @@
---
- name: Ensure container image '{{ openldap_container_image }}' is {{ openldap_state }}
community.docker.docker_image:
name: "{{ openldap_container_image }}"
state: "{{ openldap_state }}"
source: "{{ openldap_container_image_source }}"
force_source: "{{ openldap_container_image_force_source }}"

View File

@@ -0,0 +1,64 @@
dn: cn=config
objectClass: olcGlobal
cn: config
olcPidFile: /run/openldap/slapd.pid
olcArgsFile: /run/openldap/slapd.args
# Dynamic backend modules
dn: cn=module,cn=config
objectClass: olcModuleList
cn: module
olcModulepath: {{ openldap_module_path }}
{% for mod in openldap_modules | default([]) %}
olcModuleload: back_{{ mod }}.so
{% endfor %}
# Schema config
dn: cn=schema,cn=config
objectClass: olcSchemaConfig
cn: schema
include: file://{{ openldap_core_schema_path }}
{% for schema in openldap_schemas %}
include: file://{{ openldap_schema_path }}/{{ schema.name }}.ldif
{% endfor %}
# Frontend settings
dn: olcDatabase=frontend,cn=config
objectClass: olcDatabaseConfig
objectClass: olcFrontendConfig
olcDatabase: frontend
# Config-DB settings
dn: olcDatabase=config,cn=config
objectClass: olcDatabaseConfig
olcDatabase: config
{% for attr in openldap_config_db_attributes | dict2items %}
{% if attr.value is string %}
{{ attr.key }}: {{ attr.value }}
{% else %}
{% for val in attr.value %}
{{ attr.key }}: {{ val }}
{% endfor %}
{% endif %}
{% endfor %}
# database settings
{% for db in openldap_databases %}
dn: olcDatabase={{ db.name }},cn=config
objectClass: olcDatabaseConfig
objectClass: {{ db.object_class }}
olcDatabase: {{ db.name }}
olcSuffix: {{ db.suffix }}
olcRootDN: {{ db.root_dn }}
olcRootPW: {{ db.root_pw }}
olcDbDirectory: {{ db.directory }}
{% for idx in db.indices %}
olcDbIndex: {{ idx }}
{% endfor %}
{% endfor %}

View File

@@ -0,0 +1,6 @@
---
openldap_states:
- "present"
- "absent"
openldap_deployment_methods:
- "docker"

13
roles/openssh/README.md Normal file
View File

@@ -0,0 +1,13 @@
# `finallycoffee.base.openssh`
Ansible role to manage and configure openssh and its components (like `sshd`).
Currently supports `fedora` and `debian` linux distributions.
## `sshd`
To configure `sshd`, see the [`defaults/main/sshd.yml`](defaults/main/sshd.yml),
where snake\_cased config keys for `/etc/ssh/sshd_config` are available in
the `openssh_sshd_config_` namespace.
To add your own config on top, simply use key-value syntax in `openssh_sshd_config`.
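For example, extra raw `sshd_config` directives could be supplied like this (a sketch; the keys shown are illustrative and are merged on top of the role defaults):

```yaml
openssh_sshd_config:
  # Plain sshd_config keys; the role renders booleans as yes/no.
  MaxAuthTries: 3
  LoginGraceTime: 30
```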

View File

@@ -0,0 +1,3 @@
---
openssh_state: 'present'
openssh_sshd_config_file: "/etc/ssh/sshd_config"

View File

@@ -0,0 +1,8 @@
---
openssh_packages:
fedora: "{{ openssh_fedora_packages }}"
debian: "{{ openssh_debian_packages }}"
openssh_fedora_packages:
- "openssh-server"
openssh_debian_packages:
- "openssh-server"

View File

@@ -0,0 +1,33 @@
---
openssh_sshd_enable: true
openssh_sshd_config_pubkey_authentication: true
openssh_sshd_config_password_authentication: false
openssh_sshd_config_challenge_response_authentication: false
openssh_sshd_config_permit_root_login: false
# Limits
openssh_sshd_config_max_sessions: ~
openssh_sshd_config_max_startups: ~
# Hardening
openssh_sshd_config_protocol: 2
openssh_sshd_config_x11_forwarding: false
openssh_sshd_config_allow_agent_forwarding: false
openssh_sshd_config_allow_tcp_forwarding: false
openssh_sshd_default_config:
PubkeyAuthentication: "{{ openssh_sshd_config_pubkey_authentication }}"
PasswordAuthentication: "{{ openssh_sshd_config_password_authentication }}"
ChallengeResponseAuthentication: >-2
{{ openssh_sshd_config_challenge_response_authentication }}
PermitRootLogin: "{{ openssh_sshd_config_permit_root_login }}"
MaxSessions: "{{ openssh_sshd_config_max_sessions }}"
MaxStartups: "{{ openssh_sshd_config_max_startups }}"
Protocol: "{{ openssh_sshd_config_protocol }}"
X11Forwarding: "{{ openssh_sshd_config_x11_forwarding }}"
AllowAgentForwarding: "{{ openssh_sshd_config_allow_agent_forwarding }}"
AllowTcpForwarding: "{{ openssh_sshd_config_allow_tcp_forwarding }}"
openssh_sshd_merged_config: >-2
{{ openssh_sshd_default_config | default({}, true)
| combine(openssh_sshd_config | default({}, true)) }}

View File

@@ -0,0 +1,2 @@
---
openssh_sshd_systemd_service_name: "sshd.service"

View File

@@ -0,0 +1,7 @@
---
- name: Ensure sshd is reloaded
ansible.builtin.systemd_service:
name: "{{ openssh_sshd_systemd_service_name }}"
state: "reloaded"
when: ansible_facts['service_mgr'] == 'systemd'
listen: openssh_sshd_reload

View File

@@ -0,0 +1,28 @@
---
- name: Configure sshd
ansible.builtin.lineinfile:
path: "{{ openssh_sshd_config_file }}"
regexp: "{{ openssh_sshd_config_regexp }}"
line: "{{ openssh_sshd_config_line }}"
firstmatch: true
state: present
validate: "sshd -Tf %s"
loop: "{{ openssh_sshd_merged_config | dict2items }}"
loop_control:
loop_var: "tuple"
label: "{{ tuple.key }}"
notify:
- openssh_sshd_reload
vars:
openssh_sshd_config_regexp: "^\\s*#?\\s*{{ tuple.key }}"
openssh_sshd_config_line: >-2
{{ openssh_sshd_config_line_commented }}{{ tuple.key }} {{ openssh_sshd_config_value }}
openssh_sshd_config_value_is_none: "{{ tuple.value is none }}"
openssh_sshd_config_line_commented: >-2
{{ openssh_sshd_config_value_is_none | ternary('#', '') }}
openssh_sshd_config_value: >-2
{{ (tuple.value is boolean) | ternary(
tuple.value | ternary('yes', 'no'),
tuple.value
)
}}

View File

@@ -0,0 +1,16 @@
---
- name: Ensure openssh server package is {{ openssh_state }} (dnf)
ansible.builtin.dnf:
name: "{{ openssh_packages[ansible_distribution | lower] }}"
state: "{{ openssh_state }}"
when:
- ansible_facts['pkg_mgr'] in ['dnf', 'dnf5']
- ansible_distribution | lower in openssh_packages.keys()
- name: Ensure openssh server package is {{ openssh_state }} (apt)
ansible.builtin.apt:
package: "{{ openssh_packages[ansible_distribution | lower] }}"
state: "{{ openssh_state }}"
when:
- ansible_facts['pkg_mgr'] in ['apt']
- ansible_distribution | lower in openssh_packages.keys()

View File

@@ -0,0 +1,15 @@
---
- name: Ensure 'openssh_state' is valid
ansible.builtin.fail:
msg: >-2
Invalid value '{{ openssh_state }}' for 'openssh_state'.
Valid values are {{ openssh_states | join(', ') }}!
when: openssh_state not in openssh_states
- name: Ensure openssh is {{ openssh_state }}
ansible.builtin.include_tasks:
file: "install.yml"
- name: Ensure sshd is configured
ansible.builtin.include_tasks:
file: "configure-sshd.yml"

View File

@@ -0,0 +1,4 @@
---
openssh_states:
- "present"
- "absent"

View File

@@ -41,7 +41,7 @@
community.docker.docker_container_exec:
container: "{{ powerdns_tsig_key_container_name }}"
command: "pdnsutil list-tsig-keys"
delegate_to: "{{ powerdns_tsig_key_hostname }}"
delegate_to: "{{ powerdns_tsig_key_hostname | default(inventory_hostname) }}"
register: powerdns_tsig_key_powerdns_info
changed_when: false
check_mode: false
@@ -54,7 +54,7 @@
when: >-
(powerdns_tsig_key_name ~ '. ' ~ powerdns_tsig_key_algo ~ '. ')
not in powerdns_tsig_key_powerdns_info.stdout
delegate_to: "{{ powerdns_tsig_key_hostname }}"
delegate_to: "{{ powerdns_tsig_key_hostname | default(inventory_hostname) }}"
register: powerdns_tsig_key_powerdns_generated_tsig_key
throttle: 1
become: true
@@ -67,7 +67,7 @@
when: >-
(powerdns_tsig_key_name ~ '. ' ~ powerdns_tsig_key_algo ~ '. ')
not in powerdns_tsig_key_powerdns_info.stdout
delegate_to: "{{ powerdns_tsig_key_hostname }}"
delegate_to: "{{ powerdns_tsig_key_hostname | default(inventory_hostname) }}"
throttle: 1
become: true

View File

@@ -4,6 +4,9 @@ Description=Run {{ restic_timer_description | default(restic_job_name) }}
[Timer]
OnCalendar={{ restic_policy.frequency }}
Unit={{ restic_systemd_unit_naming_scheme }}.service
{% if restic_systemd_timer_randomized_delay_sec %}
RandomizedDelaySec={{ restic_systemd_timer_randomized_delay_sec }}
{% endif %}
[Install]
WantedBy=timers.target

5
roles/wg_quick/README.md Normal file
View File

@@ -0,0 +1,5 @@
# `finallycoffee.base.wg_quick` ansible role
Configure a wireguard interface using `wg_quick`. This role writes
the configuration files and activates the interface using the systemd
template service abstractions.
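A sketch of an interface definition (all names, addresses and keys below are placeholders):

```yaml
wg_quick_interfaces:
  - name: "wg0"
    address:
      - "10.10.0.2/24"
    listen_port: 51820
    private_key_file: "/etc/wireguard/wg0.key"  # or set private_key inline instead
    peers:
      - endpoint: "vpn.example.org:51820"
        public_key: "<peer-public-key>"
        allowed_ips:
          - "10.10.0.0/24"
        psk: "<optional-preshared-key>"  # optional, rendered as PresharedKey
```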

View File

@@ -0,0 +1,20 @@
---
wg_quick_interface_name: ~
wg_quick_interface_address: ~
wg_quick_interface_listen_port: ~
wg_quick_interface_private_key: ~
wg_quick_interface_private_key_file: ~
wg_quick_interface_peer_endpoint: ~
wg_quick_interface_peer_public_key: ~
wg_quick_interface_peer_allowed_ips: ~
wg_quick_interfaces:
- name: "{{ wg_quck_interface_name }}"
address: "{{ wg_quick_interface_address }}"
listen_port: "{{ wg_quick_interface_listen_port }}"
private_key: "{{ wg_quick_interface_private_key }}"
private_key_file: "{{ wg_quick_interface_private_key_file }}"
peers:
- endpoint: "{{ wg_quick_interface_peer_endpoint }}"
public_key: "{{ wg_quick_interface_peer_public_key }}"
allowed_ips: "{{ wg_quick_interface_peer_allowed_ips }}"

View File

@@ -0,0 +1,7 @@
---
wg_quick_state: "present"
wg_quick_package_name: "wireguard-tools"
wg_quick_system_packages:
- "{{ wg_quick_package_name }}"
wg_quick_configuration_dir: "/etc/wireguard"

View File

@@ -0,0 +1,25 @@
---
- name: Ensure wg-quick configuration for interface '{{ wg_quick_iface.name }}' is up to date
ansible.builtin.template:
src: "wg-quick.conf.j2"
dest: "{{ wg_quick_configuration_dir }}/{{ wg_quick_iface.name }}.conf"
when: wg_quick_iface.state | default(wg_quick_state) == 'present'
- name: Ensure systemd service is enabled
ansible.builtin.systemd_service:
name: "wg-quick@{{ wg_quick_iface.name }}.service"
enabled: true
when: wg_quick_iface.state | default(wg_quick_state) == 'present'
- name: Ensure systemd service is {{ wg_quick_iface.state | default(wg_quick_state) }}
ansible.builtin.systemd_service:
name: "wg-quick@{{ wg_quick_iface.name }}.service"
state: >-2
{{ (wg_quick_iface.state | default(wg_quick_state) == 'present')
| ternary('started', 'stopped') }}
- name: Ensure wg-quick configuration for interface '{{ wg_quick_iface.name }}' is absent
ansible.builtin.file:
path: "{{ wg_quick_configuration_dir }}/{{ wg_quick_face.name }}.conf"
state: "absent"
when: wg_quick_iface.state | default(wg_quick_state) == 'absent'

View File

@@ -0,0 +1,27 @@
---
- name: Ensure wg_quick_state is valid
ansible.builtin.fail:
msg: >-2
Invalid state '{{ wg_quick_state }}'. Valid
states are {{ wg_quick_states | join(', ') }}.
when: wg_quick_state not in wg_quick_states
- name: Ensure system packages are available
ansible.builtin.package:
name: "{{ wg_quick_system_packages }}"
state: "present"
when: wg_quick_state == 'present'
- name: Ensure configuration folder is present
ansible.builtin.file:
name: "{{ wg_quick_configuration_dir }}"
state: "directory"
when: wg_quick_state == 'present'
- name: Ensure connections are in the configured state
ansible.builtin.include_tasks:
file: "configure-interface.yml"
loop: "{{ wg_quick_interfaces }}"
loop_control:
loop_var: "wg_quick_iface"
label: "{{ wg_quick_iface.name }}"

View File

@@ -0,0 +1,32 @@
[Interface]
Address = {{ wg_quick_iface.address | join(', ') }}
ListenPort = {{ wg_quick_iface.listen_port }}
{% if wg_quick_iface.private_key %}
PrivateKey = {{ wg_quick_iface.private_key }}
{% elif wg_quick_iface.private_key_file %}
PrivateKeyFile = {{ wg_quick_iface.private_key_file }}
{% endif %}
{% if wg_quick_iface.table is defined %}
Table = {{ wg_quick_iface.table | ternary('auto', 'off') }}
{% endif %}
{% if wg_quick_iface.post_up %}
PostUp = /bin/bash -c "{{ wg_quick_iface.post_up | join('; ') }}"
{% endif %}
{% if wg_quick_iface.pre_down %}
PreDown = /bin/bash -c "{{ wg_quick_iface.pre_down | join('; ') }}"
{% endif %}
{% for _peer in wg_quick_iface.peers %}
[Peer]
Endpoint = {{ _peer.endpoint }}
PublicKey = {{ _peer.public_key }}
AllowedIPs = {{ _peer.allowed_ips | join(', ') }}
{% if _peer.persistent_keepalive %}
PersistentKeepalive = {{ _peer.persistent_keepalive }}
{% endif %}
{% if 'psk' in _peer %}
PresharedKey = {{ _peer.psk }}
{% endif %}
{% endfor %}

View File

@@ -0,0 +1,4 @@
---
wg_quick_states:
- "present"
- "absent"