1
0
forked from finallycoffee/base

Compare commits

..

1 Commits

108 changed files with 286 additions and 1996 deletions

View File

@@ -5,7 +5,9 @@
This ansible collection provides various roles for installing This ansible collection provides various roles for installing
and configuring basic system utilities like gnupg, ssh etc and configuring basic system utilities like gnupg, ssh etc
- [`caddy`](roles/caddy/README.md): configures and runs caddy - [`elasticsearch`](roles/elasticsearch/README.md): Deploy [elasticsearch](https://www.docker.elastic.co/r/elasticsearch/elasticsearch-oss),
a popular (distributed) search and analytics engine, mostly known by its
letter "E" in the ELK-stack.
- [`git`](roles/git/README.md): configures git on the target system - [`git`](roles/git/README.md): configures git on the target system
@@ -14,6 +16,8 @@ and configuring basic system utilities like gnupg, ssh etc
- [`lego`](roles/lego/README.md): runs [lego (LetsEncrypt Go)](https://github.com/go-acme/lego), - [`lego`](roles/lego/README.md): runs [lego (LetsEncrypt Go)](https://github.com/go-acme/lego),
a ACME client written in go, using systemd (timers). Multi-instance capable. a ACME client written in go, using systemd (timers). Multi-instance capable.
- [`mariadb`](roles/mariadb/README.md): runs [MariaDB Server](https://mariadb.org/), one of the world's most popular open source relational database
- [`minio`](roles/minio/README.md): Deploy [min.io](https://min.io), an - [`minio`](roles/minio/README.md): Deploy [min.io](https://min.io), an
s3-compatible object storage server, using docker containers. s3-compatible object storage server, using docker containers.

View File

@@ -1,30 +1,22 @@
namespace: finallycoffee namespace: finallycoffee
name: base name: base
version: 0.4.1 version: 0.1.2
readme: README.md readme: README.md
authors: authors:
- transcaffeine <transcaffeine@finally.coffee> - transcaffeine <transcaffeine@finally.coffee>
description: >-2 description: Roles for base services which are common dependencies other services like databases
Roles for base services which are core functionality like managing packages
and ssh or common dependencies other services like databases
dependencies: dependencies:
"community.docker": "^4.7.0" "community.docker": "^3.0.0"
"community.general": "^11.1.2"
"containers.podman": "^1.17.0"
license_file: LICENSE.md license_file: LICENSE.md
build_ignore: build_ignore:
- '*.tar.gz' - '*.tar.gz'
repository: https://git.finally.coffee/finallycoffee/base repository: https://git.finally.coffee/finallycoffee/base
issues: https://codeberg.org/finallycoffee/ansible-collection-base/issues issues: https://codeberg.org/finallycoffee/ansible-collection-base/issues
tags: tags:
- bootstrap
- ssh
- mosh
- docker - docker
- elastic
- lego - lego
- mariadb
- minio - minio
- nginx - nginx
- caddy
- restic - restic
- user_management
- openldap

View File

@@ -1,78 +0,0 @@
---
# Bootstrap hosts for ansible management: uses only 'raw' commands so it
# works on targets which do not yet have python (and therefore cannot run
# regular ansible modules).
- name: Bootstrap everything needed for an ansible connection
  hosts: "{{ target_hosts | default('all', true) }}"
  become: "{{ target_host_become | default(true, false) }}"
  # fact gathering needs python on the target, which may be missing
  gather_facts: false
  pre_tasks:
    - name: Gather information about the target system id
      ansible.builtin.raw: "cat /etc/os-release | grep '^ID=' | cut -d '=' -f2"
      register: target_host_os_info
      check_mode: false
      changed_when: false
    - name: Set /etc/os-release system id
      ansible.builtin.set_fact:
        target_host_system_id: "{{ target_host_os_info.stdout_lines | first | trim }}"
      delegate_to: localhost
    - name: Gather information about the target system version
      ansible.builtin.raw: "cat /etc/os-release | grep '^VERSION_ID=' | cut -d '=' -f2"
      register: target_host_os_info_version
      check_mode: false
      changed_when: false
    - name: Set /etc/os-release system version id
      ansible.builtin.set_fact:
        target_host_system_version_id: "{{ target_host_os_info_version.stdout_lines | first | trim }}"
      delegate_to: localhost
  tasks:
    - name: Ensure apt bootstrap packages are installed
      # --assume-yes keeps apt from blocking on an interactive prompt,
      # matching the non-interactive dnf invocations below
      ansible.builtin.raw: "apt install --assume-yes {{ apt_bootstrap_packages | join(' ') }}"
      register: target_host_apt_info
      when: target_host_system_id in targets_using_apt
      changed_when:
        - "'0 upgraded' not in target_host_apt_info.stdout_lines | last"
        - "'0 newly installed' not in target_host_apt_info.stdout_lines | last"
    - name: Ensure dnf < 4 bootstrap packages are installed
      ansible.builtin.raw: "dnf install --assumeyes {{ dnf4_bootstrap_packages | join(' ') }}"
      register: target_host_dnf_info
      when:
        - target_host_system_id in targets_using_dnf4.keys()
        - target_host_system_version_id | int < targets_using_dnf4[target_host_system_id]
      changed_when:
        - "(target_host_dnf_info.stdout_lines | last) != 'Nothing to do.'"
    - name: Ensure dnf5 bootstrap packages are installed
      ansible.builtin.raw: "dnf install --assumeyes {{ dnf5_bootstrap_packages | join(' ') }}"
      register: target_host_dnf_info
      when:
        - target_host_system_id in targets_using_dnf5.keys()
        - target_host_system_version_id | int >= targets_using_dnf5[target_host_system_id]
      changed_when:
        - "(target_host_dnf_info.stdout_lines | last) != 'Nothing to do.'"
    - name: Sort hosts into os-specific groups
      ansible.builtin.group_by:
        key: >-2
          {{ (os_group_prefix
          | default(false, true)
          | ternary(os_group_prefix | default('') + (os_group_seperator | default('_')), ''))
          + target_host_system_id }}
      when: target_hosts_sort_into_system_ids | default(false, true)
      changed_when: false
      delegate_to: localhost
  vars:
    targets_using_apt:
      - debian
      - ubuntu
    apt_bootstrap_packages:
      - python3
      - python3-apt
    # default package manager is dnf5 since fedora 41
    # https://fedoraproject.org/wiki/Changes/SwitchToDnf5#Current_status
    targets_using_dnf4:
      fedora: 41
    targets_using_dnf5:
      fedora: 41
    dnf4_bootstrap_packages:
      - python3
      - python3-dnf
      - python3-libdnf
    dnf5_bootstrap_packages:
      - python3-libdnf5

View File

@@ -1,7 +0,0 @@
---
# Thin playbook wrapper: installs and configures caddy on the 'caddy'
# host group (override via caddy_hosts) using the collection's caddy role.
- name: Install and configure caddy
  hosts: "{{ caddy_hosts | default('caddy') }}"
  become: "{{ caddy_become | default(false) }}"
  gather_facts: "{{ caddy_gather_facts | default(false) }}"
  roles:
    - role: finallycoffee.base.caddy

View File

@@ -1,31 +0,0 @@
---
# Renders a caddy site (vhost) configuration for a reverse-proxied
# service. The generated Caddyfile can be replaced wholesale via
# caddy_site_config_override.
- name: Ensure reverse proxy configuration is created
  hosts: "{{ target_hosts }}"
  become: "{{ target_become | default(false) }}"
  gather_facts: "{{ target_gather_facts | default(false) }}"
  roles:
    - role: finallycoffee.base.caddy_site
  vars:
    # base path (without extension) of the site's TLS certificate/key pair
    caddy_site_cert_basepath: >-2
      {{ caddy_site_tls_store | default('/tls') }}/{{ caddy_site_name }}/certificates/{{ caddy_site_name }}
    caddy_site_config: >-2
      {{ caddy_site_config_override | default(caddy_site_default_config, true) }}
    # NOTE(review): the content below is a Jinja-templated Caddyfile; its
    # text (including inner indentation) is part of the rendered config
    caddy_site_default_config: |+2
      https://{{ caddy_site_name }} {
        tls {{ caddy_site_cert_basepath}}.crt {{ caddy_site_cert_basepath }}.key
        header {
          Strict-Transport-Security "max-age=31536000"
        }
        encode zstd gzip
        {% if caddy_reverse_proxy_template_block | default(true) -%}
        reverse_proxy {{ caddy_reverse_proxy_backend_addr }} {
          {{ caddy_reverse_proxy_extra_config | default('') | indent(6) }}
          {%- if caddy_reverse_proxy_import_proxyheaders | default(true, true) %}
          import proxyheaders
          {%- endif +%}
        }
        {%- else -%}
        {{- caddy_reverse_proxy_block | default('') | indent(4) }}
        {%- endif +%}
      }

View File

@@ -1,6 +0,0 @@
---
# Thin playbook wrapper: installs and configures the docker daemon on
# the 'docker' host group (override via docker_hosts).
- name: Install and configure docker daemon
  hosts: "{{ docker_hosts | default('docker', true) }}"
  become: "{{ docker_become | default(false, true) }}"
  roles:
    - role: finallycoffee.base.docker

View File

@@ -1,16 +0,0 @@
---
# Logs the host's docker daemon in to (or out of) each container registry
# listed in docker_registries (items: registry, username, password, state).
- name: Manage docker registry credentials
  hosts: "{{ docker_hosts | default('docker', true) }}"
  become: "{{ docker_become | default(false) }}"
  gather_facts: "{{ docker_registry_gather_facts | default(true) }}"
  tasks:
    - name: Manage docker registry credentials
      community.docker.docker_login:
        registry_url: "{{ docker_registry.registry }}"
        username: "{{ docker_registry.username | default(omit) }}"
        password: "{{ docker_registry.password | default(omit) }}"
        state: "{{ docker_registry.state | default('present') }}"
      loop: "{{ docker_registries | default([], true) }}"
      loop_control:
        loop_var: "docker_registry"
        label: "{{ docker_registry.username}}@{{ docker_registry.registry }}"

View File

@@ -1,54 +0,0 @@
---
# Opens the shorewall firewall for docker: adds the docker0 interface,
# zone, routing policy and egress/DNS rules, enables shorewall's docker
# integration, then reloads shorewall.
- name: Configure shorewall for docker egress
  hosts: "{{ docker_shorewall_hosts | default('docker:&shorewall') }}"
  become: "{{ docker_shorewall_become | default(true, true) }}"
  tasks:
    - name: Add docker interface
      ansible.builtin.lineinfile:
        path: /etc/shorewall/interfaces
        regex: "^dock"
        line: |
          dock docker0 bridge
    - name: Add docker routing policy
      ansible.builtin.blockinfile:
        path: /etc/shorewall/policy
        insertbefore: "^# THE FOLLOWING POLICY MUST BE LAST"
        content: |
          # Docker specific configuration
          dock all ACCEPT
    - name: Add docker zone
      ansible.builtin.lineinfile:
        path: /etc/shorewall/zones
        regex: "^dock"
        line: |
          dock ipv4
    - name: Add docker egress rules
      ansible.builtin.blockinfile:
        path: /etc/shorewall/rules
        marker: "#{mark} ANSIBLE MANAGED BLOCK - DOCKER EGRESS"
        content: |
          #
          # Docker egress configuration
          #
          ACCEPT dock all
    - name: Add docker dns rules
      ansible.builtin.blockinfile:
        path: /etc/shorewall/rules
        marker: "#{mark} ANSIBLE MANAGED BLOCK - DOCKER DNS"
        content: |
          #
          # Docker dns configuration
          #
          DNS(ACCEPT) dock all
    - name: Enable shorewall docker support
      ansible.builtin.lineinfile:
        path: /etc/shorewall/shorewall.conf
        line: "DOCKER=Yes"
        regex: "^DOCKER="
    - name: Ensure shorewall reloaded
      # systemd_service lives in ansible.builtin, not community.general;
      # this also matches the builtin module used elsewhere in this
      # collection. Reload is unconditional by design (not a handler).
      ansible.builtin.systemd_service:
        service: "{{ item }}"
        state: reloaded
      loop:
        - shorewall.service
        - shorewall6.service

View File

@@ -1,109 +0,0 @@
---
# Declaratively manages an LDAP directory information tree: org units,
# an optional admin user, groups and service accounts. Bind parameters
# are shared between all tasks via the ldap_bind_info YAML anchor.
- name: Configure LDAP directory information tree
  hosts: "{{ ldap_hosts | default('ldap') }}"
  become: "{{ ldap_become | default(false) }}"
  gather_facts: "{{ ldap_gather_facts | default(false) }}"
  vars:
    _state: "{{ ldap_state | default('present') }}"
    _ldap_bind_info: &ldap_bind_info
      server_uri: "{{ ldap_server_uri }}"
      bind_dn: "{{ ldap_bind_dn }}"
      bind_pw: "{{ ldap_bind_pw }}"
  roles:
    # Ensure all defaults from openldap role are in scope
    - role: finallycoffee.base.openldap
      when: false
  tasks:
    - name: Ensure org units in '{{ ldap_base_dn }}' are {{ _state }}
      community.general.ldap_entry:
        <<: *ldap_bind_info
        dn: "ou={{ org_unit }},{{ ldap_base_dn }}"
        objectClass: "organizationalUnit"
        state: "{{ _state }}"
      loop: "{{ ldap_org_units | default([], true) }}"
      loop_control:
        loop_var: org_unit
    - name: Ensure admin user is {{ _state }}
      community.general.ldap_entry:
        <<: *ldap_bind_info
        dn: "uid={{ ldap_admin_user_rdn }},{{ ldap_admin_user_base }}"
        objectClass: "{{ ldap_admin_user_object_classes }}"
        attributes: "{{ ldap_admin_user_attributes }}"
        state: "{{ _state }}"
      vars:
        ldap_admin_user_base: >-2
          {{ ldap_admin_user_base_dn | default(ldap_base_dn, true) }}
      when: ldap_admin_user_rdn is defined
    - name: Ensure admin user attributes are correct
      # ldap_entry does not update attributes on existing entries, so a
      # separate ldap_attrs task keeps them in sync
      community.general.ldap_attrs:
        <<: *ldap_bind_info
        dn: "uid={{ ldap_admin_user_rdn }},{{ ldap_admin_user_base }}"
        attributes: "{{ ldap_admin_user_attributes }}"
        state: "{{ _state }}"
      vars:
        ldap_admin_user_base: >-2
          {{ ldap_admin_user_base_dn | default(ldap_base_dn, true) }}
      when:
        - ldap_admin_user_rdn is defined
        - _state == 'present'
    - name: Ensure ldap groups are {{ _state }}
      community.general.ldap_entry:
        <<: *ldap_bind_info
        dn: "{{ _ldap_group_dn }}"
        objectClass: "{{ _ldap_group_object_classes }}"
        attributes: "{{ _ldap_group_attributes }}"
        state: "{{ _state }}"
      vars:
        _ldap_group_dn: >-2
          cn={{ _ldap_group.name }},{{ ldap_group_base_dn }}
        _ldap_group_object_classes:
          - "groupOfNames"
        _ldap_group_attributes:
          cn: "{{ _ldap_group.name }}"
          member: >-2
            {{ _ldap_group.members | default([]) }}
      loop: "{{ ldap_groups | default([], true) }}"
      loop_control:
        loop_var: _ldap_group
        label: "{{ _ldap_group.name }}"
      when:
        - ldap_groups is defined
        - ldap_group_base_dn is defined
    - name: Ensure service accounts are {{ _state }}
      community.general.ldap_entry:
        <<: *ldap_bind_info
        dn: "{{ _ldap_service_account_dn }}"
        objectClass: "{{ _ldap_service_account_object_classes }}"
        attributes: "{{ _ldap_service_account_attributes }}"
        state: "{{ _state }}"
      # loop/loop_control/vars/when are anchored for reuse by the
      # attribute-sync task below
      loop: &ldap_service_account_loop "{{ ldap_service_accounts | default([]) }}"
      loop_control: &ldap_service_account_loop_control
        loop_var: "_ldap_service_account"
        label: "{{ _ldap_service_account.name }}"
      vars: &ldap_service_account_vars
        _ldap_service_account_dn: >-2
          uid={{ _ldap_service_account.name }},{{ ldap_service_account_base_dn }}
        _ldap_service_account_object_classes:
          - "account"
          - "simpleSecurityObject"
        _ldap_service_account_attributes:
          uid: "{{ _ldap_service_account.name }}"
          userPassword: "{{ _ldap_service_account.password }}"
      when: &ldap_service_account_when
        - ldap_service_accounts is defined
        - ldap_service_account_base_dn is defined
    - name: Ensure service accounts attributes are correct
      community.general.ldap_attrs:
        <<: *ldap_bind_info
        dn: "{{ _ldap_service_account_dn }}"
        attributes: "{{ _ldap_service_account_attributes }}"
        state: exact
      loop: *ldap_service_account_loop
      loop_control: *ldap_service_account_loop_control
      vars: *ldap_service_account_vars
      when: *ldap_service_account_when

View File

@@ -1,85 +0,0 @@
---
# End-to-end ACME setup: creates _acme-challenge CNAME records, provisions
# a PowerDNS TSIG key and obtains certificates with lego via the RFC2136
# DNS challenge.
- name: Populate DNS, acquire TSIG key and obtain certificate
  hosts: "{{ target_hosts | default('all') }}"
  become: "{{ target_become | default(true) }}"
  gather_facts: "{{ target_gather_facts | default(false) }}"
  pre_tasks:
    - name: Build target dns records
      # appends one CNAME per domain; set_fact re-evaluates on every loop
      # iteration, so target_dns_records accumulates
      ansible.builtin.set_fact:
        target_dns_records: "{{ target_dns_records + [ _dns_record ] }}"
      vars:
        _dns_record:
          type: "CNAME"
          name: "_acme-challenge.{{ _domain }}."
          content: "{{ target_tsig_key_name }}.{{ target_acme_zone }}."
      loop: "{{ target_domains }}"
      loop_control:
        loop_var: "_domain"
    - name: Populate dns_server if not given
      ansible.builtin.set_fact:
        dns_server: "{{ target_dns_server }}"
      when: dns_server is not defined
  roles:
    - role: finallycoffee.base.dns
      vars:
        dns_records: "{{ target_dns_records + target_dns_additional_records }}"
        dns_tsig_name: "{{ target_dns_tsig_key.name }}"
        dns_tsig_algo: "{{ target_dns_tsig_key.algorithm }}"
        dns_tsig_key: "{{target_dns_tsig_key.key }}"
      delegate_to: localhost
    - role: finallycoffee.base.powerdns_tsig_key
      vars:
        powerdns_tsig_key_algo: "{{ target_powerdns_tsig_key_algo }}"
        powerdns_tsig_key_name: "{{ target_tsig_key_name }}"
        powerdns_tsig_key_path: "{{ target_tsig_key_path }}"
        powerdns_tsig_key_path_owner: "{{ target_acme_user }}"
        powerdns_tsig_key_path_group: "{{ target_acme_group }}"
    - role: finallycoffee.base.lego
      vars:
        lego_instance: "{{ target_lego_instance }}"
        lego_instance_base_path: "{{ target_lego_instance_base_path }}"
        lego_environment: "{{ target_lego_environment }}"
        lego_cert_domains: "{{ target_lego_domains }}"
        lego_acme_account_email: "{{ target_acme_account_email }}"
        lego_acme_challenge_type: "{{ target_lego_acme_challenge_type }}"
        lego_acme_challenge_provider: "{{ target_lego_acme_challenge_provider }}"
        lego_acme_server_url: "{{ target_lego_acme_server_url }}"
  vars:
    target_domains: []
    target_acme_zone: ~
    target_acme_account_email: ~
    target_dns_server: ~
    target_dns_additional_records: []
    target_dns_tsig_key: {}
    target_lego_instance: "{{ target_domains | first }}"
    target_lego_instance_base_path: "/opt/acme"
    target_lego_domains: "{{ target_domains }}"
    target_lego_acme_challenge_type: "dns"
    target_lego_acme_challenge_provider: "rfc2136"
    target_lego_acme_server_url: >-2
      {{ lego_letsencrypt_server_urls.prod }}
    target_lego_environment:
      RFC2136_TSIG_KEY: "{{ target_tsig_key_name }}"
      RFC2136_TSIG_SECRET_FILE: "{{ target_tsig_key_path }}"
      RFC2136_TSIG_ALGORITHM: "{{ target_powerdns_tsig_key_algo }}"
      RFC2136_NAMESERVER: "{{ target_dns_server }}"
      RFC2136_DNS_TIMEOUT: 15
      RFC2136_TTL: 60
      RFC2136_SEQUENCE_INTERVAL: 5
      RFC2136_POLLING_INTERVAL: 10
      # propagation timeout scales with the number of domains
      RFC2136_PROPAGATION_TIMEOUT: >-2
        {{ (target_lego_domains | length * 120) | int }}
      LEGO_EXPERIMENTAL_CNAME_SUPPORT: "true"
    # TSIG key name is a stable hash of the lego instance name
    target_tsig_key_name: "{{ target_lego_instance | hash('sha1') }}"
    target_tsig_key_path: >-2
      {{ target_lego_instance_base_path }}/{{ target_lego_instance }}/secrets/rfc2136_tsig.key
    target_tsig_key_path_owner:
    target_tsig_key_path_group:
    target_acme_user: "acme-{{ target_lego_instance }}"
    target_acme_user_id: >-2
      {{ powerdns_tsig_key_path_owner_info.uid }}
    target_acme_group: "acme-{{ target_lego_instance }}"
    target_acme_group_id: >-2
      {{ powerdns_tsig_key_path_owner_info.gid }}
    target_powerdns_tsig_key_algo: "hmac-sha256"
    target_dns_records: []

View File

@@ -1,6 +0,0 @@
---
# Thin playbook wrapper: installs and configures mosh on the 'mosh'
# host group (override via mosh_hosts).
- name: Manage and configure mosh
  hosts: "{{ mosh_hosts | default('mosh', true) }}"
  become: "{{ mosh_become | default(true) }}"
  roles:
    - role: finallycoffee.base.mosh

View File

@@ -1,7 +0,0 @@
---
# Thin playbook wrapper: installs and configures an NTP daemon on the
# 'ntp' host group (override via ntp_hosts).
- name: Install and configure network time protocol daemon
  hosts: "{{ ntp_hosts | default('ntp') }}"
  become: "{{ ntp_become | default(false) }}"
  gather_facts: "{{ ntp_gather_facts | default(false) }}"
  roles:
    - role: finallycoffee.base.ntp

View File

@@ -1,7 +0,0 @@
---
# Thin playbook wrapper: deploys and configures openLDAP on the
# 'openldap' host group (override via openldap_hosts).
- name: Deploy and configure openLDAP
  hosts: "{{ openldap_hosts | default('openldap', true) }}"
  become: "{{ openldap_become | default(true) }}"
  gather_facts: "{{ openldap_playbook_gather_facts | default(false) }}"
  roles:
    - role: finallycoffee.base.openldap

View File

@@ -1,7 +0,0 @@
---
# Thin playbook wrapper: installs and configures openssh on the
# 'openssh' host group (override via openssh_target).
- name: Ensure openssh is installed and configured
  hosts: "{{ openssh_target | default('openssh') }}"
  become: "{{ openssh_become | default(true) }}"
  gather_facts: "{{ openssh_gather_facts | default(true) }}"
  roles:
    - role: finallycoffee.base.openssh

View File

@@ -1,24 +0,0 @@
---
# Installs the packages listed in system_packages (items: name, state)
# using the package manager detected from gathered facts (apt or dnf/yum).
- name: Install system packages on the remote
  hosts: "{{ target_hosts | default('all', true) }}"
  become: "{{ target_host_become | default(true, true) }}"
  # facts are required below to detect the package manager
  gather_facts: "{{ target_host_gather_facts | default(true, true) }}"
  tasks:
    - name: Install packages (apt)
      ansible.builtin.apt:
        package: "{{ package.name }}"
        state: "{{ package.state | default('present') }}"
      loop: "{{ system_packages | default([], true) }}"
      loop_control:
        loop_var: package
        label: "{{ package.name }}"
      when: ansible_facts['pkg_mgr'] == 'apt'
    - name: Install packages (dnf)
      ansible.builtin.dnf:
        name: "{{ package.name }}"
        state: "{{ package.state | default('present') }}"
      loop: "{{ system_packages | default([], true) }}"
      loop_control:
        loop_var: package
        label: "{{ package.name }}"
      when: ansible_facts['pkg_mgr'] in ['dnf', 'dnf5', 'yum']

View File

@@ -1,7 +0,0 @@
---
# Thin playbook wrapper: configures user accounts on all hosts
# (override via user_hosts) using the collection's user role.
- name: Configure user accounts
  hosts: "{{ user_hosts | default('all', true) }}"
  become: "{{ user_role_become | default(false, true) }}"
  gather_facts: "{{ user_role_gather_facts | default(false, true) }}"
  roles:
    - role: finallycoffee.base.user

View File

@@ -1,7 +0,0 @@
---
# Thin playbook wrapper: configures wireguard interfaces via wg-quick
# on the 'wg_quick' host group (override via wg_quick_hosts).
- name: Configure wireguard interfaces with wg_quick
  hosts: "{{ wg_quick_hosts | default('wg_quick') }}"
  become: "{{ wg_quick_become | default(false) }}"
  gather_facts: "{{ wg_quick_gather_facts | default(false) }}"
  roles:
    - role: finallycoffee.base.wg_quick

View File

@@ -1,10 +0,0 @@
# `finallycoffee.base.caddy` ansible role
Deploy a (pre-)configured [caddy v2](https://caddyserver.com) web
server / proxy using ansible.
## Configuration
To change the default configuration of reading all files from
`/etc/caddy/sites.d/` (see `caddy_dynamic_config_dir`), specify
your desired configuration in `caddy_config`.

View File

@@ -1,23 +0,0 @@
---
caddy_config: |+2
{
auto_https disable_redirects
}
(proxyheaders) {
header_up X-Forwarded-Ssl on
header_up Host {host}
header_up X-Real-IP {remote}
header_up X-Forwarded-For {remote}
# header_up X-Forwarded-Port {port}
header_up X-Forwarded-Proto {scheme}
header_up X-Url-Scheme {scheme}
header_up X-Forwarded-Host {host}
}
# Import all configurations
import {{ caddy_dynamic_configs_dir }}/*/Caddyfile
:80 {
redir / https://{host}{uri} 301
}

View File

@@ -1,43 +0,0 @@
---
# Container image coordinates; assembled into caddy_container_image below
# so each component can be overridden individually.
caddy_container_image_registry: "docker.io"
caddy_container_image_namespace: "library"
caddy_container_image_repository: "caddy"
caddy_container_image_name: >-2
  {{ [
    caddy_container_image_registry | default([], true),
    caddy_container_image_namespace | default([], true),
    caddy_container_image_repository
  ] | flatten | join('/') }}
# optional explicit tag; falls back to caddy_version when unset
caddy_container_image_tag: ~
caddy_container_image: >-2
  {{ [
    caddy_container_image_name,
    caddy_container_image_tag | default(caddy_version, true)
  ] | join(':') }}
caddy_container_image_source: "pull"
# force a re-pull only when a tag was pinned explicitly
caddy_container_image_force_source: >-2
  {{ caddy_container_image_tag | ansible.builtin.type_debug != 'NoneType' }}
caddy_container_image_state: "{{ caddy_state }}"
caddy_container_name: "caddy"
caddy_container_env: ~
caddy_container_ports: ~
caddy_container_user: ~
caddy_container_labels: ~
caddy_container_volumes: ~
caddy_container_config_dir: "/etc/caddy"
caddy_container_default_volumes:
  - "{{ caddy_config_dir }}:{{ caddy_container_config_dir }}:ro"
  - "{{ caddy_dynamic_configs_dir }}:{{ caddy_dynamic_configs_dir }}:ro"
  - "{{ caddy_config_internal_dir }}:/config:rw"
  - "{{ caddy_state_dir }}:/data:rw"
caddy_container_all_volumes: >-2
  {{ caddy_container_default_volumes | default([], true)
  + caddy_container_volumes | default([], true) }}
caddy_container_state: >-2
  {{ (caddy_state == 'present') | ternary('started', 'absent') }}
caddy_container_restart_policy: "on-failure"
caddy_container_networks: ~
caddy_container_network_mode: ~
caddy_container_etc_hosts: ~

View File

@@ -1,11 +0,0 @@
---
# Core caddy role defaults: user, version, filesystem layout and state.
caddy_user: "caddy"
caddy_version: "2.10.2"
caddy_config_file: "/etc/caddy/Caddyfile"
caddy_config_dir: "{{ caddy_config_file | ansible.builtin.dirname }}"
caddy_config_internal_dir: "{{ caddy_config_dir }}/config"
caddy_dynamic_configs_dir: "{{ caddy_config_dir }}/sites.d"
caddy_state_dir: "/var/lib/caddy"
caddy_state: "present"
caddy_deployment_method: "docker"

View File

@@ -1,7 +0,0 @@
---
# System user settings for caddy; uid/gid fall back to the user name
# until caddy_user_info is registered by the user task in tasks/main.yml.
caddy_user_state: "{{ caddy_state }}"
caddy_user_system: true
caddy_user_create_home: false
caddy_run_uid: "{{ caddy_user_info.uid | default(caddy_user) }}"
caddy_run_gid: "{{ caddy_user_info.group | default(caddy_user) }}"

View File

@@ -1,13 +0,0 @@
---
# Galaxy role metadata for the caddy role.
allow_duplicates: true
dependencies: []
galaxy_info:
  role_name: caddy
  description: Deploy caddy, a webserver
  galaxy_tags:
    - caddy
    - zerossl
    - http
    - webserver
    - docker
    - podman

View File

@@ -1,26 +0,0 @@
---
# Pulls the caddy container image and runs (or removes) the caddy
# container via the docker daemon.
- name: Ensure container image '{{ caddy_container_image }}' is {{ caddy_container_image_state }}
  community.docker.docker_image:
    name: "{{ caddy_container_image }}"
    state: "{{ caddy_container_image_state }}"
    source: "{{ caddy_container_image_source }}"
    force_source: "{{ caddy_container_image_force_source }}"
  register: caddy_container_image_info
  # retry pulls to ride out transient registry/network failures
  until: caddy_container_image_info is success
  retries: 10
  delay: 3
- name: Ensure container '{{ caddy_container_name }}' is {{ caddy_container_state }}
  community.docker.docker_container:
    name: "{{ caddy_container_name }}"
    image: "{{ caddy_container_image }}"
    state: "{{ caddy_container_state }}"
    env: "{{ caddy_container_env | default(omit, true) }}"
    user: "{{ caddy_container_user | default(omit, true) }}"
    ports: "{{ caddy_container_ports | default(omit, true) }}"
    labels: "{{ caddy_container_labels | default(omit, true) }}"
    volumes: "{{ caddy_container_all_volumes }}"
    networks: "{{ caddy_container_networks | default(omit, true) }}"
    etc_hosts: "{{ caddy_container_etc_hosts | default(omit, true) }}"
    network_mode: "{{ caddy_container_network_mode | default(omit, true) }}"
    restart_policy: "{{ caddy_container_restart_policy }}"

View File

@@ -1,52 +0,0 @@
---
# Validates input, creates the caddy user, directories and Caddyfile,
# then hands off to the selected deployment method.
- name: Ensure state '{{ caddy_state }}' is valid
  ansible.builtin.fail:
    msg: >-2
      Unsupported caddy_state '{{ caddy_state }}'.
      Supported states are {{ caddy_states | join(', ') }}.
  when: caddy_state not in caddy_states
- name: Ensure deployment method '{{ caddy_deployment_method }}' is valid
  ansible.builtin.fail:
    msg: >-2
      Unsupported caddy_deployment_method '{{ caddy_deployment_method }}'.
      Supported deployment methods are {{ caddy_deployment_methods | join(', ') }}.
  when: caddy_deployment_method not in caddy_deployment_methods
- name: Ensure caddy user '{{ caddy_user }}' is {{ caddy_user_state }}
  ansible.builtin.user:
    name: "{{ caddy_user }}"
    state: "{{ caddy_user_state }}"
    system: "{{ caddy_user_system }}"
    create_home: "{{ caddy_user_create_home }}"
  register: "caddy_user_info"
- name: Ensure base directories are present
  ansible.builtin.file:
    path: "{{ dir.name }}"
    state: "directory"
    owner: "{{ dir.owner | default(caddy_run_uid) }}"
    # default the group to the caddy gid; previously this fell back to
    # caddy_run_uid, setting the group ownership to the *user* id
    group: "{{ dir.group | default(caddy_run_gid) }}"
    mode: "{{ dir.mode | default('0750') }}"
  when: caddy_state == 'present'
  loop:
    - name: "{{ caddy_config_dir }}"
    - name: "{{ caddy_dynamic_configs_dir }}"
    - name: "{{ caddy_config_internal_dir }}"
    - name: "{{ caddy_state_dir }}"
  loop_control:
    loop_var: "dir"
    label: "{{ dir.name }}"
- name: Ensure caddy configuration is up to date
  ansible.builtin.copy:
    dest: "{{ caddy_config_file }}"
    content: "{{ caddy_config }}"
    owner: "{{ caddy_run_uid }}"
    group: "{{ caddy_run_gid }}"
    mode: "0640"
  when: caddy_state == 'present'
- name: Ensure caddy is deployed using {{ caddy_deployment_method }}
  ansible.builtin.include_tasks:
    file: "deploy-{{ caddy_deployment_method }}.yml"

View File

@@ -1,6 +0,0 @@
---
# Valid values validated against in tasks/main.yml.
caddy_states:
  - "present"
  - "absent"
caddy_deployment_methods:
  - "docker"

View File

@@ -1,7 +0,0 @@
# `finallycoffee.base.caddy_site` ansible role
Provision a single site configuration in caddy.
Set `caddy_site_name` as a unique
site identifier (needs to be a valid filename) and `caddy_site_config`
to contain the actual `Caddyfile` contents.

View File

@@ -1,13 +0,0 @@
---
# Per-site configuration: caddy_site_name and caddy_site_config must be
# provided by the caller (both default to null and are validated in
# tasks/main.yml).
caddy_site_name: ~
caddy_site_config: ~
caddy_site_state: "present"
caddy_site_configs: "/etc/caddy/sites.d"
caddy_site_config_dir: >-2
  {{ caddy_site_configs }}/{{ caddy_site_name }}
caddy_site_config_file: >-2
  {{ caddy_site_config_dir }}/Caddyfile
caddy_site_owner: "caddy"
caddy_site_group: "caddy"

View File

@@ -1,11 +0,0 @@
---
# Galaxy role metadata for the caddy_site role.
allow_duplicates: true
dependencies: []
galaxy_info:
  role_name: caddy_site
  description: Deploy a sites' configuration in caddy
  galaxy_tags:
    - caddy
    - zerossl
    - http
    - webserver

View File

@@ -1,26 +0,0 @@
---
# Validates required inputs, then creates (or removes) the per-site
# config directory and writes the site's Caddyfile into it.
- name: Fail if required variables are not populated
  ansible.builtin.fail:
    msg: "Either `caddy_site_name` or `caddy_site_config` is not provided"
  when: >-2
    (caddy_site_name | ansible.builtin.type_debug == 'NoneType')
    or
    (caddy_site_config | ansible.builtin.type_debug == 'NoneType')
- name: Ensure directory for caddy site config '{{ caddy_site_name }}' is {{ caddy_site_state }}
  ansible.builtin.file:
    path: "{{ caddy_site_config_dir }}"
    # removing the directory also removes the contained Caddyfile
    state: >-2
      {{ (caddy_site_state == 'present') | ternary('directory', 'absent') }}
    owner: "{{ caddy_site_owner }}"
    group: "{{ caddy_site_group }}"
    mode: "0750"
- name: Ensure caddy site configuration is templated
  ansible.builtin.copy:
    dest: "{{ caddy_site_config_file }}"
    content: "{{ caddy_site_config }}"
    owner: "{{ caddy_site_owner }}"
    group: "{{ caddy_site_group }}"
    mode: "0640"
  when: caddy_site_state == 'present'

View File

@@ -1,13 +0,0 @@
# `finallycoffee.base.docker` ansible role
Install and configure the docker daemon.
## Configuration
- `docker_daemon_config` - configuration for the docker daemon
- `docker_remove_legacy_packages` - clean up old versions of docker (see https://docs.docker.com/engine/install/debian/#uninstall-old-versions)
## Plugins
- `docker_plugin_buildx_enable` - enable the buildx plugin
- `docker_plugin_compose_enable` - enable docker compose

View File

@@ -1,31 +0,0 @@
---
# Docker upstream apt repository configuration (Debian).
docker_apt_key_url: "https://download.docker.com/linux/debian/gpg"
docker_apt_key_id: "9DC858229FC7DD38854AE2D88D81803C0EBFCD88"
docker_apt_arch: amd64
docker_apt_release_channel: stable
docker_apt_repository_url: "https://download.docker.com/linux/debian"
docker_apt_repository: >-2
  deb [arch={{ docker_apt_arch }}] {{ docker_apt_repository_url }} {{ ansible_distribution_release }} {{ docker_apt_release_channel }}
docker_apt_cli_package: "docker-ce-cli"
docker_apt_plugin_buildx_package: "docker-buildx-plugin"
docker_apt_plugin_compose_package: "docker-compose-plugin"
docker_apt_base_packages:
  - "docker-ce"
  - "docker-ce-cli"
  - "containerd.io"
# base packages plus any plugins enabled via docker_plugin_*_enable
docker_apt_packages: >-2
  {{
    docker_apt_base_packages
    + (docker_plugin_buildx_enable | default(false)
      | ternary([ docker_apt_plugin_buildx_package ], []))
    + (docker_plugin_compose_enable | default(false)
      | ternary([ docker_apt_plugin_compose_package ], []))
  }}
# distro packages which conflict with the upstream docker packages
docker_apt_legacy_packages:
  - "docker.io"
  - "docker-compose"
  - "docker-doc"
  - "podman-docker"
  - "containerd"
  - "runc"

View File

@@ -1,34 +0,0 @@
---
# Docker upstream dnf repository configuration (Fedora).
docker_fedora_repo_name: "docker-ce-stable"
docker_fedora_repo_description: "Docker CE Stable - $basearch"
docker_fedora_repo_url: "https://download.docker.com/linux/fedora/$releasever/$basearch/stable"
docker_fedora_repo_validate_certs: true
docker_fedora_repo_gpg_check: true
docker_fedora_repo_gpg_key: "https://download.docker.com/linux/fedora/gpg"
docker_fedora_cli_package: "docker-ce-cli"
docker_fedora_plugin_buildx_package: "docker-buildx-plugin"
docker_fedora_plugin_compose_package: "docker-compose-plugin"
docker_fedora_base_packages:
  - "docker-ce"
  - "docker-ce-cli"
  - "containerd.io"
# base packages plus any plugins enabled via docker_plugin_*_enable
docker_fedora_packages: >-2
  {{
    docker_fedora_base_packages
    + (docker_plugin_buildx_enable | default(false)
      | ternary([ docker_fedora_plugin_buildx_package ], []))
    + (docker_plugin_compose_enable | default(false)
      | ternary([ docker_fedora_plugin_compose_package ], []))
  }}
# distro packages which conflict with the upstream docker packages
docker_fedora_legacy_packages:
  - "docker"
  - "docker-client"
  - "docker-client-latest"
  - "docker-common"
  - "docker-latest"
  - "docker-latest-logrotate"
  - "docker-logrotate"
  - "docker-selinux"
  - "docker-engine-selinux"
  - "docker-engine"

View File

@@ -1,13 +0,0 @@
---
# Docker role defaults: daemon state, daemon.json content/location and
# optional CLI plugins.
docker_state: "present"
docker_daemon_config: {}
docker_daemon_config_file: "/etc/docker/daemon.json"
docker_daemon_config_file_mode: "0644"
docker_daemon_config_owner: root
docker_daemon_config_group: "{{ docker_daemon_config_owner }}"
docker_plugin_buildx_enable: false
docker_plugin_compose_enable: false
# remove distro docker packages which conflict with upstream docker
docker_remove_legacy_packages: true

View File

@@ -1,5 +0,0 @@
---
# systemd unit state for the docker daemon, derived from docker_state.
docker_systemd_service_name: "docker.service"
docker_systemd_service_state: >-2
  {{ (docker_state == 'present') | ternary('started', 'stopped') }}
docker_systemd_service_enabled: "{{ (docker_state == 'present') }}"

View File

@@ -1,6 +0,0 @@
---
# Restarts the docker daemon; notified via 'docker-restart' when the
# daemon configuration changes.
- name: Restart docker daemon
  ansible.builtin.systemd_service:
    name: "{{ docker_systemd_service_name }}"
    state: "restarted"
  listen: "docker-restart"

View File

@@ -1,18 +0,0 @@
---
# Writes the docker daemon configuration file and notifies a daemon
# restart when it changed.
- name: Ensure config directory '{{ docker_daemon_config_file | dirname }}' is present
  ansible.builtin.file:
    path: "{{ docker_daemon_config_file | dirname }}"
    state: "directory"
    mode: "0755"
    owner: "{{ docker_daemon_config_owner }}"
    group: "{{ docker_daemon_config_group }}"
- name: Configure docker daemon using '{{ docker_daemon_config_file }}'
  ansible.builtin.copy:
    content: "{{ docker_daemon_config | to_nice_json }}"
    dest: "{{ docker_daemon_config_file }}"
    mode: "{{ docker_daemon_config_file_mode }}"
    owner: "{{ docker_daemon_config_owner }}"
    group: "{{ docker_daemon_config_group }}"
  # skip when no daemon config was provided; the previous guard
  # ('| string | length > 0') was always true because the empty dict
  # default stringifies to '{}' (length 2)
  when: docker_daemon_config | length > 0
  notify: docker-restart

View File

@@ -1,30 +0,0 @@
---
# Installs docker from the upstream apt repository on Debian, removing
# conflicting distro packages first.
- name: Ensure legacy docker packages are removed
  ansible.builtin.apt:
    name: "{{ docker_apt_legacy_packages }}"
    state: absent
  when: docker_remove_legacy_packages
- name: Add apt key for docker repository
  # NOTE(review): apt_key is deprecated in recent ansible-core/Debian in
  # favour of keyring files — consider migrating; confirm target versions
  ansible.builtin.apt_key:
    id: "{{ docker_apt_key_id }}"
    url: "{{ docker_apt_key_url }}"
    state: "{{ docker_state }}"
- name: Add apt repository for docker
  ansible.builtin.apt_repository:
    repo: "{{ docker_apt_repository }}"
    state: "{{ docker_state }}"
  register: docker_apt_repository_info
- name: Update apt cache if repository was newly added
  ansible.builtin.apt:
    update_cache: true
  when:
    - docker_state == 'present'
    - docker_apt_repository_info.changed
- name: Install apt packages for docker
  ansible.builtin.apt:
    name: "{{ docker_apt_packages }}"
    state: "{{ docker_state }}"

View File

@@ -1,21 +0,0 @@
---
# Installs docker from the upstream dnf repository on Fedora, removing
# conflicting distro packages first.
- name: Ensure legacy docker packages are removed
  ansible.builtin.dnf:
    name: "{{ docker_fedora_legacy_packages }}"
    state: "removed"
  when: docker_remove_legacy_packages
- name: Ensure dnf repository for docker is {{ docker_state }}
  ansible.builtin.yum_repository:
    name: "{{ docker_fedora_repo_name }}"
    description: "{{ docker_fedora_repo_description }}"
    baseurl: "{{ docker_fedora_repo_url }}"
    validate_certs: "{{ docker_fedora_repo_validate_certs }}"
    gpgkey: "{{ docker_fedora_repo_gpg_key }}"
    gpgcheck: "{{ docker_fedora_repo_gpg_check }}"
    state: "{{ docker_state }}"
- name: Install dnf packages for docker
  ansible.builtin.dnf:
    name: "{{ docker_fedora_packages }}"
    state: "{{ docker_state }}"

View File

@@ -1,29 +0,0 @@
---
# Entry point of the docker role: validate the target OS, dispatch to the
# per-distribution install tasks, write the daemon config and manage the
# systemd unit state.
- name: Check if target OS is supported
  ansible.builtin.fail:
    msg: >-2
      OS '{{ docker_os }}' is not supported!
  when: docker_os not in docker_supported_oses
  vars:
    docker_os: "{{ ansible_distribution | lower }}"
# Picks install-debian.yml / install-fedora.yml based on the distribution
- name: Ensure docker is {{ docker_state }} on {{ ansible_distribution }}
  ansible.builtin.include_tasks:
    file: "install-{{ ansible_distribution | lower }}.yml"
- name: Configure docker daemon
  ansible.builtin.include_tasks:
    file: "configure.yml"
  when: docker_state == 'present'
# Service management only applies on systemd hosts
- name: Ensure docker daemon is {{ docker_systemd_service_enabled | ternary('enabled', 'disabled') }}
  ansible.builtin.systemd_service:
    name: "{{ docker_systemd_service_name }}"
    enabled: "{{ docker_systemd_service_enabled }}"
  when: ansible_facts['service_mgr'] == 'systemd'
- name: Ensure docker daemon is {{ docker_systemd_service_state }}
  ansible.builtin.systemd_service:
    name: "{{ docker_systemd_service_name }}"
    state: "{{ docker_systemd_service_state }}"
  when: ansible_facts['service_mgr'] == 'systemd'

View File

@@ -1,4 +0,0 @@
---
# Distributions the docker role can install on (lowercase
# ansible_distribution values); validated in tasks/main.yml.
docker_supported_oses:
  - 'debian'
  - 'fedora'

View File

@@ -0,0 +1,22 @@
# `finallycoffee.base.elasticsearch`
A simple ansible role which deploys a single-node elastic container to provide
an easy way to do some indexing.
## Usage
Per default, `/opt/elasticsearch/data` is used to persist data, it is
customizable by using either `elasticsearch_base_path` or `elasticsearch_data_path`.
As elasticsearch can be quite memory-heavy, the maximum amount of allowed RAM
can be configured using `elasticsearch_allocated_ram_mb`, defaulting to 512 (MB).
The cluster name and discovery type can be overridden using
`elasticsearch_config_cluster_name` (default: elastic) and
`elasticsearch_config_discovery_type` (default: single-node), should one
need a multi-node elasticsearch deployment.
Per default, no ports or networks are mapped, and explicit mapping using
either ports (`elasticsearch_container_ports`) or networks
(`elasticsearch_container_networks`) is required in order for other services
to use elastic.

View File

@@ -0,0 +1,35 @@
---
# Version of elasticsearch to deploy; also used as the default image tag
elasticsearch_version: 7.17.7
elasticsearch_base_path: /opt/elasticsearch
elasticsearch_data_path: "{{ elasticsearch_base_path }}/data"
elasticsearch_config_cluster_name: elastic
# 'single-node' disables cluster discovery; override for multi-node setups
elasticsearch_config_discovery_type: single-node
# NOTE(review): variable name misspells 'bootstrap'; kept as-is since
# renaming would break existing users of this role
elasticsearch_config_boostrap_memory_lock: true
# JVM heap size (-Xms/-Xmx) in megabytes
elasticsearch_allocated_ram_mb: 512
elasticsearch_container_image_name: docker.elastic.co/elasticsearch/elasticsearch-oss
# When unset (~), the tag falls back to elasticsearch_version below
elasticsearch_container_image_tag: ~
elasticsearch_container_image: >-
  {{ elasticsearch_container_image_name }}:{{ elasticsearch_container_image_tag | default(elasticsearch_version, true) }}
elasticsearch_container_name: elasticsearch
elasticsearch_container_env:
  "ES_JAVA_OPTS": "-Xms{{ elasticsearch_allocated_ram_mb }}m -Xmx{{ elasticsearch_allocated_ram_mb }}m"
  "cluster.name": "{{ elasticsearch_config_cluster_name }}"
  "discovery.type": "{{ elasticsearch_config_discovery_type }}"
  "bootstrap.memory_lock": "{{ 'true' if elasticsearch_config_boostrap_memory_lock else 'false' }}"
elasticsearch_container_user: ~
# No ports are published by default; see the role README
elasticsearch_container_ports: ~
elasticsearch_container_labels:
  version: "{{ elasticsearch_version }}"
elasticsearch_container_ulimits:
  # - "memlock:{{ (1.5 * 1024 * elasticsearch_allocated_ram_mb) | int }}:{{ (1.5 * 1024 * elasticsearch_allocated_ram_mb) | int }}"
  - "memlock:-1:-1"
elasticsearch_container_volumes:
  - "{{ elasticsearch_data_path }}:/usr/share/elasticsearch/data:z"
elasticsearch_container_networks: ~
elasticsearch_container_purge_networks: ~
elasticsearch_container_restart_policy: unless-stopped

View File

@@ -0,0 +1,32 @@
---
# Deploys a single-node elasticsearch instance in a docker container.
# Uses fully-qualified collection names (FQCN) for all modules, consistent
# with the other roles in this collection (mariadb, minio).
- name: Ensure host directories are present
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    mode: "0777"
  loop:
    - "{{ elasticsearch_base_path }}"
    - "{{ elasticsearch_data_path }}"
- name: Ensure elastic container image is present
  community.docker.docker_image:
    name: "{{ elasticsearch_container_image }}"
    state: present
    source: pull
    # Re-pull only when an explicit (possibly mutable) tag was pinned
    force_source: "{{ elasticsearch_container_image_tag | default(false, true) | bool }}"
- name: Ensure elastic container is running
  community.docker.docker_container:
    name: "{{ elasticsearch_container_name }}"
    image: "{{ elasticsearch_container_image }}"
    env: "{{ elasticsearch_container_env | default(omit, True) }}"
    user: "{{ elasticsearch_container_user | default(omit, True) }}"
    ports: "{{ elasticsearch_container_ports | default(omit, True) }}"
    labels: "{{ elasticsearch_container_labels | default(omit, True) }}"
    volumes: "{{ elasticsearch_container_volumes }}"
    ulimits: "{{ elasticsearch_container_ulimits }}"
    networks: "{{ elasticsearch_container_networks | default(omit, True) }}"
    purge_networks: "{{ elasticsearch_container_purge_networks | default(omit, True) }}"
    restart_policy: "{{ elasticsearch_container_restart_policy }}"
    state: started

View File

@@ -1,6 +1,6 @@
--- ---
lego_user: "lego" lego_user: "lego"
lego_version: "4.31.0" lego_version: "4.18.0"
lego_instance: default lego_instance: default
lego_base_path: "/opt/lego" lego_base_path: "/opt/lego"
lego_cert_user: "acme-{{ lego_instance }}" lego_cert_user: "acme-{{ lego_instance }}"

View File

@@ -1,35 +1,22 @@
#!/usr/bin/env bash #!/usr/bin/env bash
set -euo pipefail
LEGO_BINARY=$(/usr/bin/env which lego) LEGO_BINARY=$(/usr/bin/env which lego)
if [[ -n "${LEGO_HTTP_FALLBACK_PORT:-}" ]]; then if [[ -n "$LEGO_HTTP_FALLBACK_PORT" ]]; then
if ! nc_binary="$(type -p 'nc')" || [[ -z $nc_binary ]]; then nc -z 127.0.0.1 $LEGO_HTTP_PORT;
echo "nc not found (in PATH), exiting" if [[ $? -eq 0 ]]; then
exit 1
fi
nc_exit_code=$(nc -z 127.0.0.1 $LEGO_HTTP_PORT);
if [[ $nc_exit_code -eq 0 ]]; then
LEGO_HTTP_PORT=$LEGO_HTTP_FALLBACK_PORT LEGO_HTTP_PORT=$LEGO_HTTP_FALLBACK_PORT
fi fi
fi fi
if [[ -n "${LEGO_PRE_RENEWAL_HOOK:-}" ]]; then
$LEGO_PRE_RENEWAL_HOOK
fi
LEGO_COMMAND_ARGS_EXPANDED=$(bash -c "echo $LEGO_COMMAND_ARGS") # This is a bit icky LEGO_COMMAND_ARGS_EXPANDED=$(bash -c "echo $LEGO_COMMAND_ARGS") # This is a bit icky
FILES_IN_DIR=$(find "$LEGO_CERT_STORE_PATH/certificates" -type f | wc -l) FILES_IN_DIR=$(find "$LEGO_CERT_STORE_PATH/certificates" | wc -l)
if [[ $FILES_IN_DIR -gt 2 ]]; then if [[ $FILES_IN_DIR -gt 2 ]]; then
$LEGO_BINARY $LEGO_COMMAND_ARGS_EXPANDED renew --days=$LEGO_CERT_DAYS_TO_RENEW $LEGO_BINARY $LEGO_COMMAND_ARGS_EXPANDED renew --days=$LEGO_CERT_DAYS_TO_RENEW
else else
$LEGO_BINARY $LEGO_COMMAND_ARGS_EXPANDED run $LEGO_BINARY $LEGO_COMMAND_ARGS_EXPANDED run
fi fi
find "$LEGO_CERT_STORE_PATH/certificates" -type f | xargs -I{} -n 1 chmod "$LEGO_CERT_MODE" "{}" ls "$LEGO_CERT_STORE_PATH/certificates" | xargs -I{} -n 1 chmod "$LEGO_CERT_MODE" "$LEGO_CERT_STORE_PATH/certificates/{}"
find "$LEGO_CERT_STORE_PATH/certificates" -type f | xargs -I{} -n 1 chown "${LEGO_CERT_USER}:${LEGO_CERT_GROUP}" "{}" ls "$LEGO_CERT_STORE_PATH/certificates" | xargs -I{} -n 1 chown "$LEGO_CERT_USER":"$LEGO_CERT_GROUP" "$LEGO_CERT_STORE_PATH/certificates/{}"
if [[ -n "${LEGO_POST_RENEWAL_HOOK:-}" ]]; then
$LEGO_POST_RENEWAL_HOOK
fi

View File

@@ -25,44 +25,35 @@
- "{{ lego_cert_group }}" - "{{ lego_cert_group }}"
append: true append: true
- name: Ensure lego is installed
block:
- name: Check if lego is present - name: Check if lego is present
ansible.builtin.command: ansible.builtin.command:
cmd: which lego cmd: which lego
changed_when: false changed_when: false
failed_when: false failed_when: false
register: lego_binary_info register: lego_binary_info
check_mode: false
- name: Check which version of lego is present
ansible.builtin.command:
cmd: "lego --version"
changed_when: false
failed_when: false
register: lego_binary_version_info
when: lego_binary_info.rc == 0
check_mode: false
- name: Ensure lego is installed
when: (lego_binary_info.rc != 0) or (lego_version not in lego_binary_version_info.stdout)
block:
- name: Download lego from source - name: Download lego from source
ansible.builtin.get_url: ansible.builtin.get_url:
url: "{{ lego_release_archive_url }}" url: "{{ lego_release_archive_url }}"
url_username: "{{ lego_release_archive_url_username | default(omit) }}" url_username: "{{ lego_release_archive_url_username | default(omit) }}"
url_password: "{{ lego_release_archive_url_password | default(omit) }}" url_password: "{{ lego_release_archive_url_password | default(omit) }}"
dest: "{{ lego_release_archive_file_path }}" dest: "{{ lego_release_archive_file_path }}"
when: lego_binary_info.rc != 0
- name: Create folder to uncompress into - name: Create folder to uncompress into
ansible.builtin.file: ansible.builtin.file:
dest: "{{ lego_release_archive_path }}" dest: "{{ lego_release_archive_path }}"
state: directory state: directory
when: lego_binary_info.rc != 0
- name: Uncompress lego source archive - name: Uncompress lego source archive
ansible.builtin.unarchive: ansible.builtin.unarchive:
src: "{{ lego_release_archive_file_path }}" src: "{{ lego_release_archive_file_path }}"
dest: "{{ lego_release_archive_path }}" dest: "{{ lego_release_archive_path }}"
remote_src: true remote_src: true
ignore_errors: "{{ ansible_check_mode }}" when: lego_binary_info.rc != 0
- name: Ensure lego binary is present in PATH - name: Ensure lego binary is present in PATH
ansible.builtin.copy: ansible.builtin.copy:
@@ -70,7 +61,14 @@
dest: "/usr/local/bin/lego" dest: "/usr/local/bin/lego"
mode: "u+rwx,g+rx,o+rx" mode: "u+rwx,g+rx,o+rx"
remote_src: true remote_src: true
ignore_errors: "{{ ansible_check_mode }}" when: lego_binary_info.rc != 0
- name: Ensure lego is allowed to bind to ports < 1024
community.general.capabilities:
path: "/usr/local/bin/lego"
capability: "cap_net_bind_service+ep"
state: present
when: lego_binary_allow_net_bind_service
- name: Ensure intermediate data is gone - name: Ensure intermediate data is gone
ansible.builtin.file: ansible.builtin.file:
@@ -79,13 +77,7 @@
loop: loop:
- "{{ lego_release_archive_path }}" - "{{ lego_release_archive_path }}"
- "{{ lego_release_archive_file_path }}" - "{{ lego_release_archive_file_path }}"
when: lego_binary_info.rc != 0
- name: Ensure lego is allowed to bind to ports < 1024
community.general.capabilities:
path: "/usr/local/bin/lego"
capability: "cap_net_bind_service+ep"
state: present
when: lego_binary_allow_net_bind_service
- name: Ensure lego base path exists - name: Ensure lego base path exists
ansible.builtin.file: ansible.builtin.file:
@@ -107,7 +99,6 @@
{{ entry.key }}={{ entry.value }} {{ entry.key }}={{ entry.value }}
{% endfor %} {% endfor %}
dest: "{{ lego_base_path }}/{{ lego_instance }}.conf" dest: "{{ lego_base_path }}/{{ lego_instance }}.conf"
register: lego_env_file_info
- name: Ensure timer unit is templated - name: Ensure timer unit is templated
ansible.builtin.template: ansible.builtin.template:
@@ -121,7 +112,6 @@
src: "lego_run.sh" src: "lego_run.sh"
dest: "{{ lego_base_path }}/run.sh" dest: "{{ lego_base_path }}/run.sh"
mode: "0755" mode: "0755"
register: lego_handler_script_info
- name: Ensure per-instance base path is created - name: Ensure per-instance base path is created
ansible.builtin.file: ansible.builtin.file:
@@ -161,18 +151,7 @@
name: "{{ lego_systemd_timer_name }}" name: "{{ lego_systemd_timer_name }}"
state: "started" state: "started"
- name: Check if certificates are present
ansible.builtin.find:
path: "{{ lego_instance_path }}/certificates"
recurse: false
file_type: "file"
register: lego_certificate_info
- name: Ensure systemd service is started once to obtain the certificate - name: Ensure systemd service is started once to obtain the certificate
ansible.builtin.systemd_service: ansible.builtin.systemd_service:
name: "{{ lego_systemd_service_name }}" name: "{{ lego_systemd_service_name }}"
state: "started" state: "started"
when: >-2
lego_handler_script_info.changed
or lego_env_file_info.changed
or lego_certificate_info.files | default([]) | length == 0

19
roles/mariadb/README.md Normal file
View File

@@ -0,0 +1,19 @@
# `finallycoffee.base.mariadb` ansible role
This role deploys a MariaDB instance in a docker container.
## Usage
The role expects the following variables to be populated with values and/or secrets:
```yaml
mariadb_root_password: # MariaDB root password
mariadb_database: # name of the database to create
mariadb_username: # name of a user to auto-create and assign permission on the mariadb_database
mariadb_password: # password of the user in mariadb_username
```
## Requirements
- Docker installed
- python-docker present on target system for ansible to be able to talk with the docker API.

View File

@@ -0,0 +1,32 @@
---
# Version of MariaDB to deploy; also the default image tag and part of
# the per-version data directory path below
mariadb_version: "10.11.9"
mariadb_base_path: /var/lib/mariadb
mariadb_data_path: "{{ mariadb_base_path }}/{{ mariadb_version }}"
# Credentials/database to provision; see the role README
mariadb_root_password: ~
mariadb_database: ~
mariadb_username: ~
mariadb_password: ~
mariadb_container_base_environment:
  MARIADB_ROOT_PASSWORD: "{{ mariadb_root_password }}"
mariadb_container_extra_environment: {}
mariadb_container_name: mariadb
mariadb_container_image_name: docker.io/mariadb
# When unset (~), the tag falls back to mariadb_version
mariadb_container_image_tag: ~
mariadb_container_image: "{{ mariadb_container_image_name }}:{{ mariadb_container_image_tag | default(mariadb_version, true) }}"
mariadb_container_base_volumes:
  - "{{ mariadb_data_path }}:{{ mariadb_container_data_path }}:z"
mariadb_container_extra_volumes: []
mariadb_container_base_labels:
  version: "{{ mariadb_version }}"
mariadb_container_extra_labels: {}
mariadb_container_restart_policy: "unless-stopped"
# The database/user environment is only merged in when all three of
# database, username and password are set
mariadb_container_environment: >-2
  {{ mariadb_container_base_environment
     | combine(mariadb_container_database_environment
               if (mariadb_database and mariadb_username and mariadb_password)
               else {}, recursive=True)
     | combine(mariadb_container_extra_environment) }}

View File

@@ -0,0 +1,20 @@
---
# Pulls the MariaDB image and runs it as a docker container.
- name: Ensure mariaDB container image is present on host
  community.docker.docker_image:
    name: "{{ mariadb_container_image }}"
    state: present
    source: pull
- name: Ensure mariaDB {{ mariadb_version }} is running as '{{ mariadb_container_name }}'
  community.docker.docker_container:
    name: "{{ mariadb_container_name }}"
    image: "{{ mariadb_container_image }}"
    env: "{{ mariadb_container_environment }}"
    # 'ports' has no default in this role; omit when undefined instead of
    # failing with an undefined-variable error
    ports: "{{ mariadb_container_ports | default(omit, true) }}"
    labels: "{{ mariadb_container_labels }}"
    volumes: "{{ mariadb_container_volumes }}"
    networks: "{{ mariadb_container_networks | default(omit, true) }}"
    etc_hosts: "{{ mariadb_container_etc_hosts | default(omit, true) }}"
    purge_networks: "{{ mariadb_container_purge_networks | default(omit, true) }}"
    restart_policy: "{{ mariadb_container_restart_policy }}"
    state: started

View File

@@ -0,0 +1,10 @@
---
# Extra environment injected only when database+user+password are all set;
# see mariadb_container_environment in defaults/main.yml
mariadb_container_database_environment:
  MARIADB_DATABASE: "{{ mariadb_database }}"
  MARIADB_USER: "{{ mariadb_username }}"
  MARIADB_PASSWORD: "{{ mariadb_password }}"
# Data directory inside the container (upstream image default)
mariadb_container_data_path: /var/lib/mysql
mariadb_container_volumes: "{{ mariadb_container_base_volumes + mariadb_container_extra_volumes }}"
mariadb_container_labels: "{{ mariadb_container_base_labels | combine(mariadb_container_extra_labels, recursive=True) }}"

View File

@@ -1,7 +1,17 @@
--- ---
minio_user: ~
minio_data_path: /opt/minio
minio_create_user: false
minio_manage_host_filesystem: false
minio_root_username: root
minio_root_password: ~
minio_container_name: minio minio_container_name: minio
minio_container_image_name: "docker.io/minio/minio" minio_container_image_name: docker.io/minio/minio
minio_container_image_tag: "RELEASE.2025-10-15T17-29-55Z" minio_container_image_tag: latest
minio_container_image: "{{ minio_container_image_name }}:{{ minio_container_image_tag }}" minio_container_image: "{{ minio_container_image_name }}:{{ minio_container_image_tag }}"
minio_container_networks: [] minio_container_networks: []
minio_container_ports: [] minio_container_ports: []
@@ -24,8 +34,6 @@ minio_container_command:
- ":{{ minio_container_listen_port_console }}" - ":{{ minio_container_listen_port_console }}"
minio_container_restart_policy: "unless-stopped" minio_container_restart_policy: "unless-stopped"
minio_container_image_force_source: "{{ (minio_container_image_tag == 'latest')|bool }}" minio_container_image_force_source: "{{ (minio_container_image_tag == 'latest')|bool }}"
minio_container_state: >-2
{{ (minio_state == 'present') | ternary('started', 'absent') }}
minio_container_listen_port_api: 9000 minio_container_listen_port_api: 9000
minio_container_listen_port_console: 8900 minio_container_listen_port_console: 8900

View File

@@ -1,12 +0,0 @@
---
# User to run minio as; only created when minio_create_user is true
minio_user: ~
minio_data_path: /opt/minio
minio_create_user: false
# When true, the role creates/chowns minio_data_path on the host
minio_manage_host_filesystem: false
minio_root_username: root
minio_root_password: ~
minio_state: present
minio_deployment_method: docker

View File

@@ -1,29 +0,0 @@
---
# Deploys minio as a docker container.
- name: Ensure filesystem mounts ({{ minio_data_path }}) for container volumes are present
  ansible.builtin.file:
    path: "{{ minio_data_path }}"
    state: directory
    # NOTE(review): ansible.builtin.file documents 'owner', not 'user' —
    # verify this parameter name against the module documentation
    user: "{{ minio_user|default(omit, True) }}"
    group: "{{ minio_user|default(omit, True) }}"
  when: minio_manage_host_filesystem
- name: Ensure container image '{{ minio_container_image }}' is {{ minio_state }}
  community.docker.docker_image:
    name: "{{ minio_container_image }}"
    state: "{{ minio_state }}"
    source: pull
    force_source: "{{ minio_container_image_force_source }}"
- name: Ensure container '{{ minio_container_name }}' is {{ minio_container_state }}
  community.docker.docker_container:
    name: "{{ minio_container_name }}"
    image: "{{ minio_container_image }}"
    volumes: "{{ minio_container_volumes }}"
    env: "{{ minio_container_env }}"
    labels: "{{ minio_container_labels }}"
    networks: "{{ minio_container_networks }}"
    ports: "{{ minio_container_ports }}"
    user: "{{ minio_user|default(omit, True) }}"
    command: "{{ minio_container_command }}"
    restart_policy: "{{ minio_container_restart_policy }}"
    state: "{{ minio_container_state }}"

View File

@@ -1,25 +1,37 @@
--- ---
- name: Ensure 'minio_state' is valid
ansible.builtin.fail:
msg: >-
Unsupported state '{{ minio_state }}'!
Supported states are {{ minio_states | join(', ') }}.
when: minio_state not in minio_states
- name: Ensure 'minio_deployment_method' is valid - name: Ensure minio run user is present
ansible.builtin.fail: user:
msg: >-
Unsupported state '{{ minio_deployment_method }}'!
Supported states are {{ minio_deployment_methods | join(', ') }}.
when: minio_deployment_method not in minio_deployment_methods
- name: Ensure minio run user is {{ minio_state }}
ansible.builtin.user:
name: "{{ minio_user }}" name: "{{ minio_user }}"
state: "{{ minio_state }}" state: present
system: true system: yes
when: minio_create_user when: minio_create_user
- name: Deploy minio using {{ minio_deployment_method }} - name: Ensure filesystem mounts ({{ minio_data_path }}) for container volumes are present
ansible.builtin.include_tasks: file:
file: "deploy-{{ minio_deployment_method }}.yml" path: "{{ minio_data_path }}"
state: directory
user: "{{ minio_user|default(omit, True) }}"
group: "{{ minio_user|default(omit, True) }}"
when: minio_manage_host_filesystem
- name: Ensure container image for minio is present
community.docker.docker_image:
name: "{{ minio_container_image }}"
state: present
source: pull
force_source: "{{ minio_container_image_force_source }}"
- name: Ensure container {{ minio_container_name }} is running
docker_container:
name: "{{ minio_container_name }}"
image: "{{ minio_container_image }}"
volumes: "{{ minio_container_volumes }}"
env: "{{ minio_container_env }}"
labels: "{{ minio_container_labels }}"
networks: "{{ minio_container_networks }}"
ports: "{{ minio_container_ports }}"
user: "{{ minio_user|default(omit, True) }}"
command: "{{ minio_container_command }}"
restart_policy: "{{ minio_container_restart_policy }}"
state: started

View File

@@ -1,9 +1,5 @@
--- ---
minio_states:
- present
- absent
minio_deployment_methods:
- docker
minio_container_volumes: "{{ minio_container_base_volumes + minio_container_extra_volumes }}" minio_container_volumes: "{{ minio_container_base_volumes + minio_container_extra_volumes }}"
minio_container_env: "{{ minio_container_base_env | combine(minio_container_extra_env) }}" minio_container_env: "{{ minio_container_base_env | combine(minio_container_extra_env) }}"

View File

@@ -1,4 +0,0 @@
# `finallycoffee.base.mosh`
Installs [`mosh`](https://mosh.org/#), a remote 'mobile shell' which supports
roaming and re-uses SSH for the authentication layer.

View File

@@ -1,2 +0,0 @@
---
# 'present' installs mosh, 'absent' removes it; validated in tasks/main.yml
mosh_state: present

View File

@@ -1,15 +0,0 @@
---
# Per-distribution package lists for mosh; keyed below by the lowercase
# ansible_distribution value in mosh_packages
mosh_debian_packages:
  - "mosh"
  - "openssh-server"
mosh_fedora_packages:
  - "mosh"
  - "openssh-server"
mosh_archlinux_packages:
  - "mosh"
  - "openssh"
mosh_packages:
  debian: "{{ mosh_debian_packages }}"
  fedora: "{{ mosh_fedora_packages }}"
  archlinux: "{{ mosh_archlinux_packages }}"

View File

@@ -1,30 +0,0 @@
---
# Installs or removes mosh with the package manager detected on the target;
# each task is guarded by both the active pkg_mgr and the presence of a
# package list for the distribution.
- name: Ensure mosh is {{ mosh_state }} (dnf)
  ansible.builtin.dnf:
    name: "{{ mosh_packages[_key] }}"
    state: "{{ mosh_state }}"
  when:
    - ansible_facts['pkg_mgr'] in ['dnf', 'dnf5']
    - _key in mosh_packages.keys()
  vars:
    _key: "{{ ansible_distribution | lower }}"
- name: Ensure mosh is {{ mosh_state }} (apt)
  ansible.builtin.apt:
    package: "{{ mosh_packages[_key] }}"
    state: "{{ mosh_state }}"
  when:
    - ansible_facts['pkg_mgr'] in ['apt']
    - _key in mosh_packages.keys()
  vars:
    _key: "{{ ansible_distribution | lower }}"
- name: Ensure mosh is {{ mosh_state }} (pacman)
  community.general.pacman:
    name: "{{ mosh_packages[_key] }}"
    state: "{{ mosh_state }}"
  when:
    - ansible_facts['pkg_mgr'] in ['pacman']
    - _key in mosh_packages.keys()
  vars:
    _key: "{{ ansible_distribution | lower }}"

View File

@@ -1,11 +0,0 @@
---
# Entry point of the mosh role: validate 'mosh_state', then dispatch to
# the package-manager-specific install tasks.
- name: Ensure 'mosh_state' is valid
  ansible.builtin.fail:
    msg: >-2
      Invalid state '{{ mosh_state }}' for 'mosh_state'!
      Allowed states are {{ mosh_states | join(', ') }}.
  when: mosh_state not in mosh_states
- name: Ensure mosh is {{ mosh_state }}
  ansible.builtin.include_tasks:
    file: "install.yml"

View File

@@ -1,4 +0,0 @@
---
# Valid values for 'mosh_state'; checked in tasks/main.yml
mosh_states:
  - "present"
  - "absent"

View File

@@ -1,5 +1,5 @@
--- ---
nginx_version: "1.29.4" nginx_version: "1.27.2"
nginx_flavour: alpine nginx_flavour: alpine
nginx_base_path: /opt/nginx nginx_base_path: /opt/nginx
nginx_config_file: "{{ nginx_base_path }}/nginx.conf" nginx_config_file: "{{ nginx_base_path }}/nginx.conf"

View File

@@ -1,4 +0,0 @@
# `finallycoffee.base.ntp`
Install `ntp`, configure a timezone by sym-linking `/etc/localtime`
and enable the systemd service.

View File

@@ -1,14 +0,0 @@
---
# 'present' installs and starts ntp, 'absent' removes it
ntp_state: present
ntp_package_name: "ntp"
ntp_timezone: "Europe/Paris"
ntp_systemd_service_name: "ntpd.service"
# Service state/enablement are derived from ntp_state
ntp_systemd_service_state: >-2
  {{ (ntp_state == 'present') | ternary('started', 'stopped') }}
ntp_systemd_service_enabled: >-2
  {{ (ntp_state == 'present') }}
ntp_etc_localtime_file: "/etc/localtime"
ntp_usr_share_zoneinfo_path: "/usr/share/zoneinfo"

View File

@@ -1,8 +0,0 @@
---
# Galaxy role metadata for the ntp role
allow_duplicates: true
dependencies: []
galaxy_info:
  role_name: ntp
  description: Install network time protocol daemon
  galaxy_tags:
    - ntp

View File

@@ -1,28 +0,0 @@
---
# Entry point of the ntp role: validate state, install the package,
# symlink the timezone and manage the systemd service.
- name: Check if 'ntp_state' is valid
  ansible.builtin.fail:
    msg: >-2
      Invalid state '{{ ntp_state }}'! Valid
      states are {{ ntp_states | join(', ') }}.
  when: ntp_state not in ntp_states
- name: Ensure system package is {{ ntp_state }}
  ansible.builtin.package:
    name: "{{ ntp_package_name }}"
    state: "{{ ntp_state }}"
# Configures the host timezone by linking /etc/localtime into zoneinfo
- name: Ensure /etc/localtime is symlinked
  ansible.builtin.file:
    src: "{{ ntp_usr_share_zoneinfo_path }}/{{ ntp_timezone }}"
    dest: "{{ ntp_etc_localtime_file }}"
    state: "{{ (ntp_state == 'present') | ternary('link', 'absent') }}"
- name: Ensure ntp systemd service is configured
  ansible.builtin.systemd:
    name: "{{ ntp_systemd_service_name }}"
    enabled: "{{ ntp_systemd_service_enabled }}"
- name: Ensure ntp systemd service is {{ ntp_systemd_service_state }}
  ansible.builtin.systemd:
    name: "{{ ntp_systemd_service_name }}"
    state: "{{ ntp_systemd_service_state }}"

View File

@@ -1,4 +0,0 @@
---
# Valid values for 'ntp_state'; checked in tasks/main.yml
ntp_states:
  - "present"
  - "absent"

View File

@@ -1,3 +0,0 @@
# `finallycoffee.base.openldap` ansible role
Deploy and configure [OpenLDAP](https://www.openldap.org/).

View File

@@ -1,61 +0,0 @@
---
# Container-related defaults for the openldap role.
openldap_container_name: "openldap"
openldap_container_image_registry: docker.finally.coffee
openldap_container_image_namespace: containers
openldap_container_image_name: "openldap"
# When unset (~), the tag falls back to the alpine package version
openldap_container_image_tag: ~
openldap_container_image_source: "pull"
# Force a re-pull only when an explicit tag was pinned
openldap_container_image_force_source: >-2
  {{ openldap_container_image_tag | default(false, true) }}
# registry/namespace/name joined with '/', skipping unset parts
openldap_container_image_repository: >-2
  {{
    [
      openldap_container_image_registry | default([], true),
      openldap_container_image_namespace | default([], true),
      openldap_container_image_name
    ] | flatten | join('/')
  }}
openldap_container_image: >-2
  {{
    [
      openldap_container_image_repository,
      openldap_container_image_tag
      | default(openldap_alpine_package_version, true),
    ] | join(':')
  }}
openldap_container_env: ~
openldap_container_user: ~
openldap_container_ports: ~
openldap_container_labels: ~
openldap_container_volumes: ~
openldap_container_networks: ~
openldap_container_network_mode: ~
openldap_container_dns_servers: ~
openldap_container_etc_hosts: ~
# Raise the file-descriptor limit for slapd
openldap_container_ulimits:
  - "nofile:{{ openldap_fd_soft_limit }}:{{ openldap_fd_hard_limit }}"
openldap_container_memory: "256M"
openldap_container_memory_swap: ~
openldap_container_memory_reservation: "128M"
openldap_container_restart_policy: "on-failure"
openldap_container_state: >-2
  {{ (openldap_state == 'present') | ternary('started', 'absent') }}
# Host paths are mounted at the same locations inside the container
openldap_container_data_path: "{{ openldap_data_path }}"
openldap_container_config_path: "{{ openldap_config_path }}"
openldap_container_socket_path: "{{ openldap_socket_path }}"
openldap_container_base_volumes:
  - "{{ openldap_config_path }}:{{ openldap_container_config_path }}:Z"
  - "{{ openldap_data_path }}:{{ openldap_container_data_path }}:rw"
  - "{{ openldap_socket_path }}:{{ openldap_container_socket_path }}:rw"
openldap_container_all_volumes: >-2
  {{ openldap_container_base_volumes | default([], true)
     + openldap_container_volumes | default([], true) }}
openldap_init_container_volumes:
  - "{{ [openldap_slapd_path, openldap_slapd_path, 'ro'] | join(':') }}"
# Healthy when slapd is listening and its process is running
openldap_container_healthcheck:
  test: >-2
    [[ $(netstat -plnte | grep slapd | wc -l) -ge 1 ]]
    && [[ $(ps aux | grep slapd | wc -l) -ge 1 ]]
    || exit 1

View File

@@ -1,24 +0,0 @@
---
# OpenLDAP version, expressed as the alpine package version below
openldap_version: "2.6.8"
openldap_alpine_revision: "0"
openldap_alpine_package_version: >-2
  v{{ openldap_version }}-r{{ openldap_alpine_revision | string }}
# Base domain and organization for the directory; must be set by the user
openldap_domain: ~
openldap_organization: ~
openldap_config_path: "/etc/openldap/"
# '{0}' is the literal OLC database index used by slapd on disk
openldap_olc_path: "{{ openldap_config_path }}/{0}config"
openldap_slapd_path: "{{ openldap_config_path }}/slapd.ldif"
openldap_schema_path: "{{ openldap_config_path }}/schema"
openldap_data_path: "/var/lib/openldap"
openldap_socket_path: "/run/openldap"
openldap_socket: "{{ openldap_socket_path }}/slapd.sock"
# ldapi:// URL with the socket path percent-encoded
openldap_socket_url: >-2
  ldapi://{{ openldap_socket | urlencode | replace('/', '%2F') }}
openldap_state: "present"
openldap_deployment_method: "docker"
# Seeds the OLC configuration database from slapd.ldif on first start
openldap_slapadd_init_command: >-2
  slapadd -v -F {{ openldap_olc_path }} -n 0 -l {{ openldap_slapd_path }}

View File

@@ -1,65 +0,0 @@
---
# Derive the base DN from the domain, e.g. example.org -> dc=example,dc=org
openldap_dn: >-2
  dc={{ openldap_domain | regex_replace('\.', ',dc=') }}
openldap_root_username: "admin"
openldap_root_pw: ~
openldap_root_node_object_classes:
  - "top"
  - "dcObject"
  - "organization"
# First label of the domain, e.g. example.org -> example
openldap_root_node_dc: "{{ openldap_domain | regex_replace('\\..+', '') }}"
openldap_root_node_o: "{{ openldap_organization | default('not set!', true) }}"
openldap_fd_soft_limit: "8192"
openldap_fd_hard_limit: "8192"
openldap_module_path: "/usr/lib/openldap"
openldap_modules:
  - "mdb"
  - "hdb"
openldap_core_schema_path: "{{ openldap_schema_path }}/core.ldif"
openldap_enabled_schemas:
  - name: "cosine"
  - name: "inetorgperson"
openldap_additional_schemas: []
openldap_schemas: >-2
  {{ openldap_enabled_schemas + openldap_additional_schemas }}
openldap_config_dn: "cn=config"
openldap_config_db_dn: "olcDatabase={0}config,cn=config"
# Restrict the config database to the local root user via SASL EXTERNAL
openldap_config_db_olc_access:
  - '{0} to *
    by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" manage
    by * none'
openldap_config_attributes: {}
openldap_config_db_attributes:
  olcAccess: "{{ openldap_config_db_olc_access }}"
openldap_default_indices:
  - "objectClass eq"
openldap_indices: []
openldap_default_database_name: "mdb"
openldap_default_database_object_class: "olcMdbConfig"
openldap_default_database_suffix: "{{ openldap_dn }}"
openldap_default_database_root_dn: >-2
  cn={{ openldap_root_username }},{{ openldap_default_database_suffix }}
openldap_default_database_root_pw: "{{ openldap_root_pw }}"
openldap_default_database_directory: >-2
  {{ openldap_data_path }}/{{ openldap_default_database_name }}
openldap_default_database_indices: >-2
  {{ openldap_default_indices + openldap_indices }}
openldap_default_database_config: >-2
  olcDatabase={1}{{ openldap_default_database_name }},{{ openldap_config_dn }}
openldap_default_database:
  name: "{{ openldap_default_database_name }}"
  object_class: "{{ openldap_default_database_object_class }}"
  suffix: "{{ openldap_default_database_suffix }}"
  root_dn: "{{ openldap_default_database_root_dn }}"
  root_pw: "{{ openldap_default_database_root_pw }}"
  directory: "{{ openldap_default_database_directory }}"
  indices: "{{ openldap_default_database_indices }}"
openldap_default_database_olc_access: "{{ openldap_config_db_olc_access }}"
openldap_databases:
  - "{{ openldap_default_database }}"

View File

@@ -1,80 +0,0 @@
---
# Applies OLC configuration over the local ldapi:// socket, then seeds the
# directory root node using the configured rootDN credentials.
- name: Ensure config attributes are configured
  community.general.ldap_attrs:
    dn: "{{ openldap_config_dn }}"
    attributes: "{{ { entry.key : entry.value } }}"
    state: exact
    server_uri: "{{ openldap_socket_url }}"
  loop: "{{ openldap_config_attributes | dict2items }}"
  loop_control:
    loop_var: "entry"
    label: "{{ entry.key }}"
- name: Ensure config db attributes are configured
  community.general.ldap_attrs:
    dn: "{{ openldap_config_db_dn }}"
    attributes: "{{ { entry.key : entry.value } }}"
    state: exact
    server_uri: "{{ openldap_socket_url }}"
  loop: "{{ openldap_config_db_attributes | dict2items }}"
  loop_control:
    loop_var: "entry"
    label: "{{ entry.key }}"
# Retried because slapd may still be starting up at this point
- name: Ensure ACLs for default database are configured
  community.general.ldap_attrs:
    dn: "{{ openldap_default_database_config }}"
    attributes:
      olcAccess: "{{ openldap_default_database_olc_access }}"
    state: "exact"
    server_uri: "{{ openldap_socket_url }}"
  retries: 3
  delay: 3
  register: openldap_acl_result
  until: openldap_acl_result is succeeded
- name: Ensure LDAP DIT is configured
  when:
    - openldap_default_database_root_dn is defined
    - openldap_default_database_root_pw is defined
  vars:
    # Anchor shared by the ldap_entry/ldap_attrs tasks below via merge keys
    _meta: &openldap_bind_info
      bind_dn: "{{ openldap_default_database_root_dn }}"
      bind_pw: "{{ openldap_default_database_root_pw }}"
      server_uri: "{{ openldap_socket_url }}"
  block:
    - name: Ensure rootDN + credentials are correct
      community.general.ldap_attrs:
        dn: "{{ openldap_default_database_config }}"
        attributes: "{{ {entry.key: entry.value} }}"
        state: "exact"
        server_uri: "{{ openldap_socket_url }}"
      # Suppress logging for loop entries flagged with log: false (the password)
      no_log: "{{ entry.log is defined and not entry.log }}"
      loop:
        - key: "olcRootDN"
          value: "{{ openldap_default_database_root_dn }}"
        - key: "olcRootPW"
          value: "{{ openldap_default_database_root_pw }}"
          log: false
      loop_control:
        loop_var: "entry"
        label: "{{ entry.key }}"
    - name: Ensure root node is {{ openldap_state }}
      community.general.ldap_entry:
        dn: "{{ openldap_dn }}"
        objectClass: "{{ openldap_root_node_object_classes }}"
        attributes:
          dc: "{{ openldap_root_node_dc }}"
          o: "{{ openldap_root_node_o }}"
        <<: *openldap_bind_info
    - name: Ensure root node attributes are up to date
      community.general.ldap_attrs:
        dn: "{{ openldap_dn }}"
        attributes:
          dc: "{{ openldap_root_node_dc }}"
          o: "{{ openldap_root_node_o }}"
        state: exact
        <<: *openldap_bind_info
      when: openldap_state == 'present'

View File

@@ -1,25 +0,0 @@
---
# Run slapd as a long-lived docker container. Every tunable is optional:
# 'default(omit, true)' omits the module argument when the role variable is
# unset or empty, falling back to the docker defaults.
- name: Ensure container '{{ openldap_container_name }}' is {{ openldap_container_state }}
  community.docker.docker_container:
    name: "{{ openldap_container_name }}"
    image: "{{ openldap_container_image }}"
    env: "{{ openldap_container_env | default(omit, true) }}"
    user: "{{ openldap_container_user | default(omit, true) }}"
    ports: "{{ openldap_container_ports | default(omit, true) }}"
    labels: "{{ openldap_container_labels | default(omit, true) }}"
    volumes: "{{ openldap_container_all_volumes | default(omit, true) }}"
    networks: "{{ openldap_container_networks | default(omit, true) }}"
    network_mode: "{{ openldap_container_network_mode | default(omit, true) }}"
    dns_servers: "{{ openldap_container_dns_servers | default(omit, true) }}"
    etc_hosts: "{{ openldap_container_etc_hosts | default(omit, true) }}"
    command: "{{ openldap_container_command | default(omit, true) }}"
    ulimits: "{{ openldap_container_ulimits | default(omit, true) }}"
    memory: "{{ openldap_container_memory | default(omit, true) }}"
    memory_swap: "{{ openldap_container_memory_swap | default(omit, true) }}"
    memory_reservation: >-2
      {{ openldap_container_memory_reservation | default(omit, true) }}
    restart_policy: >-2
      {{ openldap_container_restart_policy | default(omit, true) }}
    healthcheck: "{{ openldap_container_healthcheck | default(omit, true) }}"
    state: "{{ openldap_container_state }}"

View File

@@ -1,37 +0,0 @@
---
# One-shot database initialization: bind-mount any additional schema files
# into the container, then run slapadd once in the foreground.
- name: Ensure additional schemas are mapped to container
  ansible.builtin.set_fact:
    openldap_init_container_volumes: >-2
      {{ openldap_init_container_volumes + [ schema_mount ] }}
  vars:
    schema_file: "{{ openldap_schema_path }}/{{ schema.name }}.ldif"
    # Mount each schema file read-only at the same path inside the container.
    schema_mount: >-2
      {{ schema_file }}:{{ schema_file }}:ro
  loop: "{{ openldap_additional_schemas }}"
  loop_control:
    loop_var: "schema"
    label: "{{ schema.name }}"
- name: Ensure ldap container is initialized
  community.docker.docker_container:
    name: "{{ openldap_container_name }}"
    image: "{{ openldap_container_image }}"
    env: "{{ openldap_container_env | default(omit, true) }}"
    user: "{{ openldap_container_user | default(omit, true) }}"
    ports: "{{ openldap_container_ports | default(omit, true) }}"
    labels: "{{ openldap_container_labels | default(omit, true) }}"
    volumes: "{{ openldap_container_merged_volumes | default(omit, true) }}"
    networks: "{{ openldap_container_networks | default(omit, true) }}"
    network_mode: "{{ openldap_container_network_mode | default(omit, true) }}"
    dns_servers: "{{ openldap_container_dns_servers | default(omit, true) }}"
    etc_hosts: "{{ openldap_container_etc_hosts | default(omit, true) }}"
    memory: "{{ openldap_container_memory | default(omit, true) }}"
    memory_swap: "{{ openldap_container_memory_swap | default(omit, true) }}"
    memory_reservation: >-2
      {{ openldap_container_memory_reservation | default(omit, true) }}
    # Run slapadd instead of slapd, wait for it to finish (detach: false)
    # and remove the finished container (cleanup: true).
    command: "{{ openldap_slapadd_init_command }}"
    detach: false
    cleanup: true
  vars:
    # Regular container volumes plus the schema bind-mounts collected above.
    openldap_container_merged_volumes: >-2
      {{ openldap_container_all_volumes + openldap_init_container_volumes }}

View File

@@ -1,47 +0,0 @@
---
# Only initialize the databases on first run: an existing persisted
# 'cn=config' directory means slapd has already been bootstrapped.
- name: Determine if persisted OLC config exists
  ansible.builtin.stat:
    path: "{{ openldap_olc_path }}/cn=config"
  register: openldap_olc_stat_info
- name: Ensure openldap databases are initialized
  when: not openldap_olc_stat_info.stat.exists
  block:
    - name: Ensure initial slapd.ldif is templated
      ansible.builtin.template:
        src: "slapd.ldif.j2"
        dest: "{{ openldap_slapd_path }}"
        mode: "0644"
    - name: Ensure additional schemas to install are present
      ansible.builtin.copy:
        content: "{{ schema.content }}"
        dest: "{{ openldap_schema_path }}/{{ schema.name }}.ldif"
        mode: "0644"
      loop: "{{ openldap_additional_schemas }}"
      loop_control:
        loop_var: "schema"
        label: "{{ schema.name }}"
    - name: Ensure db data directory exists
      ansible.builtin.file:
        path: "{{ openldap_default_database_directory }}"
        state: directory
        mode: "0750"
    - name: Ensure container is initialized using {{ openldap_deployment_method }}
      ansible.builtin.include_tasks:
        file: "initialize-{{ openldap_deployment_method }}.yml"
  # On failure, clean up the temporary bootstrap files again.
  # NOTE(review): a successful rescue marks the host recovered, so a failed
  # initialization does not abort the play here -- confirm this is intended.
  rescue:
    - name: Ensure temporary schema files are absent
      ansible.builtin.file:
        path: "{{ openldap_schema_path }}/{{ file.name }}.ldif"
        state: absent
      loop: >-2
        {{ openldap_additional_schemas }}
      loop_control:
        loop_var: "file"
        label: "{{ file.name }}"
      ignore_errors: true
    - name: Ensure intial slapd.ldif file is absent
      ansible.builtin.file:
        path: "{{ openldap_slapd_path }}"
        state: absent
      ignore_errors: true

View File

@@ -1,26 +0,0 @@
---
# Role entrypoint: validate inputs, then prepare, deploy and configure
# openldap using the selected deployment method.
- name: Check if 'openldap_state' is valid
  ansible.builtin.fail:
    msg: >-2
      Invalid state '{{ openldap_state }}'!
      Supported states are {{ openldap_states | join(', ') }}.
  when: openldap_state not in openldap_states
- name: Check if 'openldap_deployment_method' is valid
  ansible.builtin.fail:
    # Fixed copy-pasted message: this previously reported an invalid 'state',
    # which misled operators about which variable was wrong.
    msg: >-2
      Invalid deployment method '{{ openldap_deployment_method }}'!
      Supported deployment methods are
      {{ openldap_deployment_methods | join(', ') }}.
  when: openldap_deployment_method not in openldap_deployment_methods
- name: Ensure openldap deployment is prepared
  ansible.builtin.include_tasks:
    file: "prepare-{{ openldap_deployment_method }}.yml"
- name: Ensure openldap is deployed using {{ openldap_deployment_method }}
  ansible.builtin.include_tasks:
    file: "deploy-{{ openldap_deployment_method }}.yml"
- name: Ensure openldap is configured
  ansible.builtin.include_tasks:
    file: "configure.yml"

View File

@@ -1,7 +0,0 @@
---
# Pull (state=present) or remove (state=absent) the openldap container image.
- name: Ensure container image '{{ openldap_container_image }}' is {{ openldap_state }}
  community.docker.docker_image:
    name: "{{ openldap_container_image }}"
    state: "{{ openldap_state }}"
    source: "{{ openldap_container_image_source }}"
    force_source: "{{ openldap_container_image_force_source }}"

View File

@@ -1,64 +0,0 @@
# Initial OLC configuration for slapd, rendered once at bootstrap and loaded
# via slapadd. LDIF entries must be separated by blank lines.
dn: cn=config
objectClass: olcGlobal
cn: config
olcPidFile: /run/openldap/slapd.pid
olcArgsFile: /run/openldap/slapd.args

# Dynamic backend modules
dn: cn=module,cn=config
objectClass: olcModuleList
cn: module
olcModulepath: {{ openldap_module_path }}
{% for mod in openldap_modules | default([]) %}
olcModuleload: back_{{ mod }}.so
{% endfor %}

# Schema config
dn: cn=schema,cn=config
objectClass: olcSchemaConfig
cn: schema
include: file://{{ openldap_core_schema_path }}
{% for schema in openldap_schemas %}
include: file://{{ openldap_schema_path }}/{{ schema.name }}.ldif
{% endfor %}

# Frontend settings
dn: olcDatabase=frontend,cn=config
objectClass: olcDatabaseConfig
objectClass: olcFrontendConfig
olcDatabase: frontend

# Config-DB settings
dn: olcDatabase=config,cn=config
objectClass: olcDatabaseConfig
olcDatabase: config
{% for attr in openldap_config_db_attributes | dict2items %}
{# Bugfix: test the attribute *value*. 'attr' itself is a dict2items item
   (always a mapping), so the old 'attr is string' branch was dead and
   string values were iterated character by character below. #}
{% if attr.value is string %}
{{ attr.key }}: {{ attr.value }}
{% else %}
{% for val in attr.value %}
{{ attr.key }}: {{ val }}
{% endfor %}
{% endif %}
{% endfor %}

# database settings
{% for db in openldap_databases %}

dn: olcDatabase={{ db.name }},cn=config
objectClass: olcDatabaseConfig
objectClass: {{ db.object_class }}
olcDatabase: {{ db.name }}
olcSuffix: {{ db.suffix }}
olcRootDN: {{ db.root_dn }}
olcRootPW: {{ db.root_pw }}
olcDbDirectory: {{ db.directory }}
{% for idx in db.indices %}
olcDbIndex: {{ idx }}
{% endfor %}
{% endfor %}

View File

@@ -1,6 +0,0 @@
---
openldap_states:
- "present"
- "absent"
openldap_deployment_methods:
- "docker"

View File

@@ -1,13 +0,0 @@
# `finallycoffee.base.openssh`
Ansible role to manage and configure openssh and its components (like `sshd`).
Currently supports `fedora` and `debian` linux distributions.
## `sshd`
To configure `sshd`, see the [`defaults/main/sshd.yml`](defaults/main/sshd.yml),
where snake\_cased config keys for `/etc/ssh/sshd_config` are available in
the `openssh_sshd_config_` namespace.
To add your own config on top, simply use key-value syntax in `openssh_sshd_config`.

View File

@@ -1,3 +0,0 @@
---
# Whether openssh should be 'present' or 'absent' (see vars/main.yml for
# the list of valid states).
openssh_state: 'present'
# Path of the sshd configuration file managed by this role.
openssh_sshd_config_file: "/etc/ssh/sshd_config"

View File

@@ -1,8 +0,0 @@
---
# Packages to install, keyed by lower-cased ansible_distribution.
openssh_packages:
  fedora: "{{ openssh_fedora_packages }}"
  debian: "{{ openssh_debian_packages }}"
openssh_fedora_packages:
  - "openssh-server"
openssh_debian_packages:
  - "openssh-server"

View File

@@ -1,33 +0,0 @@
---
openssh_sshd_enable: true
# Authentication defaults: key-based auth only, no root login.
openssh_sshd_config_pubkey_authentication: true
openssh_sshd_config_password_authentication: false
openssh_sshd_config_challenge_response_authentication: false
openssh_sshd_config_permit_root_login: false
# Limits
# A null value (~) renders the option commented-out in sshd_config
# (see tasks/configure-sshd.yml), leaving sshd's built-in default active.
openssh_sshd_config_max_sessions: ~
openssh_sshd_config_max_startups: ~
# Hardening
openssh_sshd_config_protocol: 2
openssh_sshd_config_x11_forwarding: false
openssh_sshd_config_allow_agent_forwarding: false
openssh_sshd_config_allow_tcp_forwarding: false
# Map of sshd_config directives built from the individual knobs above.
openssh_sshd_default_config:
  PubkeyAuthentication: "{{ openssh_sshd_config_pubkey_authentication }}"
  PasswordAuthentication: "{{ openssh_sshd_config_password_authentication }}"
  ChallengeResponseAuthentication: >-2
    {{ openssh_sshd_config_challenge_response_authentication }}
  PermitRootLogin: "{{ openssh_sshd_config_permit_root_login }}"
  MaxSessions: "{{ openssh_sshd_config_max_sessions }}"
  MaxStartups: "{{ openssh_sshd_config_max_startups }}"
  Protocol: "{{ openssh_sshd_config_protocol }}"
  X11Forwarding: "{{ openssh_sshd_config_x11_forwarding }}"
  AllowAgentForwarding: "{{ openssh_sshd_config_allow_agent_forwarding }}"
  AllowTcpForwarding: "{{ openssh_sshd_config_allow_tcp_forwarding }}"
# Defaults combined with the user-supplied 'openssh_sshd_config';
# user keys win on conflict.
openssh_sshd_merged_config: >-2
  {{ openssh_sshd_default_config | default({}, true)
  | combine(openssh_sshd_config | default({}, true)) }}

View File

@@ -1,2 +0,0 @@
---
# Name of the systemd unit reloaded by the 'openssh_sshd_reload' handler.
openssh_sshd_systemd_service_name: "sshd.service"

View File

@@ -1,7 +0,0 @@
---
# Reload (not restart) sshd so existing sessions survive the config change.
# Only runs on systemd-managed hosts; triggered via 'openssh_sshd_reload'.
- name: Ensure sshd is reloaded
  ansible.builtin.systemd_service:
    name: "{{ openssh_sshd_systemd_service_name }}"
    state: "reloaded"
  when: ansible_facts['service_mgr'] == 'systemd'
  listen: openssh_sshd_reload

View File

@@ -1,28 +0,0 @@
---
# Manage each directive of sshd_config in place with lineinfile: match the
# (possibly commented) existing directive and rewrite it. Booleans become
# yes/no; a null value writes the directive commented-out instead.
- name: Configure sshd
  ansible.builtin.lineinfile:
    path: "{{ openssh_sshd_config_file }}"
    regexp: "{{ openssh_sshd_config_regexp }}"
    line: "{{ openssh_sshd_config_line }}"
    firstmatch: true
    state: present
    # Reject the edit if the resulting file does not pass sshd's own check.
    validate: "sshd -Tf %s"
  loop: "{{ openssh_sshd_merged_config | dict2items }}"
  loop_control:
    loop_var: "tuple"
    label: "{{ tuple.key }}"
  notify:
    - openssh_sshd_reload
  vars:
    # Match the first occurrence of the key, commented or not.
    openssh_sshd_config_regexp: "^\\s*#?\\s*{{ tuple.key }}"
    openssh_sshd_config_line: >-2
      {{ openssh_sshd_config_line_commented }}{{ tuple.key }} {{ openssh_sshd_config_value }}
    openssh_sshd_config_value_is_none: "{{ tuple.value is none }}"
    # '#' prefix comments the directive out when its value is null.
    openssh_sshd_config_line_commented: >-2
      {{ openssh_sshd_config_value_is_none | ternary('#', '') }}
    # NOTE(review): when tuple.value is null the value still templates after
    # the commented key -- confirm the rendered text is acceptable to sshd -T.
    openssh_sshd_config_value: >-2
      {{ (tuple.value is boolean) | ternary(
        tuple.value | ternary('yes', 'no'),
        tuple.value
      )
      }}

View File

@@ -1,16 +0,0 @@
---
- name: Ensure openssh server package is {{ openssh_state }} (dnf)
ansible.builtin.dnf:
name: "{{ openssh_packages[ansible_distribution | lower] }}"
state: "{{ openssh_state }}"
when:
- ansible_facts['pkg_mgr'] in ['dnf', 'dnf5']
- ansible_distribution | lower in openssh_packages.keys()
- name: Ensure openssh server package is {{ openssh_state }} (apt)
ansible.builtin.apt:
package: "{{ openssh_packages[ansible_distribution | lower] }}"
state: "{{ openssh_state }}"
when:
- ansible_facts['pkg_mgr'] in ['apt']
- ansible_distribution | lower in openssh_packages.keys()

View File

@@ -1,15 +0,0 @@
---
- name: Ensure 'openssh_state' is valid
ansible.builtin.fail:
msg: >-2
Invalid value '{{ openssh_state }}' for 'openssh_state'.
Valid values are {{ openssh_states | join(', ') }}!
when: openssh_state not in openssh_states
- name: Ensure openssh is {{ openssh_state }}
ansible.builtin.include_tasks:
file: "install.yml"
- name: Ensure sshd is configured
ansible.builtin.include_tasks:
file: "configure-sshd.yml"

View File

@@ -1,4 +0,0 @@
---
# Valid values for 'openssh_state' (validated in tasks/main.yml).
openssh_states:
  - "present"
  - "absent"

View File

@@ -41,7 +41,7 @@
community.docker.docker_container_exec: community.docker.docker_container_exec:
container: "{{ powerdns_tsig_key_container_name }}" container: "{{ powerdns_tsig_key_container_name }}"
command: "pdnsutil list-tsig-keys" command: "pdnsutil list-tsig-keys"
delegate_to: "{{ powerdns_tsig_key_hostname | default(inventory_hostname) }}" delegate_to: "{{ powerdns_tsig_key_hostname }}"
register: powerdns_tsig_key_powerdns_info register: powerdns_tsig_key_powerdns_info
changed_when: false changed_when: false
check_mode: false check_mode: false
@@ -54,7 +54,7 @@
when: >- when: >-
(powerdns_tsig_key_name ~ '. ' ~ powerdns_tsig_key_algo ~ '. ') (powerdns_tsig_key_name ~ '. ' ~ powerdns_tsig_key_algo ~ '. ')
not in powerdns_tsig_key_powerdns_info.stdout not in powerdns_tsig_key_powerdns_info.stdout
delegate_to: "{{ powerdns_tsig_key_hostname | default(inventory_hostname) }}" delegate_to: "{{ powerdns_tsig_key_hostname }}"
register: powerdns_tsig_key_powerdns_generated_tsig_key register: powerdns_tsig_key_powerdns_generated_tsig_key
throttle: 1 throttle: 1
become: true become: true
@@ -67,7 +67,7 @@
when: >- when: >-
(powerdns_tsig_key_name ~ '. ' ~ powerdns_tsig_key_algo ~ '. ') (powerdns_tsig_key_name ~ '. ' ~ powerdns_tsig_key_algo ~ '. ')
not in powerdns_tsig_key_powerdns_info.stdout not in powerdns_tsig_key_powerdns_info.stdout
delegate_to: "{{ powerdns_tsig_key_hostname | default(inventory_hostname) }}" delegate_to: "{{ powerdns_tsig_key_hostname }}"
throttle: 1 throttle: 1
become: true become: true

View File

@@ -51,6 +51,11 @@ restic_user: root
restic_create_user: false restic_create_user: false
restic_start_job_on_unit_change: false restic_start_job_on_unit_change: false
restic_base_path: /opt/restic
resitc_credentials_base_path: "/var/lib/restic/{{ restic_job_name }}/"
restic_password_file_path: "{{ resitc_credentials_base_path }}/restic-passwd"
restic_password_file_mode: 0600 #should not be less restrictive
restic_job_name: ~ restic_job_name: ~
restic_job_description: "Restic backup job for {{ restic_job_name }}" restic_job_description: "Restic backup job for {{ restic_job_name }}"
restic_systemd_unit_naming_scheme: "restic.{{ restic_job_name }}" restic_systemd_unit_naming_scheme: "restic.{{ restic_job_name }}"

View File

@@ -1,37 +0,0 @@
#!/usr/bin/env bash
# Restic backup job wrapper: runs an optional pre-backup hook, ensures the
# repository is usable, backs up the given paths (passed as arguments),
# forgets/prunes old snapshots, exports snapshot metrics and checks the repo.
# Repository location and credentials are expected in the environment
# (RESTIC_REPOSITORY etc., typically set by the calling systemd unit).
set -euo pipefail
# Optional user-supplied hook executed before anything else.
if [[ -n ${RESTIC_PRE_BACKUP_HOOK-} ]]; then
/bin/bash -c "$RESTIC_PRE_BACKUP_HOOK"
fi
echo "List existing snapshots or attempt to initialize/unlock repository"
# 'snapshots' succeeds on a reachable, initialized repo; otherwise try to
# create it, and as a last resort drop a stale lock.
restic snapshots || restic init || restic unlock
sleep 1;
echo "Attempting to remove lock if present"
restic unlock
sleep 2
echo "Start backup on ${@:1}"
# Wait up to RESTIC_RETRY_LOCK (default 5m) for a concurrent lock to clear.
restic --verbose --retry-lock=${RESTIC_RETRY_LOCK:-5m} backup "${@:1}"
sleep 2
echo "Forget and prune old snapshots"
# Retention knobs are overridable via RESTIC_FORGET_KEEP_* env variables.
restic forget --prune --retry-lock=${RESTIC_RETRY_LOCK:-5m} \
  --keep-within=${RESTIC_FORGET_KEEP_WITHIN:-1d} \
  --keep-hourly=${RESTIC_FORGET_KEEP_HOURLY:-6} \
  --keep-daily=${RESTIC_FORGET_KEEP_DAILY:-2} \
  --keep-weekly=${RESTIC_FORGET_KEEP_WEEKLY:-7} \
  --keep-monthly=${RESTIC_FORGET_KEEP_MONTHLY:-4} \
  --verbose
sleep 2
echo "Generate snapshot metrics"
# Emit Prometheus-format snapshot metrics for node_exporter's textfile dir.
restic --json snapshots | /opt/restic-generate-snapshot-metrics.sh \
  > /var/lib/node_exporter/restic-snapshots-${RESTIC_JOBNAME:-unknown}.prom-src
sleep 2
echo "Check repository"
restic check

View File

@@ -1,12 +0,0 @@
#!/usr/bin/env bash
# Convert 'restic --json snapshots' output (read from stdin) into Prometheus
# text format: one 'restic_snapshots' sample per snapshot path, labeled with
# hostname/username/short_id/path, with the snapshot time as the value.
RESTIC_JSON=$(</dev/stdin)
# NOTE(review): the time arithmetic assumes a '+HH:MM' timezone suffix and
# only uses the hour part (+1) -- verify against actual restic timestamps,
# negative offsets and DST.
echo $RESTIC_JSON | jq -r '.[]
  | {
      "hostname": .hostname,
      "username": .username,
      "short_id": .short_id,
      "time": ((((.time | split(".")[0]) + "Z") | fromdate) - (3600 * (.time | split("+")[1] | split(":")[0] | tonumber + 1))),
      "paths": .paths[]
    } | "restic_snapshots{hostname=\"\(.hostname)\",username=\"\(.username)\",short_id=\"\(.short_id)\",paths=\"\(.paths)\"} \(.time)"'

View File

@@ -6,6 +6,23 @@
state: present state: present
system: true system: true
when: restic_create_user when: restic_create_user
register: restic_user_res
- name: Ensure base directory for restic is created
ansible.builtin.file:
path: "{{ restic_base_path }}"
state: directory
owner: "{{ restic_user_res.uid | default(root) }}"
group: "{{ restic_user_res.group | default(root) }}"
mode: "0755"
- name: Ensure credentials directory for restic is created
ansible.builtin.file:
path: "{{ resitc_credentials_base_path }}"
state: directory
owner: "{{ restic_user_res.uid | default(root) }}"
group: "{{ restic_user_res.group | default(root) }}"
mode: "0700"
- name: Ensure either backup_paths or backup_stdin_command is populated - name: Ensure either backup_paths or backup_stdin_command is populated
when: restic_backup_paths|length > 0 and restic_backup_stdin_command and false when: restic_backup_paths|length > 0 and restic_backup_stdin_command and false
@@ -42,6 +59,14 @@
state: present state: present
when: ansible_os_family not in ['RedHat', 'Debian'] when: ansible_os_family not in ['RedHat', 'Debian']
- name: Ensure restic password file is created and uptodate
copy:
dest: "{{ restic_password_file_path }}"
mode: "{{ restic_password_file_mode }}"
owner: "{{ restic_user_res.uid | default(root) }}"
group: "{{ restic_user_res.group | default(root) }}"
content: "{{ restic_repo_password }}"
- name: Ensure systemd service file for '{{ restic_job_name }}' is templated - name: Ensure systemd service file for '{{ restic_job_name }}' is templated
template: template:
dest: "/etc/systemd/system/{{ restic_systemd_unit_naming_scheme }}.service" dest: "/etc/systemd/system/{{ restic_systemd_unit_naming_scheme }}.service"

View File

@@ -8,7 +8,7 @@ WorkingDirectory={{ restic_systemd_working_directory }}
SyslogIdentifier={{ restic_systemd_syslog_identifier }} SyslogIdentifier={{ restic_systemd_syslog_identifier }}
Environment=RESTIC_REPOSITORY={{ restic_repo_url }} Environment=RESTIC_REPOSITORY={{ restic_repo_url }}
Environment=RESTIC_PASSWORD={{ restic_repo_password }} Environment=RESTIC_PASSWORD_FILE={{ restic_password_file_path }}
{% for kv in restic_complete_environment | dict2items %} {% for kv in restic_complete_environment | dict2items %}
Environment={{ kv.key }}={{ kv.value }} Environment={{ kv.key }}={{ kv.value }}
{% endfor %} {% endfor %}

View File

@@ -4,9 +4,6 @@ Description=Run {{ restic_timer_description | default(restic_job_name) }}
[Timer] [Timer]
OnCalendar={{ restic_policy.frequency }} OnCalendar={{ restic_policy.frequency }}
Unit={{ restic_systemd_unit_naming_scheme }}.service Unit={{ restic_systemd_unit_naming_scheme }}.service
{% if restic_systemd_timer_randomized_delay_sec %}
RandomizedDelaySec={{ restic_systemd_timer_randomized_delay_sec }}
{% endif %}
[Install] [Install]
WantedBy=timers.target WantedBy=timers.target

View File

@@ -1,23 +0,0 @@
# `finallycoffee.base.user` ansible role
Provision and manage user accounts on the remote host. Supports setting user
home, gecos (display name) and shell.
Warning: if a user's home directory already exists and its configured path
is changed, the role will attempt to move the home directory. Set `move_home`
to false on the user to disable this behaviour.
## Examples
```yaml
- hosts: all
roles:
- role: finallycoffee.base.user
vars:
users:
- name: root
- name: alice
- name: bob
state: present
- name: eve
state: absent
```

View File

@@ -1,2 +0,0 @@
---
# List of user definitions to provision; see README.md for supported keys.
users: []

View File

@@ -1,41 +0,0 @@
---
# Provision a single user ('user' loop variable supplied by the caller) and,
# when present, manage the user's SSH authorized keys.
- name: Ensure user '{{ user.name }}' is {{ user_state }}
  ansible.builtin.user:
    name: "{{ user.name }}"
    state: "{{ user_state }}"
    system: "{{ user.system | default(false, true) }}"
    shell: "{{ user.shell | default(omit, true) }}"
    home: "{{ user.home | default(omit, true) }}"
    create_home: "{{ user.create_home | default(true, true) }}"
    # Moving an existing home on path change is on by default (see README).
    move_home: "{{ user.move_home | default(true, true) }}"
    # Only pass a skeleton when a home is created and one was configured.
    skeleton: >-2
      {{ (user.create_home | default(true, true) and 'skeleton' in user)
      | ternary(user.skeleton | default(''), omit) }}
    # 'comment' wins over the legacy 'gecos' key.
    comment: "{{ user.comment | default(user.gecos | default(omit, true), true) }}"
  vars:
    user_state: "{{ user.state | default('present', false) }}"
- name: Ensure SSH authorized keys for '{{ user.name }}' are {{ user_state }}
  vars:
    user_state: "{{ user.state | default('present', false) }}"
  when:
    - user_state == 'present'
    - user.authorized_keys | default([]) | length > 0
  block:
    - name: Ensure .ssh directory for user '{{ user.name }}' exists
      ansible.builtin.file:
        # Falls back to /home/<name> when no explicit home is configured.
        path: "{{ user.home | default('/home/' + user.name) + '/.ssh' }}"
        state: "directory"
        owner: "{{ user.name }}"
        group: "{{ user.name }}"
        mode: "0700"
    - name: Ensure key is up to date
      ansible.posix.authorized_key:
        user: "{{ user.name }}"
        state: "{{ key.state | default('present', true) }}"
        key: "{{ key.type }} {{ key.key }}"
        # NOTE(review): 'key.comment' has no default here or in the label
        # below -- entries without a comment will fail; confirm it is a
        # required field.
        comment: "{{ user.name }}-{{ key.comment }}"
      loop: "{{ user.authorized_keys }}"
      loop_control:
        loop_var: key
        label: "{{ user.name }}-{{ key.comment }}"

Some files were not shown because too many files have changed in this diff Show More