forked from finallycoffee/base

Compare commits


27 Commits

Author SHA1 Message Date
967ebab4c1
feat(lego): Ensure certificates have correct mode and owner 2024-09-11 17:47:49 +02:00
5f4fbd492c
feat(lego): Add cap_net_bind capabilities to systemd unit 2024-09-09 13:14:35 +02:00
96f5da9bf6
feat(roles/lego): Add support for LEGO_HTTP_PORT_FALLBACK 2024-08-04 15:13:59 +02:00
2aaa529585
feat(lego): allow setting capabilites on lego binary for net_bind_service 2024-08-01 19:42:36 +02:00
8941b9357a
update(lego): bump version to 4.17.4 2024-08-01 18:56:41 +02:00
04b5837fd8
chore(lego): add README, extract systemd user pattern into defaults 2024-08-01 18:55:30 +02:00
4837172f64
fix(powerdns_tsig_key): restart powerdns after tsig key creation 2024-07-22 18:17:53 +02:00
ab7cca0947
fix(minio): correct process arguments 2024-07-21 17:11:31 +02:00
960d95a924
feat: add finallycoffee.base.lego role 2024-05-19 20:40:43 +02:00
eab7b7e915
fix(powerdns-tsig-key): fix permissions on files for nicer integration with lego 2024-05-19 20:39:05 +02:00
e7886d8c98
feat(restic): add optional hook and optional unlock 2024-05-05 16:36:30 +02:00
13d40341a0
fix(restic): change systemd service type to simple, remove wanted-by, allow post-start hooks
The old service type oneshot combined with a wanted-by of multi-user.target
can lead to an infite recursion which systemd does not warn about,
causing a service that never activates.
2024-04-14 15:14:02 +02:00
12b98487a5
update(mariadb): bump version to 10.11.6 2024-02-06 12:41:16 +01:00
2e6cb0a4d5
update(mariadb): bump version to 10.6.16 2024-02-06 12:33:19 +01:00
52d25942b4
update(nginx): bump version to 1.25.3 2024-02-06 11:05:13 +01:00
af17bea1e1
feat: add finallycoffee.base.powerdns_tsig_key role 2023-11-07 18:38:16 +01:00
52bf02e084
feat: add finallycoffee.base.dns role 2023-11-07 18:37:58 +01:00
f72dd239f9
meta: bump galaxy version to 0.0.2 2023-07-28 15:45:11 +02:00
8e11c3734b
feat(restic): add migrated role from finallycoffee.services 2023-07-28 15:36:28 +02:00
4a2321cbaa
feat(minio): add migrated role from finallycoffee.services 2023-07-28 15:36:23 +02:00
af0adbcb34
feat(elasticsearch): add migrated role from finallycoffee.services 2023-07-28 15:36:14 +02:00
f9bbcd6c71
chore(mariadb): add role-README, update collection README 2023-07-26 20:04:50 +02:00
1778ffac2a
chore(gnupg): add role-README, update collection README 2023-07-26 19:59:50 +02:00
b8585b38b7
chore(git): add role-README, update collection README 2023-07-26 19:58:21 +02:00
f9a0f92e27
feat(nginx): add role migrated from finallycoffee.services 2023-07-26 19:39:14 +02:00
048df5e3f6
chore: update ansible collection metadata 2023-07-26 19:36:40 +02:00
bee1722cea
feat(mariadb): add ansible role for deployment 2023-02-06 21:11:27 +01:00
38 changed files with 1143 additions and 5 deletions

README.md

@@ -5,6 +5,25 @@
This ansible collection provides various roles for installing
and configuring basic system utilities like gnupg, ssh etc.

- [`elasticsearch`](roles/elasticsearch/README.md): Deploy [elasticsearch](https://www.docker.elastic.co/r/elasticsearch/elasticsearch-oss),
  a popular (distributed) search and analytics engine, mostly known by its
  letter "E" in the ELK stack.
- [`git`](roles/git/README.md): configures git on the target system
- [`gnupg`](roles/gnupg/README.md): configures gnupg on the target system
- [`mariadb`](roles/mariadb/README.md): runs [MariaDB Server](https://mariadb.org/), one of the world's most popular open source relational databases
- [`minio`](roles/minio/README.md): Deploy [min.io](https://min.io), an
  s3-compatible object storage server, using docker containers.
- [`nginx`](roles/nginx/README.md): [nginx](https://www.nginx.com/),
  an advanced load balancer, webserver and reverse proxy.
- [`restic`](roles/restic/README.md): Manage backups using restic
  and persist them to a configurable backend.

## License

[CNPLv7+](LICENSE.md): Cooperative Nonviolent Public License

galaxy.yml

@@ -1,12 +1,11 @@
 namespace: finallycoffee
 name: base
-version: 0.0.1
+version: 0.0.2
 readme: README.md
 authors:
-  - Johanna Dorothea Reichmann <transcaffeine@finally.coffee>
+  - transcaffeine <transcaffeine@finally.coffee>
-description: Roles for bootstrapping tools like gpg, ssh and git
+description: Roles for base services which are common dependencies for other services like databases
-license:
-  - CNPLv7+
+license_file: LICENSE.md
 build_ignore:
   - '*.tar.gz'
 repository: https://git.finally.coffee/finallycoffee/base

meta/runtime.yml Normal file

@@ -0,0 +1,3 @@
---
requires_ansible: ">=2.12"

roles/dns/defaults/main.yml Normal file

@@ -0,0 +1,2 @@
---
dns_record_state: present

roles/dns/tasks/main.yml Normal file

@@ -0,0 +1,11 @@
---
- name: Ensure DNS records in '{{ dns_zone }}' are up to date
famedly.dns.update:
primary_master: "{{ dns_server }}"
zone: "{{ dns_zone }}"
tsig_name: "{{ dns_tsig_name }}"
tsig_algo: "{{ dns_tsig_algo }}"
tsig_key: "{{ dns_tsig_key }}"
rr_set: "{{ dns_records }}"
state: "{{ dns_record_state }}"
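
Since this role ships no README, here is a hedged usage sketch; the variable names come from the task above, while the record layout in `dns_records` is an assumption that must match the `rr_set` schema of `famedly.dns.update`:

```yaml
- hosts: localhost
  roles:
    - role: finallycoffee.base.dns
      vars:
        dns_server: ns1.example.org
        dns_zone: example.org
        dns_tsig_name: my-tsig-key
        dns_tsig_algo: hmac-sha256
        dns_tsig_key: "{{ vault_dns_tsig_key }}"
        dns_records:
          # assumed record shape, check the famedly.dns docs
          - name: www
            type: A
            ttl: 300
            content:
              - 192.0.2.10
```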

roles/elasticsearch/README.md Normal file

@@ -0,0 +1,22 @@
# `finallycoffee.base.elasticsearch`

A simple ansible role which deploys a single-node elastic container to provide
an easy way to do some indexing.

## Usage

By default, `/opt/elasticsearch/data` is used to persist data; this can be
customized using either `elasticsearch_base_path` or `elasticsearch_data_path`.

As elasticsearch can be quite memory-heavy, the maximum amount of allowed RAM
can be configured using `elasticsearch_allocated_ram_mb`, defaulting to 512 (MB).

The cluster name and discovery type can be overridden using
`elasticsearch_config_cluster_name` (default: elastic) and
`elasticsearch_config_discovery_type` (default: single-node), should one
need a multi-node elasticsearch deployment.

By default, no ports or networks are mapped, and explicit mapping using
either ports (`elasticsearch_container_ports`) or networks
(`elasticsearch_container_networks`) is required in order for other services
to use elastic.
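
A minimal playbook sketch for using the role (not part of the diff; host group, RAM size and port mapping are illustrative assumptions):

```yaml
- hosts: search
  become: true
  roles:
    - role: finallycoffee.base.elasticsearch
      vars:
        # illustrative values, see defaults/main.yml for all knobs
        elasticsearch_allocated_ram_mb: 1024
        elasticsearch_container_ports:
          - "127.0.0.1:9200:9200"
```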

roles/elasticsearch/defaults/main.yml Normal file

@@ -0,0 +1,35 @@
---
elasticsearch_version: 7.17.7
elasticsearch_base_path: /opt/elasticsearch
elasticsearch_data_path: "{{ elasticsearch_base_path }}/data"
elasticsearch_config_cluster_name: elastic
elasticsearch_config_discovery_type: single-node
elasticsearch_config_bootstrap_memory_lock: true
elasticsearch_allocated_ram_mb: 512
elasticsearch_container_image_name: docker.elastic.co/elasticsearch/elasticsearch-oss
elasticsearch_container_image_tag: ~
elasticsearch_container_image: >-
{{ elasticsearch_container_image_name }}:{{ elasticsearch_container_image_tag | default(elasticsearch_version, true) }}
elasticsearch_container_name: elasticsearch
elasticsearch_container_env:
"ES_JAVA_OPTS": "-Xms{{ elasticsearch_allocated_ram_mb }}m -Xmx{{ elasticsearch_allocated_ram_mb }}m"
"cluster.name": "{{ elasticsearch_config_cluster_name }}"
"discovery.type": "{{ elasticsearch_config_discovery_type }}"
"bootstrap.memory_lock": "{{ 'true' if elasticsearch_config_boostrap_memory_lock else 'false' }}"
elasticsearch_container_user: ~
elasticsearch_container_ports: ~
elasticsearch_container_labels:
version: "{{ elasticsearch_version }}"
elasticsearch_container_ulimits:
# - "memlock:{{ (1.5 * 1024 * elasticsearch_allocated_ram_mb) | int }}:{{ (1.5 * 1024 * elasticsearch_allocated_ram_mb) | int }}"
- "memlock:-1:-1"
elasticsearch_container_volumes:
- "{{ elasticsearch_data_path }}:/usr/share/elasticsearch/data:z"
elasticsearch_container_networks: ~
elasticsearch_container_purge_networks: ~
elasticsearch_container_restart_policy: unless-stopped

roles/elasticsearch/tasks/main.yml Normal file

@@ -0,0 +1,32 @@
---
- name: Ensure host directories are present
file:
path: "{{ item }}"
state: directory
mode: "0777"
loop:
- "{{ elasticsearch_base_path }}"
- "{{ elasticsearch_data_path }}"
- name: Ensure elastic container image is present
docker_image:
name: "{{ elasticsearch_container_image }}"
state: present
source: pull
force_source: "{{ elasticsearch_container_image_tag|default(false, true)|bool }}"
- name: Ensure elastic container is running
docker_container:
name: "{{ elasticsearch_container_name }}"
image: "{{ elasticsearch_container_image }}"
env: "{{ elasticsearch_container_env | default(omit, True) }}"
user: "{{ elasticsearch_container_user | default(omit, True) }}"
ports: "{{ elasticsearch_container_ports | default(omit, True) }}"
labels: "{{ elasticsearch_container_labels | default(omit, True) }}"
volumes: "{{ elasticsearch_container_volumes }}"
ulimits: "{{ elasticsearch_container_ulimits }}"
networks: "{{ elasticsearch_container_networks | default(omit, True) }}"
purge_networks: "{{ elasticsearch_container_purge_networks | default(omit, True) }}"
restart_policy: "{{ elasticsearch_container_restart_policy }}"
state: started

roles/git/README.md Normal file

@@ -0,0 +1,19 @@
# `finallycoffee.base.git` ansible role

This role configures git for the `ansible_user` and can be used
to (pre-)configure git.

## Examples

```yaml
git_config_user_name: # user name to use for git
git_config_user_email: # email to use for git
git_config_core_editor: vim # editor to use
git_config_credentials:
- remote_url: https://github.com
config:
username: my_github_username
```

roles/gnupg/README.md Normal file

@@ -0,0 +1,4 @@
# `finallycoffee.base.gnupg` ansible role

Configures GnuPG on the target system, including the smart-card daemon
(scdaemon), and can set up gpg-agent as an SSH agent.

roles/lego/README.md Normal file

@@ -0,0 +1,46 @@
# `finallycoffee.base.lego` ansible role

This role retrieves ACME certificates on the target host. It uses `lego` to do
so and, with systemd template units, provides an easy way to configure and
monitor the status of each certificate.

## Requirements

- `systemd`
- write access to `/tmp` to unpack the lego release tarball during installation
- write access to `/opt/lego` (or whatever `lego_base_path` is set to) for configuration and certificate data
- `become` privileges of the `ansible_user` on the target

## Usage

### Required configuration

- `lego_instance` - used to allow multiple lego jobs to run with systemd template units; recommended to be set to the CN / first SAN of the certificate
- `lego_cert_domains` - list of FQDNs to request a certificate for
- `lego_acme_account_email` - when using Let's Encrypt, a contact email is mandatory

### Proxies / Registries

The role ensures `lego` is downloaded from the GitHub release page. If you are behind a proxy or use a registry like Nexus3, set `lego_release_archive_server`.

### ACME server

By default, the Let's Encrypt staging ACME server is configured. Set `lego_acme_server_url` to one of `lego_letsencrypt_server_urls.{qa,prod}` or configure your own ACME v2 server directly.

### Certificate

To choose which domains to request a certificate for, set them as a list of SANs in `lego_cert_domains`. The default key type is EC256 and can be overridden using `lego_cert_key_type`.

Set the type of challenge in `lego_acme_challenge_type` (to either `http` or `dns`), and `lego_acme_challenge_provider` to, for example, `rfc2136` for DNS challenges using the DNSUPDATE mechanism. If your challenge needs additional data, set that in `lego_command_config` as a dictionary, analogous to `lego_base_command_config` (see [defaults](defaults/main.yml)).
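
For illustration, a DNS-01 configuration using the `rfc2136` provider could look like the sketch below. The `RFC2136_*` environment variables are lego's own provider settings, passed through `lego_environment` into the unit's environment file; all values are assumptions:

```yaml
lego_instance: example.org
lego_cert_domains:
  - example.org
  - "*.example.org"
lego_acme_account_email: acme@example.org
lego_acme_server_url: "{{ lego_letsencrypt_server_urls.prod }}"
lego_acme_challenge_type: dns
lego_acme_challenge_provider: rfc2136
# provider-specific configuration travels as environment variables
lego_environment:
  RFC2136_NAMESERVER: ns1.example.org
  RFC2136_TSIG_KEY: lego-example-org
  RFC2136_TSIG_SECRET: "{{ vault_tsig_secret }}"
```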
## Trivia

### Architecture

By default, the lego distribution for `linux` on `amd64` is downloaded. If your target needs a different architecture or target OS, adjust this in `lego_os` and `lego_architecture`, cross-checking with the [lego GitHub release page](https://github.com/go-acme/lego/releases/tag/v4.17.4) for upstream availability.

### User management

The role will attempt to create a user and group for each separate lego instance for data isolation (i.e. to avoid leaking a TSIG key from one lego instance to other services). The user and group are of the form `acme-{{ lego_instance }}`. Beware that changing this in `lego_cert_{user,group}` also requires `lego_systemd_{user,group}` to be adjusted!

### Binding to ports < 1024 (HTTP-01 challenge)

Set `lego_binary_allow_net_bind_service: true` to allow the lego binary to bind to ports in the 'privileged' (< 1024) port range.

roles/lego/defaults/main.yml Normal file

@@ -0,0 +1,71 @@
---
lego_user: "lego"
lego_version: "4.17.4"
lego_instance: default
lego_base_path: "/opt/lego"
lego_cert_user: "acme-{{ lego_instance }}"
lego_cert_group: "{{ lego_cert_user }}"
lego_cert_mode: "0640" # rw-r-----
lego_systemd_user: "acme-%i"
lego_systemd_group: "{{ lego_systemd_user }}"
lego_instance_base_path: "{{ lego_base_path }}/instances"
lego_instance_path: "{{ lego_instance_base_path }}/{{ lego_instance }}"
lego_cert_domains: []
lego_cert_key_type: ec256
lego_cert_days_to_renew: 30
lego_acme_account_email: ~
lego_acme_challenge_type: http
lego_acme_challenge_provider: ~
lego_letsencrypt_server_urls:
qa: "https://acme-staging-v02.api.letsencrypt.org/directory"
prod: "https://acme-v02.api.letsencrypt.org/directory"
lego_acme_server_url: "{{ lego_letsencrypt_server_urls.qa }}"
lego_base_environment:
LEGO_CERT_USER: "{{ lego_cert_user }}"
LEGO_CERT_GROUP: "{{ lego_cert_group }}"
LEGO_CERT_MODE: "{{ lego_cert_mode }}"
LEGO_CERT_STORE_PATH: "{{ lego_instance_path }}"
LEGO_CERT_DAYS_TO_RENEW: "{{ lego_cert_days_to_renew }}"
LEGO_KEY_TYPE: "{{ lego_cert_key_type }}"
LEGO_ACME_CHALLENGE_TYPE: "{{ lego_acme_challenge_type }}"
LEGO_ACME_SERVER: "{{ lego_acme_server_url }}"
LEGO_COMMAND_ARGS: "{{ lego_command_args }}"
lego_base_command_config:
server: "{{ lego_acme_server_url }}"
accept_tos: true
email: "{{ lego_acme_account_email }}"
path: "{{ lego_instance_path }}"
key_type: "{{ lego_cert_key_type }}"
lego_acme_challenge_config: >-
{{ {lego_acme_challenge_type: lego_acme_challenge_provider} }}
lego_systemd_unit_path: "/etc/systemd/system"
lego_systemd_template_unit_name: "lego@.service"
lego_systemd_template_unit_file: "{{ lego_systemd_template_unit_name }}.j2"
lego_systemd_service_name: "lego@{{ lego_instance }}.service"
lego_systemd_environment: >-
{{ lego_base_environment | combine(lego_environment | default({})) }}
lego_full_command_config: >-
{{ lego_base_command_config
| combine(lego_acme_challenge_config)
| combine(lego_command_config | default({})) }}
lego_systemd_timer_name: "lego-{{ lego_instance }}.timer"
lego_systemd_timer_template: lego.timer.j2
lego_systemd_timer_calendar: "*-*-* *:00/15:00"
lego_architecture: "amd64"
lego_os: "linux"
lego_binary_allow_net_bind_service: false
lego_release_archive_server: "https://github.com"
lego_release_archive_filename: >-
lego_v{{ lego_version }}_{{ lego_os }}_{{ lego_architecture }}.tar.gz
lego_release_archive_url: >-
{{ lego_release_archive_server }}/go-acme/lego/releases/download/v{{ lego_version }}/{{ lego_release_archive_filename }}
lego_release_archive_file_path: "/tmp/{{ lego_release_archive_filename }}"
lego_release_archive_path: "/tmp/lego_v{{ lego_version }}_{{ lego_os }}_{{ lego_architecture }}"

roles/lego/files/lego_run.sh Normal file

@@ -0,0 +1,22 @@
#!/usr/bin/env bash
LEGO_BINARY=$(/usr/bin/env which lego)

# If a fallback port is configured and the primary HTTP challenge port is
# already in use, switch to the fallback port before expanding the args.
if [[ -n "$LEGO_HTTP_FALLBACK_PORT" ]]; then
  if nc -z 127.0.0.1 "$LEGO_HTTP_PORT"; then
    LEGO_HTTP_PORT=$LEGO_HTTP_FALLBACK_PORT
  fi
fi

# Expand variables referenced inside LEGO_COMMAND_ARGS (such as
# $LEGO_HTTP_PORT). This is a bit icky.
LEGO_COMMAND_ARGS_EXPANDED=$(bash -c "echo $LEGO_COMMAND_ARGS")

# An empty certificate store means first issuance ('run'),
# anything else is a renewal ('renew').
FILES_IN_DIR=$(find "$LEGO_CERT_STORE_PATH/certificates" | wc -l)
if [[ $FILES_IN_DIR -gt 2 ]]; then
  $LEGO_BINARY $LEGO_COMMAND_ARGS_EXPANDED renew --days="$LEGO_CERT_DAYS_TO_RENEW"
else
  $LEGO_BINARY $LEGO_COMMAND_ARGS_EXPANDED run
fi

# Ensure mode and ownership of the issued certificate material are correct.
ls "$LEGO_CERT_STORE_PATH/certificates" | xargs -I{} -n 1 chmod "$LEGO_CERT_MODE" "$LEGO_CERT_STORE_PATH/certificates/{}"
ls "$LEGO_CERT_STORE_PATH/certificates" | xargs -I{} -n 1 chown "$LEGO_CERT_USER":"$LEGO_CERT_GROUP" "$LEGO_CERT_STORE_PATH/certificates/{}"

roles/lego/handlers/main.yml Normal file

@@ -0,0 +1,5 @@
---
- name: Ensure systemd daemon is reloaded
ansible.builtin.systemd:
daemon_reload: true
listen: systemd_reload

roles/lego/tasks/main.yml Normal file

@@ -0,0 +1,157 @@
---
- name: Ensure lego cert group is created
ansible.builtin.group:
name: "{{ lego_cert_group }}"
state: present
system: true
- name: Ensure lego cert user is created
ansible.builtin.user:
name: "{{ lego_cert_user }}"
state: present
system: true
create_home: false
groups:
- "{{ lego_cert_group }}"
append: true
- name: Ensure lego user is created
ansible.builtin.user:
name: "{{ lego_user }}"
state: present
system: true
create_home: false
groups:
- "{{ lego_cert_group }}"
append: true
- name: Ensure lego is installed
block:
- name: Check if lego is present
ansible.builtin.command:
cmd: which lego
changed_when: false
failed_when: false
register: lego_binary_info
- name: Download lego from source
ansible.builtin.get_url:
url: "{{ lego_release_archive_url }}"
url_username: "{{ lego_release_archive_url_username | default(omit) }}"
url_password: "{{ lego_release_archive_url_password | default(omit) }}"
dest: "{{ lego_release_archive_file_path }}"
when: lego_binary_info.rc != 0
- name: Create folder to uncompress into
ansible.builtin.file:
dest: "{{ lego_release_archive_path }}"
state: directory
when: lego_binary_info.rc != 0
- name: Uncompress lego source archive
ansible.builtin.unarchive:
src: "{{ lego_release_archive_file_path }}"
dest: "{{ lego_release_archive_path }}"
remote_src: true
when: lego_binary_info.rc != 0
- name: Ensure lego binary is present in PATH
ansible.builtin.copy:
src: "{{ lego_release_archive_path }}/lego"
dest: "/usr/local/bin/lego"
mode: "u+rwx,g+rx,o+rx"
remote_src: true
when: lego_binary_info.rc != 0
- name: Ensure lego is allowed to bind to ports < 1024
community.general.capabilities:
path: "/usr/local/bin/lego"
capability: "cap_net_bind_service+ep"
state: present
when: lego_binary_allow_net_bind_service
- name: Ensure intermediate data is gone
ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- "{{ lego_release_archive_path }}"
- "{{ lego_release_archive_file_path }}"
when: lego_binary_info.rc != 0
- name: Ensure lego base path exists
ansible.builtin.file:
path: "{{ lego_base_path }}"
state: directory
mode: "0755"
- name: Ensure template unit file is present
ansible.builtin.template:
src: "{{ lego_systemd_template_unit_file }}"
dest: "{{ lego_systemd_unit_path }}/{{ lego_systemd_template_unit_name }}"
notify:
- systemd_reload
- name: Ensure env file is templated
ansible.builtin.copy:
content: |+
{% for entry in lego_systemd_environment | dict2items %}
{{ entry.key }}={{ entry.value }}
{% endfor %}
dest: "{{ lego_base_path }}/{{ lego_instance }}.conf"
- name: Ensure timer unit is templated
ansible.builtin.template:
src: "{{ lego_systemd_timer_template }}"
dest: "{{ lego_systemd_unit_path }}/{{ lego_systemd_timer_name }}"
notify:
- systemd_reload
- name: Ensure handling script is templated
ansible.builtin.copy:
src: "lego_run.sh"
dest: "{{ lego_base_path }}/run.sh"
mode: "0755"
- name: Ensure per-instance base path is created
ansible.builtin.file:
path: "{{ lego_instance_path }}"
state: directory
owner: "{{ lego_cert_user }}"
group: "{{ lego_cert_group }}"
mode: "0755"
- name: Ensure per-instance sub folders are created with correct permissions
ansible.builtin.file:
path: "{{ item.path }}"
state: directory
owner: "{{ item.owner | default(lego_cert_user) }}"
group: "{{ item.group | default(lego_cert_group) }}"
mode: "{{ item.mode }}"
loop:
- path: "{{ lego_instance_path }}/secrets"
mode: "0750"
- path: "{{ lego_instance_path }}/accounts"
mode: "0770"
- path: "{{ lego_instance_path }}/certificates"
mode: "0775"
loop_control:
label: "{{ item.path }}"
- name: Ensure systemd daemon is reloaded
meta: flush_handlers
- name: Ensure systemd timer is enabled
ansible.builtin.systemd_service:
name: "{{ lego_systemd_timer_name }}"
enabled: true
- name: Ensure systemd timer is started
ansible.builtin.systemd_service:
name: "{{ lego_systemd_timer_name }}"
state: "started"
- name: Ensure systemd service is started once to obtain the certificate
ansible.builtin.systemd_service:
name: "{{ lego_systemd_service_name }}"
state: "started"

roles/lego/templates/lego.timer.j2 Normal file

@@ -0,0 +1,9 @@
[Unit]
Description=Run lego@{{ lego_instance }}.service

[Timer]
OnCalendar={{ lego_systemd_timer_calendar }}
Unit=lego@{{ lego_instance }}.service

[Install]
WantedBy=timers.target

roles/lego/templates/lego@.service.j2 Normal file

@@ -0,0 +1,14 @@
[Unit]
Description=Run lego (Let's Encrypt client in Go)

[Service]
Type=oneshot
EnvironmentFile={{ lego_base_path }}/%i.conf
User={{ lego_systemd_user }}
Group={{ lego_systemd_group }}
ExecStart={{ lego_base_path }}/run.sh
AmbientCapabilities=CAP_NET_BIND_SERVICE

[Install]
WantedBy=basic.target
DefaultInstance=default

roles/lego/vars/main.yml Normal file

@@ -0,0 +1,16 @@
---
lego_domain_command_args: >-
{% for domain in lego_cert_domains %}
--domains={{ domain }}
{%- endfor %}
lego_config_command_args: >-
{% for key in lego_full_command_config %}
--{{ key | replace("_", "-") }}
{%- if lego_full_command_config[key] != None and lego_full_command_config[key] != '' -%}
={{ lego_full_command_config[key] }}
{%- endif -%}
{%- endfor -%}
lego_command_args: "{{ lego_domain_command_args }} {{ lego_config_command_args }}"

roles/mariadb/README.md Normal file

@@ -0,0 +1,19 @@
# `finallycoffee.base.mariadb` ansible role

This role deploys a MariaDB instance in a docker container.

## Usage

The role expects the following variables to be populated with values and/or secrets:

```yaml
mariadb_root_password: # mariadb root password
mariadb_database: # name of the database to create
mariadb_username: # name of a user to auto-create and assign permissions on the mariadb_database
mariadb_password: # password of the user in mariadb_username
```

## Requirements

- Docker installed
- `python-docker` present on the target system so ansible is able to talk to the docker API
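
Putting it together, a minimal play could look like this sketch (host group and vault lookups are assumptions):

```yaml
- hosts: database
  become: true
  roles:
    - role: finallycoffee.base.mariadb
      vars:
        mariadb_root_password: "{{ vault_mariadb_root_password }}"
        mariadb_database: myapp
        mariadb_username: myapp
        mariadb_password: "{{ vault_mariadb_password }}"
```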

roles/mariadb/defaults/main.yml Normal file

@@ -0,0 +1,32 @@
---
mariadb_version: "10.11.6"
mariadb_base_path: /var/lib/mariadb
mariadb_data_path: "{{ mariadb_base_path }}/{{ mariadb_version }}"
mariadb_root_password: ~
mariadb_database: ~
mariadb_username: ~
mariadb_password: ~
mariadb_container_base_environment:
MARIADB_ROOT_PASSWORD: "{{ mariadb_root_password }}"
mariadb_container_extra_environment: {}
mariadb_container_name: mariadb
mariadb_container_image_name: docker.io/mariadb
mariadb_container_image_tag: ~
mariadb_container_image: "{{ mariadb_container_image_name }}:{{ mariadb_container_image_tag | default(mariadb_version, true) }}"
mariadb_container_base_volumes:
- "{{ mariadb_data_path }}:{{ mariadb_container_data_path }}:z"
mariadb_container_extra_volumes: []
mariadb_container_base_labels:
version: "{{ mariadb_version }}"
mariadb_container_extra_labels: {}
mariadb_container_restart_policy: "unless-stopped"
mariadb_container_environment: >-2
{{ mariadb_container_base_environment
| combine(mariadb_container_database_environment
if (mariadb_database and mariadb_username and mariadb_password)
else {}, recursive=True)
| combine(mariadb_container_extra_environment) }}

roles/mariadb/tasks/main.yml Normal file

@@ -0,0 +1,20 @@
---
- name: Ensure MariaDB container image is present on host
  community.docker.docker_image:
    name: "{{ mariadb_container_image }}"
    state: present
    source: pull

- name: Ensure MariaDB {{ mariadb_version }} is running as '{{ mariadb_container_name }}'
  community.docker.docker_container:
    name: "{{ mariadb_container_name }}"
    image: "{{ mariadb_container_image }}"
    env: "{{ mariadb_container_environment }}"
    ports: "{{ mariadb_container_ports | default(omit, true) }}"
    labels: "{{ mariadb_container_labels }}"
    volumes: "{{ mariadb_container_volumes }}"
    networks: "{{ mariadb_container_networks | default(omit, true) }}"
    etc_hosts: "{{ mariadb_container_etc_hosts | default(omit, true) }}"
    purge_networks: "{{ mariadb_container_purge_networks | default(omit, true) }}"
    restart_policy: "{{ mariadb_container_restart_policy }}"
    state: started

roles/mariadb/vars/main.yml Normal file

@@ -0,0 +1,10 @@
---
mariadb_container_database_environment:
MARIADB_DATABASE: "{{ mariadb_database }}"
MARIADB_USER: "{{ mariadb_username }}"
MARIADB_PASSWORD: "{{ mariadb_password }}"
mariadb_container_data_path: /var/lib/mysql
mariadb_container_volumes: "{{ mariadb_container_base_volumes + mariadb_container_extra_volumes }}"
mariadb_container_labels: "{{ mariadb_container_base_labels | combine(mariadb_container_extra_labels, recursive=True) }}"

roles/minio/README.md Normal file

@@ -0,0 +1,29 @@
# `finallycoffee.base.minio` ansible role

## Overview

This role deploys a [min.io](https://min.io) server (s3-compatible object storage server)
using the official docker container image.

## Configuration

The role requires setting the password for the `root` user (name can be changed by
setting `minio_root_username`) in `minio_root_password`. That user has full control
over the minio-server instance.
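
A minimal configuration could then look like the following sketch (illustrative values; the port mappings follow the docker syntax, with the API and console ports taken from this role's defaults):

```yaml
minio_root_password: "{{ vault_minio_root_password }}"
minio_container_ports:
  - "127.0.0.1:9000:9000" # S3 API
  - "127.0.0.1:8900:8900" # web console
```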
### Useful config hints

Most configuration is done by setting environment variables in
`minio_container_extra_env`, for example:

```yaml
minio_container_extra_env:
  # disable the "console" web browser UI
  MINIO_BROWSER: "off"
  # enable public prometheus metrics on `/minio/v2/metrics/cluster`
  MINIO_PROMETHEUS_AUTH_TYPE: public
```

When serving minio (or any s3-compatible server) on a "subfolder", see
https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTRedirect.html and
https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html.

roles/minio/defaults/main.yml Normal file

@@ -0,0 +1,41 @@
---
minio_user: ~
minio_data_path: /opt/minio
minio_create_user: false
minio_manage_host_filesystem: false
minio_root_username: root
minio_root_password: ~
minio_container_name: minio
minio_container_image_name: docker.io/minio/minio
minio_container_image_tag: latest
minio_container_image: "{{ minio_container_image_name }}:{{ minio_container_image_tag }}"
minio_container_networks: []
minio_container_ports: []
minio_container_base_volumes:
- "{{ minio_data_path }}:{{ minio_container_data_path }}:z"
minio_container_extra_volumes: []
minio_container_base_env:
MINIO_ROOT_USER: "{{ minio_root_username }}"
MINIO_ROOT_PASSWORD: "{{ minio_root_password }}"
minio_container_extra_env: {}
minio_container_labels: {}
minio_container_command:
- "server"
- "{{ minio_container_data_path }}"
- "--console-address"
- ":{{ minio_container_listen_port_console }}"
minio_container_restart_policy: "unless-stopped"
minio_container_image_force_source: "{{ (minio_container_image_tag == 'latest')|bool }}"
minio_container_listen_port_api: 9000
minio_container_listen_port_console: 8900
minio_container_data_path: /storage

roles/minio/tasks/main.yml Normal file

@@ -0,0 +1,37 @@
---
- name: Ensure minio run user is present
user:
name: "{{ minio_user }}"
state: present
system: yes
when: minio_create_user
- name: Ensure filesystem mounts ({{ minio_data_path }}) for container volumes are present
  file:
    path: "{{ minio_data_path }}"
    state: directory
    owner: "{{ minio_user|default(omit, True) }}"
    group: "{{ minio_user|default(omit, True) }}"
  when: minio_manage_host_filesystem
- name: Ensure container image for minio is present
community.docker.docker_image:
name: "{{ minio_container_image }}"
state: present
source: pull
force_source: "{{ minio_container_image_force_source }}"
- name: Ensure container {{ minio_container_name }} is running
  community.docker.docker_container:
name: "{{ minio_container_name }}"
image: "{{ minio_container_image }}"
volumes: "{{ minio_container_volumes }}"
env: "{{ minio_container_env }}"
labels: "{{ minio_container_labels }}"
networks: "{{ minio_container_networks }}"
ports: "{{ minio_container_ports }}"
user: "{{ minio_user|default(omit, True) }}"
command: "{{ minio_container_command }}"
restart_policy: "{{ minio_container_restart_policy }}"
state: started

roles/minio/vars/main.yml Normal file

@@ -0,0 +1,5 @@
---
minio_container_volumes: "{{ minio_container_base_volumes + minio_container_extra_volumes }}"
minio_container_env: "{{ minio_container_base_env | combine(minio_container_extra_env) }}"

roles/nginx/README.md Normal file

@@ -0,0 +1,28 @@
# `finallycoffee.base.nginx` ansible role

## Description

Runs `nginx`, an HTTP server and reverse proxy, in a docker container.

## Usage

For the role to do anything, `nginx_config` needs to be populated with the configuration for nginx.
An example would be:
```yaml
nginx_config: |+
server {
listen 80 default_server;
server_name my.server.fqdn;
location / { return 200; }
}
```
The container is named `nginx` by default; this can be overridden via `nginx_container_name`.
When running this role multiple times, `nginx_base_path` should also be changed for each run,
otherwise the configuration files collide in the filesystem.

For exposing this server to the host and/or internet, `nginx_container_ports` (port forwarding
from host to container), `nginx_container_networks` (docker networking) or `nginx_container_labels`
(for label-based routing discovery like traefik) can be used. The options correspond to the arguments
of the `community.docker.docker_container` module.
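
For example, a hedged sketch exposing the container on host port 80 and attaching it to a docker network (values illustrative, formats as accepted by `community.docker.docker_container`):

```yaml
nginx_container_ports:
  - "80:80"
nginx_container_networks:
  - name: frontend
nginx_container_labels:
  traefik.enable: "true"
```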

roles/nginx/defaults/main.yml Normal file

@@ -0,0 +1,33 @@
---
nginx_version: "1.25.3"
nginx_flavour: alpine
nginx_base_path: /opt/nginx
nginx_config_file: "{{ nginx_base_path }}/nginx.conf"
nginx_container_name: nginx
nginx_container_image_reference: >-
{{
nginx_container_image_repository
+ ':' + (nginx_container_image_tag
| default(nginx_version
+ (('-' + nginx_flavour) if nginx_flavour is defined else ''), true))
}}
nginx_container_image_repository: >-
{{
(
container_registries[nginx_container_image_registry]
| default(nginx_container_image_registry)
)
+ '/'
+ nginx_container_image_namespace | default('')
+ nginx_container_image_name
}}
nginx_container_image_registry: "docker.io"
nginx_container_image_name: "nginx"
nginx_container_image_tag: ~
nginx_container_restart_policy: "unless-stopped"
nginx_container_volumes:
- "{{ nginx_config_file }}:/etc/nginx/conf.d/nginx.conf:ro"

roles/nginx/handlers/main.yml Normal file

@@ -0,0 +1,8 @@
---
- name: Ensure nginx container '{{ nginx_container_name }}' is restarted
community.docker.docker_container:
name: "{{ nginx_container_name }}"
state: started
restart: true
listen: restart-nginx

roles/nginx/tasks/main.yml Normal file

@@ -0,0 +1,37 @@
---
- name: Ensure base path '{{ nginx_base_path }}' exists
ansible.builtin.file:
path: "{{ nginx_base_path }}"
state: directory
    mode: "0755"
- name: Ensure nginx config file is templated
ansible.builtin.copy:
dest: "{{ nginx_config_file }}"
content: "{{ nginx_config }}"
    mode: "0640"
notify:
- restart-nginx
- name: Ensure docker container image is present
community.docker.docker_image:
name: "{{ nginx_container_image_reference }}"
state: present
source: pull
force_source: "{{ nginx_container_image_tag is defined and nginx_container_image_tag | string != '' }}"
- name: Ensure docker container '{{ nginx_container_name }}' is running
community.docker.docker_container:
name: "{{ nginx_container_name }}"
image: "{{ nginx_container_image_reference }}"
env: "{{ nginx_container_env | default(omit, true) }}"
user: "{{ nginx_container_user | default(omit, true) }}"
ports: "{{ nginx_container_ports | default(omit, true) }}"
labels: "{{ nginx_container_labels | default(omit, true) }}"
volumes: "{{ nginx_container_volumes | default(omit, true) }}"
etc_hosts: "{{ nginx_container_etc_hosts | default(omit, true) }}"
networks: "{{ nginx_container_networks | default(omit, true) }}"
purge_networks: "{{ nginx_container_purge_networks | default(omit, true) }}"
restart_policy: "{{ nginx_container_restart_policy }}"
state: started

roles/powerdns_tsig_key/defaults/main.yml Normal file

@@ -0,0 +1,2 @@
---
powerdns_tsig_key_container_name: powerdns

roles/powerdns_tsig_key/tasks/main.yml Normal file

@@ -0,0 +1,104 @@
---
- name: Ensure unix group '{{ powerdns_tsig_key_path_group }}' exists
ansible.builtin.group:
name: "{{ powerdns_tsig_key_path_group }}"
state: "present"
system: true
register: powerdns_tsig_key_path_group_info
when: powerdns_tsig_key_path_group is defined
- name: Ensure unix user '{{ powerdns_tsig_key_path_owner }}' exists
ansible.builtin.user:
name: "{{ powerdns_tsig_key_path_owner }}"
state: "present"
system: true
create_home: false
groups: "{{ powerdns_tsig_key_path_group is defined | ternary([powerdns_tsig_key_path_group], omit) }}"
append: "{{ powerdns_tsig_key_path_group is defined | ternary(true, omit) }}"
register: powerdns_tsig_key_path_owner_info
when: powerdns_tsig_key_path_owner is defined
- name: Check if TSIG key is already present
ansible.builtin.stat:
path: "{{ powerdns_tsig_key_path }}"
register: powerdns_tsig_key_info
- name: Ensure TSIG key directory is present
ansible.builtin.file:
path: "{{ powerdns_tsig_key_path | dirname }}"
state: directory
owner: "{{ powerdns_tsig_key_path_owner | default(omit) }}"
group: "{{ powerdns_tsig_key_path_group | default(omit) }}"
mode: "u+rwX,g+rX"
recurse: true
- name: Ensure a TSIG key is configured and persisted
when: >-
not powerdns_tsig_key_info.stat.exists
or powerdns_tsig_key_info.stat.size == 0
block:
- name: Ensure TSIG key is not already present
community.docker.docker_container_exec:
container: "{{ powerdns_tsig_key_container_name }}"
command: "pdnsutil list-tsig-keys"
delegate_to: "{{ powerdns_tsig_key_hostname }}"
register: powerdns_tsig_key_powerdns_info
changed_when: false
check_mode: false
become: true
- name: Ensure TSIG key is generated in powerdns
community.docker.docker_container_exec:
container: "{{ powerdns_tsig_key_container_name }}"
command: "pdnsutil generate-tsig-key '{{ powerdns_tsig_key_name }}' '{{ powerdns_tsig_key_algo }}'"
when: >-
(powerdns_tsig_key_name ~ '. ' ~ powerdns_tsig_key_algo ~ '. ')
not in powerdns_tsig_key_powerdns_info.stdout
delegate_to: "{{ powerdns_tsig_key_hostname }}"
register: powerdns_tsig_key_powerdns_generated_tsig_key
throttle: 1
become: true
- name: Ensure PowerDNS is restarted
community.docker.docker_container:
name: "{{ powerdns_tsig_key_container_name }}"
state: started
restart: true
when: >-
(powerdns_tsig_key_name ~ '. ' ~ powerdns_tsig_key_algo ~ '. ')
not in powerdns_tsig_key_powerdns_info.stdout
delegate_to: "{{ powerdns_tsig_key_hostname }}"
throttle: 1
become: true
- name: Extract TSIG key into variable
ansible.builtin.set_fact:
powerdns_tsig_key_key: >-
{{
(powerdns_tsig_key_powerdns_generated_tsig_key.stdout | trim | split(' ') | list | last)
if (powerdns_tsig_key_name ~ '. ' ~ powerdns_tsig_key_algo ~ '. ')
not in powerdns_tsig_key_powerdns_info.stdout
else (powerdns_generated_tsig_key | trim | split(' ') | list | last)
}}
vars:
powerdns_generated_tsig_key: >-
{% for line in powerdns_tsig_key_powerdns_info.stdout_lines %}
{% if powerdns_tsig_key_name in line %}
{{ line }}
{% endif %}
{% endfor %}
- name: Ensure TSIG key is persisted into {{ powerdns_tsig_key_path }}
ansible.builtin.copy:
content: "{{ powerdns_tsig_key_key }}"
dest: "{{ powerdns_tsig_key_path }}"
owner: "{{ powerdns_tsig_key_path_owner | default(omit) }}"
group: "{{ powerdns_tsig_key_path_group | default(omit) }}"
mode: "0640"
- name: Ensure TSIG key permissions on {{ powerdns_tsig_key_path }} are correct
ansible.builtin.file:
path: "{{ powerdns_tsig_key_path }}"
owner: "{{ powerdns_tsig_key_path_owner | default(omit) }}"
group: "{{ powerdns_tsig_key_path_group | default(omit) }}"
mode: "u+rwX,g+rwX"
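
Since this role ships no README, here is a hedged sketch of the variables it expects (names taken from the tasks above, values hypothetical), wiring the generated key into a lego instance's secrets directory:

```yaml
powerdns_tsig_key_hostname: ns1.example.org # host running the powerdns container
powerdns_tsig_key_container_name: powerdns
powerdns_tsig_key_name: lego-example-org
powerdns_tsig_key_algo: hmac-sha256
powerdns_tsig_key_path: /opt/lego/instances/example.org/secrets/tsig.key
powerdns_tsig_key_path_owner: acme-example.org
powerdns_tsig_key_path_group: acme-example.org
```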

roles/restic/README.md Normal file

@@ -0,0 +1,77 @@
# `finallycoffee.base.restic`

Ansible role for backing up data using `restic`, utilizing `systemd` timers for scheduling.

## Overview

As restic encrypts the data before storing it, `restic_repo_password` needs
to be populated with a strong key and saved accordingly, as only this key can
be used to decrypt the data for a restore!

### Backends

#### S3 Backend

To use an `s3`-compatible backend like AWS buckets or minio, both `restic_s3_key_id`
and `restic_s3_access_key` need to be populated, and the `restic_repo_url` has the
format `s3:https://my.s3.endpoint:port/bucket-name`.

#### SFTP Backend

Using the `sftp` backend requires the configured `restic_user` to be able to
authenticate to the configured SFTP server using password-less methods like
publickey authentication. The `restic_repo_url` then follows the format
`sftp:{user}@{server}:/my-restic-repository` (or without the leading `/` for
paths relative to the `{user}`'s home directory).

### Backing up data

A job name like `$service-postgres` or similar needs to be set in `restic_job_name`,
which is used for naming the `systemd` units, their syslog identifiers etc.

If backing up filesystem locations, the paths need to be specified in
`restic_backup_paths` as a list of strings representing absolute filesystem
locations.

If backing up e.g. a database or other data which generates backups using
a command like `pg_dump`, use `restic_backup_stdin_command` (which needs to output
to `stdout`) in conjunction with `restic_backup_stdin_command_filename` to name
the resulting output (required).
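
For instance, a postgres dump piped straight into restic might be configured like this sketch (all values illustrative):

```yaml
restic_job_name: myapp-postgres
restic_repo_url: "s3:https://s3.example.org/myapp-backups"
restic_repo_password: "{{ vault_restic_repo_password }}"
restic_s3_key_id: "{{ vault_restic_s3_key_id }}"
restic_s3_access_key: "{{ vault_restic_s3_access_key }}"
restic_backup_stdin_command: "pg_dump --username=myapp myapp"
restic_backup_stdin_command_filename: "myapp.sql"
```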
### Policy

The backup policy can be adjusted by overriding the `restic_policy_keep_*`
variables, with the defaults being:

```yaml
restic_policy_keep_all_within: 1d
restic_policy_keep_hourly: 6
restic_policy_keep_daily: 2
restic_policy_keep_weekly: 7
restic_policy_keep_monthly: 4
restic_policy_backup_frequency: hourly
```

**Note:** `restic_policy_backup_frequency` must conform to `systemd`'s
`OnCalendar` syntax, which can be checked using `systemd-analyze calendar $x`.

## Role behaviour

By default, when the systemd unit for a job changes, the job is not immediately
started. This can be overridden using `restic_start_job_on_unit_change: true`,
which will immediately start the backup job if its configuration changed.

The systemd unit runs as `restic_user`, which is root by default, guaranteeing
that filesystem paths are always readable. The `restic_user` can be overridden,
but care needs to be taken to ensure the user has permission to read all the
provided filesystem paths and to execute the backup command.

If ansible should create the user, set `restic_create_user` to `true`, which
will attempt to create the `restic_user` as a system user.

### Installing

For Debian and RedHat, the role attempts to install restic using the default
package manager's ansible module (apt/dnf). For other distributions, the generic
`package` module tries to install `restic_package_name` (default: `restic`),
which can be overridden if needed.

roles/restic/defaults/main.yml Normal file

@@ -0,0 +1,37 @@
---
restic_repo_url: ~
restic_repo_password: ~
restic_s3_key_id: ~
restic_s3_access_key: ~
restic_backup_paths: []
restic_backup_stdin_command: ~
restic_backup_stdin_command_filename: ~
restic_policy_keep_all_within: 1d
restic_policy_keep_hourly: 6
restic_policy_keep_daily: 2
restic_policy_keep_weekly: 7
restic_policy_keep_monthly: 4
restic_policy_backup_frequency: hourly
restic_policy:
keep_within: "{{ restic_policy_keep_all_within }}"
hourly: "{{ restic_policy_keep_hourly }}"
daily: "{{ restic_policy_keep_daily }}"
weekly: "{{ restic_policy_keep_weekly }}"
monthly: "{{ restic_policy_keep_monthly }}"
frequency: "{{ restic_policy_backup_frequency }}"
restic_user: root
restic_create_user: false
restic_start_job_on_unit_change: false
restic_job_name: ~
restic_job_description: "Restic backup job for {{ restic_job_name }}"
restic_systemd_unit_naming_scheme: "restic.{{ restic_job_name }}"
restic_systemd_working_directory: /tmp
restic_systemd_syslog_identifier: "restic-{{ restic_job_name }}"
restic_package_name: restic

roles/restic/handlers/main.yml Normal file

@@ -0,0 +1,13 @@
---
- name: Ensure systemd daemon is reloaded
listen: reload-systemd
systemd:
daemon_reload: true
- name: Ensure systemd service for '{{ restic_job_name }}' is started immediately
listen: trigger-restic
systemd:
name: "{{ restic_systemd_unit_naming_scheme }}.service"
state: started
when: restic_start_job_on_unit_change

roles/restic/tasks/main.yml Normal file

@@ -0,0 +1,77 @@
---
- name: Ensure {{ restic_user }} system user exists
user:
name: "{{ restic_user }}"
state: present
system: true
when: restic_create_user
- name: Ensure not both backup_paths and backup_stdin_command are populated
  when: restic_backup_paths|length > 0 and restic_backup_stdin_command
  fail:
    msg: "Setting both `restic_backup_paths` and `restic_backup_stdin_command` is not supported"

- name: Ensure a filename for stdin_command backup is given
  when: restic_backup_stdin_command and not restic_backup_stdin_command_filename
  fail:
    msg: "`restic_backup_stdin_command` was set but no filename for the resulting output was supplied in `restic_backup_stdin_command_filename`"

- name: Ensure backup frequency adheres to systemd's OnCalendar syntax
  command:
    cmd: "systemd-analyze calendar {{ restic_policy.frequency }}"
  register: systemd_calendar_parse_res
  failed_when: systemd_calendar_parse_res.rc != 0
  changed_when: false
- name: Ensure restic is installed
block:
- name: Ensure restic is installed via apt
apt:
package: restic
state: latest
when: ansible_os_family == 'Debian'
- name: Ensure restic is installed via dnf
dnf:
name: restic
state: latest
when: ansible_os_family == 'RedHat'
- name: Ensure restic is installed using the auto-detected package-manager
package:
name: "{{ restic_package_name }}"
state: present
when: ansible_os_family not in ['RedHat', 'Debian']
- name: Ensure systemd service file for '{{ restic_job_name }}' is templated
template:
dest: "/etc/systemd/system/{{ restic_systemd_unit_naming_scheme }}.service"
src: restic.service.j2
owner: root
group: root
    mode: "0640"
notify:
- reload-systemd
- trigger-restic
- name: Ensure systemd timer file for '{{ restic_job_name }}' is templated
template:
dest: "/etc/systemd/system/{{ restic_systemd_unit_naming_scheme }}.timer"
src: restic.timer.j2
owner: root
group: root
    mode: "0640"
notify:
- reload-systemd
- name: Flush handlers to ensure systemd knows about '{{ restic_job_name }}'
meta: flush_handlers
- name: Ensure systemd timer for '{{ restic_job_name }}' is activated
systemd:
name: "{{ restic_systemd_unit_naming_scheme }}.timer"
enabled: true
- name: Ensure systemd timer for '{{ restic_job_name }}' is started
systemd:
name: "{{ restic_systemd_unit_naming_scheme }}.timer"
state: started

roles/restic/templates/restic.service.j2 Normal file

@@ -0,0 +1,34 @@
[Unit]
Description={{ restic_job_description }}

[Service]
Type=simple
User={{ restic_user }}
WorkingDirectory={{ restic_systemd_working_directory }}
SyslogIdentifier={{ restic_systemd_syslog_identifier }}
Environment=RESTIC_REPOSITORY={{ restic_repo_url }}
Environment=RESTIC_PASSWORD={{ restic_repo_password }}
{% if restic_s3_key_id and restic_s3_access_key %}
Environment=AWS_ACCESS_KEY_ID={{ restic_s3_key_id }}
Environment=AWS_SECRET_ACCESS_KEY={{ restic_s3_access_key }}
{% endif %}
{% if restic_unlock_before_backup | default(false) %}
ExecStartPre=-/bin/sh -c '/usr/bin/restic unlock'
{% endif %}
ExecStartPre=-/bin/sh -c '/usr/bin/restic snapshots || /usr/bin/restic init'
{% if restic_backup_pre_hook | default(false) %}
ExecStart=-{{ restic_backup_pre_hook }}
{% endif %}
{% if restic_backup_stdin_command %}
ExecStart=/bin/sh -c '{{ restic_backup_stdin_command }} | /usr/bin/restic backup --verbose --stdin --stdin-filename {{ restic_backup_stdin_command_filename }}'
{% else %}
ExecStart=/usr/bin/restic --verbose backup {{ restic_backup_paths | join(' ') }}
{% endif %}
ExecStartPost=/usr/bin/restic forget --prune --keep-within={{ restic_policy.keep_within }} --keep-hourly={{ restic_policy.hourly }} --keep-daily={{ restic_policy.daily }} --keep-weekly={{ restic_policy.weekly }} --keep-monthly={{ restic_policy.monthly }}
ExecStartPost=-/usr/bin/restic snapshots
{% if restic_backup_post_hook | default(false) %}
ExecStartPost=-{{ restic_backup_post_hook }}
{% endif %}
ExecStartPost=/usr/bin/restic check

roles/restic/templates/restic.timer.j2 Normal file

@@ -0,0 +1,9 @@
[Unit]
Description=Run {{ restic_timer_description | default(restic_job_name) }}

[Timer]
OnCalendar={{ restic_policy.frequency }}
Unit={{ restic_systemd_unit_naming_scheme }}.service

[Install]
WantedBy=timers.target