27 Commits

Author SHA1 Message Date
98be926e89 chore: add ansible collection metadata 2023-07-19 19:14:47 +02:00
0fc751f7d6 chore(nginx): add README 2023-07-19 19:10:16 +02:00
271410f4c6 update(jellyfin): bump version to 10.8.10 2023-07-19 19:09:56 +02:00
e17369ae38 chore(jellyfin): add README 2023-07-19 19:08:36 +02:00
a3c2716b7f chore(ghost): add README 2023-07-19 19:08:10 +02:00
c8802b9dbf chore(vouch-proxy)!: rename role to vouch_proxy 2023-07-19 19:07:44 +02:00
4ef456efdf chore: update metadata 2023-07-17 21:05:05 +02:00
cd31d8b6af feat(nginx): add ansible role 2023-07-16 19:01:55 +02:00
20cb480915 feat(vouch-proxy): add ansible role for vouch-proxy 2023-07-16 18:58:33 +02:00
e1756fd4b0 update(authelia): bump version to 4.37.5 2023-07-14 12:15:16 +02:00
1e12c0fcfd update(gitea): bump version to 1.19.4 2023-07-14 12:07:25 +02:00
f368107966 update(gitea): bump version to 1.18.3 2023-02-09 21:09:49 +01:00
0aa621b510 feat(ghost): add role for deployment using docker 2023-02-06 21:22:21 +01:00
0c509f6b66 update(jellyfin): bump version to 10.8.6 2022-10-31 10:56:25 +01:00
a364a58717 update(elasticsearch): bump version to 7.17.7 2022-10-31 10:55:14 +01:00
ccd50cb8cf update(gitea): bump version to 1.17.3 2022-10-31 10:53:59 +01:00
1fe626fad5 feat(elasticsearch): add role for single-node deployment using docker containers 2022-08-26 09:00:02 +02:00
d4858c89f4 feat(authelia): allow customizing authelia by mapping the asset folder from the host 2022-08-26 08:48:36 +02:00
6658d7226c fix(authelia): notifier.smtp.startup_check_address changed from bool to email address 2022-08-11 16:52:52 +02:00
36224d0531 fix(authelia): prometheus metrics config was wrongly scoped to toplevel telemetry key 2022-08-11 16:49:35 +02:00
24be358a46 chore(authelia): authentication_backend.disable_password_reset was deprecated 2022-08-11 16:48:21 +02:00
c38e4f34dd update(authelia): bump version to 4.36.4 2022-08-11 16:40:30 +02:00
10a9779996 update(gitea): bump version to 1.17.0 2022-08-06 16:32:11 +02:00
b635a00a34 update(jellyfin): bump version to 10.8.1, switch to versioned role 2022-07-17 15:59:19 +02:00
159c4fda30 update(gitea): bump version to 1.16.9 2022-07-17 15:52:51 +02:00
1e104bf1fb update(authelia): bump version to 4.36.2 2022-07-17 15:51:34 +02:00
1417564e1d update(gitea): bump version to 1.16.8 2022-06-19 11:03:11 +02:00
29 changed files with 506 additions and 49 deletions

View File

@ -11,6 +11,10 @@ concise area of concern.
- [`roles/authelia`](roles/authelia/README.md): Deploys an [authelia.com](https://www.authelia.com)
instance, an authentication provider with beta OIDC provider support.
- [`roles/elasticsearch`](roles/elasticsearch/README.md): Deploy [elasticsearch](https://www.docker.elastic.co/r/elasticsearch/elasticsearch-oss),
a popular (distributed) search and analytics engine, mostly known as the
letter "E" in the ELK-stack.
- [`roles/gitea`](roles/gitea/README.md): Deploy [gitea.io](https://gitea.io), a
lightweight, self-hosted git service.

View File

@ -3,13 +3,12 @@ name: services
version: 0.0.1
readme: README.md
authors:
- Johanna Dorothea Reichmann <transcaffeine@finallycoffee.eu>
- transcaffeine <transcaffeine@finally.coffee>
description: Various ansible roles useful for automating infrastructure
dependencies:
"community.docker": "^1.10.0"
license:
- CNPLv7+
license_file: LICENSE.md
build_ignore:
- '*.tar.gz'
repository: https://git.finallycoffee.eu/finallycoffee.eu/services
issues: https://git.finallycoffee.eu/finallycoffee.eu/services/issues
repository: https://git.finally.coffee/finallycoffee/services
issues: https://git.finally.coffee/finallycoffee/services/issues

3
meta/runtime.yml Normal file
View File

@ -0,0 +1,3 @@
---
requires_ansible: ">=2.12"

View File

@ -1,6 +1,6 @@
---
authelia_version: 4.34.6
authelia_version: 4.37.5
authelia_user: authelia
authelia_base_dir: /opt/authelia
authelia_domain: authelia.example.org
@ -8,6 +8,7 @@ authelia_domain: authelia.example.org
authelia_config_dir: "{{ authelia_base_dir }}/config"
authelia_config_file: "{{ authelia_config_dir }}/config.yaml"
authelia_data_dir: "{{ authelia_base_dir }}/data"
authelia_asset_dir: "{{ authelia_base_dir }}/assets"
authelia_sqlite_storage_file: "{{ authelia_data_dir }}/authelia.sqlite3"
authelia_notification_storage_file: "{{ authelia_data_dir }}/notifications.txt"
authelia_user_storage_file: "{{ authelia_data_dir }}/user_database.yml"
@ -42,6 +43,7 @@ authelia_config_default_redirection_url: ~
authelia_config_server_host: 0.0.0.0
authelia_config_server_port: "{{ authelia_container_listen_port }}"
authelia_config_server_path: ""
authelia_config_server_asset_path: "/config/assets/"
authelia_config_server_read_buffer_size: 4096
authelia_config_server_write_buffer_size: 4096
authelia_config_server_enable_pprof: true
@ -55,6 +57,8 @@ authelia_config_log_level: info
authelia_config_log_format: json
authelia_config_log_file_path: ~
authelia_config_log_keep_stdout: false
authelia_config_telemetry_metrics_enabled: false
authelia_config_telemetry_metrics_address: '0.0.0.0:9959'
authelia_config_totp_disable: true
authelia_config_totp_issuer: "{{ authelia_domain }}"
authelia_config_totp_algorithm: sha1
@ -76,8 +80,8 @@ authelia_config_ntp_version: 4
authelia_config_ntp_max_desync: 3s
authelia_config_ntp_disable_startup_check: false
authelia_config_ntp_disable_failure: false
authelia_config_authentication_backend_disable_reset_password: false
authelia_config_authentication_backend_refresh_interval: 5m
authelia_config_authentication_backend_password_reset_disable: false
authelia_config_authentication_backend_password_reset_custom_url: ~
authelia_config_authentication_backend_ldap_implementation: custom
authelia_config_authentication_backend_ldap_url: ldap://127.0.0.1:389
@ -153,7 +157,7 @@ authelia_config_notifier_smtp_timeout: 5s
authelia_config_notifier_smtp_sender: "Authelia on {{ authelia_domain }} <admin@{{ authelia_domain }}>"
authelia_config_notifier_smtp_identifier: "{{ authelia_domain }}"
authelia_config_notifier_smtp_subject: "[Authelia @ {{ authelia_domain }}] {title}"
authelia_config_notifier_smtp_startup_check_address: false
authelia_config_notifier_smtp_startup_check_address: "authelia-test@{{ authelia_domain }}"
authelia_config_notifier_smtp_disable_require_tls: false
authelia_config_notifier_smtp_disable_html_emails: false
authelia_config_notifier_smtp_tls_skip_verify: false

View File

@ -14,6 +14,7 @@
owner: "{{ item.owner | default(authelia_user) }}"
group: "{{ item.group | default(authelia_user) }}"
mode: "{{ item.mode | default('0750') }}"
when: item.path | default(false, true) | bool
loop:
- path: "{{ authelia_base_dir }}"
mode: "0755"
@ -21,6 +22,8 @@
mode: "0750"
- path: "{{ authelia_data_dir }}"
mode: "0750"
- path: "{{ authelia_asset_dir }}"
mode: "0750"
- name: Ensure config file is generated
copy:

View File

@ -5,6 +5,7 @@ authelia_run_group: "{{ (authelia_user_info.group) if authelia_user_info is defi
authelia_container_base_volumes: >-2
{{ [ authelia_config_file + ":/config/configuration.yml:ro"]
+ ([authelia_asset_dir + '/:' + authelia_config_server_asset_path + ':ro'] if authelia_asset_dir | default(false, true) else [])
+ ([ authelia_sqlite_storage_file + ":" + authelia_config_storage_local_path + ":z" ]
if authelia_config_storage_local_path | default(false, true) else [])
+ ([ authelia_notification_storage_file + ":" + authelia_config_notifier_filesystem_filename + ":z" ]
@ -21,6 +22,7 @@ authelia_top_level_config:
theme: "{{ authelia_config_theme }}"
jwt_secret: "{{ authelia_config_jwt_secret }}"
log: "{{ authelia_config_log }}"
telemetry: "{{ authelia_config_telemetry }}"
totp: "{{ authelia_config_totp }}"
webauthn: "{{ authelia_config_webauthn }}"
duo_api: "{{ authelia_config_duo_api }}"
@ -49,6 +51,7 @@ authelia_config_server: >-2
"host": authelia_config_server_host,
"port": authelia_config_server_port,
"path": authelia_config_server_path,
"asset_path": authelia_config_server_asset_path,
"read_buffer_size": authelia_config_server_read_buffer_size,
"write_buffer_size": authelia_config_server_write_buffer_size,
"enable_pprof": authelia_config_server_enable_pprof,
@ -72,6 +75,10 @@ authelia_config_log: >-2
| combine({"keep_stdout": authelia_config_log_keep_stdout}
if authelia_config_log_file_path | default(false, true) else {})
}}
authelia_config_telemetry:
metrics:
enabled: "{{ authelia_config_telemetry_metrics_enabled }}"
address: "{{ authelia_config_telemetry_metrics_address }}"
authelia_config_totp:
disable: "{{ authelia_config_totp_disable }}"
issuer: "{{ authelia_config_totp_issuer }}"
@ -101,7 +108,6 @@ authelia_config_ntp:
authelia_config_authentication_backend: >-2
{{
{
"disable_reset_password": authelia_config_authentication_backend_disable_reset_password,
"refresh_interval": authelia_config_authentication_backend_refresh_interval,
}
| combine({"password_reset": authelia_config_authentication_backend_password_reset}
@ -112,6 +118,7 @@ authelia_config_authentication_backend: >-2
}}
authelia_config_authentication_backend_password_reset:
custom_url: "{{ authelia_config_authentication_backend_password_reset_custom_url }}"
disable: "{{ authelia_config_authentication_backend_password_reset_disable }}"
authelia_config_authentication_backend_ldap:
implementation: "{{ authelia_config_authentication_backend_ldap_implementation }}"
url: "{{ authelia_config_authentication_backend_ldap_url }}"

View File

@ -0,0 +1,22 @@
# `finallycoffee.services.elasticsearch`
A simple ansible role which deploys a single-node elastic container to provide
an easy way to do some indexing.
## Usage
By default, `/opt/elasticsearch/data` is used to persist data; it is
customizable by using either `elasticsearch_base_path` or `elasticsearch_data_path`.
As elasticsearch can be quite memory-heavy, the maximum amount of allowed RAM
can be configured using `elasticsearch_allocated_ram_mb`, defaulting to 512 (mb).
The cluster name and discovery type can be overridden using
`elasticsearch_config_cluster_name` (default: elastic) and
`elasticsearch_config_discovery_type` (default: single-node), should one
need a multi-node elasticsearch deployment.
By default, no ports or networks are mapped, and explicit mapping using
either ports (`elasticsearch_container_ports`) or networks
(`elasticsearch_container_networks`) is required in order for other services
to use elastic.

View File

@ -0,0 +1,35 @@
---
elasticsearch_version: 7.17.7
elasticsearch_base_path: /opt/elasticsearch
elasticsearch_data_path: "{{ elasticsearch_base_path }}/data"
elasticsearch_config_cluster_name: elastic
elasticsearch_config_discovery_type: single-node
elasticsearch_config_boostrap_memory_lock: true
elasticsearch_allocated_ram_mb: 512
elasticsearch_container_image_name: docker.elastic.co/elasticsearch/elasticsearch-oss
elasticsearch_container_image_tag: ~
elasticsearch_container_image: >-
{{ elasticsearch_container_image_name }}:{{ elasticsearch_container_image_tag | default(elasticsearch_version, true) }}
elasticsearch_container_name: elasticsearch
elasticsearch_container_env:
"ES_JAVA_OPTS": "-Xms{{ elasticsearch_allocated_ram_mb }}m -Xmx{{ elasticsearch_allocated_ram_mb }}m"
"cluster.name": "{{ elasticsearch_config_cluster_name }}"
"discovery.type": "{{ elasticsearch_config_discovery_type }}"
"bootstrap.memory_lock": "{{ 'true' if elasticsearch_config_boostrap_memory_lock else 'false' }}"
elasticsearch_container_user: ~
elasticsearch_container_ports: ~
elasticsearch_container_labels:
version: "{{ elasticsearch_version }}"
elasticsearch_container_ulimits:
# - "memlock:{{ (1.5 * 1024 * elasticsearch_allocated_ram_mb) | int }}:{{ (1.5 * 1024 * elasticsearch_allocated_ram_mb) | int }}"
- "memlock:-1:-1"
elasticsearch_container_volumes:
- "{{ elasticsearch_data_path }}:/usr/share/elasticsearch/data:z"
elasticsearch_container_networks: ~
elasticsearch_container_purge_networks: ~
elasticsearch_container_restart_policy: unless-stopped

View File

@ -0,0 +1,32 @@
---
- name: Ensure host directories are present
file:
path: "{{ item }}"
state: directory
mode: "0777"
loop:
- "{{ elasticsearch_base_path }}"
- "{{ elasticsearch_data_path }}"
- name: Ensure elastic container image is present
docker_image:
name: "{{ elasticsearch_container_image }}"
state: present
source: pull
force_source: "{{ elasticsearch_container_image_tag|default(false, true)|bool }}"
- name: Ensure elastic container is running
docker_container:
name: "{{ elasticsearch_container_name }}"
image: "{{ elasticsearch_container_image }}"
env: "{{ elasticsearch_container_env | default(omit, True) }}"
user: "{{ elasticsearch_container_user | default(omit, True) }}"
ports: "{{ elasticsearch_container_ports | default(omit, True) }}"
labels: "{{ elasticsearch_container_labels | default(omit, True) }}"
volumes: "{{ elasticsearch_container_volumes }}"
ulimits: "{{ elasticsearch_container_ulimits }}"
networks: "{{ elasticsearch_container_networks | default(omit, True) }}"
purge_networks: "{{ elasticsearch_container_purge_networks | default(omit, True) }}"
restart_policy: "{{ elasticsearch_container_restart_policy }}"
state: started

18
roles/ghost/README.md Normal file
View File

@ -0,0 +1,18 @@
# `finallycoffee.services.ghost` ansible role
[Ghost](https://ghost.org/) is a self-hosted blog with rich media capabilities,
which this role deploys in a docker container.
## Requirements
Ghost requires a MySQL database (like mariadb) for storing its data, which
can be configured using the `ghost_database_(host|username|password|database)` variables.
Setting `ghost_domain` to a fully-qualified domain on which ghost should be reachable
is also required.
Ghost's configuration can be changed using the `ghost_config` variable.
Container arguments which are equivalent to `community.docker.docker_container` can be
provided in the `ghost_container_[...]` syntax (e.g. `ghost_container_ports` to expose
Ghost's port to the host).

View File

@ -0,0 +1,38 @@
---
ghost_domain: ~
ghost_version: "5.33.6"
ghost_user: ghost
ghost_user_group: ghost
ghost_base_path: /opt/ghost
ghost_data_path: "{{ ghost_base_path }}/data"
ghost_config_path: "{{ ghost_base_path }}/config"
ghost_config_file: "{{ ghost_config_path }}/ghost.env"
ghost_database_username: ghost
ghost_database_password: ~
ghost_database_database: ghost
ghost_database_host: ~
ghost_base_config:
url: "https://{{ ghost_domain }}"
database__client: mysql
database__connection__host: "{{ ghost_database_host }}"
database__connection__user: "{{ ghost_database_username }}"
database__connection__password: "{{ ghost_database_password }}"
database__connection__database: "{{ ghost_database_database }}"
ghost_config: {}
ghost_container_name: ghost
ghost_container_image_name: docker.io/ghost
ghost_container_image_tag: ~
ghost_container_base_volumes:
- "{{ ghost_data_path }}:{{ ghost_container_data_directory }}:rw"
ghost_container_extra_volumes: []
ghost_container_volumes:
"{{ ghost_container_base_volumes + ghost_container_extra_volumes }}"
ghost_container_base_labels:
version: "{{ ghost_version }}"
ghost_container_extra_labels: {}
ghost_container_restart_policy: "unless-stopped"
ghost_container_networks: ~
ghost_container_purge_networks: ~
ghost_container_etc_hosts: ~

View File

@ -0,0 +1,56 @@
---
- name: Ensure ghost group is created
ansible.builtin.group:
name: "{{ ghost_user_group }}"
state: present
system: true
- name: Ensure ghost user is created
ansible.builtin.user:
name: "{{ ghost_user }}"
groups:
- "{{ ghost_user_group }}"
append: true
state: present
system: true
- name: Ensure host paths for docker volumes exist for ghost
ansible.builtin.file:
path: "{{ item }}"
state: directory
mode: "0750"
owner: "{{ ghost_user }}"
group: "{{ ghost_user_group }}"
loop:
- "{{ ghost_base_path }}"
- "{{ ghost_data_path }}"
- "{{ ghost_config_path }}"
- name: Ensure ghost configuration file is templated
ansible.builtin.template:
src: "ghost.env.j2"
dest: "{{ ghost_config_file }}"
owner: "{{ ghost_user }}"
group: "{{ ghost_user_group }}"
mode: "0644"
- name: Ensure ghost container image is present on host
community.docker.docker_image:
name: "{{ ghost_container_image }}"
state: present
source: pull
force_source: "{{ ghost_container_image_tag is defined }}"
- name: Ensure ghost container is running
community.docker.docker_container:
name: "{{ ghost_container_name }}"
image: "{{ ghost_container_image }}"
ports: "{{ ghost_container_ports | default(omit, true) }}"
labels: "{{ ghost_container_labels }}"
volumes: "{{ ghost_container_volumes }}"
env_file: "{{ ghost_config_file }}"
etc_hosts: "{{ ghost_container_etc_hosts | default(omit, true) }}"
networks: "{{ ghost_container_networks | default(omit, true) }}"
purge_networks: "{{ ghost_container_purge_networks | default(omit, true) }}"
restart_policy: "{{ ghost_container_restart_policy }}"
state: started

View File

@ -0,0 +1,3 @@
{% for key, value in ghost_config_complete.items() %}
{{ key }}={{ value }}
{% endfor %}

10
roles/ghost/vars/main.yml Normal file
View File

@ -0,0 +1,10 @@
---
ghost_container_image: "{{ ghost_container_image_name}}:{{ ghost_container_image_tag | default(ghost_version, true) }}"
ghost_container_labels: >-2
{{ ghost_container_base_labels
| combine(ghost_container_extra_labels) }}
ghost_container_data_directory: "/var/lib/ghost/content"
ghost_config_complete: >-2
{{ ghost_base_config | combine(ghost_config, recursive=True) }}

View File

@ -1,6 +1,6 @@
---
gitea_version: "1.16.4"
gitea_version: "1.19.4"
gitea_user: git
gitea_base_path: "/opt/gitea"
gitea_data_path: "{{ gitea_base_path }}/data"

15
roles/jellyfin/README.md Normal file
View File

@ -0,0 +1,15 @@
# `finallycoffee.services.jellyfin` ansible role
This role runs [Jellyfin](https://jellyfin.org/), a free software media system,
in a docker container.
## Usage
`jellyfin_domain` contains the FQDN which jellyfin should listen to. Most configuration
is done in the software itself.
Jellyfin runs in host networking mode by default, as that is needed for some features like
network discovery with chromecasts and similar.
Media can be mounted into jellyfin using `jellyfin_media_volumes`, taking a list of strings
akin to `community.docker.docker_container`'s `volumes` key.

View File

@ -1,6 +1,7 @@
---
jellyfin_user: jellyfin
jellyfin_version: 10.8.10
jellyfin_base_path: /opt/jellyfin
jellyfin_config_path: "{{ jellyfin_base_path }}/config"
@ -10,11 +11,13 @@ jellyfin_media_volumes: []
jellyfin_container_name: jellyfin
jellyfin_container_image_name: "docker.io/jellyfin/jellyfin"
jellyfin_container_image_tag: "latest"
jellyfin_container_image_ref: "{{ jellyfin_container_image_name }}:{{ jellyfin_container_image_tag }}"
jellyfin_container_image_tag: ~
jellyfin_container_image_ref: "{{ jellyfin_container_image_name }}:{{ jellyfin_container_image_tag | default(jellyfin_version, true) }}"
jellyfin_container_network_mode: host
jellyfin_container_networks: ~
jellyfin_container_volumes: "{{ jellyfin_container_base_volumes + jellyfin_media_volumes }}"
jellyfin_container_labels: "{{ jellyfin_container_base_labels | combine(jellyfin_container_extra_labels) }}"
jellyfin_container_extra_labels: {}
jellyfin_container_restart_policy: "unless-stopped"
jellyfin_host_directories:

View File

@ -21,13 +21,18 @@
name: "{{ jellyfin_container_image_ref }}"
state: present
source: pull
force_source: "{{ jellyfin_container_image_tag in ['stable', 'unstable'] }}"
force_source: "{{ jellyfin_container_image_tag | default(false, true) }}"
register: jellyfin_container_image_pull_result
until: jellyfin_container_image_pull_result is succeeded
retries: 5
delay: 3
- name: Ensure container '{{ jellyfin_container_name }}' is running
docker_container:
name: "{{ jellyfin_container_name }}"
image: "{{ jellyfin_container_image_ref }}"
user: "{{ jellyfin_uid }}:{{ jellyfin_gid }}"
labels: "{{ jellyfin_container_labels }}"
volumes: "{{ jellyfin_container_volumes }}"
networks: "{{ jellyfin_container_networks | default(omit, True) }}"
network_mode: "{{ jellyfin_container_network_mode }}"

View File

@ -3,3 +3,6 @@
jellyfin_container_base_volumes:
- "{{ jellyfin_config_path }}:/config:z"
- "{{ jellyfin_cache_path }}:/cache:z"
jellyfin_container_base_labels:
version: "{{ jellyfin_version }}"

28
roles/nginx/README.md Normal file
View File

@ -0,0 +1,28 @@
# `finallycoffee.services.nginx` ansible role
## Description
Runs `nginx`, a HTTP reverse proxy, in a docker container.
## Usage
For the role to do anything, `nginx_config` needs to be populated with the configuration for nginx.
An example would be:
```yaml
nginx_config: |+
server {
listen 80 default_server;
server_name my.server.fqdn;
location / { return 200; }
}
```
The container is named `nginx` by default, this can be overridden in `nginx_container_name`.
When running this role multiple times, `nginx_base_path` should also be changed for each run,
otherwise the configuration files collide in the filesystem.
For exposing this server to the host and/or internet, the `nginx_container_ports` (port forwarding
from host to container), `nginx_container_networks` (docker networking) or `nginx_container_labels`
(for label-based routing discovery like traefik) can be used. The options correspond to the arguments
of the `community.docker.docker_container` module.

View File

@ -0,0 +1,33 @@
---
nginx_version: "1.25.1"
nginx_flavour: alpine
nginx_base_path: /opt/nginx
nginx_config_file: "{{ nginx_base_path }}/nginx.conf"
nginx_container_name: nginx
nginx_container_image_reference: >-
{{
nginx_container_image_repository
+ ':' + (nginx_container_image_tag
| default(nginx_version
+ (('-' + nginx_flavour) if nginx_flavour is defined else ''), true))
}}
nginx_container_image_repository: >-
{{
(
container_registries[nginx_container_image_registry]
| default(nginx_container_image_registry)
)
+ '/'
+ nginx_container_image_namespace | default('')
+ nginx_container_image_name
}}
nginx_container_image_registry: "docker.io"
nginx_container_image_name: "nginx"
nginx_container_image_tag: ~
nginx_container_restart_policy: "unless-stopped"
nginx_container_volumes:
- "{{ nginx_config_file }}:/etc/nginx/conf.d/nginx.conf:ro"

View File

@ -0,0 +1,8 @@
---
- name: Ensure nginx container '{{ nginx_container_name }}' is restarted
community.docker.docker_container:
name: "{{ nginx_container_name }}"
state: started
restart: true
listen: restart-nginx

View File

@ -0,0 +1,37 @@
---
- name: Ensure base path '{{ nginx_base_path }}' exists
ansible.builtin.file:
path: "{{ nginx_base_path }}"
state: directory
mode: 0755
- name: Ensure nginx config file is templated
ansible.builtin.copy:
dest: "{{ nginx_config_file }}"
content: "{{ nginx_config }}"
mode: 0640
notify:
- restart-nginx
- name: Ensure docker container image is present
community.docker.docker_image:
name: "{{ nginx_container_image_reference }}"
state: present
source: pull
force_source: "{{ nginx_container_image_tag is defined and nginx_container_image_tag | string != '' }}"
- name: Ensure docker container '{{ nginx_container_name }}' is running
community.docker.docker_container:
name: "{{ nginx_container_name }}"
image: "{{ nginx_container_image_reference }}"
env: "{{ nginx_container_env | default(omit, true) }}"
user: "{{ nginx_container_user | default(omit, true) }}"
ports: "{{ nginx_container_ports | default(omit, true) }}"
labels: "{{ nginx_container_labels | default(omit, true) }}"
volumes: "{{ nginx_container_volumes | default(omit, true) }}"
etc_hosts: "{{ nginx_container_etc_hosts | default(omit, true) }}"
networks: "{{ nginx_container_networks | default(omit, true) }}"
purge_networks: "{{ nginx_container_purge_networks | default(omit, true) }}"
restart_policy: "{{ nginx_container_restart_policy }}"
state: started

View File

@ -44,22 +44,14 @@
- name: Ensure systemd service file for '{{ restic_job_name }}' is templated
template:
dest: "/etc/systemd/system/{{ service.unit_name }}.service"
src: "{{ service.file }}"
dest: "/etc/systemd/system/{{ restic_systemd_unit_naming_scheme }}.service"
src: restic.service.j2
owner: root
group: root
mode: 0640
notify:
- reload-systemd
- trigger-restic
loop:
- unit_name: "{{ restic_systemd_unit_naming_scheme }}"
file: restic.service.j2
- unit_name: "{{ restic_systemd_unit_naming_scheme }}-unlock"
file: restic-unlock.service.j2
loop_control:
loop_var: service
label: "{{ service.file }}"
- name: Ensure systemd service file for '{{ restic_job_name }}' is templated
template:
@ -74,11 +66,6 @@
- name: Flush handlers to ensure systemd knows about '{{ restic_job_name }}'
meta: flush_handlers
- name: Ensure systemd service for unlocking repository for '{{ restic_job_name }}' is enabled
systemd:
name: "{{ restic_systemd_unit_naming_scheme }}-unlock.service"
enabled: true
- name: Ensure systemd timer for '{{ restic_job_name }}' is activated
systemd:
name: "{{ restic_systemd_unit_naming_scheme }}.timer"

View File

@ -1,21 +0,0 @@
[Unit]
Description={{ restic_job_description }} - Unlock after reboot job
[Service]
Type=oneshot
User={{ restic_user }}
WorkingDirectory={{ restic_systemd_working_directory }}
SyslogIdentifier={{ restic_systemd_syslog_identifier }}
Environment=RESTIC_REPOSITORY={{ restic_repo_url }}
Environment=RESTIC_PASSWORD={{ restic_repo_password }}
{% if restic_s3_key_id and restic_s3_access_key %}
Environment=AWS_ACCESS_KEY_ID={{ restic_s3_key_id }}
Environment=AWS_SECRET_ACCESS_KEY={{ restic_s3_access_key }}
{% endif %}
ExecStartPre=-/bin/sh -c '/usr/bin/restic snapshots || /usr/bin/restic init'
ExecStart=/usr/bin/restic unlock
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,16 @@
# `finallycoffee.services.vouch-proxy`
[Vouch-Proxy](https://github.com/vouch/vouch-proxy) can be used in combination with
nginx' `auth_request` module to secure web services with OIDC/OAuth. This role runs
vouch-proxy's official docker container.
## Usage
The `oauth` config section must be supplied in `vouch_proxy_oauth_config`, and the
`vouch` config section can be overridden in `vouch_proxy_vouch_config`. For possible
configuration values, see https://github.com/vouch/vouch-proxy/blob/master/config/config.yml_example.
For an example nginx config, see https://github.com/vouch/vouch-proxy#installation-and-configuration.
Passing container arguments in the same way as `community.docker.docker_container` is supported
using the `vouch_proxy_container_[...]` prefix (e.g. `vouch_proxy_container_ports`).

View File

@ -0,0 +1,51 @@
---
vouch_proxy_user: vouch-proxy
vouch_proxy_version: 0.39.0
vouch_proxy_base_path: /opt/vouch-proxy
vouch_proxy_config_path: "{{ vouch_proxy_base_path }}/config"
vouch_proxy_config_file: "{{ vouch_proxy_config_path }}/config.yaml"
vouch_proxy_container_name: vouch-proxy
vouch_proxy_container_image_name: vouch-proxy
vouch_proxy_container_image_namespace: vouch/
vouch_proxy_container_image_registry: quay.io
vouch_proxy_container_image_repository: >-
{{
(container_registries[vouch_proxy_container_image_registry] | default(vouch_proxy_container_image_registry))
+ '/' + (vouch_proxy_container_image_namespace | default(''))
+ vouch_proxy_container_image_name
}}
vouch_proxy_container_image_reference: >-
{{
vouch_proxy_container_image_repository + ':'
+ (vouch_proxy_container_image_tag | default(vouch_proxy_version))
}}
vouch_proxy_container_image_force_pull: "{{ vouch_proxy_container_image_tag is defined }}"
vouch_proxy_container_default_volumes:
- "{{ vouch_proxy_config_file }}:/config/config.yaml:ro"
vouch_proxy_container_volumes: >-
{{ vouch_proxy_container_default_volumes
+ vouch_proxy_container_extra_volumes | default([]) }}
vouch_proxy_container_restart_policy: "unless-stopped"
vouch_proxy_config_vouch_log_level: info
vouch_proxy_config_vouch_listen: 0.0.0.0
vouch_proxy_config_vouch_port: 9090
vouch_proxy_config_vouch_domains: []
vouch_proxy_config_vouch_document_root: ~
vouch_proxy_oauth_config: {}
vouch_proxy_vouch_config:
logLevel: "{{ vouch_proxy_config_vouch_log_level }}"
listen: "{{ vouch_proxy_config_vouch_listen }}"
port: "{{ vouch_proxy_config_vouch_port }}"
domains: "{{ vouch_proxy_config_vouch_domains }}"
document_root: "{{ vouch_proxy_config_vouch_document_root }}"
vouch_proxy_config:
vouch: "{{ vouch_proxy_vouch_config }}"
oauth: "{{ vouch_proxy_oauth_config }}"

View File

@ -0,0 +1,8 @@
---
- name: Ensure vouch-proxy was restarted
community.docker.docker_container:
name: "{{ vouch_proxy_container_name }}"
state: started
restart: yes
listen: restart-vouch-proxy

View File

@ -0,0 +1,50 @@
---
- name: Ensure vouch-proxy user '{{ vouch_proxy_user }}' exists
ansible.builtin.user:
name: "{{ vouch_proxy_user }}"
state: present
system: true
register: vouch_proxy_user_info
- name: Ensure mounts are created
ansible.builtin.file:
dest: "{{ item.path }}"
state: directory
owner: "{{ item.owner | default(vouch_proxy_user_info.uid | default(vouch_proxy_user)) }}"
group: "{{ item.owner | default(vouch_proxy_user_info.group | default(vouch_proxy_user)) }}"
mode: "{{ item.mode | default('0755') }}"
loop:
- path: "{{ vouch_proxy_base_path }}"
- path: "{{ vouch_proxy_config_path }}"
- name: Ensure config file is templated
ansible.builtin.copy:
dest: "{{ vouch_proxy_config_file }}"
content: "{{ vouch_proxy_config | to_nice_yaml }}"
owner: "{{ vouch_proxy_user_info.uid | default(vouch_proxy_user) }}"
group: "{{ vouch_proxy_user_info.group | default(vouch_proxy_user) }}"
mode: "0640"
notify:
- restart-vouch-proxy
- name: Ensure container image is present on host
community.docker.docker_image:
name: "{{ vouch_proxy_container_image_reference }}"
state: present
source: pull
force_source: "{{ vouch_proxy_container_image_force_pull | bool }}"
- name: Ensure container '{{ vouch_proxy_container_name }}' is running
community.docker.docker_container:
name: "{{ vouch_proxy_container_name }}"
image: "{{ vouch_proxy_container_image_reference }}"
env: "{{ vouch_proxy_container_env | default(omit) }}"
user: "{{ vouch_proxy_user_info.uid | default(vouch_proxy_user) }}"
ports: "{{ vouch_proxy_container_ports | default(omit) }}"
volumes: "{{ vouch_proxy_container_volumes | default(omit) }}"
networks: "{{ vouch_proxy_container_networks | default(omit) }}"
purge_networks: "{{ vouch_proxy_container_purge_networks | default(omit) }}"
etc_hosts: "{{ vouch_proxy_container_etc_hosts | default(omit) }}"
restart_policy: "{{ vouch_proxy_container_restart_policy }}"
state: started