1 Commit

29 changed files with 49 additions and 506 deletions

View File

@ -11,10 +11,6 @@ concise area of concern.
- [`roles/authelia`](roles/authelia/README.md): Deploys an [authelia.com](https://www.authelia.com)
instance, an authentication provider with beta OIDC provider support.
- [`roles/elasticsearch`](roles/elasticsearch/README.md): Deploy [elasticsearch](https://www.docker.elastic.co/r/elasticsearch/elasticsearch-oss),
a popular (distributed) search and analytics engine, mostly known as the
letter "E" in the ELK stack.
- [`roles/gitea`](roles/gitea/README.md): Deploy [gitea.io](https://gitea.io), a
lightweight, self-hosted git service.

View File

@ -3,12 +3,13 @@ name: services
version: 0.0.1
readme: README.md
authors:
- transcaffeine <transcaffeine@finally.coffee> - Johanna Dorothea Reichmann <transcaffeine@finallycoffee.eu>
description: Various ansible roles useful for automating infrastructure
dependencies:
"community.docker": "^1.10.0"
license_file: LICENSE.md license:
- CNPLv7+
build_ignore:
- '*.tar.gz'
repository: https://git.finally.coffee/finallycoffee/services repository: https://git.finallycoffee.eu/finallycoffee.eu/services
issues: https://git.finally.coffee/finallycoffee/services/issues issues: https://git.finallycoffee.eu/finallycoffee.eu/services/issues

View File

@ -1,3 +0,0 @@
---
requires_ansible: ">=2.12"

View File

@ -1,6 +1,6 @@
---
authelia_version: 4.37.5 authelia_version: 4.34.6
authelia_user: authelia
authelia_base_dir: /opt/authelia
authelia_domain: authelia.example.org
@ -8,7 +8,6 @@ authelia_domain: authelia.example.org
authelia_config_dir: "{{ authelia_base_dir }}/config"
authelia_config_file: "{{ authelia_config_dir }}/config.yaml"
authelia_data_dir: "{{ authelia_base_dir }}/data"
authelia_asset_dir: "{{ authelia_base_dir }}/assets"
authelia_sqlite_storage_file: "{{ authelia_data_dir }}/authelia.sqlite3"
authelia_notification_storage_file: "{{ authelia_data_dir }}/notifications.txt"
authelia_user_storage_file: "{{ authelia_data_dir }}/user_database.yml"
@ -43,7 +42,6 @@ authelia_config_default_redirection_url: ~
authelia_config_server_host: 0.0.0.0
authelia_config_server_port: "{{ authelia_container_listen_port }}"
authelia_config_server_path: ""
authelia_config_server_asset_path: "/config/assets/"
authelia_config_server_read_buffer_size: 4096
authelia_config_server_write_buffer_size: 4096
authelia_config_server_enable_pprof: true
@ -57,8 +55,6 @@ authelia_config_log_level: info
authelia_config_log_format: json
authelia_config_log_file_path: ~
authelia_config_log_keep_stdout: false
authelia_config_telemetry_metrics_enabled: false
authelia_config_telemetry_metrics_address: '0.0.0.0:9959'
authelia_config_totp_disable: true
authelia_config_totp_issuer: "{{ authelia_domain }}"
authelia_config_totp_algorithm: sha1
@ -80,8 +76,8 @@ authelia_config_ntp_version: 4
authelia_config_ntp_max_desync: 3s
authelia_config_ntp_disable_startup_check: false
authelia_config_ntp_disable_failure: false
authelia_config_authentication_backend_disable_reset_password: false
authelia_config_authentication_backend_refresh_interval: 5m
authelia_config_authentication_backend_password_reset_disable: false
authelia_config_authentication_backend_password_reset_custom_url: ~
authelia_config_authentication_backend_ldap_implementation: custom
authelia_config_authentication_backend_ldap_url: ldap://127.0.0.1:389
@ -157,7 +153,7 @@ authelia_config_notifier_smtp_timeout: 5s
authelia_config_notifier_smtp_sender: "Authelia on {{ authelia_domain }} <admin@{{ authelia_domain }}>"
authelia_config_notifier_smtp_identifier: "{{ authelia_domain }}"
authelia_config_notifier_smtp_subject: "[Authelia @ {{ authelia_domain }}] {title}"
authelia_config_notifier_smtp_startup_check_address: "authelia-test@{{ authelia_domain }}" authelia_config_notifier_smtp_startup_check_address: false
authelia_config_notifier_smtp_disable_require_tls: false
authelia_config_notifier_smtp_disable_html_emails: false
authelia_config_notifier_smtp_tls_skip_verify: false
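For orientation, a hedged sketch of overriding a few of these defaults from an inventory or playbook; the domain is a placeholder and the secret is assumed to come from a vault variable:

```yaml
authelia_domain: "auth.example.org"
# feeds jwt_secret in the generated configuration (see the role's vars file)
authelia_config_jwt_secret: "{{ vault_authelia_jwt_secret }}"
# raise log verbosity while setting the instance up
authelia_config_log_level: debug
```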

View File

@ -14,7 +14,6 @@
owner: "{{ item.owner | default(authelia_user) }}" owner: "{{ item.owner | default(authelia_user) }}"
group: "{{ item.group | default(authelia_user) }}" group: "{{ item.group | default(authelia_user) }}"
mode: "{{ item.mode | default('0750') }}" mode: "{{ item.mode | default('0750') }}"
when: item.path | default(false, true) | bool
loop:
- path: "{{ authelia_base_dir }}"
mode: "0755"
@ -22,8 +21,6 @@
mode: "0750" mode: "0750"
- path: "{{ authelia_data_dir }}" - path: "{{ authelia_data_dir }}"
mode: "0750" mode: "0750"
- path: "{{ authelia_asset_dir }}"
mode: "0750"
- name: Ensure config file is generated
copy:

View File

@ -5,7 +5,6 @@ authelia_run_group: "{{ (authelia_user_info.group) if authelia_user_info is defi
authelia_container_base_volumes: >-2
{{ [ authelia_config_file + ":/config/configuration.yml:ro"]
+ ([authelia_asset_dir + '/:' + authelia_config_server_asset_path + ':ro'] if authelia_asset_dir | default(false, true) else [])
+ ([ authelia_sqlite_storage_file + ":" + authelia_config_storage_local_path + ":z" ]
if authelia_config_storage_local_path | default(false, true) else [])
+ ([ authelia_notification_storage_file + ":" + authelia_config_notifier_filesystem_filename + ":z" ]
@ -22,7 +21,6 @@ authelia_top_level_config:
theme: "{{ authelia_config_theme }}"
jwt_secret: "{{ authelia_config_jwt_secret }}"
log: "{{ authelia_config_log }}"
telemetry: "{{ authelia_config_telemetry }}"
totp: "{{ authelia_config_totp }}"
webauthn: "{{ authelia_config_webauthn }}"
duo_api: "{{ authelia_config_duo_api }}"
@ -51,7 +49,6 @@ authelia_config_server: >-2
"host": authelia_config_server_host, "host": authelia_config_server_host,
"port": authelia_config_server_port, "port": authelia_config_server_port,
"path": authelia_config_server_path, "path": authelia_config_server_path,
"asset_path": authelia_config_server_asset_path,
"read_buffer_size": authelia_config_server_read_buffer_size, "read_buffer_size": authelia_config_server_read_buffer_size,
"write_buffer_size": authelia_config_server_write_buffer_size, "write_buffer_size": authelia_config_server_write_buffer_size,
"enable_pprof": authelia_config_server_enable_pprof, "enable_pprof": authelia_config_server_enable_pprof,
@ -75,10 +72,6 @@ authelia_config_log: >-2
| combine({"keep_stdout": authelia_config_log_keep_stdout} | combine({"keep_stdout": authelia_config_log_keep_stdout}
if authelia_config_log_file_path | default(false, true) else {}) if authelia_config_log_file_path | default(false, true) else {})
}} }}
authelia_config_telemetry:
metrics:
enabled: "{{ authelia_config_telemetry_metrics_enabled }}"
address: "{{ authelia_config_telemetry_metrics_address }}"
authelia_config_totp:
disable: "{{ authelia_config_totp_disable }}"
issuer: "{{ authelia_config_totp_issuer }}"
@ -108,6 +101,7 @@ authelia_config_ntp:
authelia_config_authentication_backend: >-2
{{
{
"disable_reset_password": authelia_config_authentication_backend_disable_reset_password,
"refresh_interval": authelia_config_authentication_backend_refresh_interval, "refresh_interval": authelia_config_authentication_backend_refresh_interval,
} }
| combine({"password_reset": authelia_config_authentication_backend_password_reset} | combine({"password_reset": authelia_config_authentication_backend_password_reset}
@ -118,7 +112,6 @@ authelia_config_authentication_backend: >-2
}}
authelia_config_authentication_backend_password_reset:
custom_url: "{{ authelia_config_authentication_backend_password_reset_custom_url }}"
disable: "{{ authelia_config_authentication_backend_password_reset_disable }}"
authelia_config_authentication_backend_ldap:
implementation: "{{ authelia_config_authentication_backend_ldap_implementation }}"
url: "{{ authelia_config_authentication_backend_ldap_url }}"

View File

@ -1,22 +0,0 @@
# `finallycoffee.services.elasticsearch`
A simple ansible role which deploys a single-node elasticsearch container to provide
an easy way to do some indexing.
## Usage
By default, `/opt/elasticsearch/data` is used to persist data; this can be
customized using either `elasticsearch_base_path` or `elasticsearch_data_path`.
As elasticsearch can be quite memory-heavy, the maximum amount of allowed RAM
can be configured using `elasticsearch_allocated_ram_mb`, defaulting to 512 (MB).
The cluster name and discovery type can be overridden using
`elasticsearch_config_cluster_name` (default: elastic) and
`elasticsearch_config_discovery_type` (default: single-node), should one
need a multi-node elasticsearch deployment.
By default, no ports or networks are mapped, and explicit mapping using
either ports (`elasticsearch_container_ports`) or networks
(`elasticsearch_container_networks`) is required in order for other services
to reach elasticsearch.
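For orientation, a minimal playbook sketch using the variables above; the memory value and port mapping are illustrative assumptions, not role defaults:

```yaml
- hosts: search
  become: true
  roles:
    - role: finallycoffee.services.elasticsearch
      vars:
        elasticsearch_allocated_ram_mb: 1024
        # no ports are mapped by default; expose the HTTP API on localhost only
        elasticsearch_container_ports:
          - "127.0.0.1:9200:9200"
```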

View File

@ -1,35 +0,0 @@
---
elasticsearch_version: 7.17.7
elasticsearch_base_path: /opt/elasticsearch
elasticsearch_data_path: "{{ elasticsearch_base_path }}/data"
elasticsearch_config_cluster_name: elastic
elasticsearch_config_discovery_type: single-node
elasticsearch_config_boostrap_memory_lock: true
elasticsearch_allocated_ram_mb: 512
elasticsearch_container_image_name: docker.elastic.co/elasticsearch/elasticsearch-oss
elasticsearch_container_image_tag: ~
elasticsearch_container_image: >-
{{ elasticsearch_container_image_name }}:{{ elasticsearch_container_image_tag | default(elasticsearch_version, true) }}
elasticsearch_container_name: elasticsearch
elasticsearch_container_env:
"ES_JAVA_OPTS": "-Xms{{ elasticsearch_allocated_ram_mb }}m -Xmx{{ elasticsearch_allocated_ram_mb }}m"
"cluster.name": "{{ elasticsearch_config_cluster_name }}"
"discovery.type": "{{ elasticsearch_config_discovery_type }}"
"bootstrap.memory_lock": "{{ 'true' if elasticsearch_config_boostrap_memory_lock else 'false' }}"
elasticsearch_container_user: ~
elasticsearch_container_ports: ~
elasticsearch_container_labels:
version: "{{ elasticsearch_version }}"
elasticsearch_container_ulimits:
# - "memlock:{{ (1.5 * 1024 * elasticsearch_allocated_ram_mb) | int }}:{{ (1.5 * 1024 * elasticsearch_allocated_ram_mb) | int }}"
- "memlock:-1:-1"
elasticsearch_container_volumes:
- "{{ elasticsearch_data_path }}:/usr/share/elasticsearch/data:z"
elasticsearch_container_networks: ~
elasticsearch_container_purge_networks: ~
elasticsearch_container_restart_policy: unless-stopped

View File

@ -1,32 +0,0 @@
---
- name: Ensure host directories are present
file:
path: "{{ item }}"
state: directory
mode: "0777"
loop:
- "{{ elasticsearch_base_path }}"
- "{{ elasticsearch_data_path }}"
- name: Ensure elastic container image is present
docker_image:
name: "{{ elasticsearch_container_image }}"
state: present
source: pull
force_source: "{{ elasticsearch_container_image_tag|default(false, true)|bool }}"
- name: Ensure elastic container is running
docker_container:
name: "{{ elasticsearch_container_name }}"
image: "{{ elasticsearch_container_image }}"
env: "{{ elasticsearch_container_env | default(omit, True) }}"
user: "{{ elasticsearch_container_user | default(omit, True) }}"
ports: "{{ elasticsearch_container_ports | default(omit, True) }}"
labels: "{{ elasticsearch_container_labels | default(omit, True) }}"
volumes: "{{ elasticsearch_container_volumes }}"
ulimits: "{{ elasticsearch_container_ulimits }}"
networks: "{{ elasticsearch_container_networks | default(omit, True) }}"
purge_networks: "{{ elasticsearch_container_purge_networks | default(omit, True) }}"
restart_policy: "{{ elasticsearch_container_restart_policy }}"
state: started

View File

@ -1,18 +0,0 @@
# `finallycoffee.services.ghost` ansible role
[Ghost](https://ghost.org/) is a self-hosted blog with rich media capabilities,
which this role deploys in a docker container.
## Requirements
Ghost requires a MySQL database (like MariaDB) for storing its data, which
can be configured using the `ghost_database_(host|username|password|database)` variables.
Setting `ghost_domain` to a fully-qualified domain on which ghost should be reachable
is also required.
Ghost's configuration can be changed using the `ghost_config` variable.
Container arguments which are equivalent to `community.docker.docker_container` can be
provided in the `ghost_container_[...]` syntax (e.g. `ghost_container_ports` to expose
Ghost's port to the host).
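A hedged sketch of the variables described above; hostnames and credentials are placeholders, and the vault reference is an assumption:

```yaml
ghost_domain: "blog.example.org"
ghost_database_host: "mariadb"
ghost_database_username: "ghost"
ghost_database_password: "{{ vault_ghost_database_password }}"
ghost_database_database: "ghost"
# extra ghost configuration is merged over the base config
ghost_config:
  mail__transport: "SMTP"
# expose ghost's default port 2368 to the host
ghost_container_ports:
  - "127.0.0.1:2368:2368"
```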

View File

@ -1,38 +0,0 @@
---
ghost_domain: ~
ghost_version: "5.33.6"
ghost_user: ghost
ghost_user_group: ghost
ghost_base_path: /opt/ghost
ghost_data_path: "{{ ghost_base_path }}/data"
ghost_config_path: "{{ ghost_base_path }}/config"
ghost_config_file: "{{ ghost_config_path }}/ghost.env"
ghost_database_username: ghost
ghost_database_password: ~
ghost_database_database: ghost
ghost_database_host: ~
ghost_base_config:
url: "https://{{ ghost_domain }}"
database__client: mysql
database__connection__host: "{{ ghost_database_host }}"
database__connection__user: "{{ ghost_database_username }}"
database__connection__password: "{{ ghost_database_password }}"
database__connection__database: "{{ ghost_database_database }}"
ghost_config: {}
ghost_container_name: ghost
ghost_container_image_name: docker.io/ghost
ghost_container_image_tag: ~
ghost_container_base_volumes:
- "{{ ghost_data_path }}:{{ ghost_container_data_directory }}:rw"
ghost_container_extra_volumes: []
ghost_container_volumes:
"{{ ghost_container_base_volumes + ghost_container_extra_volumes }}"
ghost_container_base_labels:
version: "{{ ghost_version }}"
ghost_container_extra_labels: {}
ghost_container_restart_policy: "unless-stopped"
ghost_container_networks: ~
ghost_container_purge_networks: ~
ghost_container_etc_hosts: ~

View File

@ -1,56 +0,0 @@
---
- name: Ensure ghost group is created
ansible.builtin.group:
name: "{{ ghost_user_group }}"
state: present
system: true
- name: Ensure ghost user is created
ansible.builtin.user:
name: "{{ ghost_user }}"
groups:
- "{{ ghost_user_group }}"
append: true
state: present
system: true
- name: Ensure host paths for docker volumes exist for ghost
ansible.builtin.file:
path: "{{ item }}"
state: directory
mode: "0750"
owner: "{{ ghost_user }}"
group: "{{ ghost_user_group }}"
loop:
- "{{ ghost_base_path }}"
- "{{ ghost_data_path }}"
- "{{ ghost_config_path }}"
- name: Ensure ghost configuration file is templated
ansible.builtin.template:
src: "ghost.env.j2"
dest: "{{ ghost_config_file }}"
owner: "{{ ghost_user }}"
group: "{{ ghost_user_group }}"
mode: "0644"
- name: Ensure ghost container image is present on host
community.docker.docker_image:
name: "{{ ghost_container_image }}"
state: present
source: pull
force_source: "{{ ghost_container_image_tag is defined }}"
- name: Ensure ghost container is running
community.docker.docker_container:
name: "{{ ghost_container_name }}"
image: "{{ ghost_container_image }}"
ports: "{{ ghost_container_ports | default(omit, true) }}"
labels: "{{ ghost_container_labels }}"
volumes: "{{ ghost_container_volumes }}"
env_file: "{{ ghost_config_file }}"
etc_hosts: "{{ ghost_container_etc_hosts | default(omit, true) }}"
networks: "{{ ghost_container_networks | default(omit, true) }}"
purge_networks: "{{ ghost_container_purge_networks | default(omit, true) }}"
restart_policy: "{{ ghost_container_restart_policy }}"
state: started

View File

@ -1,3 +0,0 @@
{% for key, value in ghost_config_complete.items() %}
{{ key }}={{ value }}
{% endfor %}

View File

@ -1,10 +0,0 @@
---
ghost_container_image: "{{ ghost_container_image_name}}:{{ ghost_container_image_tag | default(ghost_version, true) }}"
ghost_container_labels: >-2
{{ ghost_container_base_labels
| combine(ghost_container_extra_labels) }}
ghost_container_data_directory: "/var/lib/ghost/content"
ghost_config_complete: >-2
{{ ghost_base_config | combine(ghost_config, recursive=True) }}

View File

@ -1,6 +1,6 @@
---
gitea_version: "1.19.4" gitea_version: "1.16.4"
gitea_user: git
gitea_base_path: "/opt/gitea"
gitea_data_path: "{{ gitea_base_path }}/data"

View File

@ -1,15 +0,0 @@
# `finallycoffee.services.jellyfin` ansible role
This role runs [Jellyfin](https://jellyfin.org/), a free software media system,
in a docker container.
## Usage
`jellyfin_domain` contains the FQDN on which jellyfin should listen. Most configuration
is done in the software itself.
Jellyfin runs in host networking mode by default, as that is needed for some features like
network discovery with Chromecasts and similar devices.
Media can be mounted into jellyfin using `jellyfin_media_volumes`, taking a list of strings
akin to `community.docker.docker_container`'s `volumes` key.
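A short sketch of the variables mentioned above; the domain and paths are placeholders:

```yaml
jellyfin_domain: "media.example.org"
# entries use community.docker.docker_container's volume syntax
jellyfin_media_volumes:
  - "/mnt/storage/movies:/media/movies:ro"
  - "/mnt/storage/music:/media/music:ro"
```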

View File

@ -1,7 +1,6 @@
---
jellyfin_user: jellyfin
jellyfin_version: 10.8.10
jellyfin_base_path: /opt/jellyfin
jellyfin_config_path: "{{ jellyfin_base_path }}/config"
@ -11,13 +10,11 @@ jellyfin_media_volumes: []
jellyfin_container_name: jellyfin
jellyfin_container_image_name: "docker.io/jellyfin/jellyfin"
jellyfin_container_image_tag: ~ jellyfin_container_image_tag: "latest"
jellyfin_container_image_ref: "{{ jellyfin_container_image_name }}:{{ jellyfin_container_image_tag | default(jellyfin_version, true) }}" jellyfin_container_image_ref: "{{ jellyfin_container_image_name }}:{{ jellyfin_container_image_tag }}"
jellyfin_container_network_mode: host
jellyfin_container_networks: ~
jellyfin_container_volumes: "{{ jellyfin_container_base_volumes + jellyfin_media_volumes }}"
jellyfin_container_labels: "{{ jellyfin_container_base_labels | combine(jellyfin_container_extra_labels) }}"
jellyfin_container_extra_labels: {}
jellyfin_container_restart_policy: "unless-stopped"
jellyfin_host_directories:

View File

@ -21,18 +21,13 @@
name: "{{ jellyfin_container_image_ref }}" name: "{{ jellyfin_container_image_ref }}"
state: present state: present
source: pull source: pull
force_source: "{{ jellyfin_container_image_tag | default(false, true) }}" force_source: "{{ jellyfin_container_image_tag in ['stable', 'unstable'] }}"
register: jellyfin_container_image_pull_result
until: jellyfin_container_image_pull_result is succeeded
retries: 5
delay: 3
- name: Ensure container '{{ jellyfin_container_name }}' is running
docker_container:
name: "{{ jellyfin_container_name }}"
image: "{{ jellyfin_container_image_ref }}"
user: "{{ jellyfin_uid }}:{{ jellyfin_gid }}"
labels: "{{ jellyfin_container_labels }}"
volumes: "{{ jellyfin_container_volumes }}" volumes: "{{ jellyfin_container_volumes }}"
networks: "{{ jellyfin_container_networks | default(omit, True) }}" networks: "{{ jellyfin_container_networks | default(omit, True) }}"
network_mode: "{{ jellyfin_container_network_mode }}" network_mode: "{{ jellyfin_container_network_mode }}"

View File

@ -3,6 +3,3 @@
jellyfin_container_base_volumes:
- "{{ jellyfin_config_path }}:/config:z"
- "{{ jellyfin_cache_path }}:/cache:z"
jellyfin_container_base_labels:
version: "{{ jellyfin_version }}"

View File

@ -1,28 +0,0 @@
# `finallycoffee.services.nginx` ansible role
## Description
Runs `nginx`, an HTTP reverse proxy, in a docker container.
## Usage
For the role to do anything, `nginx_config` needs to be populated with the configuration for nginx.
An example would be:
```yaml
nginx_config: |+
server {
listen 80 default_server;
server_name my.server.fqdn;
location / { return 200; }
}
```
The container is named `nginx` by default; this can be overridden in `nginx_container_name`.
When running this role multiple times, `nginx_base_path` should also be changed for each run,
otherwise the configuration files collide in the filesystem.
For exposing this server to the host and/or internet, the `nginx_container_ports` (port forwarding
from host to container), `nginx_container_networks` (docker networking) or `nginx_container_labels`
(for label-based routing discovery like traefik) can be used. The options correspond to the arguments
of the `community.docker.docker_container` module.
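As an illustration, a hedged sketch of exposing the container; the port mapping and network name are assumptions:

```yaml
# forward port 80 on the host into the container
nginx_container_ports:
  - "80:80"
# or attach the container to an existing docker network instead
nginx_container_networks:
  - name: "frontend"
```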

View File

@ -1,33 +0,0 @@
---
nginx_version: "1.25.1"
nginx_flavour: alpine
nginx_base_path: /opt/nginx
nginx_config_file: "{{ nginx_base_path }}/nginx.conf"
nginx_container_name: nginx
nginx_container_image_reference: >-
{{
nginx_container_image_repository
+ ':' + (nginx_container_image_tag
| default(nginx_version
+ (('-' + nginx_flavour) if nginx_flavour is defined else ''), true))
}}
nginx_container_image_repository: >-
{{
(
container_registries[nginx_container_image_registry]
| default(nginx_container_image_registry)
)
+ '/'
+ nginx_container_image_namespace | default('')
+ nginx_container_image_name
}}
nginx_container_image_registry: "docker.io"
nginx_container_image_name: "nginx"
nginx_container_image_tag: ~
nginx_container_restart_policy: "unless-stopped"
nginx_container_volumes:
- "{{ nginx_config_file }}:/etc/nginx/conf.d/nginx.conf:ro"

View File

@ -1,8 +0,0 @@
---
- name: Ensure nginx container '{{ nginx_container_name }}' is restarted
community.docker.docker_container:
name: "{{ nginx_container_name }}"
state: started
restart: true
listen: restart-nginx

View File

@ -1,37 +0,0 @@
---
- name: Ensure base path '{{ nginx_base_path }}' exists
ansible.builtin.file:
path: "{{ nginx_base_path }}"
state: directory
mode: 0755
- name: Ensure nginx config file is templated
ansible.builtin.copy:
dest: "{{ nginx_config_file }}"
content: "{{ nginx_config }}"
mode: 0640
notify:
- restart-nginx
- name: Ensure docker container image is present
community.docker.docker_image:
name: "{{ nginx_container_image_reference }}"
state: present
source: pull
force_source: "{{ nginx_container_image_tag is defined and nginx_container_image_tag | string != '' }}"
- name: Ensure docker container '{{ nginx_container_name }}' is running
community.docker.docker_container:
name: "{{ nginx_container_name }}"
image: "{{ nginx_container_image_reference }}"
env: "{{ nginx_container_env | default(omit, true) }}"
user: "{{ nginx_container_user | default(omit, true) }}"
ports: "{{ nginx_container_ports | default(omit, true) }}"
labels: "{{ nginx_container_labels | default(omit, true) }}"
volumes: "{{ nginx_container_volumes | default(omit, true) }}"
etc_hosts: "{{ nginx_container_etc_hosts | default(omit, true) }}"
networks: "{{ nginx_container_networks | default(omit, true) }}"
purge_networks: "{{ nginx_container_purge_networks | default(omit, true) }}"
restart_policy: "{{ nginx_container_restart_policy }}"
state: started

View File

@ -44,14 +44,22 @@
- name: Ensure systemd service file for '{{ restic_job_name }}' is templated
template:
dest: "/etc/systemd/system/{{ restic_systemd_unit_naming_scheme }}.service" dest: "/etc/systemd/system/{{ service.unit_name }}.service"
src: restic.service.j2 src: "{{ service.file }}"
owner: root
group: root
mode: 0640
notify:
- reload-systemd
- trigger-restic
loop:
- unit_name: "{{ restic_systemd_unit_naming_scheme }}"
file: restic.service.j2
- unit_name: "{{ restic_systemd_unit_naming_scheme }}-unlock"
file: restic-unlock.service.j2
loop_control:
loop_var: service
label: "{{ service.file }}"
- name: Ensure systemd service file for '{{ restic_job_name }}' is templated
template:
@ -66,6 +74,11 @@
- name: Flush handlers to ensure systemd knows about '{{ restic_job_name }}'
meta: flush_handlers
- name: Ensure systemd service for unlocking repository for '{{ restic_job_name }}' is enabled
systemd:
name: "{{ restic_systemd_unit_naming_scheme }}-unlock.service"
enabled: true
- name: Ensure systemd timer for '{{ restic_job_name }}' is activated
systemd:
name: "{{ restic_systemd_unit_naming_scheme }}.timer"

View File

@ -0,0 +1,21 @@
[Unit]
Description={{ restic_job_description }} - Unlock after reboot job
[Service]
Type=oneshot
User={{ restic_user }}
WorkingDirectory={{ restic_systemd_working_directory }}
SyslogIdentifier={{ restic_systemd_syslog_identifier }}
Environment=RESTIC_REPOSITORY={{ restic_repo_url }}
Environment=RESTIC_PASSWORD={{ restic_repo_password }}
{% if restic_s3_key_id and restic_s3_access_key %}
Environment=AWS_ACCESS_KEY_ID={{ restic_s3_key_id }}
Environment=AWS_SECRET_ACCESS_KEY={{ restic_s3_access_key }}
{% endif %}
ExecStartPre=-/bin/sh -c '/usr/bin/restic snapshots || /usr/bin/restic init'
ExecStart=/usr/bin/restic unlock
[Install]
WantedBy=multi-user.target

View File

@ -1,16 +0,0 @@
# `finallycoffee.services.vouch-proxy`
[Vouch-Proxy](https://github.com/vouch/vouch-proxy) can be used in combination with
nginx's `auth_request` module to secure web services with OIDC/OAuth. This role runs
vouch-proxy's official docker container.
## Usage
The `oauth` config section must be supplied in `vouch_proxy_oauth_config`, and the
`vouch` config section can be overridden in `vouch_proxy_vouch_config`. For possible
configuration values, see https://github.com/vouch/vouch-proxy/blob/master/config/config.yml_example.
For an example nginx config, see https://github.com/vouch/vouch-proxy#installation-and-configuration.
Passing container arguments in the same way as `community.docker.docker_container` is supported
using the `vouch_proxy_container_[...]` prefix (e.g. `vouch_proxy_container_ports`).
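A hedged sketch of a generic OIDC provider configuration; all URLs, domains and credentials are placeholders, and the exact keys should be checked against the upstream example config linked above:

```yaml
vouch_proxy_config_vouch_domains:
  - "example.org"
vouch_proxy_oauth_config:
  provider: oidc
  client_id: "vouch"
  client_secret: "{{ vault_vouch_proxy_client_secret }}"
  auth_url: "https://auth.example.org/api/oidc/authorization"
  token_url: "https://auth.example.org/api/oidc/token"
  user_info_url: "https://auth.example.org/api/oidc/userinfo"
  scopes:
    - openid
    - email
  callback_url: "https://vouch.example.org/auth"
```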

View File

@ -1,51 +0,0 @@
---
vouch_proxy_user: vouch-proxy
vouch_proxy_version: 0.39.0
vouch_proxy_base_path: /opt/vouch-proxy
vouch_proxy_config_path: "{{ vouch_proxy_base_path }}/config"
vouch_proxy_config_file: "{{ vouch_proxy_config_path }}/config.yaml"
vouch_proxy_container_name: vouch-proxy
vouch_proxy_container_image_name: vouch-proxy
vouch_proxy_container_image_namespace: vouch/
vouch_proxy_container_image_registry: quay.io
vouch_proxy_container_image_repository: >-
{{
(container_registries[vouch_proxy_container_image_registry] | default(vouch_proxy_container_image_registry))
+ '/' + (vouch_proxy_container_image_namespace | default(''))
+ vouch_proxy_container_image_name
}}
vouch_proxy_container_image_reference: >-
{{
vouch_proxy_container_image_repository + ':'
+ (vouch_proxy_container_image_tag | default(vouch_proxy_version))
}}
vouch_proxy_container_image_force_pull: "{{ vouch_proxy_container_image_tag is defined }}"
vouch_proxy_container_default_volumes:
- "{{ vouch_proxy_config_file }}:/config/config.yaml:ro"
vouch_proxy_container_volumes: >-
{{ vouch_proxy_container_default_volumes
+ vouch_proxy_container_extra_volumes | default([]) }}
vouch_proxy_container_restart_policy: "unless-stopped"
vouch_proxy_config_vouch_log_level: info
vouch_proxy_config_vouch_listen: 0.0.0.0
vouch_proxy_config_vouch_port: 9090
vouch_proxy_config_vouch_domains: []
vouch_proxy_config_vouch_document_root: ~
vouch_proxy_oauth_config: {}
vouch_proxy_vouch_config:
logLevel: "{{ vouch_proxy_config_vouch_log_level }}"
listen: "{{ vouch_proxy_config_vouch_listen }}"
port: "{{ vouch_proxy_config_vouch_port }}"
domains: "{{ vouch_proxy_config_vouch_domains }}"
document_root: "{{ vouch_proxy_config_vouch_document_root }}"
vouch_proxy_config:
vouch: "{{ vouch_proxy_vouch_config }}"
oauth: "{{ vouch_proxy_oauth_config }}"

View File

@ -1,8 +0,0 @@
---
- name: Ensure vouch-proxy is restarted
community.docker.docker_container:
name: "{{ vouch_proxy_container_name }}"
state: started
restart: yes
listen: restart-vouch-proxy

View File

@ -1,50 +0,0 @@
---
- name: Ensure vouch-proxy user '{{ vouch_proxy_user }}' exists
ansible.builtin.user:
name: "{{ vouch_proxy_user }}"
state: present
system: true
register: vouch_proxy_user_info
- name: Ensure mounts are created
ansible.builtin.file:
dest: "{{ item.path }}"
state: directory
owner: "{{ item.owner | default(vouch_proxy_user_info.uid | default(vouch_proxy_user)) }}"
group: "{{ item.owner | default(vouch_proxy_user_info.group | default(vouch_proxy_user)) }}"
mode: "{{ item.mode | default('0755') }}"
loop:
- path: "{{ vouch_proxy_base_path }}"
- path: "{{ vouch_proxy_config_path }}"
- name: Ensure config file is templated
ansible.builtin.copy:
dest: "{{ vouch_proxy_config_file }}"
content: "{{ vouch_proxy_config | to_nice_yaml }}"
owner: "{{ vouch_proxy_user_info.uid | default(vouch_proxy_user) }}"
group: "{{ vouch_proxy_user_info.group | default(vouch_proxy_user) }}"
mode: "0640"
notify:
- restart-vouch-proxy
- name: Ensure container image is present on host
community.docker.docker_image:
name: "{{ vouch_proxy_container_image_reference }}"
state: present
source: pull
force_source: "{{ vouch_proxy_container_image_force_pull | bool }}"
- name: Ensure container '{{ vouch_proxy_container_name }}' is running
community.docker.docker_container:
name: "{{ vouch_proxy_container_name }}"
image: "{{ vouch_proxy_container_image_reference }}"
env: "{{ vouch_proxy_container_env | default(omit) }}"
user: "{{ vouch_proxy_user_info.uid | default(vouch_proxy_user) }}"
ports: "{{ vouch_proxy_container_ports | default(omit) }}"
volumes: "{{ vouch_proxy_container_volumes | default(omit) }}"
networks: "{{ vouch_proxy_container_networks | default(omit) }}"
purge_networks: "{{ vouch_proxy_container_purge_networks | default(omit) }}"
etc_hosts: "{{ vouch_proxy_container_etc_hosts | default(omit) }}"
restart_policy: "{{ vouch_proxy_container_restart_policy }}"
state: started