Compare commits


1 Commit

83 changed files with 489 additions and 1759 deletions

View File

@ -1,4 +1,4 @@
# `finallycoffee.services` ansible collection # `finallycoffee.service` ansible collection
## Overview ## Overview
@ -8,35 +8,20 @@ concise area of concern.
## Roles ## Roles
- [`authelia`](roles/authelia/README.md): Deploys an [authelia.com](https://www.authelia.com) - [`roles/authelia`](roles/authelia/README.md): Deploys an [authelia.com](https://www.authelia.com)
instance, an authentication provider with beta OIDC provider support. instance, an authentication provider with beta OIDC provider support.
- [`ghost`](roles/ghost/README.md): Deploys [ghost.org](https://ghost.org/), a simple to use - [`roles/gitea`](roles/gitea/README.md): Deploy [gitea.io](https://gitea.io), a
blogging and publishing platform.
- [`gitea`](roles/gitea/README.md): Deploy [gitea.io](https://gitea.io), a
lightweight, self-hosted git service. lightweight, self-hosted git service.
- [`hedgedoc`](roles/hedgedoc/README.md): Deploy [hedgedoc](https://hedgedoc.org/), - [`roles/jellyfin`](roles/jellyfin/README.md): Deploy [jellyfin.org](https://jellyfin.org),
a collaborative real-time markdown editor using websockets
- [`jellyfin`](roles/jellyfin/README.md): Deploy [jellyfin.org](https://jellyfin.org),
the free software media system for streaming stored media to any device. the free software media system for streaming stored media to any device.
- [`keycloak`](roles/keycloak/README.md): Deploy [keycloak](https://www.keycloak.org/), - [`roles/restic`](roles/restic/README.md): Manage backups using restic
the open source identity and access management solution. and persist them to a configurable backend.
- [`openproject`](roles/openproject/README.md): Deploys an [openproject.org](https://www.openproject.org) - [`roles/minio`](roles/minio/README.md): Deploy [min.io](https://min.io), an
installation using the upstream provided docker-compose setup. s3-compatible object storage server, using docker containers.
- [`snipe_it`](roles/snipe_it/README.md): Deploys [Snipe-IT](https://snipeitapp.com/),
the free and open-source IT asset (and license) management solution with a powerful REST API
- [`vaultwarden`](roles/vaultwarden/README.md): Deploy [vaultwarden](https://github.com/dani-garcia/vaultwarden/),
an open-source implementation of the Bitwarden Server (formerly Bitwarden\_RS).
- [`vouch_proxy`](roles/vouch_proxy/README.md): Deploys [vouch-proxy](https://github.com/vouch/vouch-proxy),
an authorization proxy for arbitrary webapps working with nginx's `auth_request` module.
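Each role can be used by its fully-qualified collection name in a play, similar to the bundled playbooks. A minimal sketch (the host group and `become` setting are assumptions to adapt to your inventory):

```yaml
---
- name: Install and configure gitea
  hosts: gitea
  become: true
  roles:
    - role: finallycoffee.services.gitea
```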
## License ## License

View File

@ -1,23 +1,15 @@
namespace: finallycoffee namespace: finallycoffee
name: services name: services
version: 0.1.10 version: 0.0.1
readme: README.md readme: README.md
authors: authors:
- transcaffeine <transcaffeine@finally.coffee> - Johanna Dorothea Reichmann <transcaffeine@finallycoffee.eu>
description: Various ansible roles useful for automating infrastructure description: Various ansible roles useful for automating infrastructure
dependencies: dependencies:
"community.crypto": "^2.0.0" "community.docker": "^1.10.0"
"community.docker": "^3.0.0" license:
license_file: LICENSE.md - CNPLv7+
build_ignore: build_ignore:
- '*.tar.gz' - '*.tar.gz'
repository: https://git.finally.coffee/finallycoffee/services repository: https://git.finallycoffee.eu/finallycoffee.eu/services
issues: https://codeberg.org/finallycoffee/ansible-collection-services/issues issues: https://git.finallycoffee.eu/finallycoffee.eu/services/issues
tags:
- authelia
- gitea
- hedgedoc
- jellyfin
- vaultwarden
- snipeit
- docker

View File

@ -1,3 +0,0 @@
---
requires_ansible: ">=2.15"

View File

@ -1,6 +0,0 @@
---
- name: Install and configure hedgedoc
hosts: "{{ hedgedoc_hosts | default('hedgedoc') }}"
become: "{{ hedgedoc_become | default(true, false) }}"
roles:
- role: finallycoffee.services.hedgedoc

View File

@ -1,6 +0,0 @@
---
- name: Install and configure jellyfin
hosts: "{{ jellyfin_hosts | default('jellyfin') }}"
become: "{{ jellyfin_become | default(true, false) }}"
roles:
- role: finallycoffee.services.jellyfin

View File

@ -1,6 +0,0 @@
---
- name: Install openproject
hosts: "{{ openproject_hosts | default('openproject') }}"
become: "{{ openproject_become | default(true, false) }}"
roles:
- role: finallycoffee.services.openproject

View File

@ -1,6 +0,0 @@
---
- name: Install and configure Snipe-IT
hosts: "{{ snipe_it_hosts | default('snipe_it') }}"
become: "{{ snipe_it_become | default(true, false) }}"
roles:
- role: finallycoffee.services.snipe_it

View File

@ -1,6 +0,0 @@
---
- name: Install and configure vaultwarden
hosts: "{{ vaultwarden_hosts | default('vaultwarden') }}"
become: "{{ vaultwarden_become | default(true, false) }}"
roles:
- role: finallycoffee.services.vaultwarden

View File

@ -1,5 +1,6 @@
--- ---
authelia_version: "4.38.17"
authelia_version: 4.34.6
authelia_user: authelia authelia_user: authelia
authelia_base_dir: /opt/authelia authelia_base_dir: /opt/authelia
authelia_domain: authelia.example.org authelia_domain: authelia.example.org
@ -7,26 +8,14 @@ authelia_domain: authelia.example.org
authelia_config_dir: "{{ authelia_base_dir }}/config" authelia_config_dir: "{{ authelia_base_dir }}/config"
authelia_config_file: "{{ authelia_config_dir }}/config.yaml" authelia_config_file: "{{ authelia_config_dir }}/config.yaml"
authelia_data_dir: "{{ authelia_base_dir }}/data" authelia_data_dir: "{{ authelia_base_dir }}/data"
authelia_asset_dir: "{{ authelia_base_dir }}/assets"
authelia_sqlite_storage_file: "{{ authelia_data_dir }}/authelia.sqlite3" authelia_sqlite_storage_file: "{{ authelia_data_dir }}/authelia.sqlite3"
authelia_notification_storage_file: "{{ authelia_data_dir }}/notifications.txt" authelia_notification_storage_file: "{{ authelia_data_dir }}/notifications.txt"
authelia_user_storage_file: "{{ authelia_data_dir }}/user_database.yml" authelia_user_storage_file: "{{ authelia_data_dir }}/user_database.yml"
authelia_container_name: authelia authelia_container_name: authelia
authelia_container_image_server: docker.io authelia_container_image_name: docker.io/authelia/authelia
authelia_container_image_namespace: authelia
authelia_container_image_name: authelia
authelia_container_image: >-2
{{
[
authelia_container_image_server,
authelia_container_image_namespace,
authelia_container_image_name
] | join('/')
}}
authelia_container_image_tag: ~ authelia_container_image_tag: ~
authelia_container_image_ref: >-2 authelia_container_image_ref: "{{ authelia_container_image_name }}:{{ authelia_container_image_tag | default(authelia_version, true) }}"
{{ authelia_container_image }}:{{ authelia_container_image_tag | default(authelia_version, true) }}
authelia_container_image_force_pull: "{{ authelia_container_image_tag | default(false, True) }}" authelia_container_image_force_pull: "{{ authelia_container_image_tag | default(false, True) }}"
authelia_container_env: authelia_container_env:
PUID: "{{ authelia_run_user }}" PUID: "{{ authelia_run_user }}"
@ -52,22 +41,11 @@ authelia_config_jwt_secret: ~
authelia_config_default_redirection_url: ~ authelia_config_default_redirection_url: ~
authelia_config_server_host: 0.0.0.0 authelia_config_server_host: 0.0.0.0
authelia_config_server_port: "{{ authelia_container_listen_port }}" authelia_config_server_port: "{{ authelia_container_listen_port }}"
authelia_config_server_address: >-2
{{ authelia_config_server_host }}:{{ authelia_config_server_port }}
authelia_config_server_path: "" authelia_config_server_path: ""
authelia_config_server_asset_path: "/config/assets/" authelia_config_server_read_buffer_size: 4096
authelia_config_server_buffers_read: 4096 authelia_config_server_write_buffer_size: 4096
authelia_config_server_read_buffer_size: >-2 authelia_config_server_enable_pprof: true
{{ authelia_config_server_buffers_read }} authelia_config_server_enable_expvars: true
authelia_config_server_buffers_write: 4096
authelia_config_server_write_buffer_size: >-2
{{ authelia_config_server_buffers_write }}
authelia_config_server_endpoints_enable_pprof: true
authelia_config_server_enable_pprof: >-2
{{ authelia_config_server_endpoints_enable_pprof }}
authelia_config_server_endpoints_enable_expvars: true
authelia_config_server_enable_expvars: >-2
{{ authelia_config_server_endpoints_enable_expvars }}
authelia_config_server_disable_healthcheck: authelia_config_server_disable_healthcheck:
authelia_config_server_tls_key: ~ authelia_config_server_tls_key: ~
authelia_config_server_tls_certificate: ~ authelia_config_server_tls_certificate: ~
@ -77,8 +55,6 @@ authelia_config_log_level: info
authelia_config_log_format: json authelia_config_log_format: json
authelia_config_log_file_path: ~ authelia_config_log_file_path: ~
authelia_config_log_keep_stdout: false authelia_config_log_keep_stdout: false
authelia_config_telemetry_metrics_enabled: false
authelia_config_telemetry_metrics_address: '0.0.0.0:9959'
authelia_config_totp_disable: true authelia_config_totp_disable: true
authelia_config_totp_issuer: "{{ authelia_domain }}" authelia_config_totp_issuer: "{{ authelia_domain }}"
authelia_config_totp_algorithm: sha1 authelia_config_totp_algorithm: sha1
@ -100,8 +76,8 @@ authelia_config_ntp_version: 4
authelia_config_ntp_max_desync: 3s authelia_config_ntp_max_desync: 3s
authelia_config_ntp_disable_startup_check: false authelia_config_ntp_disable_startup_check: false
authelia_config_ntp_disable_failure: false authelia_config_ntp_disable_failure: false
authelia_config_authentication_backend_disable_reset_password: false
authelia_config_authentication_backend_refresh_interval: 5m authelia_config_authentication_backend_refresh_interval: 5m
authelia_config_authentication_backend_password_reset_disable: false
authelia_config_authentication_backend_password_reset_custom_url: ~ authelia_config_authentication_backend_password_reset_custom_url: ~
authelia_config_authentication_backend_ldap_implementation: custom authelia_config_authentication_backend_ldap_implementation: custom
authelia_config_authentication_backend_ldap_url: ldap://127.0.0.1:389 authelia_config_authentication_backend_ldap_url: ldap://127.0.0.1:389
@ -114,18 +90,10 @@ authelia_config_authentication_backend_ldap_additional_users_dn: "ou=users"
authelia_config_authentication_backend_ldap_users_filter: "(&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=inetOrgPerson))" authelia_config_authentication_backend_ldap_users_filter: "(&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=inetOrgPerson))"
authelia_config_authentication_backend_ldap_additional_groups_dn: "ou=groups" authelia_config_authentication_backend_ldap_additional_groups_dn: "ou=groups"
authelia_config_authentication_backend_ldap_groups_filter: "(member={dn})" authelia_config_authentication_backend_ldap_groups_filter: "(member={dn})"
authelia_config_authentication_backend_ldap_attributes_username: uid
authelia_config_authentication_backend_ldap_username_attribute: >-2
{{ authelia_config_authentication_backend_ldap_attributes_username }}
authelia_config_authentication_backend_ldap_attributes_mail: mail
authelia_config_authentication_backend_ldap_mail_attribute: >-2
{{ authelia_config_authentication_backend_ldap_attributes_mail }}
authelia_config_authentication_backend_ldap_attributes_display_name: displayName
authelia_config_authentication_backend_ldap_display_name_attribute: >-2
{{ authelia_config_authentication_backend_ldap_attributes_display_name }}
authelia_config_authentication_backend_ldap_group_name_attribute: cn authelia_config_authentication_backend_ldap_group_name_attribute: cn
authelia_config_authentication_backend_ldap_attributes_group_name: >-2 authelia_config_authentication_backend_ldap_username_attribute: uid
{{ authelia_config_authentication_backend_ldap_group_name_attribute }} authelia_config_authentication_backend_ldap_mail_attribute: mail
authelia_config_authentication_backend_ldap_display_name_attribute: displayName
authelia_config_authentication_backend_ldap_user: ~ authelia_config_authentication_backend_ldap_user: ~
authelia_config_authentication_backend_ldap_password: ~ authelia_config_authentication_backend_ldap_password: ~
authelia_config_authentication_backend_file_path: ~ authelia_config_authentication_backend_file_path: ~
@ -153,8 +121,6 @@ authelia_config_session_secret: ~
authelia_config_session_expiration: 1h authelia_config_session_expiration: 1h
authelia_config_session_inactivity: 5m authelia_config_session_inactivity: 5m
authelia_config_session_remember_me_duration: 1M authelia_config_session_remember_me_duration: 1M
authelia_config_session_remember_me: >-2
{{ authelia_config_session_remember_me_duration }}
authelia_config_session_redis_host: "{{ authelia_redis_host }}" authelia_config_session_redis_host: "{{ authelia_redis_host }}"
authelia_config_session_redis_port: "{{ authelia_redis_port }}" authelia_config_session_redis_port: "{{ authelia_redis_port }}"
authelia_config_session_redis_username: "{{ authelia_redis_user }}" authelia_config_session_redis_username: "{{ authelia_redis_user }}"
@ -179,14 +145,15 @@ authelia_config_storage_postgres_ssl_certificate: disable
authelia_config_storage_postgres_ssl_key: disable authelia_config_storage_postgres_ssl_key: disable
authelia_config_notifier_disable_startup_check: false authelia_config_notifier_disable_startup_check: false
authelia_config_notifier_filesystem_filename: ~ authelia_config_notifier_filesystem_filename: ~
authelia_config_notifier_smtp_address: "{{ authelia_smtp_host }}:{{ authelia_stmp_port }}" authelia_config_notifier_smtp_host: "{{ authelia_smtp_host }}"
authelia_config_notifier_smtp_port: "{{ authelia_stmp_port }}"
authelia_config_notifier_smtp_username: "{{ authelia_smtp_user }}" authelia_config_notifier_smtp_username: "{{ authelia_smtp_user }}"
authelia_config_notifier_smtp_password: "{{ authelia_smtp_pass }}" authelia_config_notifier_smtp_password: "{{ authelia_smtp_pass }}"
authelia_config_notifier_smtp_timeout: 5s authelia_config_notifier_smtp_timeout: 5s
authelia_config_notifier_smtp_sender: "Authelia on {{ authelia_domain }} <admin@{{ authelia_domain }}>" authelia_config_notifier_smtp_sender: "Authelia on {{ authelia_domain }} <admin@{{ authelia_domain }}>"
authelia_config_notifier_smtp_identifier: "{{ authelia_domain }}" authelia_config_notifier_smtp_identifier: "{{ authelia_domain }}"
authelia_config_notifier_smtp_subject: "[Authelia @ {{ authelia_domain }}] {title}" authelia_config_notifier_smtp_subject: "[Authelia @ {{ authelia_domain }}] {title}"
authelia_config_notifier_smtp_startup_check_address: "authelia-test@{{ authelia_domain }}" authelia_config_notifier_smtp_startup_check_address: false
authelia_config_notifier_smtp_disable_require_tls: false authelia_config_notifier_smtp_disable_require_tls: false
authelia_config_notifier_smtp_disable_html_emails: false authelia_config_notifier_smtp_disable_html_emails: false
authelia_config_notifier_smtp_tls_skip_verify: false authelia_config_notifier_smtp_tls_skip_verify: false
@ -195,12 +162,6 @@ authelia_config_notifier_smtp_tls_minimum_version: "{{ authelia_tls_minimum_vers
authelia_database_type: ~ authelia_database_type: ~
authelia_database_host: ~ authelia_database_host: ~
authelia_database_port: ~
authelia_database_address: >-2
{{ authelia_database_host }}{{
(authelia_database_port | default(false, true) | bool)
| ternary(':' + authelia_database_port, '')
}}
authelia_database_user: authelia authelia_database_user: authelia
authelia_database_pass: ~ authelia_database_pass: ~
authelia_database_name: authelia authelia_database_name: authelia

View File

@ -4,7 +4,5 @@
docker_container: docker_container:
name: "{{ authelia_container_name }}" name: "{{ authelia_container_name }}"
state: started state: started
restart: true restart: yes
comparisons:
'*': ignore
listen: restart-authelia listen: restart-authelia

View File

@ -1,9 +0,0 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
role_name: authelia
description: Ansible role to deploy authelia using docker
galaxy_tags:
- authelia
- docker

View File

@ -1,20 +1,19 @@
--- ---
- name: Ensure user {{ authelia_user }} exists - name: Ensure user {{ authelia_user }} exists
ansible.builtin.user: user:
name: "{{ authelia_user }}" name: "{{ authelia_user }}"
state: present state: present
system: true system: true
register: authelia_user_info register: authelia_user_info
- name: Ensure host directories are created with correct permissions - name: Ensure host directories are created with correct permissions
ansible.builtin.file: file:
path: "{{ item.path }}" path: "{{ item.path }}"
state: directory state: directory
owner: "{{ item.owner | default(authelia_user) }}" owner: "{{ item.owner | default(authelia_user) }}"
group: "{{ item.group | default(authelia_user) }}" group: "{{ item.group | default(authelia_user) }}"
mode: "{{ item.mode | default('0750') }}" mode: "{{ item.mode | default('0750') }}"
when: item.path | default(false, true) | bool
loop: loop:
- path: "{{ authelia_base_dir }}" - path: "{{ authelia_base_dir }}"
mode: "0755" mode: "0755"
@ -22,11 +21,9 @@
mode: "0750" mode: "0750"
- path: "{{ authelia_data_dir }}" - path: "{{ authelia_data_dir }}"
mode: "0750" mode: "0750"
- path: "{{ authelia_asset_dir }}"
mode: "0750"
- name: Ensure config file is generated - name: Ensure config file is generated
ansible.builtin.copy: copy:
content: "{{ authelia_config | to_nice_yaml(indent=2, width=10000) }}" content: "{{ authelia_config | to_nice_yaml(indent=2, width=10000) }}"
dest: "{{ authelia_config_file }}" dest: "{{ authelia_config_file }}"
owner: "{{ authelia_run_user }}" owner: "{{ authelia_run_user }}"
@ -35,7 +32,7 @@
notify: restart-authelia notify: restart-authelia
- name: Ensure sqlite database file exists before mounting it - name: Ensure sqlite database file exists before mounting it
ansible.builtin.file: file:
path: "{{ authelia_sqlite_storage_file }}" path: "{{ authelia_sqlite_storage_file }}"
state: touch state: touch
owner: "{{ authelia_run_user }}" owner: "{{ authelia_run_user }}"
@ -46,7 +43,7 @@
when: authelia_config_storage_local_path | default(false, true) when: authelia_config_storage_local_path | default(false, true)
- name: Ensure user database exists before mounting it - name: Ensure user database exists before mounting it
ansible.builtin.file: file:
path: "{{ authelia_user_storage_file }}" path: "{{ authelia_user_storage_file }}"
state: touch state: touch
owner: "{{ authelia_run_user }}" owner: "{{ authelia_run_user }}"
@ -57,7 +54,7 @@
when: authelia_config_authentication_backend_file_path | default(false, true) when: authelia_config_authentication_backend_file_path | default(false, true)
- name: Ensure notification reports file exists before mounting it - name: Ensure notification reports file exists before mounting it
ansible.builtin.file: file:
path: "{{ authelia_notification_storage_file }}" path: "{{ authelia_notification_storage_file }}"
state: touch state: touch
owner: "{{ authelia_run_user }}" owner: "{{ authelia_run_user }}"
@ -76,7 +73,7 @@
register: authelia_container_image_info register: authelia_container_image_info
- name: Ensure authelia container is running - name: Ensure authelia container is running
community.docker.docker_container: docker_container:
name: "{{ authelia_container_name }}" name: "{{ authelia_container_name }}"
image: "{{ authelia_container_image_ref }}" image: "{{ authelia_container_image_ref }}"
env: "{{ authelia_container_env }}" env: "{{ authelia_container_env }}"
@ -85,9 +82,7 @@
labels: "{{ authelia_container_labels }}" labels: "{{ authelia_container_labels }}"
volumes: "{{ authelia_container_volumes }}" volumes: "{{ authelia_container_volumes }}"
networks: "{{ authelia_container_networks | default(omit, true) }}" networks: "{{ authelia_container_networks | default(omit, true) }}"
etc_hosts: "{{ authelia_container_etc_hosts | default(omit, true) }}"
purge_networks: "{{ authelia_container_purge_networks | default(omit, true)}}" purge_networks: "{{ authelia_container_purge_networks | default(omit, true)}}"
restart_policy: "{{ authelia_container_restart_policy }}" restart_policy: "{{ authelia_container_restart_policy }}"
recreate: "{{ authelia_container_recreate | default(omit, true) }}"
state: "{{ authelia_container_state }}" state: "{{ authelia_container_state }}"
register: authelia_container_info register: authelia_container_info

View File

@ -5,7 +5,6 @@ authelia_run_group: "{{ (authelia_user_info.group) if authelia_user_info is defi
authelia_container_base_volumes: >-2 authelia_container_base_volumes: >-2
{{ [ authelia_config_file + ":/config/configuration.yml:ro"] {{ [ authelia_config_file + ":/config/configuration.yml:ro"]
+ ([authelia_asset_dir + '/:' + authelia_config_server_asset_path + ':ro'] if authelia_asset_dir | default(false, true) else [])
+ ([ authelia_sqlite_storage_file + ":" + authelia_config_storage_local_path + ":z" ] + ([ authelia_sqlite_storage_file + ":" + authelia_config_storage_local_path + ":z" ]
if authelia_config_storage_local_path | default(false, true) else []) if authelia_config_storage_local_path | default(false, true) else [])
+ ([ authelia_notification_storage_file + ":" + authelia_config_notifier_filesystem_filename + ":z" ] + ([ authelia_notification_storage_file + ":" + authelia_config_notifier_filesystem_filename + ":z" ]
@ -22,7 +21,6 @@ authelia_top_level_config:
theme: "{{ authelia_config_theme }}" theme: "{{ authelia_config_theme }}"
jwt_secret: "{{ authelia_config_jwt_secret }}" jwt_secret: "{{ authelia_config_jwt_secret }}"
log: "{{ authelia_config_log }}" log: "{{ authelia_config_log }}"
telemetry: "{{ authelia_config_telemetry }}"
totp: "{{ authelia_config_totp }}" totp: "{{ authelia_config_totp }}"
webauthn: "{{ authelia_config_webauthn }}" webauthn: "{{ authelia_config_webauthn }}"
duo_api: "{{ authelia_config_duo_api }}" duo_api: "{{ authelia_config_duo_api }}"
@ -48,20 +46,17 @@ authelia_base_config: >-2
authelia_config_server: >-2 authelia_config_server: >-2
{{ {{
{ {
"address": authelia_config_server_address, "host": authelia_config_server_host,
"asset_path": authelia_config_server_asset_path, "port": authelia_config_server_port,
"path": authelia_config_server_path,
"read_buffer_size": authelia_config_server_read_buffer_size,
"write_buffer_size": authelia_config_server_write_buffer_size,
"enable_pprof": authelia_config_server_enable_pprof,
"enable_expvars": authelia_config_server_enable_expvars,
"disable_healthcheck": authelia_config_server_disable_healthcheck, "disable_healthcheck": authelia_config_server_disable_healthcheck,
"endpoints": authelia_config_server_endpoints,
"buffers": authelia_config_server_buffers,
} | combine({"headers": {"csp_template": authelia_config_server_headers_csp_template}} } | combine({"headers": {"csp_template": authelia_config_server_headers_csp_template}}
if authelia_config_server_headers_csp_template | default(false, true) else {}) if authelia_config_server_headers_csp_template | default(false, true) else {})
}} }}
authelia_config_server_endpoints:
enable_expvars: "{{ authelia_config_server_endpoints_enable_expvars }}"
enable_pprof: "{{ authelia_config_server_endpoints_enable_pprof }}"
authelia_config_server_buffers:
read: "{{ authelia_config_server_buffers_read }}"
write: "{{ authelia_config_server_buffers_write }}"
authelia_config_server_tls: authelia_config_server_tls:
key: "{{ authelia_config_server_tls_key }}" key: "{{ authelia_config_server_tls_key }}"
certificate: "{{ authelia_config_server_tls_certificate }}" certificate: "{{ authelia_config_server_tls_certificate }}"
@ -77,10 +72,6 @@ authelia_config_log: >-2
| combine({"keep_stdout": authelia_config_log_keep_stdout} | combine({"keep_stdout": authelia_config_log_keep_stdout}
if authelia_config_log_file_path | default(false, true) else {}) if authelia_config_log_file_path | default(false, true) else {})
}} }}
authelia_config_telemetry:
metrics:
enabled: "{{ authelia_config_telemetry_metrics_enabled }}"
address: "{{ authelia_config_telemetry_metrics_address }}"
authelia_config_totp: authelia_config_totp:
disable: "{{ authelia_config_totp_disable }}" disable: "{{ authelia_config_totp_disable }}"
issuer: "{{ authelia_config_totp_issuer }}" issuer: "{{ authelia_config_totp_issuer }}"
@ -110,6 +101,7 @@ authelia_config_ntp:
authelia_config_authentication_backend: >-2 authelia_config_authentication_backend: >-2
{{ {{
{ {
"disable_reset_password": authelia_config_authentication_backend_disable_reset_password,
"refresh_interval": authelia_config_authentication_backend_refresh_interval, "refresh_interval": authelia_config_authentication_backend_refresh_interval,
} }
| combine({"password_reset": authelia_config_authentication_backend_password_reset} | combine({"password_reset": authelia_config_authentication_backend_password_reset}
@ -120,7 +112,6 @@ authelia_config_authentication_backend: >-2
}} }}
authelia_config_authentication_backend_password_reset: authelia_config_authentication_backend_password_reset:
custom_url: "{{ authelia_config_authentication_backend_password_reset_custom_url }}" custom_url: "{{ authelia_config_authentication_backend_password_reset_custom_url }}"
disable: "{{ authelia_config_authentication_backend_password_reset_disable }}"
authelia_config_authentication_backend_ldap: authelia_config_authentication_backend_ldap:
implementation: "{{ authelia_config_authentication_backend_ldap_implementation }}" implementation: "{{ authelia_config_authentication_backend_ldap_implementation }}"
url: "{{ authelia_config_authentication_backend_ldap_url }}" url: "{{ authelia_config_authentication_backend_ldap_url }}"
@ -134,11 +125,10 @@ authelia_config_authentication_backend_ldap:
additional_groups_dn: "{{ authelia_config_authentication_backend_ldap_additional_groups_dn }}" additional_groups_dn: "{{ authelia_config_authentication_backend_ldap_additional_groups_dn }}"
users_filter: "{{ authelia_config_authentication_backend_ldap_users_filter }}" users_filter: "{{ authelia_config_authentication_backend_ldap_users_filter }}"
groups_filter: "{{ authelia_config_authentication_backend_ldap_groups_filter }}" groups_filter: "{{ authelia_config_authentication_backend_ldap_groups_filter }}"
attributes: group_name_attribute: "{{ authelia_config_authentication_backend_ldap_group_name_attribute }}"
username: "{{ authelia_config_authentication_backend_ldap_attributes_username }}" username_attribute: "{{ authelia_config_authentication_backend_ldap_username_attribute }}"
mail: "{{ authelia_config_authentication_backend_ldap_attributes_mail }}" mail_attribute: "{{ authelia_config_authentication_backend_ldap_mail_attribute }}"
display_name: "{{ authelia_config_authentication_backend_ldap_attributes_display_name }}" display_name_attribute: "{{ authelia_config_authentication_backend_ldap_display_name_attribute }}"
group_name: "{{ authelia_config_authentication_backend_ldap_attributes_group_name }}"
user: "{{ authelia_config_authentication_backend_ldap_user }}" user: "{{ authelia_config_authentication_backend_ldap_user }}"
password: "{{ authelia_config_authentication_backend_ldap_password }}" password: "{{ authelia_config_authentication_backend_ldap_password }}"
authelia_config_authentication_backend_file: authelia_config_authentication_backend_file:
@ -170,19 +160,14 @@ authelia_config_access_control:
default_policy: "{{ authelia_config_access_control_default_policy }}" default_policy: "{{ authelia_config_access_control_default_policy }}"
networks: "{{ authelia_config_access_control_networks }}" networks: "{{ authelia_config_access_control_networks }}"
rules: "{{ authelia_config_access_control_rules }}" rules: "{{ authelia_config_access_control_rules }}"
authelia_config_session: >-2 authelia_config_session:
{{ authelia_config_session_base
| combine(({'redis': authelia_config_session_redis}
if authelia_config_session_redis_host else {}), recursive=true)
}}
authelia_config_session_base:
name: "{{ authelia_config_session_name }}" name: "{{ authelia_config_session_name }}"
domain: "{{ authelia_config_session_domain }}" domain: "{{ authelia_config_session_domain }}"
same_site: "{{ authelia_config_session_same_site }}" same_site: "{{ authelia_config_session_same_site }}"
secret: "{{ authelia_config_session_secret }}" secret: "{{ authelia_config_session_secret }}"
expiration: "{{ authelia_config_session_expiration }}" expiration: "{{ authelia_config_session_expiration }}"
inactivity: "{{ authelia_config_session_inactivity }}" inactivity: "{{ authelia_config_session_inactivity }}"
remember_me: "{{ authelia_config_session_remember_me }}" remember_me_duration: "{{ authelia_config_session_remember_me_duration }}"
authelia_config_session_redis: >-2 authelia_config_session_redis: >-2
{{ {{
{ {
@ -226,13 +211,15 @@ authelia_config_storage: >-2
authelia_config_storage_local: authelia_config_storage_local:
path: "{{ authelia_config_storage_local_path }}" path: "{{ authelia_config_storage_local_path }}"
authelia_config_storage_mysql: authelia_config_storage_mysql:
host: "{{ authelia_database_address }}" host: "{{ authelia_database_host }}"
port: "{{ authelia_config_storage_mysql_port }}"
database: "{{ authelia_database_name }}" database: "{{ authelia_database_name }}"
username: "{{ authelia_database_user }}" username: "{{ authelia_database_user }}"
password: "{{ authelia_database_pass }}" password: "{{ authelia_database_pass }}"
timeout: "{{ authelia_database_timeout }}" timeout: "{{ authelia_database_timeout }}"
authelia_config_storage_postgres: authelia_config_storage_postgres:
address: "{{ authelia_database_address }}" host: "{{ authelia_database_host }}"
port: "{{ authelia_config_storage_postgres_port }}"
database: "{{ authelia_database_name }}" database: "{{ authelia_database_name }}"
schema: public schema: public
username: "{{ authelia_database_user }}" username: "{{ authelia_database_user }}"
@ -256,7 +243,8 @@ authelia_config_notifier: >-2
authelia_config_notifier_filesystem: authelia_config_notifier_filesystem:
filename: "{{ authelia_config_notifier_filesystem_filename }}" filename: "{{ authelia_config_notifier_filesystem_filename }}"
authelia_config_notifier_smtp: authelia_config_notifier_smtp:
address: "{{ authelia_config_notifier_smtp_address }}" host: "{{ authelia_config_notifier_smtp_host }}"
port: "{{ authelia_config_notifier_smtp_port }}"
timeout: "{{ authelia_config_notifier_smtp_timeout }}" timeout: "{{ authelia_config_notifier_smtp_timeout }}"
username: "{{ authelia_config_notifier_smtp_username }}" username: "{{ authelia_config_notifier_smtp_username }}"
password: "{{ authelia_config_notifier_smtp_password }}" password: "{{ authelia_config_notifier_smtp_password }}"

View File

@ -1,18 +0,0 @@
# `finallycoffee.services.ghost` ansible role
[Ghost](https://ghost.org/) is a self-hosted blog with rich media capabilities,
which this role deploys in a docker container.
## Requirements
Ghost requires a MySQL database (such as MariaDB) for storing its data, which
can be configured using the `ghost_database_(host|username|password|database)` variables.
Setting `ghost_domain` to a fully-qualified domain on which ghost should be reachable
is also required.
Ghost's configuration can be changed using the `ghost_config` variable.
Container arguments equivalent to those of `community.docker.docker_container` can be
provided using the `ghost_container_[...]` syntax (e.g. `ghost_container_ports` to expose
ghost's port on the host), as shown in the sketch below.
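A minimal sketch of host variables for this role (domain, database host and the `vault_*` lookups are placeholders):

```yaml
ghost_domain: blog.example.org
ghost_database_host: mariadb.example.org
ghost_database_username: ghost
ghost_database_password: "{{ vault_ghost_database_password }}"
ghost_database_database: ghost
# extra ghost configuration is merged into the generated environment file;
# illustrative key, double underscores denote nesting in ghost's config
ghost_config:
  mail__transport: "SMTP"
# passed through to community.docker.docker_container; 2368 is ghost's default internal port
ghost_container_ports:
  - "127.0.0.1:2368:2368"
```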

View File

@ -1,38 +0,0 @@
---
ghost_domain: ~
ghost_version: "5.103.0"
ghost_user: ghost
ghost_user_group: ghost
ghost_base_path: /opt/ghost
ghost_data_path: "{{ ghost_base_path }}/data"
ghost_config_path: "{{ ghost_base_path }}/config"
ghost_config_file: "{{ ghost_config_path }}/ghost.env"
ghost_database_username: ghost
ghost_database_password: ~
ghost_database_database: ghost
ghost_database_host: ~
ghost_base_config:
url: "https://{{ ghost_domain }}"
database__client: mysql
database__connection__host: "{{ ghost_database_host }}"
database__connection__user: "{{ ghost_database_username }}"
database__connection__password: "{{ ghost_database_password }}"
database__connection__database: "{{ ghost_database_database }}"
ghost_config: {}
ghost_container_name: ghost
ghost_container_image_name: docker.io/ghost
ghost_container_image_tag: ~
ghost_container_base_volumes:
- "{{ ghost_data_path }}:{{ ghost_container_data_directory }}:rw"
ghost_container_extra_volumes: []
ghost_container_volumes:
"{{ ghost_container_base_volumes + ghost_container_extra_volumes }}"
ghost_container_base_labels:
version: "{{ ghost_version }}"
ghost_container_extra_labels: {}
ghost_container_restart_policy: "unless-stopped"
ghost_container_networks: ~
ghost_container_purge_networks: ~
ghost_container_etc_hosts: ~
ghost_container_state: started

View File

@ -1,10 +0,0 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
role_name: ghost
description: Ansible role to deploy ghost (https://ghost.org) using docker
galaxy_tags:
- ghost
- blog
- docker

View File

@ -1,57 +0,0 @@
---
- name: Ensure ghost group is created
ansible.builtin.group:
name: "{{ ghost_user_group }}"
state: present
system: true
- name: Ensure ghost user is created
ansible.builtin.user:
name: "{{ ghost_user }}"
groups:
- "{{ ghost_user_group }}"
append: true
state: present
system: true
- name: Ensure host paths for docker volumes exist for ghost
ansible.builtin.file:
path: "{{ item.path }}"
state: directory
mode: "0750"
owner: "{{ item.owner | default(ghost_user) }}"
group: "{{ item.group | default(ghost_user_group) }}"
loop:
- path: "{{ ghost_base_path }}"
- path: "{{ ghost_data_path }}"
owner: "1000"
- path: "{{ ghost_config_path }}"
- name: Ensure ghost configuration file is templated
ansible.builtin.template:
src: "ghost.env.j2"
dest: "{{ ghost_config_file }}"
owner: "{{ ghost_user }}"
group: "{{ ghost_user_group }}"
mode: "0644"
- name: Ensure ghost container image is present on host
community.docker.docker_image:
name: "{{ ghost_container_image }}"
state: present
source: pull
force_source: "{{ ghost_container_image_tag is defined }}"
- name: Ensure ghost container '{{ ghost_container_name }}' is {{ ghost_container_state }}
community.docker.docker_container:
name: "{{ ghost_container_name }}"
image: "{{ ghost_container_image }}"
ports: "{{ ghost_container_ports | default(omit, true) }}"
labels: "{{ ghost_container_labels }}"
volumes: "{{ ghost_container_volumes }}"
env_file: "{{ ghost_config_file }}"
etc_hosts: "{{ ghost_container_etc_hosts | default(omit, true) }}"
networks: "{{ ghost_container_networks | default(omit, true) }}"
purge_networks: "{{ ghost_container_purge_networks | default(omit, true) }}"
restart_policy: "{{ ghost_container_restart_policy }}"
state: "{{ ghost_container_state }}"

View File

@ -1,3 +0,0 @@
{% for key, value in ghost_config_complete.items() %}
{{ key }}={{ value }}
{% endfor %}

View File

@ -1,10 +0,0 @@
---
ghost_container_image: "{{ ghost_container_image_name }}:{{ ghost_container_image_tag | default(ghost_version, true) }}"
ghost_container_labels: >-2
{{ ghost_container_base_labels
| combine(ghost_container_extra_labels) }}
ghost_container_data_directory: "/var/lib/ghost/content"
ghost_config_complete: >-2
{{ ghost_base_config | combine(ghost_config, recursive=True) }}

View File

@ -1,7 +1,7 @@
--- ---
gitea_version: "1.22.4"
gitea_version: "1.16.4"
gitea_user: git gitea_user: git
gitea_run_user: "{{ gitea_user }}"
gitea_base_path: "/opt/gitea" gitea_base_path: "/opt/gitea"
gitea_data_path: "{{ gitea_base_path }}/data" gitea_data_path: "{{ gitea_base_path }}/data"
@ -9,29 +9,17 @@ gitea_data_path: "{{ gitea_base_path }}/data"
gitea_domain: ~ gitea_domain: ~
# container config # container config
gitea_container_name: "{{ gitea_user }}" gitea_container_name: "git"
gitea_container_image_server: "docker.io" gitea_container_image_name: "docker.io/gitea/gitea"
gitea_container_image_name: "gitea"
gitea_container_image_namespace: gitea
gitea_container_image_fq_name: >-
{{
[
gitea_container_image_server,
gitea_container_image_namespace,
gitea_container_image_name
] | join('/')
}}
gitea_container_image_tag: "{{ gitea_version }}" gitea_container_image_tag: "{{ gitea_version }}"
gitea_container_image: >-2 gitea_container_image: "{{ gitea_container_image_name }}:{{ gitea_container_image_tag }}"
{{ gitea_container_image_fq_name }}:{{ gitea_container_image_tag }}
gitea_container_networks: [] gitea_container_networks: []
gitea_container_purge_networks: ~ gitea_container_purge_networks: ~
gitea_container_restart_policy: "unless-stopped" gitea_container_restart_policy: "unless-stopped"
gitea_container_extra_env: {} gitea_container_extra_env: {}
gitea_container_extra_labels: {} gitea_contianer_extra_labels: {}
gitea_container_extra_ports: [] gitea_container_extra_ports: []
gitea_container_extra_volumes: [] gitea_container_extra_volumes: []
gitea_container_state: started
# container defaults # container defaults
gitea_container_base_volumes: gitea_container_base_volumes:
@ -52,10 +40,10 @@ gitea_container_base_labels:
gitea_config_mailer_enabled: false gitea_config_mailer_enabled: false
gitea_config_mailer_type: ~ gitea_config_mailer_type: ~
gitea_config_mailer_from_addr: ~ gitea_config_mailer_from_addr: ~
gitea_config_mailer_smtp_addr: ~ gitea_config_mailer_host: ~
gitea_config_mailer_user: ~ gitea_config_mailer_user: ~
gitea_config_mailer_passwd: ~ gitea_config_mailer_passwd: ~
gitea_config_mailer_protocol: ~ gitea_config_mailer_tls: ~
gitea_config_mailer_sendmail_path: ~ gitea_config_mailer_sendmail_path: ~
gitea_config_metrics_enabled: false gitea_config_metrics_enabled: false

View File

@ -1,10 +0,0 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
role_name: gitea
description: Ansible role to deploy gitea using docker
galaxy_tags:
- gitea
- git
- docker

View File

@ -1,15 +1,14 @@
--- ---
- name: Ensure gitea user '{{ gitea_user }}' is present - name: Create gitea user
ansible.builtin.user: user:
name: "{{ gitea_user }}" name: "{{ gitea_user }}"
state: "present" state: present
system: false system: no
create_home: true
register: gitea_user_res register: gitea_user_res
- name: Ensure host directories exist - name: Ensure host directories exist
ansible.builtin.file: file:
path: "{{ item }}" path: "{{ item }}"
owner: "{{ gitea_user_res.uid }}" owner: "{{ gitea_user_res.uid }}"
group: "{{ gitea_user_res.group }}" group: "{{ gitea_user_res.group }}"
@ -19,7 +18,7 @@
- "{{ gitea_data_path }}" - "{{ gitea_data_path }}"
- name: Ensure .ssh folder for gitea user exists - name: Ensure .ssh folder for gitea user exists
ansible.builtin.file: file:
path: "/home/{{ gitea_user }}/.ssh" path: "/home/{{ gitea_user }}/.ssh"
state: directory state: directory
owner: "{{ gitea_user_res.uid }}" owner: "{{ gitea_user_res.uid }}"
@ -38,16 +37,16 @@
register: gitea_user_ssh_key register: gitea_user_ssh_key
- name: Create forwarding script - name: Create forwarding script
ansible.builtin.copy: copy:
dest: "/usr/local/bin/gitea" dest: "/usr/local/bin/gitea"
owner: "{{ gitea_user_res.uid }}" owner: "{{ gitea_user_res.uid }}"
group: "{{ gitea_user_res.group }}" group: "{{ gitea_user_res.group }}"
mode: 0700 mode: 0700
content: | content: |
ssh -p {{ gitea_public_ssh_server_port }} -o StrictHostKeyChecking=no {{ gitea_run_user }}@127.0.0.1 -i /home/{{ gitea_user }}/.ssh/id_ssh_ed25519 "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $0 $@" ssh -p {{ gitea_public_ssh_server_port }} -o StrictHostKeyChecking=no {{ gitea_user }}@127.0.0.1 -i /home/{{ gitea_user }}/.ssh/id_ssh_ed25519 "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $0 $@"
- name: Add host pubkey to git users authorized_keys file - name: Add host pubkey to git users authorized_keys file
ansible.builtin.lineinfile: lineinfile:
path: "/home/{{ gitea_user }}/.ssh/authorized_keys" path: "/home/{{ gitea_user }}/.ssh/authorized_keys"
line: "{{ gitea_user_ssh_key.public_key }} Gitea:Host2Container" line: "{{ gitea_user_ssh_key.public_key }} Gitea:Host2Container"
state: present state: present
@ -57,27 +56,26 @@
mode: 0600 mode: 0600
- name: Ensure gitea container image is present - name: Ensure gitea container image is present
community.docker.docker_image: docker_image:
name: "{{ gitea_container_image }}" name: "{{ gitea_container_image }}"
state: present state: present
source: pull source: pull
force_source: "{{ gitea_container_image.endswith(':latest') }}" force_source: "{{ gitea_container_image.endswith(':latest') }}"
- name: Ensure container '{{ gitea_container_name }}' with gitea is {{ gitea_container_state }} - name: Ensure container '{{ gitea_container_name }}' with gitea is running
community.docker.docker_container: docker_container:
name: "{{ gitea_container_name }}" name: "{{ gitea_container_name }}"
image: "{{ gitea_container_image }}" image: "{{ gitea_container_image }}"
env: "{{ gitea_container_env }}" env: "{{ gitea_container_env }}"
labels: "{{ gitea_container_labels }}"
volumes: "{{ gitea_container_volumes }}" volumes: "{{ gitea_container_volumes }}"
networks: "{{ gitea_container_networks | default(omit, True) }}" networks: "{{ gitea_container_networks | default(omit, True) }}"
purge_networks: "{{ gitea_container_purge_networks | default(omit, True) }}" purge_networks: "{{ gitea_container_purge_networks | default(omit, True) }}"
published_ports: "{{ gitea_container_ports }}" published_ports: "{{ gitea_container_ports }}"
restart_policy: "{{ gitea_container_restart_policy }}" restart_policy: "{{ gitea_container_restart_policy }}"
state: "{{ gitea_container_state }}" state: started
- name: Ensure given configuration is set in the config file - name: Ensure given configuration is set in the config file
ansible.builtin.ini_file: ini_file:
path: "{{ gitea_data_path }}/gitea/conf/app.ini" path: "{{ gitea_data_path }}/gitea/conf/app.ini"
section: "{{ section }}" section: "{{ section }}"
option: "{{ option }}" option: "{{ option }}"

View File

@ -14,7 +14,7 @@ gitea_container_port_ssh: 22
gitea_config_base: gitea_config_base:
RUN_MODE: prod RUN_MODE: prod
RUN_USER: "{{ gitea_run_user }}" RUN_USER: "{{ gitea_user }}"
server: server:
SSH_DOMAIN: "{{ gitea_domain }}" SSH_DOMAIN: "{{ gitea_domain }}"
DOMAIN: "{{ gitea_domain }}" DOMAIN: "{{ gitea_domain }}"
@ -24,11 +24,11 @@ gitea_config_base:
mailer: mailer:
ENABLED: "{{ gitea_config_mailer_enabled }}" ENABLED: "{{ gitea_config_mailer_enabled }}"
MAILER_TYP: "{{ gitea_config_mailer_type }}" MAILER_TYP: "{{ gitea_config_mailer_type }}"
SMTP_ADDR: "{{ gitea_config_mailer_smtp_addr }}" HOST: "{{ gitea_config_mailer_host }}"
USER: "{{ gitea_config_mailer_user }}" USER: "{{ gitea_config_mailer_user }}"
PASSWD: "{{ gitea_config_mailer_passwd }}" PASSWD: "{{ gitea_config_mailer_passwd }}"
PROTOCOL: "{{ gitea_config_mailer_protocol }}" IS_TLS_ENABLED: "{{ gitea_config_mailer_tls }}"
FROM: "{{ gitea_config_mailer_from }}" FROM: "{{ gitea_config_mailer_from_addr }}"
SENDMAIL_PATH: "{{ gitea_config_mailer_sendmail_path }}" SENDMAIL_PATH: "{{ gitea_config_mailer_sendmail_path }}"
metrics: metrics:
ENABLED: "{{ gitea_config_metrics_enabled }}" ENABLED: "{{ gitea_config_metrics_enabled }}"

View File

@ -1,21 +0,0 @@
# `finallycoffee.services.hedgedoc` ansible role
Role to deploy and configure hedgedoc using `docker` or `podman`.
To configure hedgedoc, either set the configuration as structured data
directly in `hedgedoc_config` or use the flattened variables
with the `hedgedoc_config_*` prefix (see
[defaults/main/config.yml](defaults/main/config.yml)).
To remove hedgedoc, set `hedgedoc_state: absent`. Note that this
will delete all data directories as well, removing any traces this
role created on the target (except database contents).
## Required configuration
- `hedgedoc_config_domain` - Domain of the hedgedoc instance
- `hedgedoc_config_session_secret` - session secret for hedgedoc
## Deployment methods
To set the desired deployment method, set `hedgedoc_deployment_method` to one of the
supported deployment methods (see [vars/main.yml](vars/main.yml#5)), for example as in the sketch below.
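A minimal sketch of the required host variables (domain, secrets and the extra `production` override are placeholders):

```yaml
hedgedoc_config_domain: pad.example.org
hedgedoc_config_session_secret: "{{ vault_hedgedoc_session_secret }}"
hedgedoc_config_db_password: "{{ vault_hedgedoc_db_password }}"
hedgedoc_deployment_method: podman
# extra settings are merged recursively into the generated config.json,
# so overrides live below the 'production' key (illustrative example)
hedgedoc_config:
  production:
    allowAnonymous: false
```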

View File

@ -1,52 +0,0 @@
---
hedgedoc_config_domain: ~
hedgedoc_config_log_level: "info"
hedgedoc_config_session_secret: ~
hedgedoc_config_protocol_use_ssl: true
hedgedoc_config_hsts_enable: true
hedgedoc_config_csp_enable: true
hedgedoc_config_cookie_policy: 'lax'
hedgedoc_config_allow_free_url: true
hedgedoc_config_allow_email_register: false
hedgedoc_config_allow_anonymous: true
hedgedoc_config_allow_gravatar: true
hedgedoc_config_require_free_url_authentication: true
hedgedoc_config_default_permission: 'full'
hedgedoc_config_db_username: hedgedoc
hedgedoc_config_db_password: ~
hedgedoc_config_db_database: hedgedoc
hedgedoc_config_db_host: localhost
hedgedoc_config_db_port: 5432
hedgedoc_config_db_dialect: postgres
hedgedoc_config_database:
username: "{{ hedgedoc_config_db_username }}"
password: "{{ hedgedoc_config_db_password }}"
database: "{{ hedgedoc_config_db_database }}"
host: "{{ hedgedoc_config_db_host }}"
port: "{{ hedgedoc_config_db_port | int }}"
dialect: "{{ hedgedoc_config_db_dialect }}"
hedgedoc_config_base:
production:
domain: "{{ hedgedoc_config_domain }}"
loglevel: "{{ hedgedoc_config_log_level }}"
sessionSecret: "{{ hedgedoc_config_session_secret }}"
protocolUseSSL: "{{ hedgedoc_config_protocol_use_ssl }}"
cookiePolicy: "{{ hedgedoc_config_cookie_policy }}"
allowFreeURL: "{{ hedgedoc_config_allow_free_url }}"
allowAnonymous: "{{ hedgedoc_config_allow_anonymous }}"
allowEmailRegister: "{{ hedgedoc_config_allow_email_register }}"
allowGravatar: "{{ hedgedoc_config_allow_gravatar }}"
requireFreeURLAuthentication: >-2
{{ hedgedoc_config_require_free_url_authentication }}
defaultPermission: "{{ hedgedoc_config_default_permission }}"
hsts:
enable: "{{ hedgedoc_config_hsts_enable }}"
csp:
enable: "{{ hedgedoc_config_csp_enable }}"
db: "{{ hedgedoc_config_database }}"
hedgedoc_config: ~
hedgedoc_full_config: >-2
{{ hedgedoc_config_base | default({}, true)
| combine(hedgedoc_config | default({}, true), recursive=True) }}

View File

@ -1,57 +0,0 @@
---
hedgedoc_container_image_registry: quay.io
hedgedoc_container_image_namespace: hedgedoc
hedgedoc_container_image_name: hedgedoc
hedgedoc_container_image_flavour: alpine
hedgedoc_container_image_tag: ~
hedgedoc_container_image: >-2
{{
([
hedgedoc_container_image_registry,
hedgedoc_container_image_namespace | default([], true),
hedgedoc_container_image_name,
] | flatten | join('/'))
+ ':'
+ hedgedoc_container_image_tag | default(
hedgedoc_version + (
((hedgedoc_container_image_flavour is string)
and (hedgedoc_container_image_flavour | length > 0))
| ternary('-' +
hedgedoc_container_image_flavour | default('', true),
''
)
),
true
)
}}
hedgedoc_container_image_source: pull
hedgedoc_container_name: hedgedoc
hedgedoc_container_state: >-2
{{ (hedgedoc_state == 'present') | ternary('started', 'absent') }}
hedgedoc_container_config_file: "/hedgedoc/config.json"
hedgedoc_container_upload_path: "/hedgedoc/public/uploads"
hedgedoc_container_env: ~
hedgedoc_container_user: >-2
{{ hedgedoc_run_user_id }}:{{ hedgedoc_run_group_id }}
hedgedoc_container_ports: ~
hedgedoc_container_networks: ~
hedgedoc_container_etc_hosts: ~
hedgedoc_container_base_volumes:
- "{{ hedgedoc_config_file }}:{{ hedgedoc_container_config_file }}:ro"
- "{{ hedgedoc_uploads_path }}:{{ hedgedoc_container_upload_path }}:rw"
hedgedoc_container_volumes: ~
hedgedoc_container_all_volumes: >-2
{{ hedgedoc_container_base_volumes | default([], true)
+ hedgedoc_container_volumes | default([], true) }}
hedgedoc_container_base_labels:
version: "{{ hedgedoc_container_tag | default(hedgedoc_version, true) }}"
hedgedoc_container_labels: ~
hedgedoc_container_network_mode: ~
hedgedoc_container_all_labels: >-2
{{ hedgedoc_container_base_labels | default({}, true)
| combine(hedgedoc_container_labels | default({}, true)) }}
hedgedoc_container_restart_policy: >-2
{{ (hedgedoc_deployment_method == 'docker')
| ternary('unless-stopped', 'on-failure') }}

View File

@ -1,9 +0,0 @@
---
hedgedoc_user: hedgedoc
hedgedoc_version: "1.10.0"
hedgedoc_state: present
hedgedoc_deployment_method: docker
hedgedoc_config_file: "/etc/hedgedoc/config.json"
hedgedoc_uploads_path: "/var/lib/hedgedoc-uploads"

View File

@ -1,5 +0,0 @@
---
hedgedoc_run_user_id: >-2
{{ hedgedoc_user_info.uid | default(hedgedoc_user) }}
hedgedoc_run_group_id: >-2
{{ hedgedoc_user_info.group | default(hedgedoc_user) }}

View File

@ -1,12 +0,0 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
role_name: hedgedoc
description: >-2
Deploy hedgedoc, a collaborative markdown editor, using docker
galaxy_tags:
- hedgedoc
- markdown
- collaboration
- docker

View File

@ -1,23 +0,0 @@
---
- name: Check for valid state
ansible.builtin.fail:
msg: >-2
Unsupported state '{{ hedgedoc_state }}'. Supported
states are {{ hedgedoc_states | join(', ') }}.
when: hedgedoc_state not in hedgedoc_states
- name: Check for valid deployment method
ansible.builtin.fail:
msg: >-2
Deployment method '{{ hedgedoc_deployment_method }}'
is not supported. Supported are:
{{ hedgedoc_deployment_methods | join(', ') }}
when: hedgedoc_deployment_method not in hedgedoc_deployment_methods
- name: Ensure required variables are given
ansible.builtin.fail:
msg: "Required variable '{{ item }}' is undefined!"
loop: "{{ hedgedoc_required_arguments }}"
when: >-2
item not in hostvars[inventory_hostname]
or hostvars[inventory_hostname][item] | length == 0

View File

@ -1,31 +0,0 @@
---
- name: Ensure container image '{{ hedgedoc_container_image }}' is {{ hedgedoc_state }}
community.docker.docker_image:
name: "{{ hedgedoc_container_image }}"
state: "{{ hedgedoc_state }}"
source: "{{ hedgedoc_container_image_source }}"
force_source: >-2
{{ hedgedoc_container_force_source | default(
hedgedoc_container_image_tag | default(false, true), true) }}
register: hedgedoc_container_image_info
until: hedgedoc_container_image_info is success
retries: 5
delay: 3
- name: Ensure container '{{ hedgedoc_container_name }}' is {{ hedgedoc_container_state }}
community.docker.docker_container:
name: "{{ hedgedoc_container_name }}"
image: "{{ hedgedoc_container_image }}"
env: "{{ hedgedoc_container_env | default(omit, true) }}"
user: "{{ hedgedoc_container_user | default(omit, true) }}"
ports: "{{ hedgedoc_container_ports | default(omit, true) }}"
labels: "{{ hedgedoc_container_all_labels }}"
volumes: "{{ hedgedoc_container_all_volumes }}"
etc_hosts: "{{ hedgedoc_container_etc_hosts | default(omit, true) }}"
dns_servers: >-2
{{ hedgedoc_container_dns_servers | default(omit, true) }}
network_mode: >-2
{{ hedgedoc_container_network_mode | default(omit, true) }}
restart_policy: >-2
{{ hedgedoc_container_restart_policy | default(omit, true) }}
state: "{{ hedgedoc_container_state }}"

View File

@ -1,21 +0,0 @@
---
- name: Check preconditions
ansible.builtin.include_tasks:
file: "check.yml"
- name: Ensure user '{{ hedgedoc_user }}' is {{ hedgedoc_state }}
ansible.builtin.user:
name: "{{ hedgedoc_user }}"
state: "{{ hedgedoc_state }}"
system: "{{ hedgedoc_user_system | default(true, false) }}"
register: hedgedoc_user_info
- name: Ensure configuration file '{{ hedgedoc_config_file }}' is {{ hedgedoc_state }}
ansible.builtin.copy:
dest: "{{ hedgedoc_config_file }}"
content: "{{ hedgedoc_full_config | to_nice_json }}"
when: hedgedoc_state == 'present'
- name: Ensure hedgedoc is deployed using {{ hedgedoc_deployment_method }}
ansible.builtin.include_tasks:
file: "deploy-{{ hedgedoc_deployment_method }}.yml"

View File

@ -1,11 +0,0 @@
---
hedgedoc_states:
- present
- absent
hedgedoc_deployment_methods:
- docker
- podman
hedgedoc_required_arguments:
- hedgedoc_config_domain
- hedgedoc_config_session_secret

View File

@ -1,15 +0,0 @@
# `finallycoffee.services.jellyfin` ansible role
This role runs [Jellyfin](https://jellyfin.org/), a free software media system,
in a docker container.
## Usage
`jellyfin_domain` contains the FQDN on which jellyfin should be reachable. Most configuration
is done in the software itself.
Jellyfin runs in host networking mode by default, as that is needed for features like
network discovery with Chromecasts and similar devices.
Media can be mounted into jellyfin using `jellyfin_media_volumes`, taking a list of strings
akin to `community.docker.docker_container`'s `volumes` key.
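For example, a minimal sketch (domain and media paths are placeholders):

```yaml
jellyfin_domain: media.example.org
# passed to community.docker.docker_container's 'volumes' key
jellyfin_media_volumes:
  - "/mnt/media/movies:/media/movies:ro"
  - "/mnt/media/music:/media/music:ro"
```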

View File

@ -1,7 +1,6 @@
--- ---
jellyfin_user: jellyfin jellyfin_user: jellyfin
jellyfin_version: "10.10.3"
jellyfin_state: present
jellyfin_base_path: /opt/jellyfin jellyfin_base_path: /opt/jellyfin
jellyfin_config_path: "{{ jellyfin_base_path }}/config" jellyfin_config_path: "{{ jellyfin_base_path }}/config"
@ -11,17 +10,11 @@ jellyfin_media_volumes: []
jellyfin_container_name: jellyfin jellyfin_container_name: jellyfin
jellyfin_container_image_name: "docker.io/jellyfin/jellyfin" jellyfin_container_image_name: "docker.io/jellyfin/jellyfin"
jellyfin_container_image_tag: ~ jellyfin_container_image_tag: "latest"
jellyfin_container_image_ref: >-2 jellyfin_container_image_ref: "{{ jellyfin_container_image_name }}:{{ jellyfin_container_image_tag }}"
{{ jellyfin_container_image_name }}:{{ jellyfin_container_image_tag | default(jellyfin_version, true) }}
jellyfin_container_image_source: pull
jellyfin_container_state: >-2
{{ (jellyfin_state == 'present') | ternary('started', 'absent') }}
jellyfin_container_network_mode: host jellyfin_container_network_mode: host
jellyfin_container_networks: ~ jellyfin_container_networks: ~
jellyfin_container_volumes: "{{ jellyfin_container_base_volumes + jellyfin_media_volumes }}" jellyfin_container_volumes: "{{ jellyfin_container_base_volumes + jellyfin_media_volumes }}"
jellyfin_container_labels: "{{ jellyfin_container_base_labels | combine(jellyfin_container_extra_labels) }}"
jellyfin_container_extra_labels: {}
jellyfin_container_restart_policy: "unless-stopped" jellyfin_container_restart_policy: "unless-stopped"
jellyfin_host_directories: jellyfin_host_directories:

View File

@ -1,10 +0,0 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
role_name: jellyfin
description: Ansible role to deploy jellyfin using docker
galaxy_tags:
- jellyfin
- streaming
- docker

View File

@ -1,47 +1,35 @@
---
- name: Check if state is valid
ansible.builtin.fail:
msg: >-2
Unsupported state '{{ jellyfin_state }}'. Supported
states are {{ jellyfin_states | join(', ') }}.
when: jellyfin_state not in jellyfin_states
- name: Ensure jellyfin user '{{ jellyfin_user }}' is {{ jellyfin_state }}
ansible.builtin.user:
name: "{{ jellyfin_user }}"
state: "{{ jellyfin_state }}"
system: "{{ jellyfin_user_system | default(true, true) }}"
register: jellyfin_user_info
- name: Ensure host directories for jellyfin are {{ jellyfin_state }}
ansible.builtin.file:
path: "{{ item.path }}"
state: >-2
{{ (jellyfin_state == 'present') | ternary('directory', 'absent') }}
owner: "{{ item.owner | default(jellyfin_uid) }}"
group: "{{ item.group | default(jellyfin_gid) }}"
mode: "{{ item.mode }}"
loop: "{{ jellyfin_host_directories }}"
- name: Ensure container image '{{ jellyfin_container_image_ref }}' is {{ jellyfin_state }}
community.docker.docker_image:
name: "{{ jellyfin_container_image_ref }}"
state: "{{ jellyfin_state }}"
source: "{{ jellyfin_container_image_source }}"
force_source: "{{ jellyfin_container_image_tag | default(false, true) }}"
register: jellyfin_container_image_pull_result
until: jellyfin_container_image_pull_result is succeeded
retries: 5
delay: 3
- name: Ensure container '{{ jellyfin_container_name }}' is {{ jellyfin_container_state }}
community.docker.docker_container:
name: "{{ jellyfin_container_name }}"
image: "{{ jellyfin_container_image_ref }}"
user: "{{ jellyfin_uid }}:{{ jellyfin_gid }}"
labels: "{{ jellyfin_container_labels }}"
volumes: "{{ jellyfin_container_volumes }}"
networks: "{{ jellyfin_container_networks | default(omit, True) }}"
network_mode: "{{ jellyfin_container_network_mode }}"
restart_policy: "{{ jellyfin_container_restart_policy }}"
state: "{{ jellyfin_container_state }}"


@ -1,11 +1,5 @@
---
jellyfin_states:
- present
- absent
jellyfin_container_base_volumes:
- "{{ jellyfin_config_path }}:/config:z"
- "{{ jellyfin_cache_path }}:/cache:z"
jellyfin_container_base_labels:
version: "{{ jellyfin_version }}"


@ -1,16 +0,0 @@
# `finallycoffee.services.keycloak` ansible role
Ansible role for deploying Keycloak; currently, only deployment via docker is supported.
Migrated from `entropia.sso.keycloak`.
## Required variables
- `keycloak_database_password` - password for the database user
- `keycloak_config_hostname` - public domain of the keycloak server
## Database configuration
- `keycloak_database_hostname` - hostname of the database server, defaults to `localhost`
- `keycloak_database_username` - username to use when connecting to the database server, defaults to `keycloak`
- `keycloak_database_database` - name of the database to use, defaults to `keycloak`
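A minimal invocation could look like the following sketch (hostnames are placeholders, and the secrets would normally come from a vault):
```yaml
- hosts: keycloak_servers
  become: true
  roles:
    - role: finallycoffee.services.keycloak
      vars:
        keycloak_config_hostname: id.example.org
        keycloak_database_hostname: db.example.org
        keycloak_database_password: "{{ vault_keycloak_database_password }}"
        keycloak_config_admin_password: "{{ vault_keycloak_admin_password }}"
```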


@ -1,51 +0,0 @@
---
keycloak_version: 26.0.7
keycloak_container_name: keycloak
keycloak_container_image_upstream_registry: quay.io
keycloak_container_image_upstream_namespace: keycloak
keycloak_container_image_upstream_name: keycloak
keycloak_container_image_upstream: >-2
{{
([
keycloak_container_image_upstream_registry | default([]),
keycloak_container_image_upstream_namespace | default([]),
keycloak_container_image_upstream_name,
] | flatten | join('/'))
}}
keycloak_container_image_name: "keycloak:{{ keycloak_version }}-custom"
keycloak_container_database_vendor: postgres
keycloak_base_path: /opt/keycloak
keycloak_container_build_directory: "{{ keycloak_base_path }}/build"
keycloak_container_build_jar_directory: providers
keycloak_container_build_flags: {}
keycloak_provider_jars_directory: "{{ keycloak_base_path }}/providers"
keycloak_build_provider_jars_directory: "{{ keycloak_container_build_directory }}/{{ keycloak_container_build_jar_directory }}"
keycloak_database_hostname: localhost
keycloak_database_port: 5432
keycloak_database_username: keycloak
keycloak_database_password: ~
keycloak_database_database: keycloak
keycloak_container_env: {}
keycloak_container_labels: ~
keycloak_container_volumes: ~
keycloak_container_restart_policy: unless-stopped
keycloak_container_command: >-2
start
--db-username {{ keycloak_database_username }}
--db-password {{ keycloak_database_password }}
--db-url jdbc:postgresql://{{ keycloak_database_hostname }}{{ keycloak_database_port | ternary(':' ~ keycloak_database_port, '') }}/{{ keycloak_database_database }}
{{ keycloak_container_extra_start_flags | default([]) | join(' ') }}
--proxy-headers=xforwarded
--hostname {{ keycloak_config_hostname }}
--optimized
keycloak_config_health_enabled: true
keycloak_config_metrics_enabled: true
keycloak_config_hostname: localhost
keycloak_config_admin_username: admin
keycloak_config_admin_password: ~


@ -1,13 +0,0 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
role_name: keycloak
description: Deploy keycloak, the opensource identity and access management solution
galaxy_tags:
- keycloak
- sso
- oidc
- oauth2
- iam
- docker


@ -1,72 +0,0 @@
---
- name: Ensure build directory exists
ansible.builtin.file:
name: "{{ keycloak_container_build_directory }}"
state: directory
recurse: yes
mode: 0700
tags:
- keycloak-build-container
- name: Ensure provider jars directory exists
ansible.builtin.file:
name: "{{ keycloak_provider_jars_directory }}"
state: directory
mode: 0775
tags:
- keycloak-build-container
- name: Ensure Dockerfile is templated
ansible.builtin.template:
src: Dockerfile.j2
dest: "{{ keycloak_container_build_directory }}/Dockerfile"
mode: 0700
register: keycloak_buildfile_info
tags:
- keycloak-container
- keycloak-build-container
- name: Ensure upstream Keycloak container image '{{ keycloak_container_image_upstream }}:{{ keycloak_version }}' is present
community.docker.docker_image:
name: "{{ keycloak_container_image_upstream }}:{{ keycloak_version }}"
source: pull
state: present
register: keycloak_container_image_upstream_status
tags:
- keycloak-container
- keycloak-build-container
- name: Ensure custom keycloak container image '{{ keycloak_container_image_name }}' is built
community.docker.docker_image:
name: "{{ keycloak_container_image_name }}"
build:
args:
DB_VENDOR: "{{ keycloak_container_database_vendor }}"
KC_ADMIN_PASSWORD: "{{ keycloak_config_admin_password }}"
dockerfile: "{{ keycloak_container_build_directory }}/Dockerfile"
path: "{{ keycloak_container_build_directory }}"
source: build
state: present
force_source: "{{ keycloak_buildfile_info.changed or keycloak_container_image_upstream_status.changed or (keycloak_force_rebuild_container | default(false))}}"
register: keycloak_container_image_status
tags:
- keycloak-container
- keycloak-build-container
- name: Ensure keycloak container is running
community.docker.docker_container:
name: "{{ keycloak_container_name }}"
image: "{{ keycloak_container_image_name }}"
env: "{{ keycloak_container_env | default(omit, true) }}"
ports: "{{ keycloak_container_ports | default(omit, true) }}"
hostname: "{{ keycloak_container_hostname | default(omit) }}"
labels: "{{ keycloak_container_labels | default(omit, true) }}"
volumes: "{{ keycloak_container_volumes | default(omit, true) }}"
restart_policy: "{{ keycloak_container_restart_policy }}"
recreate: "{{ keycloak_container_force_recreate | default(false) or (keycloak_container_image_status.changed if keycloak_container_image_status is defined else false) }}"
etc_hosts: "{{ keycloak_container_etc_hosts | default(omit) }}"
state: started
command: "{{ keycloak_container_command }}"
tags:
- keycloak-container


@ -1,41 +0,0 @@
FROM {{ keycloak_container_image_upstream }}:{{ keycloak_version }} as builder
# Enable health and metrics support
ENV KC_HEALTH_ENABLED={{ keycloak_config_health_enabled | ternary('true', 'false') }}
ENV KC_METRICS_ENABLED={{ keycloak_config_metrics_enabled | ternary('true', 'false') }}
# Configure a database vendor
ARG DB_VENDOR
ENV KC_DB=$DB_VENDOR
WORKDIR {{ keycloak_container_working_directory }}
ADD ./providers/* providers/
# Workaround to set correct mode on jar files
USER root
RUN chmod -R 0770 providers/*
USER keycloak
RUN {{ keycloak_container_working_directory }}/bin/kc.sh --verbose \
{% for argument in keycloak_container_build_flags | dict2items(key_name='flag', value_name='value') %}
--{{- argument['flag'] -}}{{- argument['value'] | default(false, true) | ternary('=' + argument['value'], '') }} \
{% endfor%}
build{% if keycloak_container_build_features | default([]) | length > 0 %} \
{% endif %}
{% if keycloak_container_build_features | default([]) | length > 0 %}
--features="{{ keycloak_container_build_features | join(',') }}"
{% endif %}
FROM {{ keycloak_container_image_upstream }}:{{ keycloak_version }}
COPY --from=builder {{ keycloak_container_working_directory }}/ {{ keycloak_container_working_directory }}/
ENV KC_HOSTNAME={{ keycloak_config_hostname }}
ENV KEYCLOAK_ADMIN={{ keycloak_config_admin_username }}
ARG KC_ADMIN_PASSWORD
{% if keycloak_version | split('.') | first | int > 21 %}
ENV KEYCLOAK_ADMIN_PASSWORD=$KC_ADMIN_PASSWORD
{% else %}
ENV KEYCLOAK_PASSWORD=$KC_ADMIN_PASSWORD
{% endif %}
ENTRYPOINT ["{{ keycloak_container_working_directory }}/bin/kc.sh"]


@ -1,3 +0,0 @@
---
keycloak_container_working_directory: /opt/keycloak

roles/minio/README.md

@ -0,0 +1,29 @@
# `finallycoffee.services.minio` ansible role
## Overview
This role deploys a [min.io](https://min.io) (s3-compatible object storage) server
using the official docker container image.
## Configuration
The role requires setting the password for the `root` user (name can be changed by
setting `minio_root_username`) in `minio_root_password`. That user has full control
over the minio-server instance.
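A minimal playbook using this role might look like the following sketch (the host group, published port, and password are placeholders):
```yaml
- hosts: object_storage
  become: true
  roles:
    - role: finallycoffee.services.minio
      vars:
        minio_root_password: "{{ vault_minio_root_password }}"
        minio_container_ports:
          - "127.0.0.1:9000:9000"
```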
### Useful config hints
Most configuration is done by setting environment variables in
`minio_container_extra_env`, for example:
```yaml
minio_container_extra_env:
# disable the "console" web browser UI
MINIO_BROWSER: "off"
# enable public prometheus metrics on `/minio/v2/metrics/cluster`
MINIO_PROMETHEUS_AUTH_TYPE: public
```
When serving minio (or any s3-compatible server) on a "subfolder",
see https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTRedirect.html
and https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html


@ -0,0 +1,40 @@
---
minio_user: ~
minio_data_path: /opt/minio
minio_create_user: false
minio_manage_host_filesystem: false
minio_root_username: root
minio_root_password: ~
minio_container_name: minio
minio_container_image_name: docker.io/minio/minio
minio_container_image_tag: latest
minio_container_image: "{{ minio_container_image_name }}:{{ minio_container_image_tag }}"
minio_container_networks: []
minio_container_ports: []
minio_container_base_volumes:
- "{{ minio_data_path }}:{{ minio_container_data_path }}:z"
minio_container_extra_volumes: []
minio_container_base_env:
MINIO_ROOT_USER: "{{ minio_root_username }}"
MINIO_ROOT_PASSWORD: "{{ minio_root_password }}"
minio_container_extra_env: {}
minio_container_labels: {}
minio_container_command:
- "server"
- "{{ minio_container_data_path }}"
- "--console-address \":{{ minio_container_listen_port_console }}\""
minio_container_restart_policy: "unless-stopped"
minio_container_image_force_source: "{{ (minio_container_image_tag == 'latest')|bool }}"
minio_container_listen_port_api: 9000
minio_container_listen_port_console: 8900
minio_container_data_path: /storage


@ -0,0 +1,37 @@
---
- name: Ensure minio run user is present
user:
name: "{{ minio_user }}"
state: present
system: yes
when: minio_create_user
- name: Ensure filesystem mounts ({{ minio_data_path }}) for container volumes are present
file:
path: "{{ minio_data_path }}"
state: directory
owner: "{{ minio_user|default(omit, True) }}"
group: "{{ minio_user|default(omit, True) }}"
when: minio_manage_host_filesystem
- name: Ensure container image for minio is present
community.docker.docker_image:
name: "{{ minio_container_image }}"
state: present
source: pull
force_source: "{{ minio_container_image_force_source }}"
- name: Ensure container {{ minio_container_name }} is running
docker_container:
name: "{{ minio_container_name }}"
image: "{{ minio_container_image }}"
volumes: "{{ minio_container_volumes }}"
env: "{{ minio_container_env }}"
labels: "{{ minio_container_labels }}"
networks: "{{ minio_container_networks }}"
ports: "{{ minio_container_ports }}"
user: "{{ minio_user|default(omit, True) }}"
command: "{{ minio_container_command }}"
restart_policy: "{{ minio_container_restart_policy }}"
state: started


@ -0,0 +1,5 @@
---
minio_container_volumes: "{{ minio_container_base_volumes + minio_container_extra_volumes }}"
minio_container_env: "{{ minio_container_base_env | combine(minio_container_extra_env) }}"


@ -1,21 +0,0 @@
# `finallycoffee.services.openproject` ansible role
Deploys [openproject](https://www.openproject.org/) using docker-compose.
## Configuration
To set configuration variables for OpenProject, set them in `openproject_compose_overrides`:
```yaml
openproject_compose_overrides:
version: "3.7"
services:
proxy:
[...]
volumes:
pgdata:
driver: local
driver_opts:
o: bind
type: none
device: /var/lib/postgresql
```


@ -1,11 +0,0 @@
---
openproject_base_path: "/opt/openproject"
openproject_upstream_git_url: "https://github.com/opf/openproject-deploy.git"
openproject_upstream_git_branch: "stable/14"
openproject_compose_project_path: "{{ openproject_base_path }}"
openproject_compose_project_name: "openproject"
openproject_compose_project_env_file: "{{ openproject_compose_project_path }}/.env"
openproject_compose_project_override_file: "{{ openproject_compose_project_path }}/docker-compose.override.yml"
openproject_compose_project_env: {}


@ -1,38 +0,0 @@
---
- name: Ensure base directory '{{ openproject_base_path }}' is present
ansible.builtin.file:
path: "{{ openproject_base_path }}"
state: directory
- name: Ensure upstream repository is cloned
ansible.builtin.git:
dest: "{{ openproject_base_path }}"
repo: "{{ openproject_upstream_git_url }}"
version: "{{ openproject_upstream_git_branch }}"
clone: true
depth: 1
- name: Ensure environment is configured
ansible.builtin.lineinfile:
line: "{{ item.key }}={{ item.value }}"
path: "{{ openproject_compose_project_env_file }}"
state: present
create: true
loop: "{{ openproject_compose_project_env | dict2items(key_name='key', value_name='value') }}"
- name: Ensure docker compose overrides are set
ansible.builtin.copy:
dest: "{{ openproject_compose_project_override_file }}"
content: "{{ openproject_compose_overrides | default({}) | to_nice_yaml }}"
- name: Ensure containers are pulled
community.docker.docker_compose_v2:
project_src: "{{ openproject_compose_project_path }}"
project_name: "{{ openproject_compose_project_name }}"
pull: "missing"
- name: Ensure services are running
community.docker.docker_compose_v2:
project_src: "{{ openproject_compose_project_path }}"
project_name: "{{ openproject_compose_project_name }}"
state: "present"

roles/restic/README.md

@ -0,0 +1,77 @@
# `finallycoffee.services.restic`
Ansible role for backing up data using `restic`, utilizing `systemd` timers for scheduling.
## Overview
As restic encrypts the data before storing it, `restic_repo_password` needs
to be populated with a strong key and stored safely, as only this key can
be used to decrypt the data for a restore!
### Backends
#### S3 Backend
To use a `s3`-compatible backend like AWS buckets or minio, both `restic_s3_key_id`
and `restic_s3_access_key` need to be populated, and the `restic_repo_url` has the
format `s3:https://my.s3.endpoint:port/bucket-name`.
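For example (a sketch; endpoint, bucket, and credentials are placeholders and would normally live in a vault):
```yaml
restic_repo_url: "s3:https://s3.example.org:9000/my-backups"
restic_repo_password: "{{ vault_restic_repo_password }}"
restic_s3_key_id: "{{ vault_restic_s3_key_id }}"
restic_s3_access_key: "{{ vault_restic_s3_access_key }}"
```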
#### SFTP Backend
Using the `sftp` backend requires the configured `restic_user` to be able to
authenticate to the configured SFTP-Server using password-less methods like
publickey-authentication. The `restic_repo_url` then follows the format
`sftp:{user}@{server}:/my-restic-repository` (or without the leading `/` for paths
relative to the `{user}`'s home directory).
### Backing up data
A job name like `$service-postgres` or similar needs to be set in `restic_job_name`,
which is used for naming the `systemd` units, their syslog identifiers etc.
If backing up filesystem locations, the paths need to be specified in
`restic_backup_paths` as lists of strings representing absolute filesystem
locations.
If backing up e.g. a database or other data which is generated by a command
like `pg_dump`, use `restic_backup_stdin_command` (which needs to write the backup
to `stdout`) in conjunction with `restic_backup_stdin_command_filename` to name
the resulting output (required).
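For example, a postgres dump job could be configured roughly like this (a sketch; job name, database, and user are placeholders):
```yaml
restic_job_name: myapp-postgres
restic_backup_stdin_command: "pg_dump --username=myapp myapp"
restic_backup_stdin_command_filename: "myapp.sql"
```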
### Policy
The backup policy can be adjusted by overriding the `restic_policy_keep_*`
variables, with the defaults being:
```yaml
restic_policy_keep_all_within: 1d
restic_policy_keep_hourly: 6
restic_policy_keep_daily: 2
restic_policy_keep_weekly: 7
restic_policy_keep_monthly: 4
restic_policy_backup_frequency: hourly
```
**Note:** `restic_policy_backup_frequency` must conform to `systemd`'s
`OnCalendar` syntax, which can be checked using `systemd-analyze calendar $x`.
## Role behaviour
Per default, when the systemd unit for a job changes, the job is not immediately
started. This can be overridden using `restic_start_job_on_unit_change: true`,
which will immediately start the backup job if its configuration changed.
The systemd unit runs as `restic_user`, which is root by default, guaranteeing
that filesystem paths are always readable. The `restic_user` can be overridden,
but care needs to be taken to ensure the user can read all the provided
filesystem paths and is allowed to execute the backup command.
If ansible should create the user, set `restic_create_user` to `true`, which
will attempt to create the `restic_user` as a system user.
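For example, to let the role create a dedicated user and start the job right after its unit changed (a sketch, user name is a placeholder):
```yaml
restic_user: restic-backup
restic_create_user: true
restic_start_job_on_unit_change: true
```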
### Installing
For Debian and RedHat, the role attempts to install restic using the default
package manager's ansible module (apt/dnf). For other distributions, the generic
`package` module tries to install `restic_package_name` (default: `restic`),
which can be overridden if needed.


@ -0,0 +1,37 @@
---
restic_repo_url: ~
restic_repo_password: ~
restic_s3_key_id: ~
restic_s3_access_key: ~
restic_backup_paths: []
restic_backup_stdin_command: ~
restic_backup_stdin_command_filename: ~
restic_policy_keep_all_within: 1d
restic_policy_keep_hourly: 6
restic_policy_keep_daily: 2
restic_policy_keep_weekly: 7
restic_policy_keep_monthly: 4
restic_policy_backup_frequency: hourly
restic_policy:
keep_within: "{{ restic_policy_keep_all_within }}"
hourly: "{{ restic_policy_keep_hourly }}"
daily: "{{ restic_policy_keep_daily }}"
weekly: "{{ restic_policy_keep_weekly }}"
monthly: "{{ restic_policy_keep_monthly }}"
frequency: "{{ restic_policy_backup_frequency }}"
restic_user: root
restic_create_user: false
restic_start_job_on_unit_change: false
restic_job_name: ~
restic_job_description: "Restic backup job for {{ restic_job_name }}"
restic_systemd_unit_naming_scheme: "restic.{{ restic_job_name }}"
restic_systemd_working_directory: /tmp
restic_systemd_syslog_identifier: "restic-{{ restic_job_name }}"
restic_package_name: restic


@ -0,0 +1,13 @@
---
- name: Ensure system daemon is reloaded
listen: reload-systemd
systemd:
daemon_reload: true
- name: Ensure systemd service for '{{ restic_job_name }}' is started immediately
listen: trigger-restic
systemd:
name: "{{ restic_systemd_unit_naming_scheme }}.service"
state: started
when: restic_start_job_on_unit_change


@ -0,0 +1,90 @@
---
- name: Ensure {{ restic_user }} system user exists
user:
name: "{{ restic_user }}"
state: present
system: true
when: restic_create_user
- name: Ensure only one of backup_paths and backup_stdin_command is populated
when: restic_backup_paths|length > 0 and restic_backup_stdin_command
fail:
msg: "Setting both `restic_backup_paths` and `restic_backup_stdin_command` is not supported"
- name: Ensure a filename for stdin_command backup is given
when: restic_backup_stdin_command and not restic_backup_stdin_command_filename
fail:
msg: "`restic_backup_stdin_command` was set but no filename for the resulting output was supplied in `restic_backup_stdin_command_filename`"
- name: Ensure backup frequency adheres to systemd's OnCalendar syntax
command:
cmd: "systemd-analyze calendar {{ restic_policy.frequency }}"
register: systemd_calendar_parse_res
failed_when: systemd_calendar_parse_res.rc != 0
changed_when: false
- name: Ensure restic is installed
block:
- name: Ensure restic is installed via apt
apt:
package: restic
state: latest
when: ansible_os_family == 'Debian'
- name: Ensure restic is installed via dnf
dnf:
name: restic
state: latest
when: ansible_os_family == 'RedHat'
- name: Ensure restic is installed using the auto-detected package-manager
package:
name: "{{ restic_package_name }}"
state: present
when: ansible_os_family not in ['RedHat', 'Debian']
- name: Ensure systemd service file for '{{ restic_job_name }}' is templated
template:
dest: "/etc/systemd/system/{{ service.unit_name }}.service"
src: "{{ service.file }}"
owner: root
group: root
mode: 0640
notify:
- reload-systemd
- trigger-restic
loop:
- unit_name: "{{ restic_systemd_unit_naming_scheme }}"
file: restic.service.j2
- unit_name: "{{ restic_systemd_unit_naming_scheme }}-unlock"
file: restic-unlock.service.j2
loop_control:
loop_var: service
label: "{{ service.file }}"
- name: Ensure systemd timer file for '{{ restic_job_name }}' is templated
template:
dest: "/etc/systemd/system/{{ restic_systemd_unit_naming_scheme }}.timer"
src: restic.timer.j2
owner: root
group: root
mode: 0640
notify:
- reload-systemd
- name: Flush handlers to ensure systemd knows about '{{ restic_job_name }}'
meta: flush_handlers
- name: Ensure systemd service for unlocking repository for '{{ restic_job_name }}' is enabled
systemd:
name: "{{ restic_systemd_unit_naming_scheme }}-unlock.service"
enabled: true
- name: Ensure systemd timer for '{{ restic_job_name }}' is activated
systemd:
name: "{{ restic_systemd_unit_naming_scheme }}.timer"
enabled: true
- name: Ensure systemd timer for '{{ restic_job_name }}' is started
systemd:
name: "{{ restic_systemd_unit_naming_scheme }}.timer"
state: started


@ -0,0 +1,21 @@
[Unit]
Description={{ restic_job_description }} - Unlock after reboot job
[Service]
Type=oneshot
User={{ restic_user }}
WorkingDirectory={{ restic_systemd_working_directory }}
SyslogIdentifier={{ restic_systemd_syslog_identifier }}
Environment=RESTIC_REPOSITORY={{ restic_repo_url }}
Environment=RESTIC_PASSWORD={{ restic_repo_password }}
{% if restic_s3_key_id and restic_s3_access_key %}
Environment=AWS_ACCESS_KEY_ID={{ restic_s3_key_id }}
Environment=AWS_SECRET_ACCESS_KEY={{ restic_s3_access_key }}
{% endif %}
ExecStartPre=-/bin/sh -c '/usr/bin/restic snapshots || /usr/bin/restic init'
ExecStart=/usr/bin/restic unlock
[Install]
WantedBy=multi-user.target


@ -0,0 +1,28 @@
[Unit]
Description={{ restic_job_description }}
[Service]
Type=oneshot
User={{ restic_user }}
WorkingDirectory={{ restic_systemd_working_directory }}
SyslogIdentifier={{ restic_systemd_syslog_identifier }}
Environment=RESTIC_REPOSITORY={{ restic_repo_url }}
Environment=RESTIC_PASSWORD={{ restic_repo_password }}
{% if restic_s3_key_id and restic_s3_access_key %}
Environment=AWS_ACCESS_KEY_ID={{ restic_s3_key_id }}
Environment=AWS_SECRET_ACCESS_KEY={{ restic_s3_access_key }}
{% endif %}
ExecStartPre=-/bin/sh -c '/usr/bin/restic snapshots || /usr/bin/restic init'
{% if restic_backup_stdin_command %}
ExecStart=/bin/sh -c '{{ restic_backup_stdin_command }} | /usr/bin/restic backup --verbose --stdin --stdin-filename {{ restic_backup_stdin_command_filename }}'
{% else %}
ExecStart=/usr/bin/restic --verbose backup {{ restic_backup_paths | join(' ') }}
{% endif %}
ExecStartPost=/usr/bin/restic forget --prune --keep-within={{ restic_policy.keep_within }} --keep-hourly={{ restic_policy.hourly }} --keep-daily={{ restic_policy.daily }} --keep-weekly={{ restic_policy.weekly }} --keep-monthly={{ restic_policy.monthly }}
ExecStartPost=-/usr/bin/restic snapshots
ExecStartPost=/usr/bin/restic check
[Install]
WantedBy=multi-user.target


@ -0,0 +1,10 @@
[Unit]
Description=Run {{ restic_job_name }}
[Timer]
OnCalendar={{ restic_policy.frequency }}
Persistent=True
Unit={{ restic_systemd_unit_naming_scheme }}.service
[Install]
WantedBy=timers.target


@ -1,46 +0,0 @@
# `finallycoffee.services.snipe_it` ansible role
[Snipe-IT](https://snipeitapp.com/) is an open-source asset management system with
a powerful JSON REST API. This ansible role deploys and configures Snipe-IT.
## Requirements
Snipe-IT requires a MySQL-compatible database like MariaDB and a working email service
for sending mail. For installing and configuring MariaDB, see
[`finallycoffee.base.mariadb`](https://galaxy.ansible.com/ui/repo/published/finallycoffee/base/content/role/mariadb/).
## Configuration
Required variables to set are:
- `snipe_it_domain` - domain name of the snipe-it instance
- `snipe_it_config_app_url` - URL where snipe-it will be reachable including protocol and port
- `snipe_it_config_app_key` - Laravel application key
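A minimal sketch of these required variables (domain and key are placeholders; a Laravel application key can usually be generated with `php artisan key:generate --show`):
```yaml
snipe_it_domain: assets.example.org
snipe_it_config_app_url: "https://assets.example.org"
snipe_it_config_app_key: "base64:REPLACE_WITH_GENERATED_KEY"
```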
### Database configuration
All (database) options from the upstream laravel `.env` file are available
under the `snipe_it_config_db_*` prefix. Configure a database as follows:
```yaml
snipe_it_config_db_host: localhost # defaults to localhost
snipe_it_config_db_port: "3306" # defaults to 3306
snipe_it_config_db_database: my_snipe_db_name # defaults to 'snipeit'
snipe_it_config_db_username: my_snipe_db_user # defaults to 'snipeit'
snipe_it_config_db_password: my_snipe_db_password
# Set this if the database is shared with
# other applications. defaults to not set
snipe_it_config_db_prefix: snipe_
```
### Email configuration
Configuring an email server is mandatory. An example is provided below:
```yaml
snipe_it_config_mail_host: smtp.example.com
snipe_it_config_mail_username: snipe_user@snipe.example.com
snipe_it_config_mail_password: i_want_to_be_strong_and_long
snipe_it_config_mail_from_addr: "noreply@snipe.example.com"
snipe_it_config_mail_from_name: "Example.com SnipeIT instance"
```
The default smtp port is `587` and can be set in `snipe_it_config_mail_port`.


@ -1,131 +0,0 @@
---
snipe_it_config_app_version: "v{{ snipe_it_version }}"
snipe_it_config_app_port: 8000
snipe_it_config_app_env: "production"
snipe_it_config_app_debug: false
snipe_it_config_app_key: ~
snipe_it_config_app_url: "http://localhost:{{ snipe_it_config_app_port }}"
snipe_it_config_app_timezone: UTC
snipe_it_config_app_locale: en-US
snipe_it_config_app_locked: false
snipe_it_config_app_cipher: "AES-256-GCM"
snipe_it_config_app_force_tls: false
snipe_it_config_app_trusted_proxies:
- '192.168.0.0/16'
- '172.16.0.0/12'
- '10.0.0.0/8'
snipe_it_config_db_connection: mysql
snipe_it_config_db_host: localhost
snipe_it_config_db_port: "3306"
snipe_it_config_db_database: snipeit
snipe_it_config_db_username: snipeit
snipe_it_config_db_password: ~
snipe_it_config_db_prefix: ~
snipe_it_config_db_dump_path: /usr/bin/
snipe_it_config_db_charset: utf8mb4
snipe_it_config_db_collation: utf8mb4_unicode_ci
snipe_it_config_db_ssl: false
snipe_it_config_db_ssl_is_paas: false
snipe_it_config_db_ssl_key_path: ~
snipe_it_config_db_ssl_cert_path: ~
snipe_it_config_db_ssl_ca_path: ~
snipe_it_config_db_ssl_cipher: ~
snipe_it_config_db_ssl_verify_server: ~
snipe_it_config_mail_mailer: smtp
snipe_it_config_mail_host: ~
snipe_it_config_mail_port: 587
snipe_it_config_mail_username: ~
snipe_it_config_mail_password: ~
snipe_it_config_mail_tls_verify_peer: true
snipe_it_config_mail_from_addr: ~
snipe_it_config_mail_from_name: ~
snipe_it_config_mail_replyto_addr: "{{ snipe_it_config_mail_from_addr }}"
snipe_it_config_mail_replyto_name: "{{ snipe_it_config_mail_from_name }}"
snipe_it_config_mail_auto_embed_method: attachment
snipe_it_config_mail_backup_notification_driver: ~
snipe_it_config_mail_backup_notification_address: ~
snipe_it_config_private_filesystem_disk: "local"
snipe_it_config_public_filesystem_disk: "local_public"
snipe_it_config_allow_backup_delete: false
snipe_it_config_allow_data_purge: false
snipe_it_config_image_lib: 'gd'
snipe_it_config_log_channel: 'stderr'
snipe_it_config_log_max_days: 10
snipe_it_config_cookie_name: "_snipe_session"
snipe_it_config_cookie_domain: "{{ snipe_it_domain }}"
snipe_it_config_secure_cookies: true
snipe_it_config_session_driver: file
snipe_it_config_session_lifetime: 12000
snipe_it_config_cache_driver: file
snipe_it_config_cache_prefix: snipeit
snipe_it_config_queue_driver: file
snipe_it_base_config:
APP_VERSION: "{{ snipe_it_config_app_version }}"
APP_PORT: "{{ snipe_it_config_app_port }}"
APP_ENV: "{{ snipe_it_config_app_env }}"
APP_DEBUG: "{{ snipe_it_config_app_debug }}"
APP_KEY: "{{ snipe_it_config_app_key }}"
APP_URL: "{{ snipe_it_config_app_url }}"
APP_TIMEZONE: "{{ snipe_it_config_app_timezone }}"
APP_LOCALE: "{{ snipe_it_config_app_locale }}"
APP_LOCKED: "{{ snipe_it_config_app_locked }}"
APP_CIPHER: "{{ snipe_it_config_app_cipher }}"
APP_FORCE_TLS: "{{ snipe_it_config_app_force_tls }}"
APP_TRUSTED_PROXIES: "{{ snipe_it_config_app_trusted_proxies | join(',') }}"
DB_CONNECTION: "{{ snipe_it_config_db_connection }}"
DB_HOST: "{{ snipe_it_config_db_host }}"
DB_PORT: "{{ snipe_it_config_db_port }}"
DB_DATABASE: "{{ snipe_it_config_db_database }}"
DB_USERNAME: "{{ snipe_it_config_db_username }}"
DB_PASSWORD: "{{ snipe_it_config_db_password }}"
DB_PREFIX: "{{ snipe_it_config_db_prefix | default('null', true) }}"
DB_DUMP_PATH: "{{ snipe_it_config_db_dump_path }}"
DB_CHARSET: "{{ snipe_it_config_db_charset }}"
DB_COLLATION: "{{ snipe_it_config_db_collation }}"
DB_SSL: "{{ snipe_it_config_db_ssl }}"
DB_SSL_IS_PAAS: "{{ snipe_it_config_db_ssl_is_paas }}"
DB_SSL_KEY_PATH: "{{ snipe_it_config_db_ssl_key_path | default('null', true) }}"
DB_SSL_CERT_PATH: "{{ snipe_it_config_db_ssl_cert_path | default('null', true) }}"
DB_SSL_CA_PATH: "{{ snipe_it_config_db_ssl_ca_path | default('null', true) }}"
DB_SSL_CIPHER: "{{ snipe_it_config_db_ssl_cipher | default('null', true) }}"
DB_SSL_VERIFY_SERVER: "{{ snipe_it_config_db_ssl_verify_server | default('null', true) }}"
MAIL_MAILER: "{{ snipe_it_config_mail_mailer }}"
MAIL_HOST: "{{ snipe_it_config_mail_host }}"
MAIL_PORT: "{{ snipe_it_config_mail_port }}"
MAIL_USERNAME: "{{ snipe_it_config_mail_username }}"
MAIL_PASSWORD: "{{ snipe_it_config_mail_password }}"
MAIL_TLS_VERIFY_PEER: "{{ snipe_it_config_mail_tls_verify_peer }}"
MAIL_FROM_ADDR: "{{ snipe_it_config_mail_from_addr | default('null', true) }}"
MAIL_FROM_NAME: "{{ snipe_it_config_mail_from_name | default('null', true) }}"
MAIL_REPLYTO_ADDR: "{{ snipe_it_config_mail_replyto_addr | default('null', true) }}"
MAIL_REPLYTO_NAME: "{{ snipe_it_config_mail_replyto_name | default('null', true) }}"
MAIL_AUTO_EMBED_METHOD: "{{ snipe_it_config_mail_auto_embed_method }}"
MAIL_BACKUP_NOTIFICATION_DRIVER: "{{ snipe_it_config_mail_backup_notification_driver }}"
MAIL_BACKUP_NOTIFICATION_ADDRESS: "{{ snipe_it_config_mail_backup_notification_address }}"
SESSION_DRIVER: "{{ snipe_it_config_session_driver }}"
SESSION_LIFETIME: "{{ snipe_it_config_session_lifetime }}"
CACHE_DRIVER: "{{ snipe_it_config_cache_driver }}"
CACHE_PREFIX: "{{ snipe_it_config_cache_prefix }}"
QUEUE_DRIVER: "{{ snipe_it_config_queue_driver }}"
PRIVATE_FILESYSTEM_DISK: "{{ snipe_it_config_private_filesystem_disk }}"
PUBLIC_FILESYSTEM_DISK: "{{ snipe_it_config_public_filesystem_disk }}"
ALLOW_BACKUP_DELETE: "{{ snipe_it_config_allow_backup_delete }}"
ALLOW_DATA_PURGE: "{{ snipe_it_config_allow_data_purge }}"
IMAGE_LIB: "{{ snipe_it_config_image_lib }}"
LOG_CHANNEL: "{{ snipe_it_config_log_channel }}"
LOG_MAX_DAYS: "{{ snipe_it_config_log_max_days }}"
COOKIE_NAME: "{{ snipe_it_config_cookie_name }}"
COOKIE_DOMAIN: "{{ snipe_it_config_cookie_domain }}"
SECURE_COOKIES: "{{ snipe_it_config_secure_cookies }}"
snipe_it_config: ~
snipe_it_merged_config: >-2
{{ (snipe_it_base_config | default({}, true))
| combine((snipe_it_config | default({}, true)), recursive=True) }}


@ -1,48 +0,0 @@
---
snipe_it_container_image_registry: docker.io
snipe_it_container_image_namespace: snipe
snipe_it_container_image_name: 'snipe-it'
snipe_it_container_image_tag: ~
snipe_it_container_image_flavour: alpine
snipe_it_container_image_source: pull
snipe_it_container_image_force_source: >-2
{{ snipe_it_container_image_tag | default(false, true) | bool }}
snipe_it_container_image: >-2
{{
([
snipe_it_container_image_registry | default([], true),
snipe_it_container_image_namespace | default([], true),
snipe_it_container_image_name,
] | flatten | join('/'))
+ ':'
+ (snipe_it_container_image_tag | default(
'v' + snipe_it_version + (
((snipe_it_container_image_flavour is string)
and (snipe_it_container_image_flavour | length > 0))
| ternary(
'-' + snipe_it_container_image_flavour | default('', true),
''
)
),
true
))
}}
snipe_it_container_env_file: "/var/www/html/.env"
snipe_it_container_data_directory: "/var/lib/snipeit/"
snipe_it_container_volumes:
- "{{ snipe_it_data_directory }}:{{ snipe_it_container_data_directory }}:z"
snipe_it_container_name: 'snipe-it'
snipe_it_container_state: >-2
{{ (snipe_it_state == 'present') | ternary('started', 'absent') }}
snipe_it_container_env: ~
snipe_it_container_user: ~
snipe_it_container_ports: ~
snipe_it_container_labels: ~
snipe_it_container_recreate: ~
snipe_it_container_networks: ~
snipe_it_container_etc_hosts: ~
snipe_it_container_dns_servers: ~
snipe_it_container_network_mode: ~
snipe_it_container_restart_policy: 'unless-stopped'


@ -1,9 +0,0 @@
---
snipe_it_user: snipeit
snipe_it_version: "7.1.15"
snipe_it_domain: ~
snipe_it_state: present
snipe_it_deployment_method: docker
snipe_it_env_file: /etc/snipeit/env
snipe_it_data_directory: /var/lib/snipeit


@ -1,5 +0,0 @@
---
snipe_it_run_user_id: >-2
{{ snipe_it_user_info.uid | default(snipe_it_user) }}
snipe_it_run_group_id: >-2
{{ snipe_it_user_info.group | default(snipe_it_user) }}


@ -1,12 +0,0 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
role_name: snipe_it
description: >-2
Deploy Snipe-IT, an open-source asset / license management system with
powerful JSON REST API
galaxy_tags:
- snipeit
- asset-management
- docker


@ -1,14 +0,0 @@
---
- name: Ensure state is valid
ansible.builtin.fail:
msg: >-2
Unsupported state '{{ snipe_it_state }}'!
Supported states are {{ snipe_it_states | join(', ') }}.
when: snipe_it_state is not in snipe_it_states
- name: Ensure deployment method is valid
ansible.builtin.fail:
msg: >-2
Unsupported deployment_method '{{ snipe_it_deployment_method }}'!
Supported values are {{ snipe_it_deployment_methods | join(', ') }}.
when: snipe_it_deployment_method is not in snipe_it_deployment_methods


@ -1,30 +0,0 @@
---
- name: Ensure container image '{{ snipe_it_container_image }}' is {{ snipe_it_state }}
community.docker.docker_image:
name: "{{ snipe_it_container_image }}"
state: "{{ snipe_it_state }}"
source: "{{ snipe_it_container_image_source }}"
force_source: "{{ snipe_it_container_image_force_source }}"
register: snipe_it_container_image_info
until: snipe_it_container_image_info is success
retries: 5
delay: 3
- name: Ensure container '{{ snipe_it_container_name }}' is {{ snipe_it_container_state }}
community.docker.docker_container:
name: "{{ snipe_it_container_name }}"
image: "{{ snipe_it_container_image }}"
env_file: "{{ snipe_it_env_file }}"
env: "{{ snipe_it_container_env | default(omit, true) }}"
user: "{{ snipe_it_container_user | default(omit, true) }}"
ports: "{{ snipe_it_container_ports | default(omit, true) }}"
labels: "{{ snipe_it_container_labels | default(omit, true) }}"
volumes: "{{ snipe_it_container_volumes | default(omit, true) }}"
networks: "{{ snipe_it_container_networks | default(omit, true) }}"
etc_hosts: "{{ snipe_it_container_etc_hosts | default(omit, true) }}"
dns_servers: "{{ snipe_it_container_dns_servers | default(omit, true) }}"
network_mode: "{{ snipe_it_container_network_mode | default(omit, true) }}"
restart_policy: >-2
{{ snipe_it_container_restart_policy | default(omit, true) }}
recreate: "{{ snipe_it_container_recreate | default(omit, true) }}"
state: "{{ snipe_it_container_state }}"


@ -1,59 +0,0 @@
---
- name: Check preconditions
ansible.builtin.include_tasks:
file: "check.yml"
- name: Ensure snipe-it user '{{ snipe_it_user }}' is {{ snipe_it_state }}
ansible.builtin.user:
name: "{{ snipe_it_user }}"
state: "{{ snipe_it_state }}"
system: "{{ snipe_it_user_system | default(true, true) }}"
create_home: "{{ snipe_it_user_create_home | default(false, true) }}"
groups: "{{ snipe_it_user_groups | default(omit, true) }}"
append: >-2
{{
snipe_it_user_groups_append | default(
snipe_it_user_groups | default([], true) | length > 0,
true,
)
}}
register: snipe_it_user_info
- name: Ensure snipe-it environment file is {{ snipe_it_state }}
ansible.builtin.file:
path: "{{ snipe_it_env_file }}"
state: "{{ snipe_it_state }}"
when: snipe_it_state == 'absent'
- name: Ensure snipe-it config directory is {{ snipe_it_state }}
ansible.builtin.file:
path: "{{ snipe_it_env_file | dirname }}"
state: "{{ (snipe_it_state == 'present') | ternary('directory', 'absent') }}"
owner: "{{ snipe_it_run_user_id }}"
group: "{{ snipe_it_run_group_id }}"
mode: "0755"
when: snipe_it_state == 'present'
- name: Ensure snipe-it data directory '{{ snipe_it_data_directory }}' is {{ snipe_it_state }}
ansible.builtin.file:
path: "{{ snipe_it_data_directory }}"
state: "{{ (snipe_it_state == 'present') | ternary('directory', 'absent') }}"
owner: "{{ snipe_it_run_user_id }}"
group: "{{ snipe_it_run_group_id }}"
mode: "0755"
- name: Ensure snipe-it environment file is templated
ansible.builtin.copy:
content: |+2
{% for entry in snipe_it_merged_config | dict2items %}
{{ entry.key }}={{ entry.value }}
{% endfor %}
dest: "{{ snipe_it_env_file }}"
owner: "{{ snipe_it_run_user_id }}"
group: "{{ snipe_it_run_group_id }}"
mode: "0640"
when: snipe_it_state == 'present'
- name: Deploy using {{ snipe_it_deployment_method }}
ansible.builtin.include_tasks:
file: "deploy-{{ snipe_it_deployment_method }}.yml"


@ -1,6 +0,0 @@
---
snipe_it_states:
- present
- absent
snipe_it_deployment_methods:
- docker


@ -1,54 +0,0 @@
# `finallycoffee.services.vaultwarden` ansible role
Vaultwarden is an unofficial (not associated with Bitwarden) Bitwarden-API-compatible
server backend, formerly called `bitwarden_rs`, written in Rust.
This ansible role can deploy and configure `vaultwarden`, and supports removing
the deployment again using `vaultwarden_state: absent` (Warning: it does not ask for
confirmation, and will remove all user data when instructed to remove it).
## Configuration
To use this role, the following variables need to be populated:
- `vaultwarden_config_domain` - always required. Changing this will break two-factor methods registered in the past.
- `vaultwarden_config_admin_token` - if `vaultwarden_config_disable_admin_token` is `false`.
Setting other configuration values for vaultwarden can be done using role-provided flattened keys in the
`vaultwarden_config_*` namespace (see [`defaults/main/config.yml`](defaults/main/config.yml) for available variables),
or by setting the configuration directly in the same structure as the `config.json` would be in `vaultwarden_config`.
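As a sketch, both styles can be mixed; the flattened keys cover the common options and the `vaultwarden_config` dict is merged on top (values below are placeholders):
```yaml
vaultwarden_config_domain: "https://vault.example.org"
vaultwarden_config_signups_allowed: true
vaultwarden_config:
  invitation_org_name: "Example.org"
```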
### Email
Configure mailing by first enabling SMTP using `vaultwarden_config_enable_smtp: true`,
then configure your email server like this:
```yaml
vaultwarden_config:
smtp_host: "mail.example.com"
smtp_explicit_tls: true
smtp_port: 465
smtp_from: "noreply+vaultwarden@example.com"
smtp_from_name: "'Example.com Vaultwarden instance' <noreply+vaultwarden@example.com>"
smtp_username: vaultwarden@example.com
smtp_password: i_hope_i_will_be_a_strong_one!
helo_name: "{{ vaultwarden_config_domain }}"
```
### 2FA via email
To enable email-based two-factor-authentication, set `vaultwarden_config_enable_email_2fa: true`
and optionally set the following configuration:
```yaml
vaultwarden_config:
email_token_size: 8
email_expiration_time: 300 # 300 seconds = 5min
email_attempts_limit: 3
```
### Feature flags
To enable more authentication methods, toggles are provided in
[`vaultwarden_config_enable_*`](defaults/main/config.yml#L18).
It is generally recommended to simply keep unused methods off.
Per default, 'Sends' are allowed.


@ -1,68 +0,0 @@
---
# Required configuration
vaultwarden_config_domain: ~
vaultwarden_config_admin_token: ~
# Invitations and signups
vaultwarden_config_invitations_allowed: false
vaultwarden_config_invitation_org_name: ~
vaultwarden_config_signups_allowed: false
vaultwarden_config_signups_verify: true
vaultwarden_config_signups_verify_resend_time: 3600
vaultwarden_config_signups_verify_resend_limit: 5
# Entry preview icons
vaultwarden_config_disable_icon_download: true
vaultwarden_config_icon_cache_ttl: 604800 # 7 days
vaultwarden_config_icon_cache_negttl: 259200 # 3 days
vaultwarden_config_icon_download_timeout: 30 # seconds
vaultwarden_config_icon_blacklist_non_global_ips: true
# Features
vaultwarden_config_sends_allowed: true
vaultwarden_config_enable_yubico: false
vaultwarden_config_enable_duo: false
vaultwarden_config_enable_smtp: false
vaultwarden_config_enable_email_2fa: false
# Security
vaultwarden_config_password_iterations: 100000
vaultwarden_config_show_password_hint: false
vaultwarden_config_disable_2fa_remember: false
vaultwarden_config_disable_admin_token: true
vaultwarden_config_require_device_email: false
vaultwarden_config_authenticator_disable_time_drift: true
# Other
vaultwarden_config_log_timestamp_format: "%Y-%m-%d %H:%M:%S.%3f"
vaultwarden_config_ip_header: "X-Real-IP"
vaultwarden_config_reload_templates: false
vaultwarden_base_config:
domain: "{{ vaultwarden_config_domain }}"
admin_token: "{{ vaultwarden_config_admin_token }}"
invitations_allowed: "{{ vaultwarden_config_invitations_allowed }}"
invitation_org_name: "{{ vaultwarden_config_invitation_org_name | default('', true) }}"
signups_allowed: "{{ vaultwarden_config_signups_allowed }}"
signups_verify: "{{ vaultwarden_config_signups_verify }}"
signups_verify_resend_time: "{{ vaultwarden_config_signups_verify_resend_time }}"
signups_verify_resend_limit: "{{ vaultwarden_config_signups_verify_resend_limit }}"
disable_icon_download: "{{ vaultwarden_config_disable_icon_download }}"
icon_cache_ttl: "{{ vaultwarden_config_icon_cache_ttl }}"
icon_cache_negttl: "{{ vaultwarden_config_icon_cache_negttl }}"
icon_download_timeout: "{{ vaultwarden_config_icon_download_timeout }}"
icon_blacklist_non_global_ips: "{{ vaultwarden_config_icon_blacklist_non_global_ips }}"
password_iterations: "{{ vaultwarden_config_password_iterations }}"
show_password_hint: "{{ vaultwarden_config_show_password_hint }}"
disable_2fa_remember: "{{ vaultwarden_config_disable_2fa_remember }}"
disable_admin_token: "{{ vaultwarden_config_disable_admin_token }}"
require_device_email: "{{ vaultwarden_config_require_device_email }}"
authenticator_disable_time_drift: "{{ vaultwarden_config_authenticator_disable_time_drift }}"
ip_header: "{{ vaultwarden_config_ip_header }}"
log_timestamp_format: "{{ vaultwarden_config_log_timestamp_format }}"
reload_templates: "{{ vaultwarden_config_reload_templates }}"
sends_allowed: "{{ vaultwarden_config_sends_allowed }}"
_enable_yubico: "{{ vaultwarden_config_enable_yubico }}"
_enable_duo: "{{ vaultwarden_config_enable_duo }}"
_enable_smtp: "{{ vaultwarden_config_enable_smtp }}"
_enable_email_2fa: "{{ vaultwarden_config_enable_email_2fa }}"
vaultwarden_config: ~
vaultwarden_merged_config: >-2
{{ vaultwarden_base_config | default({}, true)
| combine(vaultwarden_config | default({}, true), recursive=true) }}


@ -1,50 +0,0 @@
---
vaultwarden_container_image_registry: docker.io
vaultwarden_container_image_namespace: vaultwarden
vaultwarden_container_image_name: server
vaultwarden_container_image_tag: ~
vaultwarden_container_image_flavour: alpine
vaultwarden_container_image_source: pull
vaultwarden_container_image_force_source: >-2
{{ vaultwarden_container_image_tag | default(false, true) | bool }}
vaultwarden_container_image: >-2
{{
([
vaultwarden_container_image_registry | default([], true),
vaultwarden_container_image_namespace | default([], true),
vaultwarden_container_image_name,
] | flatten | join('/'))
+ ':'
+ (vaultwarden_container_image_tag | default(
vaultwarden_version + (
((vaultwarden_container_image_flavour is string)
and (vaultwarden_container_image_flavour | length > 0))
| ternary(
'-' + vaultwarden_container_image_flavour | default('', true),
''
)
),
true
))
}}
vaultwarden_container_name: vaultwarden
vaultwarden_container_env: ~
vaultwarden_container_user: >-2
{{ vaultwarden_run_user_id }}:{{ vaultwarden_run_group_id }}
vaultwarden_container_ports: ~
vaultwarden_container_labels: ~
vaultwarden_container_networks: ~
vaultwarden_container_etc_hosts: ~
vaultwarden_container_dns_servers: ~
vaultwarden_container_restart_policy: >-2
{{ (vaultwarden_deployment_method == 'docker') | ternary(
'unless-stopped',
'on-failure',
)
}}
vaultwarden_container_state: >-2
{{ (vaultwarden_state == 'present') | ternary('started', 'absent') }}
vaultwarden_container_volumes:
- "{{ vaultwarden_data_directory }}:/data:rw"
- "{{ vaultwarden_config_file }}:/data/config.json:ro"


@ -1,10 +0,0 @@
---
vaultwarden_user: vaultwarden
vaultwarden_version: "1.32.7"
vaultwarden_config_file: "/etc/vaultwarden/config.json"
vaultwarden_config_directory: "{{ vaultwarden_config_file | dirname }}"
vaultwarden_data_directory: "/var/lib/vaultwarden"
vaultwarden_state: present
vaultwarden_deployment_method: docker


@ -1,5 +0,0 @@
---
vaultwarden_run_user_id: >-2
{{ vaultwarden_user_info.uid | default(vaultwarden_user, true) }}
vaultwarden_run_group_id: >-2
{{ vaultwarden_user_info.group | default(vaultwarden_user, true) }}


@ -1,9 +0,0 @@
---
- name: Ensure vaultwarden container '{{ vaultwarden_container_name }}' is restarted
community.docker.docker_container:
name: "{{ vaultwarden_container_name }}"
state: "{{ vaultwarden_container_state }}"
restart: true
listen: vaultwarden-restart
when: vaultwarden_deployment_method == 'docker'
ignore_errors: "{{ ansible_check_mode }}"


@ -1,12 +0,0 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
role_name: vaultwarden
description: >-2
Deploy vaultwarden, a bitwarden-compatible server backend
galaxy_tags:
- vaultwarden
- bitwarden
- passwordstore
- docker


@ -1,22 +0,0 @@
---
- name: Ensure container image '{{ vaultwarden_container_image }}' is {{ vaultwarden_state }}
community.docker.docker_image:
name: "{{ vaultwarden_container_image }}"
state: "{{ vaultwarden_state }}"
source: "{{ vaultwarden_container_image_source }}"
force_source: "{{ vaultwarden_container_image_force_source }}"
- name: Ensure container '{{ vaultwarden_container_name }}' is {{ vaultwarden_container_state }}
community.docker.docker_container:
name: "{{ vaultwarden_container_name }}"
image: "{{ vaultwarden_container_image }}"
env: "{{ vaultwarden_container_env | default(omit, true) }}"
user: "{{ vaultwarden_container_user | default(omit, true) }}"
ports: "{{ vaultwarden_container_ports | default(omit, true) }}"
labels: "{{ vaultwarden_container_labels | default(omit, true) }}"
volumes: "{{ vaultwarden_container_volumes }}"
networks: "{{ vaultwarden_container_networks | default(omit, true) }}"
etc_hosts: "{{ vaultwarden_container_etc_hosts | default(omit, true) }}"
dns_servers: "{{ vaultwarden_container_dns_servers | default(omit, true) }}"
restart_policy: "{{ vaultwarden_container_restart_policy | default(omit, true) }}"
state: "{{ vaultwarden_container_state | default(omit, true) }}"


@ -1,78 +0,0 @@
---
- name: Ensure state is valid
ansible.builtin.fail:
msg: >-2
Unsupported state '{{ vaultwarden_state }}'!
Supported states are {{ vaultwarden_states | join(', ') }}.
when: vaultwarden_state not in vaultwarden_states
- name: Ensure deployment method is valid
ansible.builtin.fail:
msg: >-2
Unsupported deployment method '{{ vaultwarden_deployment_method }}'!
Supported are {{ vaultwarden_deployment_methods | join(', ') }}.
when: vaultwarden_deployment_method not in vaultwarden_deployment_methods
- name: Ensure required variables are given
ansible.builtin.fail:
msg: "Required variable '{{ var }}' is undefined!"
loop: "{{ vaultwarden_required_variables }}"
loop_control:
loop_var: var
when: >-2
var not in hostvars[inventory_hostname]
or hostvars[inventory_hostname][var] | length == 0
- name: Ensure conditionally required variables are given
ansible.builtin.fail:
msg: "Required variable '{{ var.name }}' is undefined!"
loop: "{{ vaultwarden_conditionally_required_variables }}"
loop_control:
loop_var: var
label: "{{ var.name }}"
when: >-2
var.when and (
var.name not in hostvars[inventory_hostname]
or hostvars[inventory_hostname][var.name] | length == 0)
- name: Ensure vaultwarden user '{{ vaultwarden_user }}' is {{ vaultwarden_state }}
ansible.builtin.user:
name: "{{ vaultwarden_user }}"
state: "{{ vaultwarden_state }}"
system: "{{ vaultwarden_user_system | default(true, true) }}"
create_home: "{{ vaultwarden_user_create_home | default(false, true) }}"
groups: "{{ vaultwarden_user_groups | default(omit, true) }}"
append: >-2
{{ vaultwarden_user_append_groups | default(
(vaultwarden_user_groups | default([], true) | length > 0),
true,
) }}
register: vaultwarden_user_info
- name: Ensure base paths are {{ vaultwarden_state }}
ansible.builtin.file:
path: "{{ mount.path }}"
state: "{{ (vaultwarden_state == 'present') | ternary('directory', 'absent') }}"
owner: "{{ mount.owner | default(vaultwarden_run_user_id) }}"
group: "{{ mount.group | default(vaultwarden_run_group_id) }}"
mode: "{{ mount.mode | default('0755', true) }}"
loop:
- path: "{{ vaultwarden_config_directory }}"
- path: "{{ vaultwarden_data_directory }}"
loop_control:
loop_var: mount
label: "{{ mount.path }}"
- name: Ensure vaultwarden config file '{{ vaultwarden_config_file }}' is {{ vaultwarden_state }}
ansible.builtin.copy:
content: "{{ vaultwarden_merged_config | to_nice_json(indent=4) }}"
dest: "{{ vaultwarden_config_file }}"
owner: "{{ vaultwarden_run_user_id }}"
group: "{{ vaultwarden_run_group_id }}"
mode: "0640"
when: vaultwarden_state == 'present'
notify: vaultwarden-restart
- name: Deploy vaultwarden using {{ vaultwarden_deployment_method }}
ansible.builtin.include_tasks:
file: "deploy-{{ vaultwarden_deployment_method }}.yml"


@ -1,11 +0,0 @@
---
vaultwarden_states:
- present
- absent
vaultwarden_deployment_methods:
- docker
vaultwarden_required_variables:
- vaultwarden_config_domain
vaultwarden_conditionally_required_variables:
- name: vaultwarden_config_admin_token
when: "{{ not (vaultwarden_config_disable_admin_token | default(true, true) | bool) }}"


@ -1,16 +0,0 @@
# `finallycoffee.services.vouch-proxy`
[Vouch-Proxy](https://github.com/vouch/vouch-proxy) can be used in combination with
nginx' `auth_request` module to secure web services with OIDC/OAuth. This role runs
vouch-proxy's official docker container.
## Usage
The `oauth` config section must be supplied in `vouch_proxy_oauth_config`, and the
`vouch` config section can be overridden in `vouch_proxy_vouch_config`. For possible
configuration values, see https://github.com/vouch/vouch-proxy/blob/master/config/config.yml_example.
For an example nginx config, see https://github.com/vouch/vouch-proxy#installation-and-configuration.
Passing container arguments in the same way as `community.docker.docker_container` is supported
using the `vouch_proxy_container_[...]` prefix (e.g. `vouch_proxy_container_ports`).
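As a sketch, a generic OIDC provider could be configured like this (URLs, domain, and secret are placeholders; the `oauth` keys follow the upstream example config linked above):
```yaml
vouch_proxy_config_vouch_domains:
  - example.org
vouch_proxy_oauth_config:
  provider: oidc
  client_id: vouch
  client_secret: "{{ vault_vouch_proxy_client_secret }}"
  auth_url: "https://id.example.org/auth"
  token_url: "https://id.example.org/token"
  user_info_url: "https://id.example.org/userinfo"
  scopes:
    - openid
    - email
  callback_url: "https://vouch.example.org/auth"
```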


@ -1,51 +0,0 @@
---
vouch_proxy_user: vouch-proxy
vouch_proxy_version: 0.40.0
vouch_proxy_base_path: /opt/vouch-proxy
vouch_proxy_config_path: "{{ vouch_proxy_base_path }}/config"
vouch_proxy_config_file: "{{ vouch_proxy_config_path }}/config.yaml"
vouch_proxy_container_name: vouch-proxy
vouch_proxy_container_image_name: vouch-proxy
vouch_proxy_container_image_namespace: vouch/
vouch_proxy_container_image_registry: quay.io
vouch_proxy_container_image_repository: >-
{{
(container_registries[vouch_proxy_container_image_registry] | default(vouch_proxy_container_image_registry))
+ '/' + (vouch_proxy_container_image_namespace | default(''))
+ vouch_proxy_container_image_name
}}
vouch_proxy_container_image_reference: >-
{{
vouch_proxy_container_image_repository + ':'
+ (vouch_proxy_container_image_tag | default(vouch_proxy_version))
}}
vouch_proxy_container_image_force_pull: "{{ vouch_proxy_container_image_tag is defined }}"
vouch_proxy_container_default_volumes:
- "{{ vouch_proxy_config_file }}:/config/config.yaml:ro"
vouch_proxy_container_volumes: >-
{{ vouch_proxy_container_default_volumes
+ vouch_proxy_container_extra_volumes | default([]) }}
vouch_proxy_container_restart_policy: "unless-stopped"
vouch_proxy_config_vouch_log_level: info
vouch_proxy_config_vouch_listen: 0.0.0.0
vouch_proxy_config_vouch_port: 9090
vouch_proxy_config_vouch_domains: []
vouch_proxy_config_vouch_document_root: ~
vouch_proxy_oauth_config: {}
vouch_proxy_vouch_config:
logLevel: "{{ vouch_proxy_config_vouch_log_level }}"
listen: "{{ vouch_proxy_config_vouch_listen }}"
port: "{{ vouch_proxy_config_vouch_port }}"
domains: "{{ vouch_proxy_config_vouch_domains }}"
document_root: "{{ vouch_proxy_config_vouch_document_root }}"
vouch_proxy_config:
vouch: "{{ vouch_proxy_vouch_config }}"
oauth: "{{ vouch_proxy_oauth_config }}"


@ -1,8 +0,0 @@
---
- name: Ensure vouch-proxy is restarted
community.docker.docker_container:
name: "{{ vouch_proxy_container_name }}"
state: started
restart: yes
listen: restart-vouch-proxy


@ -1,12 +0,0 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
role_name: vouch_proxy
description: Ansible role to deploy vouch_proxy using docker
galaxy_tags:
- vouch_proxy
- oidc
- authentication
- authorization
- docker


@ -1,50 +0,0 @@
---
- name: Ensure vouch-proxy user '{{ vouch_proxy_user }}' exists
ansible.builtin.user:
name: "{{ vouch_proxy_user }}"
state: present
system: true
register: vouch_proxy_user_info
- name: Ensure mounts are created
ansible.builtin.file:
dest: "{{ item.path }}"
state: directory
owner: "{{ item.owner | default(vouch_proxy_user_info.uid | default(vouch_proxy_user)) }}"
group: "{{ item.group | default(vouch_proxy_user_info.group | default(vouch_proxy_user)) }}"
mode: "{{ item.mode | default('0755') }}"
loop:
- path: "{{ vouch_proxy_base_path }}"
- path: "{{ vouch_proxy_config_path }}"
- name: Ensure config file is templated
ansible.builtin.copy:
dest: "{{ vouch_proxy_config_file }}"
content: "{{ vouch_proxy_config | to_nice_yaml }}"
owner: "{{ vouch_proxy_user_info.uid | default(vouch_proxy_user) }}"
group: "{{ vouch_proxy_user_info.group | default(vouch_proxy_user) }}"
mode: "0640"
notify:
- restart-vouch-proxy
- name: Ensure container image is present on host
community.docker.docker_image:
name: "{{ vouch_proxy_container_image_reference }}"
state: present
source: pull
force_source: "{{ vouch_proxy_container_image_force_pull | bool }}"
- name: Ensure container '{{ vouch_proxy_container_name }}' is running
community.docker.docker_container:
name: "{{ vouch_proxy_container_name }}"
image: "{{ vouch_proxy_container_image_reference }}"
env: "{{ vouch_proxy_container_env | default(omit) }}"
user: "{{ vouch_proxy_user_info.uid | default(vouch_proxy_user) }}"
ports: "{{ vouch_proxy_container_ports | default(omit) }}"
volumes: "{{ vouch_proxy_container_volumes | default(omit) }}"
networks: "{{ vouch_proxy_container_networks | default(omit) }}"
purge_networks: "{{ vouch_proxy_container_purge_networks | default(omit) }}"
etc_hosts: "{{ vouch_proxy_container_etc_hosts | default(omit) }}"
restart_policy: "{{ vouch_proxy_container_restart_policy }}"
state: started