Compare commits: transcaffe...main (110 commits)
| SHA1 |
|---|
| 25be7155e0 |
| 950d017889 |
| 31639291f1 |
| 127a128a73 |
| 6bf38f8273 |
| 266057d11c |
| afa91a622b |
| d92262798a |
| 4b13c448d0 |
| a9a988ec69 |
| d9de07c8f8 |
| c51dd4c142 |
| bfd6449a71 |
| 7ebde915db |
| a18c8c3355 |
| 5a6ffc6991 |
| e7ea03c00e |
| c7040c96f8 |
| d8d0b5c9e5 |
| 1d1f519e3a |
| 11d5c81c60 |
| f7eeb59288 |
| 72d8ac9644 |
| d3d6f89949 |
| 43d0dc171a |
| d8dfb24df0 |
| 57c8dae0c4 |
| 6385b6f7a8 |
| 7d09c9a88d |
| 98c60a73c6 |
| 5ac019bace |
| 43fd798712 |
| 88dc6377ce |
| 0a132b0ad5 |
| 72942ee382 |
| 755e5d55f2 |
| 5023a56b29 |
| 17dfe4b2bb |
| cd122ebbdb |
| 0d914bd11c |
| 49ed240f10 |
| 81d6f809d7 |
| b24ea1e925 |
| 912c32cb3e |
| cde5f12e79 |
| a8f5507eab |
| ddfa8d6687 |
| fd4cc0fe6a |
| 3a15ed1157 |
| a7fad79d05 |
| f3d3617ec0 |
| 908b579f2c |
| bab5b94500 |
| b5b4f67a08 |
| 5e29e174d5 |
| 6001399569 |
| 87df054977 |
| 8c89d40fcd |
| f231d4e7d3 |
| 80077af008 |
| eeb66de8a4 |
| 396f1b3a57 |
| be66a3fe7a |
| 5bfca1a55c |
| 70238d3bd4 |
| f6a97805de |
| b350a19bcc |
| 74a3216a41 |
| ef6da18172 |
| 65a256e8b5 |
| 6547f15bb4 |
| 5f19b5d9a9 |
| 4a2d1dec92 |
| 4632a1263a |
| e5924d5ecb |
| f2fe2fb034 |
| e8e97a5d89 |
| 5ea358e65d |
| 82ff46ee99 |
| 8a2993e9c3 |
| 87094b81ec |
| bbe369d525 |
| ed56665ed8 |
| 98be926e89 |
| 0fc751f7d6 |
| 271410f4c6 |
| e17369ae38 |
| a3c2716b7f |
| c8802b9dbf |
| 4ef456efdf |
| cd31d8b6af |
| 20cb480915 |
| e1756fd4b0 |
| 1e12c0fcfd |
| f368107966 |
| 0aa621b510 |
| 0c509f6b66 |
| a364a58717 |
| ccd50cb8cf |
| 1fe626fad5 |
| d4858c89f4 |
| 6658d7226c |
| 36224d0531 |
| 24be358a46 |
| c38e4f34dd |
| 10a9779996 |
| b635a00a34 |
| 159c4fda30 |
| 1e104bf1fb |
| 1417564e1d |
README.md (31 changes)

```diff
@@ -1,4 +1,4 @@
-# `finallycoffee.service` ansible collection
+# `finallycoffee.services` ansible collection
 
 ## Overview
 
@@ -8,20 +8,35 @@ concise area of concern.
 
 ## Roles
 
-- [`roles/authelia`](roles/authelia/README.md): Deploys an [authelia.com](https://www.authelia.com)
+- [`authelia`](roles/authelia/README.md): Deploys an [authelia.com](https://www.authelia.com)
   instance, an authentication provider with beta OIDC provider support.
 
-- [`roles/gitea`](roles/gitea/README.md): Deploy [gitea.io](https://gitea.io), a
+- [`ghost`](roles/ghost/README.md): Deploys [ghost.org](https://ghost.org/), a simple to use
+  blogging and publishing platform.
+
+- [`gitea`](roles/gitea/README.md): Deploy [gitea.io](https://gitea.io), a
   lightweight, self-hosted git service.
 
-- [`roles/jellyfin`](roles/jellyfin/README.md): Deploy [jellyfin.org](https://jellyfin.org),
+- [`hedgedoc`](roles/hedgedoc/README.md): Deploy [hedgedoc](https://hedgedoc.org/),
+  a collaborative real-time markdown editor using websockts
+
+- [`jellyfin`](roles/jellyfin/README.md): Deploy [jellyfin.org](https://jellyfin.org),
   the free software media system for streaming stored media to any device.
 
-- [`roles/restic`](roles/restic/README.md): Manage backups using restic
-  and persist them to a configurable backend.
+- [`keycloak`](roles/keycloak/README.md): Deploy [keycloak](https://www.keycloak.org/),
+  the open source identity and access management solution.
 
-- [`roles/minio`](roles/minio/README.md): Deploy [min.io](https://min.io), an
-  s3-compatible object storage server, using docker containers.
+- [`openproject`](roles/openproject/README.md): Deploys an [openproject.org](https://www.openproject.org)
+  installation using the upstream provided docker-compose setup.
+
+- [`snipe_it`](roles/snipe_it/README.md): Deploys [Snipe-IT](https://snipeitapp.com/),
+  the free and open-source IT asset (and license) management with a powerful REST API
+
+- [`vaultwarden`](roles/vaultwarden/README.md): Deploy [vaultwarden](https://github.com/dani-garcia/vaultwarden/),
+  an open-source implementation of the Bitwarden Server (formerly Bitwarden\_RS).
+
+- [`vouch_proxy`](roles/vouch_proxy/README.md): Deploys [vouch-proxy](https://github.com/vouch/vouch-proxy),
+  an authorization proxy for arbitrary webapps working with `nginx`s' `auth_request` module.
 
 ## License
 
```
galaxy.yml (22 changes)

```diff
@@ -1,15 +1,23 @@
 namespace: finallycoffee
 name: services
-version: 0.0.1
+version: 0.1.10
 readme: README.md
 authors:
-  - Johanna Dorothea Reichmann <transcaffeine@finallycoffee.eu>
+  - transcaffeine <transcaffeine@finally.coffee>
 description: Various ansible roles useful for automating infrastructure
 dependencies:
-  "community.docker": "^1.10.0"
-license:
-  - CNPLv7+
+  "community.crypto": "^2.0.0"
+  "community.docker": "^3.0.0"
+license_file: LICENSE.md
 build_ignore:
   - '*.tar.gz'
-repository: https://git.finallycoffee.eu/finallycoffee.eu/services
-issues: https://git.finallycoffee.eu/finallycoffee.eu/services/issues
+repository: https://git.finally.coffee/finallycoffee/services
+issues: https://codeberg.org/finallycoffee/ansible-collection-services/issues
+tags:
+  - authelia
+  - gitea
+  - hedgedoc
+  - jellyfin
+  - vaultwarden
+  - snipeit
+  - docker
```
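The bumped collection metadata can be pulled in through a standard `ansible-galaxy` requirements file. A minimal sketch, assuming installation from Ansible Galaxy; the version pins are illustrative and mirror the `galaxy.yml` above:

```yaml
# requirements.yml -- illustrative sketch, not part of this change
collections:
  - name: finallycoffee.services
    version: ">=0.1.10"
  # the collection's own dependencies as declared above
  - name: community.crypto
    version: ">=2.0.0"
  - name: community.docker
    version: ">=3.0.0"
```

Running `ansible-galaxy collection install -r requirements.yml` resolves the declared dependencies as well, so listing them explicitly is optional.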
meta/runtime.yml (new file, 3 lines)

```diff
@@ -0,0 +1,3 @@
+---
+
+requires_ansible: ">=2.15"
```
playbooks/hedgedoc.yml (new file, 6 lines)

```diff
@@ -0,0 +1,6 @@
+---
+- name: Install and configure hedgedoc
+  hosts: "{{ hedgedoc_hosts | default('hedgedoc') }}"
+  become: "{{ hedgedoc_become | default(true, false) }}"
+  roles:
+    - role: finallycoffee.services.hedgedoc
```
playbooks/jellyfin.yml (new file, 6 lines)

```diff
@@ -0,0 +1,6 @@
+---
+- name: Install and configure jellyfin
+  hosts: "{{ jellyfin_hosts | default('jellyfin') }}"
+  become: "{{ jellyfin_become | default(true, false) }}"
+  roles:
+    - role: finallycoffee.services.jellyfin
```
playbooks/openproject.yml (new file, 6 lines)

```diff
@@ -0,0 +1,6 @@
+---
+- name: Install openproject
+  hosts: "{{ openproject_hosts | default('openproject') }}"
+  become: "{{ openproject_become | default(true, false) }}"
+  roles:
+    - role: finallycoffee.services.openproject
```
playbooks/snipe_it.yml (new file, 6 lines)

```diff
@@ -0,0 +1,6 @@
+---
+- name: Install and configure Snipe-IT
+  hosts: "{{ snipe_it_hosts | default('snipe_it') }}"
+  become: "{{ snipe_it_become | default(true, false) }}"
+  roles:
+    - role: finallycoffee.services.snipe_it
```
playbooks/vaultwarden.yml (new file, 6 lines)

```diff
@@ -0,0 +1,6 @@
+---
+- name: Install and configure vaultwarden
+  hosts: "{{ vaultwarden_hosts | default('vaultwarden') }}"
+  become: "{{ vaultwarden_become | default(true, false) }}"
+  roles:
+    - role: finallycoffee.services.vaultwarden
```
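The playbooks above all follow the same pattern: they target a group named after the service unless `<service>_hosts` is set, and escalate privileges unless `<service>_become` is overridden. A hypothetical YAML inventory matching those default group names (hostnames are placeholders):

```yaml
# inventory/services.yml -- hypothetical inventory; group names match the playbook defaults
all:
  children:
    hedgedoc:
      hosts:
        pad.example.org:
    jellyfin:
      hosts:
        media.example.org:
    openproject:
      hosts:
        projects.example.org:
    snipe_it:
      hosts:
        assets.example.org:
    vaultwarden:
      hosts:
        vault.example.org:
```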
roles/authelia/defaults/main.yml

```diff
@@ -1,6 +1,5 @@
 ---
-authelia_version: 4.34.6
-
+authelia_version: "4.38.17"
 authelia_user: authelia
 authelia_base_dir: /opt/authelia
 authelia_domain: authelia.example.org
@@ -8,14 +7,26 @@ authelia_domain: authelia.example.org
 authelia_config_dir: "{{ authelia_base_dir }}/config"
 authelia_config_file: "{{ authelia_config_dir }}/config.yaml"
 authelia_data_dir: "{{ authelia_base_dir }}/data"
+authelia_asset_dir: "{{ authelia_base_dir }}/assets"
 authelia_sqlite_storage_file: "{{ authelia_data_dir }}/authelia.sqlite3"
 authelia_notification_storage_file: "{{ authelia_data_dir }}/notifications.txt"
 authelia_user_storage_file: "{{ authelia_data_dir }}/user_database.yml"
 
 authelia_container_name: authelia
-authelia_container_image_name: docker.io/authelia/authelia
+authelia_container_image_server: docker.io
+authelia_container_image_namespace: authelia
+authelia_container_image_name: authelia
+authelia_container_image: >-2
+  {{
+    [
+      authelia_container_image_server,
+      authelia_container_image_namespace,
+      authelia_container_image_name
+    ] | join('/')
+  }}
 authelia_container_image_tag: ~
-authelia_container_image_ref: "{{ authelia_container_image_name }}:{{ authelia_container_image_tag | default(authelia_version, true) }}"
+authelia_container_image_ref: >-2
+  {{ authelia_container_image }}:{{ authelia_container_image_tag | default(authelia_version, true) }}
 authelia_container_image_force_pull: "{{ authelia_container_image_tag | default(false, True) }}"
 authelia_container_env:
   PUID: "{{ authelia_run_user }}"
@@ -41,11 +52,22 @@ authelia_config_jwt_secret: ~
 authelia_config_default_redirection_url: ~
 authelia_config_server_host: 0.0.0.0
 authelia_config_server_port: "{{ authelia_container_listen_port }}"
+authelia_config_server_address: >-2
+  {{ authelia_config_server_host }}:{{ authelia_config_server_port }}
 authelia_config_server_path: ""
-authelia_config_server_read_buffer_size: 4096
-authelia_config_server_write_buffer_size: 4096
-authelia_config_server_enable_pprof: true
-authelia_config_server_enable_expvars: true
+authelia_config_server_asset_path: "/config/assets/"
+authelia_config_server_buffers_read: 4096
+authelia_config_server_read_buffer_size: >-2
+  {{ authelia_config_server_buffers_read }}
+authelia_config_server_buffers_write: 4096
+authelia_config_server_write_buffer_size: >-2
+  {{ authelia_config_server_buffers_write }}
+authelia_config_server_endpoints_enable_pprof: true
+authelia_config_server_enable_pprof: >-2
+  {{ authelia_config_server_endpoints_enable_pprof }}
+authelia_config_server_endpoints_enable_expvars: true
+authelia_config_server_enable_expvars: >-2
+  {{ authelia_config_server_endpoints_enable_expvars }}
 authelia_config_server_disable_healthcheck:
 authelia_config_server_tls_key: ~
 authelia_config_server_tls_certificate: ~
@@ -55,6 +77,8 @@ authelia_config_log_level: info
 authelia_config_log_format: json
 authelia_config_log_file_path: ~
 authelia_config_log_keep_stdout: false
+authelia_config_telemetry_metrics_enabled: false
+authelia_config_telemetry_metrics_address: '0.0.0.0:9959'
 authelia_config_totp_disable: true
 authelia_config_totp_issuer: "{{ authelia_domain }}"
 authelia_config_totp_algorithm: sha1
@@ -76,8 +100,8 @@ authelia_config_ntp_version: 4
 authelia_config_ntp_max_desync: 3s
 authelia_config_ntp_disable_startup_check: false
 authelia_config_ntp_disable_failure: false
-authelia_config_authentication_backend_disable_reset_password: false
 authelia_config_authentication_backend_refresh_interval: 5m
+authelia_config_authentication_backend_password_reset_disable: false
 authelia_config_authentication_backend_password_reset_custom_url: ~
 authelia_config_authentication_backend_ldap_implementation: custom
 authelia_config_authentication_backend_ldap_url: ldap://127.0.0.1:389
@@ -90,10 +114,18 @@ authelia_config_authentication_backend_ldap_additional_users_dn: "ou=users"
 authelia_config_authentication_backend_ldap_users_filter: "(&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=inetOrgPerson))"
 authelia_config_authentication_backend_ldap_additional_groups_dn: "ou=groups"
 authelia_config_authentication_backend_ldap_groups_filter: "(member={dn})"
+authelia_config_authentication_backend_ldap_attributes_username: uid
+authelia_config_authentication_backend_ldap_username_attribute: >-2
+  {{ authelia_config_authentication_backend_ldap_attributes_username }}
+authelia_config_authentication_backend_ldap_attributes_mail: mail
+authelia_config_authentication_backend_ldap_mail_attribute: >-2
+  {{ authelia_config_authentication_backend_ldap_attributes_mail }}
+authelia_config_authentication_backend_ldap_attributes_display_name: displayName
+authelia_config_authentication_backend_ldap_display_name_attribute: >-2
+  {{ authelia_config_authentication_backend_ldap_attributes_display_name }}
 authelia_config_authentication_backend_ldap_group_name_attribute: cn
-authelia_config_authentication_backend_ldap_username_attribute: uid
-authelia_config_authentication_backend_ldap_mail_attribute: mail
-authelia_config_authentication_backend_ldap_display_name_attribute: displayName
+authelia_config_authentication_backend_ldap_attributes_group_name: >-2
+  {{ authelia_config_authentication_backend_ldap_group_name_attribute }}
 authelia_config_authentication_backend_ldap_user: ~
 authelia_config_authentication_backend_ldap_password: ~
 authelia_config_authentication_backend_file_path: ~
@@ -121,6 +153,8 @@ authelia_config_session_secret: ~
 authelia_config_session_expiration: 1h
 authelia_config_session_inactivity: 5m
 authelia_config_session_remember_me_duration: 1M
+authelia_config_session_remember_me: >-2
+  {{ authelia_config_session_remember_me_duration }}
 authelia_config_session_redis_host: "{{ authelia_redis_host }}"
 authelia_config_session_redis_port: "{{ authelia_redis_port }}"
 authelia_config_session_redis_username: "{{ authelia_redis_user }}"
@@ -145,15 +179,14 @@ authelia_config_storage_postgres_ssl_certificate: disable
 authelia_config_storage_postgres_ssl_key: disable
 authelia_config_notifier_disable_startup_check: false
 authelia_config_notifier_filesystem_filename: ~
-authelia_config_notifier_smtp_host: "{{ authelia_smtp_host }}"
-authelia_config_notifier_smtp_port: "{{ authelia_stmp_port }}"
+authelia_config_notifier_smtp_address: "{{ authelia_smtp_host }}:{{ authelia_stmp_port }}"
 authelia_config_notifier_smtp_username: "{{ authelia_smtp_user }}"
 authelia_config_notifier_smtp_password: "{{ authelia_smtp_pass }}"
 authelia_config_notifier_smtp_timeout: 5s
 authelia_config_notifier_smtp_sender: "Authelia on {{ authelia_domain }} <admin@{{ authelia_domain }}>"
 authelia_config_notifier_smtp_identifier: "{{ authelia_domain }}"
 authelia_config_notifier_smtp_subject: "[Authelia @ {{ authelia_domain }}] {title}"
-authelia_config_notifier_smtp_startup_check_address: false
+authelia_config_notifier_smtp_startup_check_address: "authelia-test@{{ authelia_domain }}"
 authelia_config_notifier_smtp_disable_require_tls: false
 authelia_config_notifier_smtp_disable_html_emails: false
 authelia_config_notifier_smtp_tls_skip_verify: false
@@ -162,6 +195,12 @@ authelia_config_notifier_smtp_tls_minimum_version: "{{ authelia_tls_minimum_vers
 
 authelia_database_type: ~
 authelia_database_host: ~
+authelia_database_port: ~
+authelia_database_address: >-2
+  {{ authelia_database_host }}{{
+    (authelia_database_port | default(false, true) | bool)
+    | ternary(':' + authelia_database_port, '')
+  }}
 authelia_database_user: authelia
 authelia_database_pass: ~
 authelia_database_name: authelia
```
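With the container image now assembled from `server`/`namespace`/`name` and the database address folded from host plus optional port, host-level overrides only need to touch the leaf variables. A minimal sketch with purely illustrative values (none of these are role defaults except where noted):

```yaml
# host_vars/auth.example.org.yml -- illustrative values only
authelia_domain: auth.example.org
authelia_container_image_server: ghcr.io      # assumption: a mirror instead of docker.io
authelia_container_image_tag: "4.38.17"       # falls back to authelia_version when unset
authelia_database_type: postgres
authelia_database_host: db.example.org
authelia_database_port: "5432"                # appended as ":5432" to authelia_database_address
authelia_database_pass: "change-me"
```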
roles/authelia/handlers/main.yml

```diff
@@ -4,5 +4,7 @@
   docker_container:
     name: "{{ authelia_container_name }}"
     state: started
-    restart: yes
+    restart: true
+    comparisons:
+      '*': ignore
   listen: restart-authelia
```
roles/authelia/meta/main.yml (new file, 9 lines)

```diff
@@ -0,0 +1,9 @@
+---
+allow_duplicates: true
+dependencies: []
+galaxy_info:
+  role_name: authelia
+  description: Ansible role to deploy authelia using docker
+  galaxy_tags:
+    - authelia
+    - docker
```
roles/authelia/tasks/main.yml

```diff
@@ -1,19 +1,20 @@
 ---
 
 - name: Ensure user {{ authelia_user }} exists
-  user:
+  ansible.builtin.user:
     name: "{{ authelia_user }}"
     state: present
     system: true
   register: authelia_user_info
 
 - name: Ensure host directories are created with correct permissions
-  file:
+  ansible.builtin.file:
     path: "{{ item.path }}"
     state: directory
     owner: "{{ item.owner | default(authelia_user) }}"
     group: "{{ item.group | default(authelia_user) }}"
     mode: "{{ item.mode | default('0750') }}"
+  when: item.path | default(false, true) | bool
   loop:
     - path: "{{ authelia_base_dir }}"
       mode: "0755"
@@ -21,9 +22,11 @@
       mode: "0750"
     - path: "{{ authelia_data_dir }}"
       mode: "0750"
+    - path: "{{ authelia_asset_dir }}"
+      mode: "0750"
 
 - name: Ensure config file is generated
-  copy:
+  ansible.builtin.copy:
     content: "{{ authelia_config | to_nice_yaml(indent=2, width=10000) }}"
     dest: "{{ authelia_config_file }}"
     owner: "{{ authelia_run_user }}"
@@ -32,7 +35,7 @@
   notify: restart-authelia
 
 - name: Ensure sqlite database file exists before mounting it
-  file:
+  ansible.builtin.file:
     path: "{{ authelia_sqlite_storage_file }}"
     state: touch
     owner: "{{ authelia_run_user }}"
@@ -43,7 +46,7 @@
   when: authelia_config_storage_local_path | default(false, true)
 
 - name: Ensure user database exists before mounting it
-  file:
+  ansible.builtin.file:
     path: "{{ authelia_user_storage_file }}"
     state: touch
     owner: "{{ authelia_run_user }}"
@@ -54,7 +57,7 @@
   when: authelia_config_authentication_backend_file_path | default(false, true)
 
 - name: Ensure notification reports file exists before mounting it
-  file:
+  ansible.builtin.file:
     path: "{{ authelia_notification_storage_file }}"
     state: touch
     owner: "{{ authelia_run_user }}"
@@ -73,7 +76,7 @@
   register: authelia_container_image_info
 
 - name: Ensure authelia container is running
-  docker_container:
+  community.docker.docker_container:
     name: "{{ authelia_container_name }}"
     image: "{{ authelia_container_image_ref }}"
     env: "{{ authelia_container_env }}"
@@ -82,7 +85,9 @@
     labels: "{{ authelia_container_labels }}"
     volumes: "{{ authelia_container_volumes }}"
     networks: "{{ authelia_container_networks | default(omit, true) }}"
+    etc_hosts: "{{ authelia_container_etc_hosts | default(omit, true) }}"
     purge_networks: "{{ authelia_container_purge_networks | default(omit, true)}}"
     restart_policy: "{{ authelia_container_restart_policy }}"
+    recreate: "{{ authelia_container_recreate | default(omit, true) }}"
     state: "{{ authelia_container_state }}"
   register: authelia_container_info
```
roles/authelia/vars/main.yml

```diff
@@ -5,6 +5,7 @@ authelia_run_group: "{{ (authelia_user_info.group) if authelia_user_info is defi
 
 authelia_container_base_volumes: >-2
   {{ [ authelia_config_file + ":/config/configuration.yml:ro"]
+  + ([authelia_asset_dir + '/:' + authelia_config_server_asset_path + ':ro'] if authelia_asset_dir | default(false, true) else [])
   + ([ authelia_sqlite_storage_file + ":" + authelia_config_storage_local_path + ":z" ]
     if authelia_config_storage_local_path | default(false, true) else [])
   + ([ authelia_notification_storage_file + ":" + authelia_config_notifier_filesystem_filename + ":z" ]
@@ -21,6 +22,7 @@ authelia_top_level_config:
   theme: "{{ authelia_config_theme }}"
   jwt_secret: "{{ authelia_config_jwt_secret }}"
   log: "{{ authelia_config_log }}"
+  telemetry: "{{ authelia_config_telemetry }}"
   totp: "{{ authelia_config_totp }}"
   webauthn: "{{ authelia_config_webauthn }}"
   duo_api: "{{ authelia_config_duo_api }}"
@@ -46,17 +48,20 @@ authelia_base_config: >-2
 authelia_config_server: >-2
   {{
     {
-      "host": authelia_config_server_host,
-      "port": authelia_config_server_port,
-      "path": authelia_config_server_path,
-      "read_buffer_size": authelia_config_server_read_buffer_size,
-      "write_buffer_size": authelia_config_server_write_buffer_size,
-      "enable_pprof": authelia_config_server_enable_pprof,
-      "enable_expvars": authelia_config_server_enable_expvars,
+      "address": authelia_config_server_address,
+      "asset_path": authelia_config_server_asset_path,
       "disable_healthcheck": authelia_config_server_disable_healthcheck,
+      "endpoints": authelia_config_server_endpoints,
+      "buffers": authelia_config_server_buffers,
     } | combine({"headers": {"csp_template": authelia_config_server_headers_csp_template}}
       if authelia_config_server_headers_csp_template | default(false, true) else {})
   }}
+authelia_config_server_endpoints:
+  enable_expvars: "{{ authelia_config_server_endpoints_enable_expvars }}"
+  enable_pprof: "{{ authelia_config_server_endpoints_enable_pprof }}"
+authelia_config_server_buffers:
+  read: "{{ authelia_config_server_buffers_read }}"
+  write: "{{ authelia_config_server_buffers_write }}"
 authelia_config_server_tls:
   key: "{{ authelia_config_server_tls_key }}"
   certificate: "{{ authelia_config_server_tls_certificate }}"
@@ -72,6 +77,10 @@ authelia_config_log: >-2
   | combine({"keep_stdout": authelia_config_log_keep_stdout}
     if authelia_config_log_file_path | default(false, true) else {})
   }}
+authelia_config_telemetry:
+  metrics:
+    enabled: "{{ authelia_config_telemetry_metrics_enabled }}"
+    address: "{{ authelia_config_telemetry_metrics_address }}"
 authelia_config_totp:
   disable: "{{ authelia_config_totp_disable }}"
   issuer: "{{ authelia_config_totp_issuer }}"
@@ -101,7 +110,6 @@ authelia_config_ntp:
 authelia_config_authentication_backend: >-2
   {{
     {
-      "disable_reset_password": authelia_config_authentication_backend_disable_reset_password,
       "refresh_interval": authelia_config_authentication_backend_refresh_interval,
     }
     | combine({"password_reset": authelia_config_authentication_backend_password_reset}
@@ -112,6 +120,7 @@ authelia_config_authentication_backend: >-2
   }}
 authelia_config_authentication_backend_password_reset:
   custom_url: "{{ authelia_config_authentication_backend_password_reset_custom_url }}"
+  disable: "{{ authelia_config_authentication_backend_password_reset_disable }}"
 authelia_config_authentication_backend_ldap:
   implementation: "{{ authelia_config_authentication_backend_ldap_implementation }}"
   url: "{{ authelia_config_authentication_backend_ldap_url }}"
@@ -125,10 +134,11 @@ authelia_config_authentication_backend_ldap:
   additional_groups_dn: "{{ authelia_config_authentication_backend_ldap_additional_groups_dn }}"
   users_filter: "{{ authelia_config_authentication_backend_ldap_users_filter }}"
   groups_filter: "{{ authelia_config_authentication_backend_ldap_groups_filter }}"
-  group_name_attribute: "{{ authelia_config_authentication_backend_ldap_group_name_attribute }}"
-  username_attribute: "{{ authelia_config_authentication_backend_ldap_username_attribute }}"
-  mail_attribute: "{{ authelia_config_authentication_backend_ldap_mail_attribute }}"
-  display_name_attribute: "{{ authelia_config_authentication_backend_ldap_display_name_attribute }}"
+  attributes:
+    username: "{{ authelia_config_authentication_backend_ldap_attributes_username }}"
+    mail: "{{ authelia_config_authentication_backend_ldap_attributes_mail }}"
+    display_name: "{{ authelia_config_authentication_backend_ldap_attributes_display_name }}"
+    group_name: "{{ authelia_config_authentication_backend_ldap_attributes_group_name }}"
   user: "{{ authelia_config_authentication_backend_ldap_user }}"
   password: "{{ authelia_config_authentication_backend_ldap_password }}"
 authelia_config_authentication_backend_file:
@@ -160,14 +170,19 @@ authelia_config_access_control:
   default_policy: "{{ authelia_config_access_control_default_policy }}"
   networks: "{{ authelia_config_access_control_networks }}"
   rules: "{{ authelia_config_access_control_rules }}"
-authelia_config_session:
+authelia_config_session: >-2
+  {{ authelia_config_session_base
+  | combine(({'redis': authelia_config_session_redis}
+    if authelia_config_session_redis_host else {}), recursive=true)
+  }}
+authelia_config_session_base:
   name: "{{ authelia_config_session_name }}"
   domain: "{{ authelia_config_session_domain }}"
   same_site: "{{ authelia_config_session_same_site }}"
   secret: "{{ authelia_config_session_secret }}"
   expiration: "{{ authelia_config_session_expiration }}"
   inactivity: "{{ authelia_config_session_inactivity }}"
-  remember_me_duration: "{{ authelia_config_session_remember_me_duration }}"
+  remember_me: "{{ authelia_config_session_remember_me }}"
 authelia_config_session_redis: >-2
   {{
     {
@@ -211,15 +226,13 @@ authelia_config_storage: >-2
 authelia_config_storage_local:
   path: "{{ authelia_config_storage_local_path }}"
 authelia_config_storage_mysql:
-  host: "{{ authelia_database_host }}"
-  port: "{{ authelia_config_storage_mysql_port }}"
+  host: "{{ authelia_database_address }}"
   database: "{{ authelia_database_name }}"
   username: "{{ authelia_database_user }}"
   password: "{{ authelia_database_pass }}"
   timeout: "{{ authelia_database_timeout }}"
 authelia_config_storage_postgres:
-  host: "{{ authelia_database_host }}"
-  port: "{{ authelia_config_storage_postgres_port }}"
+  address: "{{ authelia_database_address }}"
   database: "{{ authelia_database_name }}"
   schema: public
   username: "{{ authelia_database_user }}"
@@ -243,8 +256,7 @@ authelia_config_notifier: >-2
 authelia_config_notifier_filesystem:
   filename: "{{ authelia_config_notifier_filesystem_filename }}"
 authelia_config_notifier_smtp:
-  host: "{{ authelia_config_notifier_smtp_host }}"
-  port: "{{ authelia_config_notifier_smtp_port }}"
+  address: "{{ authelia_config_notifier_smtp_address }}"
   timeout: "{{ authelia_config_notifier_smtp_timeout }}"
   username: "{{ authelia_config_notifier_smtp_username }}"
   password: "{{ authelia_config_notifier_smtp_password }}"
```
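For orientation, the reworked `authelia_config_server` composition above should produce a `server:` block shaped roughly like the following in the generated configuration (a sketch using the role defaults; the listen port value is an assumption, since `authelia_container_listen_port` is defined outside this diff):

```yaml
# approximate shape of the rendered server section -- not a file in this change
server:
  address: "0.0.0.0:9091"      # assumes authelia_container_listen_port == 9091
  asset_path: "/config/assets/"
  disable_healthcheck: null
  endpoints:
    enable_expvars: true
    enable_pprof: true
  buffers:
    read: 4096
    write: 4096
```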
roles/ghost/README.md (new file, 18 lines)

```diff
@@ -0,0 +1,18 @@
+# `finallycoffee.services.ghost` ansible role
+
+[Ghost](https://ghost.org/) is a self-hosted blog with rich media capabilities,
+which this role deploys in a docker container.
+
+## Requirements
+
+Ghost requires a MySQL-database (like mariadb) for storing it's data, which
+can be configured using the `ghost_database_(host|username|password|database)` variables.
+
+Setting `ghost_domain` to a fully-qualified domain on which ghost should be reachable
+is also required.
+
+Ghosts configuration can be changed using the `ghost_config` variable.
+
+Container arguments which are equivalent to `community.docker.docker_container` can be
+provided in the `ghost_container_[...]` syntax (e.g. `ghost_container_ports` to expose
+ghosts port to the host).
```
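A minimal set of variables for the new ghost role, following the README above; all values are placeholders, and the extra config and port entries only demonstrate the pass-through behaviour described there:

```yaml
# host_vars/blog.example.org.yml -- illustrative sketch
ghost_domain: blog.example.org
ghost_database_host: mariadb.example.org
ghost_database_username: ghost
ghost_database_password: "change-me"
ghost_database_database: ghost
ghost_config:
  mail__transport: SMTP               # merged over ghost_base_config
ghost_container_ports:
  - "127.0.0.1:2368:2368"             # handed to community.docker.docker_container
```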
roles/ghost/defaults/main.yml (new file, 38 lines)

```diff
@@ -0,0 +1,38 @@
+---
+ghost_domain: ~
+ghost_version: "5.103.0"
+ghost_user: ghost
+ghost_user_group: ghost
+ghost_base_path: /opt/ghost
+ghost_data_path: "{{ ghost_base_path }}/data"
+ghost_config_path: "{{ ghost_base_path }}/config"
+ghost_config_file: "{{ ghost_config_path }}/ghost.env"
+ghost_database_username: ghost
+ghost_database_password: ~
+ghost_database_database: ghost
+ghost_database_host: ~
+ghost_base_config:
+  url: "https://{{ ghost_domain }}"
+  database__client: mysql
+  database__connection__host: "{{ ghost_database_host }}"
+  database__connection__user: "{{ ghost_database_username }}"
+  database__connection__password: "{{ ghost_database_password }}"
+  database__connection__database: "{{ ghost_database_database }}"
+ghost_config: {}
+
+ghost_container_name: ghost
+ghost_container_image_name: docker.io/ghost
+ghost_container_image_tag: ~
+ghost_container_base_volumes:
+  - "{{ ghost_data_path }}:{{ ghost_container_data_directory }}:rw"
+ghost_container_extra_volumes: []
+ghost_container_volumes:
+  "{{ ghost_container_base_volumes + ghost_container_extra_volumes }}"
+ghost_container_base_labels:
+  version: "{{ ghost_version }}"
+ghost_container_extra_labels: {}
+ghost_container_restart_policy: "unless-stopped"
+ghost_container_networks: ~
+ghost_container_purge_networks: ~
+ghost_container_etc_hosts: ~
+ghost_container_state: started
```
roles/ghost/meta/main.yml (new file, 10 lines)

```diff
@@ -0,0 +1,10 @@
+---
+allow_duplicates: true
+dependencies: []
+galaxy_info:
+  role_name: ghost
+  description: Ansible role to deploy ghost (https://ghost.org) using docker
+  galaxy_tags:
+    - ghost
+    - blog
+    - docker
```
roles/ghost/tasks/main.yml (new file, 57 lines)

```diff
@@ -0,0 +1,57 @@
+---
+- name: Ensure ghost group is created
+  ansible.builtin.group:
+    name: "{{ ghost_user_group }}"
+    state: present
+    system: true
+
+- name: Ensure ghost user is created
+  ansible.builtin.user:
+    name: "{{ ghost_user }}"
+    groups:
+      - "{{ ghost_user_group }}"
+    append: true
+    state: present
+    system: true
+
+- name: Ensure host paths for docker volumes exist for ghost
+  ansible.builtin.file:
+    path: "{{ item.path }}"
+    state: directory
+    mode: "0750"
+    owner: "{{ item.owner | default(ghost_user) }}"
+    group: "{{ item.group | default(ghost_user_group) }}"
+  loop:
+    - path: "{{ ghost_base_path }}"
+    - path: "{{ ghost_data_path }}"
+      owner: "1000"
+    - path: "{{ ghost_config_path }}"
+
+- name: Ensure ghost configuration file is templated
+  ansible.builtin.template:
+    src: "ghost.env.j2"
+    dest: "{{ ghost_config_file }}"
+    owner: "{{ ghost_user }}"
+    group: "{{ ghost_user_group }}"
+    mode: "0644"
+
+- name: Ensure ghost container image is present on host
+  community.docker.docker_image:
+    name: "{{ ghost_container_image }}"
+    state: present
+    source: pull
+    force_source: "{{ ghost_container_image_tag is defined }}"
+
+- name: Ensure ghost container '{{ ghost_container_name }}' is {{ ghost_container_state }}
+  community.docker.docker_container:
+    name: "{{ ghost_container_name }}"
+    image: "{{ ghost_container_image }}"
+    ports: "{{ ghost_container_ports | default(omit, true) }}"
+    labels: "{{ ghost_container_labels }}"
+    volumes: "{{ ghost_container_volumes }}"
+    env_file: "{{ ghost_config_file }}"
+    etc_hosts: "{{ ghost_container_etc_hosts | default(omit, true) }}"
+    networks: "{{ ghost_container_networks | default(omit, true) }}"
+    purge_networks: "{{ ghost_container_purge_networks | default(omit, true) }}"
+    restart_policy: "{{ ghost_container_restart_policy }}"
+    state: "{{ ghost_container_state }}"
```
roles/ghost/templates/ghost.env.j2 (new file, 3 lines)

```diff
@@ -0,0 +1,3 @@
+{% for key, value in ghost_config_complete.items() %}
+{{ key }}={{ value }}
+{% endfor %}
```
roles/ghost/vars/main.yml (new file, 10 lines)

```diff
@@ -0,0 +1,10 @@
+---
+
+ghost_container_image: "{{ ghost_container_image_name}}:{{ ghost_container_image_tag | default(ghost_version, true) }}"
+ghost_container_labels: >-2
+  {{ ghost_container_base_labels
+  | combine(ghost_container_extra_labels) }}
+
+ghost_container_data_directory: "/var/lib/ghost/content"
+ghost_config_complete: >-2
+  {{ ghost_base_config | combine(ghost_config, recursive=True) }}
```
roles/gitea/defaults/main.yml

```diff
@@ -1,7 +1,7 @@
 ---
-gitea_version: "1.16.4"
-
+gitea_version: "1.22.4"
 gitea_user: git
+gitea_run_user: "{{ gitea_user }}"
 gitea_base_path: "/opt/gitea"
 gitea_data_path: "{{ gitea_base_path }}/data"
 
@@ -9,17 +9,29 @@ gitea_data_path: "{{ gitea_base_path }}/data"
 gitea_domain: ~
 
 # container config
-gitea_container_name: "git"
-gitea_container_image_name: "docker.io/gitea/gitea"
+gitea_container_name: "{{ gitea_user }}"
+gitea_container_image_server: "docker.io"
+gitea_container_image_name: "gitea"
+gitea_container_image_namespace: gitea
+gitea_container_image_fq_name: >-
+  {{
+    [
+      gitea_container_image_server,
+      gitea_container_image_namespace,
+      gitea_container_image_name
+    ] | join('/')
+  }}
 gitea_container_image_tag: "{{ gitea_version }}"
-gitea_container_image: "{{ gitea_container_image_name }}:{{ gitea_container_image_tag }}"
+gitea_container_image: >-2
+  {{ gitea_container_image_fq_name }}:{{ gitea_container_image_tag }}
 gitea_container_networks: []
 gitea_container_purge_networks: ~
 gitea_container_restart_policy: "unless-stopped"
 gitea_container_extra_env: {}
-gitea_contianer_extra_labels: {}
+gitea_container_extra_labels: {}
 gitea_container_extra_ports: []
 gitea_container_extra_volumes: []
+gitea_container_state: started
 
 # container defaults
 gitea_container_base_volumes:
@@ -40,10 +52,10 @@ gitea_container_base_labels:
 gitea_config_mailer_enabled: false
 gitea_config_mailer_type: ~
 gitea_config_mailer_from_addr: ~
-gitea_config_mailer_host: ~
+gitea_config_mailer_smtp_addr: ~
 gitea_config_mailer_user: ~
 gitea_config_mailer_passwd: ~
-gitea_config_mailer_tls: ~
+gitea_config_mailer_protocol: ~
 gitea_config_mailer_sendmail_path: ~
 gitea_config_metrics_enabled: false
 
```
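The renamed mailer variables map onto gitea's current `[mailer]` keys (`SMTP_ADDR`, `PROTOCOL`) rather than the removed `HOST`/`IS_TLS_ENABLED`. A hypothetical override enabling mail delivery might look like this (host and credentials are placeholders):

```yaml
# illustrative gitea mailer settings -- values are placeholders
gitea_config_mailer_enabled: true
gitea_config_mailer_type: smtp
gitea_config_mailer_smtp_addr: mail.example.org
gitea_config_mailer_protocol: smtps
gitea_config_mailer_user: gitea@example.org
gitea_config_mailer_passwd: "change-me"
```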
roles/gitea/meta/main.yml (new file, 10 lines)

```diff
@@ -0,0 +1,10 @@
+---
+allow_duplicates: true
+dependencies: []
+galaxy_info:
+  role_name: gitea
+  description: Ansible role to deploy gitea using docker
+  galaxy_tags:
+    - gitea
+    - git
+    - docker
```
roles/gitea/tasks/main.yml

```diff
@@ -1,14 +1,15 @@
 ---
 
-- name: Create gitea user
-  user:
+- name: Ensure gitea user '{{ gitea_user }}' is present
+  ansible.builtin.user:
     name: "{{ gitea_user }}"
-    state: present
-    system: no
+    state: "present"
+    system: false
+    create_home: true
   register: gitea_user_res
 
 - name: Ensure host directories exist
-  file:
+  ansible.builtin.file:
     path: "{{ item }}"
     owner: "{{ gitea_user_res.uid }}"
     group: "{{ gitea_user_res.group }}"
@@ -18,7 +19,7 @@
     - "{{ gitea_data_path }}"
 
 - name: Ensure .ssh folder for gitea user exists
-  file:
+  ansible.builtin.file:
     path: "/home/{{ gitea_user }}/.ssh"
     state: directory
     owner: "{{ gitea_user_res.uid }}"
@@ -37,16 +38,16 @@
   register: gitea_user_ssh_key
 
 - name: Create forwarding script
-  copy:
+  ansible.builtin.copy:
     dest: "/usr/local/bin/gitea"
     owner: "{{ gitea_user_res.uid }}"
     group: "{{ gitea_user_res.group }}"
     mode: 0700
     content: |
-      ssh -p {{ gitea_public_ssh_server_port }} -o StrictHostKeyChecking=no {{ gitea_user }}@127.0.0.1 -i /home/{{ gitea_user }}/.ssh/id_ssh_ed25519 "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $0 $@"
+      ssh -p {{ gitea_public_ssh_server_port }} -o StrictHostKeyChecking=no {{ gitea_run_user }}@127.0.0.1 -i /home/{{ gitea_user }}/.ssh/id_ssh_ed25519 "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $0 $@"
 
 - name: Add host pubkey to git users authorized_keys file
-  lineinfile:
+  ansible.builtin.lineinfile:
     path: "/home/{{ gitea_user }}/.ssh/authorized_keys"
     line: "{{ gitea_user_ssh_key.public_key }} Gitea:Host2Container"
     state: present
@@ -56,26 +57,27 @@
     mode: 0600
 
 - name: Ensure gitea container image is present
-  docker_image:
+  community.docker.docker_image:
     name: "{{ gitea_container_image }}"
     state: present
     source: pull
     force_source: "{{ gitea_container_image.endswith(':latest') }}"
 
-- name: Ensure container '{{ gitea_container_name }}' with gitea is running
-  docker_container:
+- name: Ensure container '{{ gitea_container_name }}' with gitea is {{ gitea_container_state }}
+  community.docker.docker_container:
     name: "{{ gitea_container_name }}"
     image: "{{ gitea_container_image }}"
     env: "{{ gitea_container_env }}"
+    labels: "{{ gitea_container_labels }}"
     volumes: "{{ gitea_container_volumes }}"
     networks: "{{ gitea_container_networks | default(omit, True) }}"
     purge_networks: "{{ gitea_container_purge_networks | default(omit, True) }}"
     published_ports: "{{ gitea_container_ports }}"
     restart_policy: "{{ gitea_container_restart_policy }}"
-    state: started
+    state: "{{ gitea_container_state }}"
 
 - name: Ensure given configuration is set in the config file
-  ini_file:
+  ansible.builtin.ini_file:
     path: "{{ gitea_data_path }}/gitea/conf/app.ini"
     section: "{{ section }}"
     option: "{{ option }}"
```
roles/gitea/vars/main.yml

```diff
@@ -14,7 +14,7 @@ gitea_container_port_ssh: 22
 
 gitea_config_base:
   RUN_MODE: prod
-  RUN_USER: "{{ gitea_user }}"
+  RUN_USER: "{{ gitea_run_user }}"
   server:
     SSH_DOMAIN: "{{ gitea_domain }}"
     DOMAIN: "{{ gitea_domain }}"
@@ -24,11 +24,11 @@ gitea_config_base:
   mailer:
     ENABLED: "{{ gitea_config_mailer_enabled }}"
     MAILER_TYP: "{{ gitea_config_mailer_type }}"
-    HOST: "{{ gitea_config_mailer_host }}"
+    SMTP_ADDR: "{{ gitea_config_mailer_smtp_addr }}"
     USER: "{{ gitea_config_mailer_user }}"
     PASSWD: "{{ gitea_config_mailer_passwd }}"
-    IS_TLS_ENABLED: "{{ gitea_config_mailer_tls }}"
-    FROM: "{{ gitea_config_mailer_from_addr }}"
+    PROTOCOL: "{{ gitea_config_mailer_protocol }}"
+    FROM: "{{ gitea_config_mailer_from }}"
     SENDMAIL_PATH: "{{ gitea_config_mailer_sendmail_path }}"
   metrics:
     ENABLED: "{{ gitea_config_metrics_enabled }}"
```
roles/hedgedoc/README.md (new file, 21 lines)

```diff
@@ -0,0 +1,21 @@
+# `finallycoffee.services.hedgedoc` ansible role
+
+Role to deploy and configure hedgedoc using `docker` or `podman`.
+To configure hedgedoc, set either the config as complex data
+directly in `hedgedoc_config` or use the flattened variables
+from the `hedgedoc_config_*` prefix (see
+[defaults/main/config.yml](defaults/main/config.yml)).
+
+To remove hedgedoc, set `hedgedoc_state: absent`. Note that this
+will delete all data directories aswell, removing any traces this
+role created on the target (except database contents).
+
+# Required configuration
+
+- `hedgedoc_config_domain` - Domain of the hedgedoc instance
+- `hedgedoc_config_session_secret` - session secret for hedgedoc
+
+## Deployment methods
+
+To set the desired deployment method, set `hedgedoc_deployment_method` to a
+supported deployment methods (see [vars/main.yml](vars/main.yml#5)).
```
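Following the README above, a minimal hypothetical configuration for the hedgedoc role; domain, secret and database values are placeholders, and the nested override only demonstrates the recursive merge into `hedgedoc_config_base`:

```yaml
# host_vars/pad.example.org.yml -- illustrative sketch
hedgedoc_deployment_method: docker            # or another method from vars/main.yml
hedgedoc_config_domain: pad.example.org
hedgedoc_config_session_secret: "change-me"
hedgedoc_config_db_host: postgres.example.org
hedgedoc_config_db_password: "change-me"
hedgedoc_config:
  production:
    allowAnonymousEdits: true                 # example of an extra upstream option
```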
roles/hedgedoc/defaults/main/config.yml (new file, 52 lines)

```diff
@@ -0,0 +1,52 @@
+---
+hedgedoc_config_domain: ~
+hedgedoc_config_log_level: "info"
+hedgedoc_config_session_secret: ~
+hedgedoc_config_protocol_use_ssl: true
+hedgedoc_config_hsts_enable: true
+hedgedoc_config_csp_enable: true
+hedgedoc_config_cookie_policy: 'lax'
+hedgedoc_config_allow_free_url: true
+hedgedoc_config_allow_email_register: false
+hedgedoc_config_allow_anonymous: true
+hedgedoc_config_allow_gravatar: true
+hedgedoc_config_require_free_url_authentication: true
+hedgedoc_config_default_permission: 'full'
+
+hedgedoc_config_db_username: hedgedoc
+hedgedoc_config_db_password: ~
+hedgedoc_config_db_database: hedgedoc
+hedgedoc_config_db_host: localhost
+hedgedoc_config_db_port: 5432
+hedgedoc_config_db_dialect: postgres
+
+hedgedoc_config_database:
+  username: "{{ hedgedoc_config_db_username }}"
+  password: "{{ hedgedoc_config_db_password }}"
+  database: "{{ hedgedoc_config_db_database }}"
+  host: "{{ hedgedoc_config_db_host }}"
+  port: "{{ hedgedoc_config_db_port | int }}"
+  dialect: "{{ hedgedoc_config_db_dialect }}"
+hedgedoc_config_base:
+  production:
+    domain: "{{ hedgedoc_config_domain }}"
+    loglevel: "{{ hedgedoc_config_log_level }}"
+    sessionSecret: "{{ hedgedoc_config_session_secret }}"
+    protocolUseSSL: "{{ hedgedoc_config_protocol_use_ssl }}"
+    cookiePolicy: "{{ hedgedoc_config_cookie_policy }}"
+    allowFreeURL: "{{ hedgedoc_config_allow_free_url }}"
+    allowAnonymous: "{{ hedgedoc_config_allow_anonymous }}"
+    allowEmailRegister: "{{ hedgedoc_config_allow_email_register }}"
+    allowGravatar: "{{ hedgedoc_config_allow_gravatar }}"
+    requireFreeURLAuthentication: >-2
+      {{ hedgedoc_config_require_free_url_authentication }}
+    defaultPermission: "{{ hedgedoc_config_default_permission }}"
+    hsts:
+      enable: "{{ hedgedoc_config_hsts_enable }}"
+    csp:
+      enable: "{{ hedgedoc_config_csp_enable }}"
+    db: "{{ hedgedoc_config_database }}"
+hedgedoc_config: ~
+hedgedoc_full_config: >-2
+  {{ hedgedoc_config_base | default({}, true)
+  | combine(hedgedoc_config | default({}, true), recursive=True) }}
```
57
roles/hedgedoc/defaults/main/container.yml
Normal file
57
roles/hedgedoc/defaults/main/container.yml
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
---
|
||||||
|
hedgedoc_container_image_registry: quay.io
|
||||||
|
hedgedoc_container_image_namespace: hedgedoc
|
||||||
|
hedgedoc_container_image_name: hedgedoc
|
||||||
|
hedgedoc_container_image_flavour: alpine
|
||||||
|
hedgedoc_container_image_tag: ~
|
||||||
|
hedgedoc_container_image: >-2
|
||||||
|
{{
|
||||||
|
([
|
||||||
|
hedgedoc_container_image_registry,
|
||||||
|
hedgedoc_container_image_namespace | default([], true),
|
||||||
|
hedgedoc_container_image_name,
|
||||||
|
] | flatten | join('/'))
|
||||||
|
+ ':'
|
||||||
|
+ hedgedoc_container_image_tag | default(
|
||||||
|
hedgedoc_version + (
|
||||||
|
((hedgedoc_container_image_flavour is string)
|
||||||
|
and (hedgedoc_container_image_flavour | length > 0))
|
||||||
|
| ternary('-' +
|
||||||
|
hedgedoc_container_image_flavour | default('', true),
|
||||||
|
''
|
||||||
|
)
|
||||||
|
),
|
||||||
|
true
|
||||||
|
)
|
||||||
|
}}
|
||||||
|
hedgedoc_container_image_source: pull
|
||||||
|
hedgedoc_container_name: hedgedoc
|
||||||
|
hedgedoc_container_state: >-2
|
||||||
|
{{ (hedgedoc_state == 'present') | ternary('started', 'absent') }}
|
||||||
|
|
||||||
|
hedgedoc_container_config_file: "/hedgedoc/config.json"
|
||||||
|
hedgedoc_container_upload_path: "/hedgedoc/public/uploads"
|
||||||
|
|
||||||
|
hedgedoc_container_env: ~
|
||||||
|
hedgedoc_container_user: >-2
|
||||||
|
{{ hedgedoc_run_user_id }}:{{ hedgedoc_run_group_id }}
|
||||||
|
hedgedoc_container_ports: ~
|
||||||
|
hedgedoc_container_networks: ~
|
||||||
|
hedgedoc_container_etc_hosts: ~
|
||||||
|
hedgedoc_container_base_volumes:
|
||||||
|
- "{{ hedgedoc_config_file }}:{{ hedgedoc_container_config_file }}:ro"
|
||||||
|
- "{{ hedgedoc_uploads_path }}:{{ hedgedoc_container_upload_path }}:rw"
|
||||||
|
hedgedoc_container_volumes: ~
|
||||||
|
hedgedoc_container_all_volumes: >-2
|
||||||
|
{{ hedgedoc_container_base_volumes | default([], true)
|
||||||
|
+ hedgedoc_container_volumes | default([], true) }}
|
||||||
|
hedgedoc_container_base_labels:
|
||||||
|
version: "{{ hedgedoc_container_tag | default(hedgedoc_version, true) }}"
|
||||||
|
hedgedoc_container_labels: ~
|
||||||
|
hedgedoc_container_network_mode: ~
|
||||||
|
hedgedoc_container_all_labels: >-2
|
||||||
|
{{ hedgedoc_container_base_labels | default({}, true)
|
||||||
|
| combine(hedgedoc_container_labels | default({}, true)) }}
|
||||||
|
hedgedoc_container_restart_policy: >-2
|
||||||
|
{{ (hedgedoc_deployment_method == 'docker')
|
||||||
|
| ternary('unless-stopped', 'on-failure') }}
|
9 roles/hedgedoc/defaults/main/main.yml (new file)
@ -0,0 +1,9 @@
---
hedgedoc_user: hedgedoc
hedgedoc_version: "1.10.0"

hedgedoc_state: present
hedgedoc_deployment_method: docker

hedgedoc_config_file: "/etc/hedgedoc/config.json"
hedgedoc_uploads_path: "/var/lib/hedgedoc-uploads"
5 roles/hedgedoc/defaults/main/user.yml (new file)
@ -0,0 +1,5 @@
---
hedgedoc_run_user_id: >-2
  {{ hedgedoc_user_info.uid | default(hedgedoc_user) }}
hedgedoc_run_group_id: >-2
  {{ hedgedoc_user_info.group | default(hedgedoc_user) }}
12 roles/hedgedoc/meta/main.yml (new file)
@ -0,0 +1,12 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
  role_name: hedgedoc
  description: >-2
    Deploy hedgedoc, a collaborative markdown editor, using docker
  galaxy_tags:
    - hedgedoc
    - markdown
    - collaboration
    - docker
23
roles/hedgedoc/tasks/check.yml
Normal file
23
roles/hedgedoc/tasks/check.yml
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
---
|
||||||
|
- name: Check for valid state
|
||||||
|
ansible.builtin.fail:
|
||||||
|
msg: >-2
|
||||||
|
Unsupported state '{{ hedgedoc_state }}'. Supported
|
||||||
|
states are {{ hedgedoc_states | join(', ') }}.
|
||||||
|
when: hedgedoc_state not in hedgedoc_states
|
||||||
|
|
||||||
|
- name: Check for valid deployment method
|
||||||
|
ansible.builtin.fail:
|
||||||
|
msg: >-2
|
||||||
|
Deployment method '{{ hedgedoc_deployment_method }}'
|
||||||
|
is not supported. Supported are:
|
||||||
|
{{ hedgedoc_deployment_methods | join(', ') }}
|
||||||
|
when: hedgedoc_deployment_method not in hedgedoc_deployment_methods
|
||||||
|
|
||||||
|
- name: Ensure required variables are given
|
||||||
|
ansible.builtin.fail:
|
||||||
|
msg: "Required variable '{{ item }}' is undefined!"
|
||||||
|
loop: "{{ hedgedoc_required_arguments }}"
|
||||||
|
when: >-2
|
||||||
|
item not in hostvars[inventory_hostname]
|
||||||
|
or hostvars[inventory_hostname][item] | length == 0
|
31
roles/hedgedoc/tasks/deploy-docker.yml
Normal file
31
roles/hedgedoc/tasks/deploy-docker.yml
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
---
|
||||||
|
- name: Ensure container image '{{ hedgedoc_container_image }}' is {{ hedgedoc_state }}
|
||||||
|
community.docker.docker_image:
|
||||||
|
name: "{{ hedgedoc_container_image }}"
|
||||||
|
state: "{{ hedgedoc_state }}"
|
||||||
|
source: "{{ hedgedoc_container_image_source }}"
|
||||||
|
force_source: >-2
|
||||||
|
{{ hedgedoc_container_force_source | default(
|
||||||
|
hedgedoc_container_image_tag | default(false, true), true) }}
|
||||||
|
register: hedgedoc_container_image_info
|
||||||
|
until: hedgedoc_container_image_info is success
|
||||||
|
retries: 5
|
||||||
|
delay: 3
|
||||||
|
|
||||||
|
- name: Ensure container '{{ hedgedoc_container_name }}' is {{ hedgedoc_container_state }}
|
||||||
|
community.docker.docker_container:
|
||||||
|
name: "{{ hedgedoc_container_name }}"
|
||||||
|
image: "{{ hedgedoc_container_image }}"
|
||||||
|
env: "{{ hedgedoc_container_env | default(omit, true) }}"
|
||||||
|
user: "{{ hedgedoc_container_user | default(omit, true) }}"
|
||||||
|
ports: "{{ hedgedoc_container_ports | default(omit, true) }}"
|
||||||
|
labels: "{{ hedgedoc_container_all_labels }}"
|
||||||
|
volumes: "{{ hedgedoc_container_all_volumes }}"
|
||||||
|
etc_hosts: "{{ hedgedoc_container_etc_hosts | default(omit, true) }}"
|
||||||
|
dns_servers: >-2
|
||||||
|
{{ hedgedoc_container_dns_servers | default(omit, true) }}
|
||||||
|
network_mode: >-2
|
||||||
|
{{ hedgedoc_container_network_mode | default(omit, true) }}
|
||||||
|
restart_policy: >-2
|
||||||
|
{{ hedgedoc_container_restart_policy | default(omit, true) }}
|
||||||
|
state: "{{ hedgedoc_container_state }}"
|
21
roles/hedgedoc/tasks/main.yml
Normal file
21
roles/hedgedoc/tasks/main.yml
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
---
|
||||||
|
- name: Check preconditions
|
||||||
|
ansible.builtin.include_tasks:
|
||||||
|
file: "check.yml"
|
||||||
|
|
||||||
|
- name: Ensure user '{{ hedgedoc_user }}' is {{ hedgedoc_state }}
|
||||||
|
ansible.builtin.user:
|
||||||
|
name: "{{ hedgedoc_user }}"
|
||||||
|
state: "{{ hedgedoc_state }}"
|
||||||
|
system: "{{ hedgedoc_user_system | default(true, false) }}"
|
||||||
|
register: hedgedoc_user_info
|
||||||
|
|
||||||
|
- name: Ensure configuration file '{{ hedgedoc_config_file }}' is {{ hedgedoc_state }}
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: "{{ hedgedoc_config_file }}"
|
||||||
|
content: "{{ hedgedoc_full_config | to_nice_json }}"
|
||||||
|
when: hedgedoc_state == 'present'
|
||||||
|
|
||||||
|
- name: Ensure hedgedoc is deployed using {{ hedgedoc_deployment_method }}
|
||||||
|
ansible.builtin.include_tasks:
|
||||||
|
file: "deploy-{{ hedgedoc_deployment_method }}.yml"
|
11 roles/hedgedoc/vars/main.yml (new file)
@ -0,0 +1,11 @@
---
hedgedoc_states:
  - present
  - absent
hedgedoc_deployment_methods:
  - docker
  - podman

hedgedoc_required_arguments:
  - hedgedoc_config_domain
  - hedgedoc_config_session_secret
15 roles/jellyfin/README.md (new file)
@ -0,0 +1,15 @@
# `finallycoffee.services.jellyfin` ansible role

This role runs [Jellyfin](https://jellyfin.org/), a free software media system,
in a docker container.

## Usage

`jellyfin_domain` contains the FQDN which jellyfin should listen on. Most configuration
is done in the software itself.

Jellyfin runs in host networking mode by default, as that is needed for some features
like network discovery with Chromecasts and similar devices.

Media can be mounted into jellyfin using `jellyfin_media_volumes`, taking a list of strings
akin to `community.docker.docker_container`'s `volumes` key.
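A short, hypothetical example of the `jellyfin_media_volumes` list mentioned above; the paths are placeholders and follow the `host:container[:options]` volume syntax of `community.docker.docker_container`.

```yaml
# Hypothetical media mounts and domain for the jellyfin role (placeholder values)
jellyfin_domain: "media.example.org"
jellyfin_media_volumes:
  - "/srv/media/movies:/media/movies:ro"
  - "/srv/media/shows:/media/shows:ro"
```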
roles/jellyfin/defaults/main.yml (changed)
@ -1,6 +1,7 @@
 ---
 
 jellyfin_user: jellyfin
+jellyfin_version: "10.10.3"
+jellyfin_state: present
 
 jellyfin_base_path: /opt/jellyfin
 jellyfin_config_path: "{{ jellyfin_base_path }}/config"
@ -10,11 +11,17 @@ jellyfin_media_volumes: []
 
 jellyfin_container_name: jellyfin
 jellyfin_container_image_name: "docker.io/jellyfin/jellyfin"
-jellyfin_container_image_tag: "latest"
+jellyfin_container_image_tag: ~
-jellyfin_container_image_ref: "{{ jellyfin_container_image_name }}:{{ jellyfin_container_image_tag }}"
+jellyfin_container_image_ref: >-2
+  {{ jellyfin_container_image_name }}:{{ jellyfin_container_image_tag | default(jellyfin_version, true) }}
+jellyfin_container_image_source: pull
+jellyfin_container_state: >-2
+  {{ (jellyfin_state == 'present') | ternary('started', 'absent') }}
 jellyfin_container_network_mode: host
 jellyfin_container_networks: ~
 jellyfin_container_volumes: "{{ jellyfin_container_base_volumes + jellyfin_media_volumes }}"
+jellyfin_container_labels: "{{ jellyfin_container_base_labels | combine(jellyfin_container_extra_labels) }}"
+jellyfin_container_extra_labels: {}
 jellyfin_container_restart_policy: "unless-stopped"
 
 jellyfin_host_directories:
10 roles/jellyfin/meta/main.yml (new file)
@ -0,0 +1,10 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
  role_name: jellyfin
  description: Ansible role to deploy jellyfin using docker
  galaxy_tags:
    - jellyfin
    - streaming
    - docker
roles/jellyfin/tasks/main.yml (changed)
@ -1,35 +1,47 @@
 ---
+- name: Check if state is valid
+  ansible.builtin.fail:
+    msg: >-2
+      Unsupported state '{{ jellyfin_state }}'. Supported
+      states are {{ jellyfin_states | join(', ') }}.
+  when: jellyfin_state not in jellyfin_states
+
-- name: Ensure user '{{ jellyfin_user }}' for jellyfin is created
+- name: Ensure jellyfin user '{{ jellyfin_user }}' is {{ jellyfin_state }}
-  user:
+  ansible.builtin.user:
     name: "{{ jellyfin_user }}"
-    state: present
+    state: "{{ jellyfin_state }}"
-    system: yes
+    system: "{{ jellyfin_user_system | default(true, true) }}"
   register: jellyfin_user_info
 
-- name: Ensure host directories for jellyfin exist
+- name: Ensure host directories for jellyfin are {{ jellyfin_state }}
-  file:
+  ansible.builtin.file:
     path: "{{ item.path }}"
-    state: directory
+    state: >-2
+      {{ (jellyfin_state == 'present') | ternary('directory', 'absent') }}
     owner: "{{ item.owner | default(jellyfin_uid) }}"
     group: "{{ item.group | default(jellyfin_gid) }}"
     mode: "{{ item.mode }}"
   loop: "{{ jellyfin_host_directories }}"
 
-- name: Ensure container image for jellyfin is available
+- name: Ensure container image '{{ jellyfin_container_image_ref }}' is {{ jellyfin_state }}
-  docker_image:
+  community.docker.docker_image:
     name: "{{ jellyfin_container_image_ref }}"
-    state: present
+    state: "{{ jellyfin_state }}"
-    source: pull
+    source: "{{ jellyfin_container_image_source }}"
-    force_source: "{{ jellyfin_container_image_tag in ['stable', 'unstable'] }}"
+    force_source: "{{ jellyfin_container_image_tag | default(false, true) }}"
+  register: jellyfin_container_image_pull_result
+  until: jellyfin_container_image_pull_result is succeeded
+  retries: 5
+  delay: 3
 
-- name: Ensure container '{{ jellyfin_container_name }}' is running
+- name: Ensure container '{{ jellyfin_container_name }}' is {{ jellyfin_container_state }}
-  docker_container:
+  community.docker.docker_container:
     name: "{{ jellyfin_container_name }}"
     image: "{{ jellyfin_container_image_ref }}"
     user: "{{ jellyfin_uid }}:{{ jellyfin_gid }}"
+    labels: "{{ jellyfin_container_labels }}"
     volumes: "{{ jellyfin_container_volumes }}"
     networks: "{{ jellyfin_container_networks | default(omit, True) }}"
     network_mode: "{{ jellyfin_container_network_mode }}"
     restart_policy: "{{ jellyfin_container_restart_policy }}"
-    state: started
+    state: "{{ jellyfin_container_state }}"
roles/jellyfin/vars/main.yml (changed)
@ -1,5 +1,11 @@
 ---
+jellyfin_states:
+  - present
+  - absent
+
 jellyfin_container_base_volumes:
   - "{{ jellyfin_config_path }}:/config:z"
   - "{{ jellyfin_cache_path }}:/cache:z"
+
+jellyfin_container_base_labels:
+  version: "{{ jellyfin_version }}"
16 roles/keycloak/README.md (new file)
@ -0,0 +1,16 @@
# `finallycoffee.services.keycloak` ansible role

Ansible role for deploying keycloak; currently, only docker is supported.

Migrated from `entropia.sso.keycloak`.

## Required variables

- `keycloak_database_password` - password for the database user
- `keycloak_config_hostname` - public domain of the keycloak server

## Database configuration

- `keycloak_database_hostname` - hostname of the database server, defaults to `localhost`
- `keycloak_database_username` - username to use when connecting to the database server, defaults to `keycloak`
- `keycloak_database_database` - name of the database to use, defaults to `keycloak`
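To tie the required variables above together, a hypothetical, minimal set of host variables for this role; all values are placeholders and the database is assumed to run on the same host.

```yaml
# Hypothetical host_vars for the keycloak role (placeholder values)
keycloak_config_hostname: "id.example.org"
keycloak_config_admin_password: "{{ vault_keycloak_admin_password }}"
keycloak_database_hostname: "localhost"
keycloak_database_username: "keycloak"
keycloak_database_password: "{{ vault_keycloak_db_password }}"
keycloak_database_database: "keycloak"
```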
51
roles/keycloak/defaults/main.yml
Normal file
51
roles/keycloak/defaults/main.yml
Normal file
@ -0,0 +1,51 @@
|
|||||||
|
---
|
||||||
|
keycloak_version: 26.0.7
|
||||||
|
keycloak_container_name: keycloak
|
||||||
|
|
||||||
|
keycloak_container_image_upstream_registry: quay.io
|
||||||
|
keycloak_container_image_upstream_namespace: keycloak
|
||||||
|
keycloak_container_image_upstream_name: keycloak
|
||||||
|
keycloak_container_image_upstream: >-2
|
||||||
|
{{
|
||||||
|
([
|
||||||
|
keycloak_container_image_upstream_registry | default([]),
|
||||||
|
keycloak_container_image_upstream_namespace | default([]),
|
||||||
|
keycloak_container_image_upstream_name,
|
||||||
|
] | flatten | join('/'))
|
||||||
|
}}
|
||||||
|
keycloak_container_image_name: "keycloak:{{ keycloak_version }}-custom"
|
||||||
|
|
||||||
|
keycloak_container_database_vendor: postgres
|
||||||
|
keycloak_base_path: /opt/keycloak
|
||||||
|
keycloak_container_build_directory: "{{ keycloak_base_path }}/build"
|
||||||
|
keycloak_container_build_jar_directory: providers
|
||||||
|
keycloak_container_build_flags: {}
|
||||||
|
keycloak_provider_jars_directory: "{{ keycloak_base_path }}/providers"
|
||||||
|
keycloak_build_provider_jars_directory: "{{ keycloak_container_build_directory }}/{{ keycloak_container_build_jar_directory }}"
|
||||||
|
|
||||||
|
keycloak_database_hostname: localhost
|
||||||
|
keycloak_database_port: 5432
|
||||||
|
keycloak_database_username: keycloak
|
||||||
|
keycloak_database_password: ~
|
||||||
|
keycloak_database_database: keycloak
|
||||||
|
|
||||||
|
keycloak_container_env: {}
|
||||||
|
keycloak_container_labels: ~
|
||||||
|
keycloak_container_volumes: ~
|
||||||
|
keycloak_container_restart_policy: unless-stopped
|
||||||
|
keycloak_container_command: >-2
|
||||||
|
start
|
||||||
|
--db-username {{ keycloak_database_username }}
|
||||||
|
--db-password {{ keycloak_database_password }}
|
||||||
|
--db-url jdbc:postgresql://{{ keycloak_database_hostname }}{{ keycloak_database_port | ternary(':' ~ keycloak_database_port, '') }}/{{ keycloak_database_database }}
|
||||||
|
{{ keycloak_container_extra_start_flags | default([]) | join(' ') }}
|
||||||
|
--proxy-headers=xforwarded
|
||||||
|
--hostname {{ keycloak_config_hostname }}
|
||||||
|
--optimized
|
||||||
|
|
||||||
|
keycloak_config_health_enabled: true
|
||||||
|
keycloak_config_metrics_enabled: true
|
||||||
|
|
||||||
|
keycloak_config_hostname: localhost
|
||||||
|
keycloak_config_admin_username: admin
|
||||||
|
keycloak_config_admin_password: ~
|
13 roles/keycloak/meta/main.yml (new file)
@ -0,0 +1,13 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
  role_name: keycloak
  description: Deploy keycloak, the open-source identity and access management solution
  galaxy_tags:
    - keycloak
    - sso
    - oidc
    - oauth2
    - iam
    - docker
72
roles/keycloak/tasks/main.yml
Normal file
72
roles/keycloak/tasks/main.yml
Normal file
@ -0,0 +1,72 @@
|
|||||||
|
---
|
||||||
|
|
||||||
|
- name: Ensure build directory exists
|
||||||
|
ansible.builtin.file:
|
||||||
|
name: "{{ keycloak_container_build_directory }}"
|
||||||
|
state: directory
|
||||||
|
recurse: yes
|
||||||
|
mode: 0700
|
||||||
|
tags:
|
||||||
|
- keycloak-build-container
|
||||||
|
|
||||||
|
- name: Ensure provider jars directory exists
|
||||||
|
ansible.builtin.file:
|
||||||
|
name: "{{ keycloak_provider_jars_directory }}"
|
||||||
|
state: directory
|
||||||
|
mode: 0775
|
||||||
|
tags:
|
||||||
|
- keycloak-build-container
|
||||||
|
|
||||||
|
- name: Ensure Dockerfile is templated
|
||||||
|
ansible.builtin.template:
|
||||||
|
src: Dockerfile.j2
|
||||||
|
dest: "{{ keycloak_container_build_directory }}/Dockerfile"
|
||||||
|
mode: 0700
|
||||||
|
register: keycloak_buildfile_info
|
||||||
|
tags:
|
||||||
|
- keycloak-container
|
||||||
|
- keycloak-build-container
|
||||||
|
|
||||||
|
- name: Ensure upstream Keycloak container image '{{ keycloak_container_image_upstream }}:{{ keycloak_version }}' is present
|
||||||
|
community.docker.docker_image:
|
||||||
|
name: "{{ keycloak_container_image_upstream }}:{{ keycloak_version }}"
|
||||||
|
source: pull
|
||||||
|
state: present
|
||||||
|
register: keycloak_container_image_upstream_status
|
||||||
|
tags:
|
||||||
|
- keycloak-container
|
||||||
|
- keycloak-build-container
|
||||||
|
|
||||||
|
- name: Ensure custom keycloak container image '{{ keycloak_container_image_name }}' is built
|
||||||
|
community.docker.docker_image:
|
||||||
|
name: "{{ keycloak_container_image_name }}"
|
||||||
|
build:
|
||||||
|
args:
|
||||||
|
DB_VENDOR: "{{ keycloak_container_database_vendor }}"
|
||||||
|
KC_ADMIN_PASSWORD: "{{ keycloak_config_admin_password }}"
|
||||||
|
dockerfile: "{{ keycloak_container_build_directory }}/Dockerfile"
|
||||||
|
path: "{{ keycloak_container_build_directory }}"
|
||||||
|
source: build
|
||||||
|
state: present
|
||||||
|
force_source: "{{ keycloak_buildfile_info.changed or keycloak_container_image_upstream_status.changed or (keycloak_force_rebuild_container | default(false))}}"
|
||||||
|
register: keycloak_container_image_status
|
||||||
|
tags:
|
||||||
|
- keycloak-container
|
||||||
|
- keycloak-build-container
|
||||||
|
|
||||||
|
- name: Ensure keycloak container is running
|
||||||
|
community.docker.docker_container:
|
||||||
|
name: "{{ keycloak_container_name }}"
|
||||||
|
image: "{{ keycloak_container_image_name }}"
|
||||||
|
env: "{{ keycloak_container_env | default(omit, true) }}"
|
||||||
|
ports: "{{ keycloak_container_ports | default(omit, true) }}"
|
||||||
|
hostname: "{{ keycloak_container_hostname | default(omit) }}"
|
||||||
|
labels: "{{ keycloak_container_labels | default(omit, true) }}"
|
||||||
|
volumes: "{{ keycloak_container_volumes | default(omit, true) }}"
|
||||||
|
restart_policy: "{{ keycloak_container_restart_policy }}"
|
||||||
|
recreate: "{{ keycloak_container_force_recreate | default(false) or (keycloak_container_image_status.changed if keycloak_container_image_status is defined else false) }}"
|
||||||
|
etc_hosts: "{{ keycloak_container_etc_hosts | default(omit) }}"
|
||||||
|
state: started
|
||||||
|
command: "{{ keycloak_container_command }}"
|
||||||
|
tags:
|
||||||
|
- keycloak-container
|
41
roles/keycloak/templates/Dockerfile.j2
Normal file
41
roles/keycloak/templates/Dockerfile.j2
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
FROM {{ keycloak_container_image_upstream }}:{{ keycloak_version }} as builder
|
||||||
|
|
||||||
|
# Enable health and metrics support
|
||||||
|
ENV KC_HEALTH_ENABLED={{ keycloak_config_health_enabled | ternary('true', 'false') }}
|
||||||
|
ENV KC_METRICS_ENABLED={{ keycloak_config_metrics_enabled | ternary('true', 'false') }}
|
||||||
|
|
||||||
|
# Configure a database vendor
|
||||||
|
ARG DB_VENDOR
|
||||||
|
ENV KC_DB=$DB_VENDOR
|
||||||
|
|
||||||
|
WORKDIR {{ keycloak_container_working_directory }}
|
||||||
|
|
||||||
|
ADD ./providers/* providers/
|
||||||
|
# Workaround to set correct mode on jar files
|
||||||
|
USER root
|
||||||
|
RUN chmod -R 0770 providers/*
|
||||||
|
USER keycloak
|
||||||
|
|
||||||
|
RUN {{ keycloak_container_working_directory }}/bin/kc.sh --verbose \
|
||||||
|
{% for argument in keycloak_container_build_flags | dict2items(key_name='flag', value_name='value') %}
|
||||||
|
--{{- argument['flag'] -}}{{- argument['value'] | default(false, true) | ternary('=' + argument['value'], '') }} \
|
||||||
|
{% endfor%}
|
||||||
|
build{% if keycloak_container_build_features | default([]) | length > 0 %} \
|
||||||
|
{% endif %}
|
||||||
|
{% if keycloak_container_build_features | default([]) | length > 0 %}
|
||||||
|
--features="{{ keycloak_container_build_features | join(',') }}"
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
|
||||||
|
FROM {{ keycloak_container_image_upstream }}:{{ keycloak_version }}
|
||||||
|
COPY --from=builder {{ keycloak_container_working_directory }}/ {{ keycloak_container_working_directory }}/
|
||||||
|
|
||||||
|
ENV KC_HOSTNAME={{ keycloak_config_hostname }}
|
||||||
|
ENV KEYCLOAK_ADMIN={{ keycloak_config_admin_username }}
|
||||||
|
ARG KC_ADMIN_PASSWORD
|
||||||
|
{% if keycloak_version | split('.') | first | int > 21 %}
|
||||||
|
ENV KEYCLOAK_ADMIN_PASSWORD=$KC_ADMIN_PASSWORD
|
||||||
|
{% else %}
|
||||||
|
ENV KEYCLOAK_PASSWORD=$KC_ADMIN_PASSWORD
|
||||||
|
{% endif %}
|
||||||
|
ENTRYPOINT ["{{ keycloak_container_working_directory }}/bin/kc.sh"]
|
3 roles/keycloak/vars/main.yml (new file)
@ -0,0 +1,3 @@
---

keycloak_container_working_directory: /opt/keycloak
@ -1,29 +0,0 @@
|
|||||||
# `finallycoffee.services.minio` ansible role
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
|
|
||||||
This role deploys a [min.io](https://min.io) server (s3-compatible object storage server)
|
|
||||||
using the official docker container image.
|
|
||||||
|
|
||||||
## Configuration
|
|
||||||
|
|
||||||
The role requires setting the password for the `root` user (name can be changed by
|
|
||||||
setting `minio_root_username`) in `minio_root_password`. That user has full control
|
|
||||||
over the minio-server instance.
|
|
||||||
|
|
||||||
### Useful config hints
|
|
||||||
|
|
||||||
Most configuration is done by setting environment variables in
|
|
||||||
`minio_container_extra_env`, for example:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
minio_container_extra_env:
|
|
||||||
# disable the "console" web browser UI
|
|
||||||
MINIO_BROWSER: off
|
|
||||||
# enable public prometheus metrics on `/minio/v2/metrics/cluster`
|
|
||||||
MINIO_PROMETHEUS_AUTH_TYPE: public
|
|
||||||
```
|
|
||||||
|
|
||||||
When serving minio (or any s3-compatible server) on a "subfolder",
|
|
||||||
see https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTRedirect.html
|
|
||||||
and https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html
|
|
@ -1,40 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
minio_user: ~
|
|
||||||
minio_data_path: /opt/minio
|
|
||||||
|
|
||||||
minio_create_user: false
|
|
||||||
minio_manage_host_filesystem: false
|
|
||||||
|
|
||||||
minio_root_username: root
|
|
||||||
minio_root_password: ~
|
|
||||||
|
|
||||||
minio_container_name: minio
|
|
||||||
minio_container_image_name: docker.io/minio/minio
|
|
||||||
minio_container_image_tag: latest
|
|
||||||
minio_container_image: "{{ minio_container_image_name }}:{{ minio_container_image_tag }}"
|
|
||||||
minio_container_networks: []
|
|
||||||
minio_container_ports: []
|
|
||||||
|
|
||||||
minio_container_base_volumes:
|
|
||||||
- "{{ minio_data_path }}:{{ minio_container_data_path }}:z"
|
|
||||||
minio_container_extra_volumes: []
|
|
||||||
|
|
||||||
minio_container_base_env:
|
|
||||||
MINIO_ROOT_USER: "{{ minio_root_username }}"
|
|
||||||
MINIO_ROOT_PASSWORD: "{{ minio_root_password }}"
|
|
||||||
minio_container_extra_env: {}
|
|
||||||
|
|
||||||
minio_container_labels: {}
|
|
||||||
|
|
||||||
minio_container_command:
|
|
||||||
- "server"
|
|
||||||
- "{{ minio_container_data_path }}"
|
|
||||||
- "--console-address \":{{ minio_container_listen_port_console }}\""
|
|
||||||
minio_container_restart_policy: "unless-stopped"
|
|
||||||
minio_container_image_force_source: "{{ (minio_container_image_tag == 'latest')|bool }}"
|
|
||||||
|
|
||||||
minio_container_listen_port_api: 9000
|
|
||||||
minio_container_listen_port_console: 8900
|
|
||||||
|
|
||||||
minio_container_data_path: /storage
|
|
@ -1,37 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
- name: Ensure minio run user is present
|
|
||||||
user:
|
|
||||||
name: "{{ minio_user }}"
|
|
||||||
state: present
|
|
||||||
system: yes
|
|
||||||
when: minio_create_user
|
|
||||||
|
|
||||||
- name: Ensure filesystem mounts ({{ minio_data_path }}) for container volumes are present
|
|
||||||
file:
|
|
||||||
path: "{{ minio_data_path }}"
|
|
||||||
state: directory
|
|
||||||
user: "{{ minio_user|default(omit, True) }}"
|
|
||||||
group: "{{ minio_user|default(omit, True) }}"
|
|
||||||
when: minio_manage_host_filesystem
|
|
||||||
|
|
||||||
- name: Ensure container image for minio is present
|
|
||||||
community.docker.docker_image:
|
|
||||||
name: "{{ minio_container_image }}"
|
|
||||||
state: present
|
|
||||||
source: pull
|
|
||||||
force_source: "{{ minio_container_image_force_source }}"
|
|
||||||
|
|
||||||
- name: Ensure container {{ minio_container_name }} is running
|
|
||||||
docker_container:
|
|
||||||
name: "{{ minio_container_name }}"
|
|
||||||
image: "{{ minio_container_image }}"
|
|
||||||
volumes: "{{ minio_container_volumes }}"
|
|
||||||
env: "{{ minio_container_env }}"
|
|
||||||
labels: "{{ minio_container_labels }}"
|
|
||||||
networks: "{{ minio_container_networks }}"
|
|
||||||
ports: "{{ minio_container_ports }}"
|
|
||||||
user: "{{ minio_user|default(omit, True) }}"
|
|
||||||
command: "{{ minio_container_command }}"
|
|
||||||
restart_policy: "{{ minio_container_restart_policy }}"
|
|
||||||
state: started
|
|
@ -1,5 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
minio_container_volumes: "{{ minio_container_base_volumes + minio_container_extra_volumes }}"
|
|
||||||
|
|
||||||
minio_container_env: "{{ minio_container_base_env | combine(minio_container_extra_env) }}"
|
|
21 roles/openproject/README.md (new file)
@ -0,0 +1,21 @@
# `finallycoffee.services.openproject` ansible role

Deploys [openproject](https://www.openproject.org/) using docker-compose.

## Configuration

Configuration for OpenProject is done by overriding the upstream docker-compose setup in `openproject_compose_overrides`:
```yaml
openproject_compose_overrides:
  version: "3.7"
  services:
    proxy:
      [...]
  volumes:
    pgdata:
      driver: local
      driver_opts:
        o: bind
        type: none
        device: /var/lib/postgresql
```
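Since the role also templates an `.env` file from `openproject_compose_project_env` (see the defaults and tasks below), here is a hypothetical sketch of passing environment settings that way; the keys follow OpenProject's `OPENPROJECT_*` environment convention and all values are placeholders.

```yaml
# Hypothetical .env contents via openproject_compose_project_env (placeholder values)
openproject_compose_project_env:
  OPENPROJECT_HOST__NAME: "project.example.org"
  OPENPROJECT_HTTPS: "true"
  TAG: "14"  # image tag consumed by the upstream compose file (assumed)
```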
11 roles/openproject/defaults/main.yml (new file)
@ -0,0 +1,11 @@
---
openproject_base_path: "/opt/openproject"

openproject_upstream_git_url: "https://github.com/opf/openproject-deploy.git"
openproject_upstream_git_branch: "stable/14"

openproject_compose_project_path: "{{ openproject_base_path }}"
openproject_compose_project_name: "openproject"
openproject_compose_project_env_file: "{{ openproject_compose_project_path }}/.env"
openproject_compose_project_override_file: "{{ openproject_compose_project_path }}/docker-compose.override.yml"
openproject_compose_project_env: {}
38
roles/openproject/tasks/main.yml
Normal file
38
roles/openproject/tasks/main.yml
Normal file
@ -0,0 +1,38 @@
|
|||||||
|
---
|
||||||
|
- name: Ensure base directory '{{ openproject_base_path }}' is present
|
||||||
|
ansible.builtin.file:
|
||||||
|
path: "{{ openproject_base_path }}"
|
||||||
|
state: directory
|
||||||
|
|
||||||
|
- name: Ensure upstream repository is cloned
|
||||||
|
ansible.builtin.git:
|
||||||
|
dest: "{{ openproject_base_path }}"
|
||||||
|
repo: "{{ openproject_upstream_git_url }}"
|
||||||
|
version: "{{ openproject_upstream_git_branch }}"
|
||||||
|
clone: true
|
||||||
|
depth: 1
|
||||||
|
|
||||||
|
- name: Ensure environment is configured
|
||||||
|
ansible.builtin.lineinfile:
|
||||||
|
line: "{{ item.key}}={{ item.value}}"
|
||||||
|
path: "{{ openproject_compose_project_env_file }}"
|
||||||
|
state: present
|
||||||
|
create: true
|
||||||
|
loop: "{{ openproject_compose_project_env | dict2items(key_name='key', value_name='value') }}"
|
||||||
|
|
||||||
|
- name: Ensure docker compose overrides are set
|
||||||
|
ansible.builtin.copy:
|
||||||
|
dest: "{{ openproject_compose_project_override_file }}"
|
||||||
|
content: "{{ openproject_compose_overrides | default({}) | to_nice_yaml }}"
|
||||||
|
|
||||||
|
- name: Ensure containers are pulled
|
||||||
|
community.docker.docker_compose_v2:
|
||||||
|
project_src: "{{ openproject_compose_project_path }}"
|
||||||
|
project_name: "{{ openproject_compose_project_name }}"
|
||||||
|
pull: "missing"
|
||||||
|
|
||||||
|
- name: Ensure services are running
|
||||||
|
community.docker.docker_compose_v2:
|
||||||
|
project_src: "{{ openproject_compose_project_path }}"
|
||||||
|
project_name: "{{ openproject_compose_project_name }}"
|
||||||
|
state: "present"
|
@ -1,77 +0,0 @@
|
|||||||
# `finallycoffee.services.restic`
|
|
||||||
|
|
||||||
Ansible role for backing up data using `restic`, utilizing `systemd` timers for scheduling.
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
|
|
||||||
As restic encrypts the data before storing it, the `restic_repo_password` needs
|
|
||||||
to be populated with a strong key, and saved accordingly as only this key can
|
|
||||||
be used to decrypt the data for a restore!
|
|
||||||
|
|
||||||
### Backends
|
|
||||||
|
|
||||||
#### S3 Backend
|
|
||||||
|
|
||||||
To use a `s3`-compatible backend like AWS buckets or minio, both `restic_s3_key_id`
|
|
||||||
and `restic_s3_access_key` need to be populated, and the `restic_repo_url` has the
|
|
||||||
format `s3:https://my.s3.endpoint:port/bucket-name`.
|
|
||||||
|
|
||||||
#### SFTP Backend
|
|
||||||
|
|
||||||
Using the `sftp` backend requires the configured `restic_user` to be able to
|
|
||||||
authenticate to the configured SFTP-Server using password-less methods like
|
|
||||||
publickey-authentication. The `restic_repo_url` then follows the format
|
|
||||||
`sftp:{user}@{server}:/my-restic-repository` (or without leading `/` for relative
|
|
||||||
paths to the `{user}`'s home directory).
|
|
||||||
|
|
||||||
### Backing up data
|
|
||||||
|
|
||||||
A job name like `$service-postgres` or similar needs to be set in `restic_job_name`,
|
|
||||||
which is used for naming the `systemd` units, their syslog identifiers etc.
|
|
||||||
|
|
||||||
If backing up filesystem locations, the paths need to be specified in
|
|
||||||
`restic_backup_paths` as lists of strings representing absolute filesystem
|
|
||||||
locations.
|
|
||||||
|
|
||||||
If backing up f.ex. database or other data which is generating backups using
|
|
||||||
a command like `pg_dump`, use `restic_backup_stdin_command` (which needs to output
|
|
||||||
to `stdout`) in conjunction with `restic_backup_stdin_command_filename` to name
|
|
||||||
the resulting output (required).
|
|
||||||
|
|
||||||
### Policy
|
|
||||||
|
|
||||||
The backup policy can be adjusted by overriding the `restic_policy_keep_*`
|
|
||||||
variables, with the defaults being:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
restic_policy_keep_all_within: 1d
|
|
||||||
restic_policy_keep_hourly: 6
|
|
||||||
restic_policy_keep_daily: 2
|
|
||||||
restic_policy_keep_weekly: 7
|
|
||||||
restic_policy_keep_monthly: 4
|
|
||||||
restic_policy_backup_frequency: hourly
|
|
||||||
```
|
|
||||||
|
|
||||||
**Note:** `restic_policy_backup_frequency` must conform to `systemd`'s
|
|
||||||
`OnCalendar` syntax, which can be checked using `systemd-analyze calendar $x`.
|
|
||||||
|
|
||||||
## Role behaviour
|
|
||||||
|
|
||||||
By default, when the systemd unit for a job changes, the job is not immediately
|
|
||||||
started. This can be overridden using `restic_start_job_on_unit_change: true`,
|
|
||||||
which will immediately start the backup job if its configuration changed.
|
|
||||||
|
|
||||||
The systemd unit runs with `restic_user`, which is root by default, guaranteeing
|
|
||||||
that filesystem paths are always readable. The `restic_user` can be overridden,
|
|
||||||
but care needs to be taken to ensure the user has permission to read all the
|
|
||||||
provided filesystem paths / the backup command may be executed by the user.
|
|
||||||
|
|
||||||
If ansible should create the user, set `restic_create_user` to `true`, which
|
|
||||||
will attempt to create the `restic_user` as a system user.
|
|
||||||
|
|
||||||
### Installing
|
|
||||||
|
|
||||||
For Debian and RedHat, the role attempts to install restic using the default
|
|
||||||
package manager's ansible module (apt/dnf). For other distributions, the generic
|
|
||||||
`package` module tries to install `restic_package_name` (default: `restic`),
|
|
||||||
which can be overridden if needed.
|
|
@ -1,37 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
restic_repo_url: ~
|
|
||||||
restic_repo_password: ~
|
|
||||||
restic_s3_key_id: ~
|
|
||||||
restic_s3_access_key: ~
|
|
||||||
|
|
||||||
restic_backup_paths: []
|
|
||||||
restic_backup_stdin_command: ~
|
|
||||||
restic_backup_stdin_command_filename: ~
|
|
||||||
|
|
||||||
restic_policy_keep_all_within: 1d
|
|
||||||
restic_policy_keep_hourly: 6
|
|
||||||
restic_policy_keep_daily: 2
|
|
||||||
restic_policy_keep_weekly: 7
|
|
||||||
restic_policy_keep_monthly: 4
|
|
||||||
restic_policy_backup_frequency: hourly
|
|
||||||
|
|
||||||
restic_policy:
|
|
||||||
keep_within: "{{ restic_policy_keep_all_within }}"
|
|
||||||
hourly: "{{ restic_policy_keep_hourly }}"
|
|
||||||
daily: "{{ restic_policy_keep_daily }}"
|
|
||||||
weekly: "{{ restic_policy_keep_weekly }}"
|
|
||||||
monthly: "{{ restic_policy_keep_monthly }}"
|
|
||||||
frequency: "{{ restic_policy_backup_frequency }}"
|
|
||||||
|
|
||||||
restic_user: root
|
|
||||||
restic_create_user: false
|
|
||||||
restic_start_job_on_unit_change: false
|
|
||||||
|
|
||||||
restic_job_name: ~
|
|
||||||
restic_job_description: "Restic backup job for {{ restic_job_name }}"
|
|
||||||
restic_systemd_unit_naming_scheme: "restic.{{ restic_job_name }}"
|
|
||||||
restic_systemd_working_directory: /tmp
|
|
||||||
restic_systemd_syslog_identifier: "restic-{{ restic_job_name }}"
|
|
||||||
|
|
||||||
restic_package_name: restic
|
|
@ -1,13 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
- name: Ensure system daemon is reloaded
|
|
||||||
listen: reload-systemd
|
|
||||||
systemd:
|
|
||||||
daemon_reload: true
|
|
||||||
|
|
||||||
- name: Ensure systemd service for '{{ restic_job_name }}' is started immediately
|
|
||||||
listen: trigger-restic
|
|
||||||
systemd:
|
|
||||||
name: "{{ restic_systemd_unit_naming_scheme }}.service"
|
|
||||||
state: started
|
|
||||||
when: restic_start_job_on_unit_change
|
|
@ -1,90 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
- name: Ensure {{ restic_user }} system user exists
|
|
||||||
user:
|
|
||||||
name: "{{ restic_user }}"
|
|
||||||
state: present
|
|
||||||
system: true
|
|
||||||
when: restic_create_user
|
|
||||||
|
|
||||||
- name: Ensure either backup_paths or backup_stdin_command is populated
|
|
||||||
when: restic_backup_paths|length > 0 and restic_backup_stdin_command
|
|
||||||
fail:
|
|
||||||
msg: "Setting both `restic_backup_paths` and `restic_backup_stdin_command` is not supported"
|
|
||||||
|
|
||||||
- name: Ensure a filename for stdin_command backup is given
|
|
||||||
when: restic_backup_stdin_command and not restic_backup_stdin_command_filename
|
|
||||||
fail:
|
|
||||||
msg: "`restic_backup_stdin_command` was set but no filename for the resulting output was supplied in `restic_backup_stdin_command_filename`"
|
|
||||||
|
|
||||||
- name: Ensure backup frequency adheres to systemd's OnCalendar syntax
|
|
||||||
command:
|
|
||||||
cmd: "systemd-analyze calendar {{ restic_policy.frequency }}"
|
|
||||||
register: systemd_calender_parse_res
|
|
||||||
failed_when: systemd_calender_parse_res.rc != 0
|
|
||||||
changed_when: false
|
|
||||||
|
|
||||||
- name: Ensure restic is installed
|
|
||||||
block:
|
|
||||||
- name: Ensure restic is installed via apt
|
|
||||||
apt:
|
|
||||||
package: restic
|
|
||||||
state: latest
|
|
||||||
when: ansible_os_family == 'Debian'
|
|
||||||
- name: Ensure restic is installed via dnf
|
|
||||||
dnf:
|
|
||||||
name: restic
|
|
||||||
state: latest
|
|
||||||
when: ansible_os_family == 'RedHat'
|
|
||||||
- name: Ensure restic is installed using the auto-detected package-manager
|
|
||||||
package:
|
|
||||||
name: "{{ restic_package_name }}"
|
|
||||||
state: present
|
|
||||||
when: ansible_os_family not in ['RedHat', 'Debian']
|
|
||||||
|
|
||||||
- name: Ensure systemd service file for '{{ restic_job_name }}' is templated
|
|
||||||
template:
|
|
||||||
dest: "/etc/systemd/system/{{ service.unit_name }}.service"
|
|
||||||
src: "{{ service.file }}"
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
mode: 0640
|
|
||||||
notify:
|
|
||||||
- reload-systemd
|
|
||||||
- trigger-restic
|
|
||||||
loop:
|
|
||||||
- unit_name: "{{ restic_systemd_unit_naming_scheme }}"
|
|
||||||
file: restic.service.j2
|
|
||||||
- unit_name: "{{ restic_systemd_unit_naming_scheme }}-unlock"
|
|
||||||
file: restic-unlock.service.j2
|
|
||||||
loop_control:
|
|
||||||
loop_var: service
|
|
||||||
label: "{{ service.file }}"
|
|
||||||
|
|
||||||
- name: Ensure systemd service file for '{{ restic_job_name }}' is templated
|
|
||||||
template:
|
|
||||||
dest: "/etc/systemd/system/{{ restic_systemd_unit_naming_scheme }}.timer"
|
|
||||||
src: restic.timer.j2
|
|
||||||
owner: root
|
|
||||||
group: root
|
|
||||||
mode: 0640
|
|
||||||
notify:
|
|
||||||
- reload-systemd
|
|
||||||
|
|
||||||
- name: Flush handlers to ensure systemd knows about '{{ restic_job_name }}'
|
|
||||||
meta: flush_handlers
|
|
||||||
|
|
||||||
- name: Ensure systemd service for unlocking repository for '{{ restic_job_name }}' is enabled
|
|
||||||
systemd:
|
|
||||||
name: "{{ restic_systemd_unit_naming_scheme }}-unlock.service"
|
|
||||||
enabled: true
|
|
||||||
|
|
||||||
- name: Ensure systemd timer for '{{ restic_job_name }}' is activated
|
|
||||||
systemd:
|
|
||||||
name: "{{ restic_systemd_unit_naming_scheme }}.timer"
|
|
||||||
enabled: true
|
|
||||||
|
|
||||||
- name: Ensure systemd timer for '{{ restic_job_name }}' is started
|
|
||||||
systemd:
|
|
||||||
name: "{{ restic_systemd_unit_naming_scheme }}.timer"
|
|
||||||
state: started
|
|
@ -1,21 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description={{ restic_job_description }} - Unlock after reboot job
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Type=oneshot
|
|
||||||
User={{ restic_user }}
|
|
||||||
WorkingDirectory={{ restic_systemd_working_directory }}
|
|
||||||
SyslogIdentifier={{ restic_systemd_syslog_identifier }}
|
|
||||||
|
|
||||||
Environment=RESTIC_REPOSITORY={{ restic_repo_url }}
|
|
||||||
Environment=RESTIC_PASSWORD={{ restic_repo_password }}
|
|
||||||
{% if restic_s3_key_id and restic_s3_access_key %}
|
|
||||||
Environment=AWS_ACCESS_KEY_ID={{ restic_s3_key_id }}
|
|
||||||
Environment=AWS_SECRET_ACCESS_KEY={{ restic_s3_access_key }}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
ExecStartPre=-/bin/sh -c '/usr/bin/restic snapshots || /usr/bin/restic init'
|
|
||||||
ExecStart=/usr/bin/restic unlock
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
@ -1,28 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description={{ restic_job_description }}
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Type=oneshot
|
|
||||||
User={{ restic_user }}
|
|
||||||
WorkingDirectory={{ restic_systemd_working_directory }}
|
|
||||||
SyslogIdentifier={{ restic_systemd_syslog_identifier }}
|
|
||||||
|
|
||||||
Environment=RESTIC_REPOSITORY={{ restic_repo_url }}
|
|
||||||
Environment=RESTIC_PASSWORD={{ restic_repo_password }}
|
|
||||||
{% if restic_s3_key_id and restic_s3_access_key %}
|
|
||||||
Environment=AWS_ACCESS_KEY_ID={{ restic_s3_key_id }}
|
|
||||||
Environment=AWS_SECRET_ACCESS_KEY={{ restic_s3_access_key }}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
ExecStartPre=-/bin/sh -c '/usr/bin/restic snapshots || /usr/bin/restic init'
|
|
||||||
{% if restic_backup_stdin_command %}
|
|
||||||
ExecStart=/bin/sh -c '{{ restic_backup_stdin_command }} | /usr/bin/restic backup --verbose --stdin --stdin-filename {{ restic_backup_stdin_command_filename }}'
|
|
||||||
{% else %}
|
|
||||||
ExecStart=/usr/bin/restic --verbose backup {{ restic_backup_paths | join(' ') }}
|
|
||||||
{% endif %}
|
|
||||||
ExecStartPost=/usr/bin/restic forget --prune --keep-within={{ restic_policy.keep_within }} --keep-hourly={{ restic_policy.hourly }} --keep-daily={{ restic_policy.daily }} --keep-weekly={{ restic_policy.weekly }} --keep-monthly={{ restic_policy.monthly }}
|
|
||||||
ExecStartPost=-/usr/bin/restic snapshots
|
|
||||||
ExecStartPost=/usr/bin/restic check
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
@ -1,10 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description=Run {{ restic_job_name }}
|
|
||||||
|
|
||||||
[Timer]
|
|
||||||
OnCalendar={{ restic_policy.frequency }}
|
|
||||||
Persistent=True
|
|
||||||
Unit={{ restic_systemd_unit_naming_scheme }}.service
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=timers.target
|
|
46 roles/snipe_it/README.md (new file)
@ -0,0 +1,46 @@
# `finallycoffee.services.snipe_it` ansible role

[Snipe-IT](https://snipeitapp.com/) is an open-source asset management system with
a powerful JSON-REST API. This ansible role deploys and configures Snipe-IT.

## Requirements

Snipe-IT requires a MySQL-compatible database like MariaDB and a working email service
for sending email. For installing and configuring MariaDB, see
[`finallycoffee.base.mariadb`](https://galaxy.ansible.com/ui/repo/published/finallycoffee/base/content/role/mariadb/).

## Configuration

Required variables to set are:

- `snipe_it_domain` - domain name of the snipe-it instance
- `snipe_it_config_app_url` - URL where snipe-it will be reachable, including protocol and port
- `snipe_it_config_app_key` - Laravel application key

### Database configuration

All (database) options from the upstream laravel `.env` file are available
under the `snipe_it_config_db_*` prefix. Configure a database as follows:
```yaml
snipe_it_config_db_host: localhost # defaults to localhost
snipe_it_config_db_port: "3306" # defaults to 3306
snipe_it_config_db_database: my_snipe_db_name # defaults to 'snipeit'
snipe_it_config_db_username: my_snipe_db_user # defaults to 'snipeit'
snipe_it_config_db_password: my_snipe_db_password
# Set this if the database is shared with
# other applications. Defaults to not set.
snipe_it_config_db_prefix: snipe_
```

### Email configuration

Configuring an email server is mandatory. An example is provided below:
```yaml
snipe_it_config_mail_host: smtp.example.com
snipe_it_config_mail_username: snipe_user@snipe.example.com
snipe_it_config_mail_password: i_want_to_be_strong_and_long
snipe_it_config_mail_from_addr: "noreply@snipe.example.com"
snipe_it_config_mail_from_name: "Example.com SnipeIT instance"
```

The default SMTP port is `587` and can be set in `snipe_it_config_mail_port`.
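For completeness, a hypothetical sketch of the remaining required variables from the list above; the app key would normally come from Laravel's `php artisan key:generate` and is shown here as a vault placeholder.

```yaml
# Hypothetical required top-level settings for the snipe_it role (placeholder values)
snipe_it_domain: "assets.example.com"
snipe_it_config_app_url: "https://assets.example.com"
snipe_it_config_app_key: "{{ vault_snipe_it_app_key }}"
```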
131
roles/snipe_it/defaults/main/config.yml
Normal file
131
roles/snipe_it/defaults/main/config.yml
Normal file
@ -0,0 +1,131 @@
|
|||||||
|
---
|
||||||
|
snipe_it_config_app_version: "v{{ snipe_it_version }}"
|
||||||
|
snipe_it_config_app_port: 8000
|
||||||
|
snipe_it_config_app_env: "production"
|
||||||
|
snipe_it_config_app_debug: false
|
||||||
|
snipe_it_config_app_key: ~
|
||||||
|
snipe_it_config_app_url: "http://localhost:{{ snipe_it_config_app_port }}"
|
||||||
|
snipe_it_config_app_timezone: UTC
|
||||||
|
snipe_it_config_app_locale: en-US
|
||||||
|
snipe_it_config_app_locked: false
|
||||||
|
snipe_it_config_app_cipher: "AES-256-GCM"
|
||||||
|
snipe_it_config_app_force_tls: false
|
||||||
|
snipe_it_config_app_trusted_proxies:
|
||||||
|
- '192.168.0.0/16'
|
||||||
|
- '172.16.0.0/12'
|
||||||
|
- '10.0.0.0/8'
|
||||||
|
|
||||||
|
snipe_it_config_db_connection: mysql
|
||||||
|
snipe_it_config_db_host: localhost
|
||||||
|
snipe_it_config_db_port: "3306"
|
||||||
|
snipe_it_config_db_database: snipeit
|
||||||
|
snipe_it_config_db_username: snipeit
|
||||||
|
snipe_it_config_db_password: ~
|
||||||
|
snipe_it_config_db_prefix: ~
|
||||||
|
snipe_it_config_db_dump_path: /usr/bin/
|
||||||
|
snipe_it_config_db_charset: utf8mb4
|
||||||
|
snipe_it_config_db_collation: utf8mb4_unicode_ci
|
||||||
|
snipe_it_config_db_ssl: false
|
||||||
|
snipe_it_config_db_ssl_is_paas: false
|
||||||
|
snipe_it_config_db_ssl_key_path: ~
|
||||||
|
snipe_it_config_db_ssl_cert_path: ~
|
||||||
|
snipe_it_config_db_ssl_ca_path: ~
|
||||||
|
snipe_it_config_db_ssl_cipher: ~
|
||||||
|
snipe_it_config_db_ssl_verify_server: ~
|
||||||
|
|
||||||
|
snipe_it_config_mail_mailer: smtp
|
||||||
|
snipe_it_config_mail_host: ~
|
||||||
|
snipe_it_config_mail_port: 587
|
||||||
|
snipe_it_config_mail_username: ~
|
||||||
|
snipe_it_config_mail_password: ~
|
||||||
|
snipe_it_config_mail_tls_verify_peer: true
|
||||||
|
snipe_it_config_mail_from_addr: ~
|
||||||
|
snipe_it_config_mail_from_name: ~
|
||||||
|
snipe_it_config_mail_replyto_addr: "{{ snipe_it_config_mail_from_addr }}"
|
||||||
|
snipe_it_config_mail_replyto_name: "{{ snipe_it_config_mail_from_name }}"
|
||||||
|
snipe_it_config_mail_auto_embed_method: attachment
|
||||||
|
snipe_it_config_mail_backup_notification_driver: ~
|
||||||
|
snipe_it_config_mail_backup_notification_address: ~
|
||||||
|
|
||||||
|
snipe_it_config_private_filesystem_disk: "local"
|
||||||
|
snipe_it_config_public_filesystem_disk: "local_public"
|
||||||
|
snipe_it_config_allow_backup_delete: false
|
||||||
|
snipe_it_config_allow_data_purge: false
|
||||||
|
snipe_it_config_image_lib: 'gd'
|
||||||
|
|
||||||
|
snipe_it_config_log_channel: 'stderr'
|
||||||
|
snipe_it_config_log_max_days: 10
|
||||||
|
|
||||||
|
snipe_it_config_cookie_name: "_snipe_session"
|
||||||
|
snipe_it_config_cookie_domain: "{{ snipe_it_domain }}"
|
||||||
|
snipe_it_config_secure_cookies: true
|
||||||
|
|
||||||
|
snipe_it_config_session_driver: file
|
||||||
|
snipe_it_config_session_lifetime: 12000
|
||||||
|
snipe_it_config_cache_driver: file
|
||||||
|
snipe_it_config_cache_prefix: snipeit
|
||||||
|
snipe_it_config_queue_driver: file
|
||||||
|
|
||||||
|
snipe_it_base_config:
|
||||||
|
APP_VERSION: "{{ snipe_it_config_app_version }}"
|
||||||
|
APP_PORT: "{{ snipe_it_config_app_port }}"
|
||||||
|
APP_ENV: "{{ snipe_it_config_app_env }}"
|
||||||
|
APP_DEBUG: "{{ snipe_it_config_app_debug }}"
|
||||||
|
APP_KEY: "{{ snipe_it_config_app_key }}"
|
||||||
|
APP_URL: "{{ snipe_it_config_app_url }}"
|
||||||
|
APP_TIMEZONE: "{{ snipe_it_config_app_timezone }}"
|
||||||
|
APP_LOCALE: "{{ snipe_it_config_app_locale }}"
|
||||||
|
APP_LOCKED: "{{ snipe_it_config_app_locked }}"
|
||||||
|
APP_CIPHER: "{{ snipe_it_config_app_cipher }}"
|
||||||
|
APP_FORCE_TLS: "{{ snipe_it_config_app_force_tls }}"
|
||||||
|
APP_TRUSTED_PROXIES: "{{ snipe_it_config_app_trusted_proxies | join(',') }}"
|
||||||
|
DB_CONNECTION: "{{ snipe_it_config_db_connection }}"
|
||||||
|
DB_HOST: "{{ snipe_it_config_db_host }}"
|
||||||
|
DB_PORT: "{{ snipe_it_config_db_port }}"
|
||||||
|
DB_DATABASE: "{{ snipe_it_config_db_database }}"
|
||||||
|
DB_USERNAME: "{{ snipe_it_config_db_username }}"
|
||||||
|
DB_PASSWORD: "{{ snipe_it_config_db_password }}"
|
||||||
|
DB_PREFIX: "{{ snipe_it_config_db_prefix | default('null', true) }}"
|
||||||
|
DB_DUMP_PATH: "{{ snipe_it_config_db_dump_path }}"
|
||||||
|
DB_CHARSET: "{{ snipe_it_config_db_charset }}"
|
||||||
|
DB_COLLATION: "{{ snipe_it_config_db_collation }}"
|
||||||
|
DB_SSL: "{{ snipe_it_config_db_ssl }}"
|
||||||
|
DB_SSL_IS_PAAS: "{{ snipe_it_config_db_ssl_is_paas }}"
|
||||||
|
DB_SSL_KEY_PATH: "{{ snipe_it_config_db_ssl_key_path | default('null', true) }}"
|
||||||
|
DB_SSL_CERT_PATH: "{{ snipe_it_config_db_ssl_cert_path | default('null', true) }}"
|
||||||
|
DB_SSL_CA_PATH: "{{ snipe_it_config_db_ssl_ca_path | default('null', true) }}"
|
||||||
|
DB_SSL_CIPHER: "{{ snipe_it_config_db_ssl_cipher | default('null', true) }}"
|
||||||
|
DB_SSL_VERIFY_SERVER: "{{ snipe_it_config_db_ssl_verify_server | default('null', true) }}"
|
||||||
|
MAIL_MAILER: "{{ snipe_it_config_mail_mailer }}"
|
||||||
|
MAIL_HOST: "{{ snipe_it_config_mail_host }}"
|
||||||
|
MAIL_PORT: "{{ snipe_it_config_mail_port }}"
|
||||||
|
MAIL_USERNAME: "{{ snipe_it_config_mail_username }}"
|
||||||
|
MAIL_PASSWORD: "{{ snipe_it_config_mail_password }}"
|
||||||
|
MAIL_TLS_VERIFY_PEER: "{{ snipe_it_config_mail_tls_verify_peer }}"
|
||||||
|
MAIL_FROM_ADDR: "{{ snipe_it_config_mail_from_addr | default('null', true) }}"
|
||||||
|
MAIL_FROM_NAME: "{{ snipe_it_config_mail_from_name | default('null', true) }}"
|
||||||
|
MAIL_REPLYTO_ADDR: "{{ snipe_it_config_mail_replyto_addr | default('null', true) }}"
|
||||||
|
MAIL_REPLYTO_NAME: "{{ snipe_it_config_mail_replyto_name | default('null', true) }}"
|
||||||
|
MAIL_AUTO_EMBED_METHOD: "{{ snipe_it_config_mail_auto_embed_method }}"
|
||||||
|
MAIL_BACKUP_NOTIFICATION_DRIVER: "{{ snipe_it_config_mail_backup_notification_driver }}"
|
||||||
|
MAIL_BACKUP_NOTIFICATION_ADDRESS: "{{ snipe_it_config_mail_backup_notification_address }}"
|
||||||
|
SESSION_DRIVER: "{{ snipe_it_config_session_driver }}"
|
||||||
|
SESSION_LIFETIME: "{{ snipe_it_config_session_lifetime }}"
|
||||||
|
CACHE_DRIVER: "{{ snipe_it_config_cache_driver }}"
|
||||||
|
CACHE_PREFIX: "{{ snipe_it_config_cache_prefix }}"
|
||||||
|
QUEUE_DRIVER: "{{ snipe_it_config_queue_driver }}"
|
||||||
|
PRIVATE_FILESYSTEM_DISK: "{{ snipe_it_config_private_filesystem_disk }}"
|
||||||
|
PUBLIC_FILESYSTEM_DISK: "{{ snipe_it_config_public_filesystem_disk }}"
|
||||||
|
ALLOW_BACKUP_DELETE: "{{ snipe_it_config_allow_backup_delete }}"
|
||||||
|
ALLOW_DATA_PURGE: "{{ snipe_it_config_allow_data_purge }}"
|
||||||
|
IMAGE_LIB: "{{ snipe_it_config_image_lib }}"
|
||||||
|
LOG_CHANNEL: "{{ snipe_it_config_log_channel }}"
|
||||||
|
LOG_MAX_DAYS: "{{ snipe_it_config_log_max_days }}"
|
||||||
|
COOKIE_NAME: "{{ snipe_it_config_cookie_name }}"
|
||||||
|
COOKIE_DOMAIN: "{{ snipe_it_config_cookie_domain }}"
|
||||||
|
SECURE_COOKIES: "{{ snipe_it_config_secure_cookies }}"
|
||||||
|
|
||||||
|
snipe_it_config: ~
|
||||||
|
snipe_it_merged_config: >-2
|
||||||
|
{{ (snipe_it_base_config | default({}, true))
|
||||||
|
| combine((snipe_it_config | default({}, true)), recursive=True) }}
|

48 roles/snipe_it/defaults/main/container.yml Normal file
@@ -0,0 +1,48 @@
---
snipe_it_container_image_registry: docker.io
snipe_it_container_image_namespace: snipe
snipe_it_container_image_name: 'snipe-it'
snipe_it_container_image_tag: ~
snipe_it_container_image_flavour: alpine
snipe_it_container_image_source: pull
snipe_it_container_image_force_source: >-2
  {{ snipe_it_container_image_tag | default(false, true) | bool }}
snipe_it_container_image: >-2
  {{
    ([
      snipe_it_container_image_registry | default([], true),
      snipe_it_container_image_namespace | default([], true),
      snipe_it_container_image_name,
    ] | flatten | join('/'))
    + ':'
    + (snipe_it_container_image_tag | default(
      'v' + snipe_it_version + (
        ((snipe_it_container_image_flavour is string)
        and (snipe_it_container_image_flavour | length > 0))
        | ternary(
          '-' + snipe_it_container_image_flavour | default('', true),
          ''
        )
      ),
      true
    ))
  }}

snipe_it_container_env_file: "/var/www/html/.env"
snipe_it_container_data_directory: "/var/lib/snipeit/"
snipe_it_container_volumes:
  - "{{ snipe_it_data_directory }}:{{ snipe_it_container_data_directory }}:z"

snipe_it_container_name: 'snipe-it'
snipe_it_container_state: >-2
  {{ (snipe_it_state == 'present') | ternary('started', 'absent') }}
snipe_it_container_env: ~
snipe_it_container_user: ~
snipe_it_container_ports: ~
snipe_it_container_labels: ~
snipe_it_container_recreate: ~
snipe_it_container_networks: ~
snipe_it_container_etc_hosts: ~
snipe_it_container_dns_servers: ~
snipe_it_container_network_mode: ~
snipe_it_container_restart_policy: 'unless-stopped'

9 roles/snipe_it/defaults/main/main.yml Normal file
@@ -0,0 +1,9 @@
---
snipe_it_user: snipeit
snipe_it_version: "7.1.15"
snipe_it_domain: ~
snipe_it_state: present
snipe_it_deployment_method: docker

snipe_it_env_file: /etc/snipeit/env
snipe_it_data_directory: /var/lib/snipeit

5 roles/snipe_it/defaults/main/user.yml Normal file
@@ -0,0 +1,5 @@
---
snipe_it_run_user_id: >-2
  {{ snipe_it_user_info.uid | default(snipe_it_user) }}
snipe_it_run_group_id: >-2
  {{ snipe_it_user_info.group | default(snipe_it_user) }}

12 roles/snipe_it/meta/main.yml Normal file
@@ -0,0 +1,12 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
  role_name: snipe_it
  description: >-2
    Deploy Snipe-IT, an open-source asset / license management system with
    powerful JSON REST API
  galaxy_tags:
    - snipeit
    - asset-management
    - docker

14 roles/snipe_it/tasks/check.yml Normal file
@@ -0,0 +1,14 @@
---
- name: Ensure state is valid
  ansible.builtin.fail:
    msg: >-2
      Unsupported state '{{ snipe_it_state }}'!
      Supported states are {{ snipe_it_states | join(', ') }}.
  when: snipe_it_state is not in snipe_it_states

- name: Ensure deployment method is valid
  ansible.builtin.fail:
    msg: >-2
      Unsupported deployment_method '{{ snipe_it_deployment_method }}'!
      Supported values are {{ snipe_it_deployment_methods | join(', ') }}.
  when: snipe_it_deployment_method is not in snipe_it_deployment_methods

30 roles/snipe_it/tasks/deploy-docker.yml Normal file
@@ -0,0 +1,30 @@
---
- name: Ensure container image '{{ snipe_it_container_image }}' is {{ snipe_it_state }}
  community.docker.docker_image:
    name: "{{ snipe_it_container_image }}"
    state: "{{ snipe_it_state }}"
    source: "{{ snipe_it_container_image_source }}"
    force_source: "{{ snipe_it_container_image_force_source }}"
  register: snipe_it_container_image_info
  until: snipe_it_container_image_info is success
  retries: 5
  delay: 3

- name: Ensure container '{{ snipe_it_container_name }}' is {{ snipe_it_container_state }}
  community.docker.docker_container:
    name: "{{ snipe_it_container_name }}"
    image: "{{ snipe_it_container_image }}"
    env_file: "{{ snipe_it_env_file }}"
    env: "{{ snipe_it_container_env | default(omit, true) }}"
    user: "{{ snipe_it_container_user | default(omit, true) }}"
    ports: "{{ snipe_it_container_ports | default(omit, true) }}"
    labels: "{{ snipe_it_container_labels | default(omit, true) }}"
    volumes: "{{ snipe_it_container_volumes | default(omit, true) }}"
    networks: "{{ snipe_it_container_networks | default(omit, true) }}"
    etc_hosts: "{{ snipe_it_container_etc_hosts | default(omit, true) }}"
    dns_servers: "{{ snipe_it_container_dns_servers | default(omit, true) }}"
    network_mode: "{{ snipe_it_container_network_mode | default(omit, true) }}"
    restart_policy: >-2
      {{ snipe_it_container_restart_policy | default(omit, true) }}
    recreate: "{{ snipe_it_container_recreate | default(omit, true) }}"
    state: "{{ snipe_it_container_state }}"

59 roles/snipe_it/tasks/main.yml Normal file
@@ -0,0 +1,59 @@
---
- name: Check preconditions
  ansible.builtin.include_tasks:
    file: "check.yml"

- name: Ensure snipe-it user '{{ snipe_it_user }}' is {{ snipe_it_state }}
  ansible.builtin.user:
    name: "{{ snipe_it_user }}"
    state: "{{ snipe_it_state }}"
    system: "{{ snipe_it_user_system | default(true, true) }}"
    create_home: "{{ snipe_it_user_create_home | default(false, true) }}"
    groups: "{{ snipe_it_user_groups | default(omit, true) }}"
    append: >-2
      {{
        snipe_it_user_groups_append | default(
          snipe_it_user_groups | default([], true) | length > 0,
          true,
        )
      }}
  register: snipe_it_user_info

- name: Ensure snipe-it environment file is {{ snipe_it_state }}
  ansible.builtin.file:
    path: "{{ snipe_it_env_file }}"
    state: "{{ snipe_it_state }}"
  when: snipe_it_state == 'absent'

- name: Ensure snipe-it config directory is {{ snipe_it_state }}
  ansible.builtin.file:
    path: "{{ snipe_it_env_file | dirname }}"
    state: "{{ (snipe_it_state == 'present') | ternary('directory', 'absent') }}"
    owner: "{{ snipe_it_run_user_id }}"
    group: "{{ snipe_it_run_group_id }}"
    mode: "0755"
  when: snipe_it_state == 'present'

- name: Ensure snipe-it data directory '{{ snipe_it_data_directory }}' is {{ snipe_it_state }}
  ansible.builtin.file:
    path: "{{ snipe_it_data_directory }}"
    state: "{{ (snipe_it_state == 'present') | ternary('directory', 'absent') }}"
    owner: "{{ snipe_it_run_user_id }}"
    group: "{{ snipe_it_run_group_id }}"
    mode: "0755"

- name: Ensure snipe-it environment file is templated
  ansible.builtin.copy:
    content: |+2
      {% for entry in snipe_it_merged_config | dict2items %}
      {{ entry.key }}={{ entry.value }}
      {% endfor %}
    dest: "{{ snipe_it_env_file }}"
    owner: "{{ snipe_it_run_user_id }}"
    group: "{{ snipe_it_run_group_id }}"
    mode: "0640"
  when: snipe_it_state == 'present'

- name: Deploy using {{ snipe_it_deployment_method }}
  ansible.builtin.include_tasks:
    file: "deploy-{{ snipe_it_deployment_method }}.yml"

6 roles/snipe_it/vars/main.yml Normal file
@@ -0,0 +1,6 @@
---
snipe_it_states:
  - present
  - absent
snipe_it_deployment_methods:
  - docker

54 roles/vaultwarden/README.md Normal file
@@ -0,0 +1,54 @@
# `finallycoffee.services.vaultwarden` ansible role

Vaultwarden is an unofficial (not associated with Bitwarden) Bitwarden-API-compatible
server backend, formerly called `bitwarden_rs`, written in Rust.

This ansible role can deploy and configure `vaultwarden`, and supports removing the
deployment again using `vaultwarden_state: absent` (warning: it does not ask for
confirmation and will remove all user data when instructed to do so).

## Configuration

To use this role, the following variables need to be populated:

- `vaultwarden_config_domain` - always required. Changing it later breaks two-factor methods that were registered under the old domain.
- `vaultwarden_config_admin_token` - required if `vaultwarden_config_disable_admin_token` is `false`.

Other configuration values can be set either through the role-provided flattened keys in the
`vaultwarden_config_*` namespace (see [`defaults/main/config.yml`](defaults/main/config.yml) for the available variables),
or by putting the configuration directly into `vaultwarden_config`, using the same structure as vaultwarden's `config.json`.
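
For illustration, a minimal sketch of both styles (the values are placeholders, not role defaults):

```yaml
# role-provided flattened keys ...
vaultwarden_config_domain: "https://vault.example.com"
vaultwarden_config_signups_allowed: true

# ... or the raw config.json structure, merged on top of the flattened keys
vaultwarden_config:
  invitation_org_name: "Example Org"
  smtp_host: "mail.example.com"
```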

### Email

Configure mailing by first enabling SMTP using `vaultwarden_config_enable_smtp: true`,
then configure your email server like this:
```yaml
vaultwarden_config:
  smtp_host: "mail.example.com"
  smtp_explicit_tls: true
  smtp_port: 465
  smtp_from: "noreply+vaultwarden@example.com"
  smtp_from_name: "'Example.com Vaultwarden instance' <noreply+vaultwarden@example.com>"
  smtp_username: vaultwarden@example.com
  smtp_password: i_hope_i_will_be_a_strong_one!
  helo_name: "{{ vaultwarden_config_domain }}"
```

### 2FA via email

To enable email-based two-factor authentication, set `vaultwarden_config_enable_email_2fa: true`
and optionally set the following configuration:
```yaml
vaultwarden_config:
  email_token_size: 8
  email_expiration_time: 300 # 300 seconds = 5min
  email_attempts_limit: 3
```

### Feature flags

To enable more authentication methods, toggles are provided in
[`vaultwarden_config_enable_*`](defaults/main/config.yml#L18).
It is generally recommended to simply keep unused methods off.
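
For example, enabling Yubikey OTP support could look roughly like the sketch below; the `yubico_client_id` and `yubico_secret_key` keys come from vaultwarden's upstream `config.json`, not from this role, and the values are placeholders:

```yaml
vaultwarden_config_enable_yubico: true
vaultwarden_config:
  yubico_client_id: "12345"              # placeholder client id
  yubico_secret_key: "base64secretkey="  # placeholder secret
```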

By default, 'Sends' are allowed.

68 roles/vaultwarden/defaults/main/config.yml Normal file
@@ -0,0 +1,68 @@
---
# Required configuration
vaultwarden_config_domain: ~
vaultwarden_config_admin_token: ~
# Invitations and signups
vaultwarden_config_invitations_allowed: false
vaultwarden_config_invitation_org_name: ~
vaultwarden_config_signups_allowed: false
vaultwarden_config_signups_verify: true
vaultwarden_config_signups_verify_resend_time: 3600
vaultwarden_config_signups_verify_resend_limit: 5
# Entry preview icons
vaultwarden_config_disable_icon_download: true
vaultwarden_config_icon_cache_ttl: 604800 # 7 days
vaultwarden_config_icon_cache_negttl: 259200 # 3 days
vaultwarden_config_icon_download_timeout: 30 # seconds
vaultwarden_config_icon_blacklist_non_global_ips: true
# Features
vaultwarden_config_sends_allowed: true
vaultwarden_config_enable_yubico: false
vaultwarden_config_enable_duo: false
vaultwarden_config_enable_smtp: false
vaultwarden_config_enable_email_2fa: false
# Security
vaultwarden_config_password_iterations: 100000
vaultwarden_config_show_password_hint: false
vaultwarden_config_disable_2fa_remember: false
vaultwarden_config_disable_admin_token: true
vaultwarden_config_require_device_email: false
vaultwarden_config_authenticator_disable_time_drift: true
# Other
vaultwarden_config_log_timestamp_format: "%Y-%m-%d %H:%M:%S.%3f"
vaultwarden_config_ip_header: "X-Real-IP"
vaultwarden_config_reload_templates: false

vaultwarden_base_config:
  domain: "{{ vaultwarden_config_domain }}"
  admin_token: "{{ vaultwarden_config_admin_token }}"
  invitations_allowed: "{{ vaultwarden_config_invitations_allowed }}"
  invitation_org_name: "{{ vaultwarden_config_invitation_org_name | default('', true) }}"
  signups_allowed: "{{ vaultwarden_config_signups_allowed }}"
  signups_verify: "{{ vaultwarden_config_signups_verify }}"
  signups_verify_resend_time: "{{ vaultwarden_config_signups_verify_resend_time }}"
  signups_verify_resend_limit: "{{ vaultwarden_config_signups_verify_resend_limit }}"
  disable_icon_download: "{{ vaultwarden_config_disable_icon_download }}"
  icon_cache_ttl: "{{ vaultwarden_config_icon_cache_ttl }}"
  icon_cache_negttl: "{{ vaultwarden_config_icon_cache_negttl }}"
  icon_download_timeout: "{{ vaultwarden_config_icon_download_timeout }}"
  icon_blacklist_non_global_ips: "{{ vaultwarden_config_icon_blacklist_non_global_ips }}"
  password_iterations: "{{ vaultwarden_config_password_iterations }}"
  show_password_hint: "{{ vaultwarden_config_show_password_hint }}"
  disable_2fa_remember: "{{ vaultwarden_config_disable_2fa_remember }}"
  disable_admin_token: "{{ vaultwarden_config_disable_admin_token }}"
  require_device_email: "{{ vaultwarden_config_require_device_email }}"
  authenticator_disable_time_drift: "{{ vaultwarden_config_authenticator_disable_time_drift }}"
  ip_header: "{{ vaultwarden_config_ip_header }}"
  log_timestamp_format: "{{ vaultwarden_config_log_timestamp_format }}"
  reload_templates: "{{ vaultwarden_config_reload_templates }}"
  sends_allowed: "{{ vaultwarden_config_sends_allowed }}"
  _enable_yubico: "{{ vaultwarden_config_enable_yubico }}"
  _enable_duo: "{{ vaultwarden_config_enable_duo }}"
  _enable_smtp: "{{ vaultwarden_config_enable_smtp }}"
  _enable_email_2fa: "{{ vaultwarden_config_enable_email_2fa }}"

vaultwarden_config: ~
vaultwarden_merged_config: >-2
  {{ vaultwarden_base_config | default({}, true)
  | combine(vaultwarden_config | default({}, true), recursive=true) }}

50 roles/vaultwarden/defaults/main/container.yml Normal file
@@ -0,0 +1,50 @@
---
vaultwarden_container_image_registry: docker.io
vaultwarden_container_image_namespace: vaultwarden
vaultwarden_container_image_name: server
vaultwarden_container_image_tag: ~
vaultwarden_container_image_flavour: alpine
vaultwarden_container_image_source: pull
vaultwarden_container_image_force_source: >-2
  {{ vaultwarden_container_image_tag | default(false, true) | bool }}
vaultwarden_container_image: >-2
  {{
    ([
      vaultwarden_container_image_registry | default([], true),
      vaultwarden_container_image_namespace | default([], true),
      vaultwarden_container_image_name,
    ] | flatten | join('/'))
    + ':'
    + (vaultwarden_container_image_tag | default(
      vaultwarden_version + (
        ((vaultwarden_container_image_flavour is string)
        and (vaultwarden_container_image_flavour | length > 0))
        | ternary(
          '-' + vaultwarden_container_image_flavour | default('', true),
          ''
        )
      ),
      true
    ))
  }}

vaultwarden_container_name: vaultwarden
vaultwarden_container_env: ~
vaultwarden_container_user: >-2
  {{ vaultwarden_run_user_id }}:{{ vaultwarden_run_group_id }}
vaultwarden_container_ports: ~
vaultwarden_container_labels: ~
vaultwarden_container_networks: ~
vaultwarden_container_etc_hosts: ~
vaultwarden_container_dns_servers: ~
vaultwarden_container_restart_policy: >-2
  {{ (vaultwarden_deployment_method == 'docker') | ternary(
    'unless-stopped',
    'on-failure',
  )
  }}
vaultwarden_container_state: >-2
  {{ (vaultwarden_state == 'present') | ternary('started', 'absent') }}
vaultwarden_container_volumes:
  - "{{ vaultwarden_data_directory }}:/data:rw"
  - "{{ vaultwarden_config_file }}:/data/config.json:ro"

10 roles/vaultwarden/defaults/main/main.yml Normal file
@@ -0,0 +1,10 @@
---
vaultwarden_user: vaultwarden
vaultwarden_version: "1.32.7"

vaultwarden_config_file: "/etc/vaultwarden/config.json"
vaultwarden_config_directory: "{{ vaultwarden_config_file | dirname }}"
vaultwarden_data_directory: "/var/lib/vaultwarden"

vaultwarden_state: present
vaultwarden_deployment_method: docker

5 roles/vaultwarden/defaults/main/user.yml Normal file
@@ -0,0 +1,5 @@
---
vaultwarden_run_user_id: >-2
  {{ vaultwarden_user_info.uid | default(vaultwarden_user, true) }}
vaultwarden_run_group_id: >-2
  {{ vaultwarden_user_info.group | default(vaultwarden_user, true) }}

9 roles/vaultwarden/handlers/main.yml Normal file
@@ -0,0 +1,9 @@
---
- name: Ensure vaultwarden container '{{ vaultwarden_container_name }}' is restarted
  community.docker.docker_container:
    name: "{{ vaultwarden_container_name }}"
    state: "{{ vaultwarden_container_state }}"
    restart: true
  listen: vaultwarden-restart
  when: vaultwarden_deployment_method == 'docker'
  ignore_errors: "{{ ansible_check_mode }}"

12 roles/vaultwarden/meta/main.yml Normal file
@@ -0,0 +1,12 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
  role_name: vaultwarden
  description: >-2
    Deploy vaultwarden, a bitwarden-compatible server backend
  galaxy_tags:
    - vaultwarden
    - bitwarden
    - passwordstore
    - docker

22 roles/vaultwarden/tasks/deploy-docker.yml Normal file
@@ -0,0 +1,22 @@
---
- name: Ensure container image '{{ vaultwarden_container_image }}' is {{ vaultwarden_state }}
  community.docker.docker_image:
    name: "{{ vaultwarden_container_image }}"
    state: "{{ vaultwarden_state }}"
    source: "{{ vaultwarden_container_image_source }}"
    force_source: "{{ vaultwarden_container_image_force_source }}"

- name: Ensure container '{{ vaultwarden_container_name }}' is {{ vaultwarden_container_state }}
  community.docker.docker_container:
    name: "{{ vaultwarden_container_name }}"
    image: "{{ vaultwarden_container_image }}"
    env: "{{ vaultwarden_container_env | default(omit, true) }}"
    user: "{{ vaultwarden_container_user | default(omit, true) }}"
    ports: "{{ vaultwarden_container_ports | default(omit, true) }}"
    labels: "{{ vaultwarden_container_labels | default(omit, true) }}"
    volumes: "{{ vaultwarden_container_volumes }}"
    networks: "{{ vaultwarden_container_networks | default(omit, true) }}"
    etc_hosts: "{{ vaultwarden_container_etc_hosts | default(omit, true) }}"
    dns_servers: "{{ vaultwarden_container_dns_servers | default(omit, true) }}"
    restart_policy: "{{ vaultwarden_container_restart_policy | default(omit, true) }}"
    state: "{{ vaultwarden_container_state | default(omit, true) }}"

78 roles/vaultwarden/tasks/main.yml Normal file
@@ -0,0 +1,78 @@
---
- name: Ensure state is valid
  ansible.builtin.fail:
    msg: >-2
      Unsupported state '{{ vaultwarden_state }}'!
      Supported states are {{ vaultwarden_states | join(', ') }}.
  when: vaultwarden_state not in vaultwarden_states

- name: Ensure deployment method is valid
  ansible.builtin.fail:
    msg: >-2
      Unsupported deployment method '{{ vaultwarden_deployment_method }}'!
      Supported are {{ vaultwarden_deployment_methods | join(', ') }}.
  when: vaultwarden_deployment_method not in vaultwarden_deployment_methods

- name: Ensure required variables are given
  ansible.builtin.fail:
    msg: "Required variable '{{ var }}' is undefined!"
  loop: "{{ vaultwarden_required_variables }}"
  loop_control:
    loop_var: var
  when: >-2
    var not in hostvars[inventory_hostname]
    or hostvars[inventory_hostname][var] | length == 0

- name: Ensure conditionally required variables are given
  ansible.builtin.fail:
    msg: "Required variable '{{ var.name }}' is undefined!"
  loop: "{{ vaultwarden_conditionally_required_variables }}"
  loop_control:
    loop_var: var
    label: "{{ var.name }}"
  when: >-2
    var.when and (
    var.name not in hostvars[inventory_hostname]
    or hostvars[inventory_hostname][var.name] | length == 0)

- name: Ensure vaultwarden user '{{ vaultwarden_user }}' is {{ vaultwarden_state }}
  ansible.builtin.user:
    name: "{{ vaultwarden_user }}"
    state: "{{ vaultwarden_state }}"
    system: "{{ vaultwarden_user_system | default(true, true) }}"
    create_home: "{{ vaultwarden_user_create_home | default(false, true) }}"
    groups: "{{ vaultwarden_user_groups | default(omit, true) }}"
    append: >-2
      {{ vaultwarden_user_append_groups | default(
        (vaultwarden_user_groups | default([], true) | length > 0),
        true,
      ) }}
  register: vaultwarden_user_info

- name: Ensure base paths are {{ vaultwarden_state }}
  ansible.builtin.file:
    path: "{{ mount.path }}"
    state: "{{ (vaultwarden_state == 'present') | ternary('directory', 'absent') }}"
    owner: "{{ mount.owner | default(vaultwarden_run_user_id) }}"
    group: "{{ mount.group | default(vaultwarden_run_group_id) }}"
    mode: "{{ mount.mode | default('0755', true) }}"
  loop:
    - path: "{{ vaultwarden_config_directory }}"
    - path: "{{ vaultwarden_data_directory }}"
  loop_control:
    loop_var: mount
    label: "{{ mount.path }}"

- name: Ensure vaultwarden config file '{{ vaultwarden_config_file }}' is {{ vaultwarden_state }}
  ansible.builtin.copy:
    content: "{{ vaultwarden_merged_config | to_nice_json(indent=4) }}"
    dest: "{{ vaultwarden_config_file }}"
    owner: "{{ vaultwarden_run_user_id }}"
    group: "{{ vaultwarden_run_group_id }}"
    mode: "0640"
  when: vaultwarden_state == 'present'
  notify: vaultwarden-restart

- name: Deploy vaultwarden using {{ vaultwarden_deployment_method }}
  ansible.builtin.include_tasks:
    file: "deploy-{{ vaultwarden_deployment_method }}.yml"

11 roles/vaultwarden/vars/main.yml Normal file
@@ -0,0 +1,11 @@
---
vaultwarden_states:
  - present
  - absent
vaultwarden_deployment_methods:
  - docker
vaultwarden_required_variables:
  - vaultwarden_config_domain
vaultwarden_conditionally_required_variables:
  - name: vaultwarden_config_admin_token
    when: "{{ not (vaultwarden_config_disable_admin_token | default(true, true) | bool) }}"

16 roles/vouch_proxy/README.md Normal file
@@ -0,0 +1,16 @@
# `finallycoffee.services.vouch-proxy`

[Vouch-Proxy](https://github.com/vouch/vouch-proxy) can be used in combination with
nginx's `auth_request` module to secure web services with OIDC/OAuth. This role runs
vouch-proxy's official docker container.

## Usage

The `oauth` config section must be supplied in `vouch_proxy_oauth_config`, and the
`vouch` config section can be overridden in `vouch_proxy_vouch_config`. For possible
configuration values, see https://github.com/vouch/vouch-proxy/blob/master/config/config.yml_example.
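
As a rough sketch for a generic OIDC provider (the keys follow the upstream `config.yml_example`; the client id, secret and URLs below are placeholders):

```yaml
vouch_proxy_oauth_config:
  provider: oidc
  client_id: vouch-proxy
  client_secret: "{{ vault_vouch_proxy_oidc_client_secret }}"  # assumed to come from your own vault
  auth_url: https://idp.example.com/auth
  token_url: https://idp.example.com/token
  user_info_url: https://idp.example.com/userinfo
  scopes:
    - openid
    - email
  callback_url: https://vouch.example.com/auth

vouch_proxy_config_vouch_domains:
  - example.com
```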

For an example nginx config, see https://github.com/vouch/vouch-proxy#installation-and-configuration.

Arguments for `community.docker.docker_container` can be passed through using the
`vouch_proxy_container_[...]` prefix (e.g. `vouch_proxy_container_ports`).
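
For instance, publishing the listener port on localhost and attaching the container to an existing docker network (the port mapping and network name are illustrative) might look like this:

```yaml
vouch_proxy_container_ports:
  - "127.0.0.1:9090:9090"
vouch_proxy_container_networks:
  - name: edge
```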

51 roles/vouch_proxy/defaults/main.yml Normal file
@@ -0,0 +1,51 @@
---

vouch_proxy_user: vouch-proxy
vouch_proxy_version: 0.40.0
vouch_proxy_base_path: /opt/vouch-proxy
vouch_proxy_config_path: "{{ vouch_proxy_base_path }}/config"
vouch_proxy_config_file: "{{ vouch_proxy_config_path }}/config.yaml"

vouch_proxy_container_name: vouch-proxy
vouch_proxy_container_image_name: vouch-proxy
vouch_proxy_container_image_namespace: vouch/
vouch_proxy_container_image_registry: quay.io

vouch_proxy_container_image_repository: >-
  {{
    (container_registries[vouch_proxy_container_image_registry] | default(vouch_proxy_container_image_registry))
    + '/' + (vouch_proxy_container_image_namespace | default(''))
    + vouch_proxy_container_image_name
  }}
vouch_proxy_container_image_reference: >-
  {{
    vouch_proxy_container_image_repository + ':'
    + (vouch_proxy_container_image_tag | default(vouch_proxy_version))
  }}

vouch_proxy_container_image_force_pull: "{{ vouch_proxy_container_image_tag is defined }}"

vouch_proxy_container_default_volumes:
  - "{{ vouch_proxy_config_file }}:/config/config.yaml:ro"
vouch_proxy_container_volumes: >-
  {{ vouch_proxy_container_default_volumes
  + vouch_proxy_container_extra_volumes | default([]) }}
vouch_proxy_container_restart_policy: "unless-stopped"

vouch_proxy_config_vouch_log_level: info
vouch_proxy_config_vouch_listen: 0.0.0.0
vouch_proxy_config_vouch_port: 9090
vouch_proxy_config_vouch_domains: []
vouch_proxy_config_vouch_document_root: ~

vouch_proxy_oauth_config: {}
vouch_proxy_vouch_config:
  logLevel: "{{ vouch_proxy_config_vouch_log_level }}"
  listen: "{{ vouch_proxy_config_vouch_listen }}"
  port: "{{ vouch_proxy_config_vouch_port }}"
  domains: "{{ vouch_proxy_config_vouch_domains }}"
  document_root: "{{ vouch_proxy_config_vouch_document_root }}"

vouch_proxy_config:
  vouch: "{{ vouch_proxy_vouch_config }}"
  oauth: "{{ vouch_proxy_oauth_config }}"

8 roles/vouch_proxy/handlers/main.yml Normal file
@@ -0,0 +1,8 @@
---

- name: Ensure vouch-proxy was restarted
  community.docker.docker_container:
    name: "{{ vouch_proxy_container_name }}"
    state: started
    restart: yes
  listen: restart-vouch-proxy

12 roles/vouch_proxy/meta/main.yml Normal file
@@ -0,0 +1,12 @@
---
allow_duplicates: true
dependencies: []
galaxy_info:
  role_name: vouch_proxy
  description: Ansible role to deploy vouch_proxy using docker
  galaxy_tags:
    - vouch_proxy
    - oidc
    - authentication
    - authorization
    - docker

50 roles/vouch_proxy/tasks/main.yml Normal file
@@ -0,0 +1,50 @@
---

- name: Ensure vouch-proxy user '{{ vouch_proxy_user }}' exists
  ansible.builtin.user:
    name: "{{ vouch_proxy_user }}"
    state: present
    system: true
  register: vouch_proxy_user_info

- name: Ensure mounts are created
  ansible.builtin.file:
    dest: "{{ item.path }}"
    state: directory
    owner: "{{ item.owner | default(vouch_proxy_user_info.uid | default(vouch_proxy_user)) }}"
    group: "{{ item.group | default(vouch_proxy_user_info.group | default(vouch_proxy_user)) }}"
    mode: "{{ item.mode | default('0755') }}"
  loop:
    - path: "{{ vouch_proxy_base_path }}"
    - path: "{{ vouch_proxy_config_path }}"

- name: Ensure config file is templated
  ansible.builtin.copy:
    dest: "{{ vouch_proxy_config_file }}"
    content: "{{ vouch_proxy_config | to_nice_yaml }}"
    owner: "{{ vouch_proxy_user_info.uid | default(vouch_proxy_user) }}"
    group: "{{ vouch_proxy_user_info.group | default(vouch_proxy_user) }}"
    mode: "0640"
  notify:
    - restart-vouch-proxy

- name: Ensure container image is present on host
  community.docker.docker_image:
    name: "{{ vouch_proxy_container_image_reference }}"
    state: present
    source: pull
    force_source: "{{ vouch_proxy_container_image_force_pull | bool }}"

- name: Ensure container '{{ vouch_proxy_container_name }}' is running
  community.docker.docker_container:
    name: "{{ vouch_proxy_container_name }}"
    image: "{{ vouch_proxy_container_image_reference }}"
    env: "{{ vouch_proxy_container_env | default(omit) }}"
    user: "{{ vouch_proxy_user_info.uid | default(vouch_proxy_user) }}"
    ports: "{{ vouch_proxy_container_ports | default(omit) }}"
    volumes: "{{ vouch_proxy_container_volumes | default(omit) }}"
    networks: "{{ vouch_proxy_container_networks | default(omit) }}"
    purge_networks: "{{ vouch_proxy_container_purge_networks | default(omit) }}"
    etc_hosts: "{{ vouch_proxy_container_etc_hosts | default(omit) }}"
    restart_policy: "{{ vouch_proxy_container_restart_policy }}"
    state: started