Compare commits
	
		
			1 Commits
		
	
	
		
			0.1.14
			...
			36ceb40fac
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
| 36ceb40fac | 
							
								
								
									
										33
									
								
								README.md
									
									
									
									
									
								
							
							
						
						
									
										33
									
								
								README.md
									
									
									
									
									
								
							| @@ -1,4 +1,4 @@ | ||||
| # `finallycoffee.services` ansible collection | ||||
| # `finallycoffee.service` ansible collection | ||||
|  | ||||
| ## Overview | ||||
|  | ||||
| @@ -8,35 +8,24 @@ concise area of concern. | ||||
|  | ||||
| ## Roles | ||||
|  | ||||
| - [`authelia`](roles/authelia/README.md): Deploys an [authelia.com](https://www.authelia.com) | ||||
| - [`roles/authelia`](roles/authelia/README.md): Deploys an [authelia.com](https://www.authelia.com) | ||||
|   instance, an authentication provider with beta OIDC provider support. | ||||
|  | ||||
| - [`ghost`](roles/ghost/README.md): Deploys [ghost.org](https://ghost.org/), a simple to use | ||||
|   blogging and publishing platform. | ||||
| - [`roles/elasticsearch`](roles/elasticsearch/README.md): Deploy [elasticsearch](https://www.docker.elastic.co/r/elasticsearch/elasticsearch-oss), | ||||
|   a popular (distributed) search and analytics engine, mostly known by it's | ||||
|   letter "E" in the ELK-stack. | ||||
|  | ||||
| - [`gitea`](roles/gitea/README.md): Deploy [gitea.io](https://gitea.io), a | ||||
| - [`roles/gitea`](roles/gitea/README.md): Deploy [gitea.io](https://gitea.io), a | ||||
|   lightweight, self-hosted git service. | ||||
|  | ||||
| - [`hedgedoc`](roles/hedgedoc/README.md): Deploy [hedgedoc](https://hedgedoc.org/), | ||||
|   a collaborative real-time markdown editor using websockts | ||||
|  | ||||
| - [`jellyfin`](roles/jellyfin/README.md): Deploy [jellyfin.org](https://jellyfin.org), | ||||
| - [`roles/jellyfin`](roles/jellyfin/README.md): Deploy [jellyfin.org](https://jellyfin.org), | ||||
|   the free software media system for streaming stored media to any device. | ||||
|  | ||||
| - [`keycloak`](roles/keycloak/README.md): Deploy [keycloak](https://www.keycloak.org/), | ||||
|   the open source identity and access management solution. | ||||
| - [`roles/restic`](roles/restic/README.md): Manage backups using restic | ||||
|   and persist them to a configurable backend. | ||||
|  | ||||
| - [`openproject`](roles/openproject/README.md): Deploys an [openproject.org](https://www.openproject.org) | ||||
|   installation using the upstream provided docker-compose setup. | ||||
|  | ||||
| - [`snipe_it`](roles/snipe_it/README.md): Deploys [Snipe-IT](https://snipeitapp.com/), | ||||
|   the free and open-source IT asset (and license) management with a powerful REST API | ||||
|  | ||||
| - [`vaultwarden`](roles/vaultwarden/README.md): Deploy [vaultwarden](https://github.com/dani-garcia/vaultwarden/), | ||||
|   an open-source implementation of the Bitwarden Server (formerly Bitwarden\_RS). | ||||
|  | ||||
| - [`vouch_proxy`](roles/vouch_proxy/README.md): Deploys [vouch-proxy](https://github.com/vouch/vouch-proxy), | ||||
|   an authorization proxy for arbitrary webapps working with `nginx`s' `auth_request` module. | ||||
| - [`roles/minio`](roles/minio/README.md): Deploy [min.io](https://min.io), an | ||||
|   s3-compatible object storage server, using docker containers. | ||||
|  | ||||
| ## License | ||||
|  | ||||
|   | ||||
							
								
								
									
										19
									
								
								galaxy.yml
									
									
									
									
									
								
							
							
						
						
									
										19
									
								
								galaxy.yml
									
									
									
									
									
								
							| @@ -1,24 +1,15 @@ | ||||
| namespace: finallycoffee | ||||
| name: services | ||||
| version: "0.1.14" | ||||
| version: 0.0.1 | ||||
| readme: README.md | ||||
| authors: | ||||
| - transcaffeine <transcaffeine@finally.coffee> | ||||
| description: Various ansible roles useful for automating infrastructure | ||||
| dependencies: | ||||
|   "community.crypto": "^2.22.0" | ||||
|   "community.docker": "^4.0.0" | ||||
|   "containers.podman": "^1.16.0" | ||||
| license_file: LICENSE.md | ||||
|   "community.docker": "^1.10.0" | ||||
| license: | ||||
| - CNPLv7+ | ||||
| build_ignore: | ||||
| - '*.tar.gz' | ||||
| repository: https://git.finally.coffee/finallycoffee/services | ||||
| issues: https://codeberg.org/finallycoffee/ansible-collection-services/issues | ||||
| tags: | ||||
|   - authelia | ||||
|   - gitea | ||||
|   - hedgedoc | ||||
|   - jellyfin | ||||
|   - vaultwarden | ||||
|   - snipeit | ||||
|   - docker | ||||
| issues: https://git.finally.coffee/finallycoffee/services/issues | ||||
|   | ||||
| @@ -1,3 +0,0 @@ | ||||
| --- | ||||
|  | ||||
| requires_ansible: ">=2.15" | ||||
| @@ -1,6 +0,0 @@ | ||||
| --- | ||||
| - name: Install and configure hedgedoc | ||||
|   hosts: "{{ hedgedoc_hosts | default('hedgedoc') }}" | ||||
|   become: "{{ hedgedoc_become | default(true, false) }}" | ||||
|   roles: | ||||
|     - role: finallycoffee.services.hedgedoc | ||||
| @@ -1,6 +0,0 @@ | ||||
| --- | ||||
| - name: Install and configure jellyfin | ||||
|   hosts: "{{ jellyfin_hosts | default('jellyfin') }}" | ||||
|   become: "{{ jellyfin_become | default(true, false) }}" | ||||
|   roles: | ||||
|     - role: finallycoffee.services.jellyfin | ||||
| @@ -1,6 +0,0 @@ | ||||
| --- | ||||
| - name: Install openproject | ||||
|   hosts: "{{ openproject_hosts | default('openproject') }}" | ||||
|   become: "{{ openproject_become | default(true, false) }}" | ||||
|   roles: | ||||
|     - role: finallycoffee.services.openproject | ||||
| @@ -1,6 +0,0 @@ | ||||
| --- | ||||
| - name: Install and configure Snipe-IT | ||||
|   hosts: "{{ snipe_it_hosts | default('snipe_it') }}" | ||||
|   become: "{{ snipe_it_become | default(true, false) }}" | ||||
|   roles: | ||||
|     - role: finallycoffee.services.snipe_it | ||||
| @@ -1,6 +0,0 @@ | ||||
| --- | ||||
| - name: Install and configure vaultwarden | ||||
|   hosts: "{{ vaultwarden_hosts | default('vaultwarden') }}" | ||||
|   become: "{{ vaultwarden_become | default(true, false) }}" | ||||
|   roles: | ||||
|     - role: finallycoffee.services.vaultwarden | ||||
| @@ -1,10 +1,9 @@ | ||||
| --- | ||||
| authelia_version: "4.39.1" | ||||
|  | ||||
| authelia_version: 4.37.5 | ||||
| authelia_user: authelia | ||||
| authelia_base_dir: /opt/authelia | ||||
| authelia_domain: authelia.example.org | ||||
| authelia_state: present | ||||
| authelia_deployment_method: docker | ||||
|  | ||||
| authelia_config_dir: "{{ authelia_base_dir }}/config" | ||||
| authelia_config_file: "{{ authelia_config_dir }}/config.yaml" | ||||
| @@ -15,20 +14,9 @@ authelia_notification_storage_file: "{{ authelia_data_dir }}/notifications.txt" | ||||
| authelia_user_storage_file: "{{ authelia_data_dir }}/user_database.yml" | ||||
|  | ||||
| authelia_container_name: authelia | ||||
| authelia_container_image_server: docker.io | ||||
| authelia_container_image_namespace: authelia | ||||
| authelia_container_image_name: authelia | ||||
| authelia_container_image: >-2 | ||||
|   {{ | ||||
|     [ | ||||
|       authelia_container_image_server, | ||||
|       authelia_container_image_namespace, | ||||
|       authelia_container_image_name | ||||
|     ] | join('/') | ||||
|   }} | ||||
| authelia_container_image_name: docker.io/authelia/authelia | ||||
| authelia_container_image_tag: ~ | ||||
| authelia_container_image_ref: >-2 | ||||
|   {{ authelia_container_image }}:{{ authelia_container_image_tag | default(authelia_version, true) }} | ||||
| authelia_container_image_ref: "{{ authelia_container_image_name }}:{{ authelia_container_image_tag | default(authelia_version, true) }}" | ||||
| authelia_container_image_force_pull: "{{ authelia_container_image_tag | default(false, True) }}" | ||||
| authelia_container_env: | ||||
|   PUID: "{{ authelia_run_user }}" | ||||
| @@ -54,22 +42,12 @@ authelia_config_jwt_secret: ~ | ||||
| authelia_config_default_redirection_url: ~ | ||||
| authelia_config_server_host: 0.0.0.0 | ||||
| authelia_config_server_port: "{{ authelia_container_listen_port }}" | ||||
| authelia_config_server_address: >-2 | ||||
|   {{ authelia_config_server_host }}:{{ authelia_config_server_port }} | ||||
| authelia_config_server_path: "" | ||||
| authelia_config_server_asset_path: "/config/assets/" | ||||
| authelia_config_server_buffers_read: 4096 | ||||
| authelia_config_server_read_buffer_size: >-2 | ||||
|   {{ authelia_config_server_buffers_read }} | ||||
| authelia_config_server_buffers_write: 4096 | ||||
| authelia_config_server_write_buffer_size: >-2 | ||||
|   {{ authelia_config_server_buffers_write }} | ||||
| authelia_config_server_endpoints_enable_pprof: true | ||||
| authelia_config_server_enable_pprof: >-2 | ||||
|   {{ authelia_config_server_endpoints_enable_pprof }} | ||||
| authelia_config_server_endpoints_enable_expvars: true | ||||
| authelia_config_server_enable_expvars: >-2 | ||||
|   {{ authelia_config_server_endpoints_enable_expvars }} | ||||
| authelia_config_server_read_buffer_size: 4096 | ||||
| authelia_config_server_write_buffer_size: 4096 | ||||
| authelia_config_server_enable_pprof: true | ||||
| authelia_config_server_enable_expvars: true | ||||
| authelia_config_server_disable_healthcheck: | ||||
| authelia_config_server_tls_key: ~ | ||||
| authelia_config_server_tls_certificate: ~ | ||||
| @@ -116,18 +94,10 @@ authelia_config_authentication_backend_ldap_additional_users_dn: "ou=users" | ||||
| authelia_config_authentication_backend_ldap_users_filter: "(&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=inetOrgPerson))" | ||||
| authelia_config_authentication_backend_ldap_additional_groups_dn: "ou=groups" | ||||
| authelia_config_authentication_backend_ldap_groups_filter: "(member={dn})" | ||||
| authelia_config_authentication_backend_ldap_attributes_username: uid | ||||
| authelia_config_authentication_backend_ldap_username_attribute: >-2 | ||||
|   {{ authelia_config_authentication_backend_ldap_attributes_username }} | ||||
| authelia_config_authentication_backend_ldap_attributes_mail: mail | ||||
| authelia_config_authentication_backend_ldap_mail_attribute: >-2 | ||||
|   {{ authelia_config_authentication_backend_ldap_attributes_mail }} | ||||
| authelia_config_authentication_backend_ldap_attributes_display_name: displayName | ||||
| authelia_config_authentication_backend_ldap_display_name_attribute: >-2 | ||||
|   {{ authelia_config_authentication_backend_ldap_attributes_display_name }} | ||||
| authelia_config_authentication_backend_ldap_group_name_attribute: cn | ||||
| authelia_config_authentication_backend_ldap_attributes_group_name: >-2 | ||||
|   {{ authelia_config_authentication_backend_ldap_group_name_attribute }} | ||||
| authelia_config_authentication_backend_ldap_username_attribute: uid | ||||
| authelia_config_authentication_backend_ldap_mail_attribute: mail | ||||
| authelia_config_authentication_backend_ldap_display_name_attribute: displayName | ||||
| authelia_config_authentication_backend_ldap_user: ~ | ||||
| authelia_config_authentication_backend_ldap_password: ~ | ||||
| authelia_config_authentication_backend_file_path: ~ | ||||
| @@ -155,8 +125,6 @@ authelia_config_session_secret: ~ | ||||
| authelia_config_session_expiration: 1h | ||||
| authelia_config_session_inactivity: 5m | ||||
| authelia_config_session_remember_me_duration: 1M | ||||
| authelia_config_session_remember_me: >-2 | ||||
|   {{ authelia_config_session_remember_me_duration }} | ||||
| authelia_config_session_redis_host: "{{ authelia_redis_host }}" | ||||
| authelia_config_session_redis_port: "{{ authelia_redis_port }}" | ||||
| authelia_config_session_redis_username: "{{ authelia_redis_user }}" | ||||
| @@ -181,7 +149,8 @@ authelia_config_storage_postgres_ssl_certificate: disable | ||||
| authelia_config_storage_postgres_ssl_key: disable | ||||
| authelia_config_notifier_disable_startup_check: false | ||||
| authelia_config_notifier_filesystem_filename: ~ | ||||
| authelia_config_notifier_smtp_address: "{{ authelia_smtp_host }}:{{ authelia_stmp_port }}" | ||||
| authelia_config_notifier_smtp_host: "{{ authelia_smtp_host }}" | ||||
| authelia_config_notifier_smtp_port: "{{ authelia_stmp_port }}" | ||||
| authelia_config_notifier_smtp_username: "{{ authelia_smtp_user }}" | ||||
| authelia_config_notifier_smtp_password: "{{ authelia_smtp_pass }}" | ||||
| authelia_config_notifier_smtp_timeout: 5s | ||||
| @@ -197,12 +166,6 @@ authelia_config_notifier_smtp_tls_minimum_version: "{{ authelia_tls_minimum_vers | ||||
|  | ||||
| authelia_database_type: ~ | ||||
| authelia_database_host: ~ | ||||
| authelia_database_port: ~ | ||||
| authelia_database_address: >-2 | ||||
|   {{ authelia_database_host }}{{ | ||||
|     (authelia_database_port | default(false, true) | bool) | ||||
|     | ternary(':' + authelia_database_port, '') | ||||
|   }} | ||||
| authelia_database_user: authelia | ||||
| authelia_database_pass: ~ | ||||
| authelia_database_name: authelia | ||||
|   | ||||
| @@ -4,7 +4,5 @@ | ||||
|   docker_container: | ||||
|     name: "{{ authelia_container_name }}" | ||||
|     state: started | ||||
|     restart: true | ||||
|     comparisons: | ||||
|       '*': ignore | ||||
|     restart: yes | ||||
|   listen: restart-authelia | ||||
|   | ||||
| @@ -1,9 +0,0 @@ | ||||
| --- | ||||
| allow_duplicates: true | ||||
| dependencies: [] | ||||
| galaxy_info: | ||||
|   role_name: authelia | ||||
|   description: Ansible role to deploy authelia using docker | ||||
|   galaxy_tags: | ||||
|     - authelia | ||||
|     - docker | ||||
| @@ -1,61 +0,0 @@ | ||||
| --- | ||||
| - name: Ensure container mounts are present | ||||
|   when: authelia_state == 'present' | ||||
|   block: | ||||
|     - name: Ensure sqlite database file exists before mounting it | ||||
|       ansible.builtin.file: | ||||
|         path: "{{ authelia_sqlite_storage_file }}" | ||||
|         state: touch | ||||
|         owner: "{{ authelia_run_user }}" | ||||
|         group: "{{ authelia_run_group }}" | ||||
|         mode: "0640" | ||||
|         access_time: preserve | ||||
|         modification_time: preserve | ||||
|       when: authelia_config_storage_local_path | default(false, true) | ||||
|  | ||||
|     - name: Ensure user database exists before mounting it | ||||
|       ansible.builtin.file: | ||||
|         path: "{{ authelia_user_storage_file }}" | ||||
|         state: touch | ||||
|         owner: "{{ authelia_run_user }}" | ||||
|         group: "{{ authelia_run_group }}" | ||||
|         mode: "0640" | ||||
|         access_time: preserve | ||||
|         modification_time: preserve | ||||
|       when: authelia_config_authentication_backend_file_path | default(false, true) | ||||
|  | ||||
|     - name: Ensure notification reports file exists before mounting it | ||||
|       ansible.builtin.file: | ||||
|         path: "{{ authelia_notification_storage_file }}" | ||||
|         state: touch | ||||
|         owner: "{{ authelia_run_user }}" | ||||
|         group: "{{ authelia_run_group }}" | ||||
|         mode: "0640" | ||||
|         access_time: preserve | ||||
|         modification_time: preserve | ||||
|       when: authelia_config_notifier_filesystem_filename | default(false, true) | ||||
|  | ||||
| - name: Ensure authelia container image is {{ authelia_state }} | ||||
|   community.docker.docker_image: | ||||
|     name: "{{ authelia_container_image_ref }}" | ||||
|     state: "{{ authelia_state }}" | ||||
|     source: pull | ||||
|     force_source: "{{ authelia_container_image_force_pull }}" | ||||
|   register: authelia_container_image_info | ||||
|  | ||||
| - name: Ensure authelia container is {{ authelia_container_state }} | ||||
|   community.docker.docker_container: | ||||
|     name: "{{ authelia_container_name }}" | ||||
|     image: "{{ authelia_container_image_ref }}" | ||||
|     env: "{{ authelia_container_env }}" | ||||
|     user: "{{ authelia_run_user }}:{{ authelia_run_group }}" | ||||
|     ports: "{{ authelia_container_ports | default(omit, true) }}" | ||||
|     labels: "{{ authelia_container_labels }}" | ||||
|     volumes: "{{ authelia_container_volumes }}" | ||||
|     networks: "{{ authelia_container_networks | default(omit, true) }}" | ||||
|     etc_hosts: "{{ authelia_container_etc_hosts | default(omit, true) }}" | ||||
|     purge_networks: "{{ authelia_container_purge_networks | default(omit, true)}}" | ||||
|     restart_policy: "{{ authelia_container_restart_policy }}" | ||||
|     recreate: "{{ authelia_container_recreate | default(omit, true) }}" | ||||
|     state: "{{ authelia_container_state }}" | ||||
|   register: authelia_container_info | ||||
| @@ -1,30 +1,16 @@ | ||||
| --- | ||||
| - name: Check for valid state | ||||
|   ansible.builtin.fail: | ||||
|     msg: >-2 | ||||
|       Unsupported state '{{ authelia_state }}'. | ||||
|       Supported states are {{ authelia_states | join(', ') }}. | ||||
|   when: authelia_state not in authelia_states | ||||
|  | ||||
| - name: Check for valid authelia deployment method | ||||
|   ansible.builtin.fail: | ||||
|     msg: >-2 | ||||
|       Unsupported deployment method '{{ authelia_deployment_method }}'. | ||||
|       Supported states are {{ authelia_deployment_methods | join(', ') }}. | ||||
|   when: authelia_deployment_method not in authelia_deployment_methods | ||||
|  | ||||
| - name: Ensure user {{ authelia_user }} is {{ authelia_state }} | ||||
|   ansible.builtin.user: | ||||
| - name: Ensure user {{ authelia_user }} exists | ||||
|   user: | ||||
|     name: "{{ authelia_user }}" | ||||
|     state: "{{ authelia_state }}" | ||||
|     state: present | ||||
|     system: true | ||||
|     create_home: false | ||||
|   register: authelia_user_info | ||||
|  | ||||
| - name: Ensure host directories are {{ authelia_state }} | ||||
|   ansible.builtin.file: | ||||
| - name: Ensure host directories are created with correct permissions | ||||
|   file: | ||||
|     path: "{{ item.path }}" | ||||
|     state: "{{ (authelia_state == 'present') | ternary('directory', 'absent') }}" | ||||
|     state: directory | ||||
|     owner: "{{ item.owner | default(authelia_user) }}" | ||||
|     group: "{{ item.group | default(authelia_user) }}" | ||||
|     mode: "{{ item.mode | default('0750') }}" | ||||
| @@ -39,16 +25,67 @@ | ||||
|     - path: "{{ authelia_asset_dir }}" | ||||
|       mode: "0750" | ||||
|  | ||||
| - name: Ensure config file is {{ authelia_state }} | ||||
|   ansible.builtin.copy: | ||||
| - name: Ensure config file is generated | ||||
|   copy: | ||||
|     content: "{{ authelia_config | to_nice_yaml(indent=2, width=10000) }}" | ||||
|     dest: "{{ authelia_config_file }}" | ||||
|     owner: "{{ authelia_run_user }}" | ||||
|     group: "{{ authelia_run_group }}" | ||||
|     mode: "0640" | ||||
|   notify: restart-authelia | ||||
|   when: authelia_state == 'present' | ||||
|  | ||||
| - name: Deploy authelia using {{ authelia_deployment_method }} | ||||
|   ansible.builtin.include_tasks: | ||||
|     file: "deploy-{{ authelia_deployment_method }}.yml" | ||||
| - name: Ensure sqlite database file exists before mounting it | ||||
|   file: | ||||
|     path: "{{ authelia_sqlite_storage_file }}" | ||||
|     state: touch | ||||
|     owner: "{{ authelia_run_user }}" | ||||
|     group: "{{ authelia_run_group }}" | ||||
|     mode: "0640" | ||||
|     access_time: preserve | ||||
|     modification_time: preserve | ||||
|   when: authelia_config_storage_local_path | default(false, true) | ||||
|  | ||||
| - name: Ensure user database exists before mounting it | ||||
|   file: | ||||
|     path: "{{ authelia_user_storage_file }}" | ||||
|     state: touch | ||||
|     owner: "{{ authelia_run_user }}" | ||||
|     group: "{{ authelia_run_group }}" | ||||
|     mode: "0640" | ||||
|     access_time: preserve | ||||
|     modification_time: preserve | ||||
|   when: authelia_config_authentication_backend_file_path | default(false, true) | ||||
|  | ||||
| - name: Ensure notification reports file exists before mounting it | ||||
|   file: | ||||
|     path: "{{ authelia_notification_storage_file }}" | ||||
|     state: touch | ||||
|     owner: "{{ authelia_run_user }}" | ||||
|     group: "{{ authelia_run_group }}" | ||||
|     mode: "0640" | ||||
|     access_time: preserve | ||||
|     modification_time: preserve | ||||
|   when: authelia_config_notifier_filesystem_filename | default(false, true) | ||||
|  | ||||
| - name: Ensure authelia container image is present | ||||
|   community.docker.docker_image: | ||||
|     name: "{{ authelia_container_image_ref }}" | ||||
|     state: present | ||||
|     source: pull | ||||
|     force_source: "{{ authelia_container_image_force_pull }}" | ||||
|   register: authelia_container_image_info | ||||
|  | ||||
| - name: Ensure authelia container is running | ||||
|   docker_container: | ||||
|     name: "{{ authelia_container_name }}" | ||||
|     image: "{{ authelia_container_image_ref }}" | ||||
|     env: "{{ authelia_container_env }}" | ||||
|     user: "{{ authelia_run_user }}:{{ authelia_run_group }}" | ||||
|     ports: "{{ authelia_container_ports | default(omit, true) }}" | ||||
|     labels: "{{ authelia_container_labels }}" | ||||
|     volumes: "{{ authelia_container_volumes }}" | ||||
|     networks: "{{ authelia_container_networks | default(omit, true) }}" | ||||
|     purge_networks: "{{ authelia_container_purge_networks | default(omit, true)}}" | ||||
|     restart_policy: "{{ authelia_container_restart_policy }}" | ||||
|     state: "{{ authelia_container_state }}" | ||||
|   register: authelia_container_info | ||||
|   | ||||
| @@ -1,9 +1,4 @@ | ||||
| --- | ||||
| authelia_states: | ||||
|   - "present" | ||||
|   - "absent" | ||||
| authelia_deployment_methods: | ||||
|   - "docker" | ||||
|  | ||||
| authelia_run_user: "{{ (authelia_user_info.uid) if authelia_user_info is defined else authelia_user }}" | ||||
| authelia_run_group: "{{ (authelia_user_info.group) if authelia_user_info is defined else authelia_user }}" | ||||
| @@ -53,20 +48,18 @@ authelia_base_config: >-2 | ||||
| authelia_config_server: >-2 | ||||
|   {{ | ||||
|     { | ||||
|       "address": authelia_config_server_address, | ||||
|       "host": authelia_config_server_host, | ||||
|       "port": authelia_config_server_port, | ||||
|       "path": authelia_config_server_path, | ||||
|       "asset_path": authelia_config_server_asset_path, | ||||
|       "read_buffer_size": authelia_config_server_read_buffer_size, | ||||
|       "write_buffer_size": authelia_config_server_write_buffer_size, | ||||
|       "enable_pprof": authelia_config_server_enable_pprof, | ||||
|       "enable_expvars": authelia_config_server_enable_expvars, | ||||
|       "disable_healthcheck": authelia_config_server_disable_healthcheck, | ||||
|       "endpoints": authelia_config_server_endpoints, | ||||
|       "buffers": authelia_config_server_buffers, | ||||
|     } | combine({"headers": {"csp_template": authelia_config_server_headers_csp_template}} | ||||
|         if authelia_config_server_headers_csp_template | default(false, true) else {}) | ||||
|   }} | ||||
| authelia_config_server_endpoints: | ||||
|   enable_expvars: "{{ authelia_config_server_endpoints_enable_expvars }}" | ||||
|   enable_pprof: "{{ authelia_config_server_endpoints_enable_pprof }}" | ||||
| authelia_config_server_buffers: | ||||
|   read: "{{ authelia_config_server_buffers_read }}" | ||||
|   write: "{{ authelia_config_server_buffers_write }}" | ||||
| authelia_config_server_tls: | ||||
|   key: "{{ authelia_config_server_tls_key }}" | ||||
|   certificate: "{{ authelia_config_server_tls_certificate }}" | ||||
| @@ -139,11 +132,10 @@ authelia_config_authentication_backend_ldap: | ||||
|   additional_groups_dn: "{{ authelia_config_authentication_backend_ldap_additional_groups_dn }}"  | ||||
|   users_filter: "{{ authelia_config_authentication_backend_ldap_users_filter }}"  | ||||
|   groups_filter: "{{ authelia_config_authentication_backend_ldap_groups_filter }}" | ||||
|   attributes: | ||||
|     username: "{{ authelia_config_authentication_backend_ldap_attributes_username }}" | ||||
|     mail: "{{ authelia_config_authentication_backend_ldap_attributes_mail }}" | ||||
|     display_name: "{{ authelia_config_authentication_backend_ldap_attributes_display_name }}" | ||||
|     group_name: "{{ authelia_config_authentication_backend_ldap_attributes_group_name }}" | ||||
|   group_name_attribute: "{{ authelia_config_authentication_backend_ldap_group_name_attribute }}" | ||||
|   username_attribute: "{{ authelia_config_authentication_backend_ldap_username_attribute }}" | ||||
|   mail_attribute: "{{ authelia_config_authentication_backend_ldap_mail_attribute }}" | ||||
|   display_name_attribute: "{{ authelia_config_authentication_backend_ldap_display_name_attribute }}" | ||||
|   user: "{{ authelia_config_authentication_backend_ldap_user }}" | ||||
|   password: "{{ authelia_config_authentication_backend_ldap_password }}" | ||||
| authelia_config_authentication_backend_file: | ||||
| @@ -175,19 +167,14 @@ authelia_config_access_control: | ||||
|   default_policy: "{{ authelia_config_access_control_default_policy }}" | ||||
|   networks: "{{ authelia_config_access_control_networks }}" | ||||
|   rules: "{{ authelia_config_access_control_rules }}" | ||||
| authelia_config_session: >-2 | ||||
|   {{ authelia_config_session_base | ||||
|     | combine(({'redis': authelia_config_session_redis} | ||||
|       if authelia_config_session_redis_host else {}), recursive=true) | ||||
|   }} | ||||
| authelia_config_session_base: | ||||
| authelia_config_session: | ||||
|   name: "{{ authelia_config_session_name }}" | ||||
|   domain: "{{ authelia_config_session_domain }}" | ||||
|   same_site: "{{ authelia_config_session_same_site }}" | ||||
|   secret: "{{ authelia_config_session_secret }}" | ||||
|   expiration: "{{ authelia_config_session_expiration }}"  | ||||
|   inactivity: "{{ authelia_config_session_inactivity }}" | ||||
|   remember_me: "{{ authelia_config_session_remember_me }}" | ||||
|   remember_me_duration: "{{ authelia_config_session_remember_me_duration }}" | ||||
| authelia_config_session_redis: >-2 | ||||
|   {{ | ||||
|     { | ||||
| @@ -231,13 +218,15 @@ authelia_config_storage: >-2 | ||||
| authelia_config_storage_local: | ||||
|   path: "{{ authelia_config_storage_local_path }}" | ||||
| authelia_config_storage_mysql: | ||||
|   host: "{{ authelia_database_address }}" | ||||
|   host: "{{ authelia_database_host }}" | ||||
|   port: "{{ authelia_config_storage_mysql_port }}" | ||||
|   database: "{{ authelia_database_name }}" | ||||
|   username: "{{ authelia_database_user }}" | ||||
|   password: "{{ authelia_database_pass }}" | ||||
|   timeout: "{{ authelia_database_timeout }}" | ||||
| authelia_config_storage_postgres: | ||||
|   address: "{{ authelia_database_address }}" | ||||
|   host: "{{ authelia_database_host }}" | ||||
|   port: "{{ authelia_config_storage_postgres_port }}" | ||||
|   database: "{{ authelia_database_name }}" | ||||
|   schema: public | ||||
|   username: "{{ authelia_database_user }}" | ||||
| @@ -261,7 +250,8 @@ authelia_config_notifier: >-2 | ||||
| authelia_config_notifier_filesystem: | ||||
|   filename: "{{ authelia_config_notifier_filesystem_filename }}" | ||||
| authelia_config_notifier_smtp: | ||||
|   address: "{{ authelia_config_notifier_smtp_address }}" | ||||
|   host: "{{ authelia_config_notifier_smtp_host }}" | ||||
|   port: "{{ authelia_config_notifier_smtp_port }}" | ||||
|   timeout: "{{ authelia_config_notifier_smtp_timeout }}" | ||||
|   username: "{{ authelia_config_notifier_smtp_username }}" | ||||
|   password: "{{ authelia_config_notifier_smtp_password }}" | ||||
|   | ||||
							
								
								
									
										22
									
								
								roles/elasticsearch/README.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										22
									
								
								roles/elasticsearch/README.md
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,22 @@ | ||||
| # `finallycoffee.services.elastiscsearch` | ||||
|  | ||||
| A simple ansible role which deploys a single-node elastic container to provide | ||||
| an easy way to do some indexing. | ||||
|  | ||||
| ## Usage | ||||
|  | ||||
| Per default, `/opt/elasticsearch/data` is used to persist data, it is | ||||
| customizable by using either `elasticsearch_base_path` or `elasticsearch_data_path`. | ||||
|  | ||||
| As elasticsearch be can be quite memory heavy, the maximum amount of allowed RAM | ||||
| can be configured using `elasticsearch_allocated_ram_mb`, defaulting to 512 (mb). | ||||
|  | ||||
| The cluster name and discovery type can be overridden using | ||||
| `elasticsearch_config_cluster_name` (default: elastic) and | ||||
| `elasticsearch_config_discovery_type` (default: single-node), should one | ||||
| need a multi-node elasticsearch deployment. | ||||
|  | ||||
| Per default, no ports or networks are mapped, and explizit mapping using | ||||
| either ports (`elasticsearch_container_ports`) or networks | ||||
| (`elasticsearch_container_networks`) is required in order for other services | ||||
| to use elastic. | ||||
							
								
								
									
										35
									
								
								roles/elasticsearch/defaults/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										35
									
								
								roles/elasticsearch/defaults/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,35 @@ | ||||
| --- | ||||
|  | ||||
| elasticsearch_version: 7.17.7 | ||||
|  | ||||
| elasticsearch_base_path: /opt/elasticsearch | ||||
| elasticsearch_data_path: "{{ elasticsearch_base_path }}/data" | ||||
|  | ||||
| elasticsearch_config_cluster_name: elastic | ||||
| elasticsearch_config_discovery_type: single-node | ||||
| elasticsearch_config_boostrap_memory_lock: true | ||||
| elasticsearch_allocated_ram_mb: 512 | ||||
|  | ||||
| elasticsearch_container_image_name: docker.elastic.co/elasticsearch/elasticsearch-oss | ||||
| elasticsearch_container_image_tag: ~ | ||||
| elasticsearch_container_image: >- | ||||
|   {{ elasticsearch_container_image_name }}:{{ elasticsearch_container_image_tag | default(elasticsearch_version, true) }} | ||||
|  | ||||
| elasticsearch_container_name: elasticsearch | ||||
| elasticsearch_container_env: | ||||
|   "ES_JAVA_OPTS": "-Xms{{ elasticsearch_allocated_ram_mb }}m -Xmx{{ elasticsearch_allocated_ram_mb }}m" | ||||
|   "cluster.name": "{{ elasticsearch_config_cluster_name }}" | ||||
|   "discovery.type": "{{ elasticsearch_config_discovery_type }}" | ||||
|   "bootstrap.memory_lock": "{{ 'true' if elasticsearch_config_boostrap_memory_lock else 'false' }}" | ||||
| elasticsearch_container_user: ~ | ||||
| elasticsearch_container_ports: ~ | ||||
| elasticsearch_container_labels: | ||||
|   version: "{{ elasticsearch_version }}" | ||||
| elasticsearch_container_ulimits: | ||||
| #  - "memlock:{{ (1.5 * 1024 * elasticsearch_allocated_ram_mb) | int }}:{{ (1.5 * 1024 * elasticsearch_allocated_ram_mb) | int }}" | ||||
|   - "memlock:-1:-1" | ||||
| elasticsearch_container_volumes: | ||||
|   - "{{ elasticsearch_data_path }}:/usr/share/elasticsearch/data:z" | ||||
| elasticsearch_container_networks: ~ | ||||
| elasticsearch_container_purge_networks: ~ | ||||
| elasticsearch_container_restart_policy: unless-stopped | ||||
							
								
								
									
										32
									
								
								roles/elasticsearch/tasks/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										32
									
								
								roles/elasticsearch/tasks/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,32 @@ | ||||
| --- | ||||
|  | ||||
| - name: Ensure host directories are present | ||||
|   file: | ||||
|     path: "{{ item }}" | ||||
|     state: directory | ||||
|     mode: "0777" | ||||
|   loop: | ||||
|     - "{{ elasticsearch_base_path }}" | ||||
|     - "{{ elasticsearch_data_path }}" | ||||
|  | ||||
| - name: Ensure elastic container image is present | ||||
|   docker_image: | ||||
|     name: "{{ elasticsearch_container_image }}" | ||||
|     state: present | ||||
|     source: pull | ||||
|     force_source: "{{ elasticsearch_container_image_tag|default(false, true)|bool }}" | ||||
|  | ||||
| - name: Ensure elastic container is running | ||||
|   docker_container: | ||||
|     name: "{{ elasticsearch_container_name }}" | ||||
|     image: "{{ elasticsearch_container_image }}" | ||||
|     env: "{{ elasticsearch_container_env | default(omit, True) }}" | ||||
|     user: "{{ elasticsearch_container_user | default(omit, True) }}" | ||||
|     ports: "{{ elasticsearch_container_ports | default(omit, True) }}" | ||||
|     labels: "{{ elasticsearch_container_labels | default(omit, True) }}" | ||||
|     volumes: "{{ elasticsearch_container_volumes }}" | ||||
|     ulimits: "{{ elasticsearch_container_ulimits }}" | ||||
|     networks: "{{ elasticsearch_container_networks | default(omit, True) }}" | ||||
|     purge_networks: "{{ elasticsearch_container_purge_networks | default(omit, True) }}" | ||||
|     restart_policy: "{{ elasticsearch_container_restart_policy }}" | ||||
|     state: started | ||||
| @@ -1,18 +0,0 @@ | ||||
| # `finallycoffee.services.ghost` ansible role | ||||
|  | ||||
| [Ghost](https://ghost.org/) is a self-hosted blog with rich media capabilities, | ||||
| which this role deploys in a docker container. | ||||
|  | ||||
| ## Requirements | ||||
|  | ||||
| Ghost requires a MySQL-database (like mariadb) for storing its data, which | ||||
| can be configured using the `ghost_database_(host|username|password|database)` variables. | ||||
|  | ||||
| Setting `ghost_domain` to a fully-qualified domain on which ghost should be reachable | ||||
| is also required. | ||||
|  | ||||
| Ghost's configuration can be changed using the `ghost_config` variable. | ||||
|  | ||||
| Container arguments which are equivalent to `community.docker.docker_container` can be | ||||
| provided in the `ghost_container_[...]` syntax (e.g. `ghost_container_ports` to expose | ||||
| ghosts port to the host). | ||||
| @@ -1,6 +1,7 @@ | ||||
| --- | ||||
|  | ||||
| ghost_domain: ~ | ||||
| ghost_version: "5.113.1" | ||||
| ghost_version: "5.33.6" | ||||
| ghost_user: ghost | ||||
| ghost_user_group: ghost | ||||
| ghost_base_path: /opt/ghost | ||||
| @@ -35,4 +36,3 @@ ghost_container_restart_policy: "unless-stopped" | ||||
| ghost_container_networks: ~ | ||||
| ghost_container_purge_networks: ~ | ||||
| ghost_container_etc_hosts: ~ | ||||
| ghost_container_state: started | ||||
|   | ||||
| @@ -1,10 +0,0 @@ | ||||
| --- | ||||
| allow_duplicates: true | ||||
| dependencies: [] | ||||
| galaxy_info: | ||||
|   role_name: ghost | ||||
|   description: Ansible role to deploy ghost (https://ghost.org) using docker | ||||
|   galaxy_tags: | ||||
|     - ghost | ||||
|     - blog | ||||
|     - docker | ||||
| @@ -16,16 +16,15 @@ | ||||
|  | ||||
| - name: Ensure host paths for docker volumes exist for ghost | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ item.path }}" | ||||
|     path: "{{ item }}" | ||||
|     state: directory | ||||
|     mode: "0750" | ||||
|     owner: "{{ item.owner | default(ghost_user) }}" | ||||
|     group: "{{ item.group | default(ghost_user_group) }}" | ||||
|     owner: "{{ ghost_user }}" | ||||
|     group: "{{ ghost_user_group }}" | ||||
|   loop: | ||||
|     - path: "{{ ghost_base_path }}" | ||||
|     - path: "{{ ghost_data_path }}" | ||||
|       owner: "1000" | ||||
|     - path: "{{ ghost_config_path }}" | ||||
|     - "{{ ghost_base_path }}" | ||||
|     - "{{ ghost_data_path }}" | ||||
|     - "{{ ghost_config_path }}" | ||||
|  | ||||
| - name: Ensure ghost configuration file is templated | ||||
|   ansible.builtin.template: | ||||
| @@ -42,7 +41,7 @@ | ||||
|     source: pull | ||||
|     force_source: "{{ ghost_container_image_tag is defined }}" | ||||
|  | ||||
| - name: Ensure ghost container '{{ ghost_container_name }}' is {{ ghost_container_state }} | ||||
| - name: Ensure ghost container is running | ||||
|   community.docker.docker_container: | ||||
|     name: "{{ ghost_container_name }}" | ||||
|     image: "{{ ghost_container_image }}" | ||||
| @@ -54,4 +53,4 @@ | ||||
|     networks: "{{ ghost_container_networks | default(omit, true) }}" | ||||
|     purge_networks: "{{ ghost_container_purge_networks | default(omit, true) }}" | ||||
|     restart_policy: "{{ ghost_container_restart_policy }}" | ||||
|     state: "{{ ghost_container_state }}" | ||||
|     state: started | ||||
|   | ||||
| @@ -1,7 +1,7 @@ | ||||
| --- | ||||
| gitea_version: "1.23.6" | ||||
|  | ||||
| gitea_version: "1.19.4" | ||||
| gitea_user: git | ||||
| gitea_run_user: "{{ gitea_user }}" | ||||
| gitea_base_path: "/opt/gitea" | ||||
| gitea_data_path: "{{ gitea_base_path }}/data" | ||||
|  | ||||
| @@ -9,30 +9,17 @@ gitea_data_path: "{{ gitea_base_path }}/data" | ||||
| gitea_domain: ~ | ||||
|  | ||||
| # container config | ||||
| gitea_container_name: "{{ gitea_user }}" | ||||
| gitea_container_image_server: "docker.io" | ||||
| gitea_container_image_name: "gitea" | ||||
| gitea_container_image_namespace: gitea | ||||
| gitea_container_image_fq_name: >- | ||||
|   {{ | ||||
|     [ | ||||
|       gitea_container_image_server, | ||||
|       gitea_container_image_namespace, | ||||
|       gitea_container_image_name | ||||
|     ] | join('/') | ||||
|   }} | ||||
| gitea_container_name: "git" | ||||
| gitea_container_image_name: "docker.io/gitea/gitea" | ||||
| gitea_container_image_tag: "{{ gitea_version }}" | ||||
| gitea_container_image: >-2 | ||||
|   {{ gitea_container_image_fq_name }}:{{ gitea_container_image_tag }} | ||||
| gitea_container_image: "{{ gitea_container_image_name }}:{{ gitea_container_image_tag }}" | ||||
| gitea_container_networks: [] | ||||
| gitea_container_purge_networks: ~ | ||||
| gitea_container_restart_policy: "unless-stopped" | ||||
| gitea_container_extra_env: {} | ||||
| gitea_container_extra_labels: {} | ||||
| gitea_contianer_extra_labels: {} | ||||
| gitea_container_extra_ports: [] | ||||
| gitea_container_extra_volumes: [] | ||||
| gitea_container_state: started | ||||
| gitea_container_user: ~ | ||||
|  | ||||
| # container defaults | ||||
| gitea_container_base_volumes: | ||||
| @@ -53,10 +40,10 @@ gitea_container_base_labels: | ||||
| gitea_config_mailer_enabled: false | ||||
| gitea_config_mailer_type: ~ | ||||
| gitea_config_mailer_from_addr: ~ | ||||
| gitea_config_mailer_smtp_addr: ~ | ||||
| gitea_config_mailer_host: ~ | ||||
| gitea_config_mailer_user: ~ | ||||
| gitea_config_mailer_passwd: ~ | ||||
| gitea_config_mailer_protocol: ~ | ||||
| gitea_config_mailer_tls: ~ | ||||
| gitea_config_mailer_sendmail_path: ~ | ||||
| gitea_config_metrics_enabled: false | ||||
|  | ||||
|   | ||||
| @@ -1,10 +0,0 @@ | ||||
| --- | ||||
| allow_duplicates: true | ||||
| dependencies: [] | ||||
| galaxy_info: | ||||
|   role_name: gitea | ||||
|   description: Ansible role to deploy gitea using docker | ||||
|   galaxy_tags: | ||||
|     - gitea | ||||
|     - git | ||||
|     - docker | ||||
| @@ -1,14 +1,14 @@ | ||||
| --- | ||||
| - name: Ensure gitea user '{{ gitea_user }}' is present | ||||
|   ansible.builtin.user: | ||||
|  | ||||
| - name: Create gitea user | ||||
|   user: | ||||
|     name: "{{ gitea_user }}" | ||||
|     state: "present" | ||||
|     system: false | ||||
|     create_home: true | ||||
|     state: present | ||||
|     system: no | ||||
|   register: gitea_user_res | ||||
|  | ||||
| - name: Ensure host directories exist | ||||
|   ansible.builtin.file: | ||||
|   file: | ||||
|     path: "{{ item }}" | ||||
|     owner: "{{ gitea_user_res.uid }}" | ||||
|     group: "{{ gitea_user_res.group }}" | ||||
| @@ -18,7 +18,7 @@ | ||||
|     - "{{ gitea_data_path }}" | ||||
|  | ||||
| - name: Ensure .ssh folder for gitea user exists | ||||
|   ansible.builtin.file: | ||||
|   file: | ||||
|     path: "/home/{{ gitea_user }}/.ssh" | ||||
|     state: directory | ||||
|     owner: "{{ gitea_user_res.uid }}" | ||||
| @@ -37,16 +37,16 @@ | ||||
|   register: gitea_user_ssh_key | ||||
|  | ||||
| - name: Create forwarding script | ||||
|   ansible.builtin.copy: | ||||
|   copy: | ||||
|     dest: "/usr/local/bin/gitea" | ||||
|     owner: "{{ gitea_user_res.uid }}" | ||||
|     group: "{{ gitea_user_res.group }}" | ||||
|     mode: 0700 | ||||
|     content: | | ||||
|       ssh -p {{ gitea_public_ssh_server_port }} -o StrictHostKeyChecking=no {{ gitea_run_user }}@127.0.0.1 -i /home/{{ gitea_user }}/.ssh/id_ssh_ed25519 "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $0 $@" | ||||
|       ssh -p {{ gitea_public_ssh_server_port }} -o StrictHostKeyChecking=no {{ gitea_user }}@127.0.0.1 -i /home/{{ gitea_user }}/.ssh/id_ssh_ed25519 "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $0 $@" | ||||
|  | ||||
| - name: Add host pubkey to git users authorized_keys file | ||||
|   ansible.builtin.lineinfile: | ||||
|   lineinfile: | ||||
|     path: "/home/{{ gitea_user }}/.ssh/authorized_keys" | ||||
|     line: "{{ gitea_user_ssh_key.public_key }} Gitea:Host2Container" | ||||
|     state: present | ||||
| @@ -56,28 +56,26 @@ | ||||
|     mode: 0600 | ||||
|  | ||||
| - name: Ensure gitea container image is present | ||||
|   community.docker.docker_image: | ||||
|   docker_image: | ||||
|     name: "{{ gitea_container_image }}" | ||||
|     state: present | ||||
|     source: pull | ||||
|     force_source: "{{ gitea_container_image.endswith(':latest') }}" | ||||
|  | ||||
| - name: Ensure container '{{ gitea_container_name }}' with gitea is {{ gitea_container_state }} | ||||
|   community.docker.docker_container: | ||||
| - name: Ensure container '{{ gitea_container_name }}' with gitea is running | ||||
|   docker_container: | ||||
|     name: "{{ gitea_container_name }}" | ||||
|     image: "{{ gitea_container_image }}" | ||||
|     env: "{{ gitea_container_env }}" | ||||
|     labels: "{{ gitea_container_labels }}" | ||||
|     volumes: "{{ gitea_container_volumes }}" | ||||
|     networks: "{{ gitea_container_networks | default(omit, True) }}" | ||||
|     purge_networks: "{{ gitea_container_purge_networks | default(omit, True) }}" | ||||
|     published_ports: "{{ gitea_container_ports }}" | ||||
|     restart_policy: "{{ gitea_container_restart_policy }}" | ||||
|     state: "{{ gitea_container_state }}" | ||||
|     user: "{{ gitea_container_user | default(omit, true) }}" | ||||
|     state: started | ||||
|  | ||||
| - name: Ensure given configuration is set in the config file | ||||
|   ansible.builtin.ini_file: | ||||
|   ini_file: | ||||
|     path: "{{ gitea_data_path }}/gitea/conf/app.ini" | ||||
|     section: "{{ section }}" | ||||
|     option: "{{ option }}" | ||||
|   | ||||
| @@ -14,7 +14,7 @@ gitea_container_port_ssh: 22 | ||||
|  | ||||
| gitea_config_base: | ||||
|   RUN_MODE: prod | ||||
|   RUN_USER: "{{ gitea_run_user }}" | ||||
|   RUN_USER: "{{ gitea_user }}" | ||||
|   server: | ||||
|     SSH_DOMAIN: "{{ gitea_domain }}" | ||||
|     DOMAIN: "{{ gitea_domain }}" | ||||
| @@ -24,11 +24,11 @@ gitea_config_base: | ||||
|   mailer: | ||||
|     ENABLED: "{{ gitea_config_mailer_enabled }}" | ||||
|     MAILER_TYP: "{{ gitea_config_mailer_type }}" | ||||
|     SMTP_ADDR: "{{ gitea_config_mailer_smtp_addr }}" | ||||
|     HOST: "{{ gitea_config_mailer_host }}" | ||||
|     USER: "{{ gitea_config_mailer_user }}" | ||||
|     PASSWD: "{{ gitea_config_mailer_passwd }}" | ||||
|     PROTOCOL: "{{ gitea_config_mailer_protocol }}" | ||||
|     FROM: "{{ gitea_config_mailer_from }}" | ||||
|     IS_TLS_ENABLED: "{{ gitea_config_mailer_tls }}" | ||||
|     FROM: "{{ gitea_config_mailer_from_addr }}" | ||||
|     SENDMAIL_PATH: "{{ gitea_config_mailer_sendmail_path }}" | ||||
|   metrics: | ||||
|     ENABLED: "{{ gitea_config_metrics_enabled }}" | ||||
|   | ||||
| @@ -1,21 +0,0 @@ | ||||
| # `finallycoffee.services.hedgedoc` ansible role | ||||
|  | ||||
| Role to deploy and configure hedgedoc using `docker` or `podman`. | ||||
| To configure hedgedoc, set either the config as complex data | ||||
| directly in `hedgedoc_config` or use the flattened variables | ||||
| from the `hedgedoc_config_*` prefix (see | ||||
| [defaults/main/config.yml](defaults/main/config.yml)). | ||||
|  | ||||
| To remove hedgedoc, set `hedgedoc_state: absent`. Note that this | ||||
| will delete all data directories as well, removing any traces this | ||||
| role created on the target (except database contents). | ||||
|  | ||||
| # Required configuration | ||||
|  | ||||
| - `hedgedoc_config_domain` - Domain of the hedgedoc instance | ||||
| - `hedgedoc_config_session_secret` - session secret for hedgedoc | ||||
|  | ||||
| ## Deployment methods | ||||
|  | ||||
| To set the desired deployment method, set `hedgedoc_deployment_method` to a | ||||
| supported deployment methods (see [vars/main.yml](vars/main.yml#5)). | ||||
| @@ -1,52 +0,0 @@ | ||||
| --- | ||||
| hedgedoc_config_domain: ~ | ||||
| hedgedoc_config_log_level: "info" | ||||
| hedgedoc_config_session_secret: ~ | ||||
| hedgedoc_config_protocol_use_ssl: true | ||||
| hedgedoc_config_hsts_enable: true | ||||
| hedgedoc_config_csp_enable: true | ||||
| hedgedoc_config_cookie_policy: 'lax' | ||||
| hedgedoc_config_allow_free_url: true | ||||
| hedgedoc_config_allow_email_register: false | ||||
| hedgedoc_config_allow_anonymous: true | ||||
| hedgedoc_config_allow_gravatar: true | ||||
| hedgedoc_config_require_free_url_authentication: true | ||||
| hedgedoc_config_default_permission: 'full' | ||||
|  | ||||
| hedgedoc_config_db_username: hedgedoc | ||||
| hedgedoc_config_db_password: ~ | ||||
| hedgedoc_config_db_database: hedgedoc | ||||
| hedgedoc_config_db_host: localhost | ||||
| hedgedoc_config_db_port: 5432 | ||||
| hedgedoc_config_db_dialect: postgres | ||||
|  | ||||
| hedgedoc_config_database: | ||||
|   username: "{{ hedgedoc_config_db_username }}" | ||||
|   password: "{{ hedgedoc_config_db_password }}" | ||||
|   database: "{{ hedgedoc_config_db_database }}" | ||||
|   host: "{{ hedgedoc_config_db_host }}" | ||||
|   port: "{{ hedgedoc_config_db_port | int }}" | ||||
|   dialect: "{{ hedgedoc_config_db_dialect }}" | ||||
| hedgedoc_config_base: | ||||
|   production: | ||||
|     domain: "{{ hedgedoc_config_domain }}" | ||||
|     loglevel: "{{ hedgedoc_config_log_level }}" | ||||
|     sessionSecret: "{{ hedgedoc_config_session_secret }}" | ||||
|     protocolUseSSL: "{{ hedgedoc_config_protocol_use_ssl }}" | ||||
|     cookiePolicy: "{{ hedgedoc_config_cookie_policy }}" | ||||
|     allowFreeURL: "{{ hedgedoc_config_allow_free_url }}" | ||||
|     allowAnonymous: "{{ hedgedoc_config_allow_anonymous }}" | ||||
|     allowEmailRegister: "{{ hedgedoc_config_allow_email_register }}" | ||||
|     allowGravatar: "{{ hedgedoc_config_allow_gravatar }}" | ||||
|     requireFreeURLAuthentication: >-2 | ||||
|       {{ hedgedoc_config_require_free_url_authentication }} | ||||
|     defaultPermission: "{{ hedgedoc_config_default_permission }}" | ||||
|     hsts: | ||||
|       enable: "{{ hedgedoc_config_hsts_enable }}" | ||||
|     csp: | ||||
|       enable: "{{ hedgedoc_config_csp_enable }}" | ||||
|     db: "{{ hedgedoc_config_database }}" | ||||
| hedgedoc_config: ~ | ||||
| hedgedoc_full_config: >-2 | ||||
|   {{ hedgedoc_config_base | default({}, true) | ||||
|     | combine(hedgedoc_config | default({}, true), recursive=True) }} | ||||
| @@ -1,57 +0,0 @@ | ||||
| --- | ||||
| hedgedoc_container_image_registry: quay.io | ||||
| hedgedoc_container_image_namespace: hedgedoc | ||||
| hedgedoc_container_image_name: hedgedoc | ||||
| hedgedoc_container_image_flavour: alpine | ||||
| hedgedoc_container_image_tag: ~ | ||||
| hedgedoc_container_image: >-2 | ||||
|   {{ | ||||
|     ([ | ||||
|       hedgedoc_container_image_registry, | ||||
|       hedgedoc_container_image_namespace | default([], true), | ||||
|       hedgedoc_container_image_name, | ||||
|     ] | flatten | join('/')) | ||||
|     + ':' | ||||
|     + hedgedoc_container_image_tag | default( | ||||
|       hedgedoc_version + ( | ||||
|         ((hedgedoc_container_image_flavour is string) | ||||
|           and (hedgedoc_container_image_flavour | length > 0)) | ||||
|         | ternary('-' + | ||||
|           hedgedoc_container_image_flavour | default('', true), | ||||
|           '' | ||||
|         ) | ||||
|       ), | ||||
|       true | ||||
|     ) | ||||
|   }} | ||||
| hedgedoc_container_image_source: pull | ||||
| hedgedoc_container_name: hedgedoc | ||||
| hedgedoc_container_state: >-2 | ||||
|   {{ (hedgedoc_state == 'present') | ternary('started', 'absent') }} | ||||
|  | ||||
| hedgedoc_container_config_file: "/hedgedoc/config.json" | ||||
| hedgedoc_container_upload_path: "/hedgedoc/public/uploads" | ||||
|  | ||||
| hedgedoc_container_env: ~ | ||||
| hedgedoc_container_user: >-2 | ||||
|   {{ hedgedoc_run_user_id }}:{{ hedgedoc_run_group_id }} | ||||
| hedgedoc_container_ports: ~ | ||||
| hedgedoc_container_networks: ~ | ||||
| hedgedoc_container_etc_hosts: ~ | ||||
| hedgedoc_container_base_volumes: | ||||
|   - "{{ hedgedoc_config_file }}:{{ hedgedoc_container_config_file }}:ro" | ||||
|   - "{{ hedgedoc_uploads_path }}:{{ hedgedoc_container_upload_path }}:rw" | ||||
| hedgedoc_container_volumes: ~ | ||||
| hedgedoc_container_all_volumes: >-2 | ||||
|   {{ hedgedoc_container_base_volumes | default([], true) | ||||
|     + hedgedoc_container_volumes | default([], true) }} | ||||
| hedgedoc_container_base_labels: | ||||
|   version: "{{ hedgedoc_container_tag | default(hedgedoc_version, true) }}" | ||||
| hedgedoc_container_labels: ~ | ||||
| hedgedoc_container_network_mode: ~ | ||||
| hedgedoc_container_all_labels: >-2 | ||||
|   {{ hedgedoc_container_base_labels | default({}, true) | ||||
|     | combine(hedgedoc_container_labels | default({}, true)) }} | ||||
| hedgedoc_container_restart_policy: >-2 | ||||
|   {{ (hedgedoc_deployment_method == 'docker') | ||||
|       | ternary('unless-stopped', 'on-failure') }} | ||||
| @@ -1,9 +0,0 @@ | ||||
| --- | ||||
| hedgedoc_user: hedgedoc | ||||
| hedgedoc_version: "1.10.2" | ||||
|  | ||||
| hedgedoc_state: present | ||||
| hedgedoc_deployment_method: docker | ||||
|  | ||||
| hedgedoc_config_file: "/etc/hedgedoc/config.json" | ||||
| hedgedoc_uploads_path: "/var/lib/hedgedoc-uploads" | ||||
| @@ -1,5 +0,0 @@ | ||||
| --- | ||||
| hedgedoc_run_user_id: >-2 | ||||
|   {{ hedgedoc_user_info.uid | default(hedgedoc_user) }} | ||||
| hedgedoc_run_group_id: >-2 | ||||
|   {{ hedgedoc_user_info.group | default(hedgedoc_user) }} | ||||
| @@ -1,12 +0,0 @@ | ||||
| --- | ||||
| allow_duplicates: true | ||||
| dependencies: [] | ||||
| galaxy_info: | ||||
|   role_name: hedgedoc | ||||
|   description: >-2 | ||||
|     Deploy hedgedoc, a collaborative markdown editor, using docker | ||||
|   galaxy_tags: | ||||
|     - hedgedoc | ||||
|     - markdown | ||||
|     - collaboration | ||||
|     - docker | ||||
| @@ -1,23 +0,0 @@ | ||||
| --- | ||||
| - name: Check for valid state | ||||
|   ansible.builtin.fail: | ||||
|     msg: >-2 | ||||
|       Unsupported state '{{ hedgedoc_state }}'. Supported | ||||
|       states are {{ hedgedoc_states | join(', ') }}. | ||||
|   when: hedgedoc_state not in hedgedoc_states | ||||
|  | ||||
| - name: Check for valid deployment method | ||||
|   ansible.builtin.fail: | ||||
|     msg: >-2 | ||||
|       Deployment method '{{ hedgedoc_deployment_method }}' | ||||
|       is not supported. Supported are: | ||||
|       {{ hedgedoc_deployment_methods | join(', ') }} | ||||
|   when: hedgedoc_deployment_method not in hedgedoc_deployment_methods | ||||
|  | ||||
| - name: Ensure required variables are given | ||||
|   ansible.builtin.fail: | ||||
|     msg: "Required variable '{{ item }}' is undefined!" | ||||
|   loop: "{{ hedgedoc_required_arguments }}" | ||||
|   when: >-2 | ||||
|     item not in hostvars[inventory_hostname] | ||||
|     or hostvars[inventory_hostname][item] | length == 0 | ||||
| @@ -1,31 +0,0 @@ | ||||
| --- | ||||
| - name: Ensure container image '{{ hedgedoc_container_image }}' is {{ hedgedoc_state }} | ||||
|   community.docker.docker_image: | ||||
|     name: "{{ hedgedoc_container_image }}" | ||||
|     state: "{{ hedgedoc_state }}" | ||||
|     source: "{{ hedgedoc_container_image_source }}" | ||||
|     force_source: >-2 | ||||
|       {{ hedgedoc_container_force_source | default( | ||||
|         hedgedoc_container_image_tag | default(false, true), true) }} | ||||
|   register: hedgedoc_container_image_info | ||||
|   until: hedgedoc_container_image_info is success | ||||
|   retries: 5 | ||||
|   delay: 3 | ||||
|  | ||||
| - name: Ensure container '{{ hedgedoc_container_name }}' is {{ hedgedoc_container_state }} | ||||
|   community.docker.docker_container: | ||||
|     name: "{{ hedgedoc_container_name }}" | ||||
|     image: "{{ hedgedoc_container_image }}" | ||||
|     env: "{{ hedgedoc_container_env | default(omit, true) }}" | ||||
|     user: "{{ hedgedoc_container_user | default(omit, true) }}" | ||||
|     ports: "{{ hedgedoc_container_ports | default(omit, true) }}" | ||||
|     labels: "{{ hedgedoc_container_all_labels }}" | ||||
|     volumes: "{{ hedgedoc_container_all_volumes }}" | ||||
|     etc_hosts: "{{ hedgedoc_container_etc_hosts | default(omit, true) }}" | ||||
|     dns_servers: >-2 | ||||
|       {{ hedgedoc_container_dns_servers | default(omit, true) }} | ||||
|     network_mode: >-2 | ||||
|       {{ hedgedoc_container_network_mode | default(omit, true) }} | ||||
|     restart_policy: >-2 | ||||
|       {{ hedgedoc_container_restart_policy | default(omit, true) }} | ||||
|     state: "{{ hedgedoc_container_state }}" | ||||
| @@ -1,31 +0,0 @@ | ||||
| --- | ||||
| - name: Ensure container image '{{ hedgedoc_container_image }}' is {{ hedgedoc_state }} | ||||
|   containers.podman.podman_image: | ||||
|     name: "{{ hedgedoc_container_image }}" | ||||
|     state: "{{ hedgedoc_state }}" | ||||
|     pull: "{{ (hedgedoc_container_image_source == 'pull') | bool }}" | ||||
|     force: >-2 | ||||
|       {{ hedgedoc_container_force_source | default( | ||||
|         hedgedoc_container_image_tag | default(false, true), true) }} | ||||
|   register: hedgedoc_container_image_info | ||||
|   until: hedgedoc_container_image_info is success | ||||
|   retries: 5 | ||||
|   delay: 3 | ||||
|  | ||||
| - name: Ensure container '{{ hedgedoc_container_name }}' is {{ hedgedoc_container_state }} | ||||
|   containers.podman.podman_container: | ||||
|     name: "{{ hedgedoc_container_name }}" | ||||
|     image: "{{ hedgedoc_container_image }}" | ||||
|     env: "{{ hedgedoc_container_env | default(omit, true) }}" | ||||
|     user: "{{ hedgedoc_container_user | default(omit, true) }}" | ||||
|     ports: "{{ hedgedoc_container_ports | default(omit, true) }}" | ||||
|     labels: "{{ hedgedoc_container_all_labels }}" | ||||
|     volumes: "{{ hedgedoc_container_all_volumes }}" | ||||
|     etc_hosts: "{{ hedgedoc_container_etc_hosts | default(omit, true) }}" | ||||
|     dns_servers: >-2 | ||||
|       {{ hedgedoc_container_dns_servers | default(omit, true) }} | ||||
|     network_mode: >-2 | ||||
|       {{ hedgedoc_container_network_mode | default(omit, true) }} | ||||
|     restart_policy: >-2 | ||||
|       {{ hedgedoc_container_restart_policy | default(omit, true) }} | ||||
|     state: "{{ hedgedoc_container_state }}" | ||||
| @@ -1,21 +0,0 @@ | ||||
| --- | ||||
| - name: Check preconditions | ||||
|   ansible.builtin.include_tasks: | ||||
|     file: "check.yml" | ||||
|  | ||||
| - name: Ensure user '{{ hedgedoc_user }}' is {{ hedgedoc_state }} | ||||
|   ansible.builtin.user: | ||||
|     name: "{{ hedgedoc_user }}" | ||||
|     state: "{{ hedgedoc_state }}" | ||||
|     system: "{{ hedgedoc_user_system | default(true, false) }}" | ||||
|   register: hedgedoc_user_info | ||||
|  | ||||
| - name: Ensure configuration file '{{ hedgedoc_config_file }}' is {{ hedgedoc_state }} | ||||
|   ansible.builtin.copy: | ||||
|     dest: "{{ hedgedoc_config_file }}" | ||||
|     content: "{{ hedgedoc_full_config | to_nice_json }}" | ||||
|   when: hedgedoc_state == 'present' | ||||
|  | ||||
| - name: Ensure hedgedoc is deployed using {{ hedgedoc_deployment_method }} | ||||
|   ansible.builtin.include_tasks: | ||||
|     file: "deploy-{{ hedgedoc_deployment_method }}.yml" | ||||
| @@ -1,11 +0,0 @@ | ||||
| --- | ||||
| hedgedoc_states: | ||||
|   - present | ||||
|   - absent | ||||
| hedgedoc_deployment_methods: | ||||
|   - docker | ||||
|   - podman | ||||
|  | ||||
| hedgedoc_required_arguments: | ||||
|   - hedgedoc_config_domain | ||||
|   - hedgedoc_config_session_secret | ||||
| @@ -1,15 +0,0 @@ | ||||
| # `finallycoffee.services.jellyfin` ansible role | ||||
|  | ||||
| This role runs [Jellyfin](https://jellyfin.org/), a free software media system, | ||||
| in a docker container. | ||||
|  | ||||
| ## Usage | ||||
|  | ||||
| `jellyfin_domain` contains the FQDN which jellyfin should listen to. Most configuration | ||||
| is done in the software itself. | ||||
|  | ||||
| Jellyfin runs in host networking mode by default, as that is needed for some features like | ||||
| network discovery with chromecasts and similar. | ||||
|  | ||||
| Media can be mounted into jellyfin using `jellyfin_media_volumes`, taking a list of strings | ||||
| akin to `community.docker.docker_container`'s `volumes` key. | ||||
| @@ -1,8 +1,7 @@ | ||||
| --- | ||||
|  | ||||
| jellyfin_user: jellyfin | ||||
| jellyfin_version: "10.10.6" | ||||
| jellyfin_state: present | ||||
| jellyfin_deployment_method: docker | ||||
| jellyfin_version: 10.8.6 | ||||
|  | ||||
| jellyfin_base_path: /opt/jellyfin | ||||
| jellyfin_config_path: "{{ jellyfin_base_path }}/config" | ||||
| @@ -13,11 +12,7 @@ jellyfin_media_volumes: [] | ||||
| jellyfin_container_name: jellyfin | ||||
| jellyfin_container_image_name: "docker.io/jellyfin/jellyfin" | ||||
| jellyfin_container_image_tag: ~ | ||||
| jellyfin_container_image_ref: >-2 | ||||
|   {{ jellyfin_container_image_name }}:{{ jellyfin_container_image_tag | default(jellyfin_version, true) }} | ||||
| jellyfin_container_image_source: pull | ||||
| jellyfin_container_state: >-2 | ||||
|   {{ (jellyfin_state == 'present') | ternary('started', 'absent') }} | ||||
| jellyfin_container_image_ref: "{{ jellyfin_container_image_name }}:{{ jellyfin_container_image_tag | default(jellyfin_version, true) }}" | ||||
| jellyfin_container_network_mode: host | ||||
| jellyfin_container_networks: ~ | ||||
| jellyfin_container_volumes: "{{ jellyfin_container_base_volumes + jellyfin_media_volumes }}" | ||||
|   | ||||
| @@ -1,10 +0,0 @@ | ||||
| --- | ||||
| allow_duplicates: true | ||||
| dependencies: [] | ||||
| galaxy_info: | ||||
|   role_name: jellyfin | ||||
|   description: Ansible role to deploy jellyfin using docker | ||||
|   galaxy_tags: | ||||
|     - jellyfin | ||||
|     - streaming | ||||
|     - docker | ||||
| @@ -1,25 +0,0 @@ | ||||
| --- | ||||
| - name: Ensure container image '{{ jellyfin_container_image_ref }}' is {{ jellyfin_state }} | ||||
|   community.docker.docker_image: | ||||
|     name: "{{ jellyfin_container_image_ref }}" | ||||
|     state: "{{ jellyfin_state }}" | ||||
|     source: "{{ jellyfin_container_image_source }}" | ||||
|     force_source: "{{ jellyfin_container_image_tag | default(false, true) }}" | ||||
|   register: jellyfin_container_image_pull_result | ||||
|   until: jellyfin_container_image_pull_result is succeeded | ||||
|   retries: 5 | ||||
|   delay: 3 | ||||
|  | ||||
| - name: Ensure container '{{ jellyfin_container_name }}' is {{ jellyfin_container_state }} | ||||
|   community.docker.docker_container: | ||||
|     name: "{{ jellyfin_container_name }}" | ||||
|     image: "{{ jellyfin_container_image_ref }}" | ||||
|     user: "{{ jellyfin_uid }}:{{ jellyfin_gid }}" | ||||
|     labels: "{{ jellyfin_container_labels }}" | ||||
|     volumes: "{{ jellyfin_container_volumes }}" | ||||
|     ports: "{{ jellyfin_container_ports | default(omit, true) }}" | ||||
|     networks: "{{ jellyfin_container_networks | default(omit, true) }}" | ||||
|     network_mode: "{{ jellyfin_container_network_mode }}" | ||||
|     etc_hosts: "{{ jellyfin_container_etc_hosts | default(omit, true) }}" | ||||
|     restart_policy: "{{ jellyfin_container_restart_policy }}" | ||||
|     state: "{{ jellyfin_container_state }}" | ||||
| @@ -1,22 +0,0 @@ | ||||
| --- | ||||
| - name: Ensure container image '{{ jellyfin_container_image_ref }}' is {{ jellyfin_state }} | ||||
|   containers.podman.podman_image: | ||||
|     name: "{{ jellyfin_container_image_ref }}" | ||||
|     state: "{{ jellyfin_state }}" | ||||
|     pull: "{{ (jellyfin_container_image_source == 'pull') | bool }}" | ||||
|     force: "{{ jellyfin_container_image_tag | default(false, true) }}" | ||||
|   register: jellyfin_container_image_pull_result | ||||
|   until: jellyfin_container_image_pull_result is succeeded | ||||
|   retries: 5 | ||||
|   delay: 3 | ||||
|  | ||||
| - name: Ensure container '{{ jellyfin_container_name }}' is {{ jellyfin_container_state }} | ||||
|   containers.podman.podman_container: | ||||
|     name: "{{ jellyfin_container_name }}" | ||||
|     image: "{{ jellyfin_container_image_ref }}" | ||||
|     user: "{{ jellyfin_uid }}:{{ jellyfin_gid }}" | ||||
|     labels: "{{ jellyfin_container_labels }}" | ||||
|     volumes: "{{ jellyfin_container_volumes }}" | ||||
|     network: "{{ jellyfin_container_networks | default(omit, True) }}" | ||||
|     restart_policy: "{{ jellyfin_container_restart_policy }}" | ||||
|     state: "{{ jellyfin_container_state }}" | ||||
| @@ -1,35 +1,40 @@ | ||||
| --- | ||||
| - name: Check if state is valid | ||||
|   ansible.builtin.fail: | ||||
|     msg: >-2 | ||||
|       Unsupported state '{{ jellyfin_state }}'. Supported | ||||
|       states are {{ jellyfin_states | join(', ') }}. | ||||
|   when: jellyfin_state not in jellyfin_states | ||||
|  | ||||
| - name: Check if deployment method is valid | ||||
|   ansible.builtin.fail: | ||||
|     msg: >-2 | ||||
|       Unsupported state '{{ jellyfin_deployment_method }}'. Supported | ||||
|       states are {{ jellyfin_deployment_methods | join(', ') }}. | ||||
|   when: jellyfin_deployment_method not in jellyfin_deployment_methods | ||||
|  | ||||
| - name: Ensure jellyfin user '{{ jellyfin_user }}' is {{ jellyfin_state }} | ||||
|   ansible.builtin.user: | ||||
| - name: Ensure user '{{ jellyfin_user }}' for jellyfin is created | ||||
|   user: | ||||
|     name: "{{ jellyfin_user }}" | ||||
|     state: "{{ jellyfin_state }}" | ||||
|     system: "{{ jellyfin_user_system | default(true, true) }}" | ||||
|     state: present | ||||
|     system: yes | ||||
|   register: jellyfin_user_info | ||||
|  | ||||
| - name: Ensure host directories for jellyfin are {{ jellyfin_state }} | ||||
|   ansible.builtin.file: | ||||
| - name: Ensure host directories for jellyfin exist | ||||
|   file: | ||||
|     path: "{{ item.path }}" | ||||
|     state: >-2 | ||||
|       {{ (jellyfin_state == 'present') | ternary('directory', 'absent') }} | ||||
|     state: directory | ||||
|     owner: "{{ item.owner  | default(jellyfin_uid) }}" | ||||
|     group: "{{ item.group | default(jellyfin_gid) }}" | ||||
|     mode: "{{ item.mode }}" | ||||
|   loop: "{{ jellyfin_host_directories }}" | ||||
|  | ||||
| - name: Ensure jellyfin is deployed using {{ jellyfin_deployment_method }} | ||||
|   ansible.builtin.include_tasks: | ||||
|     file: "deploy-{{ jellyfin_deployment_method }}.yml" | ||||
| - name: Ensure container image for jellyfin is available | ||||
|   docker_image: | ||||
|     name: "{{ jellyfin_container_image_ref }}" | ||||
|     state: present | ||||
|     source: pull | ||||
|     force_source: "{{ jellyfin_container_image_tag | default(false, true) }}" | ||||
|   register: jellyfin_container_image_pull_result | ||||
|   until: jellyfin_container_image_pull_result is succeeded | ||||
|   retries: 5 | ||||
|   delay: 3 | ||||
|  | ||||
| - name: Ensure container '{{ jellyfin_container_name }}' is running | ||||
|   docker_container: | ||||
|     name: "{{ jellyfin_container_name }}" | ||||
|     image: "{{ jellyfin_container_image_ref }}" | ||||
|     user: "{{ jellyfin_uid }}:{{ jellyfin_gid }}" | ||||
|     labels: "{{ jellyfin_container_labels }}" | ||||
|     volumes: "{{ jellyfin_container_volumes }}" | ||||
|     networks: "{{ jellyfin_container_networks | default(omit, True) }}" | ||||
|     network_mode: "{{ jellyfin_container_network_mode }}" | ||||
|     restart_policy: "{{ jellyfin_container_restart_policy }}" | ||||
|     state: started | ||||
|   | ||||
| @@ -1,10 +1,4 @@ | ||||
| --- | ||||
| jellyfin_states: | ||||
|   - present | ||||
|   - absent | ||||
| jellyfin_deployment_methods: | ||||
|   - docker | ||||
|   - podman | ||||
|  | ||||
| jellyfin_container_base_volumes: | ||||
|   - "{{ jellyfin_config_path }}:/config:z" | ||||
|   | ||||
| @@ -1,16 +0,0 @@ | ||||
| # `finallycoffee.services.keycloak` ansible role | ||||
|  | ||||
| Ansible role for deploying keycloak, currently only supports docker. | ||||
|  | ||||
| Migrated from `entropia.sso.keycloak`. | ||||
|  | ||||
| ## Required variables | ||||
|  | ||||
| - `keycloak_database_password` - password for the database user | ||||
| - `keycloak_config_hostname` - public domain of the keycloak server | ||||
|  | ||||
| ## Database configuration | ||||
|  | ||||
| - `keycloak_database_hostname` - hostname of the database server, defaults to `localhost` | ||||
| - `keycloak_database_username` - username to use when connecting to the database server, defaults to `keycloak` | ||||
| - `keycloak_database_database` - name of the database to use, defaults to `keycloak` | ||||
| @@ -1,51 +0,0 @@ | ||||
| --- | ||||
| keycloak_version: "26.1.4" | ||||
| keycloak_container_name: keycloak | ||||
|  | ||||
| keycloak_container_image_upstream_registry: quay.io | ||||
| keycloak_container_image_upstream_namespace: keycloak | ||||
| keycloak_container_image_upstream_name: keycloak | ||||
| keycloak_container_image_upstream: >-2 | ||||
|   {{ | ||||
|     ([ | ||||
|       keycloak_container_image_upstream_registry | default([]), | ||||
|       keycloak_container_image_upstream_namespace | default([]), | ||||
|       keycloak_container_image_upstream_name, | ||||
|     ] | flatten | join('/')) | ||||
|   }} | ||||
| keycloak_container_image_name: "keycloak:{{ keycloak_version }}-custom" | ||||
|  | ||||
| keycloak_container_database_vendor: postgres | ||||
| keycloak_base_path: /opt/keycloak | ||||
| keycloak_container_build_directory: "{{ keycloak_base_path }}/build" | ||||
| keycloak_container_build_jar_directory: providers | ||||
| keycloak_container_build_flags: {} | ||||
| keycloak_provider_jars_directory: "{{ keycloak_base_path }}/providers" | ||||
| keycloak_build_provider_jars_directory: "{{ keycloak_container_build_directory }}/{{ keycloak_container_build_jar_directory }}" | ||||
|  | ||||
| keycloak_database_hostname: localhost | ||||
| keycloak_database_port: 5432 | ||||
| keycloak_database_username: keycloak | ||||
| keycloak_database_password: ~ | ||||
| keycloak_database_database: keycloak | ||||
|  | ||||
| keycloak_container_env: {} | ||||
| keycloak_container_labels: ~ | ||||
| keycloak_container_volumes: ~ | ||||
| keycloak_container_restart_policy: unless-stopped | ||||
| keycloak_container_command: >-2 | ||||
|   start | ||||
|   --db-username {{ keycloak_database_username }} | ||||
|   --db-password {{ keycloak_database_password }} | ||||
|   --db-url jdbc:postgresql://{{ keycloak_database_hostname }}{{ keycloak_database_port | ternary(':' ~ keycloak_database_port, '') }}/{{ keycloak_database_database }} | ||||
|   {{ keycloak_container_extra_start_flags | default([]) | join(' ') }} | ||||
|   --proxy-headers=xforwarded | ||||
|   --hostname {{ keycloak_config_hostname }} | ||||
|   --optimized | ||||
|  | ||||
| keycloak_config_health_enabled: true | ||||
| keycloak_config_metrics_enabled: true | ||||
|  | ||||
| keycloak_config_hostname: localhost | ||||
| keycloak_config_admin_username: admin | ||||
| keycloak_config_admin_password: ~ | ||||
| @@ -1,13 +0,0 @@ | ||||
| --- | ||||
| allow_duplicates: true | ||||
| dependencies: [] | ||||
| galaxy_info: | ||||
|   role_name: keycloak | ||||
|   description: Deploy keycloak, the opensource identity and access management solution | ||||
|   galaxy_tags: | ||||
|     - keycloak | ||||
|     - sso | ||||
|     - oidc | ||||
|     - oauth2 | ||||
|     - iam | ||||
|     - docker | ||||
| @@ -1,72 +0,0 @@ | ||||
| --- | ||||
|  | ||||
| - name: Ensure build directory exists | ||||
|   ansible.builtin.file: | ||||
|     name: "{{ keycloak_container_build_directory }}" | ||||
|     state: directory | ||||
|     recurse: yes | ||||
|     mode: 0700 | ||||
|   tags: | ||||
|     - keycloak-build-container | ||||
|  | ||||
| - name: Ensure provider jars directory exists | ||||
|   ansible.builtin.file: | ||||
|     name: "{{ keycloak_provider_jars_directory }}" | ||||
|     state: directory | ||||
|     mode: 0775 | ||||
|   tags: | ||||
|     - keycloak-build-container | ||||
|  | ||||
| - name: Ensure Dockerfile is templated | ||||
|   ansible.builtin.template: | ||||
|     src: Dockerfile.j2 | ||||
|     dest: "{{ keycloak_container_build_directory }}/Dockerfile" | ||||
|     mode: 0700 | ||||
|   register: keycloak_buildfile_info | ||||
|   tags: | ||||
|     - keycloak-container | ||||
|     - keycloak-build-container | ||||
|  | ||||
| - name: Ensure upstream Keycloak container image '{{ keycloak_container_image_upstream }}:{{ keycloak_version }}' is present | ||||
|   community.docker.docker_image: | ||||
|     name: "{{ keycloak_container_image_upstream }}:{{ keycloak_version }}" | ||||
|     source: pull | ||||
|     state: present | ||||
|   register: keycloak_container_image_upstream_status | ||||
|   tags: | ||||
|     - keycloak-container | ||||
|     - keycloak-build-container | ||||
|  | ||||
| - name: Ensure custom keycloak container image '{{ keycloak_container_image_name }}' is built | ||||
|   community.docker.docker_image: | ||||
|     name: "{{ keycloak_container_image_name }}" | ||||
|     build: | ||||
|       args: | ||||
|         DB_VENDOR: "{{ keycloak_container_database_vendor }}" | ||||
|         KC_ADMIN_PASSWORD: "{{ keycloak_config_admin_password }}" | ||||
|       dockerfile: "{{ keycloak_container_build_directory }}/Dockerfile" | ||||
|       path: "{{ keycloak_container_build_directory }}" | ||||
|     source: build | ||||
|     state: present | ||||
|     force_source: "{{ keycloak_buildfile_info.changed or keycloak_container_image_upstream_status.changed or (keycloak_force_rebuild_container | default(false))}}" | ||||
|   register: keycloak_container_image_status | ||||
|   tags: | ||||
|     - keycloak-container | ||||
|     - keycloak-build-container | ||||
|  | ||||
| - name: Ensure keycloak container is running | ||||
|   community.docker.docker_container: | ||||
|     name: "{{ keycloak_container_name }}" | ||||
|     image: "{{ keycloak_container_image_name }}" | ||||
|     env: "{{ keycloak_container_env | default(omit, true) }}" | ||||
|     ports: "{{ keycloak_container_ports | default(omit, true) }}" | ||||
|     hostname: "{{ keycloak_container_hostname | default(omit) }}" | ||||
|     labels: "{{ keycloak_container_labels | default(omit, true) }}" | ||||
|     volumes: "{{ keycloak_container_volumes | default(omit, true) }}" | ||||
|     restart_policy: "{{ keycloak_container_restart_policy }}" | ||||
|     recreate: "{{ keycloak_container_force_recreate | default(false) or (keycloak_container_image_status.changed if keycloak_container_image_status is defined else false) }}" | ||||
|     etc_hosts: "{{ keycloak_container_etc_hosts | default(omit) }}" | ||||
|     state: started | ||||
|     command: "{{ keycloak_container_command }}" | ||||
|   tags: | ||||
|     - keycloak-container | ||||
| @@ -1,43 +0,0 @@ | ||||
| FROM {{ keycloak_container_image_upstream }}:{{ keycloak_version }} as builder | ||||
|  | ||||
| # Enable health and metrics support | ||||
| ENV KC_HEALTH_ENABLED={{ keycloak_config_health_enabled | ternary('true', 'false') }} | ||||
| ENV KC_METRICS_ENABLED={{ keycloak_config_metrics_enabled | ternary('true', 'false') }} | ||||
|  | ||||
| # Configure a database vendor | ||||
| ARG DB_VENDOR | ||||
| ENV KC_DB=$DB_VENDOR | ||||
|  | ||||
| WORKDIR {{ keycloak_container_working_directory }} | ||||
|  | ||||
| {% if keycloak_container_image_add_local_providers | default(true) %} | ||||
| ADD ./providers/* providers/ | ||||
| {% endif %} | ||||
| # Workaround to set correct mode on jar files | ||||
| USER root | ||||
| RUN chmod -R 0770 providers/* | ||||
| USER keycloak | ||||
|  | ||||
| RUN {{ keycloak_container_working_directory }}/bin/kc.sh --verbose \ | ||||
| {% for argument in keycloak_container_build_flags | dict2items(key_name='flag', value_name='value') %} | ||||
|   --{{- argument['flag'] -}}{{- argument['value'] | default(false, true) | ternary('=' + argument['value'], '') }} \ | ||||
| {% endfor%} | ||||
|   build{% if keycloak_container_build_features | default([]) | length > 0 %} \ | ||||
| {% endif %} | ||||
| {% if keycloak_container_build_features | default([]) | length > 0 %} | ||||
|   --features="{{ keycloak_container_build_features | join(',') }}" | ||||
| {% endif %} | ||||
|  | ||||
|  | ||||
| FROM {{ keycloak_container_image_upstream }}:{{ keycloak_version }} | ||||
| COPY --from=builder {{ keycloak_container_working_directory }}/ {{ keycloak_container_working_directory }}/ | ||||
|  | ||||
| ENV KC_HOSTNAME={{ keycloak_config_hostname }} | ||||
| ENV KEYCLOAK_ADMIN={{ keycloak_config_admin_username }} | ||||
| ARG KC_ADMIN_PASSWORD | ||||
| {% if keycloak_version | split('.') | first | int > 21 %} | ||||
| ENV KEYCLOAK_ADMIN_PASSWORD=$KC_ADMIN_PASSWORD | ||||
| {% else %} | ||||
| ENV KEYCLOAK_PASSWORD=$KC_ADMIN_PASSWORD | ||||
| {% endif %} | ||||
| ENTRYPOINT ["{{ keycloak_container_working_directory }}/bin/kc.sh"] | ||||
| @@ -1,3 +0,0 @@ | ||||
| --- | ||||
|  | ||||
| keycloak_container_working_directory: /opt/keycloak | ||||
							
								
								
									
										29
									
								
								roles/minio/README.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										29
									
								
								roles/minio/README.md
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,29 @@ | ||||
| # `finallycoffee.services.minio` ansible role | ||||
|  | ||||
| ## Overview | ||||
|  | ||||
| This role deploys a [min.io](https://min.io) server (s3-compatible object storage server) | ||||
| using the official docker container image. | ||||
|  | ||||
| ## Configuration | ||||
|  | ||||
| The role requires setting the password for the `root` user (name can be changed by | ||||
| setting `minio_root_username`) in `minio_root_password`. That user has full control | ||||
| over the minio-server instance. | ||||
|  | ||||
| ### Useful config hints | ||||
|  | ||||
| Most configuration is done by setting environment variables in | ||||
| `minio_container_extra_env`, for example: | ||||
|  | ||||
| ```yaml | ||||
| minio_container_extra_env: | ||||
|   # disable the "console" web browser UI | ||||
|   MINIO_BROWSER: "off" | ||||
|   # enable public prometheus metrics on `/minio/v2/metrics/cluster` | ||||
|   MINIO_PROMETHEUS_AUTH_TYPE: public | ||||
| ``` | ||||
|  | ||||
| When serving minio (or any s3-compatible server) on a "subfolder", | ||||
| see https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTRedirect.html | ||||
| and https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html | ||||
							
								
								
									
										40
									
								
								roles/minio/defaults/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										40
									
								
								roles/minio/defaults/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,40 @@ | ||||
| --- | ||||
|  | ||||
| minio_user: ~ | ||||
| minio_data_path: /opt/minio | ||||
|  | ||||
| minio_create_user: false | ||||
| minio_manage_host_filesystem: false | ||||
|  | ||||
| minio_root_username: root | ||||
| minio_root_password: ~ | ||||
|  | ||||
| minio_container_name: minio | ||||
| minio_container_image_name: docker.io/minio/minio | ||||
| minio_container_image_tag: latest | ||||
| minio_container_image: "{{ minio_container_image_name }}:{{ minio_container_image_tag }}" | ||||
| minio_container_networks: [] | ||||
| minio_container_ports: [] | ||||
|  | ||||
| minio_container_base_volumes: | ||||
|   - "{{ minio_data_path }}:{{ minio_container_data_path }}:z" | ||||
| minio_container_extra_volumes: [] | ||||
|  | ||||
| minio_container_base_env: | ||||
|   MINIO_ROOT_USER: "{{ minio_root_username }}" | ||||
|   MINIO_ROOT_PASSWORD: "{{ minio_root_password }}" | ||||
| minio_container_extra_env: {} | ||||
|  | ||||
| minio_container_labels: {} | ||||
|  | ||||
| minio_container_command: | ||||
|   - "server" | ||||
|   - "{{ minio_container_data_path }}" | ||||
|   - "--console-address \":{{ minio_container_listen_port_console }}\"" | ||||
| minio_container_restart_policy: "unless-stopped" | ||||
| minio_container_image_force_source: "{{ (minio_container_image_tag == 'latest')|bool }}" | ||||
|  | ||||
| minio_container_listen_port_api: 9000 | ||||
| minio_container_listen_port_console: 8900 | ||||
|  | ||||
| minio_container_data_path: /storage | ||||
							
								
								
									
										37
									
								
								roles/minio/tasks/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										37
									
								
								roles/minio/tasks/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,37 @@ | ||||
| --- | ||||
|  | ||||
| - name: Ensure minio run user is present | ||||
|   user: | ||||
|     name: "{{ minio_user }}" | ||||
|     state: present | ||||
|     system: yes | ||||
|   when: minio_create_user | ||||
|  | ||||
| - name: Ensure filesystem mounts ({{ minio_data_path }}) for container volumes are present | ||||
|   file: | ||||
|     path: "{{ minio_data_path }}" | ||||
|     state: directory | ||||
|     owner: "{{ minio_user|default(omit, True) }}" | ||||
|     group: "{{ minio_user|default(omit, True) }}" | ||||
|   when: minio_manage_host_filesystem | ||||
|  | ||||
| - name: Ensure container image for minio is present | ||||
|   community.docker.docker_image: | ||||
|     name: "{{ minio_container_image }}" | ||||
|     state: present | ||||
|     source: pull | ||||
|     force_source: "{{ minio_container_image_force_source }}" | ||||
|  | ||||
| - name: Ensure container {{ minio_container_name }} is running | ||||
|   docker_container: | ||||
|     name: "{{ minio_container_name }}" | ||||
|     image: "{{ minio_container_image }}" | ||||
|     volumes: "{{ minio_container_volumes }}" | ||||
|     env: "{{ minio_container_env }}" | ||||
|     labels: "{{ minio_container_labels }}" | ||||
|     networks: "{{ minio_container_networks }}" | ||||
|     ports: "{{ minio_container_ports }}" | ||||
|     user: "{{ minio_user|default(omit, True) }}" | ||||
|     command: "{{ minio_container_command }}" | ||||
|     restart_policy: "{{ minio_container_restart_policy }}" | ||||
|     state: started | ||||
							
								
								
									
										5
									
								
								roles/minio/vars/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										5
									
								
								roles/minio/vars/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,5 @@ | ||||
| --- | ||||
|  | ||||
| minio_container_volumes: "{{ minio_container_base_volumes + minio_container_extra_volumes }}" | ||||
|  | ||||
| minio_container_env: "{{ minio_container_base_env | combine(minio_container_extra_env) }}" | ||||
							
								
								
									
										33
									
								
								roles/nginx/defaults/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										33
									
								
								roles/nginx/defaults/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,33 @@ | ||||
| --- | ||||
|  | ||||
| nginx_version: "1.25.1" | ||||
| nginx_flavour: alpine | ||||
| nginx_base_path: /opt/nginx | ||||
| nginx_config_file: "{{ nginx_base_path }}/nginx.conf" | ||||
|  | ||||
| nginx_container_name: nginx | ||||
| nginx_container_image_reference: >- | ||||
|   {{ | ||||
|     nginx_container_image_repository | ||||
|     + ':' + (nginx_container_image_tag | ||||
|       | default(nginx_version | ||||
|       + (('-' + nginx_flavour) if nginx_flavour is defined else ''), true)) | ||||
|   }} | ||||
| nginx_container_image_repository: >- | ||||
|   {{ | ||||
|     ( | ||||
|       container_registries[nginx_container_image_registry] | ||||
|       | default(nginx_container_image_registry) | ||||
|     ) | ||||
|     + '/' | ||||
|     + nginx_container_image_namespace | default('') | ||||
|     + nginx_container_image_name | ||||
|   }} | ||||
| nginx_container_image_registry: "docker.io" | ||||
| nginx_container_image_name: "nginx" | ||||
| nginx_container_image_tag: ~ | ||||
|  | ||||
| nginx_container_restart_policy: "unless-stopped" | ||||
| nginx_container_volumes: | ||||
|   - "{{ nginx_config_file }}:/etc/nginx/conf.d/nginx.conf:ro" | ||||
|   | ||||
							
								
								
									
										8
									
								
								roles/nginx/handlers/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										8
									
								
								roles/nginx/handlers/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,8 @@ | ||||
| --- | ||||
|  | ||||
| - name: Ensure nginx container '{{ nginx_container_name }}' is restarted | ||||
|   community.docker.docker_container: | ||||
|     name: "{{ nginx_container_name }}" | ||||
|     state: started | ||||
|     restart: true | ||||
|   listen: restart-nginx | ||||
							
								
								
									
										37
									
								
								roles/nginx/tasks/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										37
									
								
								roles/nginx/tasks/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,37 @@ | ||||
| --- | ||||
|  | ||||
| - name: Ensure base path '{{ nginx_base_path }}' exists | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ nginx_base_path }}" | ||||
|     state: directory | ||||
|     mode: 0755 | ||||
|  | ||||
| - name: Ensure nginx config file is templated | ||||
|   ansible.builtin.copy: | ||||
|     dest: "{{ nginx_config_file }}" | ||||
|     content: "{{ nginx_config }}" | ||||
|     mode: 0640 | ||||
|   notify: | ||||
|     - restart-nginx | ||||
|  | ||||
| - name: Ensure docker container image is present | ||||
|   community.docker.docker_image: | ||||
|     name: "{{ nginx_container_image_reference }}" | ||||
|     state: present | ||||
|     source: pull | ||||
|     force_source: "{{ nginx_container_image_tag is defined and nginx_container_image_tag | string != '' }}" | ||||
|  | ||||
| - name: Ensure docker container '{{ nginx_container_name }}' is running | ||||
|   community.docker.docker_container: | ||||
|     name: "{{ nginx_container_name }}" | ||||
|     image: "{{ nginx_container_image_reference }}" | ||||
|     env: "{{ nginx_container_env | default(omit, true) }}" | ||||
|     user: "{{ nginx_container_user | default(omit, true) }}" | ||||
|     ports: "{{ nginx_container_ports | default(omit, true) }}" | ||||
|     labels: "{{ nginx_container_labels | default(omit, true) }}" | ||||
|     volumes: "{{ nginx_container_volumes | default(omit, true) }}" | ||||
|     etc_hosts: "{{ nginx_container_etc_hosts | default(omit, true) }}" | ||||
|     networks: "{{ nginx_container_networks | default(omit, true) }}" | ||||
|     purge_networks: "{{ nginx_container_purge_networks | default(omit, true) }}" | ||||
|     restart_policy: "{{ nginx_container_restart_policy }}" | ||||
|     state: started | ||||
| @@ -1,21 +0,0 @@ | ||||
| # `finallycoffee.services.openproject` ansible role | ||||
|  | ||||
| Deploys [openproject](https://www.openproject.org/) using docker-compose. | ||||
|  | ||||
| ## Configuration | ||||
|  | ||||
| To set configuration variables for OpenProject, set them in `openproject_compose_overrides`: | ||||
| ```yaml | ||||
| openproject_compose_overrides: | ||||
|   version: "3.7" | ||||
|   services: | ||||
|     proxy: | ||||
|        [...] | ||||
|   volumes: | ||||
|     pgdata: | ||||
|       driver: local | ||||
|       driver_opts: | ||||
|         o: bind | ||||
|         type: none | ||||
|         device: /var/lib/postgresql | ||||
| ``` | ||||
| @@ -1,11 +0,0 @@ | ||||
| --- | ||||
| openproject_base_path: "/opt/openproject" | ||||
|  | ||||
| openproject_upstream_git_url: "https://github.com/opf/openproject-deploy.git" | ||||
| openproject_upstream_git_branch: "stable/14" | ||||
|  | ||||
| openproject_compose_project_path: "{{ openproject_base_path }}" | ||||
| openproject_compose_project_name: "openproject" | ||||
| openproject_compose_project_env_file: "{{ openproject_compose_project_path }}/.env" | ||||
| openproject_compose_project_override_file: "{{ openproject_compose_project_path }}/docker-compose.override.yml" | ||||
| openproject_compose_project_env: {} | ||||
| @@ -1,38 +0,0 @@ | ||||
| --- | ||||
| - name: Ensure base directory '{{ openproject_base_path }}' is present | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ openproject_base_path }}" | ||||
|     state: directory | ||||
|  | ||||
| - name: Ensure upstream repository is cloned | ||||
|   ansible.builtin.git: | ||||
|     dest: "{{ openproject_base_path }}" | ||||
|     repo: "{{ openproject_upstream_git_url }}" | ||||
|     version: "{{ openproject_upstream_git_branch }}" | ||||
|     clone: true | ||||
|     depth: 1 | ||||
|  | ||||
| - name: Ensure environment is configured | ||||
|   ansible.builtin.lineinfile: | ||||
|     line: "{{ item.key}}={{ item.value}}" | ||||
|     path: "{{ openproject_compose_project_env_file }}" | ||||
|     state: present | ||||
|     create: true | ||||
|   loop: "{{ openproject_compose_project_env | dict2items(key_name='key', value_name='value') }}" | ||||
|  | ||||
| - name: Ensure docker compose overrides are set | ||||
|   ansible.builtin.copy: | ||||
|     dest: "{{ openproject_compose_project_override_file }}" | ||||
|     content: "{{ openproject_compose_overrides | default({}) | to_nice_yaml }}" | ||||
|  | ||||
| - name: Ensure containers are pulled | ||||
|   community.docker.docker_compose_v2: | ||||
|     project_src: "{{ openproject_compose_project_path }}" | ||||
|     project_name: "{{ openproject_compose_project_name }}" | ||||
|     pull: "missing" | ||||
|  | ||||
| - name: Ensure services are running | ||||
|   community.docker.docker_compose_v2: | ||||
|     project_src: "{{ openproject_compose_project_path }}" | ||||
|     project_name: "{{ openproject_compose_project_name }}" | ||||
|     state: "present" | ||||
							
								
								
									
										77
									
								
								roles/restic/README.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										77
									
								
								roles/restic/README.md
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,77 @@ | ||||
| # `finallycoffee.services.restic` | ||||
|  | ||||
| Ansible role for backing up data using `restic`, utilizing `systemd` timers for scheduling. | ||||
|  | ||||
| ## Overview | ||||
|  | ||||
| As restic encrypts the data before storing it, the `restic_repo_password` needs | ||||
| to be populated with a strong key, and saved accordingly as only this key can | ||||
| be used to decrypt the data for a restore! | ||||
|  | ||||
| ### Backends | ||||
|  | ||||
| #### S3 Backend | ||||
|  | ||||
| To use a `s3`-compatible backend like AWS buckets or minio, both `restic_s3_key_id` | ||||
| and `restic_s3_access_key` need to be populated, and the `restic_repo_url` has the | ||||
| format `s3:https://my.s3.endpoint:port/bucket-name`. | ||||
|  | ||||
| #### SFTP Backend | ||||
|  | ||||
| Using the `sftp` backend requires the configured `restic_user` to be able to | ||||
| authenticate to the configured SFTP-Server using password-less methods like | ||||
| publickey-authentication. The `restic_repo_url` then follows the format | ||||
| `sftp:{user}@{server}:/my-restic-repository` (or without leading `/` for relative | ||||
| paths to the `{user}`'s home directory). | ||||
|  | ||||
| ### Backing up data | ||||
|  | ||||
| A job name like `$service-postgres` or similar needs to be set in `restic_job_name`, | ||||
| which is used for naming the `systemd` units, their syslog identifiers etc. | ||||
|  | ||||
| If backing up filesystem locations, the paths need to be specified in | ||||
| `restic_backup_paths` as lists of strings representing absolute filesystem | ||||
| locations. | ||||
|  | ||||
| If backing up f.ex. database or other data which is generating backups using | ||||
| a command like `pg_dump`, use `restic_backup_stdin_command` (which needs to output | ||||
| to `stdout`) in conjunction with `restic_backup_stdin_command_filename` to name | ||||
| the resulting output (required). | ||||
|  | ||||
| ### Policy | ||||
|  | ||||
| The backup policy can be adjusted by overriding the `restic_policy_keep_*` | ||||
| variables, with the defaults being: | ||||
|  | ||||
| ```yaml | ||||
| restic_policy_keep_all_within: 1d | ||||
| restic_policy_keep_hourly: 6 | ||||
| restic_policy_keep_daily: 2 | ||||
| restic_policy_keep_weekly: 7 | ||||
| restic_policy_keep_monthly: 4 | ||||
| restic_policy_backup_frequency: hourly | ||||
| ``` | ||||
|  | ||||
| **Note:** `restic_policy_backup_frequency` must conform to `systemd`'s | ||||
| `OnCalendar` syntax, which can be checked using `systemd-analyze calendar $x`. | ||||
|  | ||||
| ## Role behaviour | ||||
|  | ||||
| Per default, when the systemd unit for a job changes, the job is not immediately | ||||
| started. This can be overridden using `restic_start_job_on_unit_change: true`, | ||||
| which will immediately start the backup job if its configuration changed. | ||||
|  | ||||
| The systemd unit runs with `restic_user`, which is root by default, guaranteeing | ||||
| that filesystem paths are always readable. The `restic_user` can be overridden, | ||||
| but care needs to be taken to ensure the user has permission to read all the | ||||
| provided filesystem paths / the backup command may be executed by the user. | ||||
|  | ||||
| If ansible should create the user, set `restic_create_user` to `true`, which | ||||
| will attempt to create the `restic_user` as a system user. | ||||
|  | ||||
| ### Installing | ||||
|  | ||||
| For Debian and RedHat, the role attempts to install restic using the default | ||||
| package manager's ansible module (apt/dnf). For other distributions, the generic | ||||
| `package` module tries to install `restic_package_name` (default: `restic`), | ||||
| which can be overridden if needed. | ||||
							
								
								
									
										37
									
								
								roles/restic/defaults/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										37
									
								
								roles/restic/defaults/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,37 @@ | ||||
| --- | ||||
|  | ||||
| restic_repo_url: ~ | ||||
| restic_repo_password: ~ | ||||
| restic_s3_key_id: ~ | ||||
| restic_s3_access_key: ~ | ||||
|  | ||||
| restic_backup_paths: [] | ||||
| restic_backup_stdin_command: ~ | ||||
| restic_backup_stdin_command_filename: ~ | ||||
|  | ||||
| restic_policy_keep_all_within: 1d | ||||
| restic_policy_keep_hourly: 6 | ||||
| restic_policy_keep_daily: 2 | ||||
| restic_policy_keep_weekly: 7 | ||||
| restic_policy_keep_monthly: 4 | ||||
| restic_policy_backup_frequency: hourly | ||||
|  | ||||
| restic_policy: | ||||
|   keep_within: "{{ restic_policy_keep_all_within }}" | ||||
|   hourly: "{{ restic_policy_keep_hourly }}" | ||||
|   daily: "{{ restic_policy_keep_daily }}" | ||||
|   weekly: "{{ restic_policy_keep_weekly }}" | ||||
|   monthly: "{{ restic_policy_keep_monthly }}" | ||||
|   frequency: "{{ restic_policy_backup_frequency }}" | ||||
|  | ||||
| restic_user: root | ||||
| restic_create_user: false | ||||
| restic_start_job_on_unit_change: false | ||||
|  | ||||
| restic_job_name: ~ | ||||
| restic_job_description: "Restic backup job for {{ restic_job_name }}" | ||||
| restic_systemd_unit_naming_scheme: "restic.{{ restic_job_name }}" | ||||
| restic_systemd_working_directory: /tmp | ||||
| restic_systemd_syslog_identifier: "restic-{{ restic_job_name }}" | ||||
|  | ||||
| restic_package_name: restic | ||||
							
								
								
									
										13
									
								
								roles/restic/handlers/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										13
									
								
								roles/restic/handlers/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,13 @@ | ||||
| --- | ||||
|  | ||||
| - name: Ensure system daemon is reloaded | ||||
|   listen: reload-systemd | ||||
|   systemd: | ||||
|     daemon_reload: true | ||||
|  | ||||
| - name: Ensure systemd service for '{{ restic_job_name }}' is started immediately | ||||
|   listen: trigger-restic | ||||
|   systemd: | ||||
|     name: "{{ restic_systemd_unit_naming_scheme }}.service" | ||||
|     state: started | ||||
|   when: restic_start_job_on_unit_change | ||||
							
								
								
									
										77
									
								
								roles/restic/tasks/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										77
									
								
								roles/restic/tasks/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,77 @@ | ||||
| --- | ||||
|  | ||||
| - name: Ensure {{ restic_user }} system user exists | ||||
|   user: | ||||
|     name: "{{ restic_user }}" | ||||
|     state: present | ||||
|     system: true | ||||
|   when: restic_create_user | ||||
|  | ||||
| - name: Ensure either backup_paths or backup_stdin_command is populated | ||||
|   when: restic_backup_paths|length > 0 and restic_backup_stdin_command | ||||
|   fail: | ||||
|     msg: "Setting both `restic_backup_paths` and `restic_backup_stdin_command` is not supported" | ||||
|  | ||||
| - name: Ensure a filename for stdin_command backup is given | ||||
|   when: restic_backup_stdin_command and not restic_backup_stdin_command_filename | ||||
|   fail: | ||||
|     msg: "`restic_backup_stdin_command` was set but no filename for the resulting output was supplied in `restic_backup_stdin_command_filename`" | ||||
|  | ||||
| - name: Ensure backup frequency adheres to systemd's OnCalendar syntax | ||||
|   command: | ||||
|     cmd: "systemd-analyze calendar {{ restic_policy.frequency }}" | ||||
|   register: systemd_calender_parse_res | ||||
|   failed_when: systemd_calender_parse_res.rc != 0 | ||||
|   changed_when: false | ||||
|  | ||||
| - name: Ensure restic is installed | ||||
|   block: | ||||
|     - name: Ensure restic is installed via apt | ||||
|       apt: | ||||
|         package: restic | ||||
|         state: latest | ||||
|       when: ansible_os_family == 'Debian' | ||||
|     - name: Ensure restic is installed via dnf | ||||
|       dnf: | ||||
|         name: restic | ||||
|         state: latest | ||||
|       when: ansible_os_family == 'RedHat' | ||||
|     - name: Ensure restic is installed using the auto-detected package-manager | ||||
|       package: | ||||
|         name: "{{ restic_package_name }}" | ||||
|         state: present | ||||
|       when: ansible_os_family not in ['RedHat', 'Debian'] | ||||
|  | ||||
| - name: Ensure systemd service file for '{{ restic_job_name }}' is templated | ||||
|   template: | ||||
|     dest: "/etc/systemd/system/{{ restic_systemd_unit_naming_scheme }}.service" | ||||
|     src: restic.service.j2 | ||||
|     owner: root | ||||
|     group: root | ||||
|     mode: 0640 | ||||
|   notify: | ||||
|     - reload-systemd | ||||
|     - trigger-restic | ||||
|  | ||||
| - name: Ensure systemd service file for '{{ restic_job_name }}' is templated | ||||
|   template: | ||||
|     dest: "/etc/systemd/system/{{ restic_systemd_unit_naming_scheme }}.timer" | ||||
|     src: restic.timer.j2 | ||||
|     owner: root | ||||
|     group: root | ||||
|     mode: 0640 | ||||
|   notify: | ||||
|     - reload-systemd | ||||
|  | ||||
| - name: Flush handlers to ensure systemd knows about '{{ restic_job_name }}' | ||||
|   meta: flush_handlers | ||||
|  | ||||
| - name: Ensure systemd timer for '{{ restic_job_name }}' is activated | ||||
|   systemd: | ||||
|     name: "{{ restic_systemd_unit_naming_scheme }}.timer" | ||||
|     enabled: true | ||||
|  | ||||
| - name: Ensure systemd timer for '{{ restic_job_name }}' is started | ||||
|   systemd: | ||||
|     name: "{{ restic_systemd_unit_naming_scheme }}.timer" | ||||
|     state: started | ||||
							
								
								
									
										28
									
								
								roles/restic/templates/restic.service.j2
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										28
									
								
								roles/restic/templates/restic.service.j2
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,28 @@ | ||||
| [Unit] | ||||
| Description={{ restic_job_description }} | ||||
|  | ||||
| [Service] | ||||
| Type=oneshot | ||||
| User={{ restic_user }} | ||||
| WorkingDirectory={{ restic_systemd_working_directory }} | ||||
| SyslogIdentifier={{ restic_systemd_syslog_identifier }} | ||||
|  | ||||
| Environment=RESTIC_REPOSITORY={{ restic_repo_url }} | ||||
| Environment=RESTIC_PASSWORD={{ restic_repo_password }} | ||||
| {% if restic_s3_key_id and restic_s3_access_key %} | ||||
| Environment=AWS_ACCESS_KEY_ID={{ restic_s3_key_id }} | ||||
| Environment=AWS_SECRET_ACCESS_KEY={{ restic_s3_access_key }} | ||||
| {% endif %} | ||||
|  | ||||
| ExecStartPre=-/bin/sh -c '/usr/bin/restic snapshots || /usr/bin/restic init' | ||||
| {% if restic_backup_stdin_command %} | ||||
| ExecStart=/bin/sh -c '{{ restic_backup_stdin_command }} | /usr/bin/restic backup --verbose --stdin --stdin-filename {{ restic_backup_stdin_command_filename }}' | ||||
| {% else %} | ||||
| ExecStart=/usr/bin/restic --verbose backup {{ restic_backup_paths | join(' ') }} | ||||
| {% endif %} | ||||
| ExecStartPost=/usr/bin/restic forget --prune --keep-within={{ restic_policy.keep_within }} --keep-hourly={{ restic_policy.hourly }} --keep-daily={{ restic_policy.daily }} --keep-weekly={{ restic_policy.weekly }} --keep-monthly={{ restic_policy.monthly }} | ||||
| ExecStartPost=-/usr/bin/restic snapshots | ||||
| ExecStartPost=/usr/bin/restic check | ||||
|  | ||||
| [Install] | ||||
| WantedBy=multi-user.target | ||||
							
								
								
									
										10
									
								
								roles/restic/templates/restic.timer.j2
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										10
									
								
								roles/restic/templates/restic.timer.j2
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,10 @@ | ||||
| [Unit] | ||||
| Description=Run {{ restic_job_name }} | ||||
|  | ||||
| [Timer] | ||||
| OnCalendar={{ restic_policy.frequency }} | ||||
| Persistent=True | ||||
| Unit={{ restic_systemd_unit_naming_scheme }}.service | ||||
|  | ||||
| [Install] | ||||
| WantedBy=timers.target | ||||
| @@ -1,46 +0,0 @@ | ||||
| # `finallycoffee.services.snipe_it` ansible role | ||||
|  | ||||
| [Snipe-IT](https://snipeitapp.com/) is an open-source asset management with | ||||
| a powerful JSON-REST API. This ansible role deploys and configures Snipe-IT. | ||||
|  | ||||
| ## Requirements | ||||
|  | ||||
| Snipe-IT requires a MySQL-Database like MariaDB and a working email service | ||||
| for sending email. For installing and configuring MariaDB, see | ||||
| [`finallycoffee.base.mariadb`](https://galaxy.ansible.com/ui/repo/published/finallycoffee/base/content/role/mariadb/). | ||||
|  | ||||
| ## Configuration | ||||
|  | ||||
| Required variables to set are: | ||||
|  | ||||
| - `snipe_it_domain` - domain name of the snipe-it instance | ||||
| - `snipe_it_config_app_url` - URL where snipe-it will be reachable including protocol and port | ||||
| - `snipe_it_config_app_key` - Laravel application key | ||||
|  | ||||
| ### Database configuration | ||||
|  | ||||
| All (database) options from the upstream laravel `.env` file are available | ||||
| under the `snipe_it_config_db_*` prefix. Configure a database as follows: | ||||
| ``` | ||||
| snipe_it_config_db_host: localhost # defaults to localhost | ||||
| snipe_it_config_db_port: "3306" # defaults to 3306 | ||||
| snipe_it_config_db_database: my_snipe_db_name # defaults to 'snipeit' | ||||
| snipe_it_config_db_username: my_snipe_db_user # defaults to 'snipeit' | ||||
| snipe_it_config_db_password: my_snipe_db_password | ||||
| # Set this if the database is shared with | ||||
| # other applications. defaults to not set | ||||
| snipe_it_config_db_prefix: snipe_ | ||||
| ``` | ||||
|  | ||||
| ### Email configuration | ||||
|  | ||||
| Configuring an email server is mandatory. An example is provided below: | ||||
| ```yaml | ||||
| snipe_it_config_mail_host: smtp.example.com | ||||
| snipe_it_config_mail_username: snipe_user@snipe.example.com | ||||
| snipe_it_config_mail_password: i_want_to_be_strong_and_long | ||||
| snipe_it_config_mail_from_addr: "noreply@snipe.example.com" | ||||
| snipe_it_config_mail_from_name: "Example.com SnipeIT instance" | ||||
| ``` | ||||
|  | ||||
| The default smtp port is `587` and can be set in `snipe_it_config_mail_port`. | ||||
| @@ -1,131 +0,0 @@ | ||||
| --- | ||||
| snipe_it_config_app_version: "v{{ snipe_it_version }}" | ||||
| snipe_it_config_app_port: 8000 | ||||
| snipe_it_config_app_env: "production" | ||||
| snipe_it_config_app_debug: false | ||||
| snipe_it_config_app_key: ~ | ||||
| snipe_it_config_app_url: "http://localhost:{{ snipe_it_config_app_port }}" | ||||
| snipe_it_config_app_timezone: UTC | ||||
| snipe_it_config_app_locale: en-US | ||||
| snipe_it_config_app_locked: false | ||||
| snipe_it_config_app_cipher: "AES-256-GCM" | ||||
| snipe_it_config_app_force_tls: false | ||||
| snipe_it_config_app_trusted_proxies: | ||||
|   - '192.168.0.0/16' | ||||
|   - '172.16.0.0/12' | ||||
|   - '10.0.0.0/8' | ||||
|  | ||||
| snipe_it_config_db_connection: mysql | ||||
| snipe_it_config_db_host: localhost | ||||
| snipe_it_config_db_port: "3306" | ||||
| snipe_it_config_db_database: snipeit | ||||
| snipe_it_config_db_username: snipeit | ||||
| snipe_it_config_db_password: ~ | ||||
| snipe_it_config_db_prefix: ~ | ||||
| snipe_it_config_db_dump_path: /usr/bin/ | ||||
| snipe_it_config_db_charset: utf8mb4 | ||||
| snipe_it_config_db_collation: utf8mb4_unicode_ci | ||||
| snipe_it_config_db_ssl: false | ||||
| snipe_it_config_db_ssl_is_paas: false | ||||
| snipe_it_config_db_ssl_key_path: ~ | ||||
| snipe_it_config_db_ssl_cert_path: ~ | ||||
| snipe_it_config_db_ssl_ca_path: ~ | ||||
| snipe_it_config_db_ssl_cipher: ~ | ||||
| snipe_it_config_db_ssl_verify_server: ~ | ||||
|  | ||||
| snipe_it_config_mail_mailer: smtp | ||||
| snipe_it_config_mail_host: ~ | ||||
| snipe_it_config_mail_port: 587 | ||||
| snipe_it_config_mail_username: ~ | ||||
| snipe_it_config_mail_password: ~ | ||||
| snipe_it_config_mail_tls_verify_peer: true | ||||
| snipe_it_config_mail_from_addr: ~ | ||||
| snipe_it_config_mail_from_name: ~ | ||||
| snipe_it_config_mail_replyto_addr: "{{ snipe_it_config_mail_from_addr }}" | ||||
| snipe_it_config_mail_replyto_name: "{{ snipe_it_config_mail_from_name }}" | ||||
| snipe_it_config_mail_auto_embed_method: attachment | ||||
| snipe_it_config_mail_backup_notification_driver: ~ | ||||
| snipe_it_config_mail_backup_notification_address: ~ | ||||
|  | ||||
| snipe_it_config_private_filesystem_disk: "local" | ||||
| snipe_it_config_public_filesystem_disk: "local_public" | ||||
| snipe_it_config_allow_backup_delete: false | ||||
| snipe_it_config_allow_data_purge: false | ||||
| snipe_it_config_image_lib: 'gd' | ||||
|  | ||||
| snipe_it_config_log_channel: 'stderr' | ||||
| snipe_it_config_log_max_days: 10 | ||||
|  | ||||
| snipe_it_config_cookie_name: "_snipe_session" | ||||
| snipe_it_config_cookie_domain: "{{ snipe_it_domain }}" | ||||
| snipe_it_config_secure_cookies: true | ||||
|  | ||||
| snipe_it_config_session_driver: file | ||||
| snipe_it_config_session_lifetime: 12000 | ||||
| snipe_it_config_cache_driver: file | ||||
| snipe_it_config_cache_prefix: snipeit | ||||
| snipe_it_config_queue_driver: file | ||||
|  | ||||
| snipe_it_base_config: | ||||
|   APP_VERSION: "{{ snipe_it_config_app_version }}" | ||||
|   APP_PORT: "{{ snipe_it_config_app_port }}" | ||||
|   APP_ENV: "{{ snipe_it_config_app_env }}" | ||||
|   APP_DEBUG: "{{ snipe_it_config_app_debug }}" | ||||
|   APP_KEY: "{{ snipe_it_config_app_key }}" | ||||
|   APP_URL: "{{ snipe_it_config_app_url }}" | ||||
|   APP_TIMEZONE: "{{ snipe_it_config_app_timezone }}" | ||||
|   APP_LOCALE: "{{ snipe_it_config_app_locale }}" | ||||
|   APP_LOCKED: "{{ snipe_it_config_app_locked }}" | ||||
|   APP_CIPHER: "{{ snipe_it_config_app_cipher }}" | ||||
|   APP_FORCE_TLS: "{{ snipe_it_config_app_force_tls }}" | ||||
|   APP_TRUSTED_PROXIES: "{{ snipe_it_config_app_trusted_proxies | join(',') }}" | ||||
|   DB_CONNECTION: "{{ snipe_it_config_db_connection }}" | ||||
|   DB_HOST: "{{ snipe_it_config_db_host }}" | ||||
|   DB_PORT: "{{ snipe_it_config_db_port }}" | ||||
|   DB_DATABASE: "{{ snipe_it_config_db_database }}" | ||||
|   DB_USERNAME: "{{ snipe_it_config_db_username }}" | ||||
|   DB_PASSWORD: "{{ snipe_it_config_db_password }}" | ||||
|   DB_PREFIX: "{{ snipe_it_config_db_prefix | default('null', true) }}" | ||||
|   DB_DUMP_PATH: "{{ snipe_it_config_db_dump_path }}" | ||||
|   DB_CHARSET: "{{ snipe_it_config_db_charset }}" | ||||
|   DB_COLLATION: "{{ snipe_it_config_db_collation }}" | ||||
|   DB_SSL: "{{ snipe_it_config_db_ssl }}" | ||||
|   DB_SSL_IS_PAAS: "{{ snipe_it_config_db_ssl_is_paas }}" | ||||
|   DB_SSL_KEY_PATH: "{{ snipe_it_config_db_ssl_key_path | default('null', true) }}" | ||||
|   DB_SSL_CERT_PATH: "{{ snipe_it_config_db_ssl_cert_path | default('null', true) }}" | ||||
|   DB_SSL_CA_PATH: "{{ snipe_it_config_db_ssl_ca_path | default('null', true) }}" | ||||
|   DB_SSL_CIPHER: "{{ snipe_it_config_db_ssl_cipher | default('null', true) }}" | ||||
|   DB_SSL_VERIFY_SERVER: "{{ snipe_it_config_db_ssl_verify_server | default('null', true) }}" | ||||
|   MAIL_MAILER: "{{ snipe_it_config_mail_mailer }}" | ||||
|   MAIL_HOST: "{{ snipe_it_config_mail_host }}" | ||||
|   MAIL_PORT: "{{ snipe_it_config_mail_port }}" | ||||
|   MAIL_USERNAME: "{{ snipe_it_config_mail_username }}" | ||||
|   MAIL_PASSWORD: "{{ snipe_it_config_mail_password }}" | ||||
|   MAIL_TLS_VERIFY_PEER: "{{ snipe_it_config_mail_tls_verify_peer }}" | ||||
|   MAIL_FROM_ADDR: "{{ snipe_it_config_mail_from_addr | default('null', true) }}" | ||||
|   MAIL_FROM_NAME: "{{ snipe_it_config_mail_from_name | default('null', true) }}" | ||||
|   MAIL_REPLYTO_ADDR: "{{ snipe_it_config_mail_replyto_addr | default('null', true) }}" | ||||
|   MAIL_REPLYTO_NAME: "{{ snipe_it_config_mail_replyto_name | default('null', true) }}" | ||||
|   MAIL_AUTO_EMBED_METHOD: "{{ snipe_it_config_mail_auto_embed_method }}" | ||||
|   MAIL_BACKUP_NOTIFICATION_DRIVER: "{{ snipe_it_config_mail_backup_notification_driver }}" | ||||
|   MAIL_BACKUP_NOTIFICATION_ADDRESS: "{{ snipe_it_config_mail_backup_notification_address }}" | ||||
|   SESSION_DRIVER: "{{ snipe_it_config_session_driver }}" | ||||
|   SESSION_LIFETIME: "{{ snipe_it_config_session_lifetime }}" | ||||
|   CACHE_DRIVER: "{{ snipe_it_config_cache_driver }}" | ||||
|   CACHE_PREFIX: "{{ snipe_it_config_cache_prefix }}" | ||||
|   QUEUE_DRIVER: "{{ snipe_it_config_queue_driver }}" | ||||
|   PRIVATE_FILESYSTEM_DISK: "{{ snipe_it_config_private_filesystem_disk }}" | ||||
|   PUBLIC_FILESYSTEM_DISK: "{{ snipe_it_config_public_filesystem_disk }}" | ||||
|   ALLOW_BACKUP_DELETE: "{{ snipe_it_config_allow_backup_delete }}" | ||||
|   ALLOW_DATA_PURGE: "{{ snipe_it_config_allow_data_purge }}" | ||||
|   IMAGE_LIB: "{{ snipe_it_config_image_lib }}" | ||||
|   LOG_CHANNEL: "{{ snipe_it_config_log_channel }}" | ||||
|   LOG_MAX_DAYS: "{{ snipe_it_config_log_max_days }}" | ||||
|   COOKIE_NAME: "{{ snipe_it_config_cookie_name }}" | ||||
|   COOKIE_DOMAIN: "{{ snipe_it_config_cookie_domain }}" | ||||
|   SECURE_COOKIES: "{{ snipe_it_config_secure_cookies }}" | ||||
|  | ||||
| snipe_it_config: ~ | ||||
| snipe_it_merged_config: >-2 | ||||
|   {{ (snipe_it_base_config | default({}, true)) | ||||
|     | combine((snipe_it_config | default({}, true)), recursive=True) }} | ||||
| @@ -1,48 +0,0 @@ | ||||
| --- | ||||
| snipe_it_container_image_registry: docker.io | ||||
| snipe_it_container_image_namespace: snipe | ||||
| snipe_it_container_image_name: 'snipe-it' | ||||
| snipe_it_container_image_tag: ~ | ||||
| snipe_it_container_image_flavour: alpine | ||||
| snipe_it_container_image_source: pull | ||||
| snipe_it_container_image_force_source: >-2 | ||||
|   {{ snipe_it_container_image_tag | default(false, true) | bool }} | ||||
| snipe_it_container_image: >-2 | ||||
|   {{ | ||||
|     ([ | ||||
|       snipe_it_container_image_registry | default([], true), | ||||
|       snipe_it_container_image_namespace | default([], true), | ||||
|       snipe_it_container_image_name, | ||||
|     ] | flatten | join('/')) | ||||
|     + ':' | ||||
|     + (snipe_it_container_image_tag | default( | ||||
|         'v' + snipe_it_version + ( | ||||
|           ((snipe_it_container_image_flavour is string) | ||||
|             and (snipe_it_container_image_flavour | length > 0)) | ||||
|           | ternary( | ||||
|             '-' + snipe_it_container_image_flavour | default('', true), | ||||
|             '' | ||||
|           ) | ||||
|         ), | ||||
|         true | ||||
|     )) | ||||
|   }} | ||||
|  | ||||
| snipe_it_container_env_file: "/var/www/html/.env" | ||||
| snipe_it_container_data_directory: "/var/lib/snipeit/" | ||||
| snipe_it_container_volumes: | ||||
|   - "{{ snipe_it_data_directory }}:{{ snipe_it_container_data_directory }}:z" | ||||
|  | ||||
| snipe_it_container_name: 'snipe-it' | ||||
| snipe_it_container_state: >-2 | ||||
|   {{ (snipe_it_state == 'present') | ternary('started', 'absent') }} | ||||
| snipe_it_container_env: ~ | ||||
| snipe_it_container_user: ~ | ||||
| snipe_it_container_ports: ~ | ||||
| snipe_it_container_labels: ~ | ||||
| snipe_it_container_recreate: ~ | ||||
| snipe_it_container_networks: ~ | ||||
| snipe_it_container_etc_hosts: ~ | ||||
| snipe_it_container_dns_servers: ~ | ||||
| snipe_it_container_network_mode: ~ | ||||
| snipe_it_container_restart_policy: 'unless-stopped' | ||||
| @@ -1,9 +0,0 @@ | ||||
| --- | ||||
| snipe_it_user: snipeit | ||||
| snipe_it_version: "8.0.4" | ||||
| snipe_it_domain: ~ | ||||
| snipe_it_state: present | ||||
| snipe_it_deployment_method: docker | ||||
|  | ||||
| snipe_it_env_file: /etc/snipeit/env | ||||
| snipe_it_data_directory: /var/lib/snipeit | ||||
| @@ -1,5 +0,0 @@ | ||||
| --- | ||||
| snipe_it_run_user_id: >-2 | ||||
|   {{ snipe_it_user_info.uid | default(snipe_it_user) }} | ||||
| snipe_it_run_group_id: >-2 | ||||
|   {{ snipe_it_user_info.group | default(snipe_it_user) }} | ||||
| @@ -1,12 +0,0 @@ | ||||
| --- | ||||
| allow_duplicates: true | ||||
| dependencies: [] | ||||
| galaxy_info: | ||||
|   role_name: snipe_it | ||||
|   description: >-2 | ||||
|     Deploy Snipe-IT, an open-source asset / license management system with | ||||
|     powerful JSON REST API | ||||
|   galaxy_tags: | ||||
|     - snipeit | ||||
|     - asset-management | ||||
|     - docker | ||||
| @@ -1,14 +0,0 @@ | ||||
| --- | ||||
| - name: Ensure state is valid | ||||
|   ansible.builtin.fail: | ||||
|     msg: >-2 | ||||
|       Unsupported state '{{ snipe_it_state }}'! | ||||
|       Supported states are {{ snipe_it_states | join(', ') }}. | ||||
|   when: snipe_it_state is not in snipe_it_states | ||||
|  | ||||
| - name: Ensure deployment method is valid | ||||
|   ansible.builtin.fail: | ||||
|     msg: >-2 | ||||
|       Unsupported deployment_method '{{ snipe_it_deployment_method }}'! | ||||
|       Supported values are {{ snipe_it_deployment_methods | join(', ') }}. | ||||
|   when: snipe_it_deployment_method is not in snipe_it_deployment_methods | ||||
| @@ -1,30 +0,0 @@ | ||||
| --- | ||||
| - name: Ensure container image '{{ snipe_it_container_image }}' is {{ snipe_it_state }} | ||||
|   community.docker.docker_image: | ||||
|     name: "{{ snipe_it_container_image }}" | ||||
|     state: "{{ snipe_it_state }}" | ||||
|     source: "{{ snipe_it_container_image_source }}" | ||||
|     force_source: "{{ snipe_it_container_image_force_source }}" | ||||
|   register: snipe_it_container_image_info | ||||
|   until: snipe_it_container_image_info is success | ||||
|   retries: 5 | ||||
|   delay: 3 | ||||
|  | ||||
| - name: Ensure container '{{ snipe_it_container_name }}' is {{ snipe_it_container_state }} | ||||
|   community.docker.docker_container: | ||||
|     name: "{{ snipe_it_container_name }}" | ||||
|     image: "{{ snipe_it_container_image }}" | ||||
|     env_file: "{{ snipe_it_env_file }}" | ||||
|     env: "{{ snipe_it_container_env | default(omit, true) }}" | ||||
|     user: "{{ snipe_it_container_user | default(omit, true) }}" | ||||
|     ports: "{{ snipe_it_container_ports | default(omit, true) }}" | ||||
|     labels: "{{ snipe_it_container_labels | default(omit, true) }}" | ||||
|     volumes: "{{ snipe_it_container_volumes | default(omit, true) }}" | ||||
|     networks: "{{ snipe_it_container_networks | default(omit, true) }}" | ||||
|     etc_hosts: "{{ snipe_it_container_etc_hosts | default(omit, true) }}" | ||||
|     dns_servers: "{{ snipe_it_container_dns_servers | default(omit, true) }}" | ||||
|     network_mode: "{{ snipe_it_container_network_mode | default(omit, true) }}" | ||||
|     restart_policy: >-2 | ||||
|       {{ snipe_it_container_restart_policy | default(omit, true) }} | ||||
|     recreate: "{{ snipe_it_container_recreate | default(omit, true) }}" | ||||
|     state: "{{ snipe_it_container_state }}" | ||||
| @@ -1,59 +0,0 @@ | ||||
| --- | ||||
| - name: Check preconditions | ||||
|   ansible.builtin.include_tasks: | ||||
|     file: "check.yml" | ||||
|  | ||||
| - name: Ensure snipe-it user '{{ snipe_it_user }}' is {{ snipe_it_state }} | ||||
|   ansible.builtin.user: | ||||
|     name: "{{ snipe_it_user }}" | ||||
|     state: "{{ snipe_it_state }}" | ||||
|     system: "{{ snipe_it_user_system | default(true, true) }}" | ||||
|     create_home: "{{ snipe_it_user_create_home | default(false, true) }}" | ||||
|     groups: "{{ snipe_it_user_groups | default(omit, true) }}" | ||||
|     append: >-2 | ||||
|       {{ | ||||
|         snipe_it_user_groups_append | default( | ||||
|             snipe_it_user_groups | default([], true) | length > 0, | ||||
|             true, | ||||
|         ) | ||||
|       }} | ||||
|   register: snipe_it_user_info | ||||
|  | ||||
| - name: Ensure snipe-it environment file is {{ snipe_it_state }} | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ snipe_it_env_file }}" | ||||
|     state: "{{ snipe_it_state }}" | ||||
|   when: snipe_it_state == 'absent' | ||||
|  | ||||
| - name: Ensure snipe-it config directory is {{ snipe_it_state }} | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ snipe_it_env_file | dirname }}" | ||||
|     state: "{{ (snipe_it_state == 'present') | ternary('directory', 'absent') }}" | ||||
|     owner: "{{ snipe_it_run_user_id }}" | ||||
|     group: "{{ snipe_it_run_group_id }}" | ||||
|     mode: "0755" | ||||
|   when: snipe_it_state == 'present' | ||||
|  | ||||
| - name: Ensure snipe-it data directory '{{ snipe_it_data_directory }}' is {{ snipe_it_state }} | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ snipe_it_data_directory }}" | ||||
|     state: "{{ (snipe_it_state == 'present') | ternary('directory', 'absent') }}" | ||||
|     owner: "{{ snipe_it_run_user_id }}" | ||||
|     group: "{{ snipe_it_run_group_id }}" | ||||
|     mode: "0755" | ||||
|  | ||||
| - name: Ensure snipe-it environment file is templated | ||||
|   ansible.builtin.copy: | ||||
|     content: |+2 | ||||
|       {% for entry in snipe_it_merged_config | dict2items %} | ||||
|       {{ entry.key }}={{ entry.value }} | ||||
|       {% endfor %} | ||||
|     dest: "{{ snipe_it_env_file }}" | ||||
|     owner: "{{ snipe_it_run_user_id }}" | ||||
|     group: "{{ snipe_it_run_group_id }}" | ||||
|     mode: "0640" | ||||
|   when: snipe_it_state == 'present' | ||||
|  | ||||
| - name: Deploy using {{ snipe_it_deployment_method }} | ||||
|   ansible.builtin.include_tasks: | ||||
|     file: "deploy-{{ snipe_it_deployment_method }}.yml" | ||||
| @@ -1,6 +0,0 @@ | ||||
| --- | ||||
| snipe_it_states: | ||||
|   - present | ||||
|   - absent | ||||
| snipe_it_deployment_methods: | ||||
|   - docker | ||||
| @@ -1,54 +0,0 @@ | ||||
| # `finallycoffee.services.vaultwarden` ansible role | ||||
|  | ||||
| Vaultwarden is an unofficial (not associated with Bitwarden) bitwarden API compatible | ||||
| server backend, formerly called `bitwarden_rs`, written in rust. | ||||
|  | ||||
| This ansible role can deploy and configure `vaultwarden`, and supports removing | ||||
| itself using `vaultwarden_state: absent` (Warning: It does not ask for confirmation, | ||||
| and will remove all user data when instructed to remove it). | ||||
|  | ||||
| ## Configuration | ||||
|  | ||||
| To use this role, the following variables need to be populated: | ||||
|  | ||||
| - `vaultwarden_config_domain` - always. Changing this will lead to two-factor not working for two-factor methods registered in the past. | ||||
| - `vaultwarden_config_admin_token` - if `vaultwarden_config_disable_admin_token` is `false`. | ||||
|  | ||||
| Setting other configuration values for vaultwarden can be done using role-provided flattened keys in the | ||||
| `vaultwarden_config_*` namespace (see [`defaults/main/config.yml`](defaults/main/config.yml) for available variables), | ||||
| or by setting the configuration directly in the same structure as the `config.json` would be in `vaultwarden_config`. | ||||
|  | ||||
| ### Email | ||||
|  | ||||
| Configure mailing by first enabling SMTP using `vaultwarden_config_enable_smtp: true`, | ||||
| then configure your email server like this: | ||||
| ```yaml | ||||
| vaultwarden_config: | ||||
|   smtp_host: "mail.example.com" | ||||
|   smtp_explicit_tls: true | ||||
|   smtp_port: 465 | ||||
|   smtp_from: "noreply+vaultwarden@example.com" | ||||
|   smtp_from_name: "'Example.com Vaultwarden instance' <noreply+vaultwarden@example.com>" | ||||
|   smtp_username: vaultwarden@example.com | ||||
|   smtp_password: i_hope_i_will_be_a_strong_one! | ||||
|   helo_name: "{{ vaultwarden_config_domain }}" | ||||
| ``` | ||||
|  | ||||
| ### 2FA via email | ||||
|  | ||||
| To enable email-based two-factor-authentication, set `vaultwarden_config_enable_email_2fa: true` | ||||
| and optionally set the following configuration: | ||||
| ```yaml | ||||
| vaultwarden_config: | ||||
|   email_token_size: 8 | ||||
|   email_expiration_time: 300 # 300 seconds = 5min | ||||
|   email_attempts_limit: 3 | ||||
| ``` | ||||
|  | ||||
| ### Feature flags | ||||
|  | ||||
| To enable more authentication methods, toggles are provided in | ||||
| [`vaultwarden_config_enable_*`](defaults/main/config.yml#L18). | ||||
| It is generally recommended to simply keep unused methods off. | ||||
|  | ||||
| Per default, 'Sends' are allowed. | ||||
| @@ -1,68 +0,0 @@ | ||||
| --- | ||||
| # Required configuration | ||||
| vaultwarden_config_domain: ~ | ||||
| vaultwarden_config_admin_token: ~ | ||||
| # Invitations and signups | ||||
| vaultwarden_config_invitations_allowed: false | ||||
| vaultwarden_config_invitation_org_name: ~ | ||||
| vaultwarden_config_signups_allowed: false | ||||
| vaultwarden_config_signups_verify: true | ||||
| vaultwarden_config_signups_verify_resend_time: 3600 | ||||
| vaultwarden_config_signups_verify_resend_limit: 5 | ||||
| # Entry preview icons | ||||
| vaultwarden_config_disable_icon_download: true | ||||
| vaultwarden_config_icon_cache_ttl: 604800 # 7 days | ||||
| vaultwarden_config_icon_cache_negttl: 259200 # 3 days | ||||
| vaultwarden_config_icon_download_timeout: 30 # seconds | ||||
| vaultwarden_config_icon_blacklist_non_global_ips: true | ||||
| # Features | ||||
| vaultwarden_config_sends_allowed: true | ||||
| vaultwarden_config_enable_yubico: false | ||||
| vaultwarden_config_enable_duo: false | ||||
| vaultwarden_config_enable_smtp: false | ||||
| vaultwarden_config_enable_email_2fa: false | ||||
| # Security | ||||
| vaultwarden_config_password_iterations: 100000 | ||||
| vaultwarden_config_show_password_hint: false | ||||
| vaultwarden_config_disable_2fa_remember: false | ||||
| vaultwarden_config_disable_admin_token: true | ||||
| vaultwarden_config_require_device_email: false | ||||
| vaultwarden_config_authenticator_disable_time_drift: true | ||||
| # Other | ||||
| vaultwarden_config_log_timestamp_format: "%Y-%m-%d %H:%M:%S.%3f" | ||||
| vaultwarden_config_ip_header: "X-Real-IP" | ||||
| vaultwarden_config_reload_templates: false | ||||
|  | ||||
| vaultwarden_base_config: | ||||
|   domain: "{{ vaultwarden_config_domain }}" | ||||
|   admin_token: "{{ vaultwarden_config_admin_token }}" | ||||
|   invitations_allowed: "{{ vaultwarden_config_invitations_allowed }}" | ||||
|   invitation_org_name: "{{ vaultwarden_config_invitation_org_name | default('', true) }}" | ||||
|   signups_allowed: "{{ vaultwarden_config_signups_allowed }}" | ||||
|   signups_verify: "{{ vaultwarden_config_signups_verify }}" | ||||
|   signups_verify_resend_time: "{{ vaultwarden_config_signups_verify_resend_time }}" | ||||
|   signups_verify_resend_limit: "{{ vaultwarden_config_signups_verify_resend_limit }}" | ||||
|   disable_icon_download: "{{ vaultwarden_config_disable_icon_download }}" | ||||
|   icon_cache_ttl: "{{ vaultwarden_config_icon_cache_ttl }}" | ||||
|   icon_cache_negttl: "{{ vaultwarden_config_icon_cache_negttl }}" | ||||
|   icon_download_timeout: "{{ vaultwarden_config_icon_download_timeout }}" | ||||
|   icon_blacklist_non_global_ips: "{{ vaultwarden_config_icon_blacklist_non_global_ips }}" | ||||
|   password_iterations: "{{ vaultwarden_config_password_iterations }}" | ||||
|   show_password_hint: "{{ vaultwarden_config_show_password_hint }}" | ||||
|   disable_2fa_remember: "{{ vaultwarden_config_disable_2fa_remember }}" | ||||
|   disable_admin_token: "{{ vaultwarden_config_disable_admin_token }}" | ||||
|   require_device_email: "{{ vaultwarden_config_require_device_email }}" | ||||
|   authenticator_disable_time_drift: "{{ vaultwarden_config_authenticator_disable_time_drift }}" | ||||
|   ip_header: "{{ vaultwarden_config_ip_header }}" | ||||
|   log_timestamp_format: "{{ vaultwarden_config_log_timestamp_format }}" | ||||
|   reload_templates: "{{ vaultwarden_config_reload_templates }}" | ||||
|   sends_allowed: "{{ vaultwarden_config_sends_allowed }}" | ||||
|   _enable_yubico: "{{ vaultwarden_config_enable_yubico }}" | ||||
|   _enable_duo: "{{ vaultwarden_config_enable_duo }}" | ||||
|   _enable_smtp: "{{ vaultwarden_config_enable_smtp }}" | ||||
|   _enable_email_2fa: "{{ vaultwarden_config_enable_email_2fa }}" | ||||
|  | ||||
| vaultwarden_config: ~ | ||||
| vaultwarden_merged_config: >-2 | ||||
|   {{ vaultwarden_base_config | default({}, true) | ||||
|     | combine(vaultwarden_config | default({}, true), recursive=true) }} | ||||
| @@ -1,50 +0,0 @@ | ||||
| --- | ||||
| vaultwarden_container_image_registry: docker.io | ||||
| vaultwarden_container_image_namespace: vaultwarden | ||||
| vaultwarden_container_image_name: server | ||||
| vaultwarden_container_image_tag: ~ | ||||
| vaultwarden_container_image_flavour: alpine | ||||
| vaultwarden_container_image_source: pull | ||||
| vaultwarden_container_image_force_source: >-2 | ||||
|   {{ vaultwarden_container_image_tag | default(false, true) | bool }} | ||||
| vaultwarden_container_image: >-2 | ||||
|   {{ | ||||
|     ([ | ||||
|       vaultwarden_container_image_registry | default([], true), | ||||
|       vaultwarden_container_image_namespace | default([], true), | ||||
|       vaultwarden_container_image_name, | ||||
|     ] | flatten |  join('/')) | ||||
|     + ':' | ||||
|     + (vaultwarden_container_image_tag | default( | ||||
|         vaultwarden_version + ( | ||||
|           ((vaultwarden_container_image_flavour is string) | ||||
|             and (vaultwarden_container_image_flavour | length > 0)) | ||||
|           | ternary( | ||||
|             '-' + vaultwarden_container_image_flavour | default('', true), | ||||
|             '' | ||||
|           ) | ||||
|         ), | ||||
|         true | ||||
|     )) | ||||
|   }} | ||||
|  | ||||
| vaultwarden_container_name: vaultwarden | ||||
| vaultwarden_container_env: ~ | ||||
| vaultwarden_container_user: >-2 | ||||
|   {{ vaultwarden_run_user_id }}:{{ vaultwarden_run_group_id }} | ||||
| vaultwarden_container_ports: ~ | ||||
| vaultwarden_container_labels: ~ | ||||
| vaultwarden_container_networks: ~ | ||||
| vaultwarden_container_etc_hosts: ~ | ||||
| vaultwarden_container_dns_servers: ~ | ||||
| vaultwarden_container_restart_policy: >-2 | ||||
|   {{ (vaultwarden_deployment_method == 'docker') | ternary( | ||||
|       'unless-stopped', | ||||
|       'on-failure', | ||||
|       ) | ||||
|   }} | ||||
| vaultwarden_container_state: >-2 | ||||
|   {{ (vaultwarden_state == 'present') | ternary('started', 'absent') }} | ||||
| vaultwarden_container_volumes: | ||||
|   - "{{ vaultwarden_data_directory }}:/data:rw" | ||||
|   - "{{ vaultwarden_config_file }}:/data/config.json:ro" | ||||
| @@ -1,10 +0,0 @@ | ||||
| --- | ||||
| vaultwarden_user: vaultwarden | ||||
| vaultwarden_version: "1.33.2" | ||||
|  | ||||
| vaultwarden_config_file: "/etc/vaultwarden/config.json" | ||||
| vaultwarden_config_directory: "{{ vaultwarden_config_file | dirname }}" | ||||
| vaultwarden_data_directory: "/var/lib/vaultwarden" | ||||
|  | ||||
| vaultwarden_state: present | ||||
| vaultwarden_deployment_method: docker | ||||
| @@ -1,5 +0,0 @@ | ||||
| --- | ||||
| vaultwarden_run_user_id: >-2 | ||||
|   {{ vaultwarden_user_info.uid | default(vaultwarden_user, true) }} | ||||
| vaultwarden_run_group_id: >-2 | ||||
|   {{ vaultwarden_user_info.group | default(vaultwarden_user, true) }} | ||||
| @@ -1,9 +0,0 @@ | ||||
| --- | ||||
| - name: Ensure vaultwarden container '{{ vaultwarden_container_name }}' is restarted | ||||
|   community.docker.docker_container: | ||||
|     name: "{{ vaultwarden_container_name }}" | ||||
|     state: "{{ vaultwarden_container_state }}" | ||||
|     restart: true | ||||
|   listen: vaultwarden-restart | ||||
|   when: vaultwarden_deployment_method == 'docker' | ||||
|   ignore_errors: "{{ ansible_check_mode }}" | ||||
| @@ -1,12 +0,0 @@ | ||||
| --- | ||||
| allow_duplicates: true | ||||
| dependencies: [] | ||||
| galaxy_info: | ||||
|   role_name: vaultwarden | ||||
|   description: >-2 | ||||
|     Deploy vaultwarden, a bitwarden-compatible server backend | ||||
|   galaxy_tags: | ||||
|     - vaultwarden | ||||
|     - bitwarden | ||||
|     - passwordstore | ||||
|     - docker | ||||
| @@ -1,22 +0,0 @@ | ||||
| --- | ||||
| - name: Ensure container image '{{ vaultwarden_container_image }}' is {{ vaultwarden_state }} | ||||
|   community.docker.docker_image: | ||||
|     name: "{{ vaultwarden_container_image }}" | ||||
|     state: "{{ vaultwarden_state }}" | ||||
|     source: "{{ vaultwarden_container_image_source }}" | ||||
|     force_source: "{{ vaultwarden_container_image_force_source }}" | ||||
|  | ||||
| - name: Ensure container '{{ vaultwarden_container_name }}' is {{ vaultwarden_container_state }} | ||||
|   community.docker.docker_container: | ||||
|     name: "{{ vaultwarden_container_name }}" | ||||
|     image: "{{ vaultwarden_container_image }}" | ||||
|     env: "{{ vaultwarden_container_env | default(omit, true) }}" | ||||
|     user: "{{ vaultwarden_container_user | default(omit, true) }}" | ||||
|     ports: "{{ vaultwarden_container_ports | default(omit, true) }}" | ||||
|     labels: "{{ vaultwarden_container_labels | default(omit, true) }}" | ||||
|     volumes: "{{ vaultwarden_container_volumes }}" | ||||
|     networks: "{{ vaultwarden_container_networks | default(omit, true) }}" | ||||
|     etc_hosts: "{{ vaultwarden_container_etc_hosts | default(omit, true) }}" | ||||
|     dns_servers: "{{ vaultwarden_container_dns_servers | default(omit, true) }}" | ||||
|     restart_policy: "{{ vaultwarden_container_restart_policy | default(omit, true) }}" | ||||
|     state: "{{ vaultwarden_container_state | default(omit, true) }}" | ||||
| @@ -1,22 +0,0 @@ | ||||
| --- | ||||
| - name: Ensure container image '{{ vaultwarden_container_image }}' is {{ vaultwarden_state }} | ||||
|   containers.podman.podman_image: | ||||
|     name: "{{ vaultwarden_container_image }}" | ||||
|     state: "{{ vaultwarden_state }}" | ||||
|     pull: "{{ (vaultwarden_container_image_source == 'pull') | bool }}" | ||||
|     force: "{{ vaultwarden_container_image_force_source }}" | ||||
|  | ||||
| - name: Ensure container '{{ vaultwarden_container_name }}' is {{ vaultwarden_container_state }} | ||||
|   containers.podman.podman_container: | ||||
|     name: "{{ vaultwarden_container_name }}" | ||||
|     image: "{{ vaultwarden_container_image }}" | ||||
|     env: "{{ vaultwarden_container_env | default(omit, true) }}" | ||||
|     user: "{{ vaultwarden_container_user | default(omit, true) }}" | ||||
|     ports: "{{ vaultwarden_container_ports | default(omit, true) }}" | ||||
|     labels: "{{ vaultwarden_container_labels | default(omit, true) }}" | ||||
|     volumes: "{{ vaultwarden_container_volumes }}" | ||||
|     network: "{{ vaultwarden_container_networks | default(omit, true) }}" | ||||
|     etc_hosts: "{{ vaultwarden_container_etc_hosts | default(omit, true) }}" | ||||
|     dns_servers: "{{ vaultwarden_container_dns_servers | default(omit, true) }}" | ||||
|     restart_policy: "{{ vaultwarden_container_restart_policy | default(omit, true) }}" | ||||
|     state: "{{ vaultwarden_container_state | default(omit, true) }}" | ||||
| @@ -1,78 +0,0 @@ | ||||
| --- | ||||
| - name: Ensure state is valid | ||||
|   ansible.builtin.fail: | ||||
|     msg: >-2 | ||||
|       Unsupported state '{{ vaultwarden_state }}'! | ||||
|       Supported states are {{ vaultwarden_states | join(', ') }}. | ||||
|   when: vaultwarden_state not in vaultwarden_states | ||||
|  | ||||
| - name: Ensure deployment method is valid | ||||
|   ansible.builtin.fail: | ||||
|     msg: >-2 | ||||
|       Unsupported deployment method '{{ vaultwarden_deployment_method }}'! | ||||
|       Supported are {{ vaultwarden_deployment_methods | join(', ') }}. | ||||
|   when: vaultwarden_deployment_method not in vaultwarden_deployment_methods | ||||
|  | ||||
| - name: Ensure required variables are given | ||||
|   ansible.builtin.fail: | ||||
|     msg: "Required variable '{{ var }}' is undefined!" | ||||
|   loop: "{{ vaultwarden_required_variables }}" | ||||
|   loop_control: | ||||
|     loop_var: var | ||||
|   when: >-2 | ||||
|     var not in hostvars[inventory_hostname] | ||||
|     or hostvars[inventory_hostname][var] | length == 0 | ||||
|  | ||||
| - name: Ensure required variables are given | ||||
|   ansible.builtin.fail: | ||||
|     msg: "Required variable '{{ var.name }}' is undefined!" | ||||
|   loop: "{{ vaultwarden_conditionally_required_variables }}" | ||||
|   loop_control: | ||||
|     loop_var: var | ||||
|     label: "{{ var.name }}" | ||||
|   when: >-2 | ||||
|     var.when and ( | ||||
|       var.name not in hostvars[inventory_hostname] | ||||
|       or hostvars[inventory_hostname][var.name] | length == 0) | ||||
|  | ||||
| - name: Ensure vaultwarden user '{{ vaultwarden_user }}' is {{ vaultwarden_state }} | ||||
|   ansible.builtin.user: | ||||
|     name: "{{ vaultwarden_user }}" | ||||
|     state: "{{ vaultwarden_state }}" | ||||
|     system: "{{ vaultwarden_user_system | default(true, true) }}" | ||||
|     create_home: "{{ vaultwarden_user_create_home | default(false, true) }}" | ||||
|     groups: "{{ vaultwarden_user_groups | default(omit, true) }}" | ||||
|     append: >-2 | ||||
|       {{ vaultwarden_user_append_groups | default( | ||||
|         (vaultwarden_user_groups | default([], true) | length > 0), | ||||
|         true, | ||||
|       ) }} | ||||
|   register: vaultwarden_user_info | ||||
|  | ||||
| - name: Ensure base paths are {{ vaultwarden_state }} | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ mount.path }}" | ||||
|     state: "{{ (vaultwarden_state == 'present') | ternary('directory', 'absent') }}" | ||||
|     owner: "{{ mount.owner | default(vaultwarden_run_user_id) }}" | ||||
|     group: "{{ mount.group | default(vaultwarden_run_group_id) }}" | ||||
|     mode: "{{ mount.mode | default('0755', true) }}" | ||||
|   loop: | ||||
|     - path: "{{ vaultwarden_config_directory }}" | ||||
|     - path: "{{ vaultwarden_data_directory }}" | ||||
|   loop_control: | ||||
|     loop_var: mount | ||||
|     label: "{{ mount.path }}" | ||||
|  | ||||
| - name: Ensure vaultwarden config file '{{ vaultwarden_config_file }}' is {{ vaultwarden_state }} | ||||
|   ansible.builtin.copy: | ||||
|     content: "{{ vaultwarden_merged_config | to_nice_json(indent=4) }}" | ||||
|     dest: "{{ vaultwarden_config_file }}" | ||||
|     owner: "{{ vaultwarden_run_user_id }}" | ||||
|     group: "{{ vaultwarden_run_group_id }}" | ||||
|     mode: "0640" | ||||
|   when: vaultwarden_state == 'present' | ||||
|   notify: vaultwarden-restart | ||||
|  | ||||
| - name: Deploy vaultwarden using {{ vaultwarden_deployment_method }} | ||||
|   ansible.builtin.include_tasks: | ||||
|     file: "deploy-{{ vaultwarden_deployment_method }}.yml" | ||||
| @@ -1,12 +0,0 @@ | ||||
| --- | ||||
| vaultwarden_states: | ||||
|   - present | ||||
|   - absent | ||||
| vaultwarden_deployment_methods: | ||||
|   - docker | ||||
|   - podman | ||||
| vaultwarden_required_variables: | ||||
|   - vaultwarden_config_domain | ||||
| vaultwarden_conditionally_required_variables: | ||||
|   - name: vaultwarden_config_admin_token | ||||
|     when: "{{ vaultwarden_config_disable_admin_token | default(true, true) | bool }}" | ||||
| @@ -1,6 +1,7 @@ | ||||
| --- | ||||
| 
 | ||||
| vouch_proxy_user: vouch-proxy | ||||
| vouch_proxy_version: "0.41.0" | ||||
| vouch_proxy_version: 0.39.0 | ||||
| vouch_proxy_base_path: /opt/vouch-proxy | ||||
| vouch_proxy_config_path: "{{ vouch_proxy_base_path }}/config" | ||||
| vouch_proxy_config_file: "{{ vouch_proxy_config_path }}/config.yaml" | ||||
| @@ -1,16 +0,0 @@ | ||||
| # `finallycoffee.services.vouch-proxy` | ||||
|  | ||||
| [Vouch-Proxy](https://github.com/vouch/vouch-proxy) can be used in combination with | ||||
| nginx' `auth_request` module to secure web services with OIDC/OAuth. This role runs | ||||
| vouch-proxys' official docker container. | ||||
|  | ||||
| ## Usage | ||||
|  | ||||
| The `oauth` config section must be supplied in `vouch_proxy_oauth_config`, and the | ||||
| `vouch` config section can be overridden in `vouch_proxy_vouch_config`. For possible | ||||
| configuration values, see https://github.com/vouch/vouch-proxy/blob/master/config/config.yml_example. | ||||
|  | ||||
| For an example nginx config, see https://github.com/vouch/vouch-proxy#installation-and-configuration. | ||||
|  | ||||
| Passing container arguments in the same way as `community.docker.docker_container` is supported | ||||
| using the `vouch_proxy_container_[...]` prefix (e.g. `vouch_proxy_container_ports`). | ||||
| @@ -1,12 +0,0 @@ | ||||
| --- | ||||
| allow_duplicates: true | ||||
| dependencies: [] | ||||
| galaxy_info: | ||||
|   role_name: vouch_proxy | ||||
|   description: Ansible role to deploy vouch_proxy using docker | ||||
|   galaxy_tags: | ||||
|     - vouch_proxy | ||||
|     - oidc | ||||
|     - authentication | ||||
|     - authorization | ||||
|     - docker | ||||
		Reference in New Issue
	
	Block a user