Compare commits
	
		
			1 Commits
		
	
	
		
			0.1.3
			...
			36ceb40fac
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
| 36ceb40fac | 
							
								
								
									
										19
									
								
								README.md
									
									
									
									
									
								
							
							
						
						
									
										19
									
								
								README.md
									
									
									
									
									
								
							| @@ -8,23 +8,24 @@ concise area of concern. | ||||
|  | ||||
| ## Roles | ||||
|  | ||||
| - [`authelia`](roles/authelia/README.md): Deploys an [authelia.com](https://www.authelia.com) | ||||
| - [`roles/authelia`](roles/authelia/README.md): Deploys an [authelia.com](https://www.authelia.com) | ||||
|   instance, an authentication provider with beta OIDC provider support. | ||||
|  | ||||
| - [`ghost`](roles/ghost/README.md): Deploys [ghost.org](https://ghost.org/), a simple to use | ||||
|   blogging and publishing platform. | ||||
| - [`roles/elasticsearch`](roles/elasticsearch/README.md): Deploy [elasticsearch](https://www.docker.elastic.co/r/elasticsearch/elasticsearch-oss), | ||||
|   a popular (distributed) search and analytics engine, mostly known by it's | ||||
|   letter "E" in the ELK-stack. | ||||
|  | ||||
| - [`gitea`](roles/gitea/README.md): Deploy [gitea.io](https://gitea.io), a | ||||
| - [`roles/gitea`](roles/gitea/README.md): Deploy [gitea.io](https://gitea.io), a | ||||
|   lightweight, self-hosted git service. | ||||
|  | ||||
| - [`jellyfin`](roles/jellyfin/README.md): Deploy [jellyfin.org](https://jellyfin.org), | ||||
| - [`roles/jellyfin`](roles/jellyfin/README.md): Deploy [jellyfin.org](https://jellyfin.org), | ||||
|   the free software media system for streaming stored media to any device. | ||||
|  | ||||
| - [`openproject`](roles/openproject/README.md): Deploys an [openproject.org](https://www.openproject.org) | ||||
|   installation using the upstream provided docker-compose setup. | ||||
| - [`roles/restic`](roles/restic/README.md): Manage backups using restic | ||||
|   and persist them to a configurable backend. | ||||
|  | ||||
| - [`vouch_proxy`](roles/vouch_proxy/README.md): Deploys [vouch-proxy](https://github.com/vouch/vouch-proxy), | ||||
|   an authorization proxy for arbitrary webapps working with `nginx`s' `auth_request` module. | ||||
| - [`roles/minio`](roles/minio/README.md): Deploy [min.io](https://min.io), an | ||||
|   s3-compatible object storage server, using docker containers. | ||||
|  | ||||
| ## License | ||||
|  | ||||
|   | ||||
							
								
								
									
										10
									
								
								galaxy.yml
									
									
									
									
									
								
							
							
						
						
									
										10
									
								
								galaxy.yml
									
									
									
									
									
								
							| @@ -1,15 +1,15 @@ | ||||
| namespace: finallycoffee | ||||
| name: services | ||||
| version: 0.1.3 | ||||
| version: 0.0.1 | ||||
| readme: README.md | ||||
| authors: | ||||
| - transcaffeine <transcaffeine@finally.coffee> | ||||
| description: Various ansible roles useful for automating infrastructure | ||||
| dependencies: | ||||
|   "community.crypto": "^2.0.0" | ||||
|   "community.docker": "^3.0.0" | ||||
| license_file: LICENSE.md | ||||
|   "community.docker": "^1.10.0" | ||||
| license: | ||||
| - CNPLv7+ | ||||
| build_ignore: | ||||
| - '*.tar.gz' | ||||
| repository: https://git.finally.coffee/finallycoffee/services | ||||
| issues: https://codeberg.org/finallycoffee/ansible-collection-services/issues | ||||
| issues: https://git.finally.coffee/finallycoffee/services/issues | ||||
|   | ||||
| @@ -1,3 +0,0 @@ | ||||
| --- | ||||
|  | ||||
| requires_ansible: ">=2.15" | ||||
| @@ -1,6 +0,0 @@ | ||||
| --- | ||||
| - name: Install openproject | ||||
|   hosts: "{{ openproject_hosts | default('openproject') }}" | ||||
|   become: "{{ openproject_become | default(true, false) }}" | ||||
|   roles: | ||||
|     - role: finallycoffee.services.openproject | ||||
| @@ -1,6 +1,6 @@ | ||||
| --- | ||||
|  | ||||
| authelia_version: 4.38.15 | ||||
| authelia_version: 4.37.5 | ||||
| authelia_user: authelia | ||||
| authelia_base_dir: /opt/authelia | ||||
| authelia_domain: authelia.example.org | ||||
| @@ -14,20 +14,9 @@ authelia_notification_storage_file: "{{ authelia_data_dir }}/notifications.txt" | ||||
| authelia_user_storage_file: "{{ authelia_data_dir }}/user_database.yml" | ||||
|  | ||||
| authelia_container_name: authelia | ||||
| authelia_container_image_server: docker.io | ||||
| authelia_container_image_namespace: authelia | ||||
| authelia_container_image_name: authelia | ||||
| authelia_container_image: >-2 | ||||
|   {{ | ||||
|     [ | ||||
|       authelia_container_image_server, | ||||
|       authelia_container_image_namespace, | ||||
|       authelia_container_image_name | ||||
|     ] | join('/') | ||||
|   }} | ||||
| authelia_container_image_name: docker.io/authelia/authelia | ||||
| authelia_container_image_tag: ~ | ||||
| authelia_container_image_ref: >-2 | ||||
|   {{ authelia_container_image }}:{{ authelia_container_image_tag | default(authelia_version, true) }} | ||||
| authelia_container_image_ref: "{{ authelia_container_image_name }}:{{ authelia_container_image_tag | default(authelia_version, true) }}" | ||||
| authelia_container_image_force_pull: "{{ authelia_container_image_tag | default(false, True) }}" | ||||
| authelia_container_env: | ||||
|   PUID: "{{ authelia_run_user }}" | ||||
| @@ -53,22 +42,12 @@ authelia_config_jwt_secret: ~ | ||||
| authelia_config_default_redirection_url: ~ | ||||
| authelia_config_server_host: 0.0.0.0 | ||||
| authelia_config_server_port: "{{ authelia_container_listen_port }}" | ||||
| authelia_config_server_address: >-2 | ||||
|   {{ authelia_config_server_host }}:{{ authelia_config_server_port }} | ||||
| authelia_config_server_path: "" | ||||
| authelia_config_server_asset_path: "/config/assets/" | ||||
| authelia_config_server_buffers_read: 4096 | ||||
| authelia_config_server_read_buffer_size: >-2 | ||||
|   {{ authelia_config_server_buffers_read }} | ||||
| authelia_config_server_buffers_write: 4096 | ||||
| authelia_config_server_write_buffer_size: >-2 | ||||
|   {{ authelia_config_server_buffers_write }} | ||||
| authelia_config_server_endpoints_enable_pprof: true | ||||
| authelia_config_server_enable_pprof: >-2 | ||||
|   {{ authelia_config_server_endpoints_enable_pprof }} | ||||
| authelia_config_server_endpoints_enable_expvars: true | ||||
| authelia_config_server_enable_expvars: >-2 | ||||
|   {{ authelia_config_server_endpoints_enable_expvars }} | ||||
| authelia_config_server_read_buffer_size: 4096 | ||||
| authelia_config_server_write_buffer_size: 4096 | ||||
| authelia_config_server_enable_pprof: true | ||||
| authelia_config_server_enable_expvars: true | ||||
| authelia_config_server_disable_healthcheck: | ||||
| authelia_config_server_tls_key: ~ | ||||
| authelia_config_server_tls_certificate: ~ | ||||
| @@ -115,18 +94,10 @@ authelia_config_authentication_backend_ldap_additional_users_dn: "ou=users" | ||||
| authelia_config_authentication_backend_ldap_users_filter: "(&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=inetOrgPerson))" | ||||
| authelia_config_authentication_backend_ldap_additional_groups_dn: "ou=groups" | ||||
| authelia_config_authentication_backend_ldap_groups_filter: "(member={dn})" | ||||
| authelia_config_authentication_backend_ldap_attributes_username: uid | ||||
| authelia_config_authentication_backend_ldap_username_attribute: >-2 | ||||
|   {{ authelia_config_authentication_backend_ldap_attributes_username }} | ||||
| authelia_config_authentication_backend_ldap_attributes_mail: mail | ||||
| authelia_config_authentication_backend_ldap_mail_attribute: >-2 | ||||
|   {{ authelia_config_authentication_backend_ldap_attributes_mail }} | ||||
| authelia_config_authentication_backend_ldap_attributes_display_name: displayName | ||||
| authelia_config_authentication_backend_ldap_display_name_attribute: >-2 | ||||
|   {{ authelia_config_authentication_backend_ldap_attributes_display_name }} | ||||
| authelia_config_authentication_backend_ldap_group_name_attribute: cn | ||||
| authelia_config_authentication_backend_ldap_attributes_group_name: >-2 | ||||
|   {{ authelia_config_authentication_backend_ldap_group_name_attribute }} | ||||
| authelia_config_authentication_backend_ldap_username_attribute: uid | ||||
| authelia_config_authentication_backend_ldap_mail_attribute: mail | ||||
| authelia_config_authentication_backend_ldap_display_name_attribute: displayName | ||||
| authelia_config_authentication_backend_ldap_user: ~ | ||||
| authelia_config_authentication_backend_ldap_password: ~ | ||||
| authelia_config_authentication_backend_file_path: ~ | ||||
| @@ -154,8 +125,6 @@ authelia_config_session_secret: ~ | ||||
| authelia_config_session_expiration: 1h | ||||
| authelia_config_session_inactivity: 5m | ||||
| authelia_config_session_remember_me_duration: 1M | ||||
| authelia_config_session_remember_me: >-2 | ||||
|   {{ authelia_config_session_remember_me_duration }} | ||||
| authelia_config_session_redis_host: "{{ authelia_redis_host }}" | ||||
| authelia_config_session_redis_port: "{{ authelia_redis_port }}" | ||||
| authelia_config_session_redis_username: "{{ authelia_redis_user }}" | ||||
| @@ -180,7 +149,8 @@ authelia_config_storage_postgres_ssl_certificate: disable | ||||
| authelia_config_storage_postgres_ssl_key: disable | ||||
| authelia_config_notifier_disable_startup_check: false | ||||
| authelia_config_notifier_filesystem_filename: ~ | ||||
| authelia_config_notifier_smtp_address: "{{ authelia_smtp_host }}:{{ authelia_stmp_port }}" | ||||
| authelia_config_notifier_smtp_host: "{{ authelia_smtp_host }}" | ||||
| authelia_config_notifier_smtp_port: "{{ authelia_stmp_port }}" | ||||
| authelia_config_notifier_smtp_username: "{{ authelia_smtp_user }}" | ||||
| authelia_config_notifier_smtp_password: "{{ authelia_smtp_pass }}" | ||||
| authelia_config_notifier_smtp_timeout: 5s | ||||
| @@ -196,12 +166,6 @@ authelia_config_notifier_smtp_tls_minimum_version: "{{ authelia_tls_minimum_vers | ||||
|  | ||||
| authelia_database_type: ~ | ||||
| authelia_database_host: ~ | ||||
| authelia_database_port: ~ | ||||
| authelia_database_address: >-2 | ||||
|   {{ authelia_database_host }}{{ | ||||
|     (authelia_database_port | default(false, true) | bool) | ||||
|     | ternary(':' + authelia_database_port, '') | ||||
|   }} | ||||
| authelia_database_user: authelia | ||||
| authelia_database_pass: ~ | ||||
| authelia_database_name: authelia | ||||
|   | ||||
| @@ -1,9 +0,0 @@ | ||||
| --- | ||||
| allow_duplicates: true | ||||
| dependencies: [] | ||||
| galaxy_info: | ||||
|   role_name: authelia | ||||
|   description: Ansible role to deploy authelia using docker | ||||
|   galaxy_tags: | ||||
|     - authelia | ||||
|     - docker | ||||
| @@ -1,14 +1,14 @@ | ||||
| --- | ||||
|  | ||||
| - name: Ensure user {{ authelia_user }} exists | ||||
|   ansible.builtin.user: | ||||
|   user: | ||||
|     name: "{{ authelia_user }}" | ||||
|     state: present | ||||
|     system: true | ||||
|   register: authelia_user_info | ||||
|  | ||||
| - name: Ensure host directories are created with correct permissions | ||||
|   ansible.builtin.file: | ||||
|   file: | ||||
|     path: "{{ item.path }}" | ||||
|     state: directory | ||||
|     owner: "{{ item.owner | default(authelia_user) }}" | ||||
| @@ -26,7 +26,7 @@ | ||||
|       mode: "0750" | ||||
|  | ||||
| - name: Ensure config file is generated | ||||
|   ansible.builtin.copy: | ||||
|   copy: | ||||
|     content: "{{ authelia_config | to_nice_yaml(indent=2, width=10000) }}" | ||||
|     dest: "{{ authelia_config_file }}" | ||||
|     owner: "{{ authelia_run_user }}" | ||||
| @@ -35,7 +35,7 @@ | ||||
|   notify: restart-authelia | ||||
|  | ||||
| - name: Ensure sqlite database file exists before mounting it | ||||
|   ansible.builtin.file: | ||||
|   file: | ||||
|     path: "{{ authelia_sqlite_storage_file }}" | ||||
|     state: touch | ||||
|     owner: "{{ authelia_run_user }}" | ||||
| @@ -46,7 +46,7 @@ | ||||
|   when: authelia_config_storage_local_path | default(false, true) | ||||
|  | ||||
| - name: Ensure user database exists before mounting it | ||||
|   ansible.builtin.file: | ||||
|   file: | ||||
|     path: "{{ authelia_user_storage_file }}" | ||||
|     state: touch | ||||
|     owner: "{{ authelia_run_user }}" | ||||
| @@ -57,7 +57,7 @@ | ||||
|   when: authelia_config_authentication_backend_file_path | default(false, true) | ||||
|  | ||||
| - name: Ensure notification reports file exists before mounting it | ||||
|   ansible.builtin.file: | ||||
|   file: | ||||
|     path: "{{ authelia_notification_storage_file }}" | ||||
|     state: touch | ||||
|     owner: "{{ authelia_run_user }}" | ||||
| @@ -76,7 +76,7 @@ | ||||
|   register: authelia_container_image_info | ||||
|  | ||||
| - name: Ensure authelia container is running | ||||
|   community.docker.docker_container: | ||||
|   docker_container: | ||||
|     name: "{{ authelia_container_name }}" | ||||
|     image: "{{ authelia_container_image_ref }}" | ||||
|     env: "{{ authelia_container_env }}" | ||||
| @@ -85,9 +85,7 @@ | ||||
|     labels: "{{ authelia_container_labels }}" | ||||
|     volumes: "{{ authelia_container_volumes }}" | ||||
|     networks: "{{ authelia_container_networks | default(omit, true) }}" | ||||
|     etc_hosts: "{{ authelia_container_etc_hosts | default(omit, true) }}" | ||||
|     purge_networks: "{{ authelia_container_purge_networks | default(omit, true)}}" | ||||
|     restart_policy: "{{ authelia_container_restart_policy }}" | ||||
|     recreate: "{{ authelia_container_recreate | default(omit, true) }}" | ||||
|     state: "{{ authelia_container_state }}" | ||||
|   register: authelia_container_info | ||||
|   | ||||
| @@ -48,20 +48,18 @@ authelia_base_config: >-2 | ||||
| authelia_config_server: >-2 | ||||
|   {{ | ||||
|     { | ||||
|       "address": authelia_config_server_address, | ||||
|       "host": authelia_config_server_host, | ||||
|       "port": authelia_config_server_port, | ||||
|       "path": authelia_config_server_path, | ||||
|       "asset_path": authelia_config_server_asset_path, | ||||
|       "read_buffer_size": authelia_config_server_read_buffer_size, | ||||
|       "write_buffer_size": authelia_config_server_write_buffer_size, | ||||
|       "enable_pprof": authelia_config_server_enable_pprof, | ||||
|       "enable_expvars": authelia_config_server_enable_expvars, | ||||
|       "disable_healthcheck": authelia_config_server_disable_healthcheck, | ||||
|       "endpoints": authelia_config_server_endpoints, | ||||
|       "buffers": authelia_config_server_buffers, | ||||
|     } | combine({"headers": {"csp_template": authelia_config_server_headers_csp_template}} | ||||
|         if authelia_config_server_headers_csp_template | default(false, true) else {}) | ||||
|   }} | ||||
| authelia_config_server_endpoints: | ||||
|   enable_expvars: "{{ authelia_config_server_endpoints_enable_expvars }}" | ||||
|   enable_pprof: "{{ authelia_config_server_endpoints_enable_pprof }}" | ||||
| authelia_config_server_buffers: | ||||
|   read: "{{ authelia_config_server_buffers_read }}" | ||||
|   write: "{{ authelia_config_server_buffers_write }}" | ||||
| authelia_config_server_tls: | ||||
|   key: "{{ authelia_config_server_tls_key }}" | ||||
|   certificate: "{{ authelia_config_server_tls_certificate }}" | ||||
| @@ -134,11 +132,10 @@ authelia_config_authentication_backend_ldap: | ||||
|   additional_groups_dn: "{{ authelia_config_authentication_backend_ldap_additional_groups_dn }}"  | ||||
|   users_filter: "{{ authelia_config_authentication_backend_ldap_users_filter }}"  | ||||
|   groups_filter: "{{ authelia_config_authentication_backend_ldap_groups_filter }}" | ||||
|   attributes: | ||||
|     username: "{{ authelia_config_authentication_backend_ldap_attributes_username }}" | ||||
|     mail: "{{ authelia_config_authentication_backend_ldap_attributes_mail }}" | ||||
|     display_name: "{{ authelia_config_authentication_backend_ldap_attributes_display_name }}" | ||||
|     group_name: "{{ authelia_config_authentication_backend_ldap_attributes_group_name }}" | ||||
|   group_name_attribute: "{{ authelia_config_authentication_backend_ldap_group_name_attribute }}" | ||||
|   username_attribute: "{{ authelia_config_authentication_backend_ldap_username_attribute }}" | ||||
|   mail_attribute: "{{ authelia_config_authentication_backend_ldap_mail_attribute }}" | ||||
|   display_name_attribute: "{{ authelia_config_authentication_backend_ldap_display_name_attribute }}" | ||||
|   user: "{{ authelia_config_authentication_backend_ldap_user }}" | ||||
|   password: "{{ authelia_config_authentication_backend_ldap_password }}" | ||||
| authelia_config_authentication_backend_file: | ||||
| @@ -177,7 +174,7 @@ authelia_config_session: | ||||
|   secret: "{{ authelia_config_session_secret }}" | ||||
|   expiration: "{{ authelia_config_session_expiration }}"  | ||||
|   inactivity: "{{ authelia_config_session_inactivity }}" | ||||
|   remember_me: "{{ authelia_config_session_remember_me }}" | ||||
|   remember_me_duration: "{{ authelia_config_session_remember_me_duration }}" | ||||
| authelia_config_session_redis: >-2 | ||||
|   {{ | ||||
|     { | ||||
| @@ -221,13 +218,15 @@ authelia_config_storage: >-2 | ||||
| authelia_config_storage_local: | ||||
|   path: "{{ authelia_config_storage_local_path }}" | ||||
| authelia_config_storage_mysql: | ||||
|   host: "{{ authelia_database_address }}" | ||||
|   host: "{{ authelia_database_host }}" | ||||
|   port: "{{ authelia_config_storage_mysql_port }}" | ||||
|   database: "{{ authelia_database_name }}" | ||||
|   username: "{{ authelia_database_user }}" | ||||
|   password: "{{ authelia_database_pass }}" | ||||
|   timeout: "{{ authelia_database_timeout }}" | ||||
| authelia_config_storage_postgres: | ||||
|   address: "{{ authelia_database_address }}" | ||||
|   host: "{{ authelia_database_host }}" | ||||
|   port: "{{ authelia_config_storage_postgres_port }}" | ||||
|   database: "{{ authelia_database_name }}" | ||||
|   schema: public | ||||
|   username: "{{ authelia_database_user }}" | ||||
| @@ -251,7 +250,8 @@ authelia_config_notifier: >-2 | ||||
| authelia_config_notifier_filesystem: | ||||
|   filename: "{{ authelia_config_notifier_filesystem_filename }}" | ||||
| authelia_config_notifier_smtp: | ||||
|   address: "{{ authelia_config_notifier_smtp_address }}" | ||||
|   host: "{{ authelia_config_notifier_smtp_host }}" | ||||
|   port: "{{ authelia_config_notifier_smtp_port }}" | ||||
|   timeout: "{{ authelia_config_notifier_smtp_timeout }}" | ||||
|   username: "{{ authelia_config_notifier_smtp_username }}" | ||||
|   password: "{{ authelia_config_notifier_smtp_password }}" | ||||
|   | ||||
							
								
								
									
										22
									
								
								roles/elasticsearch/README.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										22
									
								
								roles/elasticsearch/README.md
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,22 @@ | ||||
| # `finallycoffee.services.elastiscsearch` | ||||
|  | ||||
| A simple ansible role which deploys a single-node elastic container to provide | ||||
| an easy way to do some indexing. | ||||
|  | ||||
| ## Usage | ||||
|  | ||||
| Per default, `/opt/elasticsearch/data` is used to persist data, it is | ||||
| customizable by using either `elasticsearch_base_path` or `elasticsearch_data_path`. | ||||
|  | ||||
| As elasticsearch be can be quite memory heavy, the maximum amount of allowed RAM | ||||
| can be configured using `elasticsearch_allocated_ram_mb`, defaulting to 512 (mb). | ||||
|  | ||||
| The cluster name and discovery type can be overridden using | ||||
| `elasticsearch_config_cluster_name` (default: elastic) and | ||||
| `elasticsearch_config_discovery_type` (default: single-node), should one | ||||
| need a multi-node elasticsearch deployment. | ||||
|  | ||||
| Per default, no ports or networks are mapped, and explizit mapping using | ||||
| either ports (`elasticsearch_container_ports`) or networks | ||||
| (`elasticsearch_container_networks`) is required in order for other services | ||||
| to use elastic. | ||||
							
								
								
									
										35
									
								
								roles/elasticsearch/defaults/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										35
									
								
								roles/elasticsearch/defaults/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,35 @@ | ||||
| --- | ||||
|  | ||||
| elasticsearch_version: 7.17.7 | ||||
|  | ||||
| elasticsearch_base_path: /opt/elasticsearch | ||||
| elasticsearch_data_path: "{{ elasticsearch_base_path }}/data" | ||||
|  | ||||
| elasticsearch_config_cluster_name: elastic | ||||
| elasticsearch_config_discovery_type: single-node | ||||
| elasticsearch_config_boostrap_memory_lock: true | ||||
| elasticsearch_allocated_ram_mb: 512 | ||||
|  | ||||
| elasticsearch_container_image_name: docker.elastic.co/elasticsearch/elasticsearch-oss | ||||
| elasticsearch_container_image_tag: ~ | ||||
| elasticsearch_container_image: >- | ||||
|   {{ elasticsearch_container_image_name }}:{{ elasticsearch_container_image_tag | default(elasticsearch_version, true) }} | ||||
|  | ||||
| elasticsearch_container_name: elasticsearch | ||||
| elasticsearch_container_env: | ||||
|   "ES_JAVA_OPTS": "-Xms{{ elasticsearch_allocated_ram_mb }}m -Xmx{{ elasticsearch_allocated_ram_mb }}m" | ||||
|   "cluster.name": "{{ elasticsearch_config_cluster_name }}" | ||||
|   "discovery.type": "{{ elasticsearch_config_discovery_type }}" | ||||
|   "bootstrap.memory_lock": "{{ 'true' if elasticsearch_config_boostrap_memory_lock else 'false' }}" | ||||
| elasticsearch_container_user: ~ | ||||
| elasticsearch_container_ports: ~ | ||||
| elasticsearch_container_labels: | ||||
|   version: "{{ elasticsearch_version }}" | ||||
| elasticsearch_container_ulimits: | ||||
| #  - "memlock:{{ (1.5 * 1024 * elasticsearch_allocated_ram_mb) | int }}:{{ (1.5 * 1024 * elasticsearch_allocated_ram_mb) | int }}" | ||||
|   - "memlock:-1:-1" | ||||
| elasticsearch_container_volumes: | ||||
|   - "{{ elasticsearch_data_path }}:/usr/share/elasticsearch/data:z" | ||||
| elasticsearch_container_networks: ~ | ||||
| elasticsearch_container_purge_networks: ~ | ||||
| elasticsearch_container_restart_policy: unless-stopped | ||||
							
								
								
									
										32
									
								
								roles/elasticsearch/tasks/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										32
									
								
								roles/elasticsearch/tasks/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,32 @@ | ||||
| --- | ||||
|  | ||||
| - name: Ensure host directories are present | ||||
|   file: | ||||
|     path: "{{ item }}" | ||||
|     state: directory | ||||
|     mode: "0777" | ||||
|   loop: | ||||
|     - "{{ elasticsearch_base_path }}" | ||||
|     - "{{ elasticsearch_data_path }}" | ||||
|  | ||||
| - name: Ensure elastic container image is present | ||||
|   docker_image: | ||||
|     name: "{{ elasticsearch_container_image }}" | ||||
|     state: present | ||||
|     source: pull | ||||
|     force_source: "{{ elasticsearch_container_image_tag|default(false, true)|bool }}" | ||||
|  | ||||
| - name: Ensure elastic container is running | ||||
|   docker_container: | ||||
|     name: "{{ elasticsearch_container_name }}" | ||||
|     image: "{{ elasticsearch_container_image }}" | ||||
|     env: "{{ elasticsearch_container_env | default(omit, True) }}" | ||||
|     user: "{{ elasticsearch_container_user | default(omit, True) }}" | ||||
|     ports: "{{ elasticsearch_container_ports | default(omit, True) }}" | ||||
|     labels: "{{ elasticsearch_container_labels | default(omit, True) }}" | ||||
|     volumes: "{{ elasticsearch_container_volumes }}" | ||||
|     ulimits: "{{ elasticsearch_container_ulimits }}" | ||||
|     networks: "{{ elasticsearch_container_networks | default(omit, True) }}" | ||||
|     purge_networks: "{{ elasticsearch_container_purge_networks | default(omit, True) }}" | ||||
|     restart_policy: "{{ elasticsearch_container_restart_policy }}" | ||||
|     state: started | ||||
| @@ -1,18 +0,0 @@ | ||||
| # `finallycoffee.services.ghost` ansible role | ||||
|  | ||||
| [Ghost](https://ghost.org/) is a self-hosted blog with rich media capabilities, | ||||
| which this role deploys in a docker container. | ||||
|  | ||||
| ## Requirements | ||||
|  | ||||
| Ghost requires a MySQL-database (like mariadb) for storing it's data, which | ||||
| can be configured using the `ghost_database_(host|username|password|database)` variables. | ||||
|  | ||||
| Setting `ghost_domain` to a fully-qualified domain on which ghost should be reachable | ||||
| is also required. | ||||
|  | ||||
| Ghosts configuration can be changed using the `ghost_config` variable. | ||||
|  | ||||
| Container arguments which are equivalent to `community.docker.docker_container` can be | ||||
| provided in the `ghost_container_[...]` syntax (e.g. `ghost_container_ports` to expose | ||||
| ghosts port to the host). | ||||
| @@ -1,7 +1,7 @@ | ||||
| --- | ||||
|  | ||||
| ghost_domain: ~ | ||||
| ghost_version: "5.95.0" | ||||
| ghost_version: "5.33.6" | ||||
| ghost_user: ghost | ||||
| ghost_user_group: ghost | ||||
| ghost_base_path: /opt/ghost | ||||
| @@ -36,4 +36,3 @@ ghost_container_restart_policy: "unless-stopped" | ||||
| ghost_container_networks: ~ | ||||
| ghost_container_purge_networks: ~ | ||||
| ghost_container_etc_hosts: ~ | ||||
| ghost_container_state: started | ||||
|   | ||||
| @@ -1,10 +0,0 @@ | ||||
| --- | ||||
| allow_duplicates: true | ||||
| dependencies: [] | ||||
| galaxy_info: | ||||
|   role_name: ghost | ||||
|   description: Ansible role to deploy ghost (https://ghost.org) using docker | ||||
|   galaxy_tags: | ||||
|     - ghost | ||||
|     - blog | ||||
|     - docker | ||||
| @@ -16,16 +16,15 @@ | ||||
|  | ||||
| - name: Ensure host paths for docker volumes exist for ghost | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ item.path }}" | ||||
|     path: "{{ item }}" | ||||
|     state: directory | ||||
|     mode: "0750" | ||||
|     owner: "{{ item.owner | default(ghost_user) }}" | ||||
|     group: "{{ item.group | default(ghost_user_group) }}" | ||||
|     owner: "{{ ghost_user }}" | ||||
|     group: "{{ ghost_user_group }}" | ||||
|   loop: | ||||
|     - path: "{{ ghost_base_path }}" | ||||
|     - path: "{{ ghost_data_path }}" | ||||
|       owner: "1000" | ||||
|     - path: "{{ ghost_config_path }}" | ||||
|     - "{{ ghost_base_path }}" | ||||
|     - "{{ ghost_data_path }}" | ||||
|     - "{{ ghost_config_path }}" | ||||
|  | ||||
| - name: Ensure ghost configuration file is templated | ||||
|   ansible.builtin.template: | ||||
| @@ -42,7 +41,7 @@ | ||||
|     source: pull | ||||
|     force_source: "{{ ghost_container_image_tag is defined }}" | ||||
|  | ||||
| - name: Ensure ghost container '{{ ghost_container_name }}' is {{ ghost_container_state }} | ||||
| - name: Ensure ghost container is running | ||||
|   community.docker.docker_container: | ||||
|     name: "{{ ghost_container_name }}" | ||||
|     image: "{{ ghost_container_image }}" | ||||
| @@ -54,4 +53,4 @@ | ||||
|     networks: "{{ ghost_container_networks | default(omit, true) }}" | ||||
|     purge_networks: "{{ ghost_container_purge_networks | default(omit, true) }}" | ||||
|     restart_policy: "{{ ghost_container_restart_policy }}" | ||||
|     state: "{{ ghost_container_state }}" | ||||
|     state: started | ||||
|   | ||||
| @@ -1,8 +1,7 @@ | ||||
| --- | ||||
|  | ||||
| gitea_version: "1.22.2" | ||||
| gitea_version: "1.19.4" | ||||
| gitea_user: git | ||||
| gitea_run_user: "{{ gitea_user }}" | ||||
| gitea_base_path: "/opt/gitea" | ||||
| gitea_data_path: "{{ gitea_base_path }}/data" | ||||
|  | ||||
| @@ -10,29 +9,17 @@ gitea_data_path: "{{ gitea_base_path }}/data" | ||||
| gitea_domain: ~ | ||||
|  | ||||
| # container config | ||||
| gitea_container_name: "{{ gitea_user }}" | ||||
| gitea_contianer_image_server: "docker.io" | ||||
| gitea_container_image_name: "gitea" | ||||
| gitea_container_image_namespace: gitea | ||||
| gitea_container_image_fq_name: >- | ||||
|   {{ | ||||
|     [ | ||||
|       gitea_container_image_server, | ||||
|       gitea_container_image_namespace, | ||||
|       gitea_container_image_name | ||||
|     ] | join('/') | ||||
|   }} | ||||
| gitea_container_name: "git" | ||||
| gitea_container_image_name: "docker.io/gitea/gitea" | ||||
| gitea_container_image_tag: "{{ gitea_version }}" | ||||
| gitea_container_image: >-2 | ||||
|   {{ gitea_container_image_fq_name }}:{{ gitea_container_image_tag }} | ||||
| gitea_container_image: "{{ gitea_container_image_name }}:{{ gitea_container_image_tag }}" | ||||
| gitea_container_networks: [] | ||||
| gitea_container_purge_networks: ~ | ||||
| gitea_container_restart_policy: "unless-stopped" | ||||
| gitea_container_extra_env: {} | ||||
| gitea_container_extra_labels: {} | ||||
| gitea_contianer_extra_labels: {} | ||||
| gitea_container_extra_ports: [] | ||||
| gitea_container_extra_volumes: [] | ||||
| gitea_container_state: started | ||||
|  | ||||
| # container defaults | ||||
| gitea_container_base_volumes: | ||||
| @@ -53,10 +40,10 @@ gitea_container_base_labels: | ||||
| gitea_config_mailer_enabled: false | ||||
| gitea_config_mailer_type: ~ | ||||
| gitea_config_mailer_from_addr: ~ | ||||
| gitea_config_mailer_smtp_addr: ~ | ||||
| gitea_config_mailer_host: ~ | ||||
| gitea_config_mailer_user: ~ | ||||
| gitea_config_mailer_passwd: ~ | ||||
| gitea_config_mailer_protocol: ~ | ||||
| gitea_config_mailer_tls: ~ | ||||
| gitea_config_mailer_sendmail_path: ~ | ||||
| gitea_config_metrics_enabled: false | ||||
|  | ||||
|   | ||||
| @@ -1,10 +0,0 @@ | ||||
| --- | ||||
| allow_duplicates: true | ||||
| dependencies: [] | ||||
| galaxy_info: | ||||
|   role_name: gitea | ||||
|   description: Ansible role to deploy gitea using docker | ||||
|   galaxy_tags: | ||||
|     - gitea | ||||
|     - git | ||||
|     - docker | ||||
| @@ -1,15 +1,14 @@ | ||||
| --- | ||||
|  | ||||
| - name: Ensure gitea user '{{ gitea_user }}' is present | ||||
|   ansible.builtin.user: | ||||
| - name: Create gitea user | ||||
|   user: | ||||
|     name: "{{ gitea_user }}" | ||||
|     state: "present" | ||||
|     system: false | ||||
|     create_home: true | ||||
|     state: present | ||||
|     system: no | ||||
|   register: gitea_user_res | ||||
|  | ||||
| - name: Ensure host directories exist | ||||
|   ansible.builtin.file: | ||||
|   file: | ||||
|     path: "{{ item }}" | ||||
|     owner: "{{ gitea_user_res.uid }}" | ||||
|     group: "{{ gitea_user_res.group }}" | ||||
| @@ -19,7 +18,7 @@ | ||||
|     - "{{ gitea_data_path }}" | ||||
|  | ||||
| - name: Ensure .ssh folder for gitea user exists | ||||
|   ansible.builtin.file: | ||||
|   file: | ||||
|     path: "/home/{{ gitea_user }}/.ssh" | ||||
|     state: directory | ||||
|     owner: "{{ gitea_user_res.uid }}" | ||||
| @@ -38,16 +37,16 @@ | ||||
|   register: gitea_user_ssh_key | ||||
|  | ||||
| - name: Create forwarding script | ||||
|   ansible.builtin.copy: | ||||
|   copy: | ||||
|     dest: "/usr/local/bin/gitea" | ||||
|     owner: "{{ gitea_user_res.uid }}" | ||||
|     group: "{{ gitea_user_res.group }}" | ||||
|     mode: 0700 | ||||
|     content: | | ||||
|       ssh -p {{ gitea_public_ssh_server_port }} -o StrictHostKeyChecking=no {{ gitea_run_user }}@127.0.0.1 -i /home/{{ gitea_user }}/.ssh/id_ssh_ed25519 "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $0 $@" | ||||
|       ssh -p {{ gitea_public_ssh_server_port }} -o StrictHostKeyChecking=no {{ gitea_user }}@127.0.0.1 -i /home/{{ gitea_user }}/.ssh/id_ssh_ed25519 "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $0 $@" | ||||
|  | ||||
| - name: Add host pubkey to git users authorized_keys file | ||||
|   ansible.builtin.lineinfile: | ||||
|   lineinfile: | ||||
|     path: "/home/{{ gitea_user }}/.ssh/authorized_keys" | ||||
|     line: "{{ gitea_user_ssh_key.public_key }} Gitea:Host2Container" | ||||
|     state: present | ||||
| @@ -57,27 +56,26 @@ | ||||
|     mode: 0600 | ||||
|  | ||||
| - name: Ensure gitea container image is present | ||||
|   community.docker.docker_image: | ||||
|   docker_image: | ||||
|     name: "{{ gitea_container_image }}" | ||||
|     state: present | ||||
|     source: pull | ||||
|     force_source: "{{ gitea_container_image.endswith(':latest') }}" | ||||
|  | ||||
| - name: Ensure container '{{ gitea_container_name }}' with gitea is {{ gitea_container_state }} | ||||
|   community.docker.docker_container: | ||||
| - name: Ensure container '{{ gitea_container_name }}' with gitea is running | ||||
|   docker_container: | ||||
|     name: "{{ gitea_container_name }}" | ||||
|     image: "{{ gitea_container_image }}" | ||||
|     env: "{{ gitea_container_env }}" | ||||
|     labels: "{{ gitea_container_labels }}" | ||||
|     volumes: "{{ gitea_container_volumes }}" | ||||
|     networks: "{{ gitea_container_networks | default(omit, True) }}" | ||||
|     purge_networks: "{{ gitea_container_purge_networks | default(omit, True) }}" | ||||
|     published_ports: "{{ gitea_container_ports }}" | ||||
|     restart_policy: "{{ gitea_container_restart_policy }}" | ||||
|     state: "{{ gitea_container_state }}" | ||||
|     state: started | ||||
|  | ||||
| - name: Ensure given configuration is set in the config file | ||||
|   ansible.builtin.ini_file: | ||||
|   ini_file: | ||||
|     path: "{{ gitea_data_path }}/gitea/conf/app.ini" | ||||
|     section: "{{ section }}" | ||||
|     option: "{{ option }}" | ||||
|   | ||||
| @@ -14,7 +14,7 @@ gitea_container_port_ssh: 22 | ||||
|  | ||||
| gitea_config_base: | ||||
|   RUN_MODE: prod | ||||
|   RUN_USER: "{{ gitea_run_user }}" | ||||
|   RUN_USER: "{{ gitea_user }}" | ||||
|   server: | ||||
|     SSH_DOMAIN: "{{ gitea_domain }}" | ||||
|     DOMAIN: "{{ gitea_domain }}" | ||||
| @@ -24,11 +24,11 @@ gitea_config_base: | ||||
|   mailer: | ||||
|     ENABLED: "{{ gitea_config_mailer_enabled }}" | ||||
|     MAILER_TYP: "{{ gitea_config_mailer_type }}" | ||||
|     SMTP_ADDR: "{{ gitea_config_mailer_smtp_addr }}" | ||||
|     HOST: "{{ gitea_config_mailer_host }}" | ||||
|     USER: "{{ gitea_config_mailer_user }}" | ||||
|     PASSWD: "{{ gitea_config_mailer_passwd }}" | ||||
|     PROTOCOL: "{{ gitea_config_mailer_protocol }}" | ||||
|     FROM: "{{ gitea_config_mailer_from }}" | ||||
|     IS_TLS_ENABLED: "{{ gitea_config_mailer_tls }}" | ||||
|     FROM: "{{ gitea_config_mailer_from_addr }}" | ||||
|     SENDMAIL_PATH: "{{ gitea_config_mailer_sendmail_path }}" | ||||
|   metrics: | ||||
|     ENABLED: "{{ gitea_config_metrics_enabled }}" | ||||
|   | ||||
| @@ -1,15 +0,0 @@ | ||||
| # `finallycoffee.services.jellyfin` ansible role | ||||
|  | ||||
| This role runs [Jellyfin](https://jellyfin.org/), a free software media system, | ||||
| in a docker container. | ||||
|  | ||||
| ## Usage | ||||
|  | ||||
| `jellyfin_domain` contains the FQDN which jellyfin should listen to. Most configuration | ||||
| is done in the software itself. | ||||
|  | ||||
| Jellyfin runs in host networking mode by default, as that is needed for some features like | ||||
| network discovery with chromecasts and similar. | ||||
|  | ||||
| Media can be mounted into jellyfin using `jellyfin_media_volumes`, taking a list of strings | ||||
| akin to `community.docker.docker_container`'s `volumes` key. | ||||
| @@ -1,7 +1,7 @@ | ||||
| --- | ||||
|  | ||||
| jellyfin_user: jellyfin | ||||
| jellyfin_version: 10.9.11 | ||||
| jellyfin_version: 10.8.6 | ||||
|  | ||||
| jellyfin_base_path: /opt/jellyfin | ||||
| jellyfin_config_path: "{{ jellyfin_base_path }}/config" | ||||
|   | ||||
| @@ -1,10 +0,0 @@ | ||||
| --- | ||||
| allow_duplicates: true | ||||
| dependencies: [] | ||||
| galaxy_info: | ||||
|   role_name: jellyfin | ||||
|   description: Ansible role to deploy jellyfin using docker | ||||
|   galaxy_tags: | ||||
|     - jellyfin | ||||
|     - streaming | ||||
|     - docker | ||||
| @@ -1,14 +1,14 @@ | ||||
| --- | ||||
|  | ||||
| - name: Ensure user '{{ jellyfin_user }}' for jellyfin is created | ||||
|   ansible.builtin.user: | ||||
|   user: | ||||
|     name: "{{ jellyfin_user }}" | ||||
|     state: present | ||||
|     system: yes | ||||
|   register: jellyfin_user_info | ||||
|  | ||||
| - name: Ensure host directories for jellyfin exist | ||||
|   ansible.builtinfile: | ||||
|   file: | ||||
|     path: "{{ item.path }}" | ||||
|     state: directory | ||||
|     owner: "{{ item.owner  | default(jellyfin_uid) }}" | ||||
| @@ -17,7 +17,7 @@ | ||||
|   loop: "{{ jellyfin_host_directories }}" | ||||
|  | ||||
| - name: Ensure container image for jellyfin is available | ||||
|   community.docker.docker_image: | ||||
|   docker_image: | ||||
|     name: "{{ jellyfin_container_image_ref }}" | ||||
|     state: present | ||||
|     source: pull | ||||
| @@ -28,7 +28,7 @@ | ||||
|   delay: 3 | ||||
|  | ||||
| - name: Ensure container '{{ jellyfin_container_name }}' is running | ||||
|   community.docker.docker_container: | ||||
|   docker_container: | ||||
|     name: "{{ jellyfin_container_name }}" | ||||
|     image: "{{ jellyfin_container_image_ref }}" | ||||
|     user: "{{ jellyfin_uid }}:{{ jellyfin_gid }}" | ||||
|   | ||||
							
								
								
									
										29
									
								
								roles/minio/README.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										29
									
								
								roles/minio/README.md
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,29 @@ | ||||
| # `finallycoffee.services.minio` ansible role | ||||
|  | ||||
| ## Overview | ||||
|  | ||||
| This role deploys a [min.io](https://min.io) server (s3-compatible object storage server) | ||||
| using the official docker container image. | ||||
|  | ||||
| ## Configuration | ||||
|  | ||||
| The role requires setting the password for the `root` user (name can be changed by | ||||
| setting `minio_root_username`) in `minio_root_password`. That user has full control | ||||
| over the minio-server instance. | ||||
|  | ||||
| ### Useful config hints | ||||
|  | ||||
| Most configuration is done by setting environment variables in | ||||
| `minio_container_extra_env`, for example: | ||||
|  | ||||
| ```yaml | ||||
| minio_container_extra_env: | ||||
|   # disable the "console" web browser UI | ||||
|   MINIO_BROWSER: off | ||||
|   # enable public prometheus metrics on `/minio/v2/metrics/cluster` | ||||
|   MINIO_PROMETHEUS_AUTH_TYPE: public | ||||
| ``` | ||||
|  | ||||
| When serving minio (or any s3-compatible server) on a "subfolder", | ||||
| see https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTRedirect.html | ||||
| and https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html | ||||
							
								
								
									
										40
									
								
								roles/minio/defaults/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										40
									
								
								roles/minio/defaults/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,40 @@ | ||||
| --- | ||||
|  | ||||
| minio_user: ~ | ||||
| minio_data_path: /opt/minio | ||||
|  | ||||
| minio_create_user: false | ||||
| minio_manage_host_filesystem: false | ||||
|  | ||||
| minio_root_username: root | ||||
| minio_root_password: ~ | ||||
|  | ||||
| minio_container_name: minio | ||||
| minio_container_image_name: docker.io/minio/minio | ||||
| minio_container_image_tag: latest | ||||
| minio_container_image: "{{ minio_container_image_name }}:{{ minio_container_image_tag }}" | ||||
| minio_container_networks: [] | ||||
| minio_container_ports: [] | ||||
|  | ||||
| minio_container_base_volumes: | ||||
|   - "{{ minio_data_path }}:{{ minio_container_data_path }}:z" | ||||
| minio_container_extra_volumes: [] | ||||
|  | ||||
| minio_container_base_env: | ||||
|   MINIO_ROOT_USER: "{{ minio_root_username }}" | ||||
|   MINIO_ROOT_PASSWORD: "{{ minio_root_password }}" | ||||
| minio_container_extra_env: {} | ||||
|  | ||||
| minio_container_labels: {} | ||||
|  | ||||
| minio_container_command: | ||||
|   - "server" | ||||
|   - "{{ minio_container_data_path }}" | ||||
|   - "--console-address \":{{ minio_container_listen_port_console }}\"" | ||||
| minio_container_restart_policy: "unless-stopped" | ||||
| minio_container_image_force_source: "{{ (minio_container_image_tag == 'latest')|bool }}" | ||||
|  | ||||
| minio_container_listen_port_api: 9000 | ||||
| minio_container_listen_port_console: 8900 | ||||
|  | ||||
| minio_container_data_path: /storage | ||||
							
								
								
									
										37
									
								
								roles/minio/tasks/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										37
									
								
								roles/minio/tasks/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,37 @@ | ||||
| --- | ||||
|  | ||||
| - name: Ensure minio run user is present | ||||
|   user: | ||||
|     name: "{{ minio_user }}" | ||||
|     state: present | ||||
|     system: yes | ||||
|   when: minio_create_user | ||||
|  | ||||
| - name: Ensure filesystem mounts ({{ minio_data_path }}) for container volumes are present | ||||
|   file: | ||||
|     path: "{{ minio_data_path }}" | ||||
|     state: directory | ||||
|     user: "{{ minio_user|default(omit, True) }}" | ||||
|     group: "{{ minio_user|default(omit, True) }}" | ||||
|   when: minio_manage_host_filesystem | ||||
|  | ||||
| - name: Ensure container image for minio is present | ||||
|   community.docker.docker_image: | ||||
|     name: "{{ minio_container_image }}" | ||||
|     state: present | ||||
|     source: pull | ||||
|     force_source: "{{ minio_container_image_force_source }}" | ||||
|  | ||||
| - name: Ensure container {{ minio_container_name }} is running | ||||
|   docker_container: | ||||
|     name: "{{ minio_container_name }}" | ||||
|     image: "{{ minio_container_image }}" | ||||
|     volumes: "{{ minio_container_volumes }}" | ||||
|     env: "{{ minio_container_env }}" | ||||
|     labels: "{{ minio_container_labels }}" | ||||
|     networks: "{{ minio_container_networks }}" | ||||
|     ports: "{{ minio_container_ports }}" | ||||
|     user: "{{ minio_user|default(omit, True) }}" | ||||
|     command: "{{ minio_container_command }}" | ||||
|     restart_policy: "{{ minio_container_restart_policy }}" | ||||
|     state: started | ||||
							
								
								
									
										5
									
								
								roles/minio/vars/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										5
									
								
								roles/minio/vars/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,5 @@ | ||||
| --- | ||||
|  | ||||
| minio_container_volumes: "{{ minio_container_base_volumes + minio_container_extra_volumes }}" | ||||
|  | ||||
| minio_container_env: "{{ minio_container_base_env | combine(minio_container_extra_env) }}" | ||||
							
								
								
									
										33
									
								
								roles/nginx/defaults/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										33
									
								
								roles/nginx/defaults/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,33 @@ | ||||
| --- | ||||
|  | ||||
| nginx_version: "1.25.1" | ||||
| nginx_flavour: alpine | ||||
| nginx_base_path: /opt/nginx | ||||
| nginx_config_file: "{{ nginx_base_path }}/nginx.conf" | ||||
|  | ||||
| nginx_container_name: nginx | ||||
| nginx_container_image_reference: >- | ||||
|   {{ | ||||
|     nginx_container_image_repository | ||||
|     + ':' + (nginx_container_image_tag | ||||
|       | default(nginx_version | ||||
|       + (('-' + nginx_flavour) if nginx_flavour is defined else ''), true)) | ||||
|   }} | ||||
| nginx_container_image_repository: >- | ||||
|   {{ | ||||
|     ( | ||||
|       container_registries[nginx_container_image_registry] | ||||
|       | default(nginx_container_image_registry) | ||||
|     ) | ||||
|     + '/' | ||||
|     + nginx_container_image_namespace | default('') | ||||
|     + nginx_container_image_name | ||||
|   }} | ||||
| nginx_container_image_registry: "docker.io" | ||||
| nginx_container_image_name: "nginx" | ||||
| nginx_container_image_tag: ~ | ||||
|  | ||||
| nginx_container_restart_policy: "unless-stopped" | ||||
| nginx_container_volumes: | ||||
|   - "{{ nginx_config_file }}:/etc/nginx/conf.d/nginx.conf:ro" | ||||
|   | ||||
							
								
								
									
										8
									
								
								roles/nginx/handlers/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										8
									
								
								roles/nginx/handlers/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,8 @@ | ||||
| --- | ||||
|  | ||||
| - name: Ensure nginx container '{{ nginx_container_name }}' is restarted | ||||
|   community.docker.docker_container: | ||||
|     name: "{{ nginx_container_name }}" | ||||
|     state: started | ||||
|     restart: true | ||||
|   listen: restart-nginx | ||||
							
								
								
									
										37
									
								
								roles/nginx/tasks/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										37
									
								
								roles/nginx/tasks/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,37 @@ | ||||
| --- | ||||
|  | ||||
| - name: Ensure base path '{{ nginx_base_path }}' exists | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ nginx_base_path }}" | ||||
|     state: directory | ||||
|     mode: 0755 | ||||
|  | ||||
| - name: Ensure nginx config file is templated | ||||
|   ansible.builtin.copy: | ||||
|     dest: "{{ nginx_config_file }}" | ||||
|     content: "{{ nginx_config }}" | ||||
|     mode: 0640 | ||||
|   notify: | ||||
|     - restart-nginx | ||||
|  | ||||
| - name: Ensure docker container image is present | ||||
|   community.docker.docker_image: | ||||
|     name: "{{ nginx_container_image_reference }}" | ||||
|     state: present | ||||
|     source: pull | ||||
|     force_source: "{{ nginx_container_image_tag is defined and nginx_container_image_tag | string != '' }}" | ||||
|  | ||||
| - name: Ensure docker container '{{ nginx_container_name }}' is running | ||||
|   community.docker.docker_container: | ||||
|     name: "{{ nginx_container_name }}" | ||||
|     image: "{{ nginx_container_image_reference }}" | ||||
|     env: "{{ nginx_container_env | default(omit, true) }}" | ||||
|     user: "{{ nginx_container_user | default(omit, true) }}" | ||||
|     ports: "{{ nginx_container_ports | default(omit, true) }}" | ||||
|     labels: "{{ nginx_container_labels | default(omit, true) }}" | ||||
|     volumes: "{{ nginx_container_volumes | default(omit, true) }}" | ||||
|     etc_hosts: "{{ nginx_container_etc_hosts | default(omit, true) }}" | ||||
|     networks: "{{ nginx_container_networks | default(omit, true) }}" | ||||
|     purge_networks: "{{ nginx_container_purge_networks | default(omit, true) }}" | ||||
|     restart_policy: "{{ nginx_container_restart_policy }}" | ||||
|     state: started | ||||
| @@ -1,21 +0,0 @@ | ||||
| # `finallycoffee.services.openproject` ansible role | ||||
|  | ||||
| Deploys [openproject](https://www.openproject.org/) using docker-compose. | ||||
|  | ||||
| ## Configuration | ||||
|  | ||||
| To set configuration variables for OpenProject, set them in `openproject_compose_overrides`: | ||||
| ```yaml | ||||
| openproject_compose_overrides: | ||||
|   version: "3.7" | ||||
|   services: | ||||
|     proxy: | ||||
|        [...] | ||||
|   volumes: | ||||
|     pgdata: | ||||
|       driver: local | ||||
|       driver_opts: | ||||
|         o: bind | ||||
|         type: none | ||||
|         device: /var/lib/postgresql | ||||
| ``` | ||||
| @@ -1,11 +0,0 @@ | ||||
| --- | ||||
| openproject_base_path: "/opt/openproject" | ||||
|  | ||||
| openproject_upstream_git_url: "https://github.com/opf/openproject-deploy.git" | ||||
| openproject_upstream_git_branch: "stable/13" | ||||
|  | ||||
| openproject_compose_project_path: "{{ openproject_base_path }}/compose" | ||||
| openproject_compose_project_name: "openproject" | ||||
| openproject_compose_project_env_file: "{{ openproject_compose_project_path }}/.env" | ||||
| openproject_compose_project_override_file: "{{ openproject_compose_project_path }}/docker-compose.override.yml" | ||||
| openproject_compose_project_env: {} | ||||
| @@ -1,39 +0,0 @@ | ||||
| --- | ||||
| - name: Ensure base directory '{{ openproject_base_path }}' is present | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ openproject_base_path }}" | ||||
|     state: directory | ||||
|  | ||||
| - name: Ensure upstream repository is cloned | ||||
|   ansible.builtin.git: | ||||
|     dest: "{{ openproject_base_path }}" | ||||
|     repo: "{{ openproject_upstream_git_url }}" | ||||
|     version: "{{ openproject_upstream_git_branch }}" | ||||
|     clone: true | ||||
|     depth: 1 | ||||
|  | ||||
| - name: Ensure environment is configured | ||||
|   ansible.builtin.lineinfile: | ||||
|     line: "{{ item.key}}={{ item.value}}" | ||||
|     path: "{{ openproject_compose_project_env_file }}" | ||||
|     state: present | ||||
|     create: true | ||||
|   loop: "{{ openproject_compose_project_env | dict2items(key_name='key', value_name='value') }}" | ||||
|  | ||||
| - name: Ensure docker compose overrides are set | ||||
|   ansible.builtin.copy: | ||||
|     dest: "{{ openproject_compose_project_override_file }}" | ||||
|     content: "{{ openproject_compose_overrides | default({}) | to_nice_yaml }}" | ||||
|  | ||||
| - name: Ensure containers are pulled | ||||
|   community.docker.docker_compose: | ||||
|     project_src: "{{ openproject_compose_project_path }}" | ||||
|     project_name: "{{ openproject_compose_project_name }}" | ||||
|     pull: true | ||||
|  | ||||
| - name: Ensure services are running | ||||
|   community.docker.docker_compose: | ||||
|     project_src: "{{ openproject_compose_project_path }}" | ||||
|     project_name: "{{ openproject_compose_project_name }}" | ||||
|     state: "present" | ||||
|     build: false | ||||
							
								
								
									
										77
									
								
								roles/restic/README.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										77
									
								
								roles/restic/README.md
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,77 @@ | ||||
| # `finallycoffee.services.restic` | ||||
|  | ||||
| Ansible role for backing up data using `restic`, utilizing `systemd` timers for scheduling. | ||||
|  | ||||
| ## Overview | ||||
|  | ||||
| As restic encrypts the data before storing it, the `restic_repo_password` needs | ||||
| to be populated with a strong key, and saved accordingly as only this key can | ||||
| be used to decrypt the data for a restore! | ||||
|  | ||||
| ### Backends | ||||
|  | ||||
| #### S3 Backend | ||||
|  | ||||
| To use a `s3`-compatible backend like AWS buckets or minio, both `restic_s3_key_id` | ||||
| and `restic_s3_access_key` need to be populated, and the `restic_repo_url` has the | ||||
| format `s3:https://my.s3.endpoint:port/bucket-name`. | ||||
|  | ||||
| #### SFTP Backend | ||||
|  | ||||
| Using the `sftp` backend requires the configured `restic_user` to be able to | ||||
| authenticate to the configured SFTP-Server using password-less methods like | ||||
| publickey-authentication. The `restic_repo_url` then follows the format | ||||
| `sftp:{user}@{server}:/my-restic-repository` (or without leading `/` for relative | ||||
| paths to the `{user}`'s home directory). | ||||
|  | ||||
| ### Backing up data | ||||
|  | ||||
| A job name like `$service-postgres` or similar needs to be set in `restic_job_name`, | ||||
| which is used for naming the `systemd` units, their syslog identifiers etc. | ||||
|  | ||||
| If backing up filesystem locations, the paths need to be specified in | ||||
| `restic_backup_paths` as lists of strings representing absolute filesystem | ||||
| locations. | ||||
|  | ||||
| If backing up f.ex. database or other data which is generating backups using | ||||
| a command like `pg_dump`, use `restic_backup_stdin_command` (which needs to output | ||||
| to `stdout`) in conjunction with `restic_backup_stdin_command_filename` to name | ||||
| the resulting output (required). | ||||
|  | ||||
| ### Policy | ||||
|  | ||||
| The backup policy can be adjusted by overriding the `restic_policy_keep_*` | ||||
| variables, with the defaults being: | ||||
|  | ||||
| ```yaml | ||||
| restic_policy_keep_all_within: 1d | ||||
| restic_policy_keep_hourly: 6 | ||||
| restic_policy_keep_daily: 2 | ||||
| restic_policy_keep_weekly: 7 | ||||
| restic_policy_keep_monthly: 4 | ||||
| restic_policy_backup_frequency: hourly | ||||
| ``` | ||||
|  | ||||
| **Note:** `restic_policy_backup_frequency` must conform to `systemd`s | ||||
| `OnCalendar` syntax, which can be checked using `systemd-analyze calendar $x`. | ||||
|  | ||||
| ## Role behaviour | ||||
|  | ||||
| Per default, when the systemd unit for a job changes, the job is not immediately | ||||
| started. This can be overridden using `restic_start_job_on_unit_change: true`, | ||||
| which will immediately start the backup job if it's configuration changed. | ||||
|  | ||||
| The systemd unit runs with `restic_user`, which is root by default, guaranteeing | ||||
| that filesystem paths are always readable. The `restic_user` can be overridden, | ||||
| but care needs to be taken to ensure the user has permission to read all the | ||||
| provided filesystem paths / the backup command may be executed by the user. | ||||
|  | ||||
| If ansible should create the user, set `restic_create_user` to `true`, which | ||||
| will attempt to create the `restic_user` as a system user. | ||||
|  | ||||
| ### Installing | ||||
|  | ||||
| For Debian and RedHat, the role attempts to install restic using the default | ||||
| package manager's ansible module (apt/dnf). For other distributions, the generic | ||||
| `package` module tries to install `restic_package_name` (default: `restic`), | ||||
| which can be overridden if needed. | ||||
							
								
								
									
										37
									
								
								roles/restic/defaults/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										37
									
								
								roles/restic/defaults/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,37 @@ | ||||
| --- | ||||
|  | ||||
| restic_repo_url: ~ | ||||
| restic_repo_password: ~ | ||||
| restic_s3_key_id: ~ | ||||
| restic_s3_access_key: ~ | ||||
|  | ||||
| restic_backup_paths: [] | ||||
| restic_backup_stdin_command: ~ | ||||
| restic_backup_stdin_command_filename: ~ | ||||
|  | ||||
| restic_policy_keep_all_within: 1d | ||||
| restic_policy_keep_hourly: 6 | ||||
| restic_policy_keep_daily: 2 | ||||
| restic_policy_keep_weekly: 7 | ||||
| restic_policy_keep_monthly: 4 | ||||
| restic_policy_backup_frequency: hourly | ||||
|  | ||||
| restic_policy: | ||||
|   keep_within: "{{ restic_policy_keep_all_within }}" | ||||
|   hourly: "{{ restic_policy_keep_hourly }}" | ||||
|   daily: "{{ restic_policy_keep_daily }}" | ||||
|   weekly: "{{ restic_policy_keep_weekly }}" | ||||
|   monthly: "{{ restic_policy_keep_monthly }}" | ||||
|   frequency: "{{ restic_policy_backup_frequency }}" | ||||
|  | ||||
| restic_user: root | ||||
| restic_create_user: false | ||||
| restic_start_job_on_unit_change: false | ||||
|  | ||||
| restic_job_name: ~ | ||||
| restic_job_description: "Restic backup job for {{ restic_job_name }}" | ||||
| restic_systemd_unit_naming_scheme: "restic.{{ restic_job_name }}" | ||||
| restic_systemd_working_directory: /tmp | ||||
| restic_systemd_syslog_identifier: "restic-{{ restic_job_name }}" | ||||
|  | ||||
| restic_package_name: restic | ||||
							
								
								
									
										13
									
								
								roles/restic/handlers/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										13
									
								
								roles/restic/handlers/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,13 @@ | ||||
| --- | ||||
|  | ||||
| - name: Ensure system daemon is reloaded | ||||
|   listen: reload-systemd | ||||
|   systemd: | ||||
|     daemon_reload: true | ||||
|  | ||||
| - name: Ensure systemd service for '{{ restic_job_name }}' is started immediately | ||||
|   listen: trigger-restic | ||||
|   systemd: | ||||
|     name: "{{ restic_systemd_unit_naming_scheme }}.service" | ||||
|     state: started | ||||
|   when: restic_start_job_on_unit_change | ||||
							
								
								
									
										77
									
								
								roles/restic/tasks/main.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										77
									
								
								roles/restic/tasks/main.yml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,77 @@ | ||||
| --- | ||||
|  | ||||
| - name: Ensure {{ restic_user }} system user exists | ||||
|   user: | ||||
|     name: "{{ restic_user }}" | ||||
|     state: present | ||||
|     system: true | ||||
|   when: restic_create_user | ||||
|  | ||||
| - name: Ensure either backup_paths or backup_stdin_command is populated | ||||
|   when: restic_backup_paths|length > 0 and restic_backup_stdin_command | ||||
|   fail: | ||||
|     msg: "Setting both `restic_backup_paths` and `restic_backup_stdin_command` is not supported" | ||||
|  | ||||
| - name: Ensure a filename for stdin_command backup is given | ||||
|   when: restic_backup_stdin_command and not restic_backup_stdin_command_filename | ||||
|   fail: | ||||
|     msg: "`restic_backup_stdin_command` was set but no filename for the resulting output was supplied in `restic_backup_stdin_command_filename`" | ||||
|  | ||||
| - name: Ensure backup frequency adheres to systemd's OnCalender syntax | ||||
|   command: | ||||
|     cmd: "systemd-analyze calendar {{ restic_policy.frequency }}" | ||||
|   register: systemd_calender_parse_res | ||||
|   failed_when: systemd_calender_parse_res.rc != 0 | ||||
|   changed_when: false | ||||
|  | ||||
| - name: Ensure restic is installed | ||||
|   block: | ||||
|     - name: Ensure restic is installed via apt | ||||
|       apt: | ||||
|         package: restic | ||||
|         state: latest | ||||
|       when: ansible_os_family == 'Debian' | ||||
|     - name: Ensure restic is installed via dnf | ||||
|       dnf: | ||||
|         name: restic | ||||
|         state: latest | ||||
|       when: ansible_os_family == 'RedHat' | ||||
|     - name: Ensure restic is installed using the auto-detected package-manager | ||||
|       package: | ||||
|         name: "{{ restic_package_name }}" | ||||
|         state: present | ||||
|       when: ansible_os_family not in ['RedHat', 'Debian'] | ||||
|  | ||||
| - name: Ensure systemd service file for '{{ restic_job_name }}' is templated | ||||
|   template: | ||||
|     dest: "/etc/systemd/system/{{ restic_systemd_unit_naming_scheme }}.service" | ||||
|     src: restic.service.j2 | ||||
|     owner: root | ||||
|     group: root | ||||
|     mode: 0640 | ||||
|   notify: | ||||
|     - reload-systemd | ||||
|     - trigger-restic | ||||
|  | ||||
| - name: Ensure systemd service file for '{{ restic_job_name }}' is templated | ||||
|   template: | ||||
|     dest: "/etc/systemd/system/{{ restic_systemd_unit_naming_scheme }}.timer" | ||||
|     src: restic.timer.j2 | ||||
|     owner: root | ||||
|     group: root | ||||
|     mode: 0640 | ||||
|   notify: | ||||
|     - reload-systemd | ||||
|  | ||||
| - name: Flush handlers to ensure systemd knows about '{{ restic_job_name }}' | ||||
|   meta: flush_handlers | ||||
|  | ||||
| - name: Ensure systemd timer for '{{ restic_job_name }}' is activated | ||||
|   systemd: | ||||
|     name: "{{ restic_systemd_unit_naming_scheme }}.timer" | ||||
|     enabled: true | ||||
|  | ||||
| - name: Ensure systemd timer for '{{ restic_job_name }}' is started | ||||
|   systemd: | ||||
|     name: "{{ restic_systemd_unit_naming_scheme }}.timer" | ||||
|     state: started | ||||
							
								
								
									
										28
									
								
								roles/restic/templates/restic.service.j2
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										28
									
								
								roles/restic/templates/restic.service.j2
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,28 @@ | ||||
| [Unit] | ||||
| # One-shot restic backup job; the description is injected per-job by Ansible. | ||||
| Description={{ restic_job_description }} | ||||
|  | ||||
| [Service] | ||||
| # Runs to completion on each timer activation; no long-lived daemon. | ||||
| Type=oneshot | ||||
| User={{ restic_user }} | ||||
| WorkingDirectory={{ restic_systemd_working_directory }} | ||||
| SyslogIdentifier={{ restic_systemd_syslog_identifier }} | ||||
|  | ||||
| # Repository location and credentials are passed through restic's standard | ||||
| # environment variables. NOTE(review): the repository password is embedded in | ||||
| # the rendered unit file -- confirm the file is not world-readable. | ||||
| Environment=RESTIC_REPOSITORY={{ restic_repo_url }} | ||||
| Environment=RESTIC_PASSWORD={{ restic_repo_password }} | ||||
| {% if restic_s3_key_id and restic_s3_access_key %} | ||||
| # S3 credentials are rendered only when both the key id and access key are set. | ||||
| Environment=AWS_ACCESS_KEY_ID={{ restic_s3_key_id }} | ||||
| Environment=AWS_SECRET_ACCESS_KEY={{ restic_s3_access_key }} | ||||
| {% endif %} | ||||
|  | ||||
| # Initialize the repository if it does not answer 'snapshots' yet; the leading | ||||
| # '-' tells systemd to ignore a non-zero exit so an existing repo is harmless. | ||||
| ExecStartPre=-/bin/sh -c '/usr/bin/restic snapshots || /usr/bin/restic init' | ||||
| {% if restic_backup_stdin_command %} | ||||
| # Pipe the configured command's stdout into restic as a single stdin file. | ||||
| ExecStart=/bin/sh -c '{{ restic_backup_stdin_command }} | /usr/bin/restic backup --verbose --stdin --stdin-filename {{ restic_backup_stdin_command_filename }}' | ||||
| {% else %} | ||||
| # Plain filesystem backup of the configured paths. | ||||
| ExecStart=/usr/bin/restic --verbose backup {{ restic_backup_paths | join(' ') }} | ||||
| {% endif %} | ||||
| # After the backup: apply the retention policy, list snapshots (failures | ||||
| # tolerated via '-'), then verify repository integrity. | ||||
| ExecStartPost=/usr/bin/restic forget --prune --keep-within={{ restic_policy.keep_within }} --keep-hourly={{ restic_policy.hourly }} --keep-daily={{ restic_policy.daily }} --keep-weekly={{ restic_policy.weekly }} --keep-monthly={{ restic_policy.monthly }} | ||||
| ExecStartPost=-/usr/bin/restic snapshots | ||||
| ExecStartPost=/usr/bin/restic check | ||||
|  | ||||
| [Install] | ||||
| WantedBy=multi-user.target | ||||
							
								
								
									
										10
									
								
								roles/restic/templates/restic.timer.j2
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										10
									
								
								roles/restic/templates/restic.timer.j2
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,10 @@ | ||||
| [Unit] | ||||
| Description=Run {{ restic_job_name }} | ||||
|  | ||||
| [Timer] | ||||
| # Schedule comes from the per-job policy (systemd OnCalendar expression). | ||||
| OnCalendar={{ restic_policy.frequency }} | ||||
| # Run a missed activation immediately after downtime instead of skipping it. | ||||
| Persistent=True | ||||
| # Activate the matching backup service unit rendered by this role. | ||||
| Unit={{ restic_systemd_unit_naming_scheme }}.service | ||||
|  | ||||
| [Install] | ||||
| WantedBy=timers.target | ||||
| @@ -1,7 +1,7 @@ | ||||
| --- | ||||
| 
 | ||||
| vouch_proxy_user: vouch-proxy | ||||
| vouch_proxy_version: 0.40.0 | ||||
| vouch_proxy_version: 0.39.0 | ||||
| vouch_proxy_base_path: /opt/vouch-proxy | ||||
| vouch_proxy_config_path: "{{ vouch_proxy_base_path }}/config" | ||||
| vouch_proxy_config_file: "{{ vouch_proxy_config_path }}/config.yaml" | ||||
| @@ -1,16 +0,0 @@ | ||||
| # `finallycoffee.services.vouch-proxy` | ||||
|  | ||||
| [Vouch-Proxy](https://github.com/vouch/vouch-proxy) can be used in combination with | ||||
| nginx's `auth_request` module to secure web services with OIDC/OAuth. This role runs | ||||
| vouch-proxy's official docker container. | ||||
|  | ||||
| ## Usage | ||||
|  | ||||
| The `oauth` config section must be supplied in `vouch_proxy_oauth_config`, and the | ||||
| `vouch` config section can be overridden in `vouch_proxy_vouch_config`. For possible | ||||
| configuration values, see https://github.com/vouch/vouch-proxy/blob/master/config/config.yml_example. | ||||
|  | ||||
| For an example nginx config, see https://github.com/vouch/vouch-proxy#installation-and-configuration. | ||||
|  | ||||
| Passing container arguments in the same way as `community.docker.docker_container` is supported | ||||
| using the `vouch_proxy_container_[...]` prefix (e.g. `vouch_proxy_container_ports`). | ||||
| @@ -1,12 +0,0 @@ | ||||
| --- | ||||
| # Permit this role to be listed more than once within a single play. | ||||
| allow_duplicates: true | ||||
| # No other roles are required before this one. | ||||
| dependencies: [] | ||||
| galaxy_info: | ||||
|   role_name: vouch_proxy | ||||
|   description: Ansible role to deploy vouch_proxy using docker | ||||
|   # Search/browse tags for Ansible Galaxy. | ||||
|   galaxy_tags: | ||||
|     - vouch_proxy | ||||
|     - oidc | ||||
|     - authentication | ||||
|     - authorization | ||||
|     - docker | ||||
		Reference in New Issue
	
	Block a user