diff --git a/.ansible-lint b/.ansible-lint new file mode 100644 index 000000000..5ab59f61d --- /dev/null +++ b/.ansible-lint @@ -0,0 +1,3 @@ +--- +profile: "production" +offline: false diff --git a/.github/workflows/molecule-loadbalancer.yml b/.github/workflows/molecule-loadbalancer.yml index 2f3a154e3..ca03e28f3 100644 --- a/.github/workflows/molecule-loadbalancer.yml +++ b/.github/workflows/molecule-loadbalancer.yml @@ -24,7 +24,7 @@ jobs: build: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Set up Python 3.8 uses: actions/setup-python@v6 diff --git a/.github/workflows/molecule-mongo.yml b/.github/workflows/molecule-mongo.yml index ce2f778c7..f0b52d67d 100644 --- a/.github/workflows/molecule-mongo.yml +++ b/.github/workflows/molecule-mongo.yml @@ -18,7 +18,7 @@ jobs: build: runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Set up Python 3.8 uses: actions/setup-python@v6 with: diff --git a/.github/workflows/syntax.yml b/.github/workflows/syntax.yml index 7ba467f5d..8295b151f 100644 --- a/.github/workflows/syntax.yml +++ b/.github/workflows/syntax.yml @@ -19,7 +19,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Set up Python 3.8 uses: actions/setup-python@v6 diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..fb9f01809 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,16 @@ +# Changelog + +All notable changes to this project will be documented in this file. 
+ +### Changed +- All group_var files are moved to the environment example template, more information about where to save group_vars in the [README](https://github.com/OpenConext/OpenConext-deploy/blob/main/README.md) +- separate plays for separate roles in the provision.yml playbook +- separate groups are defined for separate apps, dividing apps across the container servers should be set in the inventory not in the playbook, this way you can easily change it for different environments. This also makes it impossible to use the wrong tag and deploy something you did not intend to, instead nothing will happen. +- mysql_standalone group replaces storage group + +### Removed +- selfsigned_certs role is deprecated and removed from the provision.yml playbook +- environment/playbook inclusion in provision.yml + +### Todo +- [ ] Complete environments/template diff --git a/README.md b/README.md index 76fb8da81..a6623af49 100644 --- a/README.md +++ b/README.md @@ -39,12 +39,12 @@ Every application has a seperate role to install it. The following roles can be | stepupra | Stepup ra interface | | stepupselfservice | Stepup selfservice interface | -All these applications run in Docker. You can use the "docker" role to install docker and Traefik. The result is a Docker application server, with port 443 open. Applications are served by Traefik and recognized on basis of a Host: header. If you run a small installation, you can add a https certificate to Traefik and run a single node application server. +All these applications run in Docker. You can use the "docker" role to install docker and Traefik. The result is a Docker application server, with port 443 open. Applications are served by Traefik and recognized on basis of a Host: header. If you run a small installation, you can add a https certificate to Traefik and run a single node application server. -For a fully functioning environment you also need a MariaDB database server and a Mongo database server. 
+For a fully functioning environment you also need a MariaDB database server and a Mongo database server. ## Infra roles -This repository is used for deployment of SURFconext, and several roles that the SURFconext teams uses to provision our infrastructure are provided here as well. You can use them for your own infrastructure or use them as inspiration. +This repository is used for deployment of SURFconext, and several roles that the SURFconext teams uses to provision our infrastructure are provided here as well. You can use them for your own infrastructure or use them as inspiration. | name | remarks | | --- | --- | | bind | DNS server for high availability. Very specific for SURFconext | @@ -59,40 +59,93 @@ This repository is used for deployment of SURFconext, and several roles that the | mongo | Install a mongo cluster (has its own README) | | manage_provision_entities|Provision entities to Manage | -# Environment specific variables -Many variables can be overridden to create a setup suitable for your needs. The environment should be placed in the directory environments_external. +# Setting up your environment +Many variables can be overridden to create a setup suitable for your needs. We will explain the setup here for one environment or for a multi-environment (OTAP for example) setup. -A script is available to provision a new environment. It will create a new environment directory under environments-external/ and it will create all necessary passwords and (self-signed) certificates. Replace with the name of the target. Replace with the domain of the target. +The setup described below should work, but when using ansible many paths lead to Rome. 
If you want to know more about variables and where to save them, this can be helpful: https://docs.ansible.com/projects/ansible/latest/playbook_guide/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable +## Inventory +You need an inventory file for your environment or multiple inventory files if you have multiple environments. An example can be found in environments/template +## Playbook +You can use the provision.yml script to deploy all infra and application roles. Every play has a tag so you can deploy your environment one application at a time by using the specific tag. You can also use your own playbooks if you prefer. + +## First steps +Clone the repository with git. + +```bash +cd yourdir +git clone https://github.com/OpenConext/OpenConext-deploy.git ``` -/prep-env -``` -Then run -``` -cp environments-external//host_vars/template.yml environments-external//host_vars/.yml -``` -(where is the ip address or hostname of your target machine, whatever is set in your inventory file) -Change in environments-external//inventory: -Change all references from %target_host% to +Create ansible.cfg in your directory and add Openconext-deploy/roles to your roles_path +```bash +[defaults] +diff = true +roles_path = OpenConext-deploy/roles # Add your own roles directory if you want ``` + +## One environment +Copy the inventory, host and group files from environment/template to your directory and adjust them according to your preferences: + +```bash +cp -R OpenConext-deploy/environments/template/* . 
``` +Edit your inventory file +Edit group_var and host_var files if necessary + +Create an ansible vault in secrets and name it secrets.yml, an unencrypted example can be found in secrets/secret_example.yml +More information about vaults: https://docs.ansible.com/projects/ansible/latest/vault_guide/index.html +The final setup will look like this: -# Playbooks, tags and the provision wrapper script +- group_vars/all.yml +- group_vars/\.yml +- secrets/secrets.yml +- host_vars/\/yml +- inventory +- Openconext-deploy/provision.yml +- Openconext-deploy/roles +- \.yml +- ansible.cfg -Two playbooks exist in this repository: provision.yml and playbook_haproxy.yml. The latter can be used to do red/blue deployments if you also use our haproxy role. -The main playbook is provision.yml. It contains series of plays to install every role on the right node. All roles are tagged, so you can use the [Ansible tag mechanism](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_tags.html) to deploy a specific role. +You can use the provision playbook now: -If you would like to deploy manage to your test environment, you would run: +```bash +ansible-playbook OpenConext-deploy/provision.yml -i inventory -t --ask-vault-password ``` -ansible-playbook -i environments-external/test/inventory --tags manage -u THE_REMOTE_SSH_USER_WITH_SUDO_PERMISSIONS + +## Multi-environment +Copy the inventory and group files from environment/template to your directory and adjust them according to your preferences: + +```bash +mkdir # test for example +cp -R OpenConext-deploy/environments/template/* +# etc... 
``` +Edit your inventory files +Edit group_var and host_var files if necessary + +For each environment create an ansible vault in secrets and name it secrets.yml, an unencrypted example can be found in secrets/secret_example.yml +More information about vaults: https://docs.ansible.com/projects/ansible/latest/vault_guide/index.html + +The final setup will look like this: -A wrapper script which enables you to use your own roles can be used as well. That is documented here: https://github.com/OpenConext/OpenConext-deploy/wiki/Add-your-own-roles-and-playbooks +- \/group_vars/all.yml +- \/group_vars/\.yml +- \/host_vars/\/yml +- \/inventory +- Openconext-deploy/provision.yml +- Openconext-deploy/roles +- \.yml +- ansible.cfg + +You can use the provision playbook now: + +```bash +ansible-playbook OpenConext-deploy/provision.yml -i /inventory -t --ask-vault-password +``` # License diff --git a/deploy_containers_playbook.yml b/deploy_containers_playbook.yml deleted file mode 100644 index bcbc43339..000000000 --- a/deploy_containers_playbook.yml +++ /dev/null @@ -1,171 +0,0 @@ ---- -- name: Deploy containerized applications - hosts: docker_servers - become: true - tasks: - - name: Read vars from secrets file - ansible.builtin.include_vars: "{{ inventory_dir }}/secrets/secrets.yml" - no_log: true - tags: - - always - -# Separate grouos for all containerized apps -# Dividing apps across the container services should be set in -# the inventory not in the playbook, this way you can easily change -# it for different environments - -- name: Deploy attribute-aggregation app - hosts: docker_attribute_aggregation - become: true - roles: - - { role: attribute-aggregation, tags: ['aa', 'attribute-aggregation'] } - -- name: Deploy dashboard app - hosts: docker_dashboard - become: true - roles: - - { role: dashboard, tags: ['dashboard'] } - -- name: Deploy diyidp app - hosts: docker_diyidp - become: true - roles: - - { role: diyidp, tags: ['diyidp'] } - -- name: Deploy engineblock app 
- hosts: docker_engineblock - become: true - roles: - - { role: engineblock, tags: ['engineblock', 'eb'] } - -- name: Deploy invite app - hosts: docker_invite - become: true - roles: - - { role: invite, tags: ['invite'] } - -- name: Deploy lifecycle app - hosts: docker_lifecycle - become: true - roles: - - { role: lifecycle, tags: ['lifecycle'] } - -- name: Deploy manage app - hosts: docker_manage - become: true - roles: - - { role: manage, tags: ['manage'] } - -- name: Deploy mujina-idp app - hosts: docker_mujina_idp - become: true - roles: - - { role: mujina-idp, tags: ['mujina-idp', 'mujina'] } - -- name: Deploy mujina-sp app - hosts: docker_mujina_sp - become: true - roles: - - { role: mujina-sp, tags: ['mujina-sp', 'mujina'] } - -- name: Deploy myconext app - hosts: docker_myconext - become: true - roles: - - { role: myconext, tags: ['myconext'] } - -- name: Deploy oidcng app - hosts: docker_oidcng - become: true - roles: - - { role: oidcng, tags: ['oidcng'] } - -- name: Deploy oidc-playground app - hosts: docker_oidc_playground - become: true - roles: - - { role: oidc-playground, tags: ['oidc-playground'] } - -- name: Deploy openaccess app & server - hosts: docker_openaccess - become: true - roles: - - { role: openaccess, tags: ['openaccess'] } - -- name: Deploy pdp app - hosts: docker_pdp - become: true - roles: - - { role: pdp, tags: ['pdp'] } - -- name: Deploy profile app - hosts: docker_profile - become: true - roles: - - { role: profile, tags: ['profile'] } - -- name: Deploy stats app - hosts: docker_stats - become: true - roles: - - { role: stats, tags: ['stats'] } - -- name: Deploy stepupazuremfa app - hosts: docker_stepupazuremfa - become: true - roles: - - { role: stepupazuremfa, tags: ['stepupazuremfa', 'stepup'] } - -- name: Deploy stepupgateway app - hosts: docker_stepupgateway - become: true - roles: - - { role: stepupgateway, tags: ['stepupgateway', 'stepup'] } - -- name: Deploy stepupmiddleware app - hosts: docker_stepupmiddleware - become: 
true - roles: - - { role: stepupmiddleware, tags: ['stepupmiddleware', 'stepup'] } - -- name: Deploy stepupra app - hosts: docker_stepupra - become: true - roles: - - { role: stepupra, tags: ['stepupra', 'stepup'] } - -- name: Deploy stepupselfservice app - hosts: docker_stepupselfservice - become: true - roles: - - { role: stepupselfservice, tags: ['stepupselfservice', 'stepup'] } - -- name: Deploy stepuptiqr app - hosts: docker_stepuptiqr - become: true - roles: - - { role: stepuptiqr, tags: ['stepuptiqr', 'stepup'] } - -- name: Deploy stepupwebauthn app - hosts: docker_stepupwebauthn - become: true - roles: - - { role: stepupwebauthn, tags: ['stepupwebauthn', 'stepup'] } - -- name: Deploy teams app - hosts: docker_teams - become: true - roles: - - { role: teams, tags: ['teams'] } - -- name: Deploy voot app - hosts: docker_voot - become: true - roles: - - { role: voot, tags: ['voot'] } - -- name: Deploy minio app - hosts: docker_minio - become: true - roles: - - { role: minio, tags: ['minio'] } diff --git a/deploy_docker_playbook.yml b/deploy_docker_playbook.yml deleted file mode 100644 index eaa54940f..000000000 --- a/deploy_docker_playbook.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- name: Read inventory secrets - hosts: docker_servers - become: true - tasks: - - name: Read vars from secrets file - ansible.builtin.include_vars: "{{ inventory_dir }}/secrets/secrets.yml" - no_log: true - tags: - - always - -- name: Configure docker servers - hosts: docker_servers - become: true - roles: - - { role: docker, tags: ['docker'] } diff --git a/deploy_loadbalancers_playbook.yml b/deploy_loadbalancers_playbook.yml deleted file mode 100644 index 12daca264..000000000 --- a/deploy_loadbalancers_playbook.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -- name: Read secrets - hosts: loadbalancer - become: true - tasks: - - name: Read vars from secrets file - ansible.builtin.include_vars: "{{ inventory_dir }}/secrets/secrets.yml" - no_log: true - tags: - - always - -- name: Deploy 
haproxy - hosts: loadbalancer - gather_facts: true - become: true - roles: - - role: haproxy - tags: ['core', 'loadbalancer', 'lb'] - -- name: Deploy keepalived and bind for clustered loadbalancers - hosts: loadbalancer_ha - gather_facts: true - become: true - roles: - - role: keepalived - tags: ['core', 'loadbalancer_ha', 'keepalived'] - - role: bind - tags: ['core', 'loadbalancer_ha', 'bind'] diff --git a/deploy_mariadb_playbook.yml b/deploy_mariadb_playbook.yml deleted file mode 100644 index 73b314ff7..000000000 --- a/deploy_mariadb_playbook.yml +++ /dev/null @@ -1 +0,0 @@ ---- \ No newline at end of file diff --git a/deploy_mongo_playbook.yml b/deploy_mongo_playbook.yml deleted file mode 100644 index 73b314ff7..000000000 --- a/deploy_mongo_playbook.yml +++ /dev/null @@ -1 +0,0 @@ ---- \ No newline at end of file diff --git a/environments/template/files/invite/crm_config.json b/environments/template/files/invite/crm_config.json new file mode 100644 index 000000000..b786615cc --- /dev/null +++ b/environments/template/files/invite/crm_config.json @@ -0,0 +1,231 @@ +{ + "AAI": { + "name": "AAIverantwoordelijke", + "roleId": "fc89aa87-07e4-e811-8100-005056956c1a", + "applications": [ + { + "manageEntityID": "https://manage.test.surfconext.nl/shibboleth", + "manageType": "saml20_sp" + }, + { + "manageEntityID": "invite.test.surfconext.nl", + "manageType": "oidc10_rp" + } + ] + }, + "BVW": { + "name": "Beveiligingsverantwoordelijke", + "roleId": "92b2b379-07e4-e811-8100-005056956c1a", + "applications": [ + { + "manageEntityID": "https://manage.test.surfconext.nl/shibboleth", + "manageType": "saml20_sp" + }, + { + "manageEntityID": "invite.test.surfconext.nl", + "manageType": "oidc10_rp" + } + ] + }, + "CONBEH": { + "name": "SURFconextbeheerder", + "roleId": "5e17b508-08e4-e811-8100-005056956c1a", + "applications": [ + { + "manageEntityID": "https://manage.test.surfconext.nl/shibboleth", + "manageType": "saml20_sp" + }, + { + "manageEntityID": 
"invite.test.surfconext.nl", + "manageType": "oidc10_rp" + } + ] + }, + "CONVER": { + "name": "SURFconextverantwoordelijke", + "roleId": "cf652619-08e4-e811-8100-005056956c1a", + "applications": [ + { + "manageEntityID": "https://manage.test.surfconext.nl/shibboleth", + "manageType": "saml20_sp" + }, + { + "manageEntityID": "invite.test.surfconext.nl", + "manageType": "oidc10_rp" + } + ] + }, + "DNS": { + "name": "DNS-Beheerder", + "roleId": "f3ff6ef5-07e4-e811-8100-005056956c1a", + "applications": [ + { + "manageEntityID": "https://manage.test.surfconext.nl/shibboleth", + "manageType": "saml20_sp" + }, + { + "manageEntityID": "invite.test.surfconext.nl", + "manageType": "oidc10_rp" + } + ] + }, + "DOM": { + "name": "Domeinnamenverantwoordelijke", + "roleId": "fba019c7-07e4-e811-8100-005056956c1a", + "applications": [ + { + "manageEntityID": "https://manage.test.surfconext.nl/shibboleth", + "manageType": "saml20_sp" + }, + { + "manageEntityID": "invite.test.surfconext.nl", + "manageType": "oidc10_rp" + } + ] + }, + "INFB": { + "name": "Infrabeheerder", + "roleId": "1d3d6ae6-07e4-e811-8100-005056956c1a", + "applications": [ + { + "manageEntityID": "https://manage.test.surfconext.nl/shibboleth", + "manageType": "saml20_sp" + }, + { + "manageEntityID": "invite.test.surfconext.nl", + "manageType": "oidc10_rp" + } + ] + }, + "IVW": { + "name": "Infraverantwoordelijke", + "roleId": "1ca2f1d9-07e4-e811-8100-005056956c1a", + "applications": [ + { + "manageEntityID": "https://manage.test.surfconext.nl/shibboleth", + "manageType": "saml20_sp" + }, + { + "manageEntityID": "invite.test.surfconext.nl", + "manageType": "oidc10_rp" + } + ] + }, + "MISP": { + "name": "MISP-beheerder", + "roleId": "d01ddc0c-b930-ec11-9112-0050569571ea", + "applications": [ + { + "manageEntityID": "https://manage.test.surfconext.nl/shibboleth", + "manageType": "saml20_sp" + }, + { + "manageEntityID": "invite.test.surfconext.nl", + "manageType": "oidc10_rp" + } + ] + }, + "SIEMB": { + "name": 
"SIEM-beheerder", + "roleId": "cf6ab19d-0a75-eb11-9106-0050569571ea", + "applications": [ + { + "manageEntityID": "https://manage.test.surfconext.nl/shibboleth", + "manageType": "saml20_sp" + }, + { + "manageEntityID": "invite.test.surfconext.nl", + "manageType": "oidc10_rp" + } + ] + }, + "SIEMV": { + "name": "SIEM-verantwoordelijke", + "roleId": "26334f6b-0a75-eb11-9106-0050569571ea", + "applications": [ + { + "manageEntityID": "https://manage.test.surfconext.nl/shibboleth", + "manageType": "saml20_sp" + }, + { + "manageEntityID": "invite.test.surfconext.nl", + "manageType": "oidc10_rp" + } + ] + }, + "SRAM": { + "name": "SRAM-verantwoordelijke", + "roleId": "716dda55-bd5e-ea11-90f9-0050569571ea", + "applications": [ + { + "manageEntityID": "https://manage.test.surfconext.nl/shibboleth", + "manageType": "saml20_sp" + }, + { + "manageEntityID": "invite.test.surfconext.nl", + "manageType": "oidc10_rp" + } + ] + }, + "SWB": { + "name": "SURFwireless-beheerder", + "roleId": "57f2ce48-08e4-e811-8100-005056956c1a", + "applications": [ + { + "manageEntityID": "https://manage.test.surfconext.nl/shibboleth", + "manageType": "saml20_sp" + }, + { + "manageEntityID": "invite.test.surfconext.nl", + "manageType": "oidc10_rp" + } + ] + }, + "SWV": { + "name": "SURFwireless-verantwoordelijke", + "roleId": "16fef478-9189-ec11-93b0-0022489d3138", + "applications": [ + { + "manageEntityID": "https://manage.test.surfconext.nl/shibboleth", + "manageType": "saml20_sp" + }, + { + "manageEntityID": "invite.test.surfconext.nl", + "manageType": "oidc10_rp" + } + ] + }, + "SUP": { + "name": "Superuser", + "roleId": "7de1c1b9-11cb-ea11-90ff-0050569571ea", + "applications": [ + { + "manageEntityID": "https://manage.test.surfconext.nl/shibboleth", + "manageType": "saml20_sp" + }, + { + "manageEntityID": "invite.test.surfconext.nl", + "manageType": "oidc10_rp" + } + ] + }, + "SUPRO": { + "name": "SuperuserRO", + "roleId": "80e1c1b9-11cb-ea11-90ff-0050569571ea", + "applications": [ + { + 
"manageEntityID": "https://manage.test.surfconext.nl/shibboleth", + "manageType": "saml20_sp" + }, + { + "manageEntityID": "invite.test.surfconext.nl", + "manageType": "oidc10_rp" + } + ] + }, + "OB": { + "name": "OperationeelBeheerder", + "roleId": "112a6aa1-07e4-e811-8100-005056956c1a", + "applications": [] + } +} diff --git a/group_vars/all.yml b/environments/template/group_vars/all.yml similarity index 93% rename from group_vars/all.yml rename to environments/template/group_vars/all.yml index d46919794..1d8bd6f84 100644 --- a/group_vars/all.yml +++ b/environments/template/group_vars/all.yml @@ -34,6 +34,7 @@ httpd_csp: lenient: "default-src 'self'; object-src 'none'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; img-src 'self' data:; form-action 'self'; frame-ancestors 'none'; base-uri 'none'" lenient_with_static_img: "default-src 'self'; object-src 'none'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; img-src 'self' https://{{ static_vhost }} http://localhost:* data:; form-action 'self'; frame-ancestors 'none'; base-uri 'none'" lenient_with_static_img_with_oidcng: "default-src 'self'; object-src 'none'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; connect-src 'self' https://{{ oidcng_vhost }}; img-src 'self' https://{{ static_vhost }} http://localhost:* data:; form-action 'self'; frame-ancestors 'none'; base-uri 'none'" + lenient_with_static_img_with_surfconext: "default-src 'self'; object-src 'none'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; connect-src 'self' https://{{ oidcng_vhost }}; img-src 'self' https://{{ static_vhost }} https://*.surfconext.nl http://localhost:* data:; form-action 'self'; frame-ancestors 'none'; base-uri 'none'" strict: "default-src 'none'; script-src 'self'; style-src 'self' 'unsafe-inline'; font-src 'self'; connect-src 'self'; img-src 'self' data:; form-action 'self'; frame-ancestors 'none'; base-uri 'none'; manifest-src 'self'" 
strict_with_static_img: "default-src 'none'; script-src 'self'; style-src 'self' 'unsafe-inline'; font-src 'self'; connect-src 'self'; img-src 'self' https://{{ static_vhost }} http://localhost:* data:; form-action 'self'; frame-ancestors 'none'; base-uri 'none'; manifest-src 'self'" lenient_with_static_img_for_idp: "default-src 'none'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; font-src 'self'; connect-src 'self' https://{{ oidcng_vhost }}; img-src 'self' https://{{ static_vhost }} http://localhost:* data:; form-action 'self' https://*.{{ base_domain }}; frame-ancestors 'none'; base-uri 'none'" diff --git a/group_vars/java-apps-common.yml b/environments/template/group_vars/java-apps-common.yml similarity index 100% rename from group_vars/java-apps-common.yml rename to environments/template/group_vars/java-apps-common.yml diff --git a/group_vars/local-certs.yml b/environments/template/group_vars/local-certs.yml similarity index 100% rename from group_vars/local-certs.yml rename to environments/template/group_vars/local-certs.yml diff --git a/group_vars/minimal.yml b/environments/template/group_vars/minimal.yml similarity index 100% rename from group_vars/minimal.yml rename to environments/template/group_vars/minimal.yml diff --git a/environments/template/group_vars/mongo_servers.yml b/environments/template/group_vars/mongo_servers.yml new file mode 100644 index 000000000..70bb40871 --- /dev/null +++ b/environments/template/group_vars/mongo_servers.yml @@ -0,0 +1,12 @@ +--- +replica_set_name: my_mongo_cluster + +mongo_cluster_members: + - host: "mongo3.example.com:{{ mongo_port }}" # arbiter first or change mongo_arbiter_index + priority: 1 # can vote, cannot become primary + - host: "mongo2.example.com:{{ mongo_port }}" + priority: 2 + - host: "mongo1.example.com:{{ mongo_port }}" + priority: 3 + +# mongo_arbiter_index # default is 0 \ No newline at end of file diff --git a/environments/template/group_vars/template.yml 
b/environments/template/group_vars/template.yml index d785150d5..f06f0c0d0 100644 --- a/environments/template/group_vars/template.yml +++ b/environments/template/group_vars/template.yml @@ -1,6 +1,7 @@ --- env: "%env%" +show_debug_info: false # Show extra debug info mariadb_host: localhost rsyslog_host: localhost database_clients: [ "{{ mariadb_host }}" ] @@ -27,13 +28,11 @@ relp_remote: php_display_errors: 1 -attribute_aggregation_gui_version: "3.0.6" -attribute_aggregation_server_version: "3.0.6" +attribute_aggregation_version: "3.0.6" oidc_playground_client_version: "3.0.0" oidc_playground_server_version: "3.0.0" engine_version: "6.15.0" -manage_gui_version: "7.3.3-SNAPSHOT" -manage_server_version: "7.3.3-SNAPSHOT" +manage_version: "9.6.0" lifecycle_version: "0.1.1" monitoring_tests_version: "7.0.0" mujina_version: "8.0.2" @@ -43,9 +42,7 @@ profile_version: "3.1.4" teams_gui_version: "9.1.3" teams_server_version: "9.1.3" voot_version: "6.2.0" -myconext_server_version: "6.0.2" -myconext_gui_version: "6.0.2" -account_gui_version: "6.0.2" +myconext_version: "8.1.12-1" dashboard_server_version: "12.3.4" dashboard_gui_version: "12.3.4" invite_server_version: "0.0.2-SNAPSHOT" @@ -215,7 +212,7 @@ aa: sab_username: coin-test sab_rest_username: surfconexttest sab_rest_password: "{{ aa_sab_rest_password }}" - sab_rest_endpoint: https://sab.acc.surfaccess.nl + sab_rest_endpoint: https://sab.acc.surfaccess.nl/api/profile surfmarket_url: https://example.org surfmarket_username: example@example.org surfmarket_password: "{{ aa_surfmarket_password }}" @@ -289,6 +286,8 @@ myconext: feature_create_eduid_institution_landing: true feature_allowlist: false feature_dry_run_email_cron: true + feature_enable_account_linking: true + feature_use_app: true sms_api_url: "https://rest.spryngsms.com/v1/messages" sms_api_route: "default" sp_entity_id: https://engine.{{ base_domain }}/authentication/sp/metadata @@ -303,6 +302,12 @@ myconext: - { name: "terms_of_service" , url: 
"https://example.org/x/LozaAQ"} - { name: "voorwaarden" , url: "https://example.org/x/HYzaAQ"} geo_location_ external_url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key={license_key}&suffix=tar.gz" + email: + from_deprovisioning: MyConext + from_code: MyConext + from_app_nudge: MyConext + from_new_device: MyConext + monitoring_tests: metadata_sp_url: "{{ monitoring_tests_metadata_sp_url }}" diff --git a/environments/template/host_vars/mongo1.example.com/vars.yml b/environments/template/host_vars/mongo1.example.com/vars.yml new file mode 100644 index 000000000..66c502e27 --- /dev/null +++ b/environments/template/host_vars/mongo1.example.com/vars.yml @@ -0,0 +1,3 @@ +backend_ipv4: +mongo_replication_role: primary +mongo_cluster_cert: # use an identical OU, O or DC for all cluster members \ No newline at end of file diff --git a/environments/template/host_vars/mongo1.example.com/vault b/environments/template/host_vars/mongo1.example.com/vault new file mode 100644 index 000000000..df314594e --- /dev/null +++ b/environments/template/host_vars/mongo1.example.com/vault @@ -0,0 +1 @@ +mongo_cluster_private_key: # encrypt this \ No newline at end of file diff --git a/environments/template/host_vars/mongo2.example.com/vars.yml b/environments/template/host_vars/mongo2.example.com/vars.yml new file mode 100644 index 000000000..6800d6fdc --- /dev/null +++ b/environments/template/host_vars/mongo2.example.com/vars.yml @@ -0,0 +1,3 @@ +backend_ipv4: +mongo_replication_role: secondary +mongo_cluster_cert: # use an identical OU, O or DC for all cluster members \ No newline at end of file diff --git a/environments/template/host_vars/mongo2.example.com/vault b/environments/template/host_vars/mongo2.example.com/vault new file mode 100644 index 000000000..df314594e --- /dev/null +++ b/environments/template/host_vars/mongo2.example.com/vault @@ -0,0 +1 @@ +mongo_cluster_private_key: # encrypt this \ No newline at end of file diff --git 
a/environments/template/host_vars/mongo3.example.com/vars.yml b/environments/template/host_vars/mongo3.example.com/vars.yml new file mode 100644 index 000000000..dba8e1797 --- /dev/null +++ b/environments/template/host_vars/mongo3.example.com/vars.yml @@ -0,0 +1,3 @@ +backend_ipv4: +mongo_replication_role: arbiter +mongo_cluster_cert: # use an identical OU, O or DC for all cluster members \ No newline at end of file diff --git a/environments/template/host_vars/mongo3.example.com/vault b/environments/template/host_vars/mongo3.example.com/vault new file mode 100644 index 000000000..df314594e --- /dev/null +++ b/environments/template/host_vars/mongo3.example.com/vault @@ -0,0 +1 @@ +mongo_cluster_private_key: # encrypt this \ No newline at end of file diff --git a/environments/template/inventory b/environments/template/inventory index 89e35169f..f1b3dabed 100644 --- a/environments/template/inventory +++ b/environments/template/inventory @@ -1,79 +1,145 @@ -[storage] -%target_host% - -[mongo_servers] -%target_host% - -[selfsigned_certs] -%target_host% - -[loadbalancer] -%target_host% - [%env%:children] -storage -mongo_servers -selfsigned_certs -sysloghost -loadbalancer_ha -loadbalancer -elk -lifecycle dbcluster dbcluster_nodes +docker_servers +lifecycle +loadbalancer +loadbalancer_ha +mongo_servers +mysql_only stats - -[sysloghost] -[loadbalancer_ha] -[elk] -[lifecycle] -[dbcluster] -[dbcluster_nodes] -[stats] +sysloghost [base:children] -loadbalancer -storage dbcluster -sysloghost -elk +dbcluster_nodes +docker_servers lifecycle -selfsigned_certs - -[loadbalancer:children] +loadbalancer loadbalancer_ha - -[frontend:children] -lifecycle +mongo_servers +mysql_only +stats +sysloghost [db_mysql:children] -storage +mysql_only dbcluster dbcluster_nodes -[local] -localhost ansible_connection=local +[mysql_only] -# for refactored playbooks +[dbcluster] # all galera members including arbitrator +db1.example.com +db2.example.com +db3.example.com # arbitrator 
-[%location%:children] # create one or more groups here for your chosen location(s) -storage -mongo_servers -sysloghost -loadbalancer_ha -loadbalancer +[dbcluster_nodes] # all galera members with a full mysql installation +db1.example.com +db2.example.com + +[frontend:children] lifecycle -dbcluster -dbcluster_nodes -stats -docker_servers -[base:children] -docker_servers +[loadbalancer] # use this for standalone loadbalancer + +[loadbalancer_ha] # use this for failover loadbalancer setup +lb1.example.com +lb2.example.com + +[lifecycle] +lifecycle1.example.com + +[mongo_servers] +mongo1.example.com +mongo2.example.com +mongo3.example.com # arbitrator + +[mysql_standalone] +stats1.example.com # stats server needs mysql +log1.example.com # log server needs mysql -[docker_servers] +[stats] +stats1.example.com + +[sysloghost] +log1.example.com + +# Docker + +[docker_servers:children] +docker_apps1 +docker_apps2 + +# Group the docker servers + +[docker_apps1] docker1.example.com + +[docker_apps2] docker2.example.com -[docker_invite] -docker2.example.com \ No newline at end of file +# Install containerized apps on the docker group you want + +[docker_invite:children] +docker_apps1 + +[docker_teams:children] +docker_apps1 + +[docker_pdp:children] +docker_apps1 + +[docker_voot:children] +docker_apps1 + +[docker_attribute_aggregation:children] +docker_apps1 + +[docker_oidc_playground:children] +docker_apps1 + +[docker_myconext:children] +docker_apps1 + +[docker_manage:children] +docker_apps1 + +[docker_oidcng:children] +docker_apps1 + +[docker_stats:children] +docker_apps1 + +[docker_diyidp:children] +docker_apps1 + +[docker_profile:children] +docker_apps1 + +[docker_lifecycle:children] +docker_apps1 + +[docker_engineblock:children] +docker_apps2 + +[docker_stepuptiqr:children] +docker_apps1 + +[docker_stepupwebauthn:children] +docker_apps1 + +[docker_stepupazuremfa:children] +docker_apps1 + +[docker_stepupmiddleware:children] +docker_apps2 + 
+[docker_stepupselfservice:children] +docker_apps1 + +[docker_stepupra:children] +docker_apps1 + +[docker_stepupgateway:children] +docker_apps2 diff --git a/environments/template/secrets/skeleton.yml b/environments/template/secrets/secret_example.yml similarity index 99% rename from environments/template/secrets/skeleton.yml rename to environments/template/secrets/secret_example.yml index d2faac776..b029373fb 100644 --- a/environments/template/secrets/skeleton.yml +++ b/environments/template/secrets/secret_example.yml @@ -94,7 +94,6 @@ manage_myconext_secret: secret manage_pdp_secret: secret pdp_push_endpoint_secret: secret manage_sp_dashboard_secret: secret -manage_access_secret: secret manage_sysadmin_secret: secret manage_invite_secret: secret manage_sram_secret: secret @@ -175,3 +174,6 @@ invite_private_key_pkcs8: | exUPAkqg7ZYNOJa+amGnPWMA1LT0LsIchvqNM9D0xX7PY6zWIH/NDS/yMfIwzcmn NVHeh6irTrXgMsuDg1f/rqid -----END PRIVATE KEY----- + +minio_passwords: + openconext: secret diff --git a/filter_plugins/merge_usergroups.py b/filter_plugins/merge_usergroups.py deleted file mode 100644 index b252d80a2..000000000 --- a/filter_plugins/merge_usergroups.py +++ /dev/null @@ -1,42 +0,0 @@ -# merge_usersgroups: merge extra groups into a user object -# -# Usage: {{ users | merge_usergroups(extra_groups }} -# with users = [{"user1": "myuser", "groups": ["bar","baz"], "other": "stuff"}] -# and extra_groups = {"foo": ["user1"}] -# result: [{"user1": "myuser", "groups": ["foo","bar","baz"], "other": "stuff"}]` -# -from __future__ import annotations -from ansible.utils.display import Display - - -def _merge_usergroups(users: list[dict[str, str | list[str]]], - extra_groups: dict[str, list[str]]) -> list[dict[str, str | list[str]]]: - display = Display() - display.vv(f"_merge_usergroups: arg1: {users}") - display.vv(f"_merge_usergroups: arg2: {extra_groups}") - - # first invert the extra_groups dict to obtain a list of groups per user - user_extra_groups = {} - for group, 
group_users in extra_groups.items(): - for u in group_users: - if u not in user_extra_groups: - user_extra_groups[u] = [] - user_extra_groups[u].append(group) - - display.vv(f"_merge_usergroups: user_extra_groups: {user_extra_groups}") - - # then merge the extra groups into the user objects - for user in users: - user['groups'] = user.get('groups', []) + user_extra_groups.get(user['username'], []) - - display.vv(f"_merge_usergroups: users: {users}") - - return users - - -class FilterModule(object): - @staticmethod - def filters(): - return { - 'merge_usergroups': _merge_usergroups, - } diff --git a/inc_test_loadbalancer_tasklist.yml b/inc_test_loadbalancer_tasklist.yml deleted file mode 100644 index 9a697cf87..000000000 --- a/inc_test_loadbalancer_tasklist.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -# playbook loops over: -# loadbalancers -# haproxy_applications (item) -# -# and does an url request on lodbalancer ip with vhostname as the host header - -- name: Try to reach {{ item.vhost_name }} via https://{{ haproxy_sni_ip_restricted.ipv4 }}:443 # noqa: name[template] jinja template helps with debugging - when: item.restricted is defined and item.restricted - ansible.builtin.uri: - url: "https://{{ haproxy_sni_ip_restricted.ipv4 }}:443/{{ item.ha_url }}" - method: GET - status_code: [200, 302] - return_content: false - validate_certs: false - headers: - host: "{{ item.vhost_name }}" - register: result - until: result.status == 200 or result.status == 302 - retries: 3 - delay: 2 - delegate_to: 127.0.0.1 # run check from deploy host - -# Try to reach applications via loadbalancer without restricted ip v4 -- name: Try to reach {{ item.vhost_name }} via https://{{ haproxy_sni_ip.ipv4 }}:443 # noqa: name[template] jinja template helps with debugging - when: item.restricted is undefined - ansible.builtin.uri: - url: "https://{{ haproxy_sni_ip.ipv4 }}:443/{{ item.ha_url }}" - method: GET - status_code: [200, 302] - return_content: false - validate_certs: false - headers: - 
host: "{{ item.vhost_name }}" - register: result - until: result.status == 200 or result.status == 302 - retries: 3 - delay: 2 - delegate_to: 127.0.0.1 #run check from deploy host diff --git a/playbook_haproxy.yml b/playbook_haproxy.yml deleted file mode 100644 index 7c28c22f4..000000000 --- a/playbook_haproxy.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- hosts: loadbalancer_ha - become: false - gather_facts: no - roles: - - { role: haproxy_mgnt, tags: ['haproxy_mgnt'] } - - { role: haproxy_acls, tags: ['haproxy_acls'] } diff --git a/prep-env b/prep-env old mode 100755 new mode 100644 index df4f8ad36..4f2a9e11f --- a/prep-env +++ b/prep-env @@ -7,6 +7,7 @@ # When a password must be sha-encoded, the clear-text password must be set before the sha-encoded # Key for the sha-encoded password must be the same as the clear-text password with the '_sha' # +# todo: If goup_vars exampels are complete this is probably not necessary? investigate # ----- configuration ENV_DIR="environments" diff --git a/provision b/provision deleted file mode 100755 index 4d007087c..000000000 --- a/provision +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash -set -e -export ANSIBLE_ROLES_PATH="roles:roles-external" - -# parse parameters -help="Usage: $0 [ANSIBLE_OPT] -eg $0 acc --tags eb -will provision the tag eb on acc" - -if [ "$#" -eq 0 ]; then - echo -e "$help" - exit 1 -fi - -env=$1 -shift - -# Set some variables -environment_dir="environments-external/$env/" -inventory="environments-external/$env/inventory" -playbook="provision.yml" - -if ! [ -e "$inventory" ]; then - echo "Inventory file '$inventory' for environment '$env' not found." 
- exit 1 -fi - -# Download extra roles when requirements.yml is present -# and roles-external is not a symlink and is not a git repository -# and the file .no-provision is not present -if [ -f "$environment_dir"/requirements.yml ] -then - if [ -L roles-external ] || [ -d roles-external/.git ] \ - || [ -f roles-external/.no-provision ] - then - echo "Skipping download of extra roles" - else - echo "Downloading roles to roles-external" - ansible-galaxy install -r "$environment_dir"/requirements.yml -f -p . - fi -else - echo "No extra roles found to be downloaded" -fi - -cmd=$( - cat <<-EOF -ansible-playbook -i $inventory $playbook -e environment_dir=$environment_dir $@ -EOF -) -echo "executing $cmd" | tr -d "\n" | tr -s ' ' -echo - -$cmd diff --git a/provision.yml b/provision.yml index 2f9482e49..95555f883 100644 --- a/provision.yml +++ b/provision.yml @@ -1,5 +1,6 @@ --- -- hosts: all +- name: Gather secrets + hosts: all gather_facts: no tasks: - name: Read vars from secrets file @@ -8,139 +9,304 @@ tags: - always -- hosts: rsyslog +- name: Deploy rsyslog + hosts: base gather_facts: yes become: true roles: - role: rsyslog - tags: ['core', 'base', 'rsyslog'] + tags: ['core', 'base', 'rsyslog'] -- hosts: base +- name: Deploy iptables + hosts: base gather_facts: yes become: true roles: - role: iptables when: - iptables_enable | bool - tags: ['core', 'base', 'iptables'] - - role: selfsigned_certs - when: - - "{{ use_selfsigned_certs | default(false) | bool }}" - tags: ['core', 'base', 'selfsigned_certs'] + tags: ['core', 'base', 'iptables'] -- hosts: loadbalancer +- name: Deploy loadbalancer + hosts: loadbalancer gather_facts: true become: true roles: - role: haproxy - tags: ['core', 'loadbalancer_ha', 'loadbalancer', 'lb'] + tags: ['core', 'loadbalancer_ha', 'loadbalancer', 'lb'] + +- name: Deploy loadbalancer keepalived + hosts: loadbalancer_ha + gather_facts: true + become: true + roles: - role: keepalived - when: - - "'loadbalancer_ha' in groups and 
inventory_hostname in groups['loadbalancer_ha']" - tags: ['core', 'loadbalancer_ha', 'keepalived'] + tags: ['core', 'loadbalancer_ha', 'keepalived'] + +- name: Deploy loadbalancer bind + hosts: loadbalancer_ha + gather_facts: true + become: true + roles: - role: bind - when: - - "'loadbalancer_ha' in groups and inventory_hostname in groups['loadbalancer_ha']" - tags: ['core', 'loadbalancer_ha', 'bind'] + tags: ['core', 'loadbalancer_ha', 'bind'] -- hosts: db_mysql +- name: Deploy standalone mariadb + hosts: mysql_standalone gather_facts: no become: true serial: 1 roles: - role: mysql - when: - - inventory_hostname in groups['storage'] - tags: ['core', 'db_mysql', 'mysql'] + tags: ['core', 'db_mysql', 'mysql'] + +# todo: clearer groupnames +# for now: +# dbcluster : all servers in galera cluster +# dbcluster_nodes: just full nodes with mariadb in galera cluster +# mysql_only: mariadb not in galera cluster +# db_mysql: all mariadb servers, so mysql_only + dbcluster_nodes + +- name: Deploy galera + hosts: dbcluster + gather_facts: no + become: true + serial: 1 + roles: - role: galera - when: - - inventory_hostname in groups['dbcluster'] tags: ['core', 'db_mysql', 'galera'] + +- name: Deploy galera keepalived + hosts: dbcluster_nodes + gather_facts: no + become: true + serial: 1 + roles: - role: keepalived - when: - - inventory_hostname in groups['dbcluster_nodes'] - tags: ['core', 'db_mysql', 'keepalived'] + tags: ['core', 'db_mysql', 'keepalived'] + +- name: Create mysql users + hosts: db_mysql + gather_facts: no + become: true + roles: - role: galera_create_users - tags: ['core', 'db_mysql', 'galera', 'galera_create_users'] + tags: ['core', 'db_mysql', 'galera', 'galera_create_users'] -- hosts: mongo_servers +- name: Deploy mongo servers + hosts: mongo_servers gather_facts: yes become: true + serial: 1 roles: - role: mongo - tags: ['core', 'mongo'] + tags: ['core', 'mongo'] -- hosts: elk +- name: Deploy stats + hosts: stats gather_facts: true become: true roles: - - 
role: elk - tags: ['elk' ] + - role: influxdb + tags: ['influxdb' ] -- hosts: stats - gather_facts: true +# Separate groups for all containerized apps +# Dividing apps across the container services should be set in +# the inventory not in the playbook, this way you can easily change +# it for different environments + +- name: Deploy attribute-aggregation app + hosts: docker_attribute_aggregation become: true roles: - - role: influxdb - tags: ['influxdb' ] + - role: attribute_aggregation + tags: ['aa', 'attribute-aggregation'] -- hosts: stepuppapp +- name: Deploy dashboard app + hosts: docker_dashboard become: true roles: - - { role: stepupwebauthn, tags: ['stepupwebauthn','stepup' ] } - - { role: stepupazuremfa, tags: ['stepupazuremfa', 'stepup'] } - - { role: stepupmiddleware, tags: ['stepupmiddleware' , 'stepup'] } - - { role: stepupgateway, tags: ['stepupgateway' , 'stepup'] } - - { role: stepupselfservice, tags: ['stepupselfservice' , 'stepup'] } - - { role: stepupra , tags: ['stepupra' , 'stepup'] } - - { role: stepupgateway , tags: ['stepupgateway' , 'stepup'] } + - role: dashboard + tags: ['dashboard'] -- hosts: docker_apps1,docker_apps2 +- name: Deploy diyidp app + hosts: docker_diyidp become: true roles: - - { role: docker, tags: ['docker' ] } + - diyidp + tags: ['diyidp'] -- hosts: mujina +- name: Deploy engineblock app + hosts: docker_engineblock become: true roles: - - { role: mujina-idp, tags: ["mujina-idp", "mujina"] } - - { role: mujina-sp, tags: ["mujina-sp", "mujina"] } + - engineblock + tags: ['engineblock', 'eb'] -- hosts: docker_apps1 +- name: Deploy invite app + hosts: docker_invite become: true roles: - - { role: invite, tags: ['invite' ] } - - { role: dashboard, tags: ["dashboard"] } - - { role: teams, tags: ["teams"] } - - { role: pdp, tags: ["pdp"] } - - { role: voot, tags: ["voot"] } - - { role: attribute-aggregation, tags: ["aa", "attribute-aggregation"] } - - { role: oidc-playground, tags: ["oidc-playground"] } - - { role: myconext, 
tags: ["myconext"] } - - { role: manage, tags: ["manage"] } - - { role: oidcng, tags: ["oidcng"] } - - { role: stats, tags: ["stats"] } - - { role: diyidp, tags: ["diyidp"] } - - { role: profile, tags: ["profile"] } - - { role: lifecycle, tags: ["lifecycle"] } - - { role: stepuptiqr, tags: ['stepuptiqr' , 'stepup'] } - - { role: openaccess, tags: ['openaccess' ] } + - invite + tags: ['invite'] -- hosts: docker_apps2 +- name: Deploy lifecycle app + hosts: docker_lifecycle become: true roles: - - { role: engineblock, tags: ["eb"] } - - { role: stepupgateway, tags: [ 'stepupgateway' , 'stepup' ] } + - lifecycle + tags: ['lifecycle'] -- hosts: docker_mariadb +- name: Deploy manage app + hosts: docker_manage become: true roles: - - { role: mariadbdocker, tags: ['mariadbdocker']} - - { role: mongodbdocker, tags: ['mongodbdocker']} + - role: manage + tags: ['manage'] + +- name: Deploy mujina-idp app + hosts: docker_mujina_idp + become: true + roles: + - mujina-idp + tags: ['mujina-idp', 'mujina'] + +- name: Deploy mujina-sp app + hosts: docker_mujina_sp + become: true + roles: + - mujina-sp + tags: ['mujina-sp', 'mujina'] + +- name: Deploy myconext app + hosts: docker_myconext + become: true + roles: + - myconext + tags: ['myconext'] + +- name: Deploy oidcng app + hosts: docker_oidcng + become: true + roles: + - oidcng + tags: ['oidcng'] + +- name: Deploy oidc-playground app + hosts: docker_oidc_playground + become: true + roles: + - oidc-playground + tags: ['oidc-playground'] + +- name: Deploy openaccess app & server + hosts: docker_openaccess + become: true + roles: + - openaccess + tags: ['openaccess'] + +- name: Deploy pdp app + hosts: docker_pdp + become: true + roles: + - role: pdp + tags: ['pdp'] + +- name: Deploy profile app + hosts: docker_profile + become: true + roles: + - role: profile + tags: ['profile'] + +- name: Deploy spdashboard app + hosts: docker_spdashboard + become: true + roles: + - spdashboard + tags: ['spdashboard'] + +- name: Deploy stats app + 
hosts: docker_stats + become: true + roles: + - stats + tags: ['stats'] + +- name: Deploy stepupazuremfa app + hosts: docker_stepupazuremfa + become: true + roles: + - stepupazuremfa + tags: ['stepupazuremfa', 'stepup'] + +- name: Deploy stepupgateway app + hosts: docker_stepupgateway + become: true + roles: + - stepupgateway + tags: ['stepupgateway', 'stepup'] + +- name: Deploy stepupmiddleware app + hosts: docker_stepupmiddleware + become: true + roles: + - stepupmiddleware + tags: ['stepupmiddleware', 'stepup'] + +- name: Deploy stepupra app + hosts: docker_stepupra + become: true + roles: + - stepupra + tags: ['stepupra', 'stepup'] + +- name: Deploy stepupselfservice app + hosts: docker_stepupselfservice + become: true + roles: + - stepupselfservice + tags: ['stepupselfservice', 'stepup'] + +- name: Deploy stepuptiqr app + hosts: docker_stepuptiqr + become: true + roles: + - stepuptiqr + tags: ['stepuptiqr', 'stepup'] + +- name: Deploy stepupwebauthn app + hosts: docker_stepupwebauthn + become: true + roles: + - role: stepupwebauthn + tags: ['stepupwebauthn', 'stepup'] -- hosts: docker_minio +- name: Deploy teams app + hosts: docker_teams become: true roles: - - { role: minio, tags: ["minio"] } + - teams + tags: ['teams'] + +- name: Deploy voot app + hosts: docker_voot + become: true + roles: + - voot + tags: ['voot'] + +- name: Deploy minio app + hosts: docker_minio + become: true + roles: + - minio + tags: ['minio'] + +- hosts: docker_mariadb + become: true + roles: + - { role: mariadbdocker, tags: ['mariadbdocker']} + - { role: mongodbdocker, tags: ['mongodbdocker']} -- import_playbook: "{{ environment_dir }}/playbook.yml" diff --git a/roles/attribute-aggregation/handlers/main.yml b/roles/attribute-aggregation/handlers/main.yml deleted file mode 100644 index 05662c7ba..000000000 --- a/roles/attribute-aggregation/handlers/main.yml +++ /dev/null @@ -1,9 +0,0 @@ -- name: restart attribute-aggregationserver - community.docker.docker_container: - name: aaserver - 
state: started - restart: true - # avoid restarting it creates unexpected data loss according to docker_container_module notes - comparisons: - '*': ignore - when: aaservercontainer is success and aaservercontainer is not change diff --git a/roles/attribute-aggregation/tasks/main.yml b/roles/attribute-aggregation/tasks/main.yml deleted file mode 100644 index a8959123f..000000000 --- a/roles/attribute-aggregation/tasks/main.yml +++ /dev/null @@ -1,70 +0,0 @@ ---- -- name: Create directory to keep configfile - ansible.builtin.file: - dest: "/opt/openconext/attribute-aggregation" - state: directory - owner: root - group: root - mode: "0770" - -- name: Place the serverapplication configfiles - ansible.builtin.template: - src: "{{ item }}.j2" - dest: /opt/openconext/attribute-aggregation/{{ item }} - owner: root - group: root - mode: "0644" - with_items: - - serverapplication.yml - - logback.xml - - attributeAuthorities.yml - - serviceProviderConfig.json - - apachelink.conf - notify: restart attribute-aggregationserver - -- name: Add the MariaDB docker network to the list of networks when MariaDB runs in Docker - ansible.builtin.set_fact: - aa_docker_networks: - - name: loadbalancer - - name: openconext_mariadb - when: mariadb_in_docker | default(false) | bool - -- name: Create and start the server container - community.docker.docker_container: - name: aaserver - image: ghcr.io/openconext/openconext-attribute-aggregation/aa-server:{{ attribute_aggregation_server_version }} - pull: true - restart_policy: "always" - state: started - networks: "{{ aa_docker_networks }}" - mounts: - - source: /opt/openconext/attribute-aggregation/serverapplication.yml - target: /application.yml - type: bind - - source: /opt/openconext/attribute-aggregation/logback.xml - target: /logback.xml - type: bind - - source: /opt/openconext/attribute-aggregation/attributeAuthorities.yml - target: /attributeAuthorities.yml - type: bind - - source: 
/opt/openconext/attribute-aggregation/serviceProviderConfig.json - target: /serviceProviderConfig.json - type: bind - command: "-Xmx128m --spring.config.location=./" - etc_hosts: - host.docker.internal: host-gateway - healthcheck: - test: - [ - "CMD", - "wget", - "-no-verbose", - "--tries=1", - "--spider", - "http://localhost:8080/aa/api/internal/health", - ] - interval: 10s - timeout: 10s - retries: 3 - start_period: 10s - register: aaservercontainer diff --git a/roles/attribute-aggregation/defaults/main.yml b/roles/attribute_aggregation/defaults/main.yml similarity index 100% rename from roles/attribute-aggregation/defaults/main.yml rename to roles/attribute_aggregation/defaults/main.yml diff --git a/roles/attribute_aggregation/handlers/main.yml b/roles/attribute_aggregation/handlers/main.yml new file mode 100644 index 000000000..c5d574e14 --- /dev/null +++ b/roles/attribute_aggregation/handlers/main.yml @@ -0,0 +1,19 @@ +- name: "Restart attribute-aggregationserver" + community.docker.docker_container: + name: aaserver + state: started + restart: true + # avoid restarting it creates unexpected data loss according to docker_container_module notes + comparisons: + '*': ignore + when: "aa_servercontainer is success and aa_servercontainer is not changed" + +- name: "Restart attribute-aggregationlink" + community.docker.docker_container: + name: aalink + state: started + restart: true + # avoid restarting it creates unexpected data loss according to docker_container_module notes + comparisons: + '*': ignore + when: "aa_linkcontainer is success and aa_linkcontainer is not changed" diff --git a/roles/attribute_aggregation/tasks/main.yml b/roles/attribute_aggregation/tasks/main.yml new file mode 100644 index 000000000..0631e8fdb --- /dev/null +++ b/roles/attribute_aggregation/tasks/main.yml @@ -0,0 +1,143 @@ +--- +- name: Create directory to keep configfile + ansible.builtin.file: + dest: "/opt/openconext/attribute-aggregation" + state: "directory" + owner: "root" + 
group: "root" + mode: "0770" + +- name: Place the server application configfiles + ansible.builtin.template: + src: "{{ item }}.j2" + dest: "/opt/openconext/attribute-aggregation/{{ item }}" + owner: "root" + group: "root" + mode: "0644" + with_items: + - "serverapplication.yml" + - "logback.xml" + - "attributeAuthorities.yml" + - "serviceProviderConfig.json" + notify: + - "Restart attribute-aggregationserver" + +- name: Place the link application configfiles + ansible.builtin.template: + src: "{{ item }}.j2" + dest: "/opt/openconext/attribute-aggregation/{{ item }}" + owner: "root" + group: "root" + mode: "0644" + with_items: + - "apachelink.conf" + notify: + - "Restart attribute-aggregationlink" + +- name: Add the MariaDB docker network to the list of networks when MariaDB runs in Docker + ansible.builtin.set_fact: + aa_docker_networks: + - name: "loadbalancer" + - name: "openconext_mariadb" + when: "mariadb_in_docker | default(false) | bool" + +- name: Create and start the server container + community.docker.docker_container: + name: "aaserver" + image: "ghcr.io/openconext/openconext-attribute-aggregation/aa-server:{{ attribute_aggregation_version }}" + pull: true + restart_policy: "always" + state: "started" + networks: "{{ aa_docker_networks }}" + mounts: + - source: "/opt/openconext/attribute-aggregation/serverapplication.yml" + target: "/application.yml" + read_only: true + type: "bind" + - source: "/opt/openconext/attribute-aggregation/logback.xml" + target: "/logback.xml" + read_only: true + type: "bind" + - source: "/opt/openconext/attribute-aggregation/attributeAuthorities.yml" + target: "/attributeAuthorities.yml" + read_only: true + type: "bind" + - source: "/opt/openconext/attribute-aggregation/serviceProviderConfig.json" + target: "/serviceProviderConfig.json" + read_only: true + type: "bind" + command: "-Xmx128m --spring.config.location=./" + etc_hosts: + host.docker.internal: "host-gateway" + labels: + traefik.http.routers.aaserver.rule: 
"Host(`aa.{{ base_domain }}`)" + traefik.http.routers.aaserver.tls: "true" + traefik.enable: "true" + healthcheck: + test: + [ + "CMD", + "wget", + "-no-verbose", + "--tries=1", + "--spider", + "http://localhost:8080/internal/health", + ] + interval: "10s" + timeout: "10s" + retries: 3 + start_period: "10s" + notify: "Restart attribute-aggregationserver" + register: "aa_servercontainer" + +- name: Create the gui link container + community.docker.docker_container: + name: "aalink" + image: "ghcr.io/openconext/openconext-basecontainers/apache2-shibboleth:latest" + pull: true + restart_policy: "always" + state: "started" + networks: "{{ aa_docker_networks }}" + mounts: + - source: "/opt/openconext/attribute-aggregation/apachelink.conf" + target: "/etc/apache2/sites-enabled/000-default.conf" + read_only: true + type: "bind" + - source: "/etc/localtime" + target: "/etc/localtime" + read_only: true + type: "bind" + - source: "/opt/openconext/common/favicon.ico" + target: "/var/www/favicon.ico" + read_only: true + type: "bind" + etc_hosts: + host.docker.internal: "host-gateway" + labels: + traefik.http.routers.aalink.rule: "Host(`link.{{ base_domain }}`)" + traefik.http.routers.aalink.tls: "true" + traefik.enable: "true" + healthcheck: + test: ["CMD", "curl", "--fail", "http://localhost/internal/health"] + interval: "10s" + timeout: "10s" + retries: 3 + start_period: "10s" + hostname: "attribute-link" + env: + HTTPD_CSP: "{{ httpd_csp.lenient_with_static_img }}" + HTTPD_SERVERNAME: "link.{{ base_domain }}" + OPENCONEXT_INSTANCENAME: "{{ instance_name }}" + OPENCONEXT_ENGINE_LOGOUT_URL: "https://engine.{{ base_domain }}/logout" + OPENCONEXT_HELP_EMAIL: "{{ support_email }}" + SHIB_ENTITYID: "https://link.{{ base_domain }}/shibboleth" + SHIB_REMOTE_ENTITYID: "https://engine.{{ base_domain }}/authentication/idp/metadata" + SHIB_REMOTE_METADATA: "{{ shibboleth_metadata_sources.engine }}" + register: "aa_linkcontainer" + +- name: Remove obsolete pdp containers + 
community.docker.docker_container: + name: "{{ item }}" + state: "absent" + loop: + - "aagui" diff --git a/roles/attribute-aggregation/templates/apachelink.conf.j2 b/roles/attribute_aggregation/templates/apachelink.conf.j2 similarity index 68% rename from roles/attribute-aggregation/templates/apachelink.conf.j2 rename to roles/attribute_aggregation/templates/apachelink.conf.j2 index ca0f3897f..f0cdcfda5 100644 --- a/roles/attribute-aggregation/templates/apachelink.conf.j2 +++ b/roles/attribute_aggregation/templates/apachelink.conf.j2 @@ -12,8 +12,8 @@ Redirect /orcid https://link.{{ base_domain }}/aa/api/client/information.html ProxyPass /Shibboleth.sso ! ProxyPass /redirect http://aaserver:8080/aa/api/redirect -ProxyPass /internal/health http://aaserver:8080/aa/api/internal/health -ProxyPass /internal/info http://aaserver:8080/aa/api/internal/info +ProxyPass /internal/health http://aaserver:8080/internal/health +ProxyPass /internal/info http://aaserver:8080/internal/info ProxyPass /aa/api http://aaserver:8080/aa/api ProxyPassReverse /aa/api http://aaserver:8080/aa/api @@ -22,3 +22,18 @@ ProxyPassReverse /aa/api/client http://aaserver:8080/aa/api/client Header always set X-Frame-Options "DENY" Header always set Referrer-Policy "strict-origin-when-cross-origin" Header always set X-Content-Type-Options "nosniff" + + + AuthType shibboleth + ShibUseHeaders On + ShibRequireSession On + Require valid-user + + + + Require all granted + + + + Require all denied + diff --git a/roles/attribute-aggregation/templates/attributeAuthorities.yml.j2 b/roles/attribute_aggregation/templates/attributeAuthorities.yml.j2 similarity index 92% rename from roles/attribute-aggregation/templates/attributeAuthorities.yml.j2 rename to roles/attribute_aggregation/templates/attributeAuthorities.yml.j2 index ff7b79e17..7cc4ce800 100644 --- a/roles/attribute-aggregation/templates/attributeAuthorities.yml.j2 +++ b/roles/attribute_aggregation/templates/attributeAuthorities.yml.j2 @@ -98,6 +98,7 @@ 
authorities: password: "{{ aa.invite_password }}", timeOut: 5000, type: "rest", + requestMethod: "GET", pathParams: [ { index: 1, sourceAttribute: "urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified" } ], @@ -108,7 +109,8 @@ authorities: } ], mappings: [ - { responseKey: "id", targetAttribute: "urn:mace:dir:attribute-def:isMemberOf" } + { responseKey: "id", targetAttribute: "urn:mace:dir:attribute-def:isMemberOf" }, + { responseKey: "autorisatie", targetAttribute: "urn:mace:surf.nl:attribute-def:surf-autorisaties" } ], attributes: [ { @@ -116,7 +118,14 @@ authorities: description: "Lists the roles the user is a member of", type: "string", example: "urn:mace:surf.nl:test.surfinvite.nl:77a273cf-6f0a-480a-991e-ada8e89e1d74:scim_mock_test_role" + }, + { + name: "urn:mace:surf.nl:attribute-def:surf-autorisaties", + description: "Lists the surf-autorisaties for the user", + type: "string", + example: "urn:mace:surfnet.nl:surfnet.nl:sab:role:DNS-Beheerder" } + ], requiredInputAttributes: [ { @@ -126,7 +135,7 @@ authorities: name: "SPentityID" } ], - validationRegExp: "^urn:(collab:group|mace:surf.nl):.*$" + validationRegExp: "^urn:(collab:group|mace:surf.nl|mace:surfnet.nl):.*$" } - { id: "sabrest", diff --git a/roles/attribute-aggregation/templates/logback.xml.j2 b/roles/attribute_aggregation/templates/logback.xml.j2 similarity index 100% rename from roles/attribute-aggregation/templates/logback.xml.j2 rename to roles/attribute_aggregation/templates/logback.xml.j2 diff --git a/roles/attribute-aggregation/templates/serverapplication.yml.j2 b/roles/attribute_aggregation/templates/serverapplication.yml.j2 similarity index 96% rename from roles/attribute-aggregation/templates/serverapplication.yml.j2 rename to roles/attribute_aggregation/templates/serverapplication.yml.j2 index 8e49715b4..e3a068d3e 100644 --- a/roles/attribute-aggregation/templates/serverapplication.yml.j2 +++ b/roles/attribute_aggregation/templates/serverapplication.yml.j2 @@ -5,10 +5,9 @@ logging: aa: 
DEBUG server: - # The port to where this Spring Boot application listens to. e.g. http://localhost:{{ springapp_tcpport }} + # The port to where this Spring Boot application listens to. e.g. http://localhost:8080 port: 8080 servlet: - context-path: /aa/api session: timeout: 28800 cookie: diff --git a/roles/attribute-aggregation/templates/serviceProviderConfig.json.j2 b/roles/attribute_aggregation/templates/serviceProviderConfig.json.j2 similarity index 100% rename from roles/attribute-aggregation/templates/serviceProviderConfig.json.j2 rename to roles/attribute_aggregation/templates/serviceProviderConfig.json.j2 diff --git a/roles/attribute-aggregation/vars/main.yml b/roles/attribute_aggregation/vars/main.yml similarity index 100% rename from roles/attribute-aggregation/vars/main.yml rename to roles/attribute_aggregation/vars/main.yml diff --git a/roles/engineblock/defaults/main.yml b/roles/engineblock/defaults/main.yml index 12cf0766c..e4769ec71 100644 --- a/roles/engineblock/defaults/main.yml +++ b/roles/engineblock/defaults/main.yml @@ -20,6 +20,7 @@ engine_feature_send_user_attributes: 0 # Cutoff point for showing unfiltered IdPs on the WAYF engine_wayf_cutoff_point_for_showing_unfiltered_idps: 50 +engine_wayf_show_remember_choice: false ## Engine installer specific variables. engine_version_dir: "{{ engine_version | replace('/', '-') }}" diff --git a/roles/engineblock/templates/parameters.yml.j2 b/roles/engineblock/templates/parameters.yml.j2 index 42d9d6f45..f14f45df4 100644 --- a/roles/engineblock/templates/parameters.yml.j2 +++ b/roles/engineblock/templates/parameters.yml.j2 @@ -182,7 +182,7 @@ parameters: wayf.cutoff_point_for_showing_unfiltered_idps: {{ engine_wayf_cutoff_point_for_showing_unfiltered_idps | int }} ## Allow users to save their selected IdP and then auto-select it on returning visits. - wayf.remember_choice: false + wayf.remember_choice: {{ engine_wayf_show_remember_choice }} ## Toggle the default IdP quick link banner on the WAYF. 
wayf.display_default_idp_banner_on_wayf: true diff --git a/roles/galera/tasks/arbiter_node.yml b/roles/galera/tasks/arbiter_node.yml index 3513c9bd3..66d88b18c 100644 --- a/roles/galera/tasks/arbiter_node.yml +++ b/roles/galera/tasks/arbiter_node.yml @@ -5,12 +5,18 @@ state: present ignoreerrors: yes -# Add MariaDB repo and key -- name: Add MariaDB.org repository +# todo add this to a generic file and apply to aribtrator and cluster node +- name: Add MariaDB.org repository Rocky 8 template: - src: "mariadb.repo.j2" + src: "mariadb.repo.rocky8.j2" dest: "/etc/yum.repos.d/mariadb.repo" - when: ansible_os_family == 'RedHat' + when: ansible_distribution_major_version == '8' + +- name: Add MariaDB.org repository Rocky 9 + template: + src: "mariadb.repo.rocky9.j2" + dest: "/etc/yum.repos.d/mariadb.repo" + when: ansible_distribution_major_version == '9' - name: Install Galera, rsync ansible.builtin.package: diff --git a/roles/galera/tasks/cluster_nodes.yml b/roles/galera/tasks/cluster_nodes.yml index dbc112293..398c829a5 100644 --- a/roles/galera/tasks/cluster_nodes.yml +++ b/roles/galera/tasks/cluster_nodes.yml @@ -211,11 +211,16 @@ - galera_bootstrap_node == inventory_hostname # Add cluster user +# todo: this task shows a change in --check mode although it is not necessarily +# changing anything for real, skipping in check mode is also not ideal +# maybe add a task that runs in check mode that only checks for presence of the user +# also, run_once could break things if there would be multiple galera clusters +# not sure whether its important though.. 
- name: add mariadb cluster sst user mysql_user: name: "{{ mariadb_cluster_user }}" password: "{{ mariadb_cluster_password }}" - priv: "{{ mariadb_cluster_user_privs | default('*.*:RELOAD,PROCESS,LOCK TABLES,BINLOG MONITOR,REPLICA MONITOR') }}" + priv: "{{ mariadb_cluster_user_privs | default('*.*:RELOAD,PROCESS,LOCK TABLES,BINLOG MONITOR,SLAVE MONITOR') }}" state: present login_user: root login_password: "{{ mariadb_root_password }}" @@ -233,7 +238,7 @@ login_user: root login_password: "{{ mariadb_root_password }}" login_unix_socket: /var/lib/mysql/mysql.sock - run_once: true + run_once: true # run once because it is synced to other cluster nodes no_log: true - name: Add Galera clustercheck user, used for keepalived to connect @@ -245,7 +250,7 @@ login_user: root login_password: "{{ mariadb_root_password }}" login_unix_socket: /var/lib/mysql/mysql.sock - run_once: true + run_once: true # run once because it is synced to other cluster nodes no_log: true - name: Create the backup directory @@ -282,7 +287,7 @@ login_unix_socket: /var/lib/mysql/mysql.sock with_items: - "{{ databases.names }}" - run_once: true + run_once: true # run once because it is synced to other cluster nodes tags: galera_create_db - name: MySQL my.cnf diff --git a/roles/haproxy/templates/haproxy_backend.cfg.j2 b/roles/haproxy/templates/haproxy_backend.cfg.j2 index a9432af76..d2387c033 100644 --- a/roles/haproxy/templates/haproxy_backend.cfg.j2 +++ b/roles/haproxy/templates/haproxy_backend.cfg.j2 @@ -21,7 +21,7 @@ #--------------------------------------------------------------------- # backend {{ application.name }}_be - option httpchk {{ application.ha_method }} {{ application.ha_url }} + option httpchk {{ application.ha_method }} {{ application.ha_url }} http-check send ver HTTP/1.1 hdr Host {{ application.vhost_name }} {%if application.x_forwarded_port is defined %} @@ -35,8 +35,8 @@ cookie HTTPSERVERID insert nocache indirect httponly secure maxidle {{ haproxy_cookie_max_idle }} {% for server 
in application.servers %} - server {{ server.label }} {{ server.ip }}:{% if server.port is defined %}{{ server.port }}{% else %}{{ application.port }}{% endif %} cookie {{ server.label }} check inter 8000 fall 5 rise 2 maxconn {{ application.maxconn | default('35') }} {% if application.sslbackend is defined%} ssl verify required verifyhost {{ application.backend_vhost_name }} ca-file {{ application.backend_ca_file }}{% endif %} weight 100 - + server {{ server.label }} {{ server.ip }}:{% if server.port is defined %}{{ server.port }}{% else %}{{ application.port }}{% endif %} cookie {{ server.label }} check inter 8000 fall 5 rise 2 maxconn {{ application.maxconn | default('35') }} {% if application.sslbackend is defined%} ssl verify required verifyhost {{ application.backend_vhost_name }} ca-file {{ application.backend_ca_file }}{% endif %} weight 50 + {% endfor %} {% endfor %} @@ -48,7 +48,7 @@ #--------------------------------------------------------------------- # backend {{ application.name }}_staging_be - option httpchk {{ application.ha_method }} {{ application.ha_url }} + option httpchk {{ application.ha_method }} {{ application.ha_url }} http-check send ver HTTP/1.1 hdr Host {{ application.vhost_name }} {%if application.x_forwarded_port is defined %} @@ -62,8 +62,8 @@ cookie HTTPSERVERIDSTAGING insert nocache indirect httponly secure maxidle {{ haproxy_cookie_max_idle }} {% for server in application.stagingservers %} - server {{ server.label }} {{ server.ip }}:{{ application.port }} cookie {{ server.label }} check inter 8000 fall 5 rise 2 maxconn {{ application.maxconn | default('35') }} {% if application.sslbackend is defined%} ssl verify required verifyhost {{ application.backend_vhost_name }} ca-file {{ application.backend_ca_file }}{% endif %} weight 100 - + server {{ server.label }} {{ server.ip }}:{{ application.port }} cookie {{ server.label }} check inter 8000 fall 5 rise 2 maxconn {{ application.maxconn | default('35') }} {% if application.sslbackend 
is defined%} ssl verify required verifyhost {{ application.backend_vhost_name }} ca-file {{ application.backend_ca_file }}{% endif %} weight 50 + {% endfor %} {% endif %} {% endfor %} diff --git a/roles/invite/tasks/main.yml b/roles/invite/tasks/main.yml index 3553331f1..fae467558 100644 --- a/roles/invite/tasks/main.yml +++ b/roles/invite/tasks/main.yml @@ -19,6 +19,15 @@ - logback.xml notify: restart inviteserver +- name: Copy CRM crm_config.json from inventory + ansible.builtin.copy: + src: "{{ inventory_dir }}/files/invite/crm_config.json" + dest: "/opt/openconext/invite/crm_config.json" + owner: "root" + group: "root" + mode: "0644" + notify: restart inviteserver + - name: Copy private key for manage secrets encryption ansible.builtin.copy: content: "{{ invite_private_key_pkcs8 }}" @@ -80,6 +89,10 @@ - source: /opt/openconext/invite/public_key.pem target: /public_key.pem type: bind + - source: /opt/openconext/invite/crm_config.json + target: /crm_config.json + type: bind + command: '-Xmx256M --spring.config.location=./' etc_hosts: host.docker.internal: host-gateway diff --git a/roles/invite/templates/serverapplication.yml.j2 b/roles/invite/templates/serverapplication.yml.j2 index ce195b62d..251f4cff1 100644 --- a/roles/invite/templates/serverapplication.yml.j2 +++ b/roles/invite/templates/serverapplication.yml.j2 @@ -24,7 +24,7 @@ spring: banner-mode: "off" session: jdbc: - initialize-schema: always + initialize-schema: never {% if invite_cronjobmaster is defined and invite_cronjobmaster == false %} cleanup-cron: "-" {% else %} @@ -96,6 +96,14 @@ oidcng: resource-server-secret: "{{ invite.resource_server_secret }}" base-url: https://invite.{{ base_domain }} +crm: + api-key-header: "{{ invite_crm_api_key_header }}" + collab-person-prefix: "urn:collab:person" + inviter-name: "SURF CRM" + # The crm-config-resource can an absolute file path, e.g. 
file:///opt/openconext/invite/crm_config.json or + # a classpath entry + crm-config-resource: "file:///crm_config.json" + super-admin: users: {{ invite.super_admins }} @@ -118,6 +126,10 @@ config: past-date-allowed: {{ invite.past_date_allowed }} performance-seed-allowed: {{ invite.performance_seed_allowed }} eduid-idp-schac-home-organization: {{ invite.eduid_idp_schac_home_organization }} + # Determines the languages available for switching language, supported are 'nl', 'en' and 'pt' + languages: "nl, en" + environment: {{ environment_shortname }} + feature: limit-institution-admin-role-visibility: {{ invite.limit_institution_admin_role_visibility }} @@ -152,6 +164,7 @@ external-api-configuration: - profile - username: {{ invite.sp_dashboard_user }} password: "{{ invite.sp_dashboard_secret }}" + organizationGUIDFallback: {{ invite.surf_idp_organization_guid }} scopes: - sp_dashboard applications: @@ -159,6 +172,7 @@ external-api-configuration: manageType: SAML20_SP - username: {{ invite.access_user }} password: "{{ invite.access_secret }}" + organizationGUIDFallback: {{ invite.surf_idp_organization_guid }} scopes: - access applications: @@ -186,7 +200,11 @@ manage: # staticManageDirectory: file:///usr/local/etc/manage springdoc: - pathsToMatch: "/api/external/v1/**" + pathsToMatch: + - "/api/external/v1/**" + - "/crm/profile" + - "/api/profile" + - "/crm/api/v1/**" api-docs: path: "/ui/api-docs" swagger-ui: diff --git a/roles/manage/files/policies/allowed_attributes.json b/roles/manage/files/policies/allowed_attributes.json index 656312d1e..beb5c8363 100644 --- a/roles/manage/files/policies/allowed_attributes.json +++ b/roles/manage/files/policies/allowed_attributes.json @@ -1,38 +1,56 @@ [ { "value": "urn:mace:terena.org:attribute-def:schacHomeOrganization", + "validationRegex": "^[a-z]+(\\.[a-z]+)+$", + "allowedInDenyRule": true, "label": "Schac home organization" }, { "value": "urn:mace:terena.org:attribute-def:schacHomeOrganizationType", + "validationRegex": 
"^[a-z]+$", + "allowedInDenyRule": true, "label": "Schac home organization type" }, { "value": "urn:mace:dir:attribute-def:eduPersonAffiliation", + "validationRegex": "^(student|staff|faculty|employee|member)$", + "allowedInDenyRule": true, "label": "Edu person affiliation" }, { "value": "urn:mace:dir:attribute-def:eduPersonScopedAffiliation", + "validationRegex": "^(student|staff|faculty|employee|member)@[a-z]+(\\.[a-z]+)+$", + "allowedInDenyRule": true, "label": "Edu person scoped affiliation" }, { "value": "urn:mace:dir:attribute-def:eduPersonEntitlement", + "validationRegex": "^[a-z]+$", + "allowedInDenyRule": true, "label": "Edu person entitlement" }, { "value": "urn:mace:dir:attribute-def:isMemberOf", + "validationRegex": "^.*$", + "allowedInDenyRule": true, "label": "Is-member-of" }, { "value": "urn:collab:group:surfteams.nl", + "validationRegex": "^(urn:mace:surf\\.nl:invite:|urn:collab:group:)[a-z0-9_]+$", + "allowedInDenyRule": false, "label": "SURFconext Invite (voot) role urn" }, { "value": "urn:collab:sab:surfnet.nl", + "validationRegex": "^(Superuser|Instellingsbevoegde|OperationeelBeheerder|SURFconextbeheerder|DNS-Beheerder)$", + "allowedInDenyRule": false, "label": "SAB role" }, { "value": "urn:mace:dir:attribute-def:mail", + "validationRegex": "^[^@]+@[^@]+\\.[^@]+$", + "allowedInDenyRule": true, "label": "Mail address" } ] diff --git a/roles/manage/tasks/main.yml b/roles/manage/tasks/main.yml index 35c20b29a..9df3ecb97 100644 --- a/roles/manage/tasks/main.yml +++ b/roles/manage/tasks/main.yml @@ -97,7 +97,7 @@ - name: Create and start the server container community.docker.docker_container: name: manageserver - image: ghcr.io/openconext/openconext-manage/manage-server:{{ manage_server_version }} + image: ghcr.io/openconext/openconext-manage/manage-server:{{ manage_version }} entrypoint: /__cacert_entrypoint.sh pull: true restart_policy: "{{ manage_server_restart_policy }}" @@ -140,7 +140,7 @@ - name: Create the gui container 
community.docker.docker_container: name: managegui - image: ghcr.io/openconext/openconext-manage/manage-gui:{{ manage_gui_version }} + image: ghcr.io/openconext/openconext-manage/manage-gui:{{ manage_version }} pull: true restart_policy: "always" state: started @@ -158,7 +158,7 @@ start_period: 10s hostname: managegui env: - HTTPD_CSP: "{{ httpd_csp.lenient_with_static_img }}" + HTTPD_CSP: "{{ httpd_csp.lenient_with_static_img_with_surfconext }}" HTTPD_SERVERNAME: "manage.{{ base_domain }}" OPENCONEXT_INSTANCENAME: "{{ instance_name }}" OPENCONEXT_ENGINE_LOGOUT_URL: "https://engine.{{ base_domain }}/logout" diff --git a/roles/manage/templates/application.yml.j2 b/roles/manage/templates/application.yml.j2 index 8cb960366..1f3d128bb 100644 --- a/roles/manage/templates/application.yml.j2 +++ b/roles/manage/templates/application.yml.j2 @@ -64,6 +64,9 @@ product: metadata_configuration_path: file://{{ manage_dir }}/metadata_configuration metadata_templates_path: file://{{ manage_dir }}/metadata_templates metadata_export_path: classpath:/metadata_export +# Example: disabled_metadata_schemas: "organisation.schema.json" + +disabled_metadata_schemas: [] security: backdoor_user_name: {{ manage.backdoor_api_user }} diff --git a/roles/manage/templates/metadata_configuration/provisioning.schema.json.j2 b/roles/manage/templates/metadata_configuration/provisioning.schema.json.j2 index 1dc8f68df..d2aa225dc 100644 --- a/roles/manage/templates/metadata_configuration/provisioning.schema.json.j2 +++ b/roles/manage/templates/metadata_configuration/provisioning.schema.json.j2 @@ -110,7 +110,7 @@ ], "default": "eduperson_principal_name", "info": "From the pull down, select the scim identifier used for the user provisioning.", - "disabledAfterPersisted": true + "disabledAfterPersisted": false }, "eva_url": { "type": "string", diff --git a/roles/minio/README.md b/roles/minio/README.md new file mode 100644 index 000000000..36d4e0691 --- /dev/null +++ b/roles/minio/README.md @@ -0,0 +1,2 @@ +# TODO +- 
option to remove users \ No newline at end of file diff --git a/roles/minio/defaults/main.yml b/roles/minio/defaults/main.yml index 67d9354c8..cd6baacbe 100644 --- a/roles/minio/defaults/main.yml +++ b/roles/minio/defaults/main.yml @@ -1,7 +1,14 @@ --- minio_dir: /opt/openconext/minio -minio_data_dir: "{{ minio_dir }}/data" minio_version: RELEASE.2025-05-24T17-08-30Z -minio_data_dir_oncontainer: "/mnt/data" minio_root_user: "minioadmin" +minio_data_dir_oncontainer: "/data" # minio_root_password get from vault +minio_url_local: "http://127.0.0.1:9000" +minio_alias: "openconext" +minio_client_path: $HOME/minio-binaries +minio_mc: "{{ minio_client_path }}/mc" +minio_users: + - { name: 'openconext', password: "{{ minio_passwords.openconext }}" } # set passwords in vault +minio_client_checksum: "sha256:01f866e9c5f9b87c2b09116fa5d7c06695b106242d829a8bb32990c00312e891" +minio_client_version: "RELEASE.2025-08-13T08-35-41Z" diff --git a/roles/minio/handlers/main.yml b/roles/minio/handlers/main.yml index d85b5db33..b491e4201 100644 --- a/roles/minio/handlers/main.yml +++ b/roles/minio/handlers/main.yml @@ -8,4 +8,4 @@ # https://docs.ansible.com/ansible/latest/collections/community/docker/docker_container_module.html#notes comparisons: '*': ignore - when: miniocontainer is success and miniocontainer is not change + when: minio_container is success and minio_container is not change diff --git a/roles/minio/tasks/configure_container.yml b/roles/minio/tasks/configure_container.yml new file mode 100644 index 000000000..0755bdd07 --- /dev/null +++ b/roles/minio/tasks/configure_container.yml @@ -0,0 +1,75 @@ +--- +- name: Create minio files directory + ansible.builtin.file: + state: directory + path: "{{ minio_dir }}" + owner: root + group: root + mode: "0755" + +- name: Place the serverapplication configfiles + ansible.builtin.template: + src: "{{ item }}.j2" + dest: "{{ minio_dir }}/{{ item }}" + owner: root + group: root + mode: "0644" + with_items: + - config.env + notify: 
Restart minio + +- name: Create a docker volume + community.docker.docker_volume: + name: minio_data + +- name: Create and start the server container + community.docker.docker_container: + name: minio + image: quay.io/minio/minio:{{ minio_version }} + pull: true + restart_policy: "always" + state: started + env: + MINIO_CONFIG_ENV_FILE: "/etc/config.env" + ports: + # Publish container port 9000 for mc client commands + - "9000:9000" + networks: + - name: "loadbalancer" + mounts: + - source: "{{ minio_dir }}/config.env" + target: /etc/config.env + type: bind + - source: minio_data + target: "{{ minio_data_dir_oncontainer }}" + type: volume + + command: server --console-address ":9090" {{ minio_data_dir_oncontainer }} + labels: + traefik.http.routers.minio.rule: "Host(`minio.{{ base_domain }}`)" + traefik.http.routers.minio.tls: "true" + traefik.http.routers.minio.service: "minio" + traefik.http.services.minio.loadbalancer.server.port: "9090" + traefik.http.routers.minioapi.rule: "Host(`minioapi.{{ base_domain }}`)" + traefik.http.routers.minioapi.tls: "true" + traefik.http.routers.minioapi.service: "minioapi" + traefik.http.services.minioapi.loadbalancer.server.port: "9000" + traefik.enable: "true" + healthcheck: + test: + [ + "CMD", + "curl", + "--fail", + "http://localhost:9000/minio/health/live" + ] + interval: 10s + timeout: 10s + retries: 3 + start_period: 10s + register: minio_container + +- name: Show container debug info + ansible.builtin.debug: + msg: "{{ minio_container }}" + verbosity: 2 diff --git a/roles/minio/tasks/configure_minio_client.yml b/roles/minio/tasks/configure_minio_client.yml new file mode 100644 index 000000000..729157b1c --- /dev/null +++ b/roles/minio/tasks/configure_minio_client.yml @@ -0,0 +1,60 @@ +--- +- name: Configure minio client + block: + + - name: Create directory for minio client + ansible.builtin.file: + path: "{{ minio_client_path }}" + state: directory + mode: '0700' + + # without these checks (is mc there and is it the 
desired version) the download minio client task will fail if + # the version we have defined is non existent in the minio repository, so lets check those before we + # continue to our Download Minio Client + + - name: Check for presence Minio Client + ansible.builtin.stat: + path: "{{ minio_mc }}" + register: minio_client_presence + + - name: Check version Minio Client + ansible.builtin.shell: + cmd: "{{ minio_mc }} --version | head -1 | awk -F ' ' '{ print $3 }'" + args: + executable: /bin/bash + changed_when: false + register: minio_client_current_version + + - name: Debug check version Minio Client + ansible.builtin.debug: + msg: "{{ minio_client_current_version }}" + verbosity: 2 + + - name: Download Minio Client + ansible.builtin.get_url: + url: "https://dl.min.io/client/mc/release/linux-amd64/mc.{{ minio_client_version }}" + dest: "{{ minio_mc }}" + mode: '0700' + checksum: "{{ minio_client_checksum }}" + backup: true # always nice to have a backup + when: not minio_client_presence.stat.exists or minio_client_current_version.stdout != minio_client_version + + - name: Check if minio alias is set + ansible.builtin.command: "{{ minio_mc }} alias list {{ minio_alias }}" + changed_when: false + register: minio_alias_present + check_mode: false # always run its safe + failed_when: minio_alias_present.rc > 1 # rc 1 means alias not present thjats what we wanted to know + + - name: Debug alias list + ansible.builtin.debug: + msg: "{{ minio_alias_present.rc }}" # stdout can contain password + verbosity: 2 + + - name: Configure minio connection alias + ansible.builtin.command: "{{ minio_mc }} alias set {{ minio_alias }} {{ minio_url_local }} {{ minio_root_user }} {{ minio_root_password }}" + register: alias_command + failed_when: '"Added `" + minio_alias + "` successfully" not in alias_command.stdout' + when: minio_alias_present.rc == 1 + + become: false # No mc client actions as root \ No newline at end of file diff --git a/roles/minio/tasks/configure_minio_server.yml 
b/roles/minio/tasks/configure_minio_server.yml new file mode 100644 index 000000000..cd21505a4 --- /dev/null +++ b/roles/minio/tasks/configure_minio_server.yml @@ -0,0 +1,2 @@ +--- + diff --git a/roles/minio/tasks/create_users.yml b/roles/minio/tasks/create_users.yml new file mode 100644 index 000000000..6cf56958d --- /dev/null +++ b/roles/minio/tasks/create_users.yml @@ -0,0 +1,25 @@ +- name: Check and create users + block: + - name: Check whether user is already configured + ansible.builtin.command: "{{ minio_mc }} admin user info {{ minio_alias }} {{ user.name }}" + register: minio_user_present + changed_when: false + ignore_errors: true + failed_when: minio_user_present.rc > 1 # rc 1 means alias not present thjats what we wanted to know + + - name: create and configure users + when: + - minio_user_present.rc==1 + - '"Unable to get user info" in minio_user_present.stderr' + block: + - name: Create users + ansible.builtin.command: "{{ minio_mc }} admin user add {{ minio_alias }} {{ user.name }} {{ user.password }}" + register: minio_add_user + changed_when: '"Added user `" + user.name + "` successfully" in minio_add_user.stdout' + no_log: true + + - name: Attach read write policy + ansible.builtin.command: "{{ minio_mc }} admin policy attach {{ minio_alias }} readwrite --user={{ user.name }}" + register: minio_attach_user + changed_when: '"Added user `" + user.name + "` successfully" in minio_add_user.stdout' + become: false # No mc client actions as root diff --git a/roles/minio/tasks/main.yml b/roles/minio/tasks/main.yml index 2c9ec4063..aa823b249 100644 --- a/roles/minio/tasks/main.yml +++ b/roles/minio/tasks/main.yml @@ -1,70 +1,14 @@ ---- -- name: Create minio files directory - ansible.builtin.file: - state: directory - path: "{{ minio_dir }}" - owner: root - group: root - mode: "0755" +- name: Configure and start container + ansible.builtin.include_tasks: "configure_container.yml" -- name: Create minio data directory - ansible.builtin.file: - state: 
directory - path: "{{ minio_data_dir }}" - owner: root - group: root - mode: "0755" +- name: Configure minio client + ansible.builtin.include_tasks: "configure_minio_client.yml" -- name: Place the serverapplication configfiles - ansible.builtin.template: - src: "{{ item }}.j2" - dest: /opt/openconext/minio/{{ item }} - owner: root - group: root - mode: "0644" - with_items: - - config.env - notify: Restart minio +- name: Configure minio server + ansible.builtin.include_tasks: "configure_minio_server.yml" -- name: Create and start the server container - community.docker.docker_container: - name: minio - image: quay.io/minio/minio:{{ minio_version }} - pull: true - restart_policy: "always" - state: started - env: - MINIO_CONFIG_ENV_FILE: "/etc/config.env" - networks: - - name: "loadbalancer" - mounts: - - source: "{{ minio_data_dir }}" - target: "{{ minio_data_dir_oncontainer }}" - type: bind - - source: "{{ minio_dir }}/config.env" - target: /etc/config.env - type: bind - command: server --console-address ":9090" {{ minio_data_dir_oncontainer }} - labels: - traefik.http.routers.minio.rule: "Host(`minio.{{ base_domain }}`)" - traefik.http.routers.minio.tls: "true" - traefik.http.routers.minio.service: "minio" - traefik.http.services.minio.loadbalancer.server.port: "9090" - traefik.http.routers.minioapi.rule: "Host(`minioapi.{{ base_domain }}`)" - traefik.http.routers.minioapi.tls: "true" - traefik.http.routers.minioapi.service: "minioapi" - traefik.http.services.minioapi.loadbalancer.server.port: "9000" - traefik.enable: "true" - healthcheck: - test: - [ - "CMD", - "curl", - "--fail", - "http://localhost:9000/minio/health/live" - ] - interval: 10s - timeout: 10s - retries: 3 - start_period: 10s - register: miniocontainer +- name: Add minio users + ansible.builtin.include_tasks: "create_users.yml" + loop: "{{ minio_users }}" + loop_control: + loop_var: "user" diff --git a/roles/minio/templates/config.env.j2 b/roles/minio/templates/config.env.j2 index 
06c6c5954..6564667d1 100644 --- a/roles/minio/templates/config.env.j2 +++ b/roles/minio/templates/config.env.j2 @@ -1,3 +1,2 @@ MINIO_ROOT_USER={{ minio_root_user }} -MINIO_ROOT_PASSWORD={{ minio_root_password }} -MINIO_VOLUMES="{{ minio_data_dir_oncontainer }}" \ No newline at end of file +MINIO_ROOT_PASSWORD={{ minio_root_password }} \ No newline at end of file diff --git a/roles/mongo/README.md b/roles/mongo/README.md index f140d1480..9e96770e5 100644 --- a/roles/mongo/README.md +++ b/roles/mongo/README.md @@ -5,4 +5,15 @@ You need to set the role of your mongo hosts in the host_vars. the key is `mongo_replication_role:` and it can have the values: "primary", "secondary" or arbiter. +Cluster certificates have to have an identical value for the OU, O or DC attribute, as described in the Mongo documentation. + +Save the mongo ca.pem that is used for signing the cluster certificates as {{ environment_dir }}/secrets/mongo/mongoca.pem + +Set the cluster certificate as variable mongo_cluster_cert in host_vars +Set the mongo_cluster_private_key variable encrypted in host_vars + Please review the official Mongo documentation for more information. + +# Todo +- [ ] Add the possibility for adding and removing cluster members +- [ ] Add the possibility for a standalone mongo server diff --git a/roles/mongo/defaults/main.yml b/roles/mongo/defaults/main.yml index dade9cd4f..a58b2a320 100644 --- a/roles/mongo/defaults/main.yml +++ b/roles/mongo/defaults/main.yml @@ -1,11 +1,32 @@ # The global variable file mongodb installation +mongo_service: "mongod" +mongo_version: "7.0" + # In the current mongo role only one cluster per environments # is possible, that works for now. mongo_servers: [] # Set this in group_vars # - mongo1.example.com # - mongo2.example.com +# cluster members +# Not all mongo servers in the inventory are cluster members, so we use a separate list for this. +# Set this in group_vars of your environment(s). 
The arbiter should go first, or change the mongo_arbiter_index. +# mongo_cluster_members: +# - host: "mongoarbiter.example.com:27017" +# priority: 1 # can vote, cannot become primary +# - host: "mongo2.example.com:27017" +# priority: 2 +# - host: "mongo1.example.com:27017" +# priority: 3 +# mongo_arbiter_index: 0 + +# The replication role +# mongo_replication_role: # Set this in host_vars, it can have the values: "primary", "secondary" or arbiter + +# Todo: there is a link between mongo_replication_role and priority (arbiter is priority 1, primary the highest) so +# setting them separately is not ideal. + # The port for mongo server mongod_port: 27017 @@ -24,3 +45,11 @@ mongo: # Listen on all addresses by default mongo_bind_listen_address: "0.0.0.0" + +# Certs and keys +mongo_pki_dir: "/etc/pki/mongo" +# mongo_cluster_cert set this in host_vars +# mongo_cluster_private_key set this in host_vars (encrypted with ansible-vault) + +# Users and groups +mongo_group: "mongod" diff --git a/roles/mongo/files/mongo.repo b/roles/mongo/files/mongo.repo deleted file mode 100644 index a5f30dd31..000000000 --- a/roles/mongo/files/mongo.repo +++ /dev/null @@ -1,6 +0,0 @@ -[mongodb-org-6.0] -name=MongoDB Repository -baseurl=https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/6.0/x86_64/ -gpgcheck=1 -enabled=1 -gpgkey=https://www.mongodb.org/static/pgp/server-6.0.asc diff --git a/roles/mongo/handlers/main.yml b/roles/mongo/handlers/main.yml index 01d6f72d7..6916f928f 100644 --- a/roles/mongo/handlers/main.yml +++ b/roles/mongo/handlers/main.yml @@ -1,6 +1,6 @@ --- -- name: restart mongod +- name: Restart mongod throttle: 1 service: - name: mongod + name: "{{ mongo_service }}" state: restarted diff --git a/roles/mongo/tasks/ca.yml b/roles/mongo/tasks/ca.yml deleted file mode 100644 index 11bb0f00b..000000000 --- a/roles/mongo/tasks/ca.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -# In this task file a ca key and certificate are created -# and saved on localhost. 
These are used for signing certificates for -# each individual mongo server in the cluster (in certs.yml) -# This works for new mongo servers, changing the ca and certificates -# on mongoservers with this role is not tested. -- name: Create mongo key dir in the environment repo - ansible.builtin.file: - path: "{{ inventory_dir }}/secrets/mongo/" - state: directory - mode: "0750" - -- name: Check if we have a CA in the environments repo - ansible.builtin.stat: - path: "{{ inventory_dir }}/secrets/mongo/mongoca.key" - register: mongo_ca_key - -- name: Create private key with password protection - community.crypto.openssl_privatekey: - path: "{{ inventory_dir }}/secrets/mongo/mongoca.key" - passphrase: "{{ mongo_ca_passphrase }}" - cipher: auto - when: not mongo_ca_key.stat.exists - -- name: Create certificate signing request (CSR) for CA certificate - community.crypto.openssl_csr_pipe: - privatekey_path: "{{ inventory_dir }}/secrets/mongo/mongoca.key" - privatekey_passphrase: "{{ mongo_ca_passphrase }}" - common_name: Mongo {{ env }} CA - use_common_name_for_san: false # since we do not specify SANs, don't use CN as a SAN - basic_constraints: - - 'CA:TRUE' - basic_constraints_critical: true - key_usage: - - keyCertSign - key_usage_critical: true - register: ca_csr - when: not mongo_ca_key.stat.exists - -- name: Create self-signed CA certificate from CSR - community.crypto.x509_certificate: - path: "{{ inventory_dir}}/secrets/mongo/mongoca.pem" - csr_content: "{{ ca_csr.csr }}" - privatekey_path: "{{ inventory_dir }}/secrets/mongo/mongoca.key" - privatekey_passphrase: "{{ mongo_ca_passphrase }}" - provider: selfsigned - when: not mongo_ca_key.stat.exists diff --git a/roles/mongo/tasks/certs.yml b/roles/mongo/tasks/certs.yml index 4a9bcb019..b79658567 100644 --- a/roles/mongo/tasks/certs.yml +++ b/roles/mongo/tasks/certs.yml @@ -1,88 +1,29 @@ --- # In this task file keys and certificates for the -# mongo servers are created and signed with the ca from ca.yml -- name: 
Install some packages - ansible.builtin.yum: - name: - - python3 - - python3-pip - state: present - -- name: Install python36-cryptography on CentOS7 - ansible.builtin.yum: - name: - - python36-cryptography - state: present - when: ansible_distribution_major_version == '7' - -- name: Install python36-cryptography on Rocky 8 and 9 - ansible.builtin.yum: - name: - - python3-cryptography - state: present - when: ansible_distribution_major_version == '8' or ansible_distribution_major_version == '9' +# mongo servers are distributed - name: Create directory to keep mongo key material ansible.builtin.file: - dest: "/etc/pki/mongo/" + dest: "{{ mongo_pki_dir }}" state: directory owner: root group: root mode: "0775" -- name: Create private keys - community.crypto.openssl_privatekey: - path: "/etc/pki/mongo/mongo.key" - -- name: Check whether certificate exists - ansible.builtin.stat: - path: "/etc/pki/mongo/mongo.pem" - register: certificate_exists - -- name: Create certificate signing request (CSR) for new certificate - community.crypto.openssl_csr_pipe: - privatekey_path: "/etc/pki/mongo/mongo.key" - subject_alt_name: - - "{{ mongo_tls_host_altname_dnsorip | default('DNS') }}:{{ inventory_hostname }}" - organizational_unit_name: "{{ instance_name }}" - register: csr - changed_when: false - -- name: Read existing certificate if exists - ansible.builtin.slurp: - src: /etc/pki/mongo/mongo.pem - when: certificate_exists.stat.exists - register: certificate - -- name: Sign certificate with our CA - community.crypto.x509_certificate_pipe: - content: "{{ (certificate.content | b64decode) if certificate_exists.stat.exists else omit }}" - csr_content: "{{ csr.csr }}" - provider: ownca - ownca_path: "{{ inventory_dir }}/secrets/mongo/mongoca.pem" - ownca_privatekey_path: "{{ inventory_dir }}/secrets/mongo/mongoca.key" - ownca_privatekey_passphrase: "{{ mongo_ca_passphrase }}" - ownca_not_after: +3650d # valid for ten years - ownca_not_before: "-1d" # valid since yesterday - 
delegate_to: localhost - register: certificate - ignore_errors: '{{ ansible_check_mode }}' - become: false - -- name: Write certificate file - ansible.builtin.copy: - dest: /etc/pki/mongo/mongo.pem - content: "{{ certificate.certificate }}" +- name: Distribute mongo cluster key and cert + ansible.builtin.template: + src: keyandcert.pem.j2 + dest: "{{ mongo_pki_dir }}/keyandcert.pem" owner: root - group: root - mode: "0400" - when: certificate is changed + group: "{{ mongo_group }}" + mode: "0440" + notify: Restart mongod -- name: Install the CA certificate +- name: Copy ca pem file ansible.builtin.copy: src: "{{ inventory_dir }}/secrets/mongo/mongoca.pem" - dest: /etc/pki/mongo/mongoca.pem + dest: "{{ mongo_pki_dir }}/mongoca.pem" owner: root group: root mode: "0644" - notify: restart mongod + notify: Restart mongod diff --git a/roles/mongo/tasks/cluster.yml b/roles/mongo/tasks/clusterconfig.yml similarity index 50% rename from roles/mongo/tasks/cluster.yml rename to roles/mongo/tasks/clusterconfig.yml index 55661d71a..0ee17dc39 100644 --- a/roles/mongo/tasks/cluster.yml +++ b/roles/mongo/tasks/clusterconfig.yml @@ -1,41 +1,30 @@ --- +# TODO: this works only for new deployments +# rewrite so mongo config can be changed and cluster members can be added or removed - name: Check if hosts are in clustered - ansible.builtin.shell: >- - mongosh --port {{ mongod_port }} --quiet --eval 'db.isMaster().hosts' + ansible.builtin.command: mongosh --port {{ mongod_port }} --quiet --eval 'db.isMaster().hosts' register: check_cluster changed_when: false + check_mode: false -- name: Set fact for roles - ansible.builtin.set_fact: - mongo_primary: "{{ mongo_replication_role == 'primary' }}" - mongo_secondary: "{{ mongo_replication_role == 'secondary' }}" - mongo_arbiter: "{{ mongo_replication_role == 'arbiter' }}" +- name: Debug check_cluster variable + when: show_debug_info is defined and show_debug_info | bool + ansible.builtin.debug: + msg: "{{ check_cluster }}" -- name: Build 
member list - ansible.builtin.set_fact: - members: >- - {{ - members | default([]) + - [{ - 'host': item , - 'priority': member_weight[hostvars[item].mongo_replication_role] - }] - }} - loop: "{{ ansible_play_hosts }}" - run_once: true - vars: - member_weight: - primary: 3 - secondary: 2 - arbiter: 1 +- name: Debug mongo_cluster_members variable + when: show_debug_info is defined and show_debug_info | bool + ansible.builtin.debug: + msg: "{{ mongo_cluster_members }}" + +- name: Debug mongo_replication_role variable + when: show_debug_info is defined and show_debug_info | bool + ansible.builtin.debug: + msg: "{{ mongo_replication_role }}" -- name: Set fact for arbiter index number - ansible.builtin.set_fact: - arbiter_index: "{{ hostid }}" - when: hostvars[item].mongo_arbiter - loop: "{{ ansible_play_hosts }}" - loop_control: - index_var: hostid +- name: Debug mongo_arbiter_index variable + ansible.builtin.debug: + msg: "{{ mongo_arbiter_index | default(0) }}" - name: Initial cluster initialisation community.mongodb.mongodb_replicaset: @@ -44,8 +33,8 @@ login_port: "{{ mongod_port }}" login_password: "{{ mongo_admin_password }}" replica_set: "{{ replica_set_name }}" - members: "{{ members }}" - arbiter_at_index: "{{ arbiter_index | default(omit) }}" + members: "{{ mongo_cluster_members }}" + arbiter_at_index: "{{ mongo_arbiter_index | default(0) }}" validate: false run_once: true when: mongo_replication_role == 'primary' diff --git a/roles/mongo/tasks/install.yml b/roles/mongo/tasks/install.yml index 741589b29..673d465e3 100644 --- a/roles/mongo/tasks/install.yml +++ b/roles/mongo/tasks/install.yml @@ -1,31 +1,20 @@ --- - name: Create the repository for mongodb - ansible.builtin.copy: - src: "mongo.repo" + when: ansible_os_family == 'RedHat' + ansible.builtin.template: + src: "mongo.repo.j2" dest: "/etc/yum.repos.d/mongo.repo" owner: root mode: "0640" -- name: Install the mongodb package +- name: Install the mongodb package and some helper packages + when: ansible_os_family == 'RedHat' ansible.builtin.yum: name: 
- mongodb-org + - pip state: present -- name: Slurp the private key - ansible.builtin.slurp: - path: "/etc/pki/mongo/mongo.key" - register: mongo_key - -- name: Create combined key and certificate file - ansible.builtin.copy: - content: "{{ mongo_key['content'] | b64decode }}{{ certificate.certificate }}" - dest: "/etc/pki/mongo/keyandcert.pem" - owner: root - group: mongod - mode: "0440" - ignore_errors: '{{ ansible_check_mode }}' - - name: Install pymongo ansible.builtin.pip: name: pymongo @@ -70,7 +59,7 @@ owner: root group: root mode: "0644" - notify: restart mongod + notify: Restart mongod - name: Enable and start mongod ansible.builtin.service: diff --git a/roles/mongo/tasks/main.yml b/roles/mongo/tasks/main.yml index 6cda0f417..2485d4b1d 100644 --- a/roles/mongo/tasks/main.yml +++ b/roles/mongo/tasks/main.yml @@ -1,38 +1,40 @@ --- -- name: Use temporarily python3 as remote interpreter, this fixes pymongo - ansible.builtin.set_fact: - ansible_python_interpreter: "/usr/bin/python3" - tags: mongo_users - -- name: Include CA tasks - ansible.builtin.include_tasks: - file: ca.yml - apply: - delegate_to: localhost - run_once: true - become: false - -- name: Include Certificate tasks - ansible.builtin.include_tasks: - file: certs.yml - -- name: Include installation tasks - ansible.builtin.include_tasks: - file: install.yml - -- name: Include cluster installation tasks - ansible.builtin.include_tasks: - file: cluster.yml - -- name: Include user creation - ansible.builtin.include_tasks: - file: users.yml - -- name: Include postinstallation tasks - ansible.builtin.include_tasks: - file: postinstall.yml - -- name: Use python2 again as remote interpreter - ansible.builtin.set_fact: - ansible_python_interpreter: "/usr/bin/python" - when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' +- name: Install and configure mongo on redhat family servers + when: ansible_os_family == 'RedHat' + block: + - name: Use temporarily python3 as remote 
interpreter, this fixes pymongo + ansible.builtin.set_fact: + ansible_python_interpreter: "/usr/bin/python3" + tags: mongo_users + + - name: Include installation tasks + ansible.builtin.include_tasks: + file: install.yml + + - ansible.builtin.meta: flush_handlers + + - name: Include Certificate tasks + ansible.builtin.include_tasks: + file: certs.yml + + - name: Include cluster installation tasks + ansible.builtin.include_tasks: + file: clusterconfig.yml + + - name: Include user creation + ansible.builtin.include_tasks: + file: users.yml + + - name: Include postinstallation tasks + ansible.builtin.include_tasks: + file: postinstall.yml + + - name: Use python2 again as remote interpreter + ansible.builtin.set_fact: + ansible_python_interpreter: "/usr/bin/python" + when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' + +- name: Message for non redhat family servers + when: ansible_os_family != 'RedHat' + ansible.builtin.debug: + msg: "Sorry, this role only works on RedHat family servers" diff --git a/roles/mongo/tasks/users.yml b/roles/mongo/tasks/users.yml index 86ce28cb8..a218bac46 100644 --- a/roles/mongo/tasks/users.yml +++ b/roles/mongo/tasks/users.yml @@ -1,5 +1,5 @@ -- name: Create mongo database users - mongodb_user: +- name: Create mongo database users # requires pymongo 4+ + community.mongodb.mongodb_user: login_database: admin database: "{{ item.db_name }}" login_user: admin diff --git a/roles/mongo/templates/keyandcert.pem.j2 b/roles/mongo/templates/keyandcert.pem.j2 new file mode 100644 index 000000000..eed0ac731 --- /dev/null +++ b/roles/mongo/templates/keyandcert.pem.j2 @@ -0,0 +1 @@ +{{ mongo_cluster_private_key }}{{ mongo_cluster_cert }} diff --git a/roles/mongo/templates/mongo.repo.j2 b/roles/mongo/templates/mongo.repo.j2 new file mode 100644 index 000000000..d94eff281 --- /dev/null +++ b/roles/mongo/templates/mongo.repo.j2 @@ -0,0 +1,6 @@ +[mongodb-org-{{ mongo_version}}] +name=MongoDB Repository 
+baseurl=https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/{{ mongo_version}}/x86_64/ +gpgcheck=1 +enabled=1 +gpgkey=https://www.mongodb.org/static/pgp/server-{{ mongo_version}}.asc diff --git a/roles/mongo/templates/mongod.conf.j2 b/roles/mongo/templates/mongod.conf.j2 index e0cf91d28..f5e990add 100644 --- a/roles/mongo/templates/mongod.conf.j2 +++ b/roles/mongo/templates/mongod.conf.j2 @@ -19,6 +19,6 @@ storage: replication: replSetName: {{ replica_set_name }} -security: +security: authorization: enabled clusterAuthMode: x509 diff --git a/roles/myconext/templates/application.yml.j2 b/roles/myconext/templates/application.yml.j2 index 515d50b3f..f00d7b668 100644 --- a/roles/myconext/templates/application.yml.j2 +++ b/roles/myconext/templates/application.yml.j2 @@ -37,8 +37,12 @@ springdoc: enabled: true email: - from: eduID - error_mail: info@surfconext.nl + from_deprovisioning: "{{ myconext.email.from_deprovisioning }}" + from_code: "{{ myconext.email.from_code }}" + from_app_nudge: "{{ myconext.email.from_app_nudge }}" + from_new_device: "{{ myconext.email.from_new_device }}" + error: {{ error_mail_to }} + error_mail: {{ error_mail_to }} magic-link-url: https://login.{{ myconext_base_domain }}/saml/guest-idp/magic my-surfconext-url: https://mijn.{{ myconext_base_domain }} idp-surfconext-url: https://login.{{ myconext_base_domain }} @@ -58,7 +62,13 @@ cron: manage-initial-delay-milliseconds: 15000 manage-fixed-rate-milliseconds: 300_000 # Runs on the first day of February, May, August, and November. - mail-institution-mail-usage-expression: "0 0 0 * 2,5,8,11 *" + # 0 – seconds + # 30 – minute + # 5 – hour + # * – every day of month + # 2,5,8,11 – February, May, August, November + # * – every day of week + mail-institution-mail-usage-expression: "0 30 5 * 2,5,8,11 *" mail-institution-batch-size: 500 # Every day at 6:30AM nudge-app-mail-expression: "0 30 6 * * ?" 
@@ -75,6 +85,7 @@ manage: base_url: "https://manage.{{ base_domain }}" enabled: True +mongodb_db: {{ myconext.mongo_database }} base_domain: {{ myconext_base_domain }} saml_metadata_base_path: https://login.{{ myconext_base_domain }} base_path: https://mijn.{{ myconext_base_domain }} @@ -115,6 +126,8 @@ feature: create_eduid_institution_landing: {{ myconext.feature_create_eduid_institution_landing }} # Do we default remember the user for a longer period default_remember_me: True + # Do we default add affiliate email address + default_affiliate_email: True # Does the SAMLIdpService expects authn requests to be signed requires_signed_authn_request: False # Do we support ID verify @@ -135,6 +148,12 @@ feature: captcha_enabled: True # Set to true to use the BRIN code to add ui-roles and authentication scoped affiliations use_remote_creation_for_affiliation: {{ myconext.feature_use_remote_creation_for_affiliation }} + # Set to true to show the account linking related options on the personal-info page and home page (banner) + enable_account_linking: {{ myconext.feature_enable_account_linking }} + # Set to true to show the app login option + use_app: {{ myconext.feature_use_app }} + +default_affiliate_email_domain: eduid.nl captcha: sitekey: {{ myconext.captcha_sitekey }} diff --git a/roles/oidcng/templates/application.yml.j2 b/roles/oidcng/templates/application.yml.j2 index e9d8574a2..5c52fd42d 100644 --- a/roles/oidcng/templates/application.yml.j2 +++ b/roles/oidcng/templates/application.yml.j2 @@ -3,7 +3,6 @@ logging: config: file://{{ oidcng_config_dir }}/logback.xml level: org.springframework: WARN - oidc: INFO # Is this node in a load-balanced topology responsible for cleaning up resources cron: @@ -39,6 +38,9 @@ server: max-http-form-post-size: 10000000 max-swallow-size: 10000000 +# Maximum length of query parameters in bytes for authorization endpoint +max-query-param-size: 8184 + mongodb_db: {{ oidcng.mongo_database }} oidc_saml_mapping_path: file://{{ 
oidcng_config_dir }}/oidc_saml_mapping.json openid_configuration_path: file://{{ oidcng_config_dir }}/openid-configuration.json diff --git a/roles/openaccess/templates/serverapplication.yml.j2 b/roles/openaccess/templates/serverapplication.yml.j2 index 6224bf7fc..c4cb82c89 100644 --- a/roles/openaccess/templates/serverapplication.yml.j2 +++ b/roles/openaccess/templates/serverapplication.yml.j2 @@ -93,7 +93,33 @@ config: discovery: "https://connect.test2.surfconext.nl/oidc/.well-known/openid-configuration" invite: "https://invite.{{ base_domain }}" sram: "https://{{ env }}.sram.surf.nl/" - serviceDesk: "https://servicedesk.surf.nl/jira/plugins/servlet/desk/user/requests?reporter=all" + service_desk: "https://servicedesk.surf.nl/jira/plugins/servlet/desk/user/requests?reporter=all" + feedback_widget_enabled: true + # For other environments, move to group_vars + identity_providers: + - name: "SXS IdP" + entityid: "http://mock-idp" + descriptionEN: "Een test-IdP waarmee je zelf attributen-sets kunt simuleren. De metadata vind je hier" + descriptionNL: "Een test-IdP waarmee je zelf attributen-sets kunt simuleren. De metadata vind je hier" + - name: "SXS Dummy" + entityid: "https://idp.diy.surfconext.nl" + descriptionEN: "Een test-IdP met fictieve gebruikersaccounts. De metadata vind je hier" + descriptionNL: "Een test-IdP met fictieve gebruikersaccounts. 
De metadata vind je hier" + idp_proxy_meta_data: https://metadata.test2.surfconext.nl/idp-metadata.xml + minimal_stepup_acr_level: "http://{{ base_domain }}/assurance/loa2" + features: + - name: idp + enabled: true + - name: invite + enabled: true + - name: sram + enabled: true + - name: mfa + enabled: true + acr_values: + {% for loa in [stepup_intrinsic_loa] + stepup_loa_values_supported %} + - "{{ loa }}" + {% endfor %} eduid-idp-entity-id: "https://login.{{ myconext_base_domain }}" @@ -113,6 +139,7 @@ email: from: "{{ noreply_email }}" contactEmail: "{{ support_email }}" serviceDeskEmail: "{{ support_email }}" + supportEmail: "support@surfconext.nl" environment: "{{ environment_shortname }}" manage: @@ -133,6 +160,19 @@ manage: staticManageDirectory: classpath:/manage # staticManageDirectory: file:///usr/local/etc/manage +invite: + enabled: True + url: "https://invite.{{ base_domain }}" + user: {{ invite.access_user }} + password: "{{ invite.access_secret }}" + +# Todo replace with openconextaccess user +statistics: + enabled: True + url: {{ dashboard.stats_url }} + user: {{ dashboard.stats_user }} + password: {{ stats_dashboard_api_password }} + s3storage: url: {{ openconextaccess.s3_storage.url }} key: {{ openconextaccess.s3_storage.key }} diff --git a/roles/pdp/tasks/main.yml b/roles/pdp/tasks/main.yml index 77c9072c9..2698edcd2 100644 --- a/roles/pdp/tasks/main.yml +++ b/roles/pdp/tasks/main.yml @@ -56,7 +56,7 @@ - source: /opt/openconext/pdp/xacml.conext.properties target: /xacml.conext.properties type: bind - command: "-Xmx512m --spring.config.location=./" + command: "-Xmx2048m --spring.config.location=./" etc_hosts: host.docker.internal: host-gateway labels: diff --git a/roles/rsyslog/defaults/main.yml b/roles/rsyslog/defaults/main.yml index 75da2bc36..755c27773 100644 --- a/roles/rsyslog/defaults/main.yml +++ b/roles/rsyslog/defaults/main.yml @@ -28,3 +28,15 @@ rsyslog_queue_dir: "/var/spool/rsyslog" # rsyslog_imjournal_ratelimitburst: 2000 # 
rsyslog_imjournal_ratelimitinterval: 600 # rsyslog_maxmessagesize: 8000 + +# rsyslog_imjournal_statefile # default is imjournal.state which means imjournal.state relative to the rsyslog workdir +# rsyslog_workdirectory # default /var/spool/rsyslog + +# Empty log check script, optional +rsyslog_enable_warn_empty_script: false +rsyslog_warn_empty_log_recipient: admin@example.com +rsyslog_monitor_for_emptylogs_path: "{{ rsyslog_dir }}/apps/prod_sc" +rsyslog_checkemptylogs_cron_minute: "0" +rsyslog_checkemptylogs_cron_hour: "9" +rsyslog_checkemptylogs_cron_weekdays: "1-5" +rsyslog_checkemptylogs_dir: "/usr/local/bin" diff --git a/roles/rsyslog/tasks/rsyslog_central.yml b/roles/rsyslog/tasks/rsyslog_central.yml index 8d1a56adb..7dbdbac1a 100644 --- a/roles/rsyslog/tasks/rsyslog_central.yml +++ b/roles/rsyslog/tasks/rsyslog_central.yml @@ -14,13 +14,13 @@ - name: Create directory to save the logs file: - path: "{{rsyslog_dir }}" + path: "{{ rsyslog_dir }}" owner: root group: "{{ rsyslog_read_group }}" mode: "0750" recurse: true -- name: put rsyslog client certificate +- name: Put rsyslog client certificate copy: src: "{{ inventory_dir }}/files/certs/rsyslog/rsyslogserver.crt" dest: "/etc/pki/rsyslog/rsyslogserver.crt" @@ -49,28 +49,53 @@ template: src: sc_template.conf.j2 dest: /etc/rsyslog.d/templates/{{ item.name }}.conf + backup: true with_items: "{{ rsyslog_environments }}" + notify: + - "restart rsyslog" - name: Create ruleset configurations template: src: sc_ruleset.conf.j2 dest: /etc/rsyslog.d/rulesets/{{ item.name }}.conf + backup: true with_items: "{{ rsyslog_environments }}" + notify: + - "restart rsyslog" - name: Create sc listener configurations template: src: listener.conf.j2 dest: /etc/rsyslog.d/listeners/{{ item.name }}.conf + backup: true with_items: "{{ rsyslog_environments }}" + notify: + - "restart rsyslog" - name: Create logrotate file for apps and host logs template: src: centralsyslog.j2 dest: /etc/logrotate.d/centralsyslog -- name: put ryslog 
config file +- name: Put ryslog config file template: src: "rsyslog.conf.j2" dest: "/etc/rsyslog.conf" notify: - "restart rsyslog" + +- name: Put log empty warn script and cronjob + when: rsyslog_enable_warn_empty_script + block: + - name: Put log empty script + ansible.builtin.template: + src: warn-empty-log.sh.j2 + dest: "{{ rsyslog_checkemptylogs_dir }}/warn-empty-log.sh" + backup: True + - name: Create cronjob + ansible.builtin.cron: + name: "check empty logs" + minute: "{{ rsyslog_checkemptylogs_cron_minute }}" + hour: "{{ rsyslog_checkemptylogs_cron_hour }}" + weekday: "{{ rsyslog_checkemptylogs_cron_weekdays }}" + job: "{{ rsyslog_checkemptylogs_dir }}/warn-empty-log.sh -m" diff --git a/roles/rsyslog/templates/rsyslog_onlyforward.conf.j2 b/roles/rsyslog/templates/rsyslog_onlyforward.conf.j2 index 36e569a28..874db8c97 100644 --- a/roles/rsyslog/templates/rsyslog_onlyforward.conf.j2 +++ b/roles/rsyslog/templates/rsyslog_onlyforward.conf.j2 @@ -1,4 +1,6 @@ -# This rsyslog configuration takes logs from journald and forwards them to a remote log serverad="imuxsock") # provides support for local system logging +# Where to place auxiliary files +global(workDirectory="{{ rsyslog_workdirectory | default('/var/spool/rsyslog') }}") + {% if 'docker' in group_names %} module(load="imptcp") input(type="imptcp" port="514") @@ -8,8 +10,8 @@ module(load="imuxsock") {% endif %} module(load="imjournal" # provides access to the systemd journal UsePid="system" # PID number is retrieved as the ID of the process the journal entry originates from - StateFile="imjournal.state" - ratelimit.interval="{{ rsyslog_imjournal_ratelimitinterval | default('600') }}" + StateFile="{{ rsyslog_imjournal_statefile | default('imjournal.state') }}" + ratelimit.interval="{{ rsyslog_imjournal_ratelimitinterval | default('60') }}" ratelimit.burst="{{ rsyslog_imjournal_ratelimitburst | default('20000') }}") # Reads journald logs module(load="imklog") # provides kernel logging support 
module(load="immark" interval="300" ) # provides --MARK-- message capability diff --git a/roles/rsyslog/templates/sc_ruleset.conf.j2 b/roles/rsyslog/templates/sc_ruleset.conf.j2 index 5b77695a7..86a0e5457 100644 --- a/roles/rsyslog/templates/sc_ruleset.conf.j2 +++ b/roles/rsyslog/templates/sc_ruleset.conf.j2 @@ -1,7 +1,8 @@ $RuleSet {{ item.name }} {% if item.name != "mgnt_sc" %} if $programname == "engineblock" and $msg startswith " engine" then { action(type="omfile" DynaFile="apache-eb-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -if $programname == "engineblock" and $msg startswith ' {"channel":"authentication"' then { action(type="omfile" DynaFile="ebauth-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } +if $programname == "engineblock" and $msg startswith "engine" then { action(type="omfile" DynaFile="apache-eb-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } +if $programname == "engineblock" and $msg contains '{"channel":"authentication"' then { action(type="omfile" DynaFile="ebauth-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "engineblock" { action(type="omfile" DynaFile="eblog-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "EBLOG" { action(type="omfile" DynaFile="eblog-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "Apache-EB" { action(type="omfile" DynaFile="apache-eb-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } @@ -15,10 +16,8 @@ if $programname == "engineblock" and $msg startswith ' {"channel":"authenticatio :programname, isequal, "Apache-EBAPI" { action(type="omfile" DynaFile="apache-eb-api-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "manageserver" { action(type="omfile" DynaFile="manage-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "managegui" { action(type="omfile" DynaFile="apache-manage-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -:programname, 
isequal, "PDPANALYTICS" { action(type="omfile" DynaFile="pdpanalytics-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -:programname, isequal, "pdpserver" { action(type="omfile" DynaFile="pdp-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -:programname, isequal, "pdpgui" { action(type="omfile" DynaFile="apache-pdp-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -if $programname == "profile" and $msg startswith " {" then { action(type="omfile" DynaFile="profile-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } +:programname, isequal, "pdp" { action(type="omfile" DynaFile="pdp-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } +if $programname == "profile" and $msg startswith "{" then { action(type="omfile" DynaFile="profile-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "profile" { action(type="omfile" DynaFile="apache-profile-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "teamsserver" { action(type="omfile" DynaFile="teams-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "teamsgui" { action(type="omfile" DynaFile="apache-teams-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } @@ -45,19 +44,17 @@ if $programname == "profile" and $msg startswith " {" then { action(type="omfile :programname, startswith, "inviteprovisioningmock" { action(type="omfile" DynaFile="inviteprovisioningmock-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, startswith, "loadbalancer" { action(type="omfile" DynaFile="loadbalancer-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -if $programname == "gateway" and $msg startswith ' {"message":"Second Factor Authenticated"' then { action(type="omfile" DynaFile="stepup-authentication-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -if $programname == "gateway" and $msg startswith ' {"message":"Intrinsic Loa Requested"' then { action(type="omfile" DynaFile="stepup-authentication-{{ item.name 
}}" {{ rsyslog_dir_file_modes }} ) stop } +if $programname == "gateway" and $msg contains '{"message":"Second Factor Authenticated"' then { action(type="omfile" DynaFile="stepup-authentication-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } +if $programname == "gateway" and $msg contains '{"message":"Intrinsic Loa Requested"' then { action(type="omfile" DynaFile="stepup-authentication-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } {% for stepupapp in stepupapps %} :programname, isequal, "stepup-{{ stepupapp }}" { action(type="omfile" DynaFile="stepup-{{ stepupapp }}-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -if $programname == "{{ stepupapp }}" and $msg startswith " {{ stepupapp }}" then { action(type="omfile" DynaFile="apache-{{ stepupapp }}-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } +if $programname == "{{ stepupapp }}" and $msg startswith "{{ stepupapp }}" then { action(type="omfile" DynaFile="apache-{{ stepupapp }}-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "{{ stepupapp }}" { action(type="omfile" DynaFile="stepup-{{ stepupapp }}-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "Apache-{{ stepupapp }}" { action(type="omfile" DynaFile="apache-{{ stepupapp }}-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -if $programname == "{{ stepupapp }}" and $msg startswith " {{ stepupapp }}" then { action(type="omfile" DynaFile="apache-{{ stepupapp }}-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -:programname, isequal, "{{ stepupapp }}" { action(type="omfile" DynaFile="stepup-{{ stepupapp }}-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } {% endfor %} :programname, isequal, "Apache-azuremfa" { action(type="omfile" DynaFile="apache-azure-mfa-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -if $programname == "spdashboard" and $msg startswith " spdashboard" then { action(type="omfile" DynaFile="apache-spdashboard-{{item.name }}" {{ 
rsyslog_dir_file_modes }} ) stop } +if $programname == "spdashboard" and $msg startswith "spdashboard" then { action(type="omfile" DynaFile="apache-spdashboard-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "spdashboard" { action(type="omfile" DynaFile="spdashboard-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "stepup-authentication" { action(type="omfile" DynaFile="stepup-authentication-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } diff --git a/roles/rsyslog/templates/sc_template.conf.j2 b/roles/rsyslog/templates/sc_template.conf.j2 index 8e90e4239..d6b765f0a 100644 --- a/roles/rsyslog/templates/sc_template.conf.j2 +++ b/roles/rsyslog/templates/sc_template.conf.j2 @@ -36,15 +36,16 @@ $template myconext-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/edui $template myconextjson-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/eduid/myconextjson.log" $template apache-myconext-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/eduid/myconext-apache.log" $template apache-account-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/eduid/account-apache.log" +$template apache-servicedesk-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/servicedeskgui/servicedesk-apache.log" $template apache-eduid-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/eduid/eduid-apache.log" $template spdashboard-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/spdashboard/spdashboard.log" $template apache-spdashboard-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/spdashboard/apache.log" -$template inviteclient-{{ item.name }}, "/opt/surfconext/logs/apps/{{ item.name }}/invite//inviteclient.log" -$template invitewelcome-{{ item.name }}, "/opt/surfconext/logs/apps/{{ item.name }}/invite//invitewelcome.log" -$template inviteserver-{{ item.name }}, "/opt/surfconext/logs/apps/{{ item.name }}/invite//inviteserver.log" +$template inviteclient-{{ item.name }}, "{{ 
rsyslog_dir }}/apps/{{ item.name }}/invite//inviteclient.log" +$template invitewelcome-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/invite//invitewelcome.log" +$template inviteserver-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/invite//inviteserver.log" $template invitejson-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/invite/invitejson.log" -$template inviteprovisioningmock-{{ item.name }}, "/opt/surfconext/logs/apps/{{ item.name }}/invite//inviteprovisioningmock.log" -$template loadbalancer-{{ item.name }}, "/opt/surfconext/logs/apps/{{ item.name }}/traefik/traefik.log" +$template inviteprovisioningmock-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/invite//inviteprovisioningmock.log" +$template loadbalancer-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/traefik/traefik.log" {% for stepupapp in stepupapps %} $template stepup-{{ stepupapp }}-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/{{ stepupapp }}/{{ stepupapp }}.log $template apache-{{ stepupapp }}-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/{{ stepupapp }}/{{ stepupapp }}-apache.log diff --git a/roles/rsyslog/templates/warn-empty-log.sh.j2 b/roles/rsyslog/templates/warn-empty-log.sh.j2 new file mode 100644 index 000000000..aad42fdd1 --- /dev/null +++ b/roles/rsyslog/templates/warn-empty-log.sh.j2 @@ -0,0 +1,80 @@ +#!/bin/bash + +# Script to check for empty log files in {{ rsyslog_monitor_for_emptylogs_path }} +# Usage: ./check_empty_logs.sh [-m] +# -m: send mail if empty files are found + +set -e + +SEND_MAIL=false +RECIPIENT="{{ rsyslog_warn_empty_log_recipient }}" +HOSTNAME=$(hostname --fqdn) + +# Parse command line options +while getopts "m" opt; do + case $opt in + m) + SEND_MAIL=true + ;; + \?) 
+ echo "Invalid option: -$OPTARG" >&2 + exit 1 + ;; + esac +done + +# Find empty log files and read into array +mapfile -t empty_files < <(find {{ rsyslog_monitor_for_emptylogs_path }} -maxdepth 2 -type f -size 0) + +# If no empty files found, exit successfully +{% raw %} +if [ ${#empty_files[@]} -eq 0 ]; then + exit 0 +fi +{% endraw %} + +# Empty files were found +{% raw %} +file_count=${#empty_files[@]} +{% endraw %} + +# Determine singular or plural +if [ $file_count -eq 1 ]; then + file_word="file" + verb="was" +else + file_word="files" + verb="were" +fi + +# Create mail body +mail_body=$(cat <


    1. -teams_host={{ spdashboard_teams_host }} -teams_username={{ spdashboard_teams_api_user }} -teams_password={{ spdashboard_teams_api_password }} -team_prefix_default_stem_name={{ spdashboard_teams_default_stem }} -team_prefix_group_name_context={{ spdashboard_teams_group_name_context }} + acs_location_route_name=dashboard_saml_consume_assertion metadata_url_timeout=30 test_idp_entity_ids=[{{ spdashboard_test_idps }}] authorization_attribute_name={{ spdashboard_surf_autorization_attribute }} surfconext_representative_authorization={{ spdashboard_surf_autorization_attribute_svc_value }} + +## Teams test instance +teams_host={{ spdashboard_teams_host }} +teams_username={{ spdashboard_teams_api_user }} +teams_password={{ spdashboard_teams_api_password }} +team_prefix_default_stem_name={{ spdashboard_teams_default_stem }} +team_prefix_group_name_context={{ spdashboard_teams_group_name_context }} + +spdashboard_manage_id={{ spdashboard_manage_id }} + +## Invite instance +invite_host={{ spdashboard_invite_host }} +invite_api_username={{ spdashboard_invite_api_username }} +invite_api_password={{ spdashboard_invite_api_password }} +invite_landing_url={{ spdashboard_invite_landing_url }} diff --git a/test_containers_playbook.yml b/test_containers_playbook.yml deleted file mode 100644 index f89a1cc88..000000000 --- a/test_containers_playbook.yml +++ /dev/null @@ -1,336 +0,0 @@ ---- -- name: Test attribute-aggregation containers - hosts: docker_attribute_aggregation - become: true - tasks: - - name: Check attribite-aggregation containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - aaserver - -- name: Test dashboard containers - hosts: docker_dashboard - become: true - tasks: - - name: Check dashboard containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} 
.State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - dashboardserver - - dashboardgui - -- name: Test diyidp containers - hosts: docker_diyidp - become: true - tasks: - - name: Check diyidp containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - diyidp - -- name: Test engineblock containers - hosts: docker_engineblock - become: true - tasks: - - name: Check engineblock containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - engineblock - -- name: Test invite containers - hosts: docker_invite - become: true - tasks: - - name: Check invite containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - inviteserver - - inviteclient - - invitewelcome - -- name: Test lifecycle containers - hosts: docker_lifecycle - become: true - tasks: - - name: Check invite app containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - lifecycle - -- name: Test manage containers - hosts: docker_manage - become: true - tasks: - - name: Check manage containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - 
with_items: - - manageserver - - managegui - -- name: Test minio containers - hosts: docker_minio - become: true - tasks: - - name: Check minio containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - minio - -- name: Test mujina-idp containers - hosts: docker_mujina_idp - become: true - tasks: - - name: Check mujina-idp containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - mujina-idp - -- name: Test mujina-sp containers - hosts: docker_mujina_sp - become: true - tasks: - - name: Check mujina-sp containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - mujina-sp - -- name: Test myconext containers - hosts: docker_myconext - become: true - tasks: - - name: Check myconext containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - myconextserver - - myconextgui - - accountgui - - servicedeskgui - -- name: Test oidcng containers - hosts: docker_oidcng - become: true - tasks: - - name: Check oidcng containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - oidcngserver - -- name: Test oidc-playground containers - hosts: docker_oidc_playground - become: true - tasks: - - name: 
Check playground containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - oidcplaygroundserver - - oidcplaygroundgui - -- name: Test pdp pcontainers - hosts: docker_pdp - become: true - tasks: - - name: Check pdp containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - pdpserver - - pdpgui - -- name: Test profile containers - hosts: docker_profile - become: true - tasks: - - name: Check profile containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - profile - -- name: Test stats containers - hosts: docker_stats - become: true - tasks: - - name: Check stats containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - statsserver - - statsgui - -- name: Test stepupazuremfa containers - hosts: docker_stepupazuremfa - become: true - tasks: - - name: Check stepupazuremfa containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - azuremfa - -- name: Test stepupgateway containers - hosts: docker_stepupgateway - become: true - tasks: - - name: Check stepupgateway containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - 
register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - gateway - -- name: Test stepupmiddleware containers - hosts: docker_stepupmiddleware - become: true - tasks: - - name: Check stepupmiddleware containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - middleware - -- name: Test stepupra containers - hosts: docker_stepupra - become: true - tasks: - - name: Check stepupra containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - ra - -- name: Test stepupselfservice containers - hosts: docker_stepupselfservice - become: true - tasks: - - name: Check stepupselfservice containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - selfservice - -- name: Test stepuptiqr containers - hosts: docker_stepuptiqr - become: true - tasks: - - name: Check stepuptiqr containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - tiqr - -- name: Test stepupwebauthn containers - hosts: docker_stepupwebauthn - become: true - tasks: - - name: Check stepupwebauthn containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - webauthn - -- name: 
Test teams containers - hosts: docker_teams - become: true - tasks: - - name: Check teams containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - teamsserver - - teamsgui - -- name: Test voot containers - hosts: docker_voot - become: true - tasks: - - name: Check voot containers state - ansible.builtin.command: - cmd: docker inspect -f '{{ '{{' }} .State.Status {{ '}}' }}' "{{ item }}" - register: container_state - failed_when: container_state.stdout != "running" - changed_when: false - with_items: - - vootserver diff --git a/test_loadbalancers_playbook.yml b/test_loadbalancers_playbook.yml deleted file mode 100644 index aedd9166f..000000000 --- a/test_loadbalancers_playbook.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- name: Test loadbalancer - hosts: loadbalancer - gather_facts: false - become: false - tasks: - - - name: Read vars from secrets file - ansible.builtin.include_vars: "{{ inventory_dir }}/secrets/secrets.yml" - no_log: true - tags: - - always - - - name: Include task list in play - ansible.builtin.include_tasks: - file: inc_test_loadbalancer_tasklist.yml - with_items: "{{ haproxy_applications }}"