diff --git a/.ansible-lint b/.ansible-lint new file mode 100644 index 000000000..5ab59f61d --- /dev/null +++ b/.ansible-lint @@ -0,0 +1,3 @@ +--- +profile: "production" +offline: false diff --git a/.github/workflows/molecule-loadbalancer.yml b/.github/workflows/molecule-loadbalancer.yml index 9456def47..ca03e28f3 100644 --- a/.github/workflows/molecule-loadbalancer.yml +++ b/.github/workflows/molecule-loadbalancer.yml @@ -2,31 +2,32 @@ name: loadbalancer on: - push: - paths: - - 'roles/haproxy/**' - - 'roles/keepalived/**' - - 'roles/bind/**' - - 'molecule/loadbalancer/**' - - 'roles/selfsigned_certs/**' - - '.github/workflows/molecule-loadbalancer.yml' - pull_request: - paths: - - 'roles/haproxy/**' - - 'roles/keepalived/**' - - 'roles/bind/**' - - 'molecule/loadbalancer/**' - - 'roles/selfsigned_certs/**' - - '.github/workflows/molecule-loadbalancer.yml' + workflow_dispatch: + # push: + # paths: + # - 'roles/haproxy/**' + # - 'roles/keepalived/**' + # - 'roles/bind/**' + # - 'molecule/loadbalancer/**' + # - 'roles/selfsigned_certs/**' + # - '.github/workflows/molecule-loadbalancer.yml' + # pull_request: + # paths: + # - 'roles/haproxy/**' + # - 'roles/keepalived/**' + # - 'roles/bind/**' + # - 'molecule/loadbalancer/**' + # - 'roles/selfsigned_certs/**' + # - '.github/workflows/molecule-loadbalancer.yml' jobs: build: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - name: Set up Python 3.8 - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: 3.8 diff --git a/.github/workflows/molecule-mongo.yml b/.github/workflows/molecule-mongo.yml index 4d8b6b713..f0b52d67d 100644 --- a/.github/workflows/molecule-mongo.yml +++ b/.github/workflows/molecule-mongo.yml @@ -2,25 +2,25 @@ name: mongo on: - push: - paths: - - 'roles/mongo/**' - - 'molecule/mongo/**' - - '.github/workflows/molecule-mongo.yml' - pull_request: - paths: - - 'roles/mongo/**' - - 'molecule/mongo/**' - - 
'.github/workflows/molecule-mongo.yml' + workflow_dispatch: + # push: + # paths: + # - 'roles/mongo/**' + # - 'molecule/mongo/**' + # - '.github/workflows/molecule-mongo.yml' + # pull_request: + # paths: + # - 'roles/mongo/**' + # - 'molecule/mongo/**' + # - '.github/workflows/molecule-mongo.yml' jobs: build: - runs-on: ubuntu-20.04 + runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v4 - + - uses: actions/checkout@v6 - name: Set up Python 3.8 - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: 3.8 diff --git a/.github/workflows/syntax.yml b/.github/workflows/syntax.yml index c4a1e63aa..8295b151f 100644 --- a/.github/workflows/syntax.yml +++ b/.github/workflows/syntax.yml @@ -19,10 +19,10 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - name: Set up Python 3.8 - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: 3.8 diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 000000000..fb9f01809 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,16 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +### Changed +- All group_var files are moved to the environment example template, more information about where to save group_vars in the [README](https://github.com/OpenConext/OpenConext-deploy/blob/main/README.md) +- separate plays for separate roles in the provision.yml playbook +- separate groups are defined for separate apps, dividing apps across the container servers should be set in the inventory not in the playbook, this way you can easily change it for different environments. This also makes it impossible to use the wrong tag and deploy something you did not intend to, instead nothing will happen. 
+- mysql_standalone group replaces storage group + +### Removed +- selfsigned_certs role is deprecated and removed from the provision.yml playbook +- environment/playbook inclusion in provision.yml + +### Todo +- [ ] Complete environments/template diff --git a/README.md b/README.md index 76fb8da81..a6623af49 100644 --- a/README.md +++ b/README.md @@ -39,12 +39,12 @@ Every application has a seperate role to install it. The following roles can be | stepupra | Stepup ra interface | | stepupselfservice | Stepup selfservice interface | -All these applications run in Docker. You can use the "docker" role to install docker and Traefik. The result is a Docker application server, with port 443 open. Applications are served by Traefik and recognized on basis of a Host: header. If you run a small installation, you can add a https certificate to Traefik and run a single node application server. +All these applications run in Docker. You can use the "docker" role to install docker and Traefik. The result is a Docker application server, with port 443 open. Applications are served by Traefik and recognized on basis of a Host: header. If you run a small installation, you can add a https certificate to Traefik and run a single node application server. -For a fully functioning environment you also need a MariaDB database server and a Mongo database server. +For a fully functioning environment you also need a MariaDB database server and a Mongo database server. ## Infra roles -This repository is used for deployment of SURFconext, and several roles that the SURFconext teams uses to provision our infrastructure are provided here as well. You can use them for your own infrastructure or use them as inspiration. +This repository is used for deployment of SURFconext, and several roles that the SURFconext teams uses to provision our infrastructure are provided here as well. You can use them for your own infrastructure or use them as inspiration. 
| name | remarks | | --- | --- | | bind | DNS server for high availability. Very specific for SURFconext | @@ -59,40 +59,93 @@ This repository is used for deployment of SURFconext, and several roles that the | mongo | Install a mongo cluster (has its own README) | | manage_provision_entities|Provision entities to Manage | -# Environment specific variables -Many variables can be overridden to create a setup suitable for your needs. The environment should be placed in the directory environments_external. +# Setting up your environment +Many variables can be overridden to create a setup suitable for your needs. We will explain the setup here for one environment or for a multi-environment (OTAP for example) setup. -A script is available to provision a new environment. It will create a new environment directory under environments-external/ and it will create all necessary passwords and (self-signed) certificates. Replace with the name of the target. Replace with the domain of the target. +The setup described below should work, but when using ansible many paths lead to Rome. If you want to know more about variables and where to save them, this can be helpful: https://docs.ansible.com/projects/ansible/latest/playbook_guide/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable +## Inventory +You need an inventory file for your environment or multiple inventory files if you have multiple environments. An example can be found in environments/template +## Playbook +You can use the provision.yml script to deploy all infra and application roles. Every play has a tag so you can deploy your environment one application at a time by using the specific tag. You can also use your own playbooks if you prefer. + +## First steps +Clone the repository with git. 
+ +```bash +cd yourdir +git clone https://github.com/OpenConext/OpenConext-deploy.git ``` -/prep-env -``` -Then run -``` -cp environments-external//host_vars/template.yml environments-external//host_vars/.yml -``` -(where is the ip address or hostname of your target machine, whatever is set in your inventory file) -Change in environments-external//inventory: -Change all references from %target_host% to +Create ansible.cfg in your directory and add Openconext-deploy/roles to your roles_path +```bash +[defaults] +diff = true +roles_path = OpenConext-deploy/roles # Add your own roles directory if you want ``` -Please note that this has not been tested in quite a while. You will need a lot of manual work to get this environment working + +## One environment +Copy the inventory, host and group files from environment/template to your directory and adjust them according to your preferences: + +```bash +cp -R OpenConext-deploy/environments/template/* . ``` +Edit your inventory file +Edit group_var and host_var files if necessary + +Create an ansible vault in secrets and name it secrets.yml, an unencrypted example can be found in secrets/secret_example.yml +More information about vaults: https://docs.ansible.com/projects/ansible/latest/vault_guide/index.html +The final setup will look like this: -# Playbooks, tags and the provision wrapper script +- group_vars/all.yml +- group_vars/\.yml +- secrets/secrets.yml +- host_vars/\/yml +- inventory +- Openconext-deploy/provision.yml +- Openconext-deploy/roles +- \.yml +- ansible.cfg -Two playbooks exist in this repository: provision.yml and playbook_haproxy.yml. The latter can be used to do red/blue deployments if you also use our haproxy role. -The main playbook is provision.yml. It contains series of plays to install every role on the right node. All roles are tagged, so you can use the [Ansible tag mechanism](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_tags.html) to deploy a specific role. 
+You can use the provision playbook now: -If you would like to deploy manage to your test environment, you would run: +```bash +ansible-playbook OpenConext-deploy/provision.yml -i inventory -t --ask-vault-password ``` -ansible-playbook -i environments-external/test/inventory --tags manage -u THE_REMOTE_SSH_USER_WITH_SUDO_PERMISSIONS + +## Multi-environment +Copy the inventory and group files from environment/template to your directory and adjust them according to your preferences: + +```bash +mkdir # test for example +cp -R OpenConext-deploy/environments/template/* +# etc... ``` +Edit your inventory files +Edit group_var and host_var files if necessary + +For each environment create an ansible vault in secrets and name it secrets.yml, an unencrypted example can be found in secrets/secret_example.yml +More information about vaults: https://docs.ansible.com/projects/ansible/latest/vault_guide/index.html + +The final setup will look like this: -A wrapper script which enables you to use your own roles can be used as well. 
That is documented here: https://github.com/OpenConext/OpenConext-deploy/wiki/Add-your-own-roles-and-playbooks +- \/group_vars/all.yml +- \/group_vars/\.yml +- \/host_vars/\/yml +- \/inventory +- Openconext-deploy/provision.yml +- Openconext-deploy/roles +- \.yml +- ansible.cfg + +You can use the provision playbook now: + +```bash +ansible-playbook OpenConext-deploy/provision.yml -i /inventory -t --ask-vault-password +``` # License diff --git a/deploy_containers_playbook.yml b/deploy_containers_playbook.yml deleted file mode 100644 index 9bfa14562..000000000 --- a/deploy_containers_playbook.yml +++ /dev/null @@ -1,165 +0,0 @@ ---- -- name: Deploy containerized applications - hosts: docker_servers - become: true - tasks: - - name: Read vars from secrets file - ansible.builtin.include_vars: "{{ inventory_dir }}/secrets/secrets.yml" - no_log: true - tags: - - always - -# Separate grouos for all containerized apps -# Dividing apps across the container services should be set in -# the inventory not in the playbook, this way you can easily change -# it for different environments - -- name: Deploy attribute-aggregation app - hosts: docker_attribute_aggregation - become: true - roles: - - { role: attribute-aggregation, tags: ['aa', 'attribute-aggregation'] } - -- name: Deploy dashboard app - hosts: docker_dashboard - become: true - roles: - - { role: dashboard, tags: ['dashboard'] } - -- name: Deploy diyidp app - hosts: docker_diyidp - become: true - roles: - - { role: diyidp, tags: ['diyidp'] } - -- name: Deploy engineblock app - hosts: docker_engineblock - become: true - roles: - - { role: engineblock, tags: ['engineblock', 'eb'] } - -- name: Deploy invite app - hosts: docker_invite - become: true - roles: - - { role: invite, tags: ['invite'] } - -- name: Deploy lifecycle app - hosts: docker_lifecycle - become: true - roles: - - { role: lifecycle, tags: ['lifecycle'] } - -- name: Deploy manage app - hosts: docker_manage - become: true - roles: - - { role: manage, tags: 
['manage'] } - -- name: Deploy mujina-idp app - hosts: docker_mujina_idp - become: true - roles: - - { role: mujina-idp, tags: ['mujina-idp', 'mujina'] } - -- name: Deploy mujina-sp app - hosts: docker_mujina_sp - become: true - roles: - - { role: mujina-sp, tags: ['mujina-sp', 'mujina'] } - -- name: Deploy myconext app - hosts: docker_myconext - become: true - roles: - - { role: myconext, tags: ['myconext'] } - -- name: Deploy oidcng app - hosts: docker_oidcng - become: true - roles: - - { role: oidcng, tags: ['oidcng'] } - -- name: Deploy oidc-playground app - hosts: docker_oidc-playground - become: true - roles: - - { role: oidc-playground, tags: ['oidc-playground'] } - -- name: Deploy pdp app - hosts: docker_pdp - become: true - roles: - - { role: pdp, tags: ['pdp'] } - -- name: Deploy profile app - hosts: docker_profile - become: true - roles: - - { role: profile, tags: ['profile'] } - -- name: Deploy stats app - hosts: docker_stats - become: true - roles: - - { role: stats, tags: ['stats'] } - -- name: Deploy stepupazuremfa app - hosts: docker_stepupazuremfa - become: true - roles: - - { role: stepupazuremfa, tags: ['stepupazuremfa', 'stepup'] } - -- name: Deploy stepupgateway app - hosts: docker_stepupgateway - become: true - roles: - - { role: stepupgateway, tags: ['stepupgateway', 'stepup'] } - -- name: Deploy stepupmiddleware app - hosts: docker_stepupmiddleware - become: true - roles: - - { role: stepupmiddleware, tags: ['stepupmiddleware', 'stepup'] } - -- name: Deploy stepupra app - hosts: docker_stepupra - become: true - roles: - - { role: stepupra, tags: ['stepupra', 'stepup'] } - -- name: Deploy stepupselfservice app - hosts: docker_stepupselfservice - become: true - roles: - - { role: stepupselfservice, tags: ['stepupselfservice', 'stepup'] } - -- name: Deploy stepuptiqr app - hosts: docker_stepuptiqr - become: true - roles: - - { role: stepuptiqr, tags: ['stepuptiqr', 'stepup'] } - -- name: Deploy stepupwebauthn app - hosts: docker_stepupwebauthn 
- become: true - roles: - - { role: stepupwebauthn, tags: ['stepupwebauthn', 'stepup'] } - -- name: Deploy teams app - hosts: docker_teams - become: true - roles: - - { role: teams, tags: ['teams'] } - -- name: Deploy voot app - hosts: docker_voot - become: true - roles: - - { role: voot, tags: ['voot'] } - -- name: Deploy minio app - hosts: docker_minio - become: true - roles: - - { role: minio, tags: ['minio'] } diff --git a/deploy_docker_playbook.yml b/deploy_docker_playbook.yml deleted file mode 100644 index eaa54940f..000000000 --- a/deploy_docker_playbook.yml +++ /dev/null @@ -1,16 +0,0 @@ ---- -- name: Read inventory secrets - hosts: docker_servers - become: true - tasks: - - name: Read vars from secrets file - ansible.builtin.include_vars: "{{ inventory_dir }}/secrets/secrets.yml" - no_log: true - tags: - - always - -- name: Configure docker servers - hosts: docker_servers - become: true - roles: - - { role: docker, tags: ['docker'] } diff --git a/deploy_loadbalancers_playbook.yml b/deploy_loadbalancers_playbook.yml deleted file mode 100644 index 12daca264..000000000 --- a/deploy_loadbalancers_playbook.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -- name: Read secrets - hosts: loadbalancer - become: true - tasks: - - name: Read vars from secrets file - ansible.builtin.include_vars: "{{ inventory_dir }}/secrets/secrets.yml" - no_log: true - tags: - - always - -- name: Deploy haproxy - hosts: loadbalancer - gather_facts: true - become: true - roles: - - role: haproxy - tags: ['core', 'loadbalancer', 'lb'] - -- name: Deploy keepalived and bind for clustered loadbalancers - hosts: loadbalancer_ha - gather_facts: true - become: true - roles: - - role: keepalived - tags: ['core', 'loadbalancer_ha', 'keepalived'] - - role: bind - tags: ['core', 'loadbalancer_ha', 'bind'] diff --git a/deploy_mariadb_playbook.yml b/deploy_mariadb_playbook.yml deleted file mode 100644 index 73b314ff7..000000000 --- a/deploy_mariadb_playbook.yml +++ /dev/null @@ -1 +0,0 @@ ---- \ No 
newline at end of file diff --git a/deploy_mongo_playbook.yml b/deploy_mongo_playbook.yml deleted file mode 100644 index 73b314ff7..000000000 --- a/deploy_mongo_playbook.yml +++ /dev/null @@ -1 +0,0 @@ ---- \ No newline at end of file diff --git a/group_vars/all.yml b/environments/template/group_vars/all.yml similarity index 92% rename from group_vars/all.yml rename to environments/template/group_vars/all.yml index c09ca2628..1d8bd6f84 100644 --- a/group_vars/all.yml +++ b/environments/template/group_vars/all.yml @@ -34,8 +34,9 @@ httpd_csp: lenient: "default-src 'self'; object-src 'none'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; img-src 'self' data:; form-action 'self'; frame-ancestors 'none'; base-uri 'none'" lenient_with_static_img: "default-src 'self'; object-src 'none'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; img-src 'self' https://{{ static_vhost }} http://localhost:* data:; form-action 'self'; frame-ancestors 'none'; base-uri 'none'" lenient_with_static_img_with_oidcng: "default-src 'self'; object-src 'none'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; connect-src 'self' https://{{ oidcng_vhost }}; img-src 'self' https://{{ static_vhost }} http://localhost:* data:; form-action 'self'; frame-ancestors 'none'; base-uri 'none'" - strict: "default-src 'none'; script-src 'self'; style-src 'self' 'unsafe-inline'; font-src 'self'; connect-src 'self'; img-src 'self' data:; form-action 'self'; frame-ancestors 'none'; base-uri 'none'" - strict_with_static_img: "default-src 'none'; script-src 'self'; style-src 'self' 'unsafe-inline'; font-src 'self'; connect-src 'self'; img-src 'self' https://{{ static_vhost }} http://localhost:* data:; form-action 'self'; frame-ancestors 'none'; base-uri 'none'" + lenient_with_static_img_with_surfconext: "default-src 'self'; object-src 'none'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; connect-src 'self' https://{{ oidcng_vhost 
}}; img-src 'self' https://{{ static_vhost }} https://*.surfconext.nl http://localhost:* data:; form-action 'self'; frame-ancestors 'none'; base-uri 'none'" + strict: "default-src 'none'; script-src 'self'; style-src 'self' 'unsafe-inline'; font-src 'self'; connect-src 'self'; img-src 'self' data:; form-action 'self'; frame-ancestors 'none'; base-uri 'none'; manifest-src 'self'" + strict_with_static_img: "default-src 'none'; script-src 'self'; style-src 'self' 'unsafe-inline'; font-src 'self'; connect-src 'self'; img-src 'self' https://{{ static_vhost }} http://localhost:* data:; form-action 'self'; frame-ancestors 'none'; base-uri 'none'; manifest-src 'self'" lenient_with_static_img_for_idp: "default-src 'none'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; font-src 'self'; connect-src 'self' https://{{ oidcng_vhost }}; img-src 'self' https://{{ static_vhost }} http://localhost:* data:; form-action 'self' https://*.{{ base_domain }}; frame-ancestors 'none'; base-uri 'none'" lenient_with_static_img_for_idp_frcapi: "default-src 'none'; frame-src https://global.frcapi.com/; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; font-src 'self'; connect-src 'self' https://{{ oidcng_vhost }}; img-src 'self' https://{{ static_vhost }} http://localhost:* data:; form-action 'self' https://*.{{ base_domain }}; frame-ancestors 'none'; base-uri 'none'" nothing: "default-src 'none'; frame-ancestors 'none'; form-action 'none'; base-uri 'none'" @@ -84,12 +85,3 @@ stepup_loa_values_supported: - "http://{{ base_domain }}/assurance/loa1.5" - "http://{{ base_domain }}/assurance/loa2" - "http://{{ base_domain }}/assurance/loa3" - -# If values are added, please add them to the IdP-Dashboard locales -mfa_values_supported: - - "http://schemas.microsoft.com/claims/multipleauthn" - - "https://refeds.org/profile/mfa" - - "urn:oasis:names:tc:SAML:2.0:ac:classes:MobileOneFactorContract" - - 
"urn:oasis:names:tc:SAML:2.0:ac:classes:MobileOneFactorUnregistered" - - "urn:oasis:names:tc:SAML:2.0:ac:classes:Password" - - "transparent_authn_context" diff --git a/group_vars/java-apps-common.yml b/environments/template/group_vars/java-apps-common.yml similarity index 100% rename from group_vars/java-apps-common.yml rename to environments/template/group_vars/java-apps-common.yml diff --git a/group_vars/local-certs.yml b/environments/template/group_vars/local-certs.yml similarity index 100% rename from group_vars/local-certs.yml rename to environments/template/group_vars/local-certs.yml diff --git a/group_vars/minimal.yml b/environments/template/group_vars/minimal.yml similarity index 100% rename from group_vars/minimal.yml rename to environments/template/group_vars/minimal.yml diff --git a/environments/template/group_vars/mongo_servers.yml b/environments/template/group_vars/mongo_servers.yml new file mode 100644 index 000000000..70bb40871 --- /dev/null +++ b/environments/template/group_vars/mongo_servers.yml @@ -0,0 +1,12 @@ +--- +replica_set_name: my_mongo_cluster + +mongo_cluster_members: + - host: "mongo3.example.com:{{ mongo_port }}" # arbiter first or change mongo_arbiter_index + priority: 1 # can vote, cannot become primary + - host: "mongo2.example.com:{{ mongo_port }}" + priority: 2 + - host: "mongo1.example.com:{{ mongo_port }}" + priority: 3 + +# mongo_arbiter_index # default is 0 \ No newline at end of file diff --git a/environments/template/group_vars/template.yml b/environments/template/group_vars/template.yml index 866755ceb..a3ffd5d53 100644 --- a/environments/template/group_vars/template.yml +++ b/environments/template/group_vars/template.yml @@ -1,6 +1,7 @@ --- env: "%env%" +show_debug_info: false # Show extra debug info mariadb_host: localhost rsyslog_host: localhost database_clients: [ "{{ mariadb_host }}" ] @@ -27,8 +28,7 @@ relp_remote: php_display_errors: 1 -attribute_aggregation_gui_version: "3.0.6" -attribute_aggregation_server_version: 
"3.0.6" +attribute_aggregation_version: "3.0.6" oidc_playground_client_version: "3.0.0" oidc_playground_server_version: "3.0.0" engine_version: "6.15.0" @@ -38,15 +38,12 @@ lifecycle_version: "0.1.1" monitoring_tests_version: "7.0.0" mujina_version: "8.0.2" oidcng_version: "6.1.6" -pdp_server_version: "4.0.2" -pdp_gui_version: "4.0.2" +pdp_version: "7.3.0" profile_version: "3.1.4" teams_gui_version: "9.1.3" teams_server_version: "9.1.3" voot_version: "6.2.0" -myconext_server_version: "6.0.2" -myconext_gui_version: "6.0.2" -account_gui_version: "6.0.2" +myconext_version: "8.1.12-1" dashboard_server_version: "12.3.4" dashboard_gui_version: "12.3.4" invite_server_version: "0.0.2-SNAPSHOT" @@ -283,12 +280,15 @@ myconext: feature_mail_inactivity_mails: true feature_nudge_app_mail: true feature_service_desk_active: true + feature_use_remote_creation_for_affiliation: true feature_send_js_exceptions: true feature_deny_disposable_email_providers: true feature_create_eduid_institution_enabled: true feature_create_eduid_institution_landing: true feature_allowlist: false feature_dry_run_email_cron: true + feature_enable_account_linking: true + feature_use_app: true sms_api_url: "https://rest.spryngsms.com/v1/messages" sms_api_route: "default" sp_entity_id: https://engine.{{ base_domain }}/authentication/sp/metadata @@ -302,6 +302,13 @@ myconext: - { name: "privacy_policy" , url: "https://example.org/x/MIzaAQ" } - { name: "terms_of_service" , url: "https://example.org/x/LozaAQ"} - { name: "voorwaarden" , url: "https://example.org/x/HYzaAQ"} + geo_location_external_url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key={license_key}&suffix=tar.gz" + email: + from_deprovisioning: MyConext + from_code: MyConext + from_app_nudge: MyConext + from_new_device: MyConext + monitoring_tests: metadata_sp_url: "{{ monitoring_tests_metadata_sp_url }}" @@ -372,6 +379,15 @@ manage: } oidc_rp_redirect_url_format: "url" +# If values are added, please add
them to the IdP-Dashboard locales +mfa_values_supported: + - "http://schemas.microsoft.com/claims/multipleauthn" + - "https://refeds.org/profile/mfa" + - "urn:oasis:names:tc:SAML:2.0:ac:classes:MobileOneFactorContract" + - "urn:oasis:names:tc:SAML:2.0:ac:classes:MobileOneFactorUnregistered" + - "urn:oasis:names:tc:SAML:2.0:ac:classes:Password" + - "transparent_authn_context" + loadbalancing: engine: port: 401 @@ -597,3 +613,9 @@ docker_servers: iptables_enable: false dashboard_install: false + +# these already have appropriate defaults in the template or rsyslog but you can +# change them if necessary, for example on docker hosts they have to be a little higher +# rsyslog_imjournal_ratelimitburst: 2000 +# rsyslog_imjournal_ratelimitinterval: 600 +# rsyslog_maxmessagesize: 8000 diff --git a/environments/template/host_vars/mongo1.example.com/vars.yml b/environments/template/host_vars/mongo1.example.com/vars.yml new file mode 100644 index 000000000..66c502e27 --- /dev/null +++ b/environments/template/host_vars/mongo1.example.com/vars.yml @@ -0,0 +1,3 @@ +backend_ipv4: +mongo_replication_role: primary +mongo_cluster_cert: # use an identical OU, O or DC for all cluster members \ No newline at end of file diff --git a/environments/template/host_vars/mongo1.example.com/vault b/environments/template/host_vars/mongo1.example.com/vault new file mode 100644 index 000000000..df314594e --- /dev/null +++ b/environments/template/host_vars/mongo1.example.com/vault @@ -0,0 +1 @@ +mongo_cluster_private_key: # encrypt this \ No newline at end of file diff --git a/environments/template/host_vars/mongo2.example.com/vars.yml b/environments/template/host_vars/mongo2.example.com/vars.yml new file mode 100644 index 000000000..6800d6fdc --- /dev/null +++ b/environments/template/host_vars/mongo2.example.com/vars.yml @@ -0,0 +1,3 @@ +backend_ipv4: +mongo_replication_role: secondary +mongo_cluster_cert: # use an identical OU, O or DC for all cluster members \ No newline at end of file diff 
--git a/environments/template/host_vars/mongo2.example.com/vault b/environments/template/host_vars/mongo2.example.com/vault new file mode 100644 index 000000000..df314594e --- /dev/null +++ b/environments/template/host_vars/mongo2.example.com/vault @@ -0,0 +1 @@ +mongo_cluster_private_key: # encrypt this \ No newline at end of file diff --git a/environments/template/host_vars/mongo3.example.com/vars.yml b/environments/template/host_vars/mongo3.example.com/vars.yml new file mode 100644 index 000000000..dba8e1797 --- /dev/null +++ b/environments/template/host_vars/mongo3.example.com/vars.yml @@ -0,0 +1,3 @@ +backend_ipv4: +mongo_replication_role: arbiter +mongo_cluster_cert: # use an identical OU, O or DC for all cluster members \ No newline at end of file diff --git a/environments/template/host_vars/mongo3.example.com/vault b/environments/template/host_vars/mongo3.example.com/vault new file mode 100644 index 000000000..df314594e --- /dev/null +++ b/environments/template/host_vars/mongo3.example.com/vault @@ -0,0 +1 @@ +mongo_cluster_private_key: # encrypt this \ No newline at end of file diff --git a/environments/template/inventory b/environments/template/inventory index 89e35169f..f1b3dabed 100644 --- a/environments/template/inventory +++ b/environments/template/inventory @@ -1,79 +1,145 @@ -[storage] -%target_host% - -[mongo_servers] -%target_host% - -[selfsigned_certs] -%target_host% - -[loadbalancer] -%target_host% - [%env%:children] -storage -mongo_servers -selfsigned_certs -sysloghost -loadbalancer_ha -loadbalancer -elk -lifecycle dbcluster dbcluster_nodes +docker_servers +lifecycle +loadbalancer +loadbalancer_ha +mongo_servers +mysql_only stats - -[sysloghost] -[loadbalancer_ha] -[elk] -[lifecycle] -[dbcluster] -[dbcluster_nodes] -[stats] +sysloghost [base:children] -loadbalancer -storage dbcluster -sysloghost -elk +dbcluster_nodes +docker_servers lifecycle -selfsigned_certs - -[loadbalancer:children] +loadbalancer loadbalancer_ha - -[frontend:children] 
-lifecycle +mongo_servers +mysql_only +stats +sysloghost [db_mysql:children] -storage +mysql_only dbcluster dbcluster_nodes -[local] -localhost ansible_connection=local +[mysql_only] -# for refactored playbooks +[dbcluster] # all galera members including arbitrator +db1.example.com +db2.example.com +db3.example.com # arbitrator -[%location%:children] # create one or more groups here for your chosen location(s) -storage -mongo_servers -sysloghost -loadbalancer_ha -loadbalancer +[dbcluster_nodes] # all galera members with a full mysql installation +db1.example.com +db2.example.com + +[frontend:children] lifecycle -dbcluster -dbcluster_nodes -stats -docker_servers -[base:children] -docker_servers +[loadbalancer] # use this for standalone loadbalancer + +[loadbalancer_ha] # use this for failover loadbalancer setup +lb1.example.com +lb2.example.com + +[lifecycle] +lifecycle1.example.com + +[mongo_servers] +mongo1.example.com +mongo2.example.com +mongo3.example.com # arbitrator + +[mysql_standalone] +stats1.example.com # stats server needs mysql +log1.example.com # log server needs mysql -[docker_servers] +[stats] +stats1.example.com + +[sysloghost] +log1.example.com + +# Docker + +[docker_servers:children] +docker_apps1 +docker_apps2 + +# Group the docker servers + +[docker_apps1] docker1.example.com + +[docker_apps2] docker2.example.com -[docker_invite] -docker2.example.com \ No newline at end of file +# Install containerized apps on the docker group you want + +[docker_invite:children] +docker_apps1 + +[docker_teams:children] +docker_apps1 + +[docker_pdp:children] +docker_apps1 + +[docker_voot:children] +docker_apps1 + +[docker_attribute_aggregation:children] +docker_apps1 + +[docker_oidc_playground:children] +docker_apps1 + +[docker_myconext:children] +docker_apps1 + +[docker_manage:children] +docker_apps1 + +[docker_oidcng:children] +docker_apps1 + +[docker_stats:children] +docker_apps1 + +[docker_diyidp:children] +docker_apps1 + +[docker_profile:children] 
+docker_apps1 + +[docker_lifecycle:children] +docker_apps1 + +[docker_engineblock:children] +docker_apps2 + +[docker_stepuptiqr:children] +docker_apps1 + +[docker_stepupwebauthn:children] +docker_apps1 + +[docker_stepupazuremfa:children] +docker_apps1 + +[docker_stepupmiddleware:children] +docker_apps2 + +[docker_stepupselfservice:children] +docker_apps1 + +[docker_stepupra:children] +docker_apps1 + +[docker_stepupgateway:children] +docker_apps2 diff --git a/environments/template/secrets/skeleton.yml b/environments/template/secrets/secret_example.yml similarity index 99% rename from environments/template/secrets/skeleton.yml rename to environments/template/secrets/secret_example.yml index 30337b720..d2faac776 100644 --- a/environments/template/secrets/skeleton.yml +++ b/environments/template/secrets/secret_example.yml @@ -142,6 +142,7 @@ invite_teams_secret: "secret" invite_attribute_aggregation_secret: "secret" invite_manage_secret: secret invite_lifecycle_secret: "secret" +invite_internal_secret: "secret" invite_profile_secret: "secret" invite_sp_dashboard_secret: "secret" invite_access_secret: "secret" diff --git a/filter_plugins/merge_usergroups.py b/filter_plugins/merge_usergroups.py deleted file mode 100644 index b252d80a2..000000000 --- a/filter_plugins/merge_usergroups.py +++ /dev/null @@ -1,42 +0,0 @@ -# merge_usersgroups: merge extra groups into a user object -# -# Usage: {{ users | merge_usergroups(extra_groups }} -# with users = [{"user1": "myuser", "groups": ["bar","baz"], "other": "stuff"}] -# and extra_groups = {"foo": ["user1"}] -# result: [{"user1": "myuser", "groups": ["foo","bar","baz"], "other": "stuff"}]` -# -from __future__ import annotations -from ansible.utils.display import Display - - -def _merge_usergroups(users: list[dict[str, str | list[str]]], - extra_groups: dict[str, list[str]]) -> list[dict[str, str | list[str]]]: - display = Display() - display.vv(f"_merge_usergroups: arg1: {users}") - display.vv(f"_merge_usergroups: arg2: 
{extra_groups}") - - # first invert the extra_groups dict to obtain a list of groups per user - user_extra_groups = {} - for group, group_users in extra_groups.items(): - for u in group_users: - if u not in user_extra_groups: - user_extra_groups[u] = [] - user_extra_groups[u].append(group) - - display.vv(f"_merge_usergroups: user_extra_groups: {user_extra_groups}") - - # then merge the extra groups into the user objects - for user in users: - user['groups'] = user.get('groups', []) + user_extra_groups.get(user['username'], []) - - display.vv(f"_merge_usergroups: users: {users}") - - return users - - -class FilterModule(object): - @staticmethod - def filters(): - return { - 'merge_usergroups': _merge_usergroups, - } diff --git a/inc_test_loadbalancer_tasklist.yml b/inc_test_loadbalancer_tasklist.yml deleted file mode 100644 index 9a697cf87..000000000 --- a/inc_test_loadbalancer_tasklist.yml +++ /dev/null @@ -1,39 +0,0 @@ ---- -# playbook loops over: -# loadbalancers -# haproxy_applications (item) -# -# and does an url request on lodbalancer ip with vhostname as the host header - -- name: Try to reach {{ item.vhost_name }} via https://{{ haproxy_sni_ip_restricted.ipv4 }}:443 # noqa: name[template] jinja template helps with debugging - when: item.restricted is defined and item.restricted - ansible.builtin.uri: - url: "https://{{ haproxy_sni_ip_restricted.ipv4 }}:443/{{ item.ha_url }}" - method: GET - status_code: [200, 302] - return_content: false - validate_certs: false - headers: - host: "{{ item.vhost_name }}" - register: result - until: result.status == 200 or result.status == 302 - retries: 3 - delay: 2 - delegate_to: 127.0.0.1 # run check from deploy host - -# Try to reach applications via loadbalancer without restricted ip v4 -- name: Try to reach {{ item.vhost_name }} via https://{{ haproxy_sni_ip.ipv4 }}:443 # noqa: name[template] jinja template helps with debugging - when: item.restricted is undefined - ansible.builtin.uri: - url: "https://{{ 
haproxy_sni_ip.ipv4 }}:443/{{ item.ha_url }}" - method: GET - status_code: [200, 302] - return_content: false - validate_certs: false - headers: - host: "{{ item.vhost_name }}" - register: result - until: result.status == 200 or result.status == 302 - retries: 3 - delay: 2 - delegate_to: 127.0.0.1 #run check from deploy host diff --git a/playbook_haproxy.yml b/playbook_haproxy.yml deleted file mode 100644 index 7c28c22f4..000000000 --- a/playbook_haproxy.yml +++ /dev/null @@ -1,7 +0,0 @@ ---- -- hosts: loadbalancer_ha - become: false - gather_facts: no - roles: - - { role: haproxy_mgnt, tags: ['haproxy_mgnt'] } - - { role: haproxy_acls, tags: ['haproxy_acls'] } diff --git a/prep-env b/prep-env old mode 100755 new mode 100644 index df4f8ad36..4f2a9e11f --- a/prep-env +++ b/prep-env @@ -7,6 +7,7 @@ # When a password must be sha-encoded, the clear-text password must be set before the sha-encoded # Key for the sha-encoded password must be the same as the clear-text password with the '_sha' # +# todo: If group_vars examples are complete this is probably not necessary? investigate # ----- configuration ENV_DIR="environments" diff --git a/provision b/provision deleted file mode 100755 index 6764b625a..000000000 --- a/provision +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash -set -e -export ANSIBLE_ROLES_PATH="roles:roles-external" - -# parse parameters -help="Usage: $0 [ANSIBLE_OPT] -eg $0 acc --tags eb -will provision the tag eb on acc" - -if [ "$#" -eq 0 ]; then - echo -e "$help" - exit 1 -fi - -env=$1 -shift - -# Set some variables -environment_dir="environments-external/$env/" -inventory="environments-external/$env/inventory" -playbook="provision.yml" - -if ! [ -e "$inventory" ]; then - echo "Inventory file '$inventory' for environment '$env' not found." 
- exit 1 -fi - -# Download extra roles when requirements.yml is present -# and roles-external is not a symlink and is not a git repository -# and the file .no-provision is not present -if [ -d roles-external ] && [ -f "$environment_dir"/requirements.yml ] -then - if [ -L roles-external ] || [ -d roles-external/.git ] \ - || [ -f roles-external/.no-provision ] - then - echo "Skipping download of extra roles" - else - echo "Downloading roles to roles-external" - ansible-galaxy install -r "$environment_dir"/requirements.yml -f -p . - fi -else - echo "No extra roles found to be downloaded" -fi - -cmd=$( - cat <<-EOF -ansible-playbook -i $inventory $playbook -e environment_dir=$environment_dir $@ -EOF -) -echo "executing $cmd" | tr -d "\n" | tr -s ' ' -echo - -$cmd diff --git a/provision.yml b/provision.yml index 3e697658b..95555f883 100644 --- a/provision.yml +++ b/provision.yml @@ -1,5 +1,6 @@ --- -- hosts: all +- name: Gather secrets + hosts: all gather_facts: no tasks: - name: Read vars from secrets file @@ -8,133 +9,304 @@ tags: - always -- hosts: base +- name: Deploy rsyslog + hosts: base gather_facts: yes become: true roles: - role: rsyslog - tags: ['core', 'base', 'rsyslog'] + tags: ['core', 'base', 'rsyslog'] + +- name: Deploy iptables + hosts: base + gather_facts: yes + become: true + roles: - role: iptables when: - iptables_enable | bool - tags: ['core', 'base', 'iptables'] - - role: selfsigned_certs - when: - - "{{ use_selfsigned_certs | default(false) | bool }}" - tags: ['core', 'base', 'selfsigned_certs'] + tags: ['core', 'base', 'iptables'] -- hosts: loadbalancer +- name: Deploy loadbalancer + hosts: loadbalancer gather_facts: true become: true roles: - role: haproxy - tags: ['core', 'loadbalancer_ha', 'loadbalancer', 'lb'] + tags: ['core', 'loadbalancer_ha', 'loadbalancer', 'lb'] + +- name: Deploy loadbalancer keepalived + hosts: loadbalancer_ha + gather_facts: true + become: true + roles: - role: keepalived - when: - - "'loadbalancer_ha' in groups and 
inventory_hostname in groups['loadbalancer_ha']" - tags: ['core', 'loadbalancer_ha', 'keepalived'] + tags: ['core', 'loadbalancer_ha', 'keepalived'] + +- name: Deploy loadbalancer bind + hosts: loadbalancer_ha + gather_facts: true + become: true + roles: - role: bind - when: - - "'loadbalancer_ha' in groups and inventory_hostname in groups['loadbalancer_ha']" - tags: ['core', 'loadbalancer_ha', 'bind'] + tags: ['core', 'loadbalancer_ha', 'bind'] -- hosts: db_mysql +- name: Deploy standalone mariadb + hosts: mysql_standalone gather_facts: no become: true serial: 1 roles: - role: mysql - when: - - inventory_hostname in groups['storage'] - tags: ['core', 'db_mysql', 'mysql'] + tags: ['core', 'db_mysql', 'mysql'] + +# todo: clearer groupnames +# for now: +# dbcluster : all servers in galera cluster +# dbcluster_nodes: just full nodes with mariadb in galera cluster +# mysql_only: mariadb not in galera cluster +# db_mysql: all mariadb servers, so mysql_only + dbcluster_nodes + +- name: Deploy galera + hosts: dbcluster + gather_facts: no + become: true + serial: 1 + roles: - role: galera - when: - - inventory_hostname in groups['dbcluster'] tags: ['core', 'db_mysql', 'galera'] + +- name: Deploy galera keepalived + hosts: dbcluster_nodes + gather_facts: no + become: true + serial: 1 + roles: - role: keepalived - when: - - inventory_hostname in groups['dbcluster_nodes'] - tags: ['core', 'db_mysql', 'keepalived'] + tags: ['core', 'db_mysql', 'keepalived'] + +- name: Create mysql users + hosts: db_mysql + gather_facts: no + become: true + roles: - role: galera_create_users - tags: ['core', 'db_mysql', 'galera', 'galera_create_users'] + tags: ['core', 'db_mysql', 'galera', 'galera_create_users'] -- hosts: mongo_servers +- name: Deploy mongo servers + hosts: mongo_servers gather_facts: yes become: true + serial: 1 roles: - role: mongo - tags: ['core', 'mongo'] + tags: ['core', 'mongo'] -- hosts: elk +- name: Deploy stats + hosts: stats gather_facts: true become: true roles: - - 
role: elk - tags: ['elk' ] + - role: influxdb + tags: ['influxdb' ] -- hosts: stats - gather_facts: true +# Separate groups for all containerized apps +# Dividing apps across the container services should be set in +# the inventory not in the playbook, this way you can easily change +# it for different environments + +- name: Deploy attribute-aggregation app + hosts: docker_attribute_aggregation become: true roles: - - role: influxdb - tags: ['influxdb' ] + - role: attribute_aggregation + tags: ['aa', 'attribute-aggregation'] -- hosts: stepuppapp +- name: Deploy dashboard app + hosts: docker_dashboard become: true roles: - - { role: stepupwebauthn, tags: ['stepupwebauthn','stepup' ] } - - { role: stepupazuremfa, tags: ['stepupazuremfa', 'stepup'] } - - { role: stepupmiddleware, tags: ['stepupmiddleware' , 'stepup'] } - - { role: stepupgateway, tags: ['stepupgateway' , 'stepup'] } - - { role: stepupselfservice, tags: ['stepupselfservice' , 'stepup'] } - - { role: stepupra , tags: ['stepupra' , 'stepup'] } - - { role: stepupgateway , tags: ['stepupgateway' , 'stepup'] } + - role: dashboard + tags: ['dashboard'] -- hosts: docker_apps1,docker_apps2 +- name: Deploy diyidp app + hosts: docker_diyidp become: true roles: - - { role: docker, tags: ['docker' ] } + - diyidp + tags: ['diyidp'] -- hosts: mujina +- name: Deploy engineblock app + hosts: docker_engineblock become: true roles: - - { role: mujina-idp, tags: ["mujina-idp", "mujina"] } - - { role: mujina-sp, tags: ["mujina-sp", "mujina"] } + - engineblock + tags: ['engineblock', 'eb'] -- hosts: docker_apps1 +- name: Deploy invite app + hosts: docker_invite become: true roles: - - { role: invite, tags: ['invite' ] } - - { role: dashboard, tags: ["dashboard"] } - - { role: teams, tags: ["teams"] } - - { role: pdp, tags: ["pdp"] } - - { role: voot, tags: ["voot"] } - - { role: attribute-aggregation, tags: ["aa", "attribute-aggregation"] } - - { role: oidc-playground, tags: ["oidc-playground"] } - - { role: myconext, 
tags: ["myconext"] } - - { role: manage, tags: ["manage"] } - - { role: oidcng, tags: ["oidcng"] } - - { role: stats, tags: ["stats"] } - - { role: diyidp, tags: ["diyidp"] } - - { role: profile, tags: ["profile"] } - - { role: lifecycle, tags: ["lifecycle"] } - - { role: stepuptiqr, tags: ['stepuptiqr' , 'stepup'] } + - invite + tags: ['invite'] -- hosts: docker_apps2 +- name: Deploy lifecycle app + hosts: docker_lifecycle become: true roles: - - { role: engineblock, tags: ["eb"] } - - { role: stepupgateway, tags: [ 'stepupgateway' , 'stepup' ] } + - lifecycle + tags: ['lifecycle'] -- hosts: docker_mariadb +- name: Deploy manage app + hosts: docker_manage become: true roles: - - { role: mariadbdocker, tags: ['mariadbdocker']} - - { role: mongodbdocker, tags: ['mongodbdocker']} + - role: manage + tags: ['manage'] + +- name: Deploy mujina-idp app + hosts: docker_mujina_idp + become: true + roles: + - mujina-idp + tags: ['mujina-idp', 'mujina'] + +- name: Deploy mujina-sp app + hosts: docker_mujina_sp + become: true + roles: + - mujina-sp + tags: ['mujina-sp', 'mujina'] + +- name: Deploy myconext app + hosts: docker_myconext + become: true + roles: + - myconext + tags: ['myconext'] + +- name: Deploy oidcng app + hosts: docker_oidcng + become: true + roles: + - oidcng + tags: ['oidcng'] + +- name: Deploy oidc-playground app + hosts: docker_oidc_playground + become: true + roles: + - oidc-playground + tags: ['oidc-playground'] + +- name: Deploy openaccess app & server + hosts: docker_openaccess + become: true + roles: + - openaccess + tags: ['openaccess'] + +- name: Deploy pdp app + hosts: docker_pdp + become: true + roles: + - role: pdp + tags: ['pdp'] + +- name: Deploy profile app + hosts: docker_profile + become: true + roles: + - role: profile + tags: ['profile'] + +- name: Deploy spdashboard app + hosts: docker_spdashboard + become: true + roles: + - spdashboard + tags: ['spdashboard'] + +- name: Deploy stats app + hosts: docker_stats + become: true + roles: + - 
stats + tags: ['stats'] + +- name: Deploy stepupazuremfa app + hosts: docker_stepupazuremfa + become: true + roles: + - stepupazuremfa + tags: ['stepupazuremfa', 'stepup'] + +- name: Deploy stepupgateway app + hosts: docker_stepupgateway + become: true + roles: + - stepupgateway + tags: ['stepupgateway', 'stepup'] + +- name: Deploy stepupmiddleware app + hosts: docker_stepupmiddleware + become: true + roles: + - stepupmiddleware + tags: ['stepupmiddleware', 'stepup'] + +- name: Deploy stepupra app + hosts: docker_stepupra + become: true + roles: + - stepupra + tags: ['stepupra', 'stepup'] + +- name: Deploy stepupselfservice app + hosts: docker_stepupselfservice + become: true + roles: + - stepupselfservice + tags: ['stepupselfservice', 'stepup'] + +- name: Deploy stepuptiqr app + hosts: docker_stepuptiqr + become: true + roles: + - stepuptiqr + tags: ['stepuptiqr', 'stepup'] + +- name: Deploy stepupwebauthn app + hosts: docker_stepupwebauthn + become: true + roles: + - role: stepupwebauthn + tags: ['stepupwebauthn', 'stepup'] + +- name: Deploy teams app + hosts: docker_teams + become: true + roles: + - teams + tags: ['teams'] + +- name: Deploy voot app + hosts: docker_voot + become: true + roles: + - voot + tags: ['voot'] -- hosts: docker_minio +- name: Deploy minio app + hosts: docker_minio become: true roles: - - { role: minio, tags: ["minio"] } + - minio + tags: ['minio'] + +- hosts: docker_mariadb + become: true + roles: + - { role: mariadbdocker, tags: ['mariadbdocker']} + - { role: mongodbdocker, tags: ['mongodbdocker']} -- import_playbook: "{{ environment_dir }}/playbook.yml" diff --git a/roles/access/WORK_TODO.txt b/roles/access/WORK_TODO.txt deleted file mode 100644 index 30405a4d2..000000000 --- a/roles/access/WORK_TODO.txt +++ /dev/null @@ -1,7 +0,0 @@ -Placeholder for the work to be done yet - -@Ines, see the access: block in -https://gitlab.surf.nl/surfconext/surfconext-deploy-environments/-/blob/main/test2/group_vars/test2.yml -for the information 
about minio on test2. - -JIRA properties can be duplicated from idp-dashboard diff --git a/roles/attribute-aggregation/tasks/main.yml b/roles/attribute-aggregation/tasks/main.yml deleted file mode 100644 index a8959123f..000000000 --- a/roles/attribute-aggregation/tasks/main.yml +++ /dev/null @@ -1,70 +0,0 @@ ---- -- name: Create directory to keep configfile - ansible.builtin.file: - dest: "/opt/openconext/attribute-aggregation" - state: directory - owner: root - group: root - mode: "0770" - -- name: Place the serverapplication configfiles - ansible.builtin.template: - src: "{{ item }}.j2" - dest: /opt/openconext/attribute-aggregation/{{ item }} - owner: root - group: root - mode: "0644" - with_items: - - serverapplication.yml - - logback.xml - - attributeAuthorities.yml - - serviceProviderConfig.json - - apachelink.conf - notify: restart attribute-aggregationserver - -- name: Add the MariaDB docker network to the list of networks when MariaDB runs in Docker - ansible.builtin.set_fact: - aa_docker_networks: - - name: loadbalancer - - name: openconext_mariadb - when: mariadb_in_docker | default(false) | bool - -- name: Create and start the server container - community.docker.docker_container: - name: aaserver - image: ghcr.io/openconext/openconext-attribute-aggregation/aa-server:{{ attribute_aggregation_server_version }} - pull: true - restart_policy: "always" - state: started - networks: "{{ aa_docker_networks }}" - mounts: - - source: /opt/openconext/attribute-aggregation/serverapplication.yml - target: /application.yml - type: bind - - source: /opt/openconext/attribute-aggregation/logback.xml - target: /logback.xml - type: bind - - source: /opt/openconext/attribute-aggregation/attributeAuthorities.yml - target: /attributeAuthorities.yml - type: bind - - source: /opt/openconext/attribute-aggregation/serviceProviderConfig.json - target: /serviceProviderConfig.json - type: bind - command: "-Xmx128m --spring.config.location=./" - etc_hosts: - host.docker.internal: 
host-gateway - healthcheck: - test: - [ - "CMD", - "wget", - "-no-verbose", - "--tries=1", - "--spider", - "http://localhost:8080/aa/api/internal/health", - ] - interval: 10s - timeout: 10s - retries: 3 - start_period: 10s - register: aaservercontainer diff --git a/roles/attribute-aggregation/defaults/main.yml b/roles/attribute_aggregation/defaults/main.yml similarity index 100% rename from roles/attribute-aggregation/defaults/main.yml rename to roles/attribute_aggregation/defaults/main.yml diff --git a/roles/attribute_aggregation/handlers/main.yml b/roles/attribute_aggregation/handlers/main.yml new file mode 100644 index 000000000..c5d574e14 --- /dev/null +++ b/roles/attribute_aggregation/handlers/main.yml @@ -0,0 +1,19 @@ +- name: "Restart attribute-aggregationserver" + community.docker.docker_container: + name: aaserver + state: started + restart: true + # avoid recreating the container, as that creates unexpected data loss according to the docker_container module notes + comparisons: + '*': ignore + when: "aa_servercontainer is success and aa_servercontainer is not changed" + +- name: "Restart attribute-aggregationlink" + community.docker.docker_container: + name: aalink + state: started + restart: true + # avoid recreating the container, as that creates unexpected data loss according to the docker_container module notes + comparisons: + '*': ignore + when: "aa_linkcontainer is success and aa_linkcontainer is not changed" diff --git a/roles/attribute_aggregation/tasks/main.yml b/roles/attribute_aggregation/tasks/main.yml new file mode 100644 index 000000000..0631e8fdb --- /dev/null +++ b/roles/attribute_aggregation/tasks/main.yml @@ -0,0 +1,143 @@ +--- +- name: Create directory to keep configfile + ansible.builtin.file: + dest: "/opt/openconext/attribute-aggregation" + state: "directory" + owner: "root" + group: "root" + mode: "0770" + +- name: Place the server application configfiles + ansible.builtin.template: + src: "{{ item }}.j2" + dest: "/opt/openconext/attribute-aggregation/{{ item }}" + owner: "root" 
+ group: "root" + mode: "0644" + with_items: + - "serverapplication.yml" + - "logback.xml" + - "attributeAuthorities.yml" + - "serviceProviderConfig.json" + notify: + - "Restart attribute-aggregationserver" + +- name: Place the link application configfiles + ansible.builtin.template: + src: "{{ item }}.j2" + dest: "/opt/openconext/attribute-aggregation/{{ item }}" + owner: "root" + group: "root" + mode: "0644" + with_items: + - "apachelink.conf" + notify: + - "Restart attribute-aggregationlink" + +- name: Add the MariaDB docker network to the list of networks when MariaDB runs in Docker + ansible.builtin.set_fact: + aa_docker_networks: + - name: "loadbalancer" + - name: "openconext_mariadb" + when: "mariadb_in_docker | default(false) | bool" + +- name: Create and start the server container + community.docker.docker_container: + name: "aaserver" + image: "ghcr.io/openconext/openconext-attribute-aggregation/aa-server:{{ attribute_aggregation_version }}" + pull: true + restart_policy: "always" + state: "started" + networks: "{{ aa_docker_networks }}" + mounts: + - source: "/opt/openconext/attribute-aggregation/serverapplication.yml" + target: "/application.yml" + read_only: true + type: "bind" + - source: "/opt/openconext/attribute-aggregation/logback.xml" + target: "/logback.xml" + read_only: true + type: "bind" + - source: "/opt/openconext/attribute-aggregation/attributeAuthorities.yml" + target: "/attributeAuthorities.yml" + read_only: true + type: "bind" + - source: "/opt/openconext/attribute-aggregation/serviceProviderConfig.json" + target: "/serviceProviderConfig.json" + read_only: true + type: "bind" + command: "-Xmx128m --spring.config.location=./" + etc_hosts: + host.docker.internal: "host-gateway" + labels: + traefik.http.routers.aaserver.rule: "Host(`aa.{{ base_domain }}`)" + traefik.http.routers.aaserver.tls: "true" + traefik.enable: "true" + healthcheck: + test: + [ + "CMD", + "wget", + "-no-verbose", + "--tries=1", + "--spider", + 
"http://localhost:8080/internal/health", + ] + interval: "10s" + timeout: "10s" + retries: 3 + start_period: "10s" + notify: "Restart attribute-aggregationserver" + register: "aa_servercontainer" + +- name: Create the gui link container + community.docker.docker_container: + name: "aalink" + image: "ghcr.io/openconext/openconext-basecontainers/apache2-shibboleth:latest" + pull: true + restart_policy: "always" + state: "started" + networks: "{{ aa_docker_networks }}" + mounts: + - source: "/opt/openconext/attribute-aggregation/apachelink.conf" + target: "/etc/apache2/sites-enabled/000-default.conf" + read_only: true + type: "bind" + - source: "/etc/localtime" + target: "/etc/localtime" + read_only: true + type: "bind" + - source: "/opt/openconext/common/favicon.ico" + target: "/var/www/favicon.ico" + read_only: true + type: "bind" + etc_hosts: + host.docker.internal: "host-gateway" + labels: + traefik.http.routers.aalink.rule: "Host(`link.{{ base_domain }}`)" + traefik.http.routers.aalink.tls: "true" + traefik.enable: "true" + healthcheck: + test: ["CMD", "curl", "--fail", "http://localhost/internal/health"] + interval: "10s" + timeout: "10s" + retries: 3 + start_period: "10s" + hostname: "attribute-link" + env: + HTTPD_CSP: "{{ httpd_csp.lenient_with_static_img }}" + HTTPD_SERVERNAME: "link.{{ base_domain }}" + OPENCONEXT_INSTANCENAME: "{{ instance_name }}" + OPENCONEXT_ENGINE_LOGOUT_URL: "https://engine.{{ base_domain }}/logout" + OPENCONEXT_HELP_EMAIL: "{{ support_email }}" + SHIB_ENTITYID: "https://link.{{ base_domain }}/shibboleth" + SHIB_REMOTE_ENTITYID: "https://engine.{{ base_domain }}/authentication/idp/metadata" + SHIB_REMOTE_METADATA: "{{ shibboleth_metadata_sources.engine }}" + register: "aa_linkcontainer" + +- name: Remove obsolete pdp containers + community.docker.docker_container: + name: "{{ item }}" + state: "absent" + loop: + - "aagui" diff --git a/roles/attribute-aggregation/templates/apachelink.conf.j2 
b/roles/attribute_aggregation/templates/apachelink.conf.j2 similarity index 68% rename from roles/attribute-aggregation/templates/apachelink.conf.j2 rename to roles/attribute_aggregation/templates/apachelink.conf.j2 index ca0f3897f..f0cdcfda5 100644 --- a/roles/attribute-aggregation/templates/apachelink.conf.j2 +++ b/roles/attribute_aggregation/templates/apachelink.conf.j2 @@ -12,8 +12,8 @@ Redirect /orcid https://link.{{ base_domain }}/aa/api/client/information.html ProxyPass /Shibboleth.sso ! ProxyPass /redirect http://aaserver:8080/aa/api/redirect -ProxyPass /internal/health http://aaserver:8080/aa/api/internal/health -ProxyPass /internal/info http://aaserver:8080/aa/api/internal/info +ProxyPass /internal/health http://aaserver:8080/internal/health +ProxyPass /internal/info http://aaserver:8080/internal/info ProxyPass /aa/api http://aaserver:8080/aa/api ProxyPassReverse /aa/api http://aaserver:8080/aa/api @@ -22,3 +22,18 @@ ProxyPassReverse /aa/api/client http://aaserver:8080/aa/api/client Header always set X-Frame-Options "DENY" Header always set Referrer-Policy "strict-origin-when-cross-origin" Header always set X-Content-Type-Options "nosniff" + + + AuthType shibboleth + ShibUseHeaders On + ShibRequireSession On + Require valid-user + + + + Require all granted + + + + Require all denied + diff --git a/roles/attribute-aggregation/templates/attributeAuthorities.yml.j2 b/roles/attribute_aggregation/templates/attributeAuthorities.yml.j2 similarity index 99% rename from roles/attribute-aggregation/templates/attributeAuthorities.yml.j2 rename to roles/attribute_aggregation/templates/attributeAuthorities.yml.j2 index ff7b79e17..c98fbd4c7 100644 --- a/roles/attribute-aggregation/templates/attributeAuthorities.yml.j2 +++ b/roles/attribute_aggregation/templates/attributeAuthorities.yml.j2 @@ -98,6 +98,7 @@ authorities: password: "{{ aa.invite_password }}", timeOut: 5000, type: "rest", + requestMethod: "GET", pathParams: [ { index: 1, sourceAttribute: 
"urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified" } ], diff --git a/roles/attribute-aggregation/templates/logback.xml.j2 b/roles/attribute_aggregation/templates/logback.xml.j2 similarity index 100% rename from roles/attribute-aggregation/templates/logback.xml.j2 rename to roles/attribute_aggregation/templates/logback.xml.j2 diff --git a/roles/attribute-aggregation/templates/serverapplication.yml.j2 b/roles/attribute_aggregation/templates/serverapplication.yml.j2 similarity index 96% rename from roles/attribute-aggregation/templates/serverapplication.yml.j2 rename to roles/attribute_aggregation/templates/serverapplication.yml.j2 index 8e49715b4..e3a068d3e 100644 --- a/roles/attribute-aggregation/templates/serverapplication.yml.j2 +++ b/roles/attribute_aggregation/templates/serverapplication.yml.j2 @@ -5,10 +5,9 @@ logging: aa: DEBUG server: - # The port to where this Spring Boot application listens to. e.g. http://localhost:{{ springapp_tcpport }} + # The port to where this Spring Boot application listens to. e.g. 
http://localhost:8080 port: 8080 servlet: - context-path: /aa/api session: timeout: 28800 cookie: diff --git a/roles/attribute-aggregation/templates/serviceProviderConfig.json.j2 b/roles/attribute_aggregation/templates/serviceProviderConfig.json.j2 similarity index 100% rename from roles/attribute-aggregation/templates/serviceProviderConfig.json.j2 rename to roles/attribute_aggregation/templates/serviceProviderConfig.json.j2 diff --git a/roles/attribute-aggregation/vars/main.yml b/roles/attribute_aggregation/vars/main.yml similarity index 100% rename from roles/attribute-aggregation/vars/main.yml rename to roles/attribute_aggregation/vars/main.yml diff --git a/roles/dashboard/defaults/main.yml b/roles/dashboard/defaults/main.yml index 1a2678d68..e18cfbac3 100644 --- a/roles/dashboard/defaults/main.yml +++ b/roles/dashboard/defaults/main.yml @@ -1,2 +1,4 @@ dashboard_organization: SURFconext dashboard_hide_tabs: none +dashboard_server_restart_policy: always +dashboard_server_restart_retries: 0 diff --git a/roles/dashboard/tasks/main.yml b/roles/dashboard/tasks/main.yml index 1a904966f..cffc075cc 100644 --- a/roles/dashboard/tasks/main.yml +++ b/roles/dashboard/tasks/main.yml @@ -27,7 +27,8 @@ TZ: "{{ timezone }}" image: ghcr.io/openconext/openconext-dashboard/dashboard-server:{{ dashboard_server_version }} pull: true - restart_policy: "always" + restart_policy: "{{ dashboard_server_restart_policy }}" + restart_retries: "{{ dashboard_server_restart_retries }}" # Only for restart policy on-failure state: started networks: - name: "loadbalancer" diff --git a/roles/engineblock/defaults/main.yml b/roles/engineblock/defaults/main.yml index f267d051d..12cf0766c 100644 --- a/roles/engineblock/defaults/main.yml +++ b/roles/engineblock/defaults/main.yml @@ -16,6 +16,7 @@ engine_api_feature_consent_listing: 1 engine_api_feature_consent_remove: 0 engine_api_feature_metadata_api: 1 engine_api_feature_deprovision: 1 +engine_feature_send_user_attributes: 0 # Cutoff point for 
showing unfiltered IdPs on the WAYF engine_wayf_cutoff_point_for_showing_unfiltered_idps: 50 @@ -62,10 +63,14 @@ engine_api_deprovision_user: lifecycle # Minimum execution time in milliseconds when a received response is deemed invalid engine_minimum_execution_time_on_invalid_received_response: 5000 +# the timeout used when querying external sources (PDP, AA, etc) +engine_http_client_timeout: 10 + # Settings for detecting whether the user is stuck in a authentication loop within his session: # within a certain time frame in seconds, how many authentication procedures for the same SP are allowed engine_time_frame_for_authentication_loop_in_seconds: 60 engine_maximum_authentication_procedures_allowed: 5 +engine_maximum_authentications_per_session: 30 # This PCRE regex is used to blacklist incoming AuthnContextClassRef attributes on. If an empty string is used # the validation is skipped. The validator will throw an exception if the used regex is invalid. diff --git a/roles/engineblock/templates/parameters.yml.j2 b/roles/engineblock/templates/parameters.yml.j2 index 19a23f77e..42d9d6f45 100644 --- a/roles/engineblock/templates/parameters.yml.j2 +++ b/roles/engineblock/templates/parameters.yml.j2 @@ -83,6 +83,11 @@ parameters: api.users.deprovision.username: {{ engine_api_deprovision_user }} api.users.deprovision.password: {{ engine_api_deprovision_password | replace("%","%%") }} + ########################################################################################## + ## CLIENT SETTINGS + ########################################################################################## + ## Currently this is used for the outgoing requests with the PDP and AA client + http_client.timeout: {{ engine_http_client_timeout | int }} ########################################################################################## ## PDP SETTINGS @@ -190,6 +195,7 @@ parameters: ## Settings for detecting whether the user is stuck in a authentication loop within his session 
time_frame_for_authentication_loop_in_seconds: {{ engine_time_frame_for_authentication_loop_in_seconds | int }} maximum_authentication_procedures_allowed: {{ engine_maximum_authentication_procedures_allowed | int }} + maximum_authentications_per_session: {{ engine_maximum_authentications_per_session | int }} ## Store attributes with their values, meaning that if an Idp suddenly ## sends a new value (like a new e-mail address) consent has to be @@ -230,7 +236,8 @@ parameters: feature_enable_consent: {{ engine_feature_enable_consent | bool | to_json }} feature_stepup_sfo_override_engine_entityid: {{ engine_feature_stepup_override_entityid | bool | to_json }} feature_enable_idp_initiated_flow: {{ engine_feature_idp_initiated_flow | bool | to_json }} - ########################################################################################## + feature_stepup_send_user_attributes: {{ engine_feature_send_user_attributes | bool | to_json }} + ########################################################################################## ## PROFILE SETTINGS ########################################################################################## ## Location of Profile @@ -270,6 +277,11 @@ parameters: ## You can override the default entityID used by Engineblock for its callout to stepup gateway. ## You also need to enable the feature toggle feature_stepup_sfo_override_engine_entityid above. 
stepup.sfo.override_engine_entityid: '{{ engine_stepup_override_entityid }}' + ## The name of the SAML attributes to send to Stepup with the GSSP SAML extension when + ## feature_stepup_send_user_attributes is enabled + stepup.callout_user_attributes: + - urn:mace:dir:attribute-def:mail + - urn:mace:terena.org:attribute-def:schacHomeOrganization ########################################################################################## ## THEME SETTINGS diff --git a/roles/galera/tasks/arbiter_node.yml b/roles/galera/tasks/arbiter_node.yml index 3513c9bd3..66d88b18c 100644 --- a/roles/galera/tasks/arbiter_node.yml +++ b/roles/galera/tasks/arbiter_node.yml @@ -5,12 +5,18 @@ state: present ignoreerrors: yes -# Add MariaDB repo and key -- name: Add MariaDB.org repository +# todo add this to a generic file and apply to aribtrator and cluster node +- name: Add MariaDB.org repository Rocky 8 template: - src: "mariadb.repo.j2" + src: "mariadb.repo.rocky8.j2" dest: "/etc/yum.repos.d/mariadb.repo" - when: ansible_os_family == 'RedHat' + when: ansible_distribution_major_version == '8' + +- name: Add MariaDB.org repository Rocky 9 + template: + src: "mariadb.repo.rocky9.j2" + dest: "/etc/yum.repos.d/mariadb.repo" + when: ansible_distribution_major_version == '9' - name: Install Galera, rsync ansible.builtin.package: diff --git a/roles/galera/tasks/cluster_nodes.yml b/roles/galera/tasks/cluster_nodes.yml index 60817bc44..398c829a5 100644 --- a/roles/galera/tasks/cluster_nodes.yml +++ b/roles/galera/tasks/cluster_nodes.yml @@ -211,16 +211,22 @@ - galera_bootstrap_node == inventory_hostname # Add cluster user +# todo: this task shows a change in --check mode although it is not necessarily +# changing anything for real, skipping in check mode is also not ideal +# maybe add a task that runs in check mode that only checks for presence of the user +# also, run_once could break things if there would be multiple galera clusters +# not sure whether its important though.. 
- name: add mariadb cluster sst user mysql_user: name: "{{ mariadb_cluster_user }}" password: "{{ mariadb_cluster_password }}" - priv: "*.*:RELOAD,PROCESS,LOCK TABLES,BINLOG MONITOR,REPLICA MONITOR" + priv: "{{ mariadb_cluster_user_privs | default('*.*:RELOAD,PROCESS,LOCK TABLES,BINLOG MONITOR,SLAVE MONITOR') }}" state: present login_user: root login_password: "{{ mariadb_root_password }}" login_unix_socket: /var/lib/mysql/mysql.sock run_once: true + no_log: true # Add backup user - name: add mariadb backup user @@ -232,8 +238,8 @@ login_user: root login_password: "{{ mariadb_root_password }}" login_unix_socket: /var/lib/mysql/mysql.sock - run_once: true - + run_once: true # run once because it is synced to other cluster nodes + no_log: true - name: Add Galera clustercheck user, used for keepalived to connect mysql_user: @@ -244,7 +250,8 @@ login_user: root login_password: "{{ mariadb_root_password }}" login_unix_socket: /var/lib/mysql/mysql.sock - run_once: true + run_once: true # run once because it is synced to other cluster nodes + no_log: true - name: Create the backup directory file: @@ -280,23 +287,7 @@ login_unix_socket: /var/lib/mysql/mysql.sock with_items: - "{{ databases.names }}" - run_once: true - tags: galera_create_db - -- name: Create database super users for provisioning users and databases - mysql_user: - name: "{{ item.0.name }}" - host: "{{ item.1 }}" - password: "{{ item.0.password }}" - priv: "{{ item.0.privs | join('/') }}" - state: present - login_user: root - login_password: "{{ mariadb_root_password }}" - login_unix_socket: /var/lib/mysql/mysql.sock - with_subelements: - - "{{ galera_root_users }}" - - hosts - run_once: true + run_once: true # run once because it is synced to other cluster nodes tags: galera_create_db - name: MySQL my.cnf diff --git a/roles/invite/defaults/main.yml b/roles/invite/defaults/main.yml index 71b3c9b3e..a32f7600a 100644 --- a/roles/invite/defaults/main.yml +++ b/roles/invite/defaults/main.yml @@ -15,5 +15,8 @@ 
invite_manage_provision_oauth_rs_scopes: "openid" invite_mock_install: false # Override is in the dockerX.env host_var files invite_cronjobmaster: true +invite_logback_json: true invite_docker_networks: - name: loadbalancer +invite_server_restart_policy: always +invite_server_restart_retries: 0 diff --git a/roles/invite/tasks/main.yml b/roles/invite/tasks/main.yml index db8dfe285..3553331f1 100644 --- a/roles/invite/tasks/main.yml +++ b/roles/invite/tasks/main.yml @@ -16,6 +16,7 @@ mode: "0644" with_items: - serverapplication.yml + - logback.xml notify: restart inviteserver - name: Copy private key for manage secrets encryption @@ -60,15 +61,19 @@ name: inviteserver env: TZ: "{{ timezone }}" - image: ghcr.io/openconext/openconext-invite/inviteserver:{{ invite_server_version }} + image: ghcr.io/openconext/openconext-invite/inviteserver:{{ invite_version }} pull: true - restart_policy: "always" + restart_policy: "{{ invite_server_restart_policy }}" + restart_retries: "{{ invite_server_restart_retries }}" # Only for restart policy on-failure state: started networks: "{{ invite_docker_networks }}" mounts: - source: /opt/openconext/invite/serverapplication.yml target: /application.yml type: bind + - source: /opt/openconext/invite/logback.xml + target: /logback.xml + type: bind - source: /opt/openconext/invite/private_key_pkcs8.pem target: /private_key_pkcs8.pem type: bind @@ -89,7 +94,7 @@ - name: Create the client container community.docker.docker_container: name: inviteclient - image: ghcr.io/openconext/openconext-invite/inviteclient:{{ invite_client_version }} + image: ghcr.io/openconext/openconext-invite/inviteclient:{{ invite_version }} pull: true restart_policy: "always" state: started @@ -115,7 +120,7 @@ - name: Create the welcome container community.docker.docker_container: name: invitewelcome - image: ghcr.io/openconext/openconext-invite/invitewelcome:{{ invite_welcome_version }} + image: ghcr.io/openconext/openconext-invite/invitewelcome:{{ invite_version }} 
pull: true restart_policy: "always" state: started @@ -142,7 +147,7 @@ - name: Create and start the mock provisioning container community.docker.docker_container: name: inviteprovisioningmock - image: ghcr.io/openconext/openconext-invite/inviteprovisioningmock:{{ invite_mock_version }} + image: ghcr.io/openconext/openconext-invite/inviteprovisioningmock:{{ invite_version }} pull: true restart_policy: "always" state: started diff --git a/roles/invite/templates/logback.xml.j2 b/roles/invite/templates/logback.xml.j2 new file mode 100644 index 000000000..95ef4fe23 --- /dev/null +++ b/roles/invite/templates/logback.xml.j2 @@ -0,0 +1,42 @@ +#jinja2:lstrip_blocks: True + + + + + + %d{ISO8601} %5p [%t] %logger{40}:%L - %m%n + + + + + host.docker.internal:514 + + {"app":"invite"} + true + + [ignore] + [ignore] + [ignore] + + + + invitejson: + + + + + + + + + + + + + + {% if invite_logback_json | bool %} + + {%endif%} + + + diff --git a/roles/invite/templates/serverapplication.yml.j2 b/roles/invite/templates/serverapplication.yml.j2 index 420a2977d..ff55ab04f 100644 --- a/roles/invite/templates/serverapplication.yml.j2 +++ b/roles/invite/templates/serverapplication.yml.j2 @@ -1,11 +1,12 @@ --- logging: + config: file:///logback.xml level: org.springframework: WARN org.springframework.web: WARN org.springframework.security: WARN com.zaxxer.hikari: ERROR - access: DEBUG + invite: DEBUG server: port: 8080 @@ -23,7 +24,7 @@ spring: banner-mode: "off" session: jdbc: - initialize-schema: always + initialize-schema: never {% if invite_cronjobmaster is defined and invite_cronjobmaster == false %} cleanup-cron: "-" {% else %} @@ -117,6 +118,10 @@ config: past-date-allowed: {{ invite.past_date_allowed }} performance-seed-allowed: {{ invite.performance_seed_allowed }} eduid-idp-schac-home-organization: {{ invite.eduid_idp_schac_home_organization }} + # Determines the languages available for switching language, supported are 'nl', 'en' and 'pt' + languages: "nl, en" + environment: {{ 
environment_shortname }} + feature: limit-institution-admin-role-visibility: {{ invite.limit_institution_admin_role_visibility }} @@ -125,49 +130,47 @@ feature: # We don't encode in-memory passwords, but they are reused so do NOT prefix them with {noop} external-api-configuration: remote-users: - - - username: {{ invite.vootuser }} + - username: {{ invite.vootuser }} password: "{{ invite.vootsecret }}" scopes: - voot - - - username: {{ invite.teamsuser}} + - username: {{ invite.teamsuser}} password: "{{ invite.teamssecret }}" scopes: - teams - - - username: {{ aa.invite_username }} + - username: {{ aa.invite_username }} password: "{{ invite_attribute_aggregation_secret }}" scopes: - attribute_aggregation - - - username: {{ invite.lifecycle_user }} + - username: {{ invite.lifecycle_user }} password: "{{ invite.lifecycle_secret }}" scopes: - lifecycle - - - username: {{ invite.profile_user }} + - username: internal + password: "{{ invite.internal_secret }}" + scopes: + - actuator + - username: {{ invite.profile_user }} password: "{{ invite.profile_secret }}" scopes: - profile - - - username: {{ invite.sp_dashboard_user }} + - username: {{ invite.sp_dashboard_user }} password: "{{ invite.sp_dashboard_secret }}" + organizationGUIDFallback: {{ invite.surf_idp_organization_guid }} scopes: - sp_dashboard applications: - manageId: {{ invite.sp_dashboard_manage_id }} manageType: SAML20_SP - - - username: {{ invite.access_user }} + - username: {{ invite.access_user }} password: "{{ invite.access_secret }}" + organizationGUIDFallback: {{ invite.surf_idp_organization_guid }} scopes: - access applications: - manageId: {{ invite.access_manage_id }} manageType: OIDC10_RP - voot: group_urn_domain: "{{ invite.group_urn_domain }}" @@ -208,15 +211,15 @@ management: endpoints: web: exposure: - include: "health,info,mappings" + include: "health,info,prometheus" base-path: "/internal" endpoint: info: enabled: true health: enabled: true - mappings: - enabled: true + prometheus: + access: 
unrestricted info: git: mode: full diff --git a/roles/iptables/tasks/main.yml b/roles/iptables/tasks/main.yml index 5b16b4103..f6b0a390e 100644 --- a/roles/iptables/tasks/main.yml +++ b/roles/iptables/tasks/main.yml @@ -26,6 +26,7 @@ owner: root group: root mode: "0644" + backup: true notify: - "restart iptables" @@ -36,6 +37,7 @@ owner: root group: root mode: "0644" + backup: true notify: - "restart ip6tables" diff --git a/roles/iptables/templates/ip4dockertables.sh.j2 b/roles/iptables/templates/ip4dockertables.sh.j2 index 3a8522e5e..efa9d83c7 100644 --- a/roles/iptables/templates/ip4dockertables.sh.j2 +++ b/roles/iptables/templates/ip4dockertables.sh.j2 @@ -75,12 +75,14 @@ done /sbin/iptables -t filter -P INPUT DROP /sbin/iptables -t filter -F INPUT /sbin/iptables -t filter -A INPUT -i lo -j ACCEPT -# Add your non docker rules here +# Add your non docker rules here -/sbin/iptables -t filter -A INPUT -p icmp -j ACCEPT +/sbin/iptables -t filter -A INPUT -p icmp -j ACCEPT # We open port 25 on docker hosts to allow containers to send emails to the docker host itself {% if 'docker' in group_names %} /sbin/iptables -t filter -A INPUT -p tcp -d {{ ansible_docker0.ipv4.address }} -m multiport --dports 25 -j ACCEPT +# We open port 514 on docker hosts to allow containers to send syslog messages to the docker host itself +/sbin/iptables -t filter -A INPUT -p tcp -d {{ ansible_docker0.ipv4.address }} -m multiport --dports 514 -j ACCEPT {% endif %} {% if iptables_incoming is defined %} diff --git a/roles/manage/defaults/main.yml b/roles/manage/defaults/main.yml index 1d94ca516..41c6f34d4 100644 --- a/roles/manage/defaults/main.yml +++ b/roles/manage/defaults/main.yml @@ -31,5 +31,8 @@ manage_tabs_enabled: - single_tenant_template - provisioning - sram + - organisation manage_docker_networks: - name: loadbalancer +manage_server_restart_policy: always +manage_server_restart_retries: 0 diff --git a/roles/manage/files/metadata_templates/organisation.template.json 
b/roles/manage/files/metadata_templates/organisation.template.json new file mode 100644 index 000000000..d90439fe5 --- /dev/null +++ b/roles/manage/files/metadata_templates/organisation.template.json @@ -0,0 +1,7 @@ +{ + "metaDataFields": {}, + "revisionnote": "", + "name": "", + "kvkNumber": "", + "notes": "" +} diff --git a/roles/manage/files/policies/allowed_attributes.json b/roles/manage/files/policies/allowed_attributes.json index f6a048acf..beb5c8363 100644 --- a/roles/manage/files/policies/allowed_attributes.json +++ b/roles/manage/files/policies/allowed_attributes.json @@ -1,40 +1,56 @@ [ { "value": "urn:mace:terena.org:attribute-def:schacHomeOrganization", + "validationRegex": "^[a-z]+(\\.[a-z]+)+$", + "allowedInDenyRule": true, "label": "Schac home organization" }, { "value": "urn:mace:terena.org:attribute-def:schacHomeOrganizationType", + "validationRegex": "^[a-z]+$", + "allowedInDenyRule": true, "label": "Schac home organization type" }, { "value": "urn:mace:dir:attribute-def:eduPersonAffiliation", + "validationRegex": "^(student|staff|faculty|employee|member)$", + "allowedInDenyRule": true, "label": "Edu person affiliation" }, { "value": "urn:mace:dir:attribute-def:eduPersonScopedAffiliation", + "validationRegex": "^(student|staff|faculty|employee|member)@[a-z]+(\\.[a-z]+)+$", + "allowedInDenyRule": true, "label": "Edu person scoped affiliation" }, { "value": "urn:mace:dir:attribute-def:eduPersonEntitlement", + "validationRegex": "^[a-z]+$", + "allowedInDenyRule": true, "label": "Edu person entitlement" }, { "value": "urn:mace:dir:attribute-def:isMemberOf", + "validationRegex": "^.*$", + "allowedInDenyRule": true, "label": "Is-member-of" }, { "value": "urn:collab:group:surfteams.nl", - "label": "SURFteams group name (fully qualified)" + "validationRegex": "^(urn:mace:surf\\.nl:invite:|urn:collab:group:)[a-z0-9_]+$", + "allowedInDenyRule": false, + "label": "SURFconext Invite (voot) role urn" }, { "value": "urn:collab:sab:surfnet.nl", + 
"validationRegex": "^(Superuser|Instellingsbevoegde|OperationeelBeheerder|SURFconextbeheerder|DNS-Beheerder)$", + "allowedInDenyRule": false, "label": "SAB role" - }, { "value": "urn:mace:dir:attribute-def:mail", + "validationRegex": "^[^@]+@[^@]+\\.[^@]+$", + "allowedInDenyRule": true, "label": "Mail address" - } -] \ No newline at end of file +] diff --git a/roles/manage/tasks/main.yml b/roles/manage/tasks/main.yml index 5181c742d..4b2d6d6de 100644 --- a/roles/manage/tasks/main.yml +++ b/roles/manage/tasks/main.yml @@ -100,7 +100,8 @@ image: ghcr.io/openconext/openconext-manage/manage-server:{{ manage_server_version }} entrypoint: /__cacert_entrypoint.sh pull: true - restart_policy: "always" + restart_policy: "{{ manage_server_restart_policy }}" + restart_retries: "{{ manage_server_restart_retries }}" # Only for restart policy on-failure state: started networks: "{{ manage_docker_networks }}" mounts: @@ -157,7 +158,7 @@ start_period: 10s hostname: managegui env: - HTTPD_CSP: "{{ httpd_csp.lenient_with_static_img }}" + HTTPD_CSP: "{{ httpd_csp.lenient_with_static_img_with_surfconext }}" HTTPD_SERVERNAME: "manage.{{ base_domain }}" OPENCONEXT_INSTANCENAME: "{{ instance_name }}" OPENCONEXT_ENGINE_LOGOUT_URL: "https://engine.{{ base_domain }}/logout" diff --git a/roles/manage/templates/application.yml.j2 b/roles/manage/templates/application.yml.j2 index 25ac07d8d..9b1bb7354 100644 --- a/roles/manage/templates/application.yml.j2 +++ b/roles/manage/templates/application.yml.j2 @@ -27,6 +27,9 @@ server: features: {{ manage.features }} +feature_toggles: + allow_secret_public_rp: True + push: eb: url: https://{{ engine_api_domain }}/api/connections @@ -44,8 +47,8 @@ push: enabled: {{ manage.oidc_push_enabled }} pdp: url: https://pdp.{{ base_domain }}/pdp/api/manage/push - policy_url: https://pdp.{{ base_domain }}/pdp/api/manage/policies decide_url: https://pdp.{{ base_domain }}/pdp/api/manage/decide + parse_url: https://pdp.{{ base_domain }}/pdp/api/manage/parse name: {{ 
manage.pdp_name }} user: {{ pdp.username }} password: "{{ pdp.password }}" @@ -61,6 +64,7 @@ product: metadata_configuration_path: file://{{ manage_dir }}/metadata_configuration metadata_templates_path: file://{{ manage_dir }}/metadata_templates metadata_export_path: classpath:/metadata_export +disabled_metadata_schemas: security: backdoor_user_name: {{ manage.backdoor_api_user }} diff --git a/roles/manage/templates/metadata_configuration/oidc10_rp.schema.json.j2 b/roles/manage/templates/metadata_configuration/oidc10_rp.schema.json.j2 index a550408a1..31f775b8f 100644 --- a/roles/manage/templates/metadata_configuration/oidc10_rp.schema.json.j2 +++ b/roles/manage/templates/metadata_configuration/oidc10_rp.schema.json.j2 @@ -50,6 +50,9 @@ "type": "string", "format": "basic-authentication-user" }, + "organisationid": { + "type": "string" + }, "type": { "type": "string", "enum": [ @@ -396,6 +399,10 @@ "format": "url", "info": "The URL of the service used to log on." }, + "coin:application_name": { + "type": "string", + "info": "The name of the service / application in related applications." + }, "coin:supports_idp_init_login": { "type": "boolean", "info": "The service provider supports IDP initiated login." @@ -410,7 +417,7 @@ }, "coin:policy_enforcement_decision_required": { "type": "boolean", - "info": "Set to activate the user policy engine (PDP)." + "info": "Set to activate the user policy engine (PDP) for this RP." }, "coin:service_team_id": { "type": "string", @@ -596,6 +603,27 @@ "default": 3600, "info": "The number of seconds that a new refresh token is valid for this Relying Party." 
}, + "application_tags": { + "type": "array", + "max": 3, + "items": { + "type": "string", + "enum": [ + "business_management", + "collaboration", + "content_library", + "learn_study", + "education_logistics", + "privacy_security", + "productivity", + "recommended", + "repository", + "surf", + "media_video" + ] + }, + "info": "The types of service used in the facet search in dashboard (max 3)." + } }, "patternProperties": { "^name:({{ supported_language_codes | replace(',','|') }})$": { diff --git a/roles/manage/templates/metadata_configuration/organisation.schema.json.j2 b/roles/manage/templates/metadata_configuration/organisation.schema.json.j2 new file mode 100644 index 000000000..ddf0d745b --- /dev/null +++ b/roles/manage/templates/metadata_configuration/organisation.schema.json.j2 @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "title": "organisation", + "order": 5, + "type": "object", + "properties": { + "eid": { + "type": "number" + }, + "entityid": { + "type": "string", + "format": "string" + }, + "name": { + "type": "string" + }, + "kvkNumber": { + "type": [ + "string", + "null" + ] + }, + "notes": { + "type": [ + "string", + "null" + ] + }, + "metaDataFields": { + "type": "object", + "properties": {}, + "patternProperties": {}, + "required": [], + "additionalProperties": false + }, + "revisionnote": { + "type": "string", + "format": "string" + }, + "patternProperties": { + "name": { + "type": "string", + "info": "The friendly name of the Organisation." + }, + "kvkNumber": { + "type": "string", + "info": "The friendly name of the Organisation." 
+ }, + "notes": { + "type": "string", + "info": "Additional information regarding the Organisation" + } + } + }, + "required": [ + "name" + ], + "additionalProperties": false +} diff --git a/roles/manage/templates/metadata_configuration/policy.schema.json.j2 b/roles/manage/templates/metadata_configuration/policy.schema.json.j2 index 5d39a219e..6aa371b95 100644 --- a/roles/manage/templates/metadata_configuration/policy.schema.json.j2 +++ b/roles/manage/templates/metadata_configuration/policy.schema.json.j2 @@ -51,7 +51,7 @@ }, "serviceProviderIds": { "type": "array", - "minItems": 1, + "minItems": 0, "items": { "type": "object", "properties": { @@ -61,6 +61,10 @@ } } }, + "serviceProvidersNegated": { + "type": "boolean", + "default": "false" + }, "identityProviderIds": { "type": "array", "items": { @@ -91,6 +95,9 @@ "value": { "type": "string" }, + "groupID": { + "type": "number" + }, "negated": { "type": "boolean", "default": "false" @@ -123,6 +130,9 @@ "value": { "type": "string" }, + "groupID": { + "type": "number" + }, "negated": { "type": "boolean" } @@ -195,8 +205,7 @@ } }, "required": [ - "name", - "serviceProviderIds" + "name" ], "additionalProperties": false, "indexes": [] diff --git a/roles/manage/templates/metadata_configuration/provisioning.schema.json.j2 b/roles/manage/templates/metadata_configuration/provisioning.schema.json.j2 index 556cb55b1..1dc8f68df 100644 --- a/roles/manage/templates/metadata_configuration/provisioning.schema.json.j2 +++ b/roles/manage/templates/metadata_configuration/provisioning.schema.json.j2 @@ -1,7 +1,7 @@ { "$schema": "http://json-schema.org/draft-04/schema#", "title": "provisioning", - "order": 5, + "order": 7, "type": "object", "properties": { "id": { diff --git a/roles/manage/templates/metadata_configuration/saml20_idp.schema.json.j2 b/roles/manage/templates/metadata_configuration/saml20_idp.schema.json.j2 index 98ca57103..bd64d7243 100644 --- a/roles/manage/templates/metadata_configuration/saml20_idp.schema.json.j2 +++ 
b/roles/manage/templates/metadata_configuration/saml20_idp.schema.json.j2 @@ -366,6 +366,10 @@ "type": "boolean", "default": false, "info": "if set for this Identity Provider, then this Identity Provider is listed as test IdP in Access." + }, + "coin:policy_enforcement_decision_required": { + "type": "boolean", + "info": "Set to activate the user policy engine (PDP) for this IdP." } }, "patternProperties": { diff --git a/roles/manage/templates/metadata_configuration/saml20_sp.schema.json.j2 b/roles/manage/templates/metadata_configuration/saml20_sp.schema.json.j2 index 7c2316207..b55972aee 100644 --- a/roles/manage/templates/metadata_configuration/saml20_sp.schema.json.j2 +++ b/roles/manage/templates/metadata_configuration/saml20_sp.schema.json.j2 @@ -77,6 +77,9 @@ "entityid": { "type": "string" }, + "organisationid": { + "type": "string" + }, "type": { "type": "string", "enum": [ @@ -488,6 +491,10 @@ "format": "url", "info": "The URL of the service used to log on." }, + "coin:application_name": { + "type": "string", + "info": "The name of the service / application in related applications." + }, "coin:supports_idp_init_login": { "type": "boolean", "info": "The service provider supports IDP initiated login." @@ -506,7 +513,7 @@ }, "coin:policy_enforcement_decision_required": { "type": "boolean", - "info": "Set to activate the user policy engine (PDP)." + "info": "Set to activate the user policy engine (PDP for this SP)." }, "coin:original_metadata_url": { "type": "string", @@ -661,6 +668,27 @@ "coin:collab_enabled": { "type": "boolean", "info": "Set for SRAM services." + }, + "application_tags": { + "type": "array", + "max": 3, + "items": { + "type": "string", + "enum": [ + "business_management", + "collaboration", + "content_library", + "learn_study", + "education_logistics", + "privacy_security", + "productivity", + "recommended", + "repository", + "surf", + "media_video" + ] + }, + "info": "The types of service used in the facet search in dashboard (max 3)." 
} }, "patternProperties": { diff --git a/roles/manage/templates/metadata_configuration/single_tenant_template.schema.json.j2 b/roles/manage/templates/metadata_configuration/single_tenant_template.schema.json.j2 index c03a0acfc..3733b6a47 100644 --- a/roles/manage/templates/metadata_configuration/single_tenant_template.schema.json.j2 +++ b/roles/manage/templates/metadata_configuration/single_tenant_template.schema.json.j2 @@ -1,7 +1,7 @@ { "$schema": "http://json-schema.org/draft-04/schema#", "title": "single_tenant_template", - "order": 7, + "order": 9, "definitions": { "AssertionConsumerServiceBinding": { "type": "string", @@ -384,6 +384,10 @@ "format": "url", "info": "The URL of the service used to log on." }, + "coin:application_name": { + "type": "string", + "info": "The name of the service / application in related applications." + }, "coin:transparant_issuer": { "type": "boolean", "info": "Set this to let the Engineblock use the EntityID of the IdP in stead of the EntityID of the Engineblock." @@ -536,6 +540,28 @@ ], "default": "AO", "info": "Set the contractual base that underlies a production SP / RP entity." + }, + "application_tags": { + "type": "array", + "max": 3, + "items": { + "type": "string", + "enum": [ + "education", + "research", + "privacy", + "content", + "repository", + "company", + "recommended", + "productivity", + "organization", + "cooperation", + "video", + "surf" + ] + }, + "info": "The types of service used in the facet search in dashboard (max 3)." } }, "patternProperties": { diff --git a/roles/manage/templates/metadata_configuration/sram.schema.json.j2 b/roles/manage/templates/metadata_configuration/sram.schema.json.j2 index e6c9a249c..c4a7dc31b 100644 --- a/roles/manage/templates/metadata_configuration/sram.schema.json.j2 +++ b/roles/manage/templates/metadata_configuration/sram.schema.json.j2 @@ -347,9 +347,13 @@ "format": "url", "info": "The URL of the service used to log on." 
}, + "coin:application_name": { + "type": "string", + "info": "The name of the service / application in related applications." + }, "coin:policy_enforcement_decision_required": { "type": "boolean", - "info": "Set to activate the user policy engine (PDP)." + "info": "Set to activate the user policy engine (PDP) for this Service." }, "coin:privacy:privacy_policy": { "type": "boolean", @@ -423,6 +427,28 @@ ] }, "info": "The authorisation grant type's of this Relying Party." + }, + "application_tags": { + "type": "array", + "max": 3, + "items": { + "type": "string", + "enum": [ + "education", + "research", + "privacy", + "content", + "repository", + "company", + "recommended", + "productivity", + "organization", + "cooperation", + "video", + "surf" + ] + }, + "info": "The types of service used in the facet search in dashboard (max 3)." } }, "patternProperties": { diff --git a/roles/mariadbdocker/tasks/main.yml b/roles/mariadbdocker/tasks/main.yml index cd748f83f..8f3d92b7e 100644 --- a/roles/mariadbdocker/tasks/main.yml +++ b/roles/mariadbdocker/tasks/main.yml @@ -68,7 +68,7 @@ login_user: root login_host: localhost login_password: "{{ mariadb_root_password }}" - # no_log: true + no_log: true with_nested: - "{{ databases.users }}" - "{{ database_clients }}" @@ -82,7 +82,7 @@ login_host: localhost priv: "*.*:SELECT,RELOAD,PROCESS,LOCK TABLES,BINLOG MONITOR,CONNECTION ADMIN,SHOW VIEW" state: present - # no_log: true + no_log: true - name: Create the backup directory ansible.builtin.file: diff --git a/roles/mongo/README.md b/roles/mongo/README.md index f140d1480..9e96770e5 100644 --- a/roles/mongo/README.md +++ b/roles/mongo/README.md @@ -5,4 +5,15 @@ You need to set the role of your mongo hosts in the host_vars. the key is `mongo_replication_role:` and it can have the values: "primary", "secondary" or arbiter. +Cluster certificates have to have an identical value for the OU, O or DC attribute, as described in the Mongo documentation. 
+ +Save the mongo ca.pem that is used for siging the cluster certifates as {{ environment_dir }}/secrets/mongo/mongoca.pem + +Set the cluster certificate as variable mongo_cluster_cert in host_vars +Set the mongo_cluster_private_key variable encrypted in host_vars + Please review the official Mongo documentation for more information. + +# Todo +- [ ] Add the possibility for adding and removing cluster members +- [ ] Add the possibility for a standalone mongo server diff --git a/roles/mongo/defaults/main.yml b/roles/mongo/defaults/main.yml index dade9cd4f..a58b2a320 100644 --- a/roles/mongo/defaults/main.yml +++ b/roles/mongo/defaults/main.yml @@ -1,11 +1,32 @@ # The global variable file mongodb installation +mongo_service: "mongod" +mongo_version: "7.0" + # In the current mongo role only one cluster per environments # is possible, that works for now. mongo_servers: [] # Set this in group_vars # - mongo1.example.com # - mongo2.example.com +# cluster members +# Not all mongo servers in the inventory are cluster members, so we use a separate list for this. +# Set this in group_vars of your environment(s). The arbiter should go first, or change the mongo_arbiter_index. +# mongo_cluster_members: +# - host: "mongoarbiter.example.com:27017" +# priority: 1 # can vote, cannot become primary +# - host: "mongo2.example.com:27017" +# priority: 2 +# - host: "mongo1.example.com:27017" +# priority: 3 +# mongo_arbiter_index: 0 + +# The replication role +# mongo_replication_role: # Set this in host_vars, it can have the values: "primary", "secondary" or arbiter + +# Todo: there is a link between mongo_replication_role and priority (arbiter is priority 1, primary the highest) so +# setting them separately is not ideal. 
+ # The port for mongo server mongod_port: 27017 @@ -24,3 +45,11 @@ mongo: # Listen on all addresses by default mongo_bind_listen_address: "0.0.0.0" + +# Certs and keys +mongo_pki_dir: "/etc/pki/mongo" +# mongo_cluster_cert set this in host_vars +# mongo_cluster_private_key set this in host_vars (encrypted with ansible-vault) + +# Users and groups +mongo_group: "mongod" diff --git a/roles/mongo/files/mongo.repo b/roles/mongo/files/mongo.repo deleted file mode 100644 index a5f30dd31..000000000 --- a/roles/mongo/files/mongo.repo +++ /dev/null @@ -1,6 +0,0 @@ -[mongodb-org-6.0] -name=MongoDB Repository -baseurl=https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/6.0/x86_64/ -gpgcheck=1 -enabled=1 -gpgkey=https://www.mongodb.org/static/pgp/server-6.0.asc diff --git a/roles/mongo/handlers/main.yml b/roles/mongo/handlers/main.yml index 01d6f72d7..6916f928f 100644 --- a/roles/mongo/handlers/main.yml +++ b/roles/mongo/handlers/main.yml @@ -1,6 +1,6 @@ --- -- name: restart mongod +- name: Restart mongod throttle: 1 service: - name: mongod + name: "{{ mongo_service }}" state: restarted diff --git a/roles/mongo/tasks/ca.yml b/roles/mongo/tasks/ca.yml deleted file mode 100644 index 11bb0f00b..000000000 --- a/roles/mongo/tasks/ca.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -# In this task file a ca key and certificate are created -# and saved on localhost. These are used for signing certificates for -# each individual mongo server in the cluster (in certs.yml) -# This works for new mongo servers, changing the ca and certificates -# on mongoservers with this role is not tested. 
-- name: Create mongo key dir in the environment repo - ansible.builtin.file: - path: "{{ inventory_dir }}/secrets/mongo/" - state: directory - mode: "0750" - -- name: Check if we have a CA in the environments repo - ansible.builtin.stat: - path: "{{ inventory_dir }}/secrets/mongo/mongoca.key" - register: mongo_ca_key - -- name: Create private key with password protection - community.crypto.openssl_privatekey: - path: "{{ inventory_dir }}/secrets/mongo/mongoca.key" - passphrase: "{{ mongo_ca_passphrase }}" - cipher: auto - when: not mongo_ca_key.stat.exists - -- name: Create certificate signing request (CSR) for CA certificate - community.crypto.openssl_csr_pipe: - privatekey_path: "{{ inventory_dir }}/secrets/mongo/mongoca.key" - privatekey_passphrase: "{{ mongo_ca_passphrase }}" - common_name: Mongo {{ env }} CA - use_common_name_for_san: false # since we do not specify SANs, don't use CN as a SAN - basic_constraints: - - 'CA:TRUE' - basic_constraints_critical: true - key_usage: - - keyCertSign - key_usage_critical: true - register: ca_csr - when: not mongo_ca_key.stat.exists - -- name: Create self-signed CA certificate from CSR - community.crypto.x509_certificate: - path: "{{ inventory_dir}}/secrets/mongo/mongoca.pem" - csr_content: "{{ ca_csr.csr }}" - privatekey_path: "{{ inventory_dir }}/secrets/mongo/mongoca.key" - privatekey_passphrase: "{{ mongo_ca_passphrase }}" - provider: selfsigned - when: not mongo_ca_key.stat.exists diff --git a/roles/mongo/tasks/certs.yml b/roles/mongo/tasks/certs.yml index 4a9bcb019..b79658567 100644 --- a/roles/mongo/tasks/certs.yml +++ b/roles/mongo/tasks/certs.yml @@ -1,88 +1,29 @@ --- # In this task file keys and certificates for the -# mongo servers are created and signed with the ca from ca.yml -- name: Install some packages - ansible.builtin.yum: - name: - - python3 - - python3-pip - state: present - -- name: Install python36-cryptography on CentOS7 - ansible.builtin.yum: - name: - - python36-cryptography - state: present - 
when: ansible_distribution_major_version == '7' - -- name: Install python36-cryptography on Rocky 8 and 9 - ansible.builtin.yum: - name: - - python3-cryptography - state: present - when: ansible_distribution_major_version == '8' or ansible_distribution_major_version == '9' +# mongo servers are distributed - name: Create directory to keep mongo key material ansible.builtin.file: - dest: "/etc/pki/mongo/" + dest: "{{ mongo_pki_dir }}" state: directory owner: root group: root mode: "0775" -- name: Create private keys - community.crypto.openssl_privatekey: - path: "/etc/pki/mongo/mongo.key" - -- name: Check whether certificate exists - ansible.builtin.stat: - path: "/etc/pki/mongo/mongo.pem" - register: certificate_exists - -- name: Create certificate signing request (CSR) for new certificate - community.crypto.openssl_csr_pipe: - privatekey_path: "/etc/pki/mongo/mongo.key" - subject_alt_name: - - "{{ mongo_tls_host_altname_dnsorip | default('DNS') }}:{{ inventory_hostname }}" - organizational_unit_name: "{{ instance_name }}" - register: csr - changed_when: false - -- name: Read existing certificate if exists - ansible.builtin.slurp: - src: /etc/pki/mongo/mongo.pem - when: certificate_exists.stat.exists - register: certificate - -- name: Sign certificate with our CA - community.crypto.x509_certificate_pipe: - content: "{{ (certificate.content | b64decode) if certificate_exists.stat.exists else omit }}" - csr_content: "{{ csr.csr }}" - provider: ownca - ownca_path: "{{ inventory_dir }}/secrets/mongo/mongoca.pem" - ownca_privatekey_path: "{{ inventory_dir }}/secrets/mongo/mongoca.key" - ownca_privatekey_passphrase: "{{ mongo_ca_passphrase }}" - ownca_not_after: +3650d # valid for ten years - ownca_not_before: "-1d" # valid since yesterday - delegate_to: localhost - register: certificate - ignore_errors: '{{ ansible_check_mode }}' - become: false - -- name: Write certificate file - ansible.builtin.copy: - dest: /etc/pki/mongo/mongo.pem - content: "{{ 
certificate.certificate }}" +- name: Distribute mongo cluster key and cert + ansible.builtin.template: + src: keyandcert.pem.j2 + dest: "{{ mongo_pki_dir }}/keyandcert.pem" owner: root - group: root - mode: "0400" - when: certificate is changed + group: "{{ mongo_group }}" + mode: "0440" + notify: Restart mongod -- name: Install the CA certificate +- name: Copy ca pem file ansible.builtin.copy: src: "{{ inventory_dir }}/secrets/mongo/mongoca.pem" - dest: /etc/pki/mongo/mongoca.pem + dest: "{{ mongo_pki_dir }}/mongoca.pem" owner: root group: root mode: "0644" - notify: restart mongod + notify: Restart mongod diff --git a/roles/mongo/tasks/cluster.yml b/roles/mongo/tasks/clusterconfig.yml similarity index 50% rename from roles/mongo/tasks/cluster.yml rename to roles/mongo/tasks/clusterconfig.yml index 55661d71a..0ee17dc39 100644 --- a/roles/mongo/tasks/cluster.yml +++ b/roles/mongo/tasks/clusterconfig.yml @@ -1,41 +1,30 @@ --- +# todo this weorks only for new deployments +# rewrite so mongo config can be changed and cluster members can be added or removed - name: Check if hosts are in clustered - ansible.builtin.shell: >- - mongosh --port {{ mongod_port }} --quiet --eval 'db.isMaster().hosts' + ansible.builtin.command: mongosh --port {{ mongod_port }} --quiet --eval 'db.isMaster().hosts' register: check_cluster changed_when: false + check_mode: false -- name: Set fact for roles - ansible.builtin.set_fact: - mongo_primary: "{{ mongo_replication_role == 'primary' }}" - mongo_secondary: "{{ mongo_replication_role == 'secondary' }}" - mongo_arbiter: "{{ mongo_replication_role == 'arbiter' }}" +- name: Debug check_cluster variable + when: show_debug_info is defined and show_debug_info | bool + ansible.builtin.debug: + msg: "{{ check_cluster }}" -- name: Build member list - ansible.builtin.set_fact: - members: >- - {{ - members | default([]) + - [{ - 'host': item , - 'priority': member_weight[hostvars[item].mongo_replication_role] - }] - }} - loop: "{{ ansible_play_hosts 
}}" - run_once: true - vars: - member_weight: - primary: 3 - secondary: 2 - arbiter: 1 +- name: Debug mongo_cluster_members variable + when: show_debug_info is defined and show_debug_info | bool + ansible.builtin.debug: + msg: "{{ mongo_cluster_members }}" + +- name: Debug mongo_replication_role variable + when: show_debug_info is defined and show_debug_info | bool + ansible.builtin.debug: + msg: "{{ mongo_replication_role }}" -- name: Set fact for arbiter index number - ansible.builtin.set_fact: - arbiter_index: "{{ hostid }}" - when: hostvars[item].mongo_arbiter - loop: "{{ ansible_play_hosts }}" - loop_control: - index_var: hostid +- name: Debug host_id + ansible.builtin.debug: + msg: "{{ hostid }}" - name: Initial cluster initialisation community.mongodb.mongodb_replicaset: @@ -44,8 +33,8 @@ login_port: "{{ mongod_port }}" login_password: "{{ mongo_admin_password }}" replica_set: "{{ replica_set_name }}" - members: "{{ members }}" - arbiter_at_index: "{{ arbiter_index | default(omit) }}" + members: "{{ mongo_cluster_members }}" + arbiter_at_index: "{{ mongo_arbiter_index | default(0) }}" validate: false run_once: true when: mongo_replication_role == 'primary' diff --git a/roles/mongo/tasks/install.yml b/roles/mongo/tasks/install.yml index 741589b29..673d465e3 100644 --- a/roles/mongo/tasks/install.yml +++ b/roles/mongo/tasks/install.yml @@ -1,31 +1,20 @@ --- - name: Create the repository for mongodb - ansible.builtin.copy: - src: "mongo.repo" + when: ansible_os_family == 'RedHat' + ansible.builtin.template: + src: "mongo.repo.j2" dest: "/etc/yum.repos.d/mongo.repo" owner: root mode: "0640" -- name: Install the mongodb package +- name: Install the mongodb package and some helper packages + when: ansible_os_family == 'RedHat' ansible.builtin.yum: name: - mongodb-org + - pip state: present -- name: Slurp the private key - ansible.builtin.slurp: - path: "/etc/pki/mongo/mongo.key" - register: mongo_key - -- name: Create combined key and certificate file - 
ansible.builtin.copy: - content: "{{ mongo_key['content'] | b64decode }}{{ certificate.certificate }}" - dest: "/etc/pki/mongo/keyandcert.pem" - owner: root - group: mongod - mode: "0440" - ignore_errors: '{{ ansible_check_mode }}' - - name: Install pymongo ansible.builtin.pip: name: pymongo @@ -70,7 +59,7 @@ owner: root group: root mode: "0644" - notify: restart mongod + notify: Restart mongod - name: Enable and start mongod ansible.builtin.service: diff --git a/roles/mongo/tasks/main.yml b/roles/mongo/tasks/main.yml index 6cda0f417..2485d4b1d 100644 --- a/roles/mongo/tasks/main.yml +++ b/roles/mongo/tasks/main.yml @@ -1,38 +1,40 @@ --- -- name: Use temporarily python3 as remote interpreter, this fixes pymongo - ansible.builtin.set_fact: - ansible_python_interpreter: "/usr/bin/python3" - tags: mongo_users - -- name: Include CA tasks - ansible.builtin.include_tasks: - file: ca.yml - apply: - delegate_to: localhost - run_once: true - become: false - -- name: Include Certificate tasks - ansible.builtin.include_tasks: - file: certs.yml - -- name: Include installation tasks - ansible.builtin.include_tasks: - file: install.yml - -- name: Include cluster installation tasks - ansible.builtin.include_tasks: - file: cluster.yml - -- name: Include user creation - ansible.builtin.include_tasks: - file: users.yml - -- name: Include postinstallation tasks - ansible.builtin.include_tasks: - file: postinstall.yml - -- name: Use python2 again as remote interpreter - ansible.builtin.set_fact: - ansible_python_interpreter: "/usr/bin/python" - when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' +- name: Install and configure mongo on redhat family servers + when: ansible_os_family == 'RedHat' + block: + - name: Use temporarily python3 as remote interpreter, this fixes pymongo + ansible.builtin.set_fact: + ansible_python_interpreter: "/usr/bin/python3" + tags: mongo_users + + - name: Include installation tasks + ansible.builtin.include_tasks: + file: 
install.yml + + - ansible.builtin.meta: flush_handlers + + - name: Include Certificate tasks + ansible.builtin.include_tasks: + file: certs.yml + + - name: Include cluster installation tasks + ansible.builtin.include_tasks: + file: clusterconfig.yml + + - name: Include user creation + ansible.builtin.include_tasks: + file: users.yml + + - name: Include postinstallation tasks + ansible.builtin.include_tasks: + file: postinstall.yml + + - name: Use python2 again as remote interpreter + ansible.builtin.set_fact: + ansible_python_interpreter: "/usr/bin/python" + when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7' + +- name: Message for non redhat family servers + when: ansible_os_family != 'RedHat' + ansible.builtin.debug: + msg: "Sorry, this role only works on RedHat family servers" diff --git a/roles/mongo/tasks/users.yml b/roles/mongo/tasks/users.yml index 86ce28cb8..a218bac46 100644 --- a/roles/mongo/tasks/users.yml +++ b/roles/mongo/tasks/users.yml @@ -1,5 +1,5 @@ -- name: Create mongo database users - mongodb_user: +- name: Create mongo database users # requires pymongo 4+ + community.mongodb.mongodb_user: login_database: admin database: "{{ item.db_name }}" login_user: admin diff --git a/roles/mongo/templates/keyandcert.pem.j2 b/roles/mongo/templates/keyandcert.pem.j2 new file mode 100644 index 000000000..eed0ac731 --- /dev/null +++ b/roles/mongo/templates/keyandcert.pem.j2 @@ -0,0 +1 @@ +{{ mongo_cluster_private_key }}{{ mongo_cluster_cert }} diff --git a/roles/mongo/templates/mongo.repo.j2 b/roles/mongo/templates/mongo.repo.j2 new file mode 100644 index 000000000..d94eff281 --- /dev/null +++ b/roles/mongo/templates/mongo.repo.j2 @@ -0,0 +1,6 @@ +[mongodb-org-{{ mongo_version}}] +name=MongoDB Repository +baseurl=https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/{{ mongo_version}}/x86_64/ +gpgcheck=1 +enabled=1 +gpgkey=https://www.mongodb.org/static/pgp/server-{{ mongo_version}}.asc diff --git 
a/roles/mongo/templates/mongod.conf.j2 b/roles/mongo/templates/mongod.conf.j2 index e0cf91d28..f5e990add 100644 --- a/roles/mongo/templates/mongod.conf.j2 +++ b/roles/mongo/templates/mongod.conf.j2 @@ -19,6 +19,6 @@ storage: replication: replSetName: {{ replica_set_name }} -security: +security: authorization: enabled clusterAuthMode: x509 diff --git a/roles/myconext/defaults/main.yml b/roles/myconext/defaults/main.yml index 4082fa28d..3e8c209c4 100644 --- a/roles/myconext/defaults/main.yml +++ b/roles/myconext/defaults/main.yml @@ -1,4 +1,7 @@ --- myconext_cronjobmaster: true +myconext_logback_json: true myconext_docker_networks: - name: loadbalancer +myconext_server_restart_policy: always +myconext_server_restart_retries: 0 diff --git a/roles/myconext/tasks/main.yml b/roles/myconext/tasks/main.yml index 469a7c847..326668702 100644 --- a/roles/myconext/tasks/main.yml +++ b/roles/myconext/tasks/main.yml @@ -126,9 +126,10 @@ - name: Create and start the server container community.docker.docker_container: name: myconextserver - image: ghcr.io/openconext/openconext-myconext/myconext-server:{{ myconext_server_version }} + image: ghcr.io/openconext/openconext-myconext/myconext-server:{{ myconext_version }} pull: true - restart_policy: "always" + restart_policy: "{{ myconext_server_restart_policy }}" + restart_retries: "{{ myconext_server_restart_retries }}" # Only for restart policy on-failure state: started env: USE_SYSTEM_CA_CERTS: "1" @@ -160,7 +161,7 @@ - name: Create the client container community.docker.docker_container: name: myconextgui - image: ghcr.io/openconext/openconext-myconext/myconext-gui:{{ myconext_gui_version }} + image: ghcr.io/openconext/openconext-myconext/myconext-gui:{{ myconext_version }} pull: true restart_policy: "always" state: started @@ -196,7 +197,7 @@ - name: Create the account gui community.docker.docker_container: name: accountgui - image: ghcr.io/openconext/openconext-myconext/account-gui:{{ account_gui_version }} + image: 
ghcr.io/openconext/openconext-myconext/account-gui:{{ myconext_version }} pull: true restart_policy: "always" state: started @@ -226,7 +227,7 @@ - name: Create the servicedesk gui community.docker.docker_container: name: servicedeskgui - image: ghcr.io/openconext/openconext-myconext/servicedesk-gui:{{ servicedesk_gui_version }} + image: ghcr.io/openconext/openconext-myconext/servicedesk-gui:{{ myconext_version }} pull: true restart_policy: "always" state: started diff --git a/roles/myconext/templates/application.yml.j2 b/roles/myconext/templates/application.yml.j2 index c8192c90f..f00d7b668 100644 --- a/roles/myconext/templates/application.yml.j2 +++ b/roles/myconext/templates/application.yml.j2 @@ -37,8 +37,12 @@ springdoc: enabled: true email: - from: eduID - error_mail: info@surfconext.nl + from_deprovisioning: "{{ myconext.email.from_deprovisioning }}" + from_code: "{{ myconext.email.from_code }}" + from_app_nudge: "{{ myconext.email.from_app_nudge }}" + from_new_device: "{{ myconext.email.from_new_device }}" + error: {{ error_mail_to }} + error_mail: {{ error_mail_to }} magic-link-url: https://login.{{ myconext_base_domain }}/saml/guest-idp/magic my-surfconext-url: https://mijn.{{ myconext_base_domain }} idp-surfconext-url: https://login.{{ myconext_base_domain }} @@ -58,7 +62,14 @@ cron: manage-initial-delay-milliseconds: 15000 manage-fixed-rate-milliseconds: 300_000 # Runs on the first day of February, May, August, and November. - mail-institution-mail-usage-expression: "0 0 0 1 2,5,8,11 *" + # 0 – seconds + # 30 – minute + # 5 – hour + # * – every day of month + # 2,5,8,11 – February, May, August, November + # * – every day of week + mail-institution-mail-usage-expression: "0 30 5 * 2,5,8,11 *" + mail-institution-batch-size: 500 # Every day at 6:30AM nudge-app-mail-expression: "0 30 6 * * ?" 
# Number of days after creation of the eduID account which the nudge mail is send @@ -74,6 +85,7 @@ manage: base_url: "https://manage.{{ base_domain }}" enabled: True +mongodb_db: {{ myconext.mongo_database }} base_domain: {{ myconext_base_domain }} saml_metadata_base_path: https://login.{{ myconext_base_domain }} base_path: https://mijn.{{ myconext_base_domain }} @@ -114,6 +126,8 @@ feature: create_eduid_institution_landing: {{ myconext.feature_create_eduid_institution_landing }} # Do we default remember the user for a longer period default_remember_me: True + # Do we default add affiliate email address + default_affiliate_email: True # Does the SAMLIdpService expects authn requests to be signed requires_signed_authn_request: False # Do we support ID verify @@ -132,6 +146,14 @@ feature: service_desk_active: {{ myconext.feature_service_desk_active }} # Set to true to enable captcha in the account creation captcha_enabled: True + # Set to true to use the BRIN code to add ui-roles and authentication scoped affiliations + use_remote_creation_for_affiliation: {{ myconext.feature_use_remote_creation_for_affiliation }} + # Set to true to show the account linking related options on the personal-info page and home page (banner) + enable_account_linking: {{ myconext.feature_enable_account_linking }} + # Set to true to show the app login option + use_app: {{ myconext.feature_use_app }} + +default_affiliate_email_domain: eduid.nl captcha: sitekey: {{ myconext.captcha_sitekey }} @@ -144,7 +166,8 @@ private_key_path: file:///config/myconext_saml.key certificate_path: file:///config/myconext_saml.crt tiqr_hash_secret: "{{ myconext_tiqr_hash_secret }}" -remember_me_max_age_seconds: 15_768_000 +remember_me_max_age_seconds: 15_768_000 # 11 uur +tiqr_trust_for_pn_max_age_seconds: 15_768_000 sso_mfa_duration_seconds: 3600 nudge_eduid_app_login_days: {{ myconext.nudge_eduid_app_login_days }} nudge_eduid_app_pause_days: {{ myconext.nudge_eduid_app_pause_days }} @@ -240,7 +263,7 @@ 
geo_location: # Use either max_mind or mock service: max_mind license_key: {{ myconext_geo2lite_license_key }} - external_url: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key={license_key}&suffix=tar.gz" + external_url: {{ myconext.geo_location_external_url }} download_directory: "/config/geo2lite" # Signicat configuration @@ -284,7 +307,7 @@ management: access: unrestricted health: access: unrestricted - show-details: when_authorized + show-details: always prometheus: access: read_only mappings: diff --git a/roles/myconext/templates/logback.xml.j2 b/roles/myconext/templates/logback.xml.j2 index 493052246..b04d0e0ec 100644 --- a/roles/myconext/templates/logback.xml.j2 +++ b/roles/myconext/templates/logback.xml.j2 @@ -8,14 +8,35 @@ + + host.docker.internal:514 + + {"app":"myconext"} + true + + [ignore] + [ignore] + [ignore] + + + + myconextjson: + + + + + - - + + + {% if myconext_logback_json | bool %} + + {%endif%} diff --git a/roles/oidc-playground/defaults/main.yml b/roles/oidc-playground/defaults/main.yml index 58cb18672..91c4346a8 100644 --- a/roles/oidc-playground/defaults/main.yml +++ b/roles/oidc-playground/defaults/main.yml @@ -1 +1,3 @@ oidc_playground_dir: /opt/openconext/oidc-playground +oidc_playground_server_restart_policy: always +oidc_playground_server_restart_retries: 0 diff --git a/roles/oidc-playground/tasks/main.yml b/roles/oidc-playground/tasks/main.yml index 48afef265..c969ee246 100644 --- a/roles/oidc-playground/tasks/main.yml +++ b/roles/oidc-playground/tasks/main.yml @@ -27,8 +27,8 @@ TZ: "{{ timezone }}" image: ghcr.io/openconext/openconext-oidc-playground/oidc-playground-server:{{ oidc_playground_server_version }} pull: true - restart_policy: "always" - state: started + restart_policy: "{{ oidc_playground_server_restart_policy }}" + restart_retries: "{{ oidc_playground_server_restart_retries }}" # Only for restart policy on-failure networks: - name: "loadbalancer" mounts: diff --git 
a/roles/oidcng/defaults/main.yml b/roles/oidcng/defaults/main.yml index 44641c6c1..3dcbd97b3 100644 --- a/roles/oidcng/defaults/main.yml +++ b/roles/oidcng/defaults/main.yml @@ -21,3 +21,5 @@ oidcng_manage_provision_samlsp_sign: "True" oidcng_manage_provision_samlsp_trusted_proxy: "True" oidcng_docker_networks: - name: loadbalancer +oidcng_server_restart_policy: always +oidcng_server_restart_retries: 0 diff --git a/roles/oidcng/tasks/main.yml b/roles/oidcng/tasks/main.yml index fa35fac7c..a306fa7f1 100644 --- a/roles/oidcng/tasks/main.yml +++ b/roles/oidcng/tasks/main.yml @@ -101,7 +101,8 @@ image: ghcr.io/openconext/openconext-oidcng/oidcng:{{ oidcng_version }} entrypoint: /__cacert_entrypoint.sh pull: true - restart_policy: "always" + restart_policy: "{{ oidcng_server_restart_policy }}" + restart_retries: "{{ oidcng_server_restart_retries }}" # Only for restart policy on-failure state: started networks: "{{ oidcng_docker_networks }}" mounts: diff --git a/roles/oidcng/templates/logback.xml.j2 b/roles/oidcng/templates/logback.xml.j2 index 1e7f995ee..7b38d5627 100644 --- a/roles/oidcng/templates/logback.xml.j2 +++ b/roles/oidcng/templates/logback.xml.j2 @@ -9,7 +9,7 @@ - {{ ansible_fqdn }}:514 + host.docker.internal:514 {"app":"oidcng"} true @@ -40,7 +40,8 @@ - + + {%if oidcng_logback_email |bool %} diff --git a/roles/oidcng/templates/openid-configuration.json.j2 b/roles/oidcng/templates/openid-configuration.json.j2 index 08073b30e..d430b321e 100644 --- a/roles/oidcng/templates/openid-configuration.json.j2 +++ b/roles/oidcng/templates/openid-configuration.json.j2 @@ -49,6 +49,7 @@ "eduid.nl/eppn", "eduid.nl/eduid", "eduid.nl/mobile", + "eduid.nl/links", "edubadges.nl/sis" ], "token_endpoint_auth_methods_supported": [ diff --git a/roles/openaccess/defaults/main.yml b/roles/openaccess/defaults/main.yml new file mode 100644 index 000000000..ba813a4c8 --- /dev/null +++ b/roles/openaccess/defaults/main.yml @@ -0,0 +1,3 @@ +--- +openaccess_server_restart_policy: always 
+openaccess_server_restart_retries: 0 diff --git a/roles/attribute-aggregation/handlers/main.yml b/roles/openaccess/handlers/main.yml similarity index 60% rename from roles/attribute-aggregation/handlers/main.yml rename to roles/openaccess/handlers/main.yml index 05662c7ba..e80525c30 100644 --- a/roles/attribute-aggregation/handlers/main.yml +++ b/roles/openaccess/handlers/main.yml @@ -1,9 +1,9 @@ -- name: restart attribute-aggregationserver +- name: restart accessserver community.docker.docker_container: - name: aaserver + name: accessserver state: started restart: true # avoid restarting it creates unexpected data loss according to docker_container_module notes comparisons: '*': ignore - when: aaservercontainer is success and aaservercontainer is not change + when: accessservercontainer is success and accessservercontainer is not changed \ No newline at end of file diff --git a/roles/openaccess/tasks/main.yml b/roles/openaccess/tasks/main.yml new file mode 100644 index 000000000..c3cfb6e4a --- /dev/null +++ b/roles/openaccess/tasks/main.yml @@ -0,0 +1,98 @@ +--- +- name: Create directory to keep configfile + ansible.builtin.file: + dest: "/opt/openconext/openaccess" + state: directory + owner: root + group: root + mode: "0770" + +- name: Place the serverapplication configfiles + ansible.builtin.template: + src: "{{ item }}.j2" + dest: /opt/openconext/openaccess/{{ item }} + owner: root + group: root + mode: "0644" + with_items: + - logback.xml + - serverapplication.yml + notify: restart accessserver + +- name: Add the MariaDB docker network to the list of networks when MariaDB runs in Docker + ansible.builtin.set_fact: + invite_docker_networks: + - name: loadbalancer + - name: openconext_mariadb + when: mariadb_in_docker | default(false) | bool + +- name: Create and start the access server container + community.docker.docker_container: + name: accessserver + env: + TZ: "{{ timezone }}" + image: ghcr.io/openconext/openconext-access/accessserver:{{ 
openconextaccess_server_version }} + pull: true + restart_policy: "{{ openaccess_server_restart_policy }}" + restart_retries: "{{ openaccess_server_restart_retries }}" # Only for restart policy on-failure + state: started + networks: + - name: "loadbalancer" + mounts: + - source: /opt/openconext/openaccess/serverapplication.yml + target: /application.yml + type: bind + - source: /opt/openconext/openaccess/logback.xml + target: /logback.xml + type: bind + command: "-Xmx512m --spring.config.location=./" + etc_hosts: + host.docker.internal: host-gateway + healthcheck: + test: + [ + "CMD", + "wget", + "--no-verbose", + "--tries=1", + "--spider", + "http://localhost:8080/internal/health", + ] + interval: 10s + timeout: 10s + retries: 3 + start_period: 10s + register: accessservercontainer + +- name: Create the access client container + community.docker.docker_container: + name: accessgui + image: ghcr.io/openconext/openconext-access/accessclient:{{ openconextaccess_client_version }} + pull: true + restart_policy: "always" + state: started + networks: + - name: "loadbalancer" + labels: + traefik.http.routers.accessclient.rule: "Host(`{{ openconextaccess_base_domain }}`)" + traefik.http.routers.accessclient.tls: "true" + traefik.enable: "true" + healthcheck: + test: ["CMD", "curl", "--fail", "http://localhost/internal/health"] + interval: 10s + timeout: 10s + retries: 3 + start_period: 10s + hostname: access + mounts: + - source: /etc/localtime + target: /etc/localtime + type: bind + - source: /opt/openconext/common/favicon.ico + target: /var/www/favicon.ico + type: bind + env: + S3_STORAGE_URL: "{{ openconextaccess.s3_storage.url }}" + S3_STORAGE_KEY: "{{ openconextaccess.s3_storage.key }}" + S3_STORAGE_SECRET: "{{ openconextaccess.s3_storage.secret }}" + S3_STORAGE_BUCKET: "{{ openconextaccess.s3_storage.bucket }}" \ No newline at end of file diff --git a/roles/openaccess/templates/logback.xml.j2 b/roles/openaccess/templates/logback.xml.j2 new file mode 100644 index 
000000000..f2d82cb6a --- /dev/null +++ b/roles/openaccess/templates/logback.xml.j2 @@ -0,0 +1,29 @@ +#jinja2:lstrip_blocks: True + + + + + + %d{ISO8601} %5p [%t] %logger{40}:%L - %m%n + + + + + {{ smtp_server }} + {{ noreply_email }} + {{ error_mail_to }} + {{ error_subject_prefix }}Unexpected error in surfaccess + + + + ERROR + + + + + + + + + + \ No newline at end of file diff --git a/roles/openaccess/templates/openaccess.conf.j2 b/roles/openaccess/templates/openaccess.conf.j2 new file mode 100644 index 000000000..c3236cf4f --- /dev/null +++ b/roles/openaccess/templates/openaccess.conf.j2 @@ -0,0 +1,71 @@ + # General setup for the virtual host, inherited from global configuration + ServerName https://access.{{ base_domain }} + + ErrorLog "|/usr/bin/logger -S 32k -p local3.err -t 'Apache-access'" + CustomLog "|/usr/bin/logger -S 32k -p local3.info -t 'Apache-access'" combined + + RewriteEngine on + + RewriteCond %{REQUEST_URI} !\.html$ + RewriteCond %{REQUEST_URI} !\.(js|css)(\.map)?$ + RewriteCond %{REQUEST_URI} !\.svg$ + RewriteCond %{REQUEST_URI} !\.png$ + RewriteCond %{REQUEST_URI} !\.ico$ + RewriteCond %{REQUEST_URI} !\.woff$ + RewriteCond %{REQUEST_URI} !\.woff2$ + RewriteCond %{REQUEST_URI} !\.ttf$ + RewriteCond %{REQUEST_URI} !\.eot$ + RewriteCond %{REQUEST_URI} !^/(asset-)?manifest.json$ + RewriteCond %{REQUEST_URI} !^/access + RewriteCond %{REQUEST_URI} !^/spDashboard + RewriteCond %{REQUEST_URI} !^/health + RewriteCond %{REQUEST_URI} !^/info + RewriteCond %{REQUEST_URI} !^/internal + RewriteCond %{REQUEST_URI} !^/login + RewriteCond %{REQUEST_URI} !^/startSSO + RewriteCond %{REQUEST_URI} !^/fonts + RewriteRule (.*) /index.html [L] + + ProxyPreserveHost On + ProxyPass /Shibboleth.sso ! 
+ ProxyPass /access/api http://accessserver:8080/access/api retry=0 + ProxyPassReverse /access/api http://accessserver:8080/access/api + + ProxyPass /health http://accessserver:8080/internal/health retry=0 + ProxyPass /info http://accessserver:8080/internal/info retry=0 + ProxyPass /login http://accessserver:8080/login retry=0 + ProxyPass /startSSO http://accessserver:8080/startSSO retry=0 + + ProxyPass /internal http://accessserver:8080/internal retry=0 + ProxyPassReverse /internal http://accessserver:8080/internal + + + AuthType shibboleth + ShibUseHeaders On + ShibRequireSession On + ShibRequestSetting REMOTE_ADDR X-Forwarded-For + Require valid-user + + + DocumentRoot "/var/www/" + + + Require all granted + + + + Require all granted + + + + Require all granted + + + + Require all granted + + + Header always set X-Frame-Options "DENY" + Header always set Referrer-Policy "strict-origin-when-cross-origin" + Header always set X-Content-Type-Options "nosniff" + diff --git a/roles/openaccess/templates/serverapplication.yml.j2 b/roles/openaccess/templates/serverapplication.yml.j2 new file mode 100644 index 000000000..944e4ff46 --- /dev/null +++ b/roles/openaccess/templates/serverapplication.yml.j2 @@ -0,0 +1,208 @@ +--- +logging: + config: classpath:/logback-spring.xml + +server: + port: 8080 + error: + path: "/error" + include-message: always + forward-headers-strategy: framework + servlet: + session: + cookie: + secure: false + +spring: + main: + banner-mode: "off" + session: + jdbc: + cleanup-cron: "-" + initialize-schema: always + store-type: jdbc + timeout: 8h + mvc: + log-request-details: false + security: + oauth2: + client: + registration: + oidcng: + client-id: {{ oidc_playground.client_id }} + client-secret: {{ oidc_playground.secret }} + redirect-uri: "{baseUrl}/login/oauth2/code/{registrationId}" + authorization-grant-type: "authorization_code" + scope: openid + provider: + oidcng: + authorization-uri: "https://connect.{{ base_domain }}/oidc/authorize" + 
token-uri: "https://connect.{{ base_domain }}/oidc/token" + user-info-uri: "https://connect.{{ base_domain }}/oidc/userinfo" + jwk-set-uri: "https://connect.{{ base_domain }}/oidc/certs" + user-name-attribute: sub + user-info-authentication-method: client_secret_basic + jpa: + properties: + hibernate: + naming-strategy: org.hibernate.cfg.ImprovedNamingStrategy + format_sql: false + show_sql: false + open-in-view: false + show-sql: false + datasource: + driver-class-name: org.mariadb.jdbc.Driver + url: jdbc:mariadb://{{ openconextaccess.db.host }}/openconextaccess?autoReconnect=true + username: {{ openconextaccess.db.user }} + password: {{ openconextaccess.db.secret }} + flyway: + locations: classpath:db/mysql/migration + fail-on-missing-locations: true + mail: + host: {{ smtp_server }} + +oidcng: + discovery-url: "https://connect.test2.surfconext.nl/oidc/.well-known/openid-configuration" + introspect-url: "https://connect.test2.surfconext.nl/oidc/introspect" + resource-server-id: myconext.rs + resource-server-secret: secret + base-url: {{ openconextaccess_base_domain }} + +lifecycle: + user: lifecycle + password: {{ openconextaccess_lifecycle_secret }} + +jira: + enabled: true + base-url: {{ openconextaccess.jira.base_url }} + user-name: {{ openconextaccess.jira.username }} + project-key: {{ openconextaccess.jira.project_key }} + environment: {{ openconextaccess.jira.environment }} + api-key: {{ openconextaccess.jira.api_key }} + # Timeout in milliseconds + connection-timeout: 10000 + +institution-admin: + entitlement: "urn:mace:surfnet.nl:surfnet.nl:sab:role:SURFconextverantwoordelijke" + organization-guid-prefix: "urn:mace:surfnet.nl:surfnet.nl:sab:organizationGUID:" + +config: + client-url: "https://{{ openconextaccess_base_domain }}" + base-url: "{{ base_domain }}" + edu_id_schac_home_organization: "eduid.nl" + discovery: "https://connect.test2.surfconext.nl/oidc/.well-known/openid-configuration" + invite: "https://invite.{{ base_domain }}" + sram: "https://{{ 
env }}.sram.surf.nl/" + serviceDesk: "https://servicedesk.surf.nl/jira/plugins/servlet/desk/user/requests?reporter=all" + # For other environments, move to group_vars + identity_providers: + - name: "SXS IdP" + entityid: "http://mock-idp" + descriptionEN: "Een test-IdP waarmee je zelf attributen-sets kunt simuleren. De metadata vind je hier" + descriptionNL: "Een test-IdP waarmee je zelf attributen-sets kunt simuleren. De metadata vind je hier" + - name: "SXS Dummy" + entityid: "https://idp.diy.surfconext.nl" + descriptionEN: "Een test-IdP met fictieve gebruikersaccounts. De metadata vind je hier" + descriptionNL: "Een test-IdP met fictieve gebruikersaccounts. De metadata vind je hier" + idp_proxy_meta_data: https://metadata.test2.surfconext.nl/idp-metadata.xml + minimal_stepup_acr_level: "http://{{ base_domain }}/assurance/loa2" + features: + - name: idp + enabled: true + - name: invite + enabled: true + - name: sram + enabled: true + - name: mfa + enabled: true + acr_values: + {% for loa in [stepup_intrinsic_loa] + stepup_loa_values_supported %} + - "{{ loa }}" + {% endfor %} + +eduid-idp-entity-id: "https://login.{{ myconext_base_domain }}" + +super-admin: + users: + - "urn:collab:person:example.com:admin" + +gui: + disclaimer: + background-color: {{ environment_ribbon_colour }} + content: {{ environment_shortname }} + +feature: + enable-performance-seed: False + +email: + from: "{{ noreply_email }}" + contactEmail: "{{ support_email }}" + serviceDeskEmail: "{{ support_email }}" + supportEmail: "support@surfconext.nl" + environment: "{{ environment_shortname }}" + +manage: + enabled: True + activeManage: TEST + test: + url: {{ openconextaccess.managetest.url }} + user: {{ openconextaccess.managetest.user }} + password: {{ openconextaccess.managetest.password }} + defaultState: prodaccepted + prod: + url: {{ openconextaccess.manageprod.url }} + user: {{ openconextaccess.manageprod.user }} + password: {{ openconextaccess.manageprod.password }} + defaultState: 
testaccepted + # If manage is disabled (e.g. enabled: False) the staticManageDirectory is the directory where the {metadata_type}.json files + # are located. This can also be an absolute file path, e.g. file:///opt/openconext/invite/manage + staticManageDirectory: classpath:/manage + # staticManageDirectory: file:///usr/local/etc/manage + +invite: + enabled: True + url: "https://invite.{{ base_domain }}" + user: {{ invite.access_user }} + password: "{{ invite.access_secret }}" + +# Todo relace with openconextaccess user +statistics: + enabled: True + url: {{ dashboard.stats_url }} + user: {{ dashboard.stats_user }} + password: {{ stats_dashboard_api_password }} + +s3storage: + url: {{ openconextaccess.s3_storage.url }} + key: {{ openconextaccess.s3_storage.key }} + secret: {{ openconextaccess.s3_storage.secret }} + bucket: {{ openconextaccess.s3_storage.bucket }} + +management: + health: + mail: + enabled: false + endpoints: + web: + exposure: + include: "health,info,mappings,metrics" + base-path: "/internal" + endpoint: + info: + access: unrestricted + health: + access: unrestricted + show-details: always + mappings: + access: none + metrics: + access: none + info: + git: + mode: full + +# used by the git plugin +info: + build: + artifact: "@project.artifactId@" + version: "@project.version@" diff --git a/roles/pdp/defaults/main.yml b/roles/pdp/defaults/main.yml index 782635029..902c68c6e 100644 --- a/roles/pdp/defaults/main.yml +++ b/roles/pdp/defaults/main.yml @@ -23,3 +23,5 @@ pdp_spring_flyway_enabled: true pdp_manage_push_testmode: true pdp_docker_networks: - name: loadbalancer +pdp_server_restart_policy: always +pdp_server_restart_retries: 0 diff --git a/roles/pdp/tasks/main.yml b/roles/pdp/tasks/main.yml index 2933eb586..77c9072c9 100644 --- a/roles/pdp/tasks/main.yml +++ b/roles/pdp/tasks/main.yml @@ -27,14 +27,23 @@ - name: openconext_mariadb when: mariadb_in_docker | default(false) | bool +- name: Remove obsolete pdp containers + 
community.docker.docker_container: + name: "{{ item }}" + state: absent + loop: + - "pdpgui" + - "pdpserver" + - name: Create and start the server container community.docker.docker_container: - name: pdpserver + name: pdp env: TZ: "{{ timezone }}" - image: ghcr.io/openconext/openconext-pdp/pdp-server:{{ pdp_server_version }} + image: ghcr.io/openconext/openconext-pdp/pdp-server:{{ pdp_version }} pull: true - restart_policy: "always" + restart_policy: "{{ pdp_server_restart_policy }}" + restart_retries: "{{ pdp_server_restart_retries }}" # Only for restart policy on-failure state: started networks: "{{ pdp_docker_networks }}" mounts: @@ -51,8 +60,8 @@ etc_hosts: host.docker.internal: host-gateway labels: - traefik.http.routers.pdpgui.rule: "Host(`pdp.{{ base_domain }}`)" - traefik.http.routers.pdpgui.tls: "true" + traefik.http.routers.pdp.rule: "Host(`pdp.{{ base_domain }}`)" + traefik.http.routers.pdp.tls: "true" traefik.enable: "true" healthcheck: test: diff --git a/roles/rsyslog/defaults/main.yml b/roles/rsyslog/defaults/main.yml index ba04e46ec..755c27773 100644 --- a/roles/rsyslog/defaults/main.yml +++ b/roles/rsyslog/defaults/main.yml @@ -18,3 +18,25 @@ rsyslog_dir_file_modes: 'dirCreateMode="0755" fileCreateMode="0640" FileGroup="s # host: '' # central syslog server # port: '' # syslog port # peer: '' # CN of the certificate of the central syslog server + +rsyslog_service_dropindir: "/etc/systemd/system/rsyslog.service.d" +rsyslog_certifcate_dir: "/etc/pki/rsyslog" +rsyslog_queue_dir: "/var/spool/rsyslog" + +# these already have appropriate defaults in the template or rsyslog but you can +# change them if necessary, for example on docker hosts they have to be a little higher +# rsyslog_imjournal_ratelimitburst: 2000 +# rsyslog_imjournal_ratelimitinterval: 600 +# rsyslog_maxmessagesize: 8000 + +# rsyslog_imjournal_statefile # default is imjournal.state which means imjournal.state relative to the rsyslog workdir +# rsyslog_workdirectory # default 
/var/spool/rsyslog + +# Empty log check script, optional +rsyslog_enable_warn_empty_script: false +rsyslog_warn_empty_log_recipient: admin@example.com +rsyslog_monitor_for_emptylogs_path: "{{ rsyslog_dir }}/apps/prod_sc" +rsyslog_checkemptylogs_cron_minute: "0" +rsyslog_checkemptylogs_cron_hour: "9" +rsyslog_checkemptylogs_cron_weekdays: "1-5" +rsyslog_checkemptylogs_dir: "/usr/local/bin" diff --git a/roles/rsyslog/handlers/main.yml b/roles/rsyslog/handlers/main.yml index 6500301be..532abdffe 100644 --- a/roles/rsyslog/handlers/main.yml +++ b/roles/rsyslog/handlers/main.yml @@ -3,3 +3,11 @@ service: name: rsyslog state: restarted + +- name: Restart journald + ansible.builtin.systemd_service: + name: systemd-journald + state: restarted + +- name: Reload systemd + ansible.builtin.command: "systemctl daemon-reload" # noqa command-instead-of-module diff --git a/roles/rsyslog/tasks/main.yml b/roles/rsyslog/tasks/main.yml index fb95f34ec..1fc0608dc 100644 --- a/roles/rsyslog/tasks/main.yml +++ b/roles/rsyslog/tasks/main.yml @@ -1,69 +1,110 @@ -- name: install rsyslog - package: +- name: Install rsyslog + ansible.builtin.package: name: - rsyslog - rsyslog-gnutls - rsyslog-relp state: present notify: - - "restart rsyslog" + - "restart rsyslog" -- name: Create the pki directory if it does not exist - file: - path: /etc/pki/rsyslog - state: directory - owner: root - group: root - mode: 0755 +- name: Enable rsyslog + ansible.builtin.service: + name: rsyslog + enabled: true -# We need a key and client certificate to when using RELP -- name: put rsyslog client key - copy: - content: "{{ rsyslogclientkey }}" - dest: "/etc/pki/rsyslog/rsyslogclient.key" - mode: 0400 - owner: root - when: - - "'sysloghost' not in group_names" - no_log: true - notify: - - "restart rsyslog" +- name: Forwarding journalctl to rsyslog forward syslog to central logserver + when: "'sysloghost' not in group_names" + block: -- name: put rsyslog client certificate - copy: - src: "{{ inventory_dir 
}}/files/certs/rsyslog/rsyslogclient.crt" - dest: "/etc/pki/rsyslog/rsyslogclient.crt" - mode: 0744 - owner: root - group: adm - when: - - "'sysloghost' not in group_names" - notify: - - "restart rsyslog" + # journald forwards logs to rsyslog + - name: When using imjournal no forwarding necessary + ansible.builtin.lineinfile: + path: /etc/systemd/journald.conf + search_string: 'ForwardToSyslog=' + line: 'ForwardToSyslog=no' # commenting out ForwardToSyslog=yes still results in enabling the option on debian + notify: + - "Restart journald" -- name: put rsyslog CA file (new location) - copy: - src: "{{ inventory_dir }}/files/certs/rsyslog/rsyslogclientca.crt" - dest: "/etc/pki/rsyslog/rsyslogclientca.crt" - mode: 0744 - owner: root - group: adm - when: - - "'sysloghost' not in group_names" + - name: Remove logging dropin + ansible.builtin.file: + path: "{{ rsyslog_service_dropindir }}/logging.conf" + state: absent + notify: "Reload systemd" -- name: put rsyslog config file for logforwarding - template: - src: "rsyslog_onlyforward.conf.j2" - dest: "/etc/rsyslog.conf" - notify: - - "restart rsyslog" - when: - - "'sysloghost' not in group_names" + # Since we specify queue.spoolDirectory, lets make sure it exists + - name: Create queue dir + ansible.builtin.file: + path: "{{ rsyslog_queue_dir }}" + state: directory + owner: root + group: root + mode: "0700" + + # rsyslog certificates for relp + - name: Create the pki directory if it does not exist + ansible.builtin.file: + path: "{{ rsyslog_certifcate_dir }}" + state: directory + owner: root + group: root + mode: "0700" + + # We need a key and client certificate to when using RELP + - name: Put rsyslog client key + ansible.builtin.copy: + content: "{{ rsyslogclientkey }}" + dest: "{{ rsyslog_certifcate_dir }}/rsyslogclient.key" + mode: "0400" + owner: root + no_log: true + notify: + - "restart rsyslog" + + - name: Put rsyslog client certificate + ansible.builtin.copy: + src: "{{ inventory_dir 
}}/files/certs/rsyslog/rsyslogclient.crt" + dest: "{{ rsyslog_certifcate_dir }}/rsyslogclient.crt" + mode: "0744" + owner: root + group: adm + notify: + - "restart rsyslog" + + - name: Put rsyslog CA file (new location) + ansible.builtin.copy: + src: "{{ inventory_dir }}/files/certs/rsyslog/rsyslogclientca.crt" + dest: "{{ rsyslog_certifcate_dir }}/rsyslogclientca.crt" + mode: "0744" + owner: root + group: adm + + - name: Put rsyslog config file for logforwarding + ansible.builtin.template: + src: "rsyslog_onlyforward.conf.j2" + dest: "/etc/rsyslog.conf" + mode: "0644" + owner: root + group: root + notify: + - "restart rsyslog" + + # Not sure why this is necessary on the forwarding server + # but seems to work + - name: Allow extra port rsyslog_tls_port_t + community.general.seport: + ports: "{{ rsyslog_remote_relp_port }}" + proto: tcp + setype: syslog_tls_port_t + state: present + when: ansible_selinux.mode is defined and ansible_selinux.mode == "enforcing" + +# central logserver -- name: include tasks for central syslog server - include_tasks: rsyslog_central.yml - when: "'sysloghost' in group_names" +- name: Include tasks for central syslog server + ansible.builtin.include_tasks: rsyslog_central.yml + when: "'sysloghost' in group_names" - name: Include tasks for authentication log processing - include_tasks: process_auth_logs.yml + ansible.builtin.include_tasks: process_auth_logs.yml when: "'auth_processor' in group_names" diff --git a/roles/rsyslog/tasks/process_auth_logs.yml b/roles/rsyslog/tasks/process_auth_logs.yml index 213765feb..e62027530 100644 --- a/roles/rsyslog/tasks/process_auth_logs.yml +++ b/roles/rsyslog/tasks/process_auth_logs.yml @@ -2,33 +2,33 @@ - name: Copy the log_logins and lastseen database table definitions copy: src: "{{ item }}" - dest: /tmp/{{ item }} + dest: /var/tmp/{{ item }} owner: root mode: 0744 with_items: - log_logins.sql - lastseen.sql - + - name: Create log_logins table for each log_login environment - mysql_db: + 
community.mysql.mysql_db: name: "{{ item.db_loglogins_name }}" login_user: "{{ item.db_loglogins_user }}" login_password: "{{ item.db_loglogins_password }}" login_host: "{{ item.db_loglogins_host }}" state: import - target: /tmp/log_logins.sql + target: /var/tmp/log_logins.sql changed_when: false with_items: "{{ rsyslog_environments }}" when: item.db_loglogins_name is defined - name: Create lastseen table for each log_login environment - mysql_db: + community.mysql.mysql_db: name: "{{ item.db_lastseen_name }}" login_user: "{{ item.db_lastseen_user }}" login_password: "{{ item.db_lastseen_password }}" login_host: "{{ item.db_lastseen_host }}" state: import - target: /tmp/lastseen.sql + target: /var/tmp/lastseen.sql changed_when: false with_items: "{{ rsyslog_environments }}" when: item.db_loglogins_name is defined @@ -40,7 +40,7 @@ when: ansible_os_family == "Debian" - name: Create a python script that parses log_logins per environment - template: + ansible.builtin.template: src: parse_ebauth_to_mysql.py.j2 dest: /usr/local/sbin/parse_ebauth_to_mysql_{{ item.name }}.py mode: 0740 @@ -50,7 +50,7 @@ when: item.db_loglogins_name is defined - name: Put log_logins logrotate scripts - template: + ansible.builtin.template: src: logrotate_ebauth.j2 dest: /etc/logrotate.d/logrotate_ebauth_{{ item.name }} mode: 0644 @@ -60,7 +60,7 @@ when: item.db_loglogins_name is defined - name: Create logdirectory for log_logins cleanup script - file: + ansible.builtin.file: path: "{{ rsyslog_dir }}/apps/{{ item.name }}/loglogins_cleanup/" state: directory owner: root @@ -70,7 +70,7 @@ when: item.db_loglogins_name is defined - name: Put log_logins cleanup script - template: + ansible.builtin.template: src: clean_loglogins.j2 dest: /usr/local/sbin/clean_loglogins_{{ item.name }} owner: root @@ -80,7 +80,7 @@ when: item.db_loglogins_name is defined - name: Create cronjobs to run the log_logins script - cron: + ansible.builtin.cron: name: Delete old {{ item.name }} log_login data user: root 
minute: "20" diff --git a/roles/rsyslog/tasks/rsyslog_central.yml b/roles/rsyslog/tasks/rsyslog_central.yml index 8d1a56adb..7dbdbac1a 100644 --- a/roles/rsyslog/tasks/rsyslog_central.yml +++ b/roles/rsyslog/tasks/rsyslog_central.yml @@ -14,13 +14,13 @@ - name: Create directory to save the logs file: - path: "{{rsyslog_dir }}" + path: "{{ rsyslog_dir }}" owner: root group: "{{ rsyslog_read_group }}" mode: "0750" recurse: true -- name: put rsyslog client certificate +- name: Put rsyslog client certificate copy: src: "{{ inventory_dir }}/files/certs/rsyslog/rsyslogserver.crt" dest: "/etc/pki/rsyslog/rsyslogserver.crt" @@ -49,28 +49,53 @@ template: src: sc_template.conf.j2 dest: /etc/rsyslog.d/templates/{{ item.name }}.conf + backup: true with_items: "{{ rsyslog_environments }}" + notify: + - "restart rsyslog" - name: Create ruleset configurations template: src: sc_ruleset.conf.j2 dest: /etc/rsyslog.d/rulesets/{{ item.name }}.conf + backup: true with_items: "{{ rsyslog_environments }}" + notify: + - "restart rsyslog" - name: Create sc listener configurations template: src: listener.conf.j2 dest: /etc/rsyslog.d/listeners/{{ item.name }}.conf + backup: true with_items: "{{ rsyslog_environments }}" + notify: + - "restart rsyslog" - name: Create logrotate file for apps and host logs template: src: centralsyslog.j2 dest: /etc/logrotate.d/centralsyslog -- name: put ryslog config file +- name: Put ryslog config file template: src: "rsyslog.conf.j2" dest: "/etc/rsyslog.conf" notify: - "restart rsyslog" + +- name: Put log empty warn script and cronjob + when: rsyslog_enable_warn_empty_script + block: + - name: Put log empty script + ansible.builtin.template: + src: warn-empty-log.sh.j2 + dest: "{{ rsyslog_checkemptylogs_dir }}/warn-empty-log.sh" + backup: True + - name: Create cronjob + ansible.builtin.cron: + name: "check empty logs" + minute: "{{ rsyslog_checkemptylogs_cron_minute }}" + hour: "{{ rsyslog_checkemptylogs_cron_hour }}" + weekday: "{{ 
rsyslog_checkemptylogs_cron_weekdays }}" + job: "{{ rsyslog_checkemptylogs_dir }}/warn-empty-log.sh -m" diff --git a/roles/rsyslog/templates/rsyslog.conf.j2 b/roles/rsyslog/templates/rsyslog.conf.j2 index cdccfde66..d0973e866 100644 --- a/roles/rsyslog/templates/rsyslog.conf.j2 +++ b/roles/rsyslog/templates/rsyslog.conf.j2 @@ -120,6 +120,3 @@ queue.saveonshutdown="on" action.resumeRetryCount="-1" action.resumeInterval="5") {% endif %} - - - diff --git a/roles/rsyslog/templates/rsyslog_onlyforward.conf.j2 b/roles/rsyslog/templates/rsyslog_onlyforward.conf.j2 index e2ed23495..874db8c97 100644 --- a/roles/rsyslog/templates/rsyslog_onlyforward.conf.j2 +++ b/roles/rsyslog/templates/rsyslog_onlyforward.conf.j2 @@ -1,32 +1,52 @@ -# This rsyslog configuration takes logs from journald and forwards them to a remote log serverad="imuxsock") # provides support for local system logging -module(load="imuxsock") +# Where to place auxiliary files +global(workDirectory="{{ rsyslog_workdirectory | default('/var/spool/rsyslog') }}") + +{% if 'docker' in group_names %} +module(load="imptcp") +input(type="imptcp" port="514") +{% endif %} +{% if ansible_os_family == "RedHat" %} +module(load="imuxsock") +{% endif %} +module(load="imjournal" # provides access to the systemd journal + UsePid="system" # PID number is retrieved as the ID of the process the journal entry originates from + StateFile="{{ rsyslog_imjournal_statefile | default('imjournal.state') }}" + ratelimit.interval="{{ rsyslog_imjournal_ratelimitinterval | default('60') }}" + ratelimit.burst="{{ rsyslog_imjournal_ratelimitburst | default('20000') }}") # Reads journald logs module(load="imklog") # provides kernel logging support -module(load="immark" interval="600" ) # provides --MARK-- message capability +module(load="immark" interval="300" ) # provides --MARK-- message capability +module(load="omrelp") + +template(name="CustomRelpFormat" type="string" + string="<%PRI%>%TIMESTAMP% {{ ansible_fqdn }} %syslogtag%%msg%\n") 
$PreserveFQDN on -*.emerg :omusrmsg:* +*.emerg :omusrmsg:* +{% if rsyslog_maxmessagesize is defined %} +$MaxMessageSize {{ rsyslog_maxmessagesize }} +{% endif %} {% if 'sysloghost' not in group_names %} {% for relp_host in relp_remote %} # Logs are forwarded to {{ relp_host.name }} -module(load="omrelp") -action(type="omrelp" -target="{{ relp_host.host }}" -port="{{ relp_host.port }}" -tls="on" -tls.caCert="/etc/pki/rsyslog/rsyslogclientca.crt" +action(type="omrelp" +target="{{ relp_host.host }}" +port="{{ relp_host.port }}" +tls="on" +tls.caCert="/etc/pki/rsyslog/rsyslogclientca.crt" tls.MyCert="/etc/pki/rsyslog/rsyslogclient.crt" tls.MyPrivKey="/etc/pki/rsyslog/rsyslogclient.key" -tls.authmode="name" +tls.authmode="name" tls.permittedpeer=["{{ relp_host.peer }}"] -queue.type="LinkedList" +queue.type="LinkedList" queue.filename="{{ relp_host.name }}" -queue.spoolDirectory="/var/spool/rsyslog" +queue.spoolDirectory="{{ rsyslog_queue_dir }}" queue.maxdiskspace="1G" -queue.saveonshutdown="on" +queue.saveonshutdown="on" action.resumeRetryCount="-1" action.resumeInterval="5" -action.writeAllMarkMessages="on") +action.writeAllMarkMessages="on" +template="CustomRelpFormat") {% endfor %} {% endif %} diff --git a/roles/rsyslog/templates/sc_ruleset.conf.j2 b/roles/rsyslog/templates/sc_ruleset.conf.j2 index 3f7094101..86a0e5457 100644 --- a/roles/rsyslog/templates/sc_ruleset.conf.j2 +++ b/roles/rsyslog/templates/sc_ruleset.conf.j2 @@ -1,7 +1,8 @@ $RuleSet {{ item.name }} {% if item.name != "mgnt_sc" %} if $programname == "engineblock" and $msg startswith " engine" then { action(type="omfile" DynaFile="apache-eb-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -if $programname == "engineblock" and $msg startswith ' {"channel":"authentication"' then { action(type="omfile" DynaFile="ebauth-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } +if $programname == "engineblock" and $msg startswith "engine" then { action(type="omfile" DynaFile="apache-eb-{{ item.name }}" {{ 
rsyslog_dir_file_modes }} ) stop } +if $programname == "engineblock" and $msg contains '{"channel":"authentication"' then { action(type="omfile" DynaFile="ebauth-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "engineblock" { action(type="omfile" DynaFile="eblog-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "EBLOG" { action(type="omfile" DynaFile="eblog-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "Apache-EB" { action(type="omfile" DynaFile="apache-eb-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } @@ -15,10 +16,8 @@ if $programname == "engineblock" and $msg startswith ' {"channel":"authenticatio :programname, isequal, "Apache-EBAPI" { action(type="omfile" DynaFile="apache-eb-api-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "manageserver" { action(type="omfile" DynaFile="manage-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "managegui" { action(type="omfile" DynaFile="apache-manage-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -:programname, isequal, "PDPANALYTICS" { action(type="omfile" DynaFile="pdpanalytics-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -:programname, isequal, "pdpserver" { action(type="omfile" DynaFile="pdp-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -:programname, isequal, "pdpgui" { action(type="omfile" DynaFile="apache-pdp-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -if $programname == "profile" and $msg startswith " {" then { action(type="omfile" DynaFile="profile-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } +:programname, isequal, "pdp" { action(type="omfile" DynaFile="pdp-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } +if $programname == "profile" and $msg startswith "{" then { action(type="omfile" DynaFile="profile-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "profile" { 
action(type="omfile" DynaFile="apache-profile-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "teamsserver" { action(type="omfile" DynaFile="teams-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "teamsgui" { action(type="omfile" DynaFile="apache-teams-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } @@ -26,6 +25,7 @@ if $programname == "profile" and $msg startswith " {" then { action(type="omfile :programname, isequal, "mariadbd" { action(type="omfile" DynaFile="galera-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "garb-systemd" { action(type="omfile" DynaFile="haproxy-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "Keepalived_vrrp" { action(type="omfile" DynaFile="keepalived-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } +:programname, startswith, "mongo" { action(type="omfile" DynaFile="mongo-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "Apache-STATIC" { action(type="omfile" DynaFile="apache-static-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "Apache-METADATA" { action(type="omfile" DynaFile="apache-metadata-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "oidcngserver" { action(type="omfile" DynaFile="oidcng-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } @@ -36,25 +36,25 @@ if $programname == "profile" and $msg startswith " {" then { action(type="omfile :programname, isequal, "myconextjson" { action(type="omfile" DynaFile="myconextjson-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "myconextgui" { action(type="omfile" DynaFile="apache-myconext-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "accountgui" { action(type="omfile" DynaFile="apache-account-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } +:programname, isequal, "servicedeskgui" { action(type="omfile" 
DynaFile="apache-servicedesk-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, startswith, "inviteclient" { action(type="omfile" DynaFile="inviteclient-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, startswith, "invitewelcome" { action(type="omfile" DynaFile="invitewelcome-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, startswith, "inviteserver" { action(type="omfile" DynaFile="inviteserver-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } +:programname, isequal, "invitejson" { action(type="omfile" DynaFile="invitejson-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, startswith, "inviteprovisioningmock" { action(type="omfile" DynaFile="inviteprovisioningmock-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, startswith, "loadbalancer" { action(type="omfile" DynaFile="loadbalancer-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -if $programname == "gateway" and $msg startswith ' {"message":"Second Factor Authenticated"' then { action(type="omfile" DynaFile="stepup-authentication-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -if $programname == "gateway" and $msg startswith ' {"message":"Intrinsic Loa Requested"' then { action(type="omfile" DynaFile="stepup-authentication-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } +if $programname == "gateway" and $msg contains '{"message":"Second Factor Authenticated"' then { action(type="omfile" DynaFile="stepup-authentication-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } +if $programname == "gateway" and $msg contains '{"message":"Intrinsic Loa Requested"' then { action(type="omfile" DynaFile="stepup-authentication-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } {% for stepupapp in stepupapps %} :programname, isequal, "stepup-{{ stepupapp }}" { action(type="omfile" DynaFile="stepup-{{ stepupapp }}-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -if $programname == "{{ stepupapp 
}}" and $msg startswith " {{ stepupapp }}" then { action(type="omfile" DynaFile="apache-{{ stepupapp }}-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } +if $programname == "{{ stepupapp }}" and $msg startswith "{{ stepupapp }}" then { action(type="omfile" DynaFile="apache-{{ stepupapp }}-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "{{ stepupapp }}" { action(type="omfile" DynaFile="stepup-{{ stepupapp }}-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "Apache-{{ stepupapp }}" { action(type="omfile" DynaFile="apache-{{ stepupapp }}-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -if $programname == "{{ stepupapp }}" and $msg startswith " {{ stepupapp }}" then { action(type="omfile" DynaFile="apache-{{ stepupapp }}-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -:programname, isequal, "{{ stepupapp }}" { action(type="omfile" DynaFile="stepup-{{ stepupapp }}-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } {% endfor %} :programname, isequal, "Apache-azuremfa" { action(type="omfile" DynaFile="apache-azure-mfa-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } -if $programname == "spdashboard" and $msg startswith " spdashboard" then { action(type="omfile" DynaFile="apache-spdashboard-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } +if $programname == "spdashboard" and $msg startswith "spdashboard" then { action(type="omfile" DynaFile="apache-spdashboard-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "spdashboard" { action(type="omfile" DynaFile="spdashboard-{{item.name }}" {{ rsyslog_dir_file_modes }} ) stop } :programname, isequal, "stepup-authentication" { action(type="omfile" DynaFile="stepup-authentication-{{ item.name }}" {{ rsyslog_dir_file_modes }} ) stop } diff --git a/roles/rsyslog/templates/sc_template.conf.j2 b/roles/rsyslog/templates/sc_template.conf.j2 index a7fadb6e9..d6b765f0a 100644 --- a/roles/rsyslog/templates/sc_template.conf.j2 
+++ b/roles/rsyslog/templates/sc_template.conf.j2 @@ -21,6 +21,7 @@ $template apache-voot-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/v $template galera-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/galera/galera.log" $template garb-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/galera/galera_garb.log" $template keepalived-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/keepalived/keepalived.log" +$template mongo-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/mongo/mongo.log" $template apache-static-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/static/apache.log" $template apache-eb-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/eb/apache.log" $template eblog-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/eb/eb.log" @@ -35,14 +36,16 @@ $template myconext-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/edui $template myconextjson-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/eduid/myconextjson.log" $template apache-myconext-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/eduid/myconext-apache.log" $template apache-account-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/eduid/account-apache.log" +$template apache-servicedesk-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/servicedeskgui/servicedesk-apache.log $template apache-eduid-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/eduid/eduid-apache.log" $template spdashboard-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/spdashboard/spdashboard.log" $template apache-spdashboard-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/spdashboard/apache.log" -$template inviteclient-{{ item.name }}, "/opt/surfconext/logs/apps/{{ item.name }}/invite//inviteclient.log" -$template invitewelcome-{{ item.name }}, "/opt/surfconext/logs/apps/{{ item.name }}/invite//invitewelcome.log" -$template inviteserver-{{ item.name }}, "/opt/surfconext/logs/apps/{{ item.name 
}}/invite//inviteserver.log" -$template inviteprovisioningmock-{{ item.name }}, "/opt/surfconext/logs/apps/{{ item.name }}/invite//inviteprovisioningmock.log" -$template loadbalancer-{{ item.name }}, "/opt/surfconext/logs/apps/{{ item.name }}/traefik/traefik.log" +$template inviteclient-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/invite//inviteclient.log" +$template invitewelcome-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/invite//invitewelcome.log" +$template inviteserver-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/invite//inviteserver.log" +$template invitejson-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/invite/invitejson.log" +$template inviteprovisioningmock-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/invite//inviteprovisioningmock.log" +$template loadbalancer-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/traefik/traefik.log" {% for stepupapp in stepupapps %} $template stepup-{{ stepupapp }}-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/{{ stepupapp }}/{{ stepupapp }}.log $template apache-{{ stepupapp }}-{{ item.name }}, "{{ rsyslog_dir }}/apps/{{ item.name }}/{{ stepupapp }}/{{ stepupapp }}-apache.log diff --git a/roles/rsyslog/templates/warn-empty-log.sh.j2 b/roles/rsyslog/templates/warn-empty-log.sh.j2 new file mode 100644 index 000000000..aad42fdd1 --- /dev/null +++ b/roles/rsyslog/templates/warn-empty-log.sh.j2 @@ -0,0 +1,80 @@ +#!/bin/bash + +# Script to check for empty log files in {{ rsyslog_monitor_for_emptylogs_path }} +# Usage: ./check_empty_logs.sh [-m] +# -m: send mail if empty files are found + +set -e + +SEND_MAIL=false +RECIPIENT="{{ rsyslog_warn_empty_log_recipient }}" +HOSTNAME=$(hostname --fqdn) + +# Parse command line options +while getopts "m" opt; do + case $opt in + m) + SEND_MAIL=true + ;; + \?) 
+ echo "Invalid option: -$OPTARG" >&2 + exit 1 + ;; + esac +done + +# Find empty log files and read into array +mapfile -t empty_files < <(find {{ rsyslog_monitor_for_emptylogs_path }} -maxdepth 2 -type f -size 0) + +# If no empty files found, exit successfully +{% raw %} +if [ ${#empty_files[@]} -eq 0 ]; then + exit 0 +fi +{% endraw %} + +# Empty files were found +{% raw %} +file_count=${#empty_files[@]} +{% endraw %} + +# Determine singular or plural +if [ $file_count -eq 1 ]; then + file_word="file" + verb="was" +else + file_word="files" + verb="were" +fi + +# Create mail body +mail_body=$(cat <