diff --git a/.github/workflows/helm-docs-updates.yml b/.github/workflows/helm-docs-updates.yml deleted file mode 100644 index 0d70215e146..00000000000 --- a/.github/workflows/helm-docs-updates.yml +++ /dev/null @@ -1,25 +0,0 @@ -name: Update HELM docs for Renovate & Dependabot - -on: - pull_request: - branches: - - master - - dev - - bugfix - - release/** - - hotfix/** - -jobs: - docs_updates: - name: Update documentation - runs-on: ubuntu-latest - if: startsWith(github.head_ref, 'renovate/') || startsWith(github.head_ref, 'dependabot/') - steps: - - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - - - name: Run helm-docs - uses: losisin/helm-docs-github-action@a57fae5676e4c55a228ea654a1bcaec8dd3cf5b5 # v1.6.2 - with: - chart-search-root: "helm/defectdojo" - git-push: true diff --git a/.github/workflows/k8s-tests.yml b/.github/workflows/k8s-tests.yml index 30dc7ab5cff..237c27e4dc5 100644 --- a/.github/workflows/k8s-tests.yml +++ b/.github/workflows/k8s-tests.yml @@ -16,7 +16,7 @@ jobs: # databases, broker and k8s are independent, so we don't need to test each combination # lastest k8s version (https://kubernetes.io/releases/) and the oldest officially supported version # are tested (https://kubernetes.io/releases/) - - k8s: 'v1.34.1' # renovate: datasource=github-releases depName=kubernetes/kubernetes versioning=loose + - k8s: 'v1.34.0' # renovate: datasource=github-releases depName=kubernetes/kubernetes versioning=loose os: debian - k8s: 'v1.31.13' # Do not track with renovate as we likely want to rev this manually os: debian diff --git a/.github/workflows/shellcheck.yml b/.github/workflows/shellcheck.yml index ab338bfa37b..99a51ddcf6d 100644 --- a/.github/workflows/shellcheck.yml +++ b/.github/workflows/shellcheck.yml @@ -2,10 +2,7 @@ name: Shellcheck on: pull_request: -env: - SHELLCHECK_REPO: 'koalaman/shellcheck' - SHELLCHECK_VERSION: 'v0.9.0' # renovate: datasource=github-releases depName=koalaman/shellcheck - 
SHELLCHECK_SHA: '038fd81de6b7e20cc651571362683853670cdc71' # Renovate config is not currently adjusted to update hash - it needs to be done manually for now + jobs: shellcheck: runs-on: ubuntu-latest @@ -13,113 +10,9 @@ jobs: - name: Checkout uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - - name: Grab shellcheck - run: | - set -e - - SHELLCHECK_TARBALL_URL="https://github.com/${SHELLCHECK_REPO}/releases/download/${SHELLCHECK_VERSION}/shellcheck-${SHELLCHECK_VERSION}.linux.x86_64.tar.xz" - SHELLCHECK_TARBALL_LOC="shellcheck.tar.xz" - curl -L "${SHELLCHECK_TARBALL_URL}" -o "${SHELLCHECK_TARBALL_LOC}" - tarball_sha=$(shasum ${SHELLCHECK_TARBALL_LOC} | awk '{print $1}') - if [ "${tarball_sha}" != "${SHELLCHECK_SHA}" ]; then - echo "Got invalid SHA for shellcheck: ${tarball_sha}" - exit 1 - fi - tar -xvf "${SHELLCHECK_TARBALL_LOC}" - cd "shellcheck-${SHELLCHECK_VERSION}" || exit 1 - mv shellcheck "${GITHUB_WORKSPACE}/shellcheck" - - - name: Run shellcheck - shell: bash - run: | - set -o pipefail - - # Make sure we already put the proper shellcheck binary in place - if [ ! -f "./shellcheck" ]; then - echo "shellcheck not found" - exit 1 - fi - - # Make sure we know what to compare the PR's changes against - if [ -z "${GITHUB_BASE_REF}" ]; then - echo "No base reference supplied" - exit 1 - fi - - num_findings=0 - - # Execute shellcheck and add errors based on the output - run_shellcheck() { - local modified_shell_script="${1}" - local findings_file="findings.txt" - - # Remove leftover findings file from previous iterations - if [ -f "${findings_file}" ]; then - rm "${findings_file}" - fi - - echo "Running shellcheck against ${modified_shell_script}..." 
- - # If shellcheck reported no errors (exited with 0 status code), return - if ./shellcheck -f json -S warning "${modified_shell_script}" | jq -c '.[]' > "${findings_file}"; then - return 0 - fi - - # Walk each of the individual findings - while IFS= read -r finding; do - num_findings=$((num_findings+1)) - - line=$(echo "${finding}" | jq '.line') - end_line=$(echo "${finding}" | jq '.endLine') - column=$(echo "${finding}" | jq '.column') - end_column=$(echo "${finding}" | jq '.endColumn') - code=$(echo "${finding}" | jq '.code') - title="SC${code}" - message="$(echo "${finding}" | jq -r '.message') See https://github.com/koalaman/shellcheck/wiki/${title}" - - echo "Line: ${line}" - echo "End line: ${end_line}" - echo "Column: ${column}" - echo "End column: ${end_column}" - echo "Title: ${title}" - echo "Message: ${message}" - - # Raise an error with the file/line/etc - echo "::error file=${modified_shell_script},line=${line},endLine=${end_line},column=${column},endColumn=${end_column},title=${title}::${message}" - done < ${findings_file} - } - - # Find the shell scripts that were created or modified by this PR - find_modified_shell_scripts() { - shell_scripts="shell_scripts.txt" - modified_files="modified_files.txt" - modified_shell_scripts="modified_shell_scripts.txt" - - find . -name "*.sh" -or -name "*.bash" | sed 's#^\./##' > "${shell_scripts}" - git diff --name-only "origin/${GITHUB_BASE_REF}" HEAD > "${modified_files}" - - if [ ! -s "${shell_scripts}" ] || [ ! -s "${modified_files}" ]; then - echo "No modified shell scripts detected" - exit 0 - fi - - if ! 
grep -Fxf "${shell_scripts}" "${modified_files}" > "${modified_shell_scripts}"; then - echo "No modified shell scripts detected" - exit 0 - fi - } - - git fetch origin "${GITHUB_BASE_REF}" || exit 1 - - find_modified_shell_scripts - - # Loop through the modified shell scripts - while IFS= read -r modified_shell_script; do - run_shellcheck "${modified_shell_script}" - done < ${modified_shell_scripts} - - # If shellcheck reported any findings, fail the workflow - if [ ${num_findings} -gt 0 ]; then - echo "shellcheck reported ${num_findings} findings." - exit 1 - fi + - name: Run ShellCheck + uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # 2.0.0 + with: + version: 'v0.11.0' # renovate: datasource=github-releases depName=koalaman/shellcheck versioning=loose + env: + SHELLCHECK_OPTS: -e SC1091 -e SC2086 # TODO: fix following findings diff --git a/.github/workflows/test-helm-chart.yml b/.github/workflows/test-helm-chart.yml index 57d58f019c1..c35698e51e9 100644 --- a/.github/workflows/test-helm-chart.yml +++ b/.github/workflows/test-helm-chart.yml @@ -107,12 +107,25 @@ jobs: steps: - name: Checkout uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - + + - name: Update values in HELM chart + if: startsWith(github.head_ref, 'renovate/') || startsWith(github.head_ref, 'dependabot/') + run: | + yq -i '.annotations."artifacthub.io/changes" += "- kind: changed\n description: ${{ github.event.pull_request.title }}\n"' helm/defectdojo/Chart.yaml + + - name: Run helm-docs (update) + uses: losisin/helm-docs-github-action@a57fae5676e4c55a228ea654a1bcaec8dd3cf5b5 # v1.6.2 + if: startsWith(github.head_ref, 'renovate/') || startsWith(github.head_ref, 'dependabot/') + with: + chart-search-root: "helm/defectdojo" + git-push: true + # Documentation provided in the README file needs to contain the latest information from `values.yaml` and all other related assets. 
# If this step fails, install https://github.com/norwoodj/helm-docs and run locally `helm-docs --chart-search-root helm/defectdojo` before committing your changes. # The helm-docs documentation will be generated for you. - - name: Run helm-docs + - name: Run helm-docs (check) uses: losisin/helm-docs-github-action@a57fae5676e4c55a228ea654a1bcaec8dd3cf5b5 # v1.6.2 + if: ${{ !(startsWith(github.head_ref, 'renovate/') || startsWith(github.head_ref, 'dependabot/')) }} with: fail-on-diff: true chart-search-root: "helm/defectdojo" diff --git a/docs/assets/images/priority_chooseengine.png b/docs/assets/images/priority_chooseengine.png new file mode 100644 index 00000000000..c3211fa0e92 Binary files /dev/null and b/docs/assets/images/priority_chooseengine.png differ diff --git a/docs/assets/images/priority_default.png b/docs/assets/images/priority_default.png new file mode 100644 index 00000000000..65443684094 Binary files /dev/null and b/docs/assets/images/priority_default.png differ diff --git a/docs/assets/images/priority_engine_new.png b/docs/assets/images/priority_engine_new.png new file mode 100644 index 00000000000..9021231e76d Binary files /dev/null and b/docs/assets/images/priority_engine_new.png differ diff --git a/docs/assets/images/priority_sliders.png b/docs/assets/images/priority_sliders.png new file mode 100644 index 00000000000..4fc720ab8a0 Binary files /dev/null and b/docs/assets/images/priority_sliders.png differ diff --git a/docs/assets/images/risk_threshold.png b/docs/assets/images/risk_threshold.png new file mode 100644 index 00000000000..0e0a6215f72 Binary files /dev/null and b/docs/assets/images/risk_threshold.png differ diff --git a/docs/content/en/connecting_your_tools/connectors/connectors_tool_reference.md b/docs/content/en/connecting_your_tools/connectors/connectors_tool_reference.md index 5ef730db7d1..67c6b892e0c 100644 --- a/docs/content/en/connecting_your_tools/connectors/connectors_tool_reference.md +++
b/docs/content/en/connecting_your_tools/connectors/connectors_tool_reference.md @@ -172,6 +172,8 @@ The SonarQube Connector can fetch data from either a SonarCloud account or from 1. Enter the base url of your SonarQube instance in the Location field: for example `https://my.sonarqube.com/` 2. Enter a valid **API key** in the Secret field. This will need to be a **[User](https://docs.sonarsource.com/sonarqube/latest/user-guide/user-account/generating-and-using-tokens/)** [API Token Type](https://docs.sonarsource.com/sonarqube/latest/user-guide/user-account/generating-and-using-tokens/). +The token will need to have access to Projects, Vulnerabilities and Hotspots within Sonar. + API tokens can be found and generated via **My Account \-\> Security \-\> Generate Token** in the SonarQube app. For more information, [see SonarQube documentation](https://docs.sonarsource.com/sonarqube/latest/user-guide/user-account/generating-and-using-tokens/). ## **Snyk** @@ -187,7 +189,7 @@ See the [Snyk API documentation](https://docs.snyk.io/snyk-api) for more info. ## Tenable -The Tenable connector uses the **Tenable.io** REST API to fetch data. +The Tenable connector uses the **Tenable.io** REST API to fetch data. Currently, only vulnerability scans are imported - Web App Scans cannot be imported with the Connector. On\-premise Tenable Connectors are not available at this time. 
diff --git a/docs/content/en/share_your_findings/integrations.md b/docs/content/en/share_your_findings/integrations.md index ea18f545b02..e2dd663a9f4 100644 --- a/docs/content/en/share_your_findings/integrations.md +++ b/docs/content/en/share_your_findings/integrations.md @@ -9,7 +9,7 @@ Supported Integrations: - [Azure Devops](/en/share_your_findings/integrations_toolreference/#azure-devops-boards) - [GitHub](/en/share_your_findings/integrations_toolreference/#github) - [GitLab Boards](/en/share_your_findings/integrations_toolreference/#gitlab) -- ServiceNow (Coming Soon) +- [ServiceNow](/en/share_your_findings/integrations_toolreference/#servicenow) ## Opening the Integrations page diff --git a/docs/content/en/share_your_findings/integrations_toolreference.md b/docs/content/en/share_your_findings/integrations_toolreference.md index 68799f6bdca..e8c36e4b51c 100644 --- a/docs/content/en/share_your_findings/integrations_toolreference.md +++ b/docs/content/en/share_your_findings/integrations_toolreference.md @@ -1,6 +1,6 @@ --- title: "Integrators Tool Reference" -description: "Beta Feature" +description: "Detailed setup guides for Integrators" weight: 1 --- @@ -101,7 +101,7 @@ The GitLab integration allows you to add issues to a [GitLab Project](https://do ### Issue Tracker Mapping -- **Project Name**: The name of the project in GitLab that you want to send issues to +- **Project Name**: The name of the project in GitLab that you want to send issues to. ### Severity Mapping Details @@ -122,3 +122,62 @@ By default, GitLab has statuses of 'opened' and 'closed'. Additional status lab - **Closed Mapping**: `closed` - **False Positive Mapping**: `closed` - **Risk Accepted Mapping**: `closed` + +## ServiceNow + +The ServiceNow Integration allows you to push DefectDojo Findings as ServiceNow Incidents. 
+ +### Instance Setup + +Your ServiceNow instance will require you to obtain a Refresh Token, associated with the User or Service account that will push Incidents to ServiceNow. + +You'll need to start by creating an OAuth registration on your ServiceNow instance for DefectDojo: + +1. In the left-hand navigation bar, search for “Application Registry” and select it. +2. Click “New”. +3. Choose “Create an OAuth API endpoint for external clients”. +4. Fill in the required fields: + * Name: Provide a meaningful name for your application (e.g., Vulnerability Integration Client). + * (Optional) Adjust the Token Lifespan: + * Access Token Lifespan: Default is 1800 seconds (30 minutes). + * Refresh Token Lifespan: The default is 8640000 seconds (approximately 100 days). +5. Click Submit to create the application record. +6. After submission, select the application from the list and take note of the **Client ID and Client Secret** fields. + +You will then need to use this registration to obtain a Refresh Token, which can only be obtained through the ServiceNow API. Open a terminal window and paste the following (substituting the variables wrapped in `{{}}` with your user's actual information) + +``` +curl --request POST \ + --url {{INSTANCE_HOST}}/oauth_token.do \ + --header 'content-type: application/x-www-form-urlencoded' \ + --data grant_type=password \ + --data 'client_id={{CLIENT_ID}}' \ + --data 'client_secret={{CLIENT_SECRET}}' \ + --data 'username={{USERNAME}}' \ + --data 'password={{PASSWORD}}' + ``` + +If your ServiceNow credentials are correct, and allow for admin level-access to ServiceNow, you should receive a response with a RefreshToken. You'll need that token to complete integration with DefectDojo. + +- **Instance Label** should be the label that you want to use to identify this integration. +- **Location** should be set to the URL for your ServiceNow server, for example `https://your-organization.service-now.com/`. 
+- **Refresh Token** is where the Refresh Token should be entered. +- **Client ID** should be the Client ID set in the OAuth App Registration. +- **Client Secret** should be the Client Secret set in the OAuth App Registration. + +### Severity Mapping Details + +This maps to the ServiceNow Impact field. +- **Info Mapping**: `1` +- **Low Mapping**: `1` +- **Medium Mapping**: `2` +- **High Mapping**: `3` +- **Critical Mapping**: `3` + +### Status Mapping Details + +- **Status Field Name**: `State` +- **Active Mapping**: `New` +- **Closed Mapping**: `Closed` +- **False Positive Mapping**: `Resolved` +- **Risk Accepted Mapping**: `Resolved` diff --git a/docs/content/en/working_with_findings/finding_priority.md b/docs/content/en/working_with_findings/finding_priority.md index 646d4e83765..caff9c56bae 100644 --- a/docs/content/en/working_with_findings/finding_priority.md +++ b/docs/content/en/working_with_findings/finding_priority.md @@ -58,8 +58,9 @@ High. Criticality is a subjective field, so when assigning this field, consider Product compares to other Products in your organization. * **User Records** is a numerical estimation of user records in a database (or a system that can access that database). -* **Revenue** is a numerical estimation of annual revenue for the Product. It is not -possible to set a currency type in DefectDojo, so make sure that all of your Revenue +* **Revenue** is a numerical estimation of annual revenue for the Product. To calculate Priority, DefectDojo will calculate a percentage by comparing this Product's revenue to the sum of all Products within the Product Type. + +It is not possible to set a currency type in DefectDojo, so make sure that all of your Revenue estimations have the same currency denomination. (“50000” could mean $50,000 US Dollars or ¥50,000 Japanese Yen - the denomination does not matter as long as all of your Products have revenue calculated in the same currency).
@@ -85,9 +86,6 @@ Findings within a Product can have additional metadata which can further adjust * Whether the Finding is in the KEV (Known Exploited Vulnerabilities) database, which is checked by DefectDojo on a regular basis * The tool-reported Severity of a Finding (Info, Low, Medium, High, Critical) -Currently, Priority calculation and the underlying formula cannot be adjusted. These -numbers are meant as a reference only - your team’s actual priority for remediation -may vary from the DefectDojo calculation. ## Finding Risk Calculation diff --git a/docs/content/en/working_with_findings/priority_adjustments.md b/docs/content/en/working_with_findings/priority_adjustments.md new file mode 100644 index 00000000000..2ea030b61c2 --- /dev/null +++ b/docs/content/en/working_with_findings/priority_adjustments.md @@ -0,0 +1,62 @@ +--- +title: "Adjusting Priority and Risk (Pro)" +description: "Change weighting of Priority and Risk calculations" +weight: 2 +--- + +DefectDojo Pro's Priority and Risk calculations can be adjusted, allowing you to tailor DefectDojo Pro to match your internal standards for Finding Priority and Risk. + +## Prioritization Engines + +Similar to SLA configurations, Prioritization Engines allow you to set the rules governing how Priority and Risk are calculated. + +![image](images/priority_default.png) + +DefectDojo comes with a built-in Prioritization Engine, which is applied to all Products. However, you can edit this Prioritization Engine to change the weighting of **Finding** and **Product** multipliers, which will adjust how Finding Priority and Risk are assigned. + +### Finding Multipliers + +Eight contextual factors impact the Priority score of a Finding. Three of these are Finding-specific, and the other five are assigned based on the Product that holds the Finding. + +You can tune your Prioritization Engine by adjusting how these factors are applied to the final calculation. 
+ +![image](images/priority_sliders.png) + +Select a factor by clicking its button, then adjust the slider to control the percentage at which that factor is applied. As you adjust the slider, you'll see the Risk thresholds change as a result. + +#### Finding-Level Multipliers + +* **Severity** - a Finding's Severity level +* **Exploitability** - a Finding's KEV and/or EPSS score +* **Endpoints** - the amount of Endpoints associated with a Finding + +#### Product-Level Multipliers + +* **Business Criticality** - the related Product's Business Criticality (None, Very Low, Low, Medium, High, or Very +High) +* **User Records** - the related Product's User Records count +* **Revenue** - the related Product's revenue, relative to the total revenue of the Product Type +* **External Audience** - whether or not the related Product has an external audience +* **Internet Accessible** - whether or not the related Product is internet accessible + +### Risk Thresholds + +Based on the tuning of the Priority Engine, DefectDojo will automatically recommend Risk Thresholds. However, these thresholds can be adjusted as well and set to whatever values you deem appropriate. + +![image](images/risk_threshold.png) + +## Creating New Prioritization Engines + +You can use multiple Prioritization Engines, which can each be assigned to different Products. + +![image](images/priority_engine_new.png) + +Creating a new Prioritization Engine will open the Prioritization Engine form. Once this form is submitted, a new Prioritization Engine will be added to the table. + +## Assigning Prioritization Engines to Products + +Each Product can have a Prioritization Engine assigned via the **Edit Product** form for that Product.
+ +![image](images/priority_chooseengine.png) + +Note that when a Product's Prioritization Engine is changed, or a Prioritization Engine is updated, the Product's Prioritization Engine or the Prioritization Engine itself will be "Locked" until the prioritization calculation has completed. \ No newline at end of file diff --git a/docs/package-lock.json b/docs/package-lock.json index f2025f915ba..26c62b5a377 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -9,18 +9,18 @@ "version": "1.8.0", "license": "MIT", "dependencies": { - "@docsearch/css": "^4.2.0", - "@docsearch/js": "^4.2.0", - "@tabler/icons": "^3.34.1", - "@thulite/doks-core": "^1.8.3", - "@thulite/images": "^3.3.1", - "@thulite/inline-svg": "^1.2.0", - "@thulite/seo": "^2.4.1", - "thulite": "^2.6.3" + "@docsearch/css": "4.2.0", + "@docsearch/js": "4.2.0", + "@tabler/icons": "3.35.0", + "@thulite/doks-core": "1.8.3", + "@thulite/images": "3.3.3", + "@thulite/inline-svg": "1.2.1", + "@thulite/seo": "2.4.2", + "thulite": "2.6.3" }, "devDependencies": { - "prettier": "^3.6.2", - "vite": "^7.0.6" + "prettier": "3.6.2", + "vite": "7.1.11" }, "engines": { "node": ">=20.11.0" @@ -2120,7 +2120,6 @@ "resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.8.tgz", "integrity": "sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A==", "license": "MIT", - "peer": true, "funding": { "type": "opencollective", "url": "https://opencollective.com/popperjs" @@ -2727,7 +2726,6 @@ } ], "license": "MIT", - "peer": true, "dependencies": { "baseline-browser-mapping": "^2.8.19", "caniuse-lite": "^1.0.30001751", @@ -3799,7 +3797,6 @@ } ], "license": "MIT", - "peer": true, "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", @@ -4456,7 +4453,6 @@ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "license": "MIT", - 
"peer": true, "engines": { "node": ">=12" }, @@ -4660,7 +4656,6 @@ "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=12" }, diff --git a/docs/package.json b/docs/package.json index 6bbc6290c89..69785ab15ee 100644 --- a/docs/package.json +++ b/docs/package.json @@ -16,18 +16,18 @@ "preview": "vite preview --outDir public" }, "dependencies": { - "@docsearch/css": "^4.2.0", - "@docsearch/js": "^4.2.0", - "@tabler/icons": "^3.34.1", - "@thulite/doks-core": "^1.8.3", - "@thulite/images": "^3.3.1", - "@thulite/inline-svg": "^1.2.0", - "@thulite/seo": "^2.4.1", - "thulite": "^2.6.3" + "@docsearch/css": "4.2.0", + "@docsearch/js": "4.2.0", + "@tabler/icons": "3.35.0", + "@thulite/doks-core": "1.8.3", + "@thulite/images": "3.3.3", + "@thulite/inline-svg": "1.2.1", + "@thulite/seo": "2.4.2", + "thulite": "2.6.3" }, "devDependencies": { - "prettier": "^3.6.2", - "vite": "^7.0.6" + "prettier": "3.6.2", + "vite": "7.1.11" }, "engines": { "node": ">=20.11.0" diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py index bff663173d3..2b2c7a36d2e 100644 --- a/dojo/api_v2/views.py +++ b/dojo/api_v2/views.py @@ -934,6 +934,8 @@ def close(self, request, pk=None): context={"request": request}, ) if finding_close.is_valid(): + # Remove the prefetched tags to avoid issues with delegating to celery + finding.tags._remove_prefetched_objects() # Use shared helper to perform close operations finding_helper.close_finding( finding=finding, diff --git a/dojo/middleware.py b/dojo/middleware.py index 5d63b1a35a0..5b50f3cc987 100644 --- a/dojo/middleware.py +++ b/dojo/middleware.py @@ -6,13 +6,18 @@ from urllib.parse import quote import pghistory.middleware +import requests from auditlog.context import set_actor from auditlog.middleware import AuditlogMiddleware as _AuditlogMiddleware from django.conf import settings +from django.contrib import messages from 
django.db import models from django.http import HttpResponseRedirect +from django.shortcuts import redirect from django.urls import reverse from django.utils.functional import SimpleLazyObject +from social_core.exceptions import AuthCanceled, AuthFailed, AuthForbidden +from social_django.middleware import SocialAuthExceptionMiddleware from watson.middleware import SearchContextMiddleware from watson.search import search_context_manager @@ -75,6 +80,28 @@ def __call__(self, request): return self.get_response(request) +class CustomSocialAuthExceptionMiddleware(SocialAuthExceptionMiddleware): + def process_exception(self, request, exception): + if isinstance(exception, requests.exceptions.RequestException): + messages.error(request, "Please use the standard login below.") + return redirect("/login?force_login_form") + if isinstance(exception, AuthCanceled): + messages.warning(request, "Social login was canceled. Please try again or use the standard login.") + return redirect("/login?force_login_form") + if isinstance(exception, AuthFailed): + messages.error(request, "Social login failed. Please try again or use the standard login.") + return redirect("/login?force_login_form") + if isinstance(exception, AuthForbidden): + messages.error(request, "You are not authorized to log in via this method. Please contact support or use the standard login.") + return redirect("/login?force_login_form") + if isinstance(exception, TypeError) and "'NoneType' object is not iterable" in str(exception): + logger.warning("OIDC login error: NoneType is not iterable") + messages.error(request, "An unexpected error occurred during social login. 
Please use the standard login.") + return redirect("/login?force_login_form") + logger.error(f"Unhandled exception during social login: {exception}") + return super().process_exception(request, exception) + + class DojoSytemSettingsMiddleware: _thread_local = local() diff --git a/dojo/notifications/helper.py b/dojo/notifications/helper.py index f59060331d1..c4458daec01 100644 --- a/dojo/notifications/helper.py +++ b/dojo/notifications/helper.py @@ -627,6 +627,10 @@ def __init__(self, *args: list, **kwargs: dict) -> None: def create_notification(self, event: str | None = None, **kwargs: dict) -> None: # Process the notifications for a given list of recipients if kwargs.get("recipients") is not None: + recipients = kwargs.get("recipients", []) + if not recipients: + logger.debug("No recipients provided for event: %s", event) + return self._process_recipients(event=event, **kwargs) else: logger.debug("creating system notifications for event: %s", event) diff --git a/dojo/pipeline.py b/dojo/pipeline.py index 888cce0ba06..8aaea4079bb 100644 --- a/dojo/pipeline.py +++ b/dojo/pipeline.py @@ -183,5 +183,6 @@ def sanitize_username(username): def create_user(strategy, details, backend, user=None, *args, **kwargs): if not settings.SOCIAL_AUTH_CREATE_USER: return None - details["username"] = sanitize_username(details.get("username")) + username = details.get(settings.SOCIAL_AUTH_CREATE_USER_MAPPING) + details["username"] = sanitize_username(username) return social_core.pipeline.user.create_user(strategy, details, backend, user, args, kwargs) diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index 2ca0c60b462..97fdd706ea4 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -113,6 +113,7 @@ DD_FORGOT_USERNAME=(bool, True), # do we show link "I forgot my username" on login screen DD_SOCIAL_AUTH_SHOW_LOGIN_FORM=(bool, True), # do we show user/pass input DD_SOCIAL_AUTH_CREATE_USER=(bool, True), # if True creates user at 
first login + DD_SOCIAL_AUTH_CREATE_USER_MAPPING=(str, "username"), # could also be email or fullname DD_SOCIAL_LOGIN_AUTO_REDIRECT=(bool, False), # auto-redirect if there is only one social login method DD_SOCIAL_AUTH_TRAILING_SLASH=(bool, True), DD_SOCIAL_AUTH_OIDC_AUTH_ENABLED=(bool, False), @@ -576,6 +577,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param SHOW_LOGIN_FORM = env("DD_SOCIAL_AUTH_SHOW_LOGIN_FORM") SOCIAL_LOGIN_AUTO_REDIRECT = env("DD_SOCIAL_LOGIN_AUTO_REDIRECT") SOCIAL_AUTH_CREATE_USER = env("DD_SOCIAL_AUTH_CREATE_USER") +SOCIAL_AUTH_CREATE_USER_MAPPING = env("DD_SOCIAL_AUTH_CREATE_USER_MAPPING") SOCIAL_AUTH_STRATEGY = "social_django.strategy.DjangoStrategy" SOCIAL_AUTH_STORAGE = "social_django.models.DjangoStorage" @@ -941,7 +943,7 @@ def generate_url(scheme, double_slashes, user, password, host, port, path, param "django.middleware.clickjacking.XFrameOptionsMiddleware", "dojo.middleware.LoginRequiredMiddleware", "dojo.middleware.AdditionalHeaderMiddleware", - "social_django.middleware.SocialAuthExceptionMiddleware", + "dojo.middleware.CustomSocialAuthExceptionMiddleware", "crum.CurrentRequestUserMiddleware", "dojo.middleware.AuditlogMiddleware", "dojo.middleware.AsyncSearchContextMiddleware", @@ -1895,6 +1897,7 @@ def saml2_attrib_map_format(din): "KB": "https://support.hcl-software.com/csm?id=kb_article&sysparm_article=", # e.g. https://support.hcl-software.com/csm?id=kb_article&sysparm_article=KB0108401 "KHV": "https://avd.aquasec.com/misconfig/kubernetes/", # e.g. https://avd.aquasec.com/misconfig/kubernetes/khv045 "LEN-": "https://support.lenovo.com/cl/de/product_security/", # e.g. https://support.lenovo.com/cl/de/product_security/LEN-94953 + "MAL-": "https://cvepremium.circl.lu/vuln/", # e.g. https://cvepremium.circl.lu/vuln/mal-2025-49305 "MGAA-": "https://advisories.mageia.org/&&.html", # e.g. https://advisories.mageia.org/MGAA-2013-0054.html "MGASA-": "https://advisories.mageia.org/&&.html", # e.g. 
https://advisories.mageia.org/MGASA-2025-0023.html "MSRC_": "https://cvepremium.circl.lu/vuln/", # e.g. https://cvepremium.circl.lu/vuln/msrc_cve-2025-59200 diff --git a/dojo/templates/dojo/finding_related_row.html b/dojo/templates/dojo/finding_related_row.html index ba5336570ab..d02e884100b 100644 --- a/dojo/templates/dojo/finding_related_row.html +++ b/dojo/templates/dojo/finding_related_row.html @@ -13,8 +13,8 @@ {% else %} Similar {% endif %} - - + + {{ similar_finding.severity_display }} diff --git a/dojo/templates/notifications/alert/scan_added_empty.tpl b/dojo/templates/notifications/alert/scan_added_empty.tpl deleted file mode 120000 index 03390a2d58d..00000000000 --- a/dojo/templates/notifications/alert/scan_added_empty.tpl +++ /dev/null @@ -1 +0,0 @@ -{% include "notifications/alert/scan_added.tpl" %} \ No newline at end of file diff --git a/dojo/templates/notifications/alert/scan_added_empty.tpl b/dojo/templates/notifications/alert/scan_added_empty.tpl new file mode 100644 index 00000000000..6d749556aa2 --- /dev/null +++ b/dojo/templates/notifications/alert/scan_added_empty.tpl @@ -0,0 +1 @@ +{% include "notifications/alert/scan_added.tpl" %} diff --git a/dojo/tools/cyclonedx/json_parser.py b/dojo/tools/cyclonedx/json_parser.py index a53b9dd799d..73cda102c1f 100644 --- a/dojo/tools/cyclonedx/json_parser.py +++ b/dojo/tools/cyclonedx/json_parser.py @@ -36,7 +36,10 @@ def _get_findings_json(self, file, test): # better than always 'Medium' ratings = vulnerability.get("ratings") if ratings: - severity = ratings[0]["severity"] + # Determine if we can use the severity field + # In some cases, the severity field is missing, so we can rely on either the Medium severity + # or the CVSS vector (retrieved further down below) to determine the severity: + severity = ratings[0].get("severity", "Medium") severity = Cyclonedxhelper().fix_severity(severity) else: severity = "Medium" diff --git a/dojo/tools/wazuh/v4_7.py b/dojo/tools/wazuh/v4_7.py index
1357571d0d5..661dfd3c5a7 100644 --- a/dojo/tools/wazuh/v4_7.py +++ b/dojo/tools/wazuh/v4_7.py @@ -25,6 +25,19 @@ def parse_findings(self, test, data): agent_ip = item.get("agent_ip") detection_time = item.get("detection_time").split("T")[0] + # Map Wazuh severity to its equivalent in DefectDojo + SEVERITY_MAP = { + "Critical": "Critical", + "High": "High", + "Medium": "Medium", + "Low": "Low", + "Info": "Info", + "Informational": "Info", + "Untriaged": "Info", + } + # Get DefectDojo severity and default to "Info" if severity is not in the mapping + severity = SEVERITY_MAP.get(severity, "Info") + references = "\n".join(links) if links else None title = ( diff --git a/dojo/tools/wazuh/v4_8.py b/dojo/tools/wazuh/v4_8.py index 636ee0210d5..2031c759986 100644 --- a/dojo/tools/wazuh/v4_8.py +++ b/dojo/tools/wazuh/v4_8.py @@ -25,6 +25,19 @@ def parse_findings(self, test, data): detection_time = vuln.get("detected_at").split("T")[0] references = vuln.get("reference") + # Map Wazuh severity to its equivalent in DefectDojo + SEVERITY_MAP = { + "Critical": "Critical", + "High": "High", + "Medium": "Medium", + "Low": "Low", + "Info": "Info", + "Informational": "Info", + "Untriaged": "Info", + } + # Get DefectDojo severity and default to "Info" if severity is not in the mapping + severity = SEVERITY_MAP.get(severity, "Info") + title = ( cve + " affects (version: " + item.get("package").get("version") + ")" ) diff --git a/unittests/scans/cyclonedx/no-severity.json b/unittests/scans/cyclonedx/no-severity.json new file mode 100644 index 00000000000..ed12833bc5c --- /dev/null +++ b/unittests/scans/cyclonedx/no-severity.json @@ -0,0 +1,35 @@ +{ + "bomFormat": "CycloneDX", + "specVersion": "1.4", + "version": 1, + "metadata": { + "timestamp": "2025-10-28T14:38:10Z" + }, + "vulnerabilities": [ + { + "id": "CVE-2021-44228", + "source": { + "name": "NVD", + "url": "https://nvd.nist.gov/vuln/detail/CVE-2021-44228" + }, + "ratings": [ + { + "source": { + "name": "NVD", + "url": 
"https://nvd.nist.gov/vuln/detail/CVE-2021-44228" + }, + "score": 10.0, + "method": "CVSSv3", + "vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H" + } + ], + "created": "2025-09-05T05:05:47Z", + "updated": "2025-03-03T16:51:00Z", + "affects": [ + { + "ref": "gerbwetbqt" + } + ] + } + ] +} diff --git a/unittests/scans/wazuh/wazuh_abnormal_severity.json b/unittests/scans/wazuh/wazuh_abnormal_severity.json new file mode 100644 index 00000000000..7a35f00c559 --- /dev/null +++ b/unittests/scans/wazuh/wazuh_abnormal_severity.json @@ -0,0 +1,80 @@ +{ + "took": 8, + "timed_out": false, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + }, + "hits": { + "total": { + "value": 125, + "relation": "eq" + }, + "max_score": 5.596354, + "hits": [ + { + "_index": "wazuh-states-vulnerabilities-wazuh-server", + "_id": "001_c2f8c1a3b6e902b4c6d8e0g7a4b6c5d0e2b4a6n5_CVE-2025-27558", + "_score": 5.596323, + "_source": { + "agent": { + "id": "001", + "name": "myhost0", + "type": "Wazuh", + "version": "v4.11.1" + }, + "host": { + "os": { + "full": "Ubuntu 24.04.2 LTS", + "kernel": "6.8.0-62-generic", + "name": "Ubuntu", + "platform": "ubuntu", + "type": "ubuntu", + "version": "24.04.2" + } + }, + "package": { + "architecture": "amd64", + "description": "Signed kernel image generic", + "name": "linux-image-6.8.0-60-generic", + "size": 15025152, + "type": "deb", + "version": "6.8.0-60.63" + }, + "vulnerability": { + "category": "Packages", + "classification": "-", + "description": "IEEE P603.12-REVme D1.2 through D7.1 allows FragAttacks against meshnetworks. In mesh networks using Wi-Fi Protected Access (WPA, WPA2, orWPA3) or Wired Equivalent Privacy (WEP), an adversary can exploit thisvulnerability to inject arbitrary frames towards devices that supportreceiving non-SSP A-MSDU frames. NOTE: this issue exists because of anincorrect fix for CVE-2020-24588. 
P802.11-REVme, as of early 2025, is aplanned release of the 802.11 standard.", + "detected_at": "2025-05-25T17:07:15.204Z", + "enumeration": "CVE", + "id": "CVE-2025-27558", + "published_at": "2025-04-22T19:16:08Z", + "reference": "https://ubuntu.com/security/CVE-2025-27558, https://www.cve.org/CVERecord?id=CVE-2025-27558", + "scanner": { + "condition": "Package default status", + "reference": "https://cti.wazuh.com/vulnerabilities/cves/CVE-2025-27558", + "source": "Canonical Security Tracker", + "vendor": "Wazuh" + }, + "score": { + "base": 9.1, + "version": "3.1" + }, + "severity": "-", + "under_evaluation": false + }, + "wazuh": { + "cluster": { + "name": "wazuh-server" + }, + "schema": { + "version": "1.0.0" + } + } + } + } + ] + } + } \ No newline at end of file diff --git a/unittests/test_social_auth_failure_handling.py b/unittests/test_social_auth_failure_handling.py new file mode 100644 index 00000000000..0cf55f8d860 --- /dev/null +++ b/unittests/test_social_auth_failure_handling.py @@ -0,0 +1,152 @@ +from unittest.mock import patch + +from django.contrib import messages +from django.contrib.auth.models import AnonymousUser +from django.contrib.messages.storage.fallback import FallbackStorage +from django.contrib.sessions.middleware import SessionMiddleware +from django.http import HttpResponse +from django.test import RequestFactory, override_settings +from requests.exceptions import ConnectionError as RequestsConnectionError +from social_core.exceptions import AuthCanceled, AuthFailed, AuthForbidden + +from dojo.middleware import CustomSocialAuthExceptionMiddleware + +from .dojo_test_case import DojoTestCase + + +class TestSocialAuthMiddlewareUnit(DojoTestCase): + + """ + Unit tests: + Directly test CustomSocialAuthExceptionMiddleware behavior + by simulating exceptions (ConnectionError, AuthCanceled, AuthFailed, AuthForbidden), + without relying on actual backend configuration or whether the + /complete// URLs are registered and accessible. 
+ """ + + def setUp(self): + self.factory = RequestFactory() + self.middleware = CustomSocialAuthExceptionMiddleware(lambda *_: HttpResponse("OK")) + + def _prepare_request(self, path): + request = self.factory.get(path) + request.user = AnonymousUser() + SessionMiddleware(lambda *_: None).process_request(request) + request.session.save() + request._messages = FallbackStorage(request) + return request + + def test_social_auth_exception_redirects_to_login(self): + login_paths = [ + "/login/oidc/", + "/login/auth0/", + "/login/google-oauth2/", + "/login/okta-oauth2/", + "/login/azuread-tenant-oauth2/", + "/login/gitlab/", + "/login/keycloak-oauth2/", + "/login/github/", + ] + exceptions = [ + (RequestsConnectionError("Host unreachable"), "Please use the standard login below."), + (AuthCanceled("User canceled login"), "Social login was canceled. Please try again or use the standard login."), + (AuthFailed("Token exchange failed"), "Social login failed. Please try again or use the standard login."), + (AuthForbidden("User not allowed"), "You are not authorized to log in via this method. 
Please contact support or use the standard login."), + ] + for path in login_paths: + for exception, expected_message in exceptions: + with self.subTest(path=path, exception=type(exception).__name__): + request = self._prepare_request(path) + response = self.middleware.process_exception(request, exception) + self.assertEqual(response.status_code, 302) + self.assertEqual(response.url, "/login?force_login_form") + storage = list(messages.get_messages(request)) + self.assertTrue(any(expected_message in str(msg) for msg in storage)) + + def test_non_social_auth_path_still_redirects_on_auth_exception(self): + """Ensure middleware handles AuthFailed even on unrelated paths.""" + request = self._prepare_request("/some/other/path/") + exception = AuthFailed("Should be handled globally") + response = self.middleware.process_exception(request, exception) + self.assertEqual(response.status_code, 302) + self.assertEqual(response.url, "/login?force_login_form") + storage = list(messages.get_messages(request)) + self.assertTrue(any("Social login failed. Please try again or use the standard login." in str(msg) for msg in storage)) + + def test_non_social_auth_path_redirects_on_auth_forbidden(self): + """Ensure middleware handles AuthForbidden even on unrelated paths.""" + request = self._prepare_request("/some/other/path/") + exception = AuthForbidden("User not allowed") + response = self.middleware.process_exception(request, exception) + self.assertEqual(response.status_code, 302) + self.assertEqual(response.url, "/login?force_login_form") + storage = list(messages.get_messages(request)) + self.assertTrue(any("You are not authorized to log in via this method." 
in str(msg) for msg in storage)) + + def test_type_error_none_type_iterable_redirect(self): + """Ensure middleware catches 'NoneType' object is not iterable TypeError and redirects.""" + request = self._prepare_request("/login/oidc/") + exception = TypeError("'NoneType' object is not iterable") + response = self.middleware.process_exception(request, exception) + self.assertEqual(response.status_code, 302) + self.assertEqual(response.url, "/login?force_login_form") + storage = list(messages.get_messages(request)) + self.assertTrue(any("An unexpected error occurred during social login." in str(msg) for msg in storage)) + + +@override_settings( + AUTHENTICATION_BACKENDS=( + "social_core.backends.github.GithubOAuth2", + "social_core.backends.gitlab.GitLabOAuth2", + "social_core.backends.keycloak.KeycloakOAuth2", + "social_core.backends.azuread_tenant.AzureADTenantOAuth2", + "social_core.backends.auth0.Auth0OAuth2", + "social_core.backends.okta.OktaOAuth2", + "social_core.backends.open_id_connect.OpenIdConnectAuth", + "django.contrib.auth.backends.ModelBackend", + ), +) +class TestSocialAuthIntegrationFailures(DojoTestCase): + + """ + Integration tests: + Simulate social login failures by calling /complete// URLs + and mocking auth_complete() to raise AuthFailed, AuthCanceled, and AuthForbidden. + Verifies that the middleware is correctly integrated and handles backend failures. 
+ """ + + BACKEND_CLASS_PATHS = { + "github": "social_core.backends.github.GithubOAuth2", + "gitlab": "social_core.backends.gitlab.GitLabOAuth2", + "keycloak": "social_core.backends.keycloak.KeycloakOAuth2", + "azuread-tenant-oauth2": "social_core.backends.azuread_tenant.AzureADTenantOAuth2", + "auth0": "social_core.backends.auth0.Auth0OAuth2", + "okta-oauth2": "social_core.backends.okta.OktaOAuth2", + "oidc": "social_core.backends.open_id_connect.OpenIdConnectAuth", + } + + def _test_backend_exception(self, backend_slug, exception, expected_message): + backend_class_path = self.BACKEND_CLASS_PATHS[backend_slug] + with patch(f"{backend_class_path}.auth_complete", side_effect=exception): + response = self.client.get(f"/complete/{backend_slug}/", follow=True) + self.assertEqual(response.status_code, 200) + self.assertContains(response, expected_message) + + def test_all_backends_auth_failed(self): + for backend in self.BACKEND_CLASS_PATHS: + with self.subTest(backend=backend): + self._test_backend_exception(backend, AuthFailed(backend=None), "Social login failed. Please try again or use the standard login.") + + def test_all_backends_auth_canceled(self): + for backend in self.BACKEND_CLASS_PATHS: + with self.subTest(backend=backend): + self._test_backend_exception(backend, AuthCanceled(backend=None), "Social login was canceled. Please try again or use the standard login.") + + def test_all_backends_auth_forbidden(self): + for backend in self.BACKEND_CLASS_PATHS: + with self.subTest(backend=backend): + self._test_backend_exception( + backend, + AuthForbidden(backend=None), + "You are not authorized to log in via this method. 
Please contact support or use the standard login.", + ) diff --git a/unittests/tools/test_cyclonedx_parser.py b/unittests/tools/test_cyclonedx_parser.py index 898d649c38b..e98b5338ff8 100644 --- a/unittests/tools/test_cyclonedx_parser.py +++ b/unittests/tools/test_cyclonedx_parser.py @@ -357,3 +357,17 @@ def test_cyclonedx_issue_8022(self): self.assertIn(finding.severity, Finding.SEVERITIES) finding.clean() self.assertEqual(1, len(findings)) + + def test_cyclonedx_no_severity(self): + """CycloneDX version 1.4 JSON format""" + with (get_unit_tests_scans_path("cyclonedx") / "no-severity.json").open(encoding="utf-8") as file: + parser = CycloneDXParser() + findings = parser.get_findings(file, Test()) + self.assertEqual(1, len(findings)) + finding = findings[0] + # There is so little information in the vulnerability, that we cannot build a proper title + self.assertEqual("None:None | CVE-2021-44228", finding.title) + self.assertEqual("Critical", finding.severity) + # The score will be evaluated when the finding save method is ran + # self.assertEqual(10.0, finding.cvssv3_score) + self.assertEqual("CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H", finding.cvssv3) diff --git a/unittests/tools/test_wazuh_parser.py b/unittests/tools/test_wazuh_parser.py index 5f73fef4f47..60d741c0b9b 100644 --- a/unittests/tools/test_wazuh_parser.py +++ b/unittests/tools/test_wazuh_parser.py @@ -60,3 +60,10 @@ def test_parse_v4_8_many_findings(self): self.assertEqual("CVE-2025-27558 affects (version: 6.8.0-60.63)", findings[0].title) self.assertEqual("Critical", findings[0].severity) self.assertEqual(9.1, findings[0].cvssv3_score) + + def test_parse_wazuh_abnormal_severity(self): + with (get_unit_tests_scans_path("wazuh") / "wazuh_abnormal_severity.json").open(encoding="utf-8") as testfile: + parser = WazuhParser() + findings = parser.get_findings(testfile, Test()) + for finding in findings: + self.assertEqual("Info", finding.severity)