diff --git a/.github/renovate.json b/.github/renovate.json index a5ae6324179..8b51f3bfa3a 100644 --- a/.github/renovate.json +++ b/.github/renovate.json @@ -27,6 +27,17 @@ "matchDatasources": "github-releases", "matchPackageNames": "renovatebot/renovate", "schedule": ["* * * * 0"] + },{ + "description": "Minikube does not like freshly released k8s. We need to wait some time so it will be adopted", + "matchDatasources": [ + "custom.endoflife-oldest-maintained", + "github-releases" + ], + "matchPackageNames": [ + "kubernetes", + "kubernetes/kubernetes" + ], + "minimumReleaseAge": "2 days" }], "customDatasources": { "endoflife-oldest-maintained": { diff --git a/.github/workflows/test-helm-chart.yml b/.github/workflows/test-helm-chart.yml index 64305fd10c7..9e9bd8a9658 100644 --- a/.github/workflows/test-helm-chart.yml +++ b/.github/workflows/test-helm-chart.yml @@ -115,6 +115,8 @@ jobs: if: startsWith(github.head_ref, 'renovate/') || startsWith(github.head_ref, 'dependabot/') run: | yq -i '.annotations."artifacthub.io/changes" += "- kind: changed\n description: ${{ github.event.pull_request.title }}\n"' helm/defectdojo/Chart.yaml + git add helm/defectdojo/Chart.yaml + git commit -m "ci: update Chart annotations from PR #${{ github.event.pull_request.number }}" || echo "No changes to commit" - name: Run helm-docs (update) uses: losisin/helm-docs-github-action@a57fae5676e4c55a228ea654a1bcaec8dd3cf5b5 # v1.6.2 @@ -123,16 +125,18 @@ jobs: chart-search-root: "helm/defectdojo" git-push: true - # Documentation provided in the README file needs to contain the latest information from `values.yaml` and all other related assets. - # If this step fails, install https://github.com/norwoodj/helm-docs and run locally `helm-docs --chart-search-root helm/defectdojo` before committing your changes. - # The helm-docs documentation will be generated for you. - name: Run helm-docs (check) uses: losisin/helm-docs-github-action@a57fae5676e4c55a228ea654a1bcaec8dd3cf5b5 # v1.6.2 - if: ! 
startsWith(github.head_ref, 'renovate/') || startsWith(github.head_ref, 'dependabot/') + if: ${{ !(startsWith(github.head_ref, 'renovate/') || startsWith(github.head_ref, 'dependabot/')) }} with: fail-on-diff: true chart-search-root: "helm/defectdojo" + - name: Failed Information + if: failure() + run: |- + echo "Your HELM chart changed but you haven't adjusted documentation. Check https://github.com/defectdojo/django-DefectDojo/tree/master/helm/defectdojo#helm-docs-update for more information." + generate_schema: name: Update schema runs-on: ubuntu-latest @@ -140,10 +144,6 @@ jobs: - name: Checkout uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - # The HELM structure supports the existence of a `values.schema.json` file. This file is used to validate all values provided by the user before Helm starts rendering templates. - # The chart needs to have a `values.schema.json` file that is compatible with the default `values.yaml` file. - # If this step fails, install https://github.com/losisin/helm-values-schema-json and run locally `helm schema --use-helm-docs` in `helm/defectdojo` before committing your changes. - # The helm schema will be generated for you. - name: Generate values schema json uses: losisin/helm-values-schema-json-action@660c441a4a507436a294fc55227e1df54aca5407 # v2.3.1 with: @@ -152,6 +152,11 @@ jobs: useHelmDocs: true values: values.yaml + - name: Failed Information + if: failure() + run: |- + echo "Your HELM chart changed but you haven't adjusted schema. Check https://github.com/defectdojo/django-DefectDojo/tree/master/helm/defectdojo#helm-schema-update for more information." 
+ lint_format: name: Lint chart (format) runs-on: ubuntu-latest diff --git a/docs/content/en/connecting_your_tools/import_intro.md b/docs/content/en/connecting_your_tools/import_intro.md index c0576c93aa1..44eb8fd2b44 100644 --- a/docs/content/en/connecting_your_tools/import_intro.md +++ b/docs/content/en/connecting_your_tools/import_intro.md @@ -4,18 +4,16 @@ description: "Learn how to import data manually, through the API, or via a conne weight: 1 --- -One of the things we understand at DefectDojo is that every company’s security needs are completely different. There is no ‘one\-size\-fits\-all’ approach. As your organization changes, having a flexible approach is key. - -DefectDojo allows you to connect your security tools in a flexible way to match those changes. +One of the things we understand at DefectDojo is that every company’s security needs are completely different. There is no one-size-fits-all approach. As your organization changes, having a flexible approach is key, and DefectDojo allows you to connect your security tools in a flexible way to match those changes. ## Scan Upload Methods -When DefectDojo receives a vulnerability report from a security tool, it will create Findings based on the vulnerabilities contained within that report. DefectDojo acts as the central repository for these Findings where they can be triaged, remediated or otherwise addressed by you and your team. +When DefectDojo receives a vulnerability report from a security tool, it will create Findings based on the vulnerabilities contained within that report. DefectDojo acts as the central repository for these Findings where they can be triaged, remediated, or otherwise addressed by you and your team. There are two main ways that DefectDojo can upload Finding reports. 
* Via direct **import** through the UI: [Import Scan Form](../import_scan_files/import_scan_ui) -* Via **API** endpoint (allowing for automated data ingest): See [API Docs](https://docs.defectdojo.com/en/api/api-v2-docs/) +* Via **API** endpoint (allowing for automated data ingestion): See [API Docs](https://docs.defectdojo.com/en/api/api-v2-docs/) #### DefectDojo Pro Methods @@ -29,8 +27,8 @@ There are two main ways that DefectDojo can upload Finding reports. | | **UI Import** | **API** | **Connectors** (Pro) | **Smart Upload** (Pro)| | --- | --- | --- | --- | --- | -| **Supported Scan Types** | All: see [Supported Tools](/supported_tools/) | All: see [Supported Tools](/supported_tools/) | Snyk, Semgrep, Burp Suite, AWS Security Hub, Probely, Checkmarx, Tenable | Nexpose, NMap, OpenVas, Qualys, Tenable | -| **Automation?** | Available via API: `/reimport` `/import` endpoints | Triggered from [CLI Importer](../external_tools) or external code | Connectors is inherently automated | Available via API: `/smart_upload_import` endpoint | +| **Supported Scan Types** | All: see [Supported Tools](/supported_tools/) | All: see [Supported Tools](/supported_tools/) | Anchore, AWS Security Hub, BurpSuite, Checkmarx ONE, Dependency-Track, Probely, Semgrep, SonarQube, Snyk, Tenable, Wiz | Nexpose, NMap, OpenVas, Qualys, Tenable | +| **Automation?** | Available via API: `/reimport` `/import` endpoints | Triggered from [CLI Importer](../external_tools) or external code | Connectors is an inherently automated feature | Available via API: `/smart_upload_import` endpoint | ### Product Hierarchy and organization diff --git a/docs/content/en/working_with_findings/organizing_engagements_tests/product_hierarchy.md b/docs/content/en/working_with_findings/organizing_engagements_tests/product_hierarchy.md index bff5e356e2f..d2105b75ac5 100644 --- a/docs/content/en/working_with_findings/organizing_engagements_tests/product_hierarchy.md +++ 
b/docs/content/en/working_with_findings/organizing_engagements_tests/product_hierarchy.md @@ -116,7 +116,7 @@ Tests always have: * an associated test **Environment** * an associated **Engagement** -Tests can be created in different ways. Tests can be automatically created when scan data is imported directly into to an Engagement, resulting in a new Test containing the scan data. Tests can also be created in anticipation of planning future engagements, or for manually entered security findings requiring tracking and remediation. +Tests can be created in different ways. Tests can be automatically created when scan data is imported directly into an Engagement, resulting in a new Test containing the scan data. Tests can also be created in anticipation of planning future engagements, or for manually entered security findings requiring tracking and remediation. ### **Test Types** @@ -124,8 +124,9 @@ DefectDojo supports two categories of Test Types: 1. **Parser-based Test Types**: These correspond to specific security scanners that produce output in formats like XML, JSON, or CSV. When importing scan results, DefectDojo uses specialized parsers to convert the scanner output into Findings. -2. **Non-parser Test Types**: These are used for manually created findings not imported from a scan files. -The following Test Types appear in the "Scan Type" dropdown when creating a new test, but will not appear when selecting "Import Scan": +2. **Non-parser Test Types**: These are used for manually created Findings not imported from scan files. These Test Types use the [Generic Findings Import](/supported_tools/parsers/generic_findings_import/) method to render Findings and metadata. + +The following Test Types appear in the "Scan Type" dropdown when creating a new test. 
* API Test * Static Check * Pen Test diff --git a/docs/content/supported_tools/_index.md b/docs/content/supported_tools/_index.md index 0429a2744b7..aaa71e1b65e 100644 --- a/docs/content/supported_tools/_index.md +++ b/docs/content/supported_tools/_index.md @@ -26,7 +26,7 @@ DefectDojo can parse data from 200+ security reports and counting. | [Connectors](/en/connecting_your_tools/connectors/about_connectors/): supported tools | [Smart Upload](/en/connecting_your_tools/import_scan_files/smart_upload/): supported tools | | --- | --- | -| AWS Security Hub, BurpSuite, Checkmarx ONE, Dependency-Track, Probely, Semgrep, SonarQube, Snyk, Tenable | Nexpose, NMap, OpenVas, Qualys, Tenable, Wiz | +| Anchore, AWS Security Hub, BurpSuite, Checkmarx ONE, Dependency-Track, Probely, Semgrep, SonarQube, Snyk, Tenable | Nexpose, NMap, OpenVas, Qualys, Tenable, Wiz | # All Supported Tools diff --git a/docs/content/supported_tools/parsers/generic_findings_import.md b/docs/content/supported_tools/parsers/generic_findings_import.md index 06c229ef2e1..a7b16002917 100644 --- a/docs/content/supported_tools/parsers/generic_findings_import.md +++ b/docs/content/supported_tools/parsers/generic_findings_import.md @@ -8,11 +8,13 @@ Open-source and Pro users can use Generic Findings Import as a method to ingest Using Generic Findings Import will create a new Test Type in your DefectDojo instance called "`{The Name Of Your Test}` (Generic Findings Import)". For example, this JSON content will result in a Test Type called "Example Report (Generic Findings Import)": +``` { "name": "Example Report", "findings": [] } +``` DefectDojo Pro users can also consider using the [Universal Parser](../universal_parser), a tool which allows for highly customizable JSON, XML and CSV imports. 
-For more information on supported parameters for Generic Findings Import, see the [Parser Guide](../file/generic) \ No newline at end of file +For more information on supported parameters for Generic Findings Import, see the related [Parser Guide](../file/generic). \ No newline at end of file diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py index 2b2c7a36d2e..bdde57955f2 100644 --- a/dojo/api_v2/views.py +++ b/dojo/api_v2/views.py @@ -14,6 +14,7 @@ from django.db.models.query import QuerySet as DjangoQuerySet from django.http import FileResponse, HttpResponse from django.shortcuts import get_object_or_404 +from django.urls import reverse from django.utils import timezone from django_filters.rest_framework import DjangoFilterBackend from drf_spectacular.renderers import OpenApiJsonRenderer2 @@ -176,6 +177,7 @@ generate_file_response, get_setting, get_system_setting, + process_tag_notifications, ) logger = logging.getLogger(__name__) @@ -528,6 +530,15 @@ def notes(self, request, pk=None): ) note.save() engagement.notes.add(note) + # Determine if we need to send any notifications for user mentioned + process_tag_notifications( + request=request, + note=note, + parent_url=request.build_absolute_uri( + reverse("view_engagement", args=(engagement.id,)), + ), + parent_title=f"Engagement: {engagement.name}", + ) serialized_note = serializers.NoteSerializer( {"author": author, "entry": entry, "private": private}, @@ -1086,6 +1097,15 @@ def notes(self, request, pk=None): ) note.save() finding.notes.add(note) + # Determine if we need to send any notifications for user mentioned + process_tag_notifications( + request=request, + note=note, + parent_url=request.build_absolute_uri( + reverse("view_finding", args=(finding.id,)), + ), + parent_title=f"Finding: {finding.title}", + ) if finding.has_jira_issue: jira_helper.add_comment(finding, note) @@ -2135,6 +2155,15 @@ def notes(self, request, pk=None): ) note.save() test.notes.add(note) + # Determine if we need to send 
any notifications for user mentioned + process_tag_notifications( + request=request, + note=note, + parent_url=request.build_absolute_uri( + reverse("view_test", args=(test.id,)), + ), + parent_title=f"Test: {test.title}", + ) serialized_note = serializers.NoteSerializer( {"author": author, "entry": entry, "private": private}, diff --git a/dojo/finding/helper.py b/dojo/finding/helper.py index dc3cfdc7d13..277609d3153 100644 --- a/dojo/finding/helper.py +++ b/dojo/finding/helper.py @@ -1,5 +1,6 @@ import logging from contextlib import suppress +from datetime import datetime from time import strftime from django.conf import settings @@ -9,6 +10,7 @@ from django.dispatch.dispatcher import receiver from django.urls import reverse from django.utils import timezone +from django.utils.timezone import is_naive, make_aware, now from fieldsignals import pre_save_changed import dojo.jira_link.helper as jira_helper @@ -797,6 +799,17 @@ def save_vulnerability_ids_template(finding_template, vulnerability_ids): finding_template.cve = None +def normalize_datetime(value): + """Ensure value is timezone-aware datetime.""" + if value: + if not isinstance(value, datetime): + value = datetime.combine(value, datetime.min.time()) + # Make timezone-aware if naive + if is_naive(value): + value = make_aware(value) + return value + + def close_finding( *, finding, @@ -818,15 +831,16 @@ def close_finding( """ # Core status updates finding.is_mitigated = is_mitigated - now = timezone.now() - finding.mitigated = mitigated or now + current_time = now() + mitigated_date = normalize_datetime(mitigated) or current_time + finding.mitigated = mitigated_date finding.mitigated_by = mitigated_by or user finding.active = False finding.false_p = bool(false_p) finding.out_of_scope = bool(out_of_scope) finding.duplicate = bool(duplicate) finding.under_review = False - finding.last_reviewed = finding.mitigated + finding.last_reviewed = mitigated_date finding.last_reviewed_by = user # Create note if provided 
@@ -836,16 +850,16 @@ def close_finding( entry=note_entry, author=user, note_type=note_type, - date=finding.mitigated, + date=mitigated_date, ) finding.notes.add(new_note) # Endpoint statuses for status in finding.status_finding.all(): status.mitigated_by = finding.mitigated_by - status.mitigated_time = finding.mitigated + status.mitigated_time = mitigated_date status.mitigated = True - status.last_modified = timezone.now() + status.last_modified = current_time status.save() # Risk acceptance diff --git a/dojo/finding/views.py b/dojo/finding/views.py index 9a944dccb60..e48554e613d 100644 --- a/dojo/finding/views.py +++ b/dojo/finding/views.py @@ -1145,12 +1145,14 @@ def close_finding(request, fid): note_type_activation = Note_Type.objects.filter(is_active=True) missing_note_types = get_missing_mandatory_notetypes(finding) if len(note_type_activation) else note_type_activation form = CloseFindingForm( + instance=finding, missing_note_types=missing_note_types, can_edit_mitigated_data=finding_helper.can_edit_mitigated_data(request.user), ) if request.method == "POST": form = CloseFindingForm( request.POST, + instance=finding, missing_note_types=missing_note_types, can_edit_mitigated_data=finding_helper.can_edit_mitigated_data(request.user), ) diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index b74dd7e85a9..359b6c8edde 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -1949,6 +1949,7 @@ def saml2_attrib_map_format(din): "TS-": """https://tailscale.com/security-bulletins#""", # e.g. https://tailscale.com/security-bulletins or https://tailscale.com/security-bulletins#ts-2022-001-1243 "TYPO3-": "https://typo3.org/security/advisory/", # e.g. https://typo3.org/security/advisory/typo3-core-sa-2025-010 "USN-": "https://ubuntu.com/security/notices/", # e.g. https://ubuntu.com/security/notices/USN-6642-1 + "VA-": "https://cvepremium.circl.lu/vuln/", # e.g. 
https://cvepremium.circl.lu/vuln/va-25-282-01 "VAR-": "https://cvepremium.circl.lu/vuln/", # e.g. https://cvepremium.circl.lu/vuln/var-201801-0152 "VNS": "https://vulners.com/", "WID-SEC-W-": "https://cvepremium.circl.lu/vuln/", # e.g. https://cvepremium.circl.lu/vuln/wid-sec-w-2025-1468 diff --git a/dojo/tools/dawnscanner/parser.py b/dojo/tools/dawnscanner/parser.py index c2b9ab930a2..375ff073798 100644 --- a/dojo/tools/dawnscanner/parser.py +++ b/dojo/tools/dawnscanner/parser.py @@ -30,7 +30,6 @@ def get_findings(self, filename, test): if item["message"][0:2] != "b," else item["message"][0:-1] ) - finding = Finding( title=item["name"], test=test, @@ -42,6 +41,10 @@ def get_findings(self, filename, test): static_finding=True, dynamic_finding=False, ) + if item.get("remediation"): + finding.fix_available = True + else: + finding.fix_available = False if self.CVE_REGEX.match(item["name"]): finding.unsaved_vulnerability_ids = [ diff --git a/dojo/tools/deepfence_threatmapper/compliance.py b/dojo/tools/deepfence_threatmapper/compliance.py index 32b24cde2c4..36b71a4d796 100644 --- a/dojo/tools/deepfence_threatmapper/compliance.py +++ b/dojo/tools/deepfence_threatmapper/compliance.py @@ -3,7 +3,13 @@ class DeepfenceThreatmapperCompliance: def get_findings(self, row, headers, test): - description = "" + if "compliance_check_type" in headers and "test_number" in headers: + return self._parse_old_format(row, headers, test) + if "Compliance Standard" in headers and "Control ID" in headers: + return self._parse_new_format(row, headers, test) + return None + + def _parse_old_format(self, row, headers, test): compliance_check_type = row[headers["compliance_check_type"]] count = row[headers["count"]] doc_id = row[headers["doc_id"]] @@ -18,34 +24,76 @@ def get_findings(self, row, headers, test): test_desc = row[headers["test_desc"]] test_info = row[headers["test_info"]] test_number = row[headers["test_number"]] - description += "**compliance_check_type:** " + 
str(compliance_check_type) + "\n" - description += "**host_name:** " + str(host_name) + "\n" - description += "**cloud_account_id:** " + str(cloud_account_id) + "\n" - description += "**masked:** " + str(masked) + "\n" - description += "**node_id:** " + str(node_id) + "\n" - description += "**node_name:** " + str(node_name) + "\n" - description += "**node_type:** " + str(node_type) + "\n" - description += "**status:** " + str(status) + "\n" - description += "**test_category:** " + str(test_category) + "\n" - description += "**test_desc:** " + str(test_desc) + "\n" - description += "**test_info:** " + str(test_info) + "\n" - description += "**test_number:** " + str(test_number) + "\n" - description += "**count:** " + str(count) + "\n" - description += "**doc_id:** " + str(doc_id) + "\n" + + description = ( + f"**Compliance Check Type:** {compliance_check_type}\n" + f"**Host Name:** {host_name}\n" + f"**Cloud Account ID:** {cloud_account_id}\n" + f"**Masked:** {masked}\n" + f"**Node ID:** {node_id}\n" + f"**Node Name:** {node_name}\n" + f"**Node Type:** {node_type}\n" + f"**Status:** {status}\n" + f"**Test Category:** {test_category}\n" + f"**Test Description:** {test_desc}\n" + f"**Test Info:** {test_info}\n" + f"**Test Number:** {test_number}\n" + f"**Count:** {count}\n" + f"**Doc ID:** {doc_id}\n" + ) + + return Finding( + title=f"Threatmapper_Compliance_Report-{test_number}", + description=description, + severity=self.compliance_severity(status), + static_finding=False, + dynamic_finding=True, + test=test, + ) + + def _parse_new_format(self, row, headers, test): + compliance_standard = row[headers["Compliance Standard"]] + status = row[headers["Status"]] + category = row[headers["Category"]] + description_text = row[headers["Description"]] + info = row[headers["Info"]] + control_id = row[headers["Control ID"]] + node_name = row[headers["Node Name"]] + node_type = row[headers["Node Type"]] + remediation = row[headers["Remediation"]] + masked = 
row[headers["Masked"]] + + description = ( + f"**Compliance Standard:** {compliance_standard}\n" + f"**Status:** {status}\n" + f"**Category:** {category}\n" + f"**Description:** {description_text}\n" + f"**Info:** {info}\n" + f"**Control ID:** {control_id}\n" + f"**Node Name:** {node_name}\n" + f"**Node Type:** {node_type}\n" + f"**Remediation:** {remediation}\n" + f"**Masked:** {masked}\n" + ) + return Finding( - title="Threatmapper_Compliance_Report-" + test_number, + title=f"Threatmapper_Compliance_Report-{control_id}", description=description, severity=self.compliance_severity(status), static_finding=False, dynamic_finding=True, + mitigation=remediation, test=test, ) def compliance_severity(self, severity_input): + if severity_input is None: + return "Info" + severity_input = severity_input.lower() if severity_input in {"pass", "info"}: - output = "Info" - elif severity_input == "warn": - output = "Medium" - else: - output = "Info" - return output + return "Info" + if severity_input == "warn": + return "Medium" + if severity_input == "fail": + return "High" + return "Info" diff --git a/dojo/tools/deepfence_threatmapper/malware.py b/dojo/tools/deepfence_threatmapper/malware.py index a1defd18401..3fa0a230920 100644 --- a/dojo/tools/deepfence_threatmapper/malware.py +++ b/dojo/tools/deepfence_threatmapper/malware.py @@ -3,7 +3,13 @@ class DeepfenceThreatmapperMalware: def get_findings(self, row, headers, test): - description = "" + if "Rule Name" in headers and "Class" in headers: + return self._parse_old_format(row, headers, test) + if "Rule Name" in headers and "Node Type" in headers: + return self._parse_new_format(row, headers, test) + return None + + def _parse_old_format(self, row, headers, test): Rule_Name = row[headers["Rule Name"]] Class = row[headers["Class"]] File_Name = row[headers["File Name"]] @@ -13,14 +19,48 @@ def get_findings(self, row, headers, test): NodeType = row[headers["NodeType"]] Container_Name = row[headers["Container Name"]] 
Kubernetes_Cluster_Name = row[headers["Kubernetes Cluster Name"]] - description += "**Summary:** " + str(Summary) + "\n" - description += "**Rule Name:** " + str(Rule_Name) + "\n" - description += "**Class:** " + str(Class) + "\n" - description += "**File Name:** " + str(File_Name) + "\n" - description += "**Node Name:** " + str(Node_Name) + "\n" - description += "**NodeType:** " + str(NodeType) + "\n" - description += "**Container Name:** " + str(Container_Name) + "\n" - description += "**Kubernetes Cluster Name:** " + str(Kubernetes_Cluster_Name) + "\n" + + description = ( + f"**Summary:** {Summary}\n" + f"**Rule Name:** {Rule_Name}\n" + f"**Class:** {Class}\n" + f"**File Name:** {File_Name}\n" + f"**Node Name:** {Node_Name}\n" + f"**NodeType:** {NodeType}\n" + f"**Container Name:** {Container_Name}\n" + f"**Kubernetes Cluster Name:** {Kubernetes_Cluster_Name}\n" + ) + + return Finding( + title=Rule_Name, + description=description, + file_path=File_Name, + severity=self.severity(Severity), + static_finding=False, + dynamic_finding=True, + test=test, + ) + + def _parse_new_format(self, row, headers, test): + Rule_Name = row[headers["Rule Name"]] + File_Name = row[headers["File Name"]] + Summary = row[headers["Summary"]] + Severity = row[headers["Severity"]] + Node_Name = row[headers["Node Name"]] + Node_Type = row[headers["Node Type"]] + Kubernetes_Cluster_Name = row[headers["Kubernetes Cluster Name"]] + Masked = row[headers["Masked"]] + + description = ( + f"**Summary:** {Summary}\n" + f"**Rule Name:** {Rule_Name}\n" + f"**File Name:** {File_Name}\n" + f"**Node Name:** {Node_Name}\n" + f"**Node Type:** {Node_Type}\n" + f"**Kubernetes Cluster Name:** {Kubernetes_Cluster_Name}\n" + f"**Masked:** {Masked}\n" + ) + return Finding( title=Rule_Name, description=description, diff --git a/dojo/tools/deepfence_threatmapper/parser.py b/dojo/tools/deepfence_threatmapper/parser.py index 3f5fd2a5a18..2b95a385f09 100644 --- a/dojo/tools/deepfence_threatmapper/parser.py +++ 
b/dojo/tools/deepfence_threatmapper/parser.py @@ -27,14 +27,23 @@ def get_findings(self, filename, test): first = False for i in range(len(row)): headers[row[i]] = i - elif headers.get("Rule Name") is not None and headers.get("Class") is not None: + elif ( + ("Rule Name" in headers and "Class" in headers) or + ("Rule Name" in headers and "Node Type" in headers) + ): findings.append(DeepfenceThreatmapperMalware().get_findings(row, headers, test)) elif headers.get("Filename") is not None and headers.get("Content") is not None: value = DeepfenceThreatmapperSecret().get_findings(row, headers, test) if value is not None: findings.append(value) - elif headers.get("@timestamp") is not None and headers.get("cve_attack_vector") is not None: + elif ( + ("cve_id" in headers and "cve_attack_vector" in headers) or + ("CVE ID" in headers and "Attack Vector" in headers) + ): findings.append(DeepfenceThreatmapperVulnerability().get_findings(row, headers, test)) - elif headers.get("@timestamp") is not None and headers.get("compliance_check_type") is not None: + elif ( + ("compliance_check_type" in headers and "test_number" in headers) or + ("Compliance Standard" in headers and "Control ID" in headers) + ): findings.append(DeepfenceThreatmapperCompliance().get_findings(row, headers, test)) return findings diff --git a/dojo/tools/deepfence_threatmapper/secret.py b/dojo/tools/deepfence_threatmapper/secret.py index 3d9f2584149..1915e4be694 100644 --- a/dojo/tools/deepfence_threatmapper/secret.py +++ b/dojo/tools/deepfence_threatmapper/secret.py @@ -3,6 +3,13 @@ class DeepfenceThreatmapperSecret: def get_findings(self, row, headers, test): + if "Name" in headers and "Signature" in headers: + return self._parse_old_format(row, headers, test) + if "Content Starting Index" in headers and "Masked" in headers: + return self._parse_new_format(row, headers, test) + return None + + def _parse_old_format(self, row, headers, test): description = "" Filename = row[headers["Filename"]] Content = 
row[headers["Content"]] @@ -13,27 +20,57 @@ def get_findings(self, row, headers, test): Container_Name = row[headers["Container Name"]] Kubernetes_Cluster_Name = row[headers["Kubernetes Cluster Name"]] Signature = row[headers["Signature"]] - description += "**Filename:** " + str(Filename) + "\n" - description += "**Name:** " + str(Name) + "\n" - description += "**Rule:** " + str(Rule) + "\n" - description += "**Node Name:** " + str(Node_Name) + "\n" - description += "**Container Name:** " + str(Container_Name) + "\n" - description += "**Kubernetes Cluster Name:** " + str(Kubernetes_Cluster_Name) + "\n" - description += "**Content:** " + str(Content) + "\n" - description += "**Signature:** " + str(Signature) + "\n" - if Name is not None and Severity is not None: - finding = Finding( - title=str(Name), - description=description, - file_path=Filename, - severity=self.severity(Severity), - static_finding=False, - dynamic_finding=True, - test=test, + description += f"**Filename:** {Filename}\n" + description += f"**Name:** {Name}\n" + description += f"**Rule:** {Rule}\n" + description += f"**Node Name:** {Node_Name}\n" + description += f"**Container Name:** {Container_Name}\n" + description += f"**Kubernetes Cluster Name:** {Kubernetes_Cluster_Name}\n" + description += f"**Content:** {Content}\n" + description += f"**Signature:** {Signature}\n" + if Name and Severity: + return Finding( + title=str(Name), + description=description, + file_path=Filename, + severity=self.severity(Severity), + static_finding=False, + dynamic_finding=True, + test=test, + ) + return None + + def _parse_new_format(self, row, headers, test): + description = "" + Filename = row[headers["Filename"]] + Content = row[headers["Content"]] + Rule = row[headers["Rule"]] + Severity = row[headers["Severity"]] + Content_Starting_Index = row[headers["Content Starting Index"]] + Node_Name = row[headers["Node Name"]] + Node_Type = row[headers["Node Type"]] + Kubernetes_Cluster_Name = row[headers["Kubernetes 
Cluster Name"]] + Masked = row[headers["Masked"]] + description += f"**Filename:** {Filename}\n" + description += f"**Rule:** {Rule}\n" + description += f"**Node Name:** {Node_Name}\n" + description += f"**Node Type:** {Node_Type}\n" + description += f"**Kubernetes Cluster Name:** {Kubernetes_Cluster_Name}\n" + description += f"**Content:** {Content}\n" + description += f"**Content Starting Index:** {Content_Starting_Index}\n" + description += f"**Masked:** {Masked}\n" + title = f"{Rule} in {Filename}" if Rule else "Secret Finding" + if Severity: + return Finding( + title=title, + description=description, + file_path=Filename, + severity=self.severity(Severity), + static_finding=False, + dynamic_finding=True, + test=test, ) - else: - finding = None - return finding + return None def severity(self, severity_input): if severity_input is None: diff --git a/dojo/tools/deepfence_threatmapper/vulnerability.py b/dojo/tools/deepfence_threatmapper/vulnerability.py index 3539518177b..69a01850e7a 100644 --- a/dojo/tools/deepfence_threatmapper/vulnerability.py +++ b/dojo/tools/deepfence_threatmapper/vulnerability.py @@ -3,7 +3,13 @@ class DeepfenceThreatmapperVulnerability: def get_findings(self, row, headers, test): - description = "" + if "cve_id" in headers and "cve_attack_vector" in headers: + return self._parse_old_format(row, headers, test) + if "CVE ID" in headers and "Attack Vector" in headers: + return self._parse_new_format(row, headers, test) + return None + + def _parse_old_format(self, row, headers, test): cve_attack_vector = row[headers["cve_attack_vector"]] cve_caused_by_package = row[headers["cve_caused_by_package"]] cve_container_image = row[headers["cve_container_image"]] @@ -18,19 +24,23 @@ def get_findings(self, row, headers, test): host_name = row[headers["host_name"]] cloud_account_id = row[headers["cloud_account_id"]] masked = row[headers["masked"]] - description += "**cve_attack_vector:** " + str(cve_attack_vector) + "\n" - description += 
"**cve_caused_by_package:** " + str(cve_caused_by_package) + "\n" - description += "**cve_container_image:** " + str(cve_container_image) + "\n" - description += "**cve_container_image_id:** " + str(cve_container_image_id) + "\n" - description += "**cve_description:** " + str(cve_description) + "\n" - description += "**cve_severity:** " + str(cve_severity) + "\n" - description += "**cve_overall_score:** " + str(cve_overall_score) + "\n" - description += "**cve_type:** " + str(cve_type) + "\n" - description += "**host_name:** " + str(host_name) + "\n" - description += "**cloud_account_id:** " + str(cloud_account_id) + "\n" - description += "**masked:** " + str(masked) + "\n" + + description = ( + f"**Attack Vector:** {cve_attack_vector}\n" + f"**Caused By Package:** {cve_caused_by_package}\n" + f"**Container Image:** {cve_container_image}\n" + f"**Container Image ID:** {cve_container_image_id}\n" + f"**Description:** {cve_description}\n" + f"**Severity:** {cve_severity}\n" + f"**Overall Score:** {cve_overall_score}\n" + f"**Type:** {cve_type}\n" + f"**Host Name:** {host_name}\n" + f"**Cloud Account ID:** {cloud_account_id}\n" + f"**Masked:** {masked}\n" + ) + return Finding( - title="Threatmapper_Vuln_Report-" + cve_id, + title=f"Threatmapper_Vuln_Report-{cve_id}", description=description, component_name=cve_caused_by_package, severity=self.severity(cve_severity), @@ -42,6 +52,54 @@ def get_findings(self, row, headers, test): test=test, ) + def _parse_new_format(self, row, headers, test): + cve_id = row[headers["CVE ID"]] + severity = row[headers["Severity"]] + attack_vector = row[headers["Attack Vector"]] + caused_by_package = row[headers["Caused By Package"]] + caused_by_package_path = row[headers["Caused By Package Path"]] + cvss_score = row[headers["CVSS Score"]] + description_text = row[headers["Description"]] + fixed_in = row[headers["Fixed In"]] + link = row[headers["Link"]] + overall_score = row[headers["Overall Score"]] + cve_type = row[headers["Type"]] + 
node_name = row[headers["Node Name"]] + node_type = row[headers["Node Type"]] + cluster_name = row[headers["Kubernetes Cluster Name"]] + masked = row[headers["Masked"]] + + description = ( + f"**CVE ID:** {cve_id}\n" + f"**Severity:** {severity}\n" + f"**Attack Vector:** {attack_vector}\n" + f"**Caused By Package:** {caused_by_package}\n" + f"**Caused By Package Path:** {caused_by_package_path}\n" + f"**CVSS Score:** {cvss_score}\n" + f"**Description:** {description_text}\n" + f"**Fixed In:** {fixed_in}\n" + f"**Link:** {link}\n" + f"**Overall Score:** {overall_score}\n" + f"**Type:** {cve_type}\n" + f"**Node Name:** {node_name}\n" + f"**Node Type:** {node_type}\n" + f"**Kubernetes Cluster Name:** {cluster_name}\n" + f"**Masked:** {masked}\n" + ) + + return Finding( + title=f"Threatmapper_Vuln_Report-{cve_id}", + description=description, + component_name=caused_by_package, + severity=self.severity(severity), + static_finding=False, + dynamic_finding=True, + mitigation=fixed_in, + references=link, + cve=cve_id, + test=test, + ) + def severity(self, severity_input): if severity_input is None: return "Info" diff --git a/dojo/tools/ibm_app/parser.py b/dojo/tools/ibm_app/parser.py index 224395e1edc..762be2139dd 100644 --- a/dojo/tools/ibm_app/parser.py +++ b/dojo/tools/ibm_app/parser.py @@ -106,6 +106,10 @@ def get_findings(self, file, test): finding.unsaved_vulnerability_ids = [ vulnerability_id, ] + if recommendation_data: + finding.fix_available = True + else: + finding.fix_available = False finding.unsaved_endpoints = [] dupes[dupe_key] = finding diff --git a/dojo/tools/jfrog_xray_unified/parser.py b/dojo/tools/jfrog_xray_unified/parser.py index 83e0222ade0..8aceba5c54d 100644 --- a/dojo/tools/jfrog_xray_unified/parser.py +++ b/dojo/tools/jfrog_xray_unified/parser.py @@ -104,7 +104,8 @@ def get_item(vulnerability, test): else: title = vulnerability["summary"] - references = "\n".join(vulnerability["references"]) + references_str = vulnerability.get("references") + 
references = "\n".join(references_str) if isinstance(references_str, list) else (references_str if isinstance(references_str, str) else "") scan_time = datetime.strptime( vulnerability["artifact_scan_time"], "%Y-%m-%dT%H:%M:%S%z", @@ -118,7 +119,10 @@ def get_item(vulnerability, test): # remove package type from component name component_name = component_name.split("://", 1)[1] - tags = ["packagetype_" + vulnerability["package_type"]] + tags = [] + package_type = vulnerability.get("package_type") + if package_type: + tags.append("packagetype_" + package_type) # create the finding object finding = Finding( @@ -126,7 +130,7 @@ def get_item(vulnerability, test): test=test, severity=severity, description=( - vulnerability["description"] + "\n\n" + extra_desc + vulnerability.get("description", vulnerability.get("summary")) + "\n\n" + extra_desc ).strip(), mitigation=mitigation, component_name=component_name, diff --git a/dojo/tools/kubeaudit/parser.py b/dojo/tools/kubeaudit/parser.py index 26638bd6a2f..af7babaab73 100644 --- a/dojo/tools/kubeaudit/parser.py +++ b/dojo/tools/kubeaudit/parser.py @@ -81,5 +81,9 @@ def get_findings(self, filename, test): static_finding=True, dynamic_finding=False, ) + if msg: + finding.fix_available = True + else: + finding.fix_available = False findings.append(finding) return findings diff --git a/dojo/tools/nancy/parser.py b/dojo/tools/nancy/parser.py index 2d4f4d986eb..8bc00c10fd8 100644 --- a/dojo/tools/nancy/parser.py +++ b/dojo/tools/nancy/parser.py @@ -34,14 +34,24 @@ def get_findings(self, scan_file, test): return findings + def convert_cvss_score(self, raw_value): + if raw_value is None: + return "Info" + val = float(raw_value) + if val == 0.0: + return "Info" + if val < 4.0: + return "Low" + if val < 7.0: + return "Medium" + if val < 9.0: + return "High" + return "Critical" + def get_items(self, vulnerable, test): findings = [] for vuln in vulnerable: finding = None - severity = "Info" - # the tool does not define severity, however 
it - # provides CVSSv3 vector which will calculate - # severity dynamically on save() references = [] if vuln["Vulnerabilities"]: comp_name = vuln["Coordinates"].split(":")[1].split("@")[0] @@ -57,7 +67,7 @@ def get_items(self, vulnerable, test): title=associated_vuln["Title"], description=associated_vuln["Description"], test=test, - severity=severity, + severity=self.convert_cvss_score(associated_vuln["CvssScore"]), component_name=comp_name, component_version=comp_version, false_p=False, diff --git a/dojo/tools/pwn_sast/parser.py b/dojo/tools/pwn_sast/parser.py index 89635ad8155..8be69874bbe 100644 --- a/dojo/tools/pwn_sast/parser.py +++ b/dojo/tools/pwn_sast/parser.py @@ -115,6 +115,10 @@ def get_findings(self, filename, test): file_path=offending_file, unique_id_from_tool=unique_finding_key, ) + if mitigation: + finding.fix_available = True + else: + finding.fix_available = False findings[unique_finding_key] = finding return list(findings.values()) diff --git a/dojo/tools/qualys/parser.py b/dojo/tools/qualys/parser.py index c629bcf28cd..0300ee43ff7 100644 --- a/dojo/tools/qualys/parser.py +++ b/dojo/tools/qualys/parser.py @@ -311,22 +311,16 @@ def parse_finding(host, tree): split_cvss(cvss2, temp) # DefectDojo does not support cvssv2 temp["CVSS_vector"] = None - # CVE and LINKS - temp_cve_details = vuln_item.iterfind("CVE_ID_LIST/CVE_ID") - if temp_cve_details: - cl = { - cve_detail.findtext("ID"): cve_detail.findtext("URL") - for cve_detail in temp_cve_details - } - temp["cve"] = "\n".join(list(cl.keys())) - temp["links"] = "\n".join(list(cl.values())) + temp_cve_details = [(cve.findtext("ID"), cve.findtext("URL")) for cve in vuln_item.iterfind("CVE_ID_LIST/CVE_ID")] + temp["cve_list"] = [cve_id for cve_id, _ in temp_cve_details if cve_id] + temp["links"] = [url for _, url in temp_cve_details if url] # Generate severity from number in XML's 'SEVERITY' field, if not present default to 'Informational' sev = get_severity(vuln_item.findtext("SEVERITY")) finding = 
None if temp_cve_details: - refs = "\n".join(list(cl.values())) + refs = temp.get("links", "") finding = Finding( title="QID-" + gid[4:] + " | " + temp["vuln_name"], mitigation=temp["solution"], @@ -363,6 +357,7 @@ def parse_finding(host, tree): finding.verified = True finding.unsaved_endpoints = [] finding.unsaved_endpoints.append(ep) + finding.unsaved_vulnerability_ids = temp.get("cve_list", []) ret_rows.append(finding) return ret_rows diff --git a/helm/defectdojo/README.md b/helm/defectdojo/README.md index f1a8471f177..919550dbe37 100644 --- a/helm/defectdojo/README.md +++ b/helm/defectdojo/README.md @@ -493,6 +493,23 @@ kubectl delete serviceAccount defectdojo kubectl delete pvc data-defectdojo-redis-0 data-defectdojo-postgresql-0 ``` +## Development/contribution + +In case you decide to help with the improvement of the HELM chart, keep in mind that values/descriptions might need to be adjusted in multiple places (see below). + +### HELM Docs update + +Documentation provided in the README file needs to contain the latest information from `values.yaml` and all other related assets. +If GitHub Action _Lint Helm chart / Update documentation_ step fails, install https://github.com/norwoodj/helm-docs and run locally `helm-docs --chart-search-root helm/defectdojo` before committing your changes. +The helm-docs documentation will be generated for you. + +### HELM Schema update + +The HELM structure supports the existence of a `values.schema.json` file. This file is used to validate all values provided by the user before Helm starts rendering templates. +The chart needs to have a `values.schema.json` file that is compatible with the default `values.yaml` file. +If GitHub Action _Lint Helm chart / Update schema_ step fails, install https://github.com/losisin/helm-values-schema-json and run locally `helm schema --use-helm-docs` in `helm/defectdojo` before committing your changes. +The HELM schema will be generated for you. 
+ # General information about chart values ![Version: 1.9.0-dev](https://img.shields.io/badge/Version-1.9.0--dev-informational?style=flat-square) ![AppVersion: 2.53.0-dev](https://img.shields.io/badge/AppVersion-2.53.0--dev-informational?style=flat-square) diff --git a/helm/defectdojo/README.md.gotmpl b/helm/defectdojo/README.md.gotmpl index e4ab067a647..2edff657296 100644 --- a/helm/defectdojo/README.md.gotmpl +++ b/helm/defectdojo/README.md.gotmpl @@ -495,6 +495,22 @@ kubectl delete serviceAccount defectdojo kubectl delete pvc data-defectdojo-redis-0 data-defectdojo-postgresql-0 ``` +## Development/contribution + +In case you decide to help with the improvement of the HELM chart, keep in mind that values/descriptions might need to be adjusted in multiple places (see below). + +### HELM Docs update + +Documentation provided in the README file needs to contain the latest information from `values.yaml` and all other related assets. +If GitHub Action _Lint Helm chart / Update documentation_ step fails, install https://github.com/norwoodj/helm-docs and run locally `helm-docs --chart-search-root helm/defectdojo` before committing your changes. +The helm-docs documentation will be generated for you. + +### HELM Schema update + +The HELM structure supports the existence of a `values.schema.json` file. This file is used to validate all values provided by the user before Helm starts rendering templates. +The chart needs to have a `values.schema.json` file that is compatible with the default `values.yaml` file. +If GitHub Action _Lint Helm chart / Update schema_ step fails, install https://github.com/losisin/helm-values-schema-json and run locally `helm schema --use-helm-docs` in `helm/defectdojo` before committing your changes. +The HELM schema will be generated for you. 
# General information about chart values diff --git a/unittests/scans/dawnscanner/dawnscanner_v1.6.9.json b/unittests/scans/dawnscanner/dawnscanner_v1.6.9.json index 7f9afcb7261..46b9075387a 100644 --- a/unittests/scans/dawnscanner/dawnscanner_v1.6.9.json +++ b/unittests/scans/dawnscanner/dawnscanner_v1.6.9.json @@ -31,7 +31,7 @@ "severity": "info", "cvss_score": " ", "message": "Ruby on Rails has specific, built in support for CSRF tokens. To enable it, or ensure that it is enabled, find the base ApplicationController and look for the protect_from_forgery directive. Note that by default Rails does not provide CSRF protection for any HTTP GET request.", - "remediation": "Make sure you are using Rails protect_from_forgery facilities in application_controller.rMake sure you are using Rails protect_from_forgery facilities in application_controller.rb" + "remediation": "" }, { "name": "Owasp Ror CheatSheet: Security Related Headers", "cve_link": "http://cve.mitre.org/cgi-bin/cvename.cgi?name=Owasp Ror CheatSheet: Security Related Headers", diff --git a/unittests/scans/deepfence_threatmapper/compliance_report_newformat.xlsx b/unittests/scans/deepfence_threatmapper/compliance_report_newformat.xlsx new file mode 100644 index 00000000000..b0addbd103a Binary files /dev/null and b/unittests/scans/deepfence_threatmapper/compliance_report_newformat.xlsx differ diff --git a/unittests/scans/deepfence_threatmapper/malware_report_newformat.xlsx b/unittests/scans/deepfence_threatmapper/malware_report_newformat.xlsx new file mode 100644 index 00000000000..46dc625a2c6 Binary files /dev/null and b/unittests/scans/deepfence_threatmapper/malware_report_newformat.xlsx differ diff --git a/unittests/scans/deepfence_threatmapper/secret_report_newformat.xlsx b/unittests/scans/deepfence_threatmapper/secret_report_newformat.xlsx new file mode 100644 index 00000000000..5083eb9ce78 Binary files /dev/null and b/unittests/scans/deepfence_threatmapper/secret_report_newformat.xlsx differ diff --git 
a/unittests/scans/deepfence_threatmapper/vulnerability_report_newformat.xlsx b/unittests/scans/deepfence_threatmapper/vulnerability_report_newformat.xlsx new file mode 100644 index 00000000000..a91204162df Binary files /dev/null and b/unittests/scans/deepfence_threatmapper/vulnerability_report_newformat.xlsx differ diff --git a/unittests/scans/drheader/scan2.json b/unittests/scans/drheader/scan2.json index 061323da810..5ab2670d5dd 100644 --- a/unittests/scans/drheader/scan2.json +++ b/unittests/scans/drheader/scan2.json @@ -1 +1,72 @@ -[{"rule": "Content-Security-Policy", "severity": "high", "message": "Must-Contain-One directive missed", "expected": ["default-src 'none'", "default-src 'self'"], "delimiter": ";", "value": "default-src 'self' service.maxymiser.net; child-src 'self' 'unsafe-inline' https://www.googleadservices.com https://*.fls.doubleclick.net/ https://*.santander.co.uk https://santander.demdex.net; script-src 'self' 'unsafe-inline' https://track.omguk.com https://cdn.usersnap.com https://screencapture.kampyle.com https://nebula-cdn.kampyle.com https://resources.digital-cloud-uk.medallia.eu https://pagead2.googlesyndication.com https://js-cdn.dynatrace.com https://activitymap.adobe.com https://cdn-ukwest.onetrust.com https://cdn.mouseflow.com https://googleads.g.doubleclick.net lptag.liveperson.net lo.v.liveperson.net lo.msg.liveperson.net accdn.lpsnmedia.net lpcdn.lpsnmedia.net https://www.googletagservices.com https://ad.doubleclick.net service.maxymiser.net https://connect.facebook.net https://*.fls.doubleclick.net/ https://www.googleadservices.com https://www.googletagmanager.com https://assets.adobedtm.com https://dpm.demdex.net/ https://*.santander.co.uk 'unsafe-eval'; connect-src 'self' 'unsafe-inline' https://udc-neb.kampyle.com https://*.bf.dynatrace.com https://privacyportal-uk.onetrust.com https://cdn-ukwest.onetrust.com https://o2.mouseflow.com https://googleads4.g.doubleclick.net wss://lo.msg.liveperson.net https://dpm.demdex.net 
https://*.santander.co.uk; img-src 'self' https://lpcdn.lpsnmedia.net service.maxymiser.net 'unsafe-inline' https://*.santander.co.uk data: https:; style-src 'self' service.maxymiser.net 'unsafe-inline'; font-src 'self'; frame-src 'self' 'unsafe-inline' https://www.youtube-nocookie.com https://resources.digital-cloud-uk.medallia.eu https://lo.tokenizer.liveperson.net https://lo.msghist.liveperson.net https://lo.msg.liveperson.net https://lpcdn.lpsnmedia.net lo.idp.liveperson.net server.lon.liveperson.net https://authorize.omniture.com https://sitecatalyst.omniture.com service.maxymiser.net https://edigitalsurvey.com https://www.youtube.com https://santander.demdex.net https://*.fls.doubleclick.net; object-src 'self'; media-src lpcdn.lpsnmedia.net; worker-src blob:;", "anomaly": ["default-src 'none'", "default-src 'self'"]}, {"rule": "Content-Security-Policy", "severity": "medium", "message": "Must-Avoid directive included", "avoid": ["unsafe-inline", "unsafe-eval"], "delimiter": ";", "value": "default-src 'self' service.maxymiser.net; child-src 'self' 'unsafe-inline' https://www.googleadservices.com https://*.fls.doubleclick.net/ https://*.santander.co.uk https://santander.demdex.net; script-src 'self' 'unsafe-inline' https://track.omguk.com https://cdn.usersnap.com https://screencapture.kampyle.com https://nebula-cdn.kampyle.com https://resources.digital-cloud-uk.medallia.eu https://pagead2.googlesyndication.com https://js-cdn.dynatrace.com https://activitymap.adobe.com https://cdn-ukwest.onetrust.com https://cdn.mouseflow.com https://googleads.g.doubleclick.net lptag.liveperson.net lo.v.liveperson.net lo.msg.liveperson.net accdn.lpsnmedia.net lpcdn.lpsnmedia.net https://www.googletagservices.com https://ad.doubleclick.net service.maxymiser.net https://connect.facebook.net https://*.fls.doubleclick.net/ https://www.googleadservices.com https://www.googletagmanager.com https://assets.adobedtm.com https://dpm.demdex.net/ https://*.santander.co.uk 'unsafe-eval'; 
connect-src 'self' 'unsafe-inline' https://udc-neb.kampyle.com https://*.bf.dynatrace.com https://privacyportal-uk.onetrust.com https://cdn-ukwest.onetrust.com https://o2.mouseflow.com https://googleads4.g.doubleclick.net wss://lo.msg.liveperson.net https://dpm.demdex.net https://*.santander.co.uk; img-src 'self' https://lpcdn.lpsnmedia.net service.maxymiser.net 'unsafe-inline' https://*.santander.co.uk data: https:; style-src 'self' service.maxymiser.net 'unsafe-inline'; font-src 'self'; frame-src 'self' 'unsafe-inline' https://www.youtube-nocookie.com https://resources.digital-cloud-uk.medallia.eu https://lo.tokenizer.liveperson.net https://lo.msghist.liveperson.net https://lo.msg.liveperson.net https://lpcdn.lpsnmedia.net lo.idp.liveperson.net server.lon.liveperson.net https://authorize.omniture.com https://sitecatalyst.omniture.com service.maxymiser.net https://edigitalsurvey.com https://www.youtube.com https://santander.demdex.net https://*.fls.doubleclick.net; object-src 'self'; media-src lpcdn.lpsnmedia.net; worker-src blob:;", "anomaly": "unsafe-inline"}, {"rule": "Content-Security-Policy", "severity": "medium", "message": "Must-Avoid directive included", "avoid": ["unsafe-inline", "unsafe-eval"], "delimiter": ";", "value": "default-src 'self' service.maxymiser.net; child-src 'self' 'unsafe-inline' https://www.googleadservices.com https://*.fls.doubleclick.net/ https://*.santander.co.uk https://santander.demdex.net; script-src 'self' 'unsafe-inline' https://track.omguk.com https://cdn.usersnap.com https://screencapture.kampyle.com https://nebula-cdn.kampyle.com https://resources.digital-cloud-uk.medallia.eu https://pagead2.googlesyndication.com https://js-cdn.dynatrace.com https://activitymap.adobe.com https://cdn-ukwest.onetrust.com https://cdn.mouseflow.com https://googleads.g.doubleclick.net lptag.liveperson.net lo.v.liveperson.net lo.msg.liveperson.net accdn.lpsnmedia.net lpcdn.lpsnmedia.net https://www.googletagservices.com https://ad.doubleclick.net 
service.maxymiser.net https://connect.facebook.net https://*.fls.doubleclick.net/ https://www.googleadservices.com https://www.googletagmanager.com https://assets.adobedtm.com https://dpm.demdex.net/ https://*.santander.co.uk 'unsafe-eval'; connect-src 'self' 'unsafe-inline' https://udc-neb.kampyle.com https://*.bf.dynatrace.com https://privacyportal-uk.onetrust.com https://cdn-ukwest.onetrust.com https://o2.mouseflow.com https://googleads4.g.doubleclick.net wss://lo.msg.liveperson.net https://dpm.demdex.net https://*.santander.co.uk; img-src 'self' https://lpcdn.lpsnmedia.net service.maxymiser.net 'unsafe-inline' https://*.santander.co.uk data: https:; style-src 'self' service.maxymiser.net 'unsafe-inline'; font-src 'self'; frame-src 'self' 'unsafe-inline' https://www.youtube-nocookie.com https://resources.digital-cloud-uk.medallia.eu https://lo.tokenizer.liveperson.net https://lo.msghist.liveperson.net https://lo.msg.liveperson.net https://lpcdn.lpsnmedia.net lo.idp.liveperson.net server.lon.liveperson.net https://authorize.omniture.com https://sitecatalyst.omniture.com service.maxymiser.net https://edigitalsurvey.com https://www.youtube.com https://santander.demdex.net https://*.fls.doubleclick.net; object-src 'self'; media-src lpcdn.lpsnmedia.net; worker-src blob:;", "anomaly": "unsafe-eval"}, {"rule": "Strict-Transport-Security", "severity": "high", "message": "Header not included in response", "expected": ["max-age=31536000", "includesubdomains"], "delimiter": ";"}, {"rule": "Cache-Control", "severity": "high", "message": "Value does not match security policy", "expected": ["no-cache", "no-store", "must-revalidate"], "delimiter": ",", "value": "private, must-revalidate, max-age=900"}, {"rule": "Pragma", "severity": "high", "message": "Header not included in response", "expected": ["no-cache"], "delimiter": ";"}] +[ + { + "rule": "Content-Security-Policy", + "severity": "high", + "message": "Must-Contain-One directive missed", + "expected": [ + "default-src 
'none'", + "default-src 'self'" + ], + "delimiter": ";", + "value": "default-src 'self' service.maxymiser.net; child-src 'self' 'unsafe-inline' https://www.googleadservices.com https://*.fls.doubleclick.net/ https://*.santander.co.uk https://santander.demdex.net; script-src 'self' 'unsafe-inline' https://track.omguk.com https://cdn.usersnap.com https://screencapture.kampyle.com https://nebula-cdn.kampyle.com https://resources.digital-cloud-uk.medallia.eu https://pagead2.googlesyndication.com https://js-cdn.dynatrace.com https://activitymap.adobe.com https://cdn-ukwest.onetrust.com https://cdn.mouseflow.com https://googleads.g.doubleclick.net lptag.liveperson.net lo.v.liveperson.net lo.msg.liveperson.net accdn.lpsnmedia.net lpcdn.lpsnmedia.net https://www.googletagservices.com https://ad.doubleclick.net service.maxymiser.net https://connect.facebook.net https://*.fls.doubleclick.net/ https://www.googleadservices.com https://www.googletagmanager.com https://assets.adobedtm.com https://dpm.demdex.net/ https://*.santander.co.uk 'unsafe-eval'; connect-src 'self' 'unsafe-inline' https://udc-neb.kampyle.com https://*.bf.dynatrace.com https://privacyportal-uk.onetrust.com https://cdn-ukwest.onetrust.com https://o2.mouseflow.com https://googleads4.g.doubleclick.net wss://lo.msg.liveperson.net https://dpm.demdex.net https://*.santander.co.uk; img-src 'self' https://lpcdn.lpsnmedia.net service.maxymiser.net 'unsafe-inline' https://*.santander.co.uk data: https:; style-src 'self' service.maxymiser.net 'unsafe-inline'; font-src 'self'; frame-src 'self' 'unsafe-inline' https://www.youtube-nocookie.com https://resources.digital-cloud-uk.medallia.eu https://lo.tokenizer.liveperson.net https://lo.msghist.liveperson.net https://lo.msg.liveperson.net https://lpcdn.lpsnmedia.net lo.idp.liveperson.net server.lon.liveperson.net https://authorize.omniture.com https://sitecatalyst.omniture.com service.maxymiser.net https://edigitalsurvey.com https://www.youtube.com 
https://santander.demdex.net https://*.fls.doubleclick.net; object-src 'self'; media-src lpcdn.lpsnmedia.net; worker-src blob:;", + "anomaly": [ + "default-src 'none'", + "default-src 'self'" + ] + }, + { + "rule": "Content-Security-Policy", + "severity": "medium", + "message": "Must-Avoid directive included", + "avoid": [ + "unsafe-inline", + "unsafe-eval" + ], + "delimiter": ";", + "value": "default-src 'self' service.maxymiser.net; child-src 'self' 'unsafe-inline' https://www.googleadservices.com https://*.fls.doubleclick.net/ https://*.santander.co.uk https://santander.demdex.net; script-src 'self' 'unsafe-inline' https://track.omguk.com https://cdn.usersnap.com https://screencapture.kampyle.com https://nebula-cdn.kampyle.com https://resources.digital-cloud-uk.medallia.eu https://pagead2.googlesyndication.com https://js-cdn.dynatrace.com https://activitymap.adobe.com https://cdn-ukwest.onetrust.com https://cdn.mouseflow.com https://googleads.g.doubleclick.net lptag.liveperson.net lo.v.liveperson.net lo.msg.liveperson.net accdn.lpsnmedia.net lpcdn.lpsnmedia.net https://www.googletagservices.com https://ad.doubleclick.net service.maxymiser.net https://connect.facebook.net https://*.fls.doubleclick.net/ https://www.googleadservices.com https://www.googletagmanager.com https://assets.adobedtm.com https://dpm.demdex.net/ https://*.santander.co.uk 'unsafe-eval'; connect-src 'self' 'unsafe-inline' https://udc-neb.kampyle.com https://*.bf.dynatrace.com https://privacyportal-uk.onetrust.com https://cdn-ukwest.onetrust.com https://o2.mouseflow.com https://googleads4.g.doubleclick.net wss://lo.msg.liveperson.net https://dpm.demdex.net https://*.santander.co.uk; img-src 'self' https://lpcdn.lpsnmedia.net service.maxymiser.net 'unsafe-inline' https://*.santander.co.uk data: https:; style-src 'self' service.maxymiser.net 'unsafe-inline'; font-src 'self'; frame-src 'self' 'unsafe-inline' https://www.youtube-nocookie.com https://resources.digital-cloud-uk.medallia.eu 
https://lo.tokenizer.liveperson.net https://lo.msghist.liveperson.net https://lo.msg.liveperson.net https://lpcdn.lpsnmedia.net lo.idp.liveperson.net server.lon.liveperson.net https://authorize.omniture.com https://sitecatalyst.omniture.com service.maxymiser.net https://edigitalsurvey.com https://www.youtube.com https://santander.demdex.net https://*.fls.doubleclick.net; object-src 'self'; media-src lpcdn.lpsnmedia.net; worker-src blob:;", + "anomaly": "unsafe-inline" + }, + { + "rule": "Content-Security-Policy", + "severity": "medium", + "message": "Must-Avoid directive included", + "avoid": [ + "unsafe-inline", + "unsafe-eval" + ], + "delimiter": ";", + "value": "default-src 'self' service.maxymiser.net; child-src 'self' 'unsafe-inline' https://www.googleadservices.com https://*.fls.doubleclick.net/ https://*.santander.co.uk https://santander.demdex.net; script-src 'self' 'unsafe-inline' https://track.omguk.com https://cdn.usersnap.com https://screencapture.kampyle.com https://nebula-cdn.kampyle.com https://resources.digital-cloud-uk.medallia.eu https://pagead2.googlesyndication.com https://js-cdn.dynatrace.com https://activitymap.adobe.com https://cdn-ukwest.onetrust.com https://cdn.mouseflow.com https://googleads.g.doubleclick.net lptag.liveperson.net lo.v.liveperson.net lo.msg.liveperson.net accdn.lpsnmedia.net lpcdn.lpsnmedia.net https://www.googletagservices.com https://ad.doubleclick.net service.maxymiser.net https://connect.facebook.net https://*.fls.doubleclick.net/ https://www.googleadservices.com https://www.googletagmanager.com https://assets.adobedtm.com https://dpm.demdex.net/ https://*.santander.co.uk 'unsafe-eval'; connect-src 'self' 'unsafe-inline' https://udc-neb.kampyle.com https://*.bf.dynatrace.com https://privacyportal-uk.onetrust.com https://cdn-ukwest.onetrust.com https://o2.mouseflow.com https://googleads4.g.doubleclick.net wss://lo.msg.liveperson.net https://dpm.demdex.net https://*.santander.co.uk; img-src 'self' 
https://lpcdn.lpsnmedia.net service.maxymiser.net 'unsafe-inline' https://*.santander.co.uk data: https:; style-src 'self' service.maxymiser.net 'unsafe-inline'; font-src 'self'; frame-src 'self' 'unsafe-inline' https://www.youtube-nocookie.com https://resources.digital-cloud-uk.medallia.eu https://lo.tokenizer.liveperson.net https://lo.msghist.liveperson.net https://lo.msg.liveperson.net https://lpcdn.lpsnmedia.net lo.idp.liveperson.net server.lon.liveperson.net https://authorize.omniture.com https://sitecatalyst.omniture.com service.maxymiser.net https://edigitalsurvey.com https://www.youtube.com https://santander.demdex.net https://*.fls.doubleclick.net; object-src 'self'; media-src lpcdn.lpsnmedia.net; worker-src blob:;", + "anomaly": "unsafe-eval" + }, + { + "rule": "Strict-Transport-Security", + "severity": "high", + "message": "Header not included in response", + "expected": [ + "max-age=31536000", + "includesubdomains" + ], + "delimiter": ";" + }, + { + "rule": "Cache-Control", + "severity": "high", + "message": "Value does not match security policy", + "expected": [ + "no-cache", + "no-store", + "must-revalidate" + ], + "delimiter": ",", + "value": "private, must-revalidate, max-age=900" + }, + { + "rule": "Pragma", + "severity": "high", + "message": "Header not included in response", + "expected": [ + "no-cache" + ], + "delimiter": ";" + } +] \ No newline at end of file diff --git a/unittests/scans/drheader/scan3.json b/unittests/scans/drheader/scan3.json index 7867926fd86..f8d5838f055 100644 --- a/unittests/scans/drheader/scan3.json +++ b/unittests/scans/drheader/scan3.json @@ -1 +1,104 @@ -[{"rule": "Content-Security-Policy", "severity": "high", "message": "Header not included in response"}, {"rule": "X-XSS-Protection", "severity": "high", "message": "Value does not match security policy", "expected": ["1", "mode=block"], "delimiter": ";", "value": "0"}, {"rule": "Server", "severity": "high", "message": "Header should not be returned"}, {"rule": 
"Strict-Transport-Security", "severity": "high", "message": "Header not included in response", "expected": ["max-age=31536000", "includesubdomains"], "delimiter": ";"}, {"rule": "X-Content-Type-Options", "severity": "high", "message": "Header not included in response", "expected": ["nosniff"], "delimiter": ";"}, {"rule": "Set-Cookie", "severity": "high", "message": "Must-Contain directive missed", "expected": ["httponly", "secure"], "delimiter": ";", "value": "nid=208=d8xko0gp8g_pycvdqrwtvdpdiu_7es-hyvqugfqshzyjz5sozpy3y0ayn4kzdkpuzz-ylqjsydscnyuf58liz54ytg7by8smauul5noxicgela-oyi5lu4d_juan8geufgyxg1xao2bqronqyiplvbivs-nndfbywyjwnz0myso; expires=wed, 11-aug-2021 16:59:02 gmt; path=/; domain=.google.com; httponly", "anomaly": "secure"}, {"rule": "Set-Cookie", "severity": "medium", "message": "Must-Contain directive missed", "expected": ["httponly", "secure"], "delimiter": ";", "value": "consent=pending+061; expires=fri, 01-jan-2038 00:00:00 gmt; path=/; domain=.google.com", "anomaly": "httponly"}, {"rule": "Set-Cookie", "severity": "high", "message": "Must-Contain directive missed", "expected": ["httponly", "secure"], "delimiter": ";", "value": "consent=pending+061; expires=fri, 01-jan-2038 00:00:00 gmt; path=/; domain=.google.com", "anomaly": "secure"}, {"rule": "Referrer-Policy", "severity": "high", "message": "Header not included in response"}, {"rule": "Cache-Control", "severity": "high", "message": "Value does not match security policy", "expected": ["no-cache", "no-store", "must-revalidate"], "delimiter": ",", "value": "private, max-age=0"}, {"rule": "Pragma", "severity": "high", "message": "Header not included in response", "expected": ["no-cache"], "delimiter": ";"}] +[ + { + "rule": "Content-Security-Policy", + "severity": "high", + "message": "Header not included in response" + }, + { + "rule": "X-XSS-Protection", + "severity": "high", + "message": "Value does not match security policy", + "expected": [ + "1", + "mode=block" + ], + "delimiter": ";", + 
"value": "0" + }, + { + "rule": "Server", + "severity": "high", + "message": "Header should not be returned" + }, + { + "rule": "Strict-Transport-Security", + "severity": "high", + "message": "Header not included in response", + "expected": [ + "max-age=31536000", + "includesubdomains" + ], + "delimiter": ";" + }, + { + "rule": "X-Content-Type-Options", + "severity": "high", + "message": "Header not included in response", + "expected": [ + "nosniff" + ], + "delimiter": ";" + }, + { + "rule": "Set-Cookie", + "severity": "high", + "message": "Must-Contain directive missed", + "expected": [ + "httponly", + "secure" + ], + "delimiter": ";", + "value": "nid=208=d8xko0gp8g_pycvdqrwtvdpdiu_7es-hyvqugfqshzyjz5sozpy3y0ayn4kzdkpuzz-ylqjsydscnyuf58liz54ytg7by8smauul5noxicgela-oyi5lu4d_juan8geufgyxg1xao2bqronqyiplvbivs-nndfbywyjwnz0myso; expires=wed, 11-aug-2021 16:59:02 gmt; path=/; domain=.google.com; httponly", + "anomaly": "secure" + }, + { + "rule": "Set-Cookie", + "severity": "medium", + "message": "Must-Contain directive missed", + "expected": [ + "httponly", + "secure" + ], + "delimiter": ";", + "value": "consent=pending+061; expires=fri, 01-jan-2038 00:00:00 gmt; path=/; domain=.google.com", + "anomaly": "httponly" + }, + { + "rule": "Set-Cookie", + "severity": "high", + "message": "Must-Contain directive missed", + "expected": [ + "httponly", + "secure" + ], + "delimiter": ";", + "value": "consent=pending+061; expires=fri, 01-jan-2038 00:00:00 gmt; path=/; domain=.google.com", + "anomaly": "secure" + }, + { + "rule": "Referrer-Policy", + "severity": "high", + "message": "Header not included in response" + }, + { + "rule": "Cache-Control", + "severity": "high", + "message": "Value does not match security policy", + "expected": [ + "no-cache", + "no-store", + "must-revalidate" + ], + "delimiter": ",", + "value": "private, max-age=0" + }, + { + "rule": "Pragma", + "severity": "high", + "message": "Header not included in response", + "expected": [ + "no-cache" + ], + 
"delimiter": ";" + } +] \ No newline at end of file diff --git a/unittests/scans/intsights/intsights_zero_vuln.json b/unittests/scans/intsights/intsights_zero_vuln.json index da0b981bbae..34fe8994ead 100644 --- a/unittests/scans/intsights/intsights_zero_vuln.json +++ b/unittests/scans/intsights/intsights_zero_vuln.json @@ -1 +1,3 @@ -{"Alerts": []} \ No newline at end of file +{ + "Alerts": [] +} \ No newline at end of file diff --git a/unittests/scans/jfrog_xray_unified/issue_13628.json b/unittests/scans/jfrog_xray_unified/issue_13628.json new file mode 100644 index 00000000000..6cf5a92926a --- /dev/null +++ b/unittests/scans/jfrog_xray_unified/issue_13628.json @@ -0,0 +1,36 @@ +{ + "total_rows": 123, + "rows": [ + { + "cves": [ + { + "cve": "CVE-2023-42282", + "cvss_v3_score": 9.8, + "cvss_v3_vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H" + } + ], + "cvss3_max_score": 9.8, + "severity": "Critical", + "component_physical_path": "ip:2.0.0", + "impact_path": [ + "somepath" + ], + "fixed_versions": [ + "2.0.1", + "1.1.9" + ], + "issue_id": "XRAY-123", + "project_keys": [ + "somepath" + ], + "applicability": null, + "applicability_result": "not_scanned", + "summary": "The ip package before 1.1.9 for Node.js might allow SSRF because some IP addresses (such as 0x7f.1) are improperly categorized as globally routable via isPublic.", + "vulnerable_component": "npm://ip:2.0.0", + "impacted_artifact": "build://[some_artifact_id]", + "path": "somepath", + "published": "2024-02-09T16:30:10Z", + "artifact_scan_time": "2025-11-03T11:42:09Z" + } + ] +} \ No newline at end of file diff --git a/unittests/scans/mend/mend-sca-platform-api3-no-findings.json b/unittests/scans/mend/mend-sca-platform-api3-no-findings.json index 9df1c1c1a27..96e072f0e2e 100644 --- a/unittests/scans/mend/mend-sca-platform-api3-no-findings.json +++ b/unittests/scans/mend/mend-sca-platform-api3-no-findings.json @@ -1 +1,8 @@ -{"additionalData": {"totalItems": 0, "paging": {}}, "supportToken": 
"123442284e284dddb0652ff65c9f3ebd1731540952924", "response": []} \ No newline at end of file +{ + "additionalData": { + "totalItems": 0, + "paging": {} + }, + "supportToken": "123442284e284dddb0652ff65c9f3ebd1731540952924", + "response": [] +} \ No newline at end of file diff --git a/unittests/scans/mend/okhttp_no_vuln.json b/unittests/scans/mend/okhttp_no_vuln.json index 831bf814c0b..88dc06c72d7 100644 --- a/unittests/scans/mend/okhttp_no_vuln.json +++ b/unittests/scans/mend/okhttp_no_vuln.json @@ -1,5 +1,3 @@ -{ - "vulnerabilities":[ - - ] +{ + "vulnerabilities": [] } \ No newline at end of file diff --git a/unittests/scans/semgrep/empty.json b/unittests/scans/semgrep/empty.json index 3cbbe070d1b..bed8fbdc619 100644 --- a/unittests/scans/semgrep/empty.json +++ b/unittests/scans/semgrep/empty.json @@ -1 +1,4 @@ -{"results": [], "errors": []} \ No newline at end of file +{ + "results": [], + "errors": [] +} \ No newline at end of file diff --git a/unittests/scans/semgrep_pro/no_vuln.json b/unittests/scans/semgrep_pro/no_vuln.json index 5865a185a47..ef24ab5c42c 100644 --- a/unittests/scans/semgrep_pro/no_vuln.json +++ b/unittests/scans/semgrep_pro/no_vuln.json @@ -1,4 +1,3 @@ { - "findings": [ - ] + "findings": [] } \ No newline at end of file diff --git a/unittests/scans/snyk_issue_api/empty.json b/unittests/scans/snyk_issue_api/empty.json index e6b11fc0300..28dda1029e0 100644 --- a/unittests/scans/snyk_issue_api/empty.json +++ b/unittests/scans/snyk_issue_api/empty.json @@ -1,6 +1,6 @@ { - "jsonapi": { - "version": "1.0" - }, - "data": [] + "jsonapi": { + "version": "1.0" + }, + "data": [] } \ No newline at end of file diff --git a/unittests/scans/whitehat_sentinel/empty_file.json b/unittests/scans/whitehat_sentinel/empty_file.json index 8d64b7c1015..0467e59e0c8 100644 --- a/unittests/scans/whitehat_sentinel/empty_file.json +++ b/unittests/scans/whitehat_sentinel/empty_file.json @@ -1 +1,3 @@ -{"collection": []} \ No newline at end of file +{ + "collection": [] +} 
\ No newline at end of file diff --git a/unittests/test_finding_model.py b/unittests/test_finding_model.py index bc65ffd1096..6de9e4847fc 100644 --- a/unittests/test_finding_model.py +++ b/unittests/test_finding_model.py @@ -1,8 +1,23 @@ -from datetime import datetime, timedelta +from datetime import date, datetime, timedelta from crum import impersonate - -from dojo.models import DojoMeta, Engagement, Finding, Test, User +from django.utils.timezone import is_naive, now + +from dojo.finding.helper import close_finding +from dojo.models import ( + DojoMeta, + Endpoint, + Endpoint_Status, + Engagement, + Finding, + Note_Type, + Notes, + Product, + Product_Type, + Test, + Test_Type, + User, +) from .dojo_test_case import DojoTestCase @@ -10,6 +25,79 @@ class TestFindingModel(DojoTestCase): fixtures = ["dojo_testdata.json"] + def setUp(self): + self.user = User.objects.first() # Use a user from fixtures + self.product_type = Product_Type.objects.create(name="Test Product Type") + self.product = Product.objects.create(name="Test Product", prod_type=self.product_type) + self.engagement = Engagement.objects.create( + name="Test Engagement", + product=self.product, + target_start=now(), + target_end=now(), + ) + self.test_type = Test_Type.objects.create(name="Unit Test Type") + self.test = Test.objects.create( + engagement=self.engagement, + test_type=self.test_type, + title="Test for Finding", + target_start=now(), + target_end=now(), + ) + self.finding = Finding.objects.create(title="Close Finding Test", active=True, test=self.test) + self.endpoint = Endpoint.objects.create(host="test.local") + self.endpoint_status = Endpoint_Status.objects.create(finding=self.finding, endpoint=self.endpoint) + self.finding.status_finding.add(self.endpoint_status) + + def test_close_finding_with_naive_date(self): + note_type_obj = Note_Type.objects.create(name="General") + naive_date = date.today() # No timezone + close_finding( + finding=self.finding, + user=self.user, + 
is_mitigated=True, + mitigated=naive_date, + mitigated_by=None, + false_p=False, + out_of_scope=False, + duplicate=False, + note_entry="Mitigation note", + note_type=note_type_obj, + ) + self.assertFalse(is_naive(self.finding.mitigated)) + note = Notes.objects.filter(finding=self.finding).first() + self.assertIsNotNone(note) + self.assertFalse(is_naive(note.date)) + status = Endpoint_Status.objects.filter(finding=self.finding).first() + self.assertTrue(status.mitigated) + self.assertFalse(is_naive(status.mitigated_time)) + + def test_close_finding_with_naive_datetime(self): + naive_datetime = datetime(2025, 11, 12, 0, 0, 0) + close_finding( + finding=self.finding, + user=self.user, + is_mitigated=True, + mitigated=naive_datetime, + mitigated_by=None, + false_p=False, + out_of_scope=False, + duplicate=False, + ) + self.assertFalse(is_naive(self.finding.mitigated)) + + def test_close_finding_with_none_mitigated(self): + close_finding( + finding=self.finding, + user=self.user, + is_mitigated=True, + mitigated=None, + mitigated_by=None, + false_p=False, + out_of_scope=False, + duplicate=False, + ) + self.assertFalse(is_naive(self.finding.mitigated)) + def test_get_sast_source_file_path_with_link_no_file_path(self): finding = Finding() self.assertEqual(None, finding.get_sast_source_file_path_with_link()) diff --git a/unittests/test_importers_deduplication.py b/unittests/test_importers_deduplication.py index a006cc90099..2c607008720 100644 --- a/unittests/test_importers_deduplication.py +++ b/unittests/test_importers_deduplication.py @@ -18,10 +18,8 @@ from .dojo_test_case import DojoAPITestCase, get_unit_tests_scans_path -logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) - STACK_HAWK_FILENAME = get_unit_tests_scans_path("stackhawk") / "stackhawk_many_vul_without_duplicated_findings.json" STACK_HAWK_SUBSET_FILENAME = get_unit_tests_scans_path("stackhawk") / "stackhawk_many_vul_without_duplicated_findings_subset.json" STACK_HAWK_SCAN_TYPE = 
"StackHawk HawkScan" diff --git a/unittests/test_importers_performance.py b/unittests/test_importers_performance.py index c0da1e213c7..9da777ccecc 100644 --- a/unittests/test_importers_performance.py +++ b/unittests/test_importers_performance.py @@ -26,10 +26,8 @@ from .dojo_test_case import DojoTestCase, get_unit_tests_scans_path -logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) - STACK_HAWK_FILENAME = get_unit_tests_scans_path("stackhawk") / "stackhawk_many_vul_without_duplicated_findings.json" STACK_HAWK_SUBSET_FILENAME = get_unit_tests_scans_path("stackhawk") / "stackhawk_many_vul_without_duplicated_findings_subset.json" STACK_HAWK_SCAN_TYPE = "StackHawk HawkScan" diff --git a/unittests/test_system_settings.py b/unittests/test_system_settings.py index a735ca03f98..d2e25630b0c 100644 --- a/unittests/test_system_settings.py +++ b/unittests/test_system_settings.py @@ -1,4 +1,17 @@ -from dojo.models import System_Settings +from django.test import TestCase, override_settings +from django.urls import reverse +from django.utils.timezone import now + +from dojo.models import ( + Engagement, + Finding, + Product, + Product_Type, + System_Settings, + Test, + Test_Type, + User, +) from .dojo_test_case import DojoTestCase @@ -26,3 +39,50 @@ def test_system_settings_update(self): system_settings.save() system_settings = System_Settings.objects.get(no_cache=True) self.assertEqual(system_settings.enable_jira, True) + + +@override_settings(DD_EDITABLE_MITIGATED_DATA=True) +class CloseFindingViewInstanceTest(TestCase): + def setUp(self): + self.user = User.objects.create_user( + username="tester", + password="pass", # noqa: S106 + is_staff=True, + is_superuser=True, + ) + self.client.force_login(self.user) + self.product_type = Product_Type.objects.create(name="Test Product Type") + self.product = Product.objects.create(name="Test Product", prod_type=self.product_type) + self.engagement = Engagement.objects.create( + name="Test Engagement", + 
product=self.product, + target_start=now(), + target_end=now(), + ) + self.test_type = Test_Type.objects.create(name="Unit Test Type") + self.test = Test.objects.create( + engagement=self.engagement, + test_type=self.test_type, + title="Test for Finding", + target_start=now(), + target_end=now(), + ) + self.finding = Finding.objects.create( + title="Close Finding Test", + active=True, + test=self.test, + reporter=self.user, + ) + self.url = reverse("close_finding", args=[self.finding.id]) + + def test_get_request_initializes_form_with_finding_instance(self): + response = self.client.get(self.url) + self.assertEqual(response.status_code, 200) + form = response.context["form"] + self.assertIsInstance(form.instance, Finding) + self.assertEqual(form.instance.id, self.finding.id) + + def test_post_request_initializes_form_with_finding_instance(self): + data = {"close_reason": "Mitigated", "notes": "Closing this finding"} + response = self.client.post(self.url, data) + self.assertIn(response.status_code, [200, 302]) diff --git a/unittests/tools/test_dawnscanner_parser.py b/unittests/tools/test_dawnscanner_parser.py index dd42b37ad0a..6d7107c83b5 100644 --- a/unittests/tools/test_dawnscanner_parser.py +++ b/unittests/tools/test_dawnscanner_parser.py @@ -13,38 +13,24 @@ def test_burp_with_one_vuln_has_one_finding(self): for finding in findings: for endpoint in finding.unsaved_endpoints: endpoint.clean() - self.assertEqual(4, len(findings)) - with self.subTest(i=0): finding = findings[0] self.assertEqual("CVE-2016-6316", finding.title) self.assertEqual("Medium", finding.severity) self.assertEqual(1, len(finding.unsaved_vulnerability_ids)) self.assertEqual("CVE-2016-6316", finding.unsaved_vulnerability_ids[0]) - self.assertEqual( - 'Text declared as "HTML safe" when passed as an attribute value to a tag helper will not have quotes escaped which can lead to an XSS attack.', - finding.description, - ) - self.assertEqual( - datetime.datetime(2019, 4, 1, 21, 14, 32, 
tzinfo=datetime.timezone(datetime.timedelta(seconds=0))), - finding.date, - ) # 2019-04-01 21:14:32 +0000 - + self.assertEqual(finding.description, 'Text declared as "HTML safe" when passed as an attribute value to a tag helper will not have quotes escaped which can lead to an XSS attack.') + self.assertEqual(datetime.datetime(2019, 4, 1, 21, 14, 32, tzinfo=datetime.timezone(datetime.timedelta(seconds=0))), finding.date) # 2019-04-01 21:14:32 +0000 + with self.subTest(i=2): + finding = findings[2] + self.assertEqual(False, finding.fix_available) with self.subTest(i=3): finding = findings[3] self.assertEqual("Owasp Ror CheatSheet: Security Related Headers", finding.title) self.assertEqual("Info", finding.severity) self.assertIsNone(finding.unsaved_vulnerability_ids) - self.assertEqual( - 'To set a header value, simply access the response.headers object as a hash inside your controller (often in a before/after_filter). Rails 4 provides the "default_headers" functionality that will automatically apply the values supplied. This works for most headers in almost all cases.', - finding.description, - ) - self.assertEqual( - "Use response headers like X-Frame-Options, X-Content-Type-Options, X-XSS-Protection in your project.", - finding.mitigation, - ) - self.assertEqual( - datetime.datetime(2019, 4, 1, 21, 14, 32, tzinfo=datetime.timezone(datetime.timedelta(seconds=0))), - finding.date, - ) # 2019-04-01 21:14:32 +0000 + self.assertEqual(finding.description, 'To set a header value, simply access the response.headers object as a hash inside your controller (often in a before/after_filter). Rails 4 provides the "default_headers" functionality that will automatically apply the values supplied. 
This works for most headers in almost all cases.') + self.assertEqual("Use response headers like X-Frame-Options, X-Content-Type-Options, X-XSS-Protection in your project.", finding.mitigation) + self.assertEqual(datetime.datetime(2019, 4, 1, 21, 14, 32, tzinfo=datetime.timezone(datetime.timedelta(seconds=0))), finding.date) # 2019-04-01 21:14:32 +0000 + self.assertEqual(True, finding.fix_available) diff --git a/unittests/tools/test_deepfence_threatmapper_parser.py b/unittests/tools/test_deepfence_threatmapper_parser.py index 8561920f61a..47cd67a4e0d 100644 --- a/unittests/tools/test_deepfence_threatmapper_parser.py +++ b/unittests/tools/test_deepfence_threatmapper_parser.py @@ -13,6 +13,14 @@ def test_parse_file_compliance_report(self): self.assertEqual(findings[0].title, "Threatmapper_Compliance_Report-gdpr_3.6") self.assertEqual(findings[0].severity, "Info") + def test_parse_file_compliance_report_newformat(self): + with (get_unit_tests_scans_path("deepfence_threatmapper") / "compliance_report_newformat.xlsx").open("rb") as testfile: + parser = DeepfenceThreatmapperParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(66, len(findings)) + self.assertEqual(findings[0].title, "Threatmapper_Compliance_Report-gdpr_3.4") + self.assertEqual(findings[0].severity, "Info") + def test_parse_file_malware_report(self): with (get_unit_tests_scans_path("deepfence_threatmapper") / "malware_report.xlsx").open("rb") as testfile: parser = DeepfenceThreatmapperParser() @@ -22,6 +30,14 @@ def test_parse_file_malware_report(self): self.assertEqual(findings[0].severity, "Low") self.assertEqual(findings[0].file_path, "/tmp/Deepfence/YaraHunter/df_db09257b02e615049e0aecc05be2dc2401735e67db4ab74225df777c62c39753/usr/sbin/mkfs.cramfs") + def test_parse_file_malware_report_newformat(self): + with (get_unit_tests_scans_path("deepfence_threatmapper") / "malware_report_newformat.xlsx").open("rb") as testfile: + parser = DeepfenceThreatmapperParser() + findings = 
parser.get_findings(testfile, Test()) + self.assertEqual(66, len(findings)) + self.assertEqual(findings[0].title, "spyeye") + self.assertEqual(findings[0].severity, "High") + def test_parse_file_secret_report(self): with (get_unit_tests_scans_path("deepfence_threatmapper") / "secret_report.xlsx").open("rb") as testfile: parser = DeepfenceThreatmapperParser() @@ -31,6 +47,14 @@ def test_parse_file_secret_report(self): self.assertEqual(findings[0].severity, "High") self.assertEqual(findings[0].file_path, "usr/share/doc/curl-8.3.0/TheArtOfHttpScripting.md") + def test_parse_file_secret_report_newformat(self): + with (get_unit_tests_scans_path("deepfence_threatmapper") / "secret_report_newformat.xlsx").open("rb") as testfile: + parser = DeepfenceThreatmapperParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(15, len(findings)) + self.assertEqual(findings[0].title, "index-username_and_password_in_uri in /var/lib/host-containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/1/fs/usr/lib64/python2.7/urllib2.py") + self.assertEqual(findings[0].severity, "High") + def test_parse_file_vulnerability_report(self): with (get_unit_tests_scans_path("deepfence_threatmapper") / "vulnerability_report.xlsx").open("rb") as testfile: parser = DeepfenceThreatmapperParser() @@ -40,3 +64,12 @@ def test_parse_file_vulnerability_report(self): self.assertEqual(findings[0].severity, "Low") self.assertEqual(findings[0].mitigation, "2.5-10.amzn2.0.1") self.assertEqual(findings[0].cve, "CVE-2021-36084") + + def test_parse_file_vulnerability_report_newformat(self): + with (get_unit_tests_scans_path("deepfence_threatmapper") / "vulnerability_report_newformat.xlsx").open("rb") as testfile: + parser = DeepfenceThreatmapperParser() + findings = parser.get_findings(testfile, Test()) + self.assertEqual(254, len(findings)) + self.assertEqual(findings[0].title, "Threatmapper_Vuln_Report-CVE-2005-2541") + self.assertEqual(findings[0].severity, "Critical") + 
self.assertEqual(findings[0].cve, "CVE-2005-2541") diff --git a/unittests/tools/test_ibm_app_parser.py b/unittests/tools/test_ibm_app_parser.py index 1a66f86d656..6eaacee2cca 100644 --- a/unittests/tools/test_ibm_app_parser.py +++ b/unittests/tools/test_ibm_app_parser.py @@ -23,3 +23,4 @@ def test_parse_file(self): finding = findings[1] self.assertEqual("Info", finding.severity) + self.assertEqual(True, finding.fix_available) diff --git a/unittests/tools/test_jfrog_xray_unified_parser.py b/unittests/tools/test_jfrog_xray_unified_parser.py index 52b673308c4..92bc30c75ff 100644 --- a/unittests/tools/test_jfrog_xray_unified_parser.py +++ b/unittests/tools/test_jfrog_xray_unified_parser.py @@ -345,3 +345,12 @@ def test_parse_file_with_another_report(self): findings = parser.get_findings(testfile, Test()) testfile.close() self.assertEqual(7, len(findings)) + + def test_parse_file_issue_13628(self): + testfile = (get_unit_tests_scans_path("jfrog_xray_unified") / "issue_13628.json").open(encoding="utf-8") + parser = JFrogXrayUnifiedParser() + findings = parser.get_findings(testfile, Test()) + testfile.close() + self.assertEqual(1, len(findings)) + self.assertEqual("Critical", findings[0].severity) + self.assertEqual("XRAY-123 - The ip package before 1.1.9 for Node.js might allow SSRF because some IP addresses (such as 0x7f.1) are improperly categorized as globally routable via isPublic.", findings[0].title) diff --git a/unittests/tools/test_kubeaudit_parser.py b/unittests/tools/test_kubeaudit_parser.py index 38b8e7ee8fd..9e5bdd0928a 100644 --- a/unittests/tools/test_kubeaudit_parser.py +++ b/unittests/tools/test_kubeaudit_parser.py @@ -15,3 +15,4 @@ def test_parse_file_has_no_findings(self): self.assertEqual(findings[5].mitigation, "hostNetwork is set to 'true' in PodSpec. 
It should be set to 'false'.") self.assertEqual(findings[8].description, "AuditResultName: AllowPrivilegeEscalationNil\nResourceApiVersion: v1\nResourceKind: Pod\nResourceName: storage-provisioner\nlevel: error\nmsg: allowPrivilegeEscalation not set which allows privilege escalation. It should be set to 'false'.\nContainer: storage-provisioner\nResourceNamespace: kube-system\n") self.assertEqual(findings[11].severity, "High") + self.assertEqual(findings[11].fix_available, True) diff --git a/unittests/tools/test_nancy_parser.py b/unittests/tools/test_nancy_parser.py index 09b3194360d..f233aaf7691 100644 --- a/unittests/tools/test_nancy_parser.py +++ b/unittests/tools/test_nancy_parser.py @@ -18,7 +18,7 @@ def test_nancy_parser_with_one_vuln_has_one_findings(self): self.assertEqual(1, len(findings)) with self.subTest(i=0): finding = findings[0] - self.assertEqual("Info", finding.severity) + self.assertEqual("Medium", finding.severity) self.assertIsNotNone(finding.description) self.assertGreater(len(finding.description), 0) self.assertEqual(None, finding.cve) diff --git a/unittests/tools/test_pwn_sast_parser.py b/unittests/tools/test_pwn_sast_parser.py index ff2762b146a..49feb32df3a 100644 --- a/unittests/tools/test_pwn_sast_parser.py +++ b/unittests/tools/test_pwn_sast_parser.py @@ -24,6 +24,7 @@ def test_parse_many_finding(self): findings = parser.get_findings(testfile, Test()) self.assertIsInstance(findings, list) self.assertEqual(3, len(findings)) + self.assertEqual(True, findings[0].fix_available) def test_one_dup_finding(self): with (get_unit_tests_scans_path("pwn_sast") / "one_dup_finding.json").open(encoding="utf-8") as testfile: diff --git a/unittests/tools/test_qualys_parser.py b/unittests/tools/test_qualys_parser.py index b7d9f95b944..457588a70c0 100644 --- a/unittests/tools/test_qualys_parser.py +++ b/unittests/tools/test_qualys_parser.py @@ -151,10 +151,38 @@ def test_parse_file_with_cvss_values_and_scores(self): for finding in findings: if 
finding.unsaved_endpoints[0].host == "demo14.s02.sjc01.qualys.com" and finding.title == "QID-370876 | AMD Processors Multiple Security Vulnerabilities (RYZENFALL/MASTERKEY/CHIMERA-FW/FALLOUT)": finding_cvssv3_score = finding + self.assertEqual( + finding.unsaved_vulnerability_ids, + [ + "CVE-2018-8930", + "CVE-2018-8931", + "CVE-2018-8932", + "CVE-2018-8933", + "CVE-2018-8934", + "CVE-2018-8935", + "CVE-2018-8936", + ], + ) if finding.unsaved_endpoints[0].host == "demo13.s02.sjc01.qualys.com" and finding.title == "QID-370876 | AMD Processors Multiple Security Vulnerabilities (RYZENFALL/MASTERKEY/CHIMERA-FW/FALLOUT)": finding_no_cvssv3_at_detection = finding + self.assertEqual( + finding.unsaved_vulnerability_ids, + [ + "CVE-2018-8930", + "CVE-2018-8931", + "CVE-2018-8932", + "CVE-2018-8933", + "CVE-2018-8934", + "CVE-2018-8935", + "CVE-2018-8936", + ], + ) if finding.unsaved_endpoints[0].host == "demo14.s02.sjc01.qualys.com" and finding.title == 'QID-121695 | NTP "monlist" Feature Denial of Service Vulnerability': finding_no_cvssv3 = finding + self.assertEqual( + finding.unsaved_vulnerability_ids, + ["CVE-2013-5211"], + ) # The CVSS Vector is not used from the Knowledgebase self.assertEqual( # CVSS_FINAL is defined without a cvssv3 vector