diff --git a/src/sentry/models/project.py b/src/sentry/models/project.py index 612fb7aa7c75..b6885c87a30d 100644 --- a/src/sentry/models/project.py +++ b/src/sentry/models/project.py @@ -881,7 +881,9 @@ def normalize_before_relocation_import( def write_relocation_import( self, scope: ImportScope, flags: ImportFlags ) -> tuple[int, ImportKind] | None: - from sentry.receivers.project_detectors import disable_default_detector_creation + from sentry.workflow_engine.receivers.project_detectors import ( + disable_default_detector_creation, + ) with disable_default_detector_creation(): return super().write_relocation_import(scope, flags) diff --git a/src/sentry/projects/project_rules/creator.py b/src/sentry/projects/project_rules/creator.py index 5d3a0e435911..b34732a87e35 100644 --- a/src/sentry/projects/project_rules/creator.py +++ b/src/sentry/projects/project_rules/creator.py @@ -9,8 +9,8 @@ from sentry.models.project import Project from sentry.models.rule import Rule, RuleSource from sentry.types.actor import Actor +from sentry.workflow_engine.defaults.detectors import ensure_default_detectors from sentry.workflow_engine.migration_helpers.issue_alert_migration import IssueAlertMigrator -from sentry.workflow_engine.processors.detector import ensure_default_detectors from sentry.workflow_engine.utils.legacy_metric_tracking import report_used_legacy_models logger = logging.getLogger(__name__) diff --git a/src/sentry/receivers/__init__.py b/src/sentry/receivers/__init__.py index a8c00c33aa6f..83b66df9be09 100644 --- a/src/sentry/receivers/__init__.py +++ b/src/sentry/receivers/__init__.py @@ -9,7 +9,6 @@ from .outbox.cell import * # noqa: F401,F403 from .outbox.control import * # noqa: F401,F403 from .owners import * # noqa: F401,F403 -from .project_detectors import * # noqa: F401,F403 from .releases import * # noqa: F401,F403 from .rule_snooze import * # noqa: F401,F403 from .rules import * # noqa: F401,F403 diff --git a/src/sentry/testutils/factories.py 
b/src/sentry/testutils/factories.py index fe4ba6414a57..330d276b57d6 100644 --- a/src/sentry/testutils/factories.py +++ b/src/sentry/testutils/factories.py @@ -561,7 +561,9 @@ def create_project( create_default_detectors=True, **kwargs, ) -> Project: - from sentry.receivers.project_detectors import disable_default_detector_creation + from sentry.workflow_engine.receivers.project_detectors import ( + disable_default_detector_creation, + ) if not kwargs.get("name"): kwargs["name"] = petname.generate(2, " ", letters=10).title() diff --git a/src/sentry/workflow_engine/defaults/detectors.py b/src/sentry/workflow_engine/defaults/detectors.py new file mode 100644 index 000000000000..25d40f3c3e23 --- /dev/null +++ b/src/sentry/workflow_engine/defaults/detectors.py @@ -0,0 +1,286 @@ +import logging +from collections.abc import Mapping +from datetime import timedelta +from functools import cache + +from django.db import router, transaction +from rest_framework import status + +from sentry import features +from sentry.api.exceptions import SentryAPIException +from sentry.grouping.grouptype import ErrorGroupType +from sentry.incidents.grouptype import MetricIssue +from sentry.incidents.models.alert_rule import AlertRuleDetectionType +from sentry.incidents.utils.constants import INCIDENTS_SNUBA_SUBSCRIPTION_TYPE +from sentry.incidents.utils.types import DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION +from sentry.issue_detection.performance_detection import PERFORMANCE_DETECTOR_CONFIG_MAPPINGS +from sentry.issues import grouptype +from sentry.locks import locks +from sentry.models.project import Project +from sentry.projectoptions.defaults import DEFAULT_PROJECT_PERFORMANCE_DETECTION_SETTINGS +from sentry.seer.anomaly_detection.store_data_workflow_engine import send_new_detector_data +from sentry.seer.anomaly_detection.types import ( + AnomalyDetectionSeasonality, + AnomalyDetectionSensitivity, + AnomalyDetectionThresholdType, +) +from sentry.snuba.dataset import Dataset +from 
sentry.snuba.models import SnubaQuery, SnubaQueryEventType +from sentry.snuba.subscriptions import create_snuba_query, create_snuba_subscription +from sentry.utils.locking import UnableToAcquireLock +from sentry.workflow_engine.models import ( + DataCondition, + DataConditionGroup, + DataSource, + DataSourceDetector, + Detector, +) +from sentry.workflow_engine.models.data_condition import Condition +from sentry.workflow_engine.types import ( + ERROR_DETECTOR_NAME, + ISSUE_STREAM_DETECTOR_NAME, + DetectorPriorityLevel, +) +from sentry.workflow_engine.typings.grouptype import IssueStreamGroupType + +VALID_DEFAULT_DETECTOR_TYPES = [ + ErrorGroupType.slug, + IssueStreamGroupType.slug, + *[m.wfe_detector_type for m in PERFORMANCE_DETECTOR_CONFIG_MAPPINGS.values()], +] + +logger = logging.getLogger(__name__) + + +@cache +def get_disabled_platforms_by_detector_type() -> Mapping[str, frozenset[str]]: + """ + Map WFE detector types to platforms where they should be disabled by default. + Derives from DEFAULT_DETECTOR_DISABLING_CONFIGS using the detection_enabled_key. + """ + from sentry.issue_detection.detectors.disable_detectors import ( + DEFAULT_DETECTOR_DISABLING_CONFIGS, + ) + + disabled_by_detector_type: dict[str, frozenset[str]] = {} + + for disable_config in DEFAULT_DETECTOR_DISABLING_CONFIGS: + detector_option_key = disable_config["detector_project_option"] + languages_to_disable = disable_config["languages_to_disable"] + + # Find matching WFE detector via detection_enabled_key + for mapping in PERFORMANCE_DETECTOR_CONFIG_MAPPINGS.values(): + if mapping.detection_enabled_key == detector_option_key: + disabled_by_detector_type[mapping.wfe_detector_type] = frozenset( + languages_to_disable + ) + break + + return disabled_by_detector_type + + +class UnableToAcquireLockApiError(SentryAPIException): + status_code = status.HTTP_400_BAD_REQUEST + code = "unable_to_acquire_lock" + message = "Unable to acquire lock for issue alert migration." 
+ + +def _ensure_detector(project: Project, type: str, default_enabled: bool = True) -> Detector: + """ + Ensure that a detector of a given type exists for a project. + If the Detector doesn't already exist, we try to acquire a lock to avoid double-creating, + and raise UnableToAcquireLockApiError if that fails. + """ + group_type = grouptype.registry.get_by_slug(type) + if not group_type: + raise ValueError(f"Group type {type} not registered") + slug = group_type.slug + if slug not in VALID_DEFAULT_DETECTOR_TYPES: + raise ValueError(f"Invalid default detector type: {slug}") + + # If it already exists, life is simple and we can return immediately. + # If there happen to be duplicates, we prefer the oldest. + existing = Detector.objects.filter(type=slug, project=project).order_by("id").first() + if existing: + return existing + + # If we may need to create it, we acquire a lock to avoid double-creating. + # There isn't a unique constraint on the detector, so we can't rely on get_or_create + # to avoid duplicates. + # However, by only locking during the one-time creation, the window for a race condition is small. + lock = locks.get( + f"workflow-engine-project-{slug}-detector:{project.id}", + duration=2, + name=f"workflow_engine_default_{slug}_detector", + ) + try: + with ( + # Creation should be fast, so it's worth blocking a little rather + # than failing a request.
+ lock.blocking_acquire(initial_delay=0.1, timeout=3), + transaction.atomic(router.db_for_write(Detector)), + ): + detector, _ = Detector.objects.get_or_create( + type=slug, + project=project, + defaults={ + "config": {}, + "name": ( + ERROR_DETECTOR_NAME + if slug == ErrorGroupType.slug + else ISSUE_STREAM_DETECTOR_NAME + if slug == IssueStreamGroupType.slug + else group_type.description + ), + "enabled": default_enabled, + }, + ) + return detector + except UnableToAcquireLock: + raise UnableToAcquireLockApiError + + +def ensure_default_anomaly_detector( + project: Project, owner_team_id: int | None = None, enabled: bool = True +) -> Detector | None: + """ + Ensure that a default anomaly detection metric monitor exists for a project. + If the Detector doesn't already exist, we try to acquire a lock to avoid double-creating. + """ + # If it already exists, return immediately. Prefer the oldest if duplicates exist. + existing = ( + Detector.objects.filter(type=MetricIssue.slug, project=project).order_by("id").first() + ) + if existing: + logger.info( + "create_default_anomaly_detector.already_exists", + extra={"project_id": project.id, "detector_id": existing.id}, + ) + return existing + + lock = locks.get( + f"workflow-engine-project-{MetricIssue.slug}-detector:{project.id}", + duration=2, + name=f"workflow_engine_default_{MetricIssue.slug}_detector", + ) + try: + with ( + lock.blocking_acquire(initial_delay=0.1, timeout=3), + transaction.atomic(router.db_for_write(Detector)), + ): + # Double-check after acquiring lock in case another process created it + existing = ( + Detector.objects.filter(type=MetricIssue.slug, project=project) + .order_by("id") + .first() + ) + if existing: + return existing + + try: + condition_group = DataConditionGroup.objects.create( + logic_type=DataConditionGroup.Type.ANY, + organization_id=project.organization_id, + ) + + DataCondition.objects.create( + comparison={ + "sensitivity": AnomalyDetectionSensitivity.LOW, + "seasonality": 
AnomalyDetectionSeasonality.AUTO, + "threshold_type": AnomalyDetectionThresholdType.ABOVE, + }, + condition_result=DetectorPriorityLevel.HIGH, + type=Condition.ANOMALY_DETECTION, + condition_group=condition_group, + ) + + detector = Detector.objects.create( + project=project, + name="High Error Count (Default)", + description="Automatically monitors for anomalous spikes in error count", + workflow_condition_group=condition_group, + type=MetricIssue.slug, + config={ + "detection_type": AlertRuleDetectionType.DYNAMIC.value, + "comparison_delta": None, + }, + owner_team_id=owner_team_id, + enabled=enabled, + ) + + snuba_query = create_snuba_query( + query_type=SnubaQuery.Type.ERROR, + dataset=Dataset.Events, + query="", + aggregate="count()", + time_window=timedelta(minutes=15), + resolution=timedelta(minutes=15), + environment=None, + event_types=[SnubaQueryEventType.EventType.ERROR], + ) + + query_subscription = create_snuba_subscription( + project=project, + subscription_type=INCIDENTS_SNUBA_SUBSCRIPTION_TYPE, + snuba_query=snuba_query, + ) + + data_source = DataSource.objects.create( + organization_id=project.organization_id, + source_id=str(query_subscription.id), + type=DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION, + ) + + DataSourceDetector.objects.create( + data_source=data_source, + detector=detector, + ) + except Exception: + logger.exception( + "create_default_anomaly_detector.create_models_failed", + extra={"project_id": project.id, "organization_id": project.organization_id}, + ) + raise + + try: + send_new_detector_data(detector) + except Exception: + logger.exception( + "create_default_anomaly_detector.send_to_seer_failed", + extra={"project_id": project.id, "organization_id": project.organization_id}, + ) + raise + + return detector + except UnableToAcquireLock: + raise UnableToAcquireLockApiError + + +def ensure_performance_detectors(project: Project) -> dict[str, Detector]: + if not features.has("projects:workflow-engine-performance-detectors", project): + 
return {} + + disabled_platforms_map = get_disabled_platforms_by_detector_type() + + detectors = {} + for mapping in PERFORMANCE_DETECTOR_CONFIG_MAPPINGS.values(): + detector_type = mapping.wfe_detector_type + + # Determine initial enabled state based on platform and default settings + disabled_platforms = disabled_platforms_map.get(detector_type, frozenset()) + default_enabled = DEFAULT_PROJECT_PERFORMANCE_DETECTION_SETTINGS[ + mapping.detection_enabled_key + ] + enabled = (project.platform not in disabled_platforms) and default_enabled + + detectors[detector_type] = _ensure_detector(project, detector_type, default_enabled=enabled) + + return detectors + + +def ensure_default_detectors(project: Project) -> dict[str, Detector]: + detectors: dict[str, Detector] = {} + detectors[ErrorGroupType.slug] = _ensure_detector(project, ErrorGroupType.slug) + detectors[IssueStreamGroupType.slug] = _ensure_detector(project, IssueStreamGroupType.slug) + detectors.update(ensure_performance_detectors(project)) + return detectors diff --git a/src/sentry/workflow_engine/processors/detector.py b/src/sentry/workflow_engine/processors/detector.py index 3e89c592b311..fcd67574b72e 100644 --- a/src/sentry/workflow_engine/processors/detector.py +++ b/src/sentry/workflow_engine/processors/detector.py @@ -1,300 +1,35 @@ from __future__ import annotations import logging -from collections.abc import Mapping from dataclasses import dataclass -from datetime import timedelta -from functools import cache import sentry_sdk -from django.db import router, transaction -from rest_framework import status from sentry import features, options -from sentry.api.exceptions import SentryAPIException from sentry.grouping.grouptype import ErrorGroupType from sentry.incidents.grouptype import MetricIssue -from sentry.incidents.models.alert_rule import AlertRuleDetectionType -from sentry.incidents.utils.constants import INCIDENTS_SNUBA_SUBSCRIPTION_TYPE -from sentry.incidents.utils.types import 
DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION -from sentry.issue_detection.performance_detection import PERFORMANCE_DETECTOR_CONFIG_MAPPINGS -from sentry.issues import grouptype from sentry.issues.issue_occurrence import IssueOccurrence from sentry.issues.producer import PayloadType, produce_occurrence_to_kafka -from sentry.locks import locks from sentry.models.activity import Activity from sentry.models.group import Group -from sentry.models.project import Project -from sentry.projectoptions.defaults import DEFAULT_PROJECT_PERFORMANCE_DETECTION_SETTINGS -from sentry.seer.anomaly_detection.store_data_workflow_engine import send_new_detector_data -from sentry.seer.anomaly_detection.types import ( - AnomalyDetectionSeasonality, - AnomalyDetectionSensitivity, - AnomalyDetectionThresholdType, -) from sentry.services.eventstore.models import GroupEvent -from sentry.snuba.dataset import Dataset -from sentry.snuba.models import SnubaQuery, SnubaQueryEventType -from sentry.snuba.subscriptions import create_snuba_query, create_snuba_subscription from sentry.utils import metrics -from sentry.utils.locking import UnableToAcquireLock -from sentry.workflow_engine.models import DataPacket, DataSource, Detector -from sentry.workflow_engine.models.data_condition import Condition, DataCondition -from sentry.workflow_engine.models.data_condition_group import DataConditionGroup -from sentry.workflow_engine.models.data_source_detector import DataSourceDetector + +# TODO - remove this import once getsentry can be updated +from sentry.workflow_engine.defaults.detectors import ( + ensure_default_detectors as ensure_default_detectors, +) +from sentry.workflow_engine.models import DataPacket, Detector from sentry.workflow_engine.models.detector_group import DetectorGroup from sentry.workflow_engine.types import ( - ERROR_DETECTOR_NAME, - ISSUE_STREAM_DETECTOR_NAME, DetectorEvaluationResult, DetectorGroupKey, - DetectorPriorityLevel, WorkflowEventData, ) from sentry.workflow_engine.typings.grouptype 
import IssueStreamGroupType logger = logging.getLogger(__name__) -VALID_DEFAULT_DETECTOR_TYPES = [ - ErrorGroupType.slug, - IssueStreamGroupType.slug, - *[m.wfe_detector_type for m in PERFORMANCE_DETECTOR_CONFIG_MAPPINGS.values()], -] - - -@cache -def get_disabled_platforms_by_detector_type() -> Mapping[str, frozenset[str]]: - """ - Map WFE detector types to platforms where they should be disabled by default. - Derives from DEFAULT_DETECTOR_DISABLING_CONFIGS using the detection_enabled_key. - """ - from sentry.issue_detection.detectors.disable_detectors import ( - DEFAULT_DETECTOR_DISABLING_CONFIGS, - ) - - disabled_by_detector_type: dict[str, frozenset[str]] = {} - - for disable_config in DEFAULT_DETECTOR_DISABLING_CONFIGS: - detector_option_key = disable_config["detector_project_option"] - languages_to_disable = disable_config["languages_to_disable"] - - # Find matching WFE detector via detection_enabled_key - for mapping in PERFORMANCE_DETECTOR_CONFIG_MAPPINGS.values(): - if mapping.detection_enabled_key == detector_option_key: - disabled_by_detector_type[mapping.wfe_detector_type] = frozenset( - languages_to_disable - ) - break - - return disabled_by_detector_type - - -class UnableToAcquireLockApiError(SentryAPIException): - status_code = status.HTTP_400_BAD_REQUEST - code = "unable_to_acquire_lock" - message = "Unable to acquire lock for issue alert migration." - - -def _ensure_detector(project: Project, type: str, default_enabled: bool = True) -> Detector: - """ - Ensure that a detector of a given type exists for a project. - If the Detector doesn't already exist, we try to acquire a lock to avoid double-creating, - and UnableToAcquireLockApiError if that fails. 
- """ - group_type = grouptype.registry.get_by_slug(type) - if not group_type: - raise ValueError(f"Group type {type} not registered") - slug = group_type.slug - if slug not in VALID_DEFAULT_DETECTOR_TYPES: - raise ValueError(f"Invalid default detector type: {slug}") - - # If it already exists, life is simple and we can return immediately. - # If there happen to be duplicates, we prefer the oldest. - existing = Detector.objects.filter(type=slug, project=project).order_by("id").first() - if existing: - return existing - - # If we may need to create it, we acquire a lock to avoid double-creating. - # There isn't a unique constraint on the detector, so we can't rely on get_or_create - # to avoid duplicates. - # However, by only locking during the one-time creation, the window for a race condition is small. - lock = locks.get( - f"workflow-engine-project-{slug}-detector:{project.id}", - duration=2, - name=f"workflow_engine_default_{slug}_detector", - ) - try: - with ( - # Creation should be fast, so it's worth blocking a little rather - # than failing a request. - lock.blocking_acquire(initial_delay=0.1, timeout=3), - transaction.atomic(router.db_for_write(Detector)), - ): - detector, _ = Detector.objects.get_or_create( - type=slug, - project=project, - defaults={ - "config": {}, - "name": ( - ERROR_DETECTOR_NAME - if slug == ErrorGroupType.slug - else ISSUE_STREAM_DETECTOR_NAME - if slug == IssueStreamGroupType.slug - else group_type.description - ), - "enabled": default_enabled, - }, - ) - return detector - except UnableToAcquireLock: - raise UnableToAcquireLockApiError - - -def ensure_default_anomaly_detector( - project: Project, owner_team_id: int | None = None, enabled: bool = True -) -> Detector | None: - """ - Ensure that a default anomaly detection metric monitor exists for a project. - If the Detector doesn't already exist, we try to acquire a lock to avoid double-creating. - """ - # If it already exists, return immediately. 
Prefer the oldest if duplicates exist. - existing = ( - Detector.objects.filter(type=MetricIssue.slug, project=project).order_by("id").first() - ) - if existing: - logger.info( - "create_default_anomaly_detector.already_exists", - extra={"project_id": project.id, "detector_id": existing.id}, - ) - return existing - - lock = locks.get( - f"workflow-engine-project-{MetricIssue.slug}-detector:{project.id}", - duration=2, - name=f"workflow_engine_default_{MetricIssue.slug}_detector", - ) - try: - with ( - lock.blocking_acquire(initial_delay=0.1, timeout=3), - transaction.atomic(router.db_for_write(Detector)), - ): - # Double-check after acquiring lock in case another process created it - existing = ( - Detector.objects.filter(type=MetricIssue.slug, project=project) - .order_by("id") - .first() - ) - if existing: - return existing - - try: - condition_group = DataConditionGroup.objects.create( - logic_type=DataConditionGroup.Type.ANY, - organization_id=project.organization_id, - ) - - DataCondition.objects.create( - comparison={ - "sensitivity": AnomalyDetectionSensitivity.LOW, - "seasonality": AnomalyDetectionSeasonality.AUTO, - "threshold_type": AnomalyDetectionThresholdType.ABOVE, - }, - condition_result=DetectorPriorityLevel.HIGH, - type=Condition.ANOMALY_DETECTION, - condition_group=condition_group, - ) - - detector = Detector.objects.create( - project=project, - name="High Error Count (Default)", - description="Automatically monitors for anomalous spikes in error count", - workflow_condition_group=condition_group, - type=MetricIssue.slug, - config={ - "detection_type": AlertRuleDetectionType.DYNAMIC.value, - "comparison_delta": None, - }, - owner_team_id=owner_team_id, - enabled=enabled, - ) - - snuba_query = create_snuba_query( - query_type=SnubaQuery.Type.ERROR, - dataset=Dataset.Events, - query="", - aggregate="count()", - time_window=timedelta(minutes=15), - resolution=timedelta(minutes=15), - environment=None, - 
event_types=[SnubaQueryEventType.EventType.ERROR], - ) - - query_subscription = create_snuba_subscription( - project=project, - subscription_type=INCIDENTS_SNUBA_SUBSCRIPTION_TYPE, - snuba_query=snuba_query, - ) - - data_source = DataSource.objects.create( - organization_id=project.organization_id, - source_id=str(query_subscription.id), - type=DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION, - ) - - DataSourceDetector.objects.create( - data_source=data_source, - detector=detector, - ) - except Exception: - logger.exception( - "create_default_anomaly_detector.create_models_failed", - extra={"project_id": project.id, "organization_id": project.organization_id}, - ) - raise - - try: - send_new_detector_data(detector) - except Exception: - logger.exception( - "create_default_anomaly_detector.send_to_seer_failed", - extra={"project_id": project.id, "organization_id": project.organization_id}, - ) - raise - - return detector - except UnableToAcquireLock: - raise UnableToAcquireLockApiError - - -def ensure_performance_detectors(project: Project) -> dict[str, Detector]: - if not features.has("projects:workflow-engine-performance-detectors", project): - return {} - - disabled_platforms_map = get_disabled_platforms_by_detector_type() - - detectors = {} - for mapping in PERFORMANCE_DETECTOR_CONFIG_MAPPINGS.values(): - detector_type = mapping.wfe_detector_type - - # Determine initial enabled state based on platform and default settings - disabled_platforms = disabled_platforms_map.get(detector_type, frozenset()) - default_enabled = DEFAULT_PROJECT_PERFORMANCE_DETECTION_SETTINGS[ - mapping.detection_enabled_key - ] - enabled = (project.platform not in disabled_platforms) and default_enabled - - detectors[detector_type] = _ensure_detector(project, detector_type, default_enabled=enabled) - - return detectors - - -def ensure_default_detectors(project: Project) -> dict[str, Detector]: - detectors: dict[str, Detector] = {} - detectors[ErrorGroupType.slug] = _ensure_detector(project, 
ErrorGroupType.slug) - detectors[IssueStreamGroupType.slug] = _ensure_detector(project, IssueStreamGroupType.slug) - detectors.update(ensure_performance_detectors(project)) - return detectors - @dataclass(frozen=True) class EventDetectors: @@ -328,6 +63,7 @@ def detectors(self) -> set[Detector]: return {d for d in [self.issue_stream_detector, self.event_detector] if d is not None} +# TODO - Delete this once the issue stream is fully rolled out. def _is_issue_stream_detector_enabled(event_data: WorkflowEventData) -> bool: """ Check if the issue stream detector should be enabled for this event's group type. @@ -550,6 +286,7 @@ def process_detectors[T]( return results +# TODO - move to another file / location def associate_new_group_with_detector(group: Group, detector_id: int | None = None) -> bool: """ Associate a new Group with it's Detector in the database. @@ -627,6 +364,7 @@ def associate_new_group_with_detector(group: Group, detector_id: int | None = No return True +# TODO - move to another file / location def ensure_association_with_detector(group: Group, detector_id: int | None = None) -> bool: """ Ensure a Group has a DetectorGroup association, creating it if missing. 
diff --git a/src/sentry/workflow_engine/receivers/__init__.py b/src/sentry/workflow_engine/receivers/__init__.py index fc1f322310e9..0434ce88dd83 100644 --- a/src/sentry/workflow_engine/receivers/__init__.py +++ b/src/sentry/workflow_engine/receivers/__init__.py @@ -5,5 +5,6 @@ from .data_source_detector import * # NOQA from .detector import * # NOQA from .detector_workflow import * # NOQA +from .project_detectors import * # noqa: F401,F403 from .workflow import * # NOQA from .workflow_data_condition_group import * # NOQA diff --git a/src/sentry/receivers/project_detectors.py b/src/sentry/workflow_engine/receivers/project_detectors.py similarity index 71% rename from src/sentry/receivers/project_detectors.py rename to src/sentry/workflow_engine/receivers/project_detectors.py index 0725399df098..3618abe5c429 100644 --- a/src/sentry/receivers/project_detectors.py +++ b/src/sentry/workflow_engine/receivers/project_detectors.py @@ -1,5 +1,6 @@ import logging from contextlib import contextmanager +from typing import Any, Iterator import sentry_sdk from django.db.models.signals import post_save @@ -7,7 +8,8 @@ from sentry import features from sentry.models.project import Project from sentry.signals import project_created -from sentry.workflow_engine.processors.detector import ( +from sentry.users.models.user import User +from sentry.workflow_engine.defaults.detectors import ( UnableToAcquireLockApiError, ensure_default_anomaly_detector, ensure_default_detectors, @@ -17,7 +19,7 @@ @contextmanager -def disable_default_detector_creation(): +def disable_default_detector_creation() -> Iterator[None]: """ Context manager that temporarily disconnects the signal handlers that create default detectors, preventing them from being created when a project is saved. 
@@ -46,7 +48,11 @@ def disable_default_detector_creation(): ) -def create_project_detectors(instance, created, **kwargs): +def create_project_detectors( + instance: Project, + created: bool, + **kwargs: Any, +) -> None: if created: try: ensure_default_detectors(instance) @@ -54,7 +60,11 @@ def create_project_detectors(instance, created, **kwargs): sentry_sdk.capture_exception(e) -def create_default_anomaly_detector(project: Project, user=None, user_id=None, **kwargs): +def create_default_anomaly_detector( + project: Project, + user: User | None = None, + **kwargs: Any, +) -> None: """ Creates default anomaly detector when project is created, with the team as owner. This listens to project_created signal which provides user information. @@ -76,28 +86,43 @@ def create_default_anomaly_detector(project: Project, user=None, user_id=None, * "organizations:anomaly-detection-alerts", project.organization, actor=user ) detector = ensure_default_anomaly_detector( - project, owner_team_id=owner_team.id if owner_team else None, enabled=enabled + project, + owner_team_id=owner_team.id if owner_team else None, + enabled=enabled, ) if detector: logger.info( "create_default_anomaly_detector.created", - extra={"project_id": project.id, "detector_id": detector.id, "enabled": enabled}, + extra={ + "project_id": project.id, + "detector_id": detector.id, + "enabled": enabled, + }, ) except UnableToAcquireLockApiError as e: logger.warning( "create_default_anomaly_detector.lock_failed", - extra={"project_id": project.id, "organization_id": project.organization_id}, + extra={ + "project_id": project.id, + "organization_id": project.organization_id, + }, ) sentry_sdk.capture_exception(e) except Exception: logger.exception( "create_default_anomaly_detector.failed", - extra={"project_id": project.id, "organization_id": project.organization_id}, + extra={ + "project_id": project.id, + "organization_id": project.organization_id, + }, ) post_save.connect( - create_project_detectors, 
sender=Project, dispatch_uid="create_project_detectors", weak=False + create_project_detectors, + sender=Project, + dispatch_uid="create_project_detectors", + weak=False, ) project_created.connect( create_default_anomaly_detector, diff --git a/tests/sentry/workflow_engine/defaults/test_detectors.py b/tests/sentry/workflow_engine/defaults/test_detectors.py new file mode 100644 index 000000000000..153d0506a76b --- /dev/null +++ b/tests/sentry/workflow_engine/defaults/test_detectors.py @@ -0,0 +1,51 @@ +from unittest.mock import patch + +import pytest + +from sentry.grouping.grouptype import ErrorGroupType +from sentry.testutils.cases import TestCase +from sentry.utils.locking import UnableToAcquireLock +from sentry.workflow_engine.defaults.detectors import ( + UnableToAcquireLockApiError, + ensure_default_detectors, +) +from sentry.workflow_engine.models import Detector +from sentry.workflow_engine.types import ERROR_DETECTOR_NAME, ISSUE_STREAM_DETECTOR_NAME +from sentry.workflow_engine.typings.grouptype import IssueStreamGroupType + + +class TestEnsureDefaultDetectors(TestCase): + def setUp(self) -> None: + self.slugs = [ErrorGroupType.slug, IssueStreamGroupType.slug] + self.names = [ERROR_DETECTOR_NAME, ISSUE_STREAM_DETECTOR_NAME] + + def test_ensure_default_detector(self) -> None: + project = self.create_project() + detectors = ensure_default_detectors(project) + + error_detector = detectors[ErrorGroupType.slug] + assert error_detector.name == ERROR_DETECTOR_NAME + assert error_detector.project_id == project.id + assert error_detector.type == ErrorGroupType.slug + + issue_stream_detector = detectors[IssueStreamGroupType.slug] + assert issue_stream_detector.name == ISSUE_STREAM_DETECTOR_NAME + assert issue_stream_detector.project_id == project.id + assert issue_stream_detector.type == IssueStreamGroupType.slug + + def test_ensure_default_detector__already_exists(self) -> None: + project = self.create_project() + existing = Detector.objects.filter(project=project) + 
+ with patch("sentry.workflow_engine.defaults.detectors.locks.get") as mock_lock: + default_detectors = ensure_default_detectors(project) + assert {d.id for d in default_detectors.values()} == {d.id for d in existing} + # No lock if it already exists. + mock_lock.assert_not_called() + + def test_ensure_default_detector__lock_fails(self) -> None: + with patch("sentry.workflow_engine.defaults.detectors.locks.get") as mock_lock: + mock_lock.return_value.blocking_acquire.side_effect = UnableToAcquireLock + with pytest.raises(UnableToAcquireLockApiError): + project = self.create_project() + ensure_default_detectors(project) diff --git a/tests/sentry/workflow_engine/migration_helpers/test_issue_alert_migration.py b/tests/sentry/workflow_engine/migration_helpers/test_issue_alert_migration.py index a8581b32748c..3a4059f13c1b 100644 --- a/tests/sentry/workflow_engine/migration_helpers/test_issue_alert_migration.py +++ b/tests/sentry/workflow_engine/migration_helpers/test_issue_alert_migration.py @@ -25,7 +25,6 @@ from sentry.rules.match import MatchType from sentry.testutils.cases import TestCase from sentry.testutils.helpers import install_slack -from sentry.utils.locking import UnableToAcquireLock from sentry.workflow_engine.migration_helpers.issue_alert_migration import IssueAlertMigrator from sentry.workflow_engine.models import ( Action, @@ -40,11 +39,6 @@ WorkflowDataConditionGroup, ) from sentry.workflow_engine.models.data_condition import Condition -from sentry.workflow_engine.processors.detector import ( - UnableToAcquireLockApiError, - ensure_default_detectors, -) -from sentry.workflow_engine.types import ERROR_DETECTOR_NAME, ISSUE_STREAM_DETECTOR_NAME from sentry.workflow_engine.typings.grouptype import IssueStreamGroupType @@ -626,39 +620,3 @@ def test_dry_run__action_validation_fails(self) -> None: IssueAlertMigrator(issue_alert, self.user.id, is_dry_run=True).run() self.assert_nothing_migrated(issue_alert) - - -class TestEnsureDefaultDetectors(TestCase): - def 
setUp(self) -> None: - self.slugs = [ErrorGroupType.slug, IssueStreamGroupType.slug] - self.names = [ERROR_DETECTOR_NAME, ISSUE_STREAM_DETECTOR_NAME] - - def test_ensure_default_detector(self) -> None: - project = self.create_project() - detectors = ensure_default_detectors(project) - - error_detector = detectors[ErrorGroupType.slug] - assert error_detector.name == ERROR_DETECTOR_NAME - assert error_detector.project_id == project.id - assert error_detector.type == ErrorGroupType.slug - - issue_stream_detector = detectors[IssueStreamGroupType.slug] - assert issue_stream_detector.name == ISSUE_STREAM_DETECTOR_NAME - assert issue_stream_detector.project_id == project.id - assert issue_stream_detector.type == IssueStreamGroupType.slug - - def test_ensure_default_detector__already_exists(self) -> None: - project = self.create_project() - existing = Detector.objects.filter(project=project) - with patch("sentry.workflow_engine.processors.detector.locks.get") as mock_lock: - default_detectors = ensure_default_detectors(project) - assert {d.id for d in default_detectors.values()} == {d.id for d in existing} - # No lock if it already exists. 
- mock_lock.assert_not_called() - - def test_ensure_default_detector__lock_fails(self) -> None: - with patch("sentry.workflow_engine.processors.detector.locks.get") as mock_lock: - mock_lock.return_value.blocking_acquire.side_effect = UnableToAcquireLock - with pytest.raises(UnableToAcquireLockApiError): - project = self.create_project() - ensure_default_detectors(project) diff --git a/tests/sentry/receivers/test_default_detector.py b/tests/sentry/workflow_engine/receivers/test_project_detectors.py similarity index 91% rename from tests/sentry/receivers/test_default_detector.py rename to tests/sentry/workflow_engine/receivers/test_project_detectors.py index 9a794c573eea..4b1c6a1f3704 100644 --- a/tests/sentry/receivers/test_default_detector.py +++ b/tests/sentry/workflow_engine/receivers/test_project_detectors.py @@ -8,20 +8,20 @@ from sentry.incidents.models.alert_rule import AlertRuleDetectionType from sentry.issue_detection.performance_detection import PERFORMANCE_DETECTOR_CONFIG_MAPPINGS from sentry.models.project import Project -from sentry.receivers.project_detectors import ( - create_default_anomaly_detector, - disable_default_detector_creation, -) from sentry.signals import project_created from sentry.snuba.models import QuerySubscription from sentry.testutils.cases import TestCase from sentry.testutils.helpers.features import with_feature -from sentry.workflow_engine.models import DataSource, Detector -from sentry.workflow_engine.models.data_condition import Condition, DataCondition -from sentry.workflow_engine.processors.detector import ( +from sentry.workflow_engine.defaults.detectors import ( ensure_default_anomaly_detector, ensure_performance_detectors, ) +from sentry.workflow_engine.models import DataSource, Detector +from sentry.workflow_engine.models.data_condition import Condition, DataCondition +from sentry.workflow_engine.receivers.project_detectors import ( + create_default_anomaly_detector, + disable_default_detector_creation, +) from 
sentry.workflow_engine.types import DetectorPriorityLevel from sentry.workflow_engine.typings.grouptype import IssueStreamGroupType @@ -34,7 +34,7 @@ def test_creates_detector_with_all_components(self) -> None: project.add_team(team) with mock.patch( - "sentry.workflow_engine.processors.detector.send_new_detector_data" + "sentry.workflow_engine.defaults.detectors.send_new_detector_data" ) as mock_send: detector = ensure_default_anomaly_detector( project, owner_team_id=team.id, enabled=False @@ -77,7 +77,7 @@ def test_creates_detector_without_team(self) -> None: """Test that detector can be created without an owner team.""" project = self.create_project() - with mock.patch("sentry.workflow_engine.processors.detector.send_new_detector_data"): + with mock.patch("sentry.workflow_engine.defaults.detectors.send_new_detector_data"): detector = ensure_default_anomaly_detector(project, owner_team_id=None, enabled=True) assert detector is not None @@ -89,7 +89,7 @@ def test_send_new_detector_data_failure_blocks_creation(self) -> None: project = self.create_project() with mock.patch( - "sentry.workflow_engine.processors.detector.send_new_detector_data", + "sentry.workflow_engine.defaults.detectors.send_new_detector_data", side_effect=Exception("Seer unavailable"), ): with pytest.raises(Exception, match="Seer unavailable"): @@ -102,7 +102,7 @@ def test_returns_existing_detector_without_creating_duplicates(self) -> None: """Test that calling ensure_default_anomaly_detector twice returns the same detector.""" project = self.create_project() - with mock.patch("sentry.workflow_engine.processors.detector.send_new_detector_data"): + with mock.patch("sentry.workflow_engine.defaults.detectors.send_new_detector_data"): detector1 = ensure_default_anomaly_detector(project) detector2 = ensure_default_anomaly_detector(project) @@ -124,7 +124,7 @@ def test_creates_enabled_detector_when_both_features_enabled(self) -> None: team = project.teams.first() assert team is not None - with 
mock.patch("sentry.workflow_engine.processors.detector.send_new_detector_data"): + with mock.patch("sentry.workflow_engine.defaults.detectors.send_new_detector_data"): create_default_anomaly_detector(project, user=self.user) detector = Detector.objects.get(project=project, type=MetricIssue.slug) @@ -137,7 +137,7 @@ def test_creates_disabled_detector_when_plan_feature_missing(self) -> None: """Test that detector is created but disabled when anomaly-detection-alerts is off.""" project = self.create_project() - with mock.patch("sentry.workflow_engine.processors.detector.send_new_detector_data"): + with mock.patch("sentry.workflow_engine.defaults.detectors.send_new_detector_data"): create_default_anomaly_detector(project, user=self.user) detector = Detector.objects.get(project=project, type=MetricIssue.slug) @@ -159,7 +159,7 @@ def test_creates_detector_without_team(self) -> None: # Remove all teams project.teams.clear() - with mock.patch("sentry.workflow_engine.processors.detector.send_new_detector_data"): + with mock.patch("sentry.workflow_engine.defaults.detectors.send_new_detector_data"): create_default_anomaly_detector(project, user=self.user) detector = Detector.objects.get(project=project, type=MetricIssue.slug) @@ -197,7 +197,7 @@ def test_context_manager_disables_metric_detector_signal(self) -> None: """Test that disable_default_detector_creation also prevents metric detector creation.""" with ( disable_default_detector_creation(), - mock.patch("sentry.workflow_engine.processors.detector.send_new_detector_data"), + mock.patch("sentry.workflow_engine.defaults.detectors.send_new_detector_data"), ): # fire_project_created=True ensures the project_created signal is sent project = self.create_project(fire_project_created=True) @@ -248,7 +248,7 @@ def test_disable_default_detector_creation_prevents_performance_detectors(self) @with_feature("projects:workflow-engine-performance-detectors") @mock.patch( - 
"sentry.workflow_engine.processors.detector.DEFAULT_PROJECT_PERFORMANCE_DETECTION_SETTINGS", + "sentry.workflow_engine.defaults.detectors.DEFAULT_PROJECT_PERFORMANCE_DETECTION_SETTINGS", { "slow_db_queries_detection_enabled": True, "large_http_payload_detection_enabled": True, @@ -256,12 +256,12 @@ def test_disable_default_detector_creation_prevents_performance_detectors(self) }, ) @mock.patch( - "sentry.workflow_engine.processors.detector.get_disabled_platforms_by_detector_type", + "sentry.workflow_engine.defaults.detectors.get_disabled_platforms_by_detector_type", return_value={ "performance_slow_db_query": frozenset({"ruby", "php"}), }, ) - def test_respects_default_enabled_state(self, mock_disabled_platforms): + def test_respects_default_enabled_state(self, mock_disabled: mock.MagicMock) -> None: """Test that detectors respect both platform-specific disabling and default enabled state.""" with disable_default_detector_creation(): project = self.create_project(platform="ruby")