@@ -611,20 +611,32 @@ def reconfigure_duplicate_cluster(original, cluster_outside):
611611 cluster_outside .exclude (id = new_original .id ).update (duplicate_finding = new_original )
612612
613613
614- def prepare_duplicates_for_delete (obj ):
614+ def prepare_duplicates_for_delete (obj , * , preview_only = False ):
615615 """
616616 Prepare duplicate clusters before deleting a Test, Engagement, Product, or Product_Type.
617617
618618 Resets inside-scope duplicate FKs and reconfigures outside-scope clusters
619619 so that cascade_delete won't hit FK violations on the self-referential
620620 duplicate_finding field.
621+
622+ When preview_only=True, no data is modified. Returns the count of outside-scope
623+ findings that would be deleted (non-zero only when DUPLICATE_CLUSTER_CASCADE_DELETE=True).
621624 """
622625 from dojo .utils import FINDING_SCOPE_FILTERS # noqa: PLC0415 circular import
623626
624627 scope_field = FINDING_SCOPE_FILTERS .get (type (obj ))
625628 if scope_field is None :
626- logger .warning ("prepare_duplicates_for_delete: unsupported object type %s" , type (obj ).__name__ )
627- return
629+ if not preview_only :
630+ logger .warning ("prepare_duplicates_for_delete: unsupported object type %s" , type (obj ).__name__ )
631+ return 0 if preview_only else None
632+
633+ if preview_only :
634+ if not settings .DUPLICATE_CLUSTER_CASCADE_DELETE :
635+ return 0
636+ scope_ids_subquery = Finding .objects .filter (** {scope_field : obj }).values_list ("id" , flat = True )
637+ return Finding .objects .filter (
638+ duplicate_finding_id__in = scope_ids_subquery ,
639+ ).exclude (id__in = scope_ids_subquery ).count ()
628640
629641 logger .debug ("prepare_duplicates_for_delete: %s %d" , type (obj ).__name__ , obj .id )
630642
@@ -637,7 +649,7 @@ def prepare_duplicates_for_delete(obj):
637649
638650 if not scope_ids_subquery .exists ():
639651 logger .debug ("no findings in scope, nothing to prepare" )
640- return
652+ return None
641653
642654 # Bulk-reset inside-scope duplicates: single UPDATE instead of per-original mass_model_updater.
643655 # Clears the duplicate_finding FK so cascade_delete won't trip over dangling self-references.
@@ -694,6 +706,8 @@ def prepare_duplicates_for_delete(obj):
694706 outside_orphan_count ,
695707 )
696708
709+ return None
710+
697711
698712@receiver (pre_delete , sender = Test )
699713def test_pre_delete (sender , instance , ** kwargs ):
@@ -830,13 +844,15 @@ def _bulk_delete_findings_internal(finding_qs, chunk_size=1000, *, order_desc=Fa
830844 )
831845
832846
833- def bulk_delete_findings (finding_qs , chunk_size = 1000 , cascade_root = None , * , order_desc = False ):
847+ def bulk_delete_findings (finding_qs , chunk_size = 1000 , cascade_root = None , * , order_desc = False , preview_only = False ):
834848 """
835849 Entry point; may delegate to Pro via settings.BULK_DELETE_FINDINGS_METHOD.
836850
837851 cascade_root: optional dict describing the top-level object whose cascade triggered
838852 this bulk delete (e.g. {"model": "dojo.engagement", "pk": 9}). Ignored by OSS
839853 when no custom method is configured.
854+
 855+     preview_only: when True, nothing is deleted. A custom BULK_DELETE_FINDINGS_METHOD may return a ``{product_id: finding_count}`` dict; the built-in (OSS) path returns ``None``.
840856 """
841857 from dojo .utils import get_custom_method # noqa: PLC0415 circular import
842858
@@ -846,7 +862,10 @@ def bulk_delete_findings(finding_qs, chunk_size=1000, cascade_root=None, *, orde
846862 chunk_size = chunk_size ,
847863 cascade_root = cascade_root ,
848864 order_desc = order_desc ,
865+ preview_only = preview_only ,
849866 )
867+ if preview_only :
868+ return None
850869 return _bulk_delete_findings_internal (finding_qs , chunk_size = chunk_size , order_desc = order_desc )
851870
852871
0 commit comments