Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion spp_programs/__manifest__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
"name": "OpenSPP Programs",
"summary": "Manage programs, cycles, beneficiary enrollment, entitlements (cash and in-kind), payments, and fund tracking for social protection.",
"category": "OpenSPP/Core",
"version": "19.0.2.0.6",
"version": "19.0.2.0.8",
"sequence": 1,
"author": "OpenSPP.org",
"website": "https://github.com/OpenSPP/OpenSPP2",
Expand Down
10 changes: 10 additions & 0 deletions spp_programs/models/cycle.py
Original file line number Diff line number Diff line change
Expand Up @@ -275,6 +275,16 @@ def _compute_entitlements_count(self):
entitlements_count = self.env["spp.entitlement"].search_count([("cycle_id", "=", rec.id)])
rec.entitlements_count = entitlements_count

def refresh_statistics(self):
    """Recompute all cycle statistics fields after bulk operations.

    Intended for use after raw SQL inserts that bypass ORM dependency
    tracking (e.g. ``bulk_create_memberships`` with
    ``skip_duplicates=True``), when the compute methods would otherwise
    not be re-triggered.
    """
    # Run each non-stored compute explicitly, in the same order as before.
    for recompute in (
        self._compute_members_count,
        self._compute_entitlements_count,
        self._compute_total_entitlements_count,
    ):
        recompute()
Comment on lines +278 to +286
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

The refresh_statistics method appears to be redundant in its current implementation. The fields it attempts to refresh (members_count, entitlements_count, and total_entitlements_count) are all store=False and do not implement the "canary" skip logic found in other models.

Since these fields are computed on-demand and the underlying relation caches are correctly invalidated in the managers (e.g., via cycle.invalidate_recordset(['cycle_membership_ids'])), they will naturally reflect the correct values upon the next access without an explicit refresh call. If the intention was to optimize these fields for bulk operations, they should be made store=True and implement the skip logic, similar to has_members in spp.program.


@api.depends("entitlement_ids", "inkind_entitlement_ids")
def _compute_total_entitlements_count(self):
if not self.ids:
Expand Down
73 changes: 72 additions & 1 deletion spp_programs/models/cycle_membership.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,11 @@
# Part of OpenSPP. See LICENSE file for full copyright and licensing details.
from odoo import _, fields, models
import logging

from odoo import _, api, fields, models
from odoo.exceptions import ValidationError

_logger = logging.getLogger(__name__)


class SPPCycleMembership(models.Model):
_name = "spp.cycle.membership"
Expand Down Expand Up @@ -87,6 +91,73 @@ def open_registrant_form(self):
},
}

@api.model
def bulk_create_memberships(self, vals_list, chunk_size=1000, skip_duplicates=False):
    """Create cycle memberships in bulk, optionally skipping duplicates.

    :param vals_list: list of dicts with membership values
    :param chunk_size: number of records per batch (default 1000)
    :param skip_duplicates: when True, insert via raw SQL with
        ``INSERT ... ON CONFLICT DO NOTHING`` so duplicate
        (partner_id, cycle_id) pairs are silently skipped
    :return: int count of inserted rows when ``skip_duplicates`` is True,
        otherwise the created recordset
    """
    if skip_duplicates:
        # Raw-SQL path: returns a plain row count, 0 for empty input.
        if not vals_list:
            return 0
        return self._bulk_insert_on_conflict(vals_list, chunk_size)

    # ORM path: returns a recordset, empty for empty input.
    if not vals_list:
        return self.env["spp.cycle.membership"]
    return self.create(vals_list)

def _bulk_insert_on_conflict(self, vals_list, chunk_size=1000):
    """Insert cycle memberships via raw SQL with ON CONFLICT DO NOTHING.

    Bypasses the ORM for throughput; rows violating the unique
    (partner_id, cycle_id) constraint are silently skipped. Callers are
    responsible for invalidating relation caches afterwards.

    :param vals_list: list of dicts with at least partner_id, cycle_id, state
    :param chunk_size: number of rows per SQL INSERT batch
    :return: total number of rows actually inserted
    """
    cr = self.env.cr
    uid = self.env.uid
    default_date = fields.Date.today()
    inserted = 0

    for start in range(0, len(vals_list), chunk_size):
        chunk = vals_list[start : start + chunk_size]
        placeholders = []
        args = []
        for vals in chunk:
            # One parameterized tuple per row; values never interpolated
            # into the SQL string itself.
            placeholders.append("(%s, %s, %s, %s, %s, %s, now(), now())")
            args += [
                vals["partner_id"],
                vals["cycle_id"],
                vals.get("state", "draft"),
                vals.get("enrollment_date", default_date),
                uid,
                uid,
            ]

        # Only the placeholder list is formatted in — safe w.r.t. injection.
        query = """
            INSERT INTO spp_cycle_membership
            (partner_id, cycle_id, state, enrollment_date,
             create_uid, write_uid, create_date, write_date)
            VALUES {}
            ON CONFLICT (partner_id, cycle_id) DO NOTHING
        """.format(  # noqa: S608 # nosec B608
            ", ".join(placeholders)
        )
        cr.execute(query, args)
        inserted += cr.rowcount

    _logger.info(
        "Bulk inserted %d cycle memberships (%d skipped as duplicates)",
        inserted,
        len(vals_list) - inserted,
    )
    return inserted

def unlink(self):
if not self:
return
Expand Down
39 changes: 20 additions & 19 deletions spp_programs/models/managers/cycle_manager_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -325,8 +325,8 @@ def mark_import_as_done(self, cycle, msg):
cycle.locked_reason = None
cycle.message_post(body=msg)

# Update Statistics
cycle._compute_members_count()
# Refresh statistics after bulk operations
cycle.refresh_statistics()

def mark_prepare_entitlement_as_done(self, cycle, msg):
"""Complete the preparation of entitlements.
Expand Down Expand Up @@ -835,25 +835,26 @@ def _add_beneficiaries(self, cycle, beneficiaries, state="draft", do_count=False
"""Add Beneficiaries

:param cycle: Recordset of cycle
:param beneficiaries: Recordset of beneficiaries
:param beneficiaries: List of partner IDs
:param state: String state to be set to beneficiary
:param do_count: Boolean - set to False to not run compute functions
:return: Integer - count of not enrolled members
"""
new_beneficiaries = []
for r in beneficiaries:
new_beneficiaries.append(
[
0,
0,
{
"partner_id": r,
"enrollment_date": fields.Date.today(),
"state": state,
},
]
)
cycle.update({"cycle_membership_ids": new_beneficiaries})
:return: Integer - count of inserted members
"""
today = fields.Date.today()
vals_list = [
{
"partner_id": partner_id,
"cycle_id": cycle.id,
"enrollment_date": today,
"state": state,
}
for partner_id in beneficiaries
]
self.env["spp.cycle.membership"].bulk_create_memberships(vals_list, skip_duplicates=True)

# Raw SQL bypasses the ORM cache — invalidate so subsequent reads
# (e.g. cycle.cycle_membership_ids) reflect the new rows.
cycle.invalidate_recordset(["cycle_membership_ids"])

if do_count:
# Update Statistics
Expand Down
17 changes: 9 additions & 8 deletions spp_programs/models/managers/eligibility_manager.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# Part of OpenSPP. See LICENSE file for full copyright and licensing details.
import logging

from odoo import Command, _, api, fields, models
from odoo import _, api, fields, models

from odoo.addons.job_worker.delay import group

Expand Down Expand Up @@ -165,20 +165,21 @@ def _import_registrants_async(self, new_beneficiaries, state="draft"):

def mark_import_as_done(self):
    """Finalize a registrant import: refresh statistics, unlock, notify.

    Uses ``refresh_beneficiary_counts`` as the single entry point for
    recomputing beneficiary statistics instead of calling the individual
    ``_compute_*`` methods here, so this stays consistent with the
    cycle-side completion handler.
    """
    self.ensure_one()
    self.program_id.refresh_beneficiary_counts()

    self.program_id.is_locked = False
    self.program_id.locked_reason = None
    self.program_id.message_post(body=_("Import finished."))

def _import_registrants(self, new_beneficiaries, state="draft", do_count=False):
_logger.info("Importing %s beneficiaries", len(new_beneficiaries))
_logger.info("updated")
beneficiaries_val = []
for beneficiary in new_beneficiaries:
beneficiaries_val.append(Command.create({"partner_id": beneficiary.id, "state": state}))
self.program_id.update({"program_membership_ids": beneficiaries_val})
vals_list = [{"partner_id": b.id, "program_id": self.program_id.id, "state": state} for b in new_beneficiaries]
count = self.env["spp.program.membership"].bulk_create_memberships(vals_list, skip_duplicates=True)
_logger.info("Imported %d new memberships (%d duplicates skipped)", count, len(vals_list) - count)

# Raw SQL bypasses the ORM cache — invalidate so subsequent reads
# (e.g. program.program_membership_ids) reflect the new rows.
self.program_id.invalidate_recordset(["program_membership_ids"])

if do_count:
# Compute Statistics
Expand Down
82 changes: 70 additions & 12 deletions spp_programs/models/program_membership.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
# Part of OpenSPP. See LICENSE file for full copyright and licensing details.
import logging

from lxml import etree

Expand All @@ -7,6 +8,8 @@

from . import constants

_logger = logging.getLogger(__name__)


class SPPProgramMembership(models.Model):
_inherit = [
Expand Down Expand Up @@ -345,26 +348,26 @@ def action_exit(self):
}
)

@api.model_create_multi
def bulk_create_memberships(self, vals_list, chunk_size=1000):
@api.model
def bulk_create_memberships(self, vals_list, chunk_size=1000, skip_duplicates=False):
"""Create program memberships in bulk with optional chunking.

This helper is intended for large enrollment jobs (e.g. CEL-driven
bulk enrollment) where thousands of memberships need to be created
in a single operation.

It preserves the normal create() semantics, including:
- standard ORM validations and constraints
- audit logging (via spp_audit rules)
- source tracking mixins

The only optimisation is to:
- accept already-prepared value dicts
- optionally split very large batches into smaller chunks to keep
memory use and per-transaction work bounded.
:param vals_list: List of dicts with membership values
:param chunk_size: Number of records per batch (default 1000)
:param skip_duplicates: When True, use INSERT ... ON CONFLICT DO NOTHING
to silently skip duplicate (partner_id, program_id) pairs instead of
raising IntegrityError. Returns the count of inserted rows.
:return: Recordset (skip_duplicates=False) or int count (skip_duplicates=True)
"""
if not vals_list:
return self.env["spp.program.membership"]
return 0 if skip_duplicates else self.env["spp.program.membership"]

if skip_duplicates:
return self._bulk_insert_on_conflict(vals_list, chunk_size)

if chunk_size and chunk_size > 0:
all_memberships = self.env["spp.program.membership"]
Expand All @@ -386,3 +389,58 @@ def bulk_create_memberships(self, vals_list, chunk_size=1000):
SPPProgramMembership,
self.sudo(), # nosemgrep: odoo-sudo-without-context
).create(vals_list)

def _bulk_insert_on_conflict(self, vals_list, chunk_size=1000):
    """Insert program memberships via raw SQL with ON CONFLICT DO NOTHING.

    Bypasses the ORM for maximum throughput during bulk enrollment.
    Rows violating the UNIQUE (partner_id, program_id) constraint are
    silently skipped instead of raising IntegrityError.

    :param vals_list: list of dicts with at least partner_id, program_id, state
    :param chunk_size: number of rows per SQL INSERT batch
    :return: total number of rows actually inserted
    """
    cr = self.env.cr
    uid = self.env.uid
    inserted = 0

    # NOTE(review): this uses Datetime.now() while the cycle-membership
    # variant uses Date.today() — confirm the enrollment_date column type.
    now = fields.Datetime.now()

    for start in range(0, len(vals_list), chunk_size):
        chunk = vals_list[start : start + chunk_size]
        placeholders = []
        args = []
        for vals in chunk:
            row_state = vals.get("state", "draft")
            # Only enrolled members get an enrollment timestamp.
            stamp = now if row_state == "enrolled" else None
            placeholders.append("(%s, %s, %s, %s, %s, %s, now(), now())")
            args += [
                vals["partner_id"],
                vals["program_id"],
                row_state,
                stamp,
                uid,
                uid,
            ]

        # Only the placeholder list is formatted in — safe w.r.t. injection.
        query = """
            INSERT INTO spp_program_membership
            (partner_id, program_id, state, enrollment_date,
             create_uid, write_uid, create_date, write_date)
            VALUES {}
            ON CONFLICT (partner_id, program_id) DO NOTHING
        """.format(  # noqa: S608 # nosec B608
            ", ".join(placeholders)
        )
        cr.execute(query, args)
        inserted += cr.rowcount

    _logger.info(
        "Bulk inserted %d program memberships (%d skipped as duplicates)",
        inserted,
        len(vals_list) - inserted,
    )
    return inserted
27 changes: 26 additions & 1 deletion spp_programs/models/programs.py
Original file line number Diff line number Diff line change
Expand Up @@ -187,8 +187,23 @@ def _check_unique_program_name(self):

@api.depends("program_membership_ids")
def _compute_has_members(self):
    """Set ``has_members`` with a single aggregate SQL query.

    Avoids loading ``program_membership_ids`` per record by asking
    PostgreSQL which of the programs have at least one membership row.
    The ``skip_program_statistics`` context flag lets bulk operations
    defer recomputation (the "canary" pattern used by bulk imports).
    """
    if self.env.context.get("skip_program_statistics"):
        return
    if not self.ids:
        # Unsaved records cannot have membership rows yet.
        for rec in self:
            rec.has_members = False
        return
    self.env.cr.execute(
        """
        SELECT program_id FROM spp_program_membership
        WHERE program_id IN %s
        GROUP BY program_id
        """,
        (tuple(self.ids),),
    )
    programs_with_members = {row[0] for row in self.env.cr.fetchall()}
    for rec in self:
        rec.has_members = rec.id in programs_with_members

@api.depends("compliance_manager_ids", "compliance_manager_ids.manager_ref_id")
def _compute_has_compliance_criteria(self):
Expand Down Expand Up @@ -273,6 +288,16 @@ def _compute_beneficiary_count(self):
count = rec.count_beneficiaries(None)["value"]
rec.update({"beneficiaries_count": count})

def refresh_beneficiary_counts(self):
    """Recompute all beneficiary statistics after bulk operations.

    Intended for use after raw SQL inserts that bypass ORM dependency
    tracking (e.g. ``bulk_create_memberships`` with
    ``skip_duplicates=True``), when the compute methods would otherwise
    not be re-triggered.
    """
    # Run each compute explicitly, in the same order as before.
    for recompute in (
        self._compute_beneficiary_count,
        self._compute_eligible_beneficiary_count,
        self._compute_has_members,
    ):
        recompute()

@api.depends("cycle_ids")
def _compute_cycle_count(self):
for rec in self:
Expand Down
8 changes: 8 additions & 0 deletions spp_programs/models/registrant.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,8 @@ def _compute_total_entitlements_count(self):
@api.depends("program_membership_ids")
def _compute_program_membership_count(self):
"""Batch-efficient program membership count using read_group."""
if self.env.context.get("skip_registrant_statistics"):
return
Comment on lines +46 to +47
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

The skip_registrant_statistics context flag allows bypassing the recomputation of store=True fields such as program_membership_count. However, unlike the spp.program and spp.cycle models, no corresponding refresh method is provided for res.partner to recompute these statistics after bulk operations. This will result in stale data remaining in the database if the flag is used or if raw SQL is employed.

Please consider adding a refresh_registrant_statistics() method to the res.partner inheritance and calling it in the appropriate completion handlers (e.g., in mark_import_as_done within eligibility_manager.py).

if not self:
return

Expand All @@ -66,6 +68,8 @@ def _compute_program_membership_count(self):
@api.depends("entitlement_ids")
def _compute_entitlements_count(self):
"""Batch-efficient entitlements count using _read_group."""
if self.env.context.get("skip_registrant_statistics"):
return
if not self:
return

Expand All @@ -89,6 +93,8 @@ def _compute_entitlements_count(self):
@api.depends("cycle_ids")
def _compute_cycle_count(self):
"""Batch-efficient cycle membership count using _read_group."""
if self.env.context.get("skip_registrant_statistics"):
return
if not self:
return

Expand All @@ -112,6 +118,8 @@ def _compute_cycle_count(self):
@api.depends("inkind_entitlement_ids")
def _compute_inkind_entitlements_count(self):
"""Batch-efficient in-kind entitlements count using _read_group."""
if self.env.context.get("skip_registrant_statistics"):
return
if not self:
return

Expand Down
Loading
Loading