# queue_manager.py (forked from plastic-labs/honcho)
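"""Queue manager for the deriver's background work queue.

The QueueManager below polls for unprocessed work units, claims them by
inserting ActiveQueueSession rows, fans each claimed unit out to an async
worker task (batched for representation tasks, item by item otherwise),
exports queue and session health gauges to Prometheus, and publishes a
queue.empty webhook event when a work unit drains.
"""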

import asyncio
import signal
import time
from asyncio import Task
from collections.abc import Sequence
from datetime import datetime, timedelta, timezone
from logging import getLogger
from typing import Any, NamedTuple, cast

import sentry_sdk
from dotenv import load_dotenv
from nanoid import generate as generate_nanoid
from sentry_sdk.integrations.asyncio import AsyncioIntegration
from sqlalchemy import and_, case, delete, or_, select, update
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.engine import CursorResult
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.sql import func

from src import models
from src.cache.client import close_cache, init_cache
from src.config import settings
from src.dependencies import tracked_db
from src.deriver.consumer import (
    process_item,
    process_representation_batch,
)
from src.dreamer.dream_scheduler import (
    DreamScheduler,
    get_dream_scheduler,
    set_dream_scheduler,
)
from src.models import QueueItem
from src.reconciler import (
    ReconcilerScheduler,
    get_reconciler_scheduler,
    set_reconciler_scheduler,
)
from src.schemas import ResolvedConfiguration
from src.telemetry import prometheus_metrics
from src.telemetry.sentry import initialize_sentry
from src.utils.work_unit import parse_work_unit_key
from src.webhooks.events import (
    QueueEmptyEvent,
    publish_webhook_event,
)

logger = getLogger(__name__)

load_dotenv(override=True)


class WorkerOwnership(NamedTuple):
    """Represents the instance of a work unit that a worker is processing."""

    work_unit_key: str
    aqs_id: str  # The ID of the ActiveQueueSession that the worker is processing


def _detach_queue_batch_objects(
    db: AsyncSession,
    messages_context: list[models.Message],
    items_to_process: list[QueueItem],
) -> None:
    """Detach loaded batch objects so they remain usable after tracked_db exits."""
    seen: set[int] = set()
    for obj in [*messages_context, *items_to_process]:
        obj_id = id(obj)
        if obj_id in seen:
            continue
        db.expunge(obj)
        seen.add(obj_id)


def _resolve_batch_configuration(
    items_to_process: list[QueueItem],
) -> tuple[list[QueueItem], ResolvedConfiguration | None]:
    """Keep only the initial homogeneous configuration prefix for a batch."""
    if not items_to_process:
        return [], None
    raw_config = items_to_process[0].payload.get("configuration")
    resolved_config = (
        None if raw_config is None else ResolvedConfiguration.model_validate(raw_config)
    )
    valid_items: list[QueueItem] = []
    for item in items_to_process:
        item_raw_config = item.payload.get("configuration")
        item_config = (
            None
            if item_raw_config is None
            else ResolvedConfiguration.model_validate(item_raw_config)
        )
        if item_config != resolved_config:
            break
        valid_items.append(item)
    return valid_items, resolved_config
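
# Example of the prefix rule above: items whose payloads resolve to
# configurations [A, A, B, A] yield only the first two items (paired with A);
# the remaining items stay on the queue for a later batch.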


class QueueManager:
    def __init__(self):
        self.shutdown_event: asyncio.Event = asyncio.Event()
        self.active_tasks: set[asyncio.Task[None]] = set()
        self.worker_ownership: dict[str, WorkerOwnership] = {}
        self.queue_empty_flag: asyncio.Event = asyncio.Event()
        self.last_queue_metrics_refresh_at: float = 0.0
        self.queue_metrics_refresh_interval_seconds: float = 5.0
        self.seen_queue_depth_labels: set[tuple[str, str, str]] = set()
        self.seen_queue_oldest_age_labels: set[tuple[str, str, str]] = set()
        self.seen_queue_error_backlog_labels: set[tuple[str, str]] = set()
        self.seen_sessions_active_workspaces: set[str] = set()
        self.seen_session_last_message_labels: set[tuple[str, str]] = set()
        self.seen_session_queue_depth_labels: set[tuple[str, str, str]] = set()
        self.seen_session_queue_oldest_age_labels: set[tuple[str, str, str]] = set()

        # Initialize from settings
        self.workers: int = settings.DERIVER.WORKERS
        self.semaphore: asyncio.Semaphore = asyncio.Semaphore(self.workers)

        # Get or create the singleton dream scheduler
        existing_scheduler = get_dream_scheduler()
        if existing_scheduler is None:
            self.dream_scheduler: DreamScheduler = DreamScheduler()
            set_dream_scheduler(self.dream_scheduler)
        else:
            self.dream_scheduler = existing_scheduler

        # Get or create the singleton reconciler scheduler
        existing_reconciler = get_reconciler_scheduler()
        if existing_reconciler is None:
            self.reconciler_scheduler: ReconcilerScheduler = ReconcilerScheduler()
            set_reconciler_scheduler(self.reconciler_scheduler)
        else:
            self.reconciler_scheduler = existing_reconciler

        # Initialize Sentry if enabled, using settings
        if settings.SENTRY.ENABLED:
            initialize_sentry(integrations=[AsyncioIntegration()])

    def add_task(self, task: asyncio.Task[None]) -> None:
        """Track a new task"""
        self.active_tasks.add(task)
        task.add_done_callback(self.active_tasks.discard)

    def track_worker_work_unit(
        self, worker_id: str, work_unit_key: str, aqs_id: str
    ) -> None:
        """Track a work unit owned by a specific worker"""
        self.worker_ownership[worker_id] = WorkerOwnership(work_unit_key, aqs_id)

    def untrack_worker_work_unit(self, worker_id: str, work_unit_key: str) -> None:
        """Remove a work unit from worker tracking"""
        ownership = self.worker_ownership.get(worker_id)
        if ownership and ownership.work_unit_key == work_unit_key:
            del self.worker_ownership[worker_id]

    def create_worker_id(self) -> str:
        """Generate a unique worker ID for this processing task"""
        return generate_nanoid()

    def get_total_owned_work_units(self) -> int:
        """Get the total number of work units owned by all workers"""
        return len(self.worker_ownership)

    async def initialize(self) -> None:
        """Setup signal handlers, initialize client, and start the main polling loop"""
        logger.debug(f"Initializing QueueManager with {self.workers} workers")

        # Set up signal handlers
        loop = asyncio.get_running_loop()
        signals = (signal.SIGTERM, signal.SIGINT)
        for sig in signals:
            loop.add_signal_handler(
                sig, lambda s=sig: asyncio.create_task(self.shutdown(s))
            )
        logger.debug("Signal handlers registered")

        # Start the reconciler scheduler
        try:
            await self.reconciler_scheduler.start()
        except Exception:
            logger.exception("Failed to start reconciler scheduler")

        # Run the polling loop directly in this task
        logger.debug("Starting polling loop directly")
        try:
            await self.polling_loop()
        finally:
            await self.cleanup()

    async def shutdown(self, sig: signal.Signals) -> None:
        """Handle graceful shutdown"""
        logger.info(f"Received exit signal {sig.name}...")
        self.shutdown_event.set()

        # Cancel all pending dreams
        await self.dream_scheduler.shutdown()

        # Stop the reconciler scheduler
        await self.reconciler_scheduler.shutdown()

        if self.active_tasks:
            logger.info(
                f"Waiting for {len(self.active_tasks)} active tasks to complete..."
            )
            await asyncio.gather(*self.active_tasks, return_exceptions=True)

    async def cleanup(self) -> None:
        """Clean up owned work units"""
        total_work_units = self.get_total_owned_work_units()
        if total_work_units > 0:
            logger.debug(f"Cleaning up {total_work_units} owned work units...")
            try:
                # Use the tracked_db dependency for transaction safety
                async with tracked_db("queue_cleanup") as db:
                    aqs_ids = [
                        ownership.aqs_id for ownership in self.worker_ownership.values()
                    ]
                    if aqs_ids:
                        await db.execute(
                            delete(models.ActiveQueueSession).where(
                                models.ActiveQueueSession.id.in_(aqs_ids)
                            )
                        )
                    await db.commit()
            except Exception as e:
                logger.error(f"Error during cleanup: {str(e)}")
                if settings.SENTRY.ENABLED:
                    sentry_sdk.capture_exception(e)
            finally:
                self.worker_ownership.clear()

    ##########################
    # Polling and Scheduling #
    ##########################
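
    # Stale reaping needs to be safe when more than one deriver process is
    # running: the SELECT ... FOR UPDATE SKIP LOCKED below only returns rows
    # this transaction could lock, so concurrent reapers skip each other's
    # rows instead of blocking or double-deleting.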
    async def cleanup_stale_work_units(self) -> None:
        """Clean up stale work units"""
        async with tracked_db("cleanup_stale_work_units") as db:
            cutoff = datetime.now(timezone.utc) - timedelta(
                minutes=settings.DERIVER.STALE_SESSION_TIMEOUT_MINUTES
            )
            stale_ids = (
                (
                    await db.execute(
                        select(models.ActiveQueueSession.id)
                        .where(models.ActiveQueueSession.last_updated < cutoff)
                        .order_by(models.ActiveQueueSession.last_updated)
                        .with_for_update(skip_locked=True)
                    )
                )
                .scalars()
                .all()
            )
            # Delete only the records we successfully got locks for
            if stale_ids:
                await db.execute(
                    delete(models.ActiveQueueSession).where(
                        models.ActiveQueueSession.id.in_(stale_ids)
                    )
                )
            await db.commit()

    async def get_and_claim_work_units(self) -> dict[str, str]:
        """
        Get available work units that aren't being processed.

        For representation tasks, only returns work units with accumulated tokens
        >= REPRESENTATION_BATCH_MAX_TOKENS (forced batching), unless FLUSH_ENABLED is True.

        Returns a dict mapping work_unit_key to aqs_id.
        """
        limit: int = max(0, self.workers - self.get_total_owned_work_units())
        if limit == 0:
            return {}

        batch_max_tokens = settings.DERIVER.REPRESENTATION_BATCH_MAX_TOKENS

        async with tracked_db("get_available_work_units") as db:
            representation_prefix = "representation:"

            token_stats_subq = (
                select(
                    models.QueueItem.work_unit_key,
                    func.sum(models.Message.token_count).label("total_tokens"),
                )
                .join(
                    models.Message,
                    models.QueueItem.message_id == models.Message.id,
                )
                .where(~models.QueueItem.processed)
                .where(models.QueueItem.work_unit_key.startswith(representation_prefix))
                .group_by(models.QueueItem.work_unit_key)
                .subquery()
            )

            work_units_subq = (
                select(models.QueueItem.work_unit_key)
                .where(~models.QueueItem.processed)
                .group_by(models.QueueItem.work_unit_key)
                .subquery()
            )

            query = (
                select(work_units_subq.c.work_unit_key)
                .limit(limit)
                .outerjoin(
                    token_stats_subq,
                    work_units_subq.c.work_unit_key == token_stats_subq.c.work_unit_key,
                )
                .where(
                    ~select(models.ActiveQueueSession.id)
                    .where(
                        models.ActiveQueueSession.work_unit_key
                        == work_units_subq.c.work_unit_key
                    )
                    .exists()
                )
            )

            # Apply batch threshold filter (skip if FLUSH_ENABLED is True)
            if not settings.DERIVER.FLUSH_ENABLED and batch_max_tokens > 0:
                query = query.where(
                    or_(
                        ~work_units_subq.c.work_unit_key.startswith(
                            representation_prefix
                        ),
                        func.coalesce(token_stats_subq.c.total_tokens, 0)
                        >= batch_max_tokens,
                    )
                )

            result = await db.execute(query)
            available_units = result.scalars().all()

            if not available_units:
                await db.commit()
                return {}

            claimed_mapping = await self.claim_work_units(db, available_units)
            await db.commit()
            return claimed_mapping
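
    # Claiming relies on ActiveQueueSession.work_unit_key being unique (assumed
    # from the ON CONFLICT DO NOTHING below): concurrent deriver processes can
    # race to insert the same keys, and only the rows that actually land are
    # returned, so each work unit ends up owned by exactly one worker.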
    async def claim_work_units(
        self, db: AsyncSession, work_unit_keys: Sequence[str]
    ) -> dict[str, str]:
        """
        Claim work units and return a mapping of work_unit_key to aqs_id.
        Returns only the work units that were successfully claimed.
        """
        values = [{"work_unit_key": key} for key in work_unit_keys]
        stmt = (
            insert(models.ActiveQueueSession)
            .values(values)
            .on_conflict_do_nothing()
            .returning(
                models.ActiveQueueSession.work_unit_key, models.ActiveQueueSession.id
            )
        )
        result = await db.execute(stmt)
        claimed_rows = result.all()
        claimed_mapping = {row[0]: row[1] for row in claimed_rows}
        logger.debug(
            f"Claimed {len(claimed_mapping)} work units: {list(claimed_mapping.keys())}"
        )
        return claimed_mapping
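
    # One polling tick, in order: publish the active-worker gauge, back off if
    # the queue looked empty on the previous tick, reap stale ActiveQueueSession
    # rows, skip claiming while every worker slot is busy, then refresh queue
    # health metrics, claim available work units, and spawn one
    # process_work_unit task per claimed unit.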
    async def polling_loop(self) -> None:
        """Main polling loop to find and process new work units"""
        logger.debug("Starting polling loop")
        try:
            while not self.shutdown_event.is_set():
                if settings.METRICS.ENABLED:
                    prometheus_metrics.set_deriver_active_workers(
                        count=self.get_total_owned_work_units()
                    )
                if self.queue_empty_flag.is_set():
                    # logger.debug("Queue empty flag set, waiting")
                    await asyncio.sleep(settings.DERIVER.POLLING_SLEEP_INTERVAL_SECONDS)
                    self.queue_empty_flag.clear()
                    continue

                # Always reap stale AQS rows, even when the pool is full.
                # Previously this was gated behind `if self.semaphore.locked()`
                # below, which deadlocks the system when all workers are
                # wedged on a hung LLM call: cleanup never runs, AQS rows go
                # unbounded, no new claims possible. Cleanup is cheap; do it
                # unconditionally on every poll tick.
                try:
                    await self.cleanup_stale_work_units()
                except Exception as cleanup_exc:
                    logger.exception(
                        "Stale work unit cleanup failed: %s", cleanup_exc
                    )

                # Check if we have capacity before querying for new work
                if self.semaphore.locked():
                    # logger.debug("All workers busy, waiting")
                    await asyncio.sleep(settings.DERIVER.POLLING_SLEEP_INTERVAL_SECONDS)
                    continue

                try:
                    await self.refresh_queue_health_metrics()
                    claimed_work_units = await self.get_and_claim_work_units()
                    if claimed_work_units:
                        for work_unit_key, aqs_id in claimed_work_units.items():
                            # Create a new task for processing this work unit
                            if not self.shutdown_event.is_set():
                                # Track worker ownership
                                worker_id = self.create_worker_id()
                                self.track_worker_work_unit(
                                    worker_id, work_unit_key, aqs_id
                                )
                                task: Task[None] = asyncio.create_task(
                                    self.process_work_unit(work_unit_key, worker_id)
                                )
                                self.add_task(task)
                    else:
                        self.queue_empty_flag.set()
                        await asyncio.sleep(
                            settings.DERIVER.POLLING_SLEEP_INTERVAL_SECONDS
                        )
                except Exception as e:
                    logger.exception("Error in polling loop")
                    if settings.SENTRY.ENABLED:
                        sentry_sdk.capture_exception(e)
                    # Note: rollback is handled by tracked_db dependency
                    await asyncio.sleep(settings.DERIVER.POLLING_SLEEP_INTERVAL_SECONDS)
        finally:
            logger.info("Polling loop stopped")

    async def refresh_queue_health_metrics(self) -> None:
        """Refresh queue health gauges on a short interval."""
        now = time.monotonic()
        if (
            now - self.last_queue_metrics_refresh_at
            < self.queue_metrics_refresh_interval_seconds
        ):
            return
        self.last_queue_metrics_refresh_at = now

        async with tracked_db("refresh_queue_health_metrics") as db:
            is_in_progress = (~models.QueueItem.processed) & (
                models.ActiveQueueSession.id.isnot(None)
            )
            is_pending = (~models.QueueItem.processed) & (
                models.ActiveQueueSession.id.is_(None)
            )
            stmt = (
                select(
                    models.QueueItem.workspace_name,
                    models.QueueItem.task_type,
                    func.count(case((is_pending, 1))).label("pending_count"),
                    func.count(case((is_in_progress, 1))).label("in_progress_count"),
                    func.count(case((models.QueueItem.error.isnot(None), 1))).label(
                        "error_backlog_count"
                    ),
                    func.min(case((is_pending, models.QueueItem.created_at))).label(
                        "oldest_pending_at"
                    ),
                    func.min(case((is_in_progress, models.QueueItem.created_at))).label(
                        "oldest_in_progress_at"
                    ),
                )
                .select_from(models.QueueItem)
                .outerjoin(
                    models.ActiveQueueSession,
                    models.QueueItem.work_unit_key
                    == models.ActiveQueueSession.work_unit_key,
                )
                .where(models.QueueItem.workspace_name.isnot(None))
                .where(
                    models.QueueItem.task_type.in_(("representation", "summary", "dream"))
                )
                .group_by(models.QueueItem.workspace_name, models.QueueItem.task_type)
            )
            result = await db.execute(stmt)
            rows = result.all()
            await db.commit()

        current_depth_labels: set[tuple[str, str, str]] = set()
        current_oldest_age_labels: set[tuple[str, str, str]] = set()
        current_error_backlog_labels: set[tuple[str, str]] = set()
        current_sessions_active_workspaces: set[str] = set()
        current_session_last_message_labels: set[tuple[str, str]] = set()
        current_session_queue_depth_labels: set[tuple[str, str, str]] = set()
        current_session_queue_oldest_age_labels: set[tuple[str, str, str]] = set()

        now_utc = datetime.now(timezone.utc)
        for row in rows:
            workspace_name = row.workspace_name
            task_type = row.task_type
            if workspace_name is None:
                continue
            pending_count = int(row.pending_count or 0)
            in_progress_count = int(row.in_progress_count or 0)
            error_backlog_count = int(row.error_backlog_count or 0)
            for state, count in (
                ("pending", pending_count),
                ("in_progress", in_progress_count),
            ):
                label = (workspace_name, task_type, state)
                current_depth_labels.add(label)
                prometheus_metrics.set_deriver_queue_depth(
                    workspace_name=workspace_name,
                    task_type=task_type,
                    state=state,
                    count=count,
                )
            if row.oldest_pending_at is not None:
                current_oldest_age_labels.add((workspace_name, task_type, "pending"))
                prometheus_metrics.set_deriver_queue_oldest_age(
                    workspace_name=workspace_name,
                    task_type=task_type,
                    state="pending",
                    age_seconds=max(
                        0.0, (now_utc - row.oldest_pending_at).total_seconds()
                    ),
                )
            if row.oldest_in_progress_at is not None:
                current_oldest_age_labels.add(
                    (workspace_name, task_type, "in_progress")
                )
                prometheus_metrics.set_deriver_queue_oldest_age(
                    workspace_name=workspace_name,
                    task_type=task_type,
                    state="in_progress",
                    age_seconds=max(
                        0.0, (now_utc - row.oldest_in_progress_at).total_seconds()
                    ),
                )
            backlog_label = (workspace_name, task_type)
            current_error_backlog_labels.add(backlog_label)
            prometheus_metrics.set_deriver_queue_error_backlog(
                workspace_name=workspace_name,
                task_type=task_type,
                count=error_backlog_count,
            )

        async with tracked_db("refresh_session_metrics") as db:
            active_sessions_stmt = (
                select(
                    models.Session.workspace_name,
                    func.count(models.Session.id).label("active_session_count"),
                )
                .where(models.Session.is_active == True)  # noqa: E712
                .group_by(models.Session.workspace_name)
            )
            active_sessions_rows = (await db.execute(active_sessions_stmt)).all()

            session_activity_stmt = (
                select(
                    models.Session.workspace_name,
                    models.Session.name,
                    func.max(models.Message.created_at).label("last_message_at"),
                    models.Session.created_at.label("session_created_at"),
                )
                .select_from(models.Session)
                .outerjoin(
                    models.Message,
                    and_(
                        models.Message.workspace_name == models.Session.workspace_name,
                        models.Message.session_name == models.Session.name,
                    ),
                )
                .where(models.Session.is_active == True)  # noqa: E712
                .group_by(
                    models.Session.workspace_name,
                    models.Session.name,
                    models.Session.created_at,
                )
            )
            session_activity_rows = (await db.execute(session_activity_stmt)).all()

            session_queue_stmt = (
                select(
                    models.QueueItem.workspace_name,
                    models.Session.name.label("session_name"),
                    func.count(case((is_pending, 1))).label("pending_count"),
                    func.count(case((is_in_progress, 1))).label("in_progress_count"),
                    func.min(case((is_pending, models.QueueItem.created_at))).label(
                        "oldest_pending_at"
                    ),
                    func.min(case((is_in_progress, models.QueueItem.created_at))).label(
                        "oldest_in_progress_at"
                    ),
                )
                .select_from(models.QueueItem)
                .join(models.Session, models.QueueItem.session_id == models.Session.id)
                .outerjoin(
                    models.ActiveQueueSession,
                    models.QueueItem.work_unit_key
                    == models.ActiveQueueSession.work_unit_key,
                )
                .where(models.QueueItem.workspace_name.isnot(None))
                .where(models.Session.is_active == True)  # noqa: E712
                .where(~models.QueueItem.processed)
                .where(
                    models.QueueItem.task_type.in_(("representation", "summary", "dream"))
                )
                .group_by(models.QueueItem.workspace_name, models.Session.name)
            )
            session_queue_rows = (await db.execute(session_queue_stmt)).all()
            await db.commit()

        for row in active_sessions_rows:
            workspace_name = row.workspace_name
            if workspace_name is None:
                continue
            current_sessions_active_workspaces.add(workspace_name)
            prometheus_metrics.set_sessions_active(
                workspace_name=workspace_name,
                count=int(row.active_session_count or 0),
            )

        for row in session_activity_rows:
            workspace_name = row.workspace_name
            session_name = row.name
            if workspace_name is None or session_name is None:
                continue
            current_session_last_message_labels.add((workspace_name, session_name))
            last_activity_at = row.last_message_at or row.session_created_at
            prometheus_metrics.set_session_last_message_age(
                workspace_name=workspace_name,
                session_name=session_name,
                age_seconds=max(0.0, (now_utc - last_activity_at).total_seconds()),
            )

        for row in session_queue_rows:
            workspace_name = row.workspace_name
            session_name = row.session_name
            if workspace_name is None or session_name is None:
                continue
            pending_count = int(row.pending_count or 0)
            in_progress_count = int(row.in_progress_count or 0)
            for state, count in (
                ("pending", pending_count),
                ("in_progress", in_progress_count),
            ):
                label = (workspace_name, session_name, state)
                current_session_queue_depth_labels.add(label)
                prometheus_metrics.set_session_queue_depth(
                    workspace_name=workspace_name,
                    session_name=session_name,
                    state=state,
                    count=count,
                )
            if row.oldest_pending_at is not None:
                current_session_queue_oldest_age_labels.add(
                    (workspace_name, session_name, "pending")
                )
                prometheus_metrics.set_session_queue_oldest_age(
                    workspace_name=workspace_name,
                    session_name=session_name,
                    state="pending",
                    age_seconds=max(
                        0.0, (now_utc - row.oldest_pending_at).total_seconds()
                    ),
                )
            if row.oldest_in_progress_at is not None:
                current_session_queue_oldest_age_labels.add(
                    (workspace_name, session_name, "in_progress")
                )
                prometheus_metrics.set_session_queue_oldest_age(
                    workspace_name=workspace_name,
                    session_name=session_name,
                    state="in_progress",
                    age_seconds=max(
                        0.0, (now_utc - row.oldest_in_progress_at).total_seconds()
                    ),
                )
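
        # Any label combination that was exported on a previous refresh but is
        # missing from this one gets reset to zero below, so gauges for drained
        # workspaces and sessions do not keep reporting their last value.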
        for workspace_name, task_type, state in (
            self.seen_queue_depth_labels - current_depth_labels
        ):
            prometheus_metrics.set_deriver_queue_depth(
                workspace_name=workspace_name,
                task_type=task_type,
                state=state,
                count=0,
            )
        for workspace_name, task_type, state in (
            self.seen_queue_oldest_age_labels - current_oldest_age_labels
        ):
            prometheus_metrics.set_deriver_queue_oldest_age(
                workspace_name=workspace_name,
                task_type=task_type,
                state=state,
                age_seconds=0.0,
            )
        for workspace_name, task_type in (
            self.seen_queue_error_backlog_labels - current_error_backlog_labels
        ):
            prometheus_metrics.set_deriver_queue_error_backlog(
                workspace_name=workspace_name,
                task_type=task_type,
                count=0,
            )
        for workspace_name in (
            self.seen_sessions_active_workspaces - current_sessions_active_workspaces
        ):
            prometheus_metrics.set_sessions_active(
                workspace_name=workspace_name,
                count=0,
            )
        for workspace_name, session_name in (
            self.seen_session_last_message_labels - current_session_last_message_labels
        ):
            prometheus_metrics.set_session_last_message_age(
                workspace_name=workspace_name,
                session_name=session_name,
                age_seconds=0.0,
            )
        for workspace_name, session_name, state in (
            self.seen_session_queue_depth_labels - current_session_queue_depth_labels
        ):
            prometheus_metrics.set_session_queue_depth(
                workspace_name=workspace_name,
                session_name=session_name,
                state=state,
                count=0,
            )
        for workspace_name, session_name, state in (
            self.seen_session_queue_oldest_age_labels
            - current_session_queue_oldest_age_labels
        ):
            prometheus_metrics.set_session_queue_oldest_age(
                workspace_name=workspace_name,
                session_name=session_name,
                state=state,
                age_seconds=0.0,
            )

        self.seen_queue_depth_labels = current_depth_labels
        self.seen_queue_oldest_age_labels = current_oldest_age_labels
        self.seen_queue_error_backlog_labels = current_error_backlog_labels
        self.seen_sessions_active_workspaces = current_sessions_active_workspaces
        self.seen_session_last_message_labels = current_session_last_message_labels
        self.seen_session_queue_depth_labels = current_session_queue_depth_labels
        self.seen_session_queue_oldest_age_labels = (
            current_session_queue_oldest_age_labels
        )

    ######################
    # Queue Worker Logic #
    ######################

    async def _handle_processing_error(
        self,
        error: Exception,
        items: list[QueueItem],
        work_unit_key: str,
        context: str,
    ) -> None:
        """
        Handle processing errors by marking queue items as errored, logging, and forwarding to Sentry.

        We only mark the first queue item as errored so we don't potentially throw away a batch. This allows us
        to incrementally attempt to process the batch while still maintaining progress in a work unit.

        Args:
            error: The exception that occurred
            items: The queue items that were being processed
            work_unit_key: The work unit key for the queue items
            context: Context string describing what was being processed (e.g., "processing representation batch")
        """
        error_msg = f"{error.__class__.__name__}: {str(error)}"
        try:
            if items:
                await self.mark_queue_item_as_errored(
                    items[0], work_unit_key, error_msg
                )
        except Exception as mark_error:
            logger.error(
                f"Failed to mark queue items as errored for work unit {work_unit_key}: {mark_error}",
                exc_info=True,
            )
        logger.error(
            f"Error {context} for work unit {work_unit_key}: {error}",
            exc_info=True,
        )
        if settings.SENTRY.ENABLED:
            sentry_sdk.capture_exception(error)
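
    # work_unit_key is an opaque string at this layer; parse_work_unit_key
    # (src.utils.work_unit) turns it back into a structured value whose
    # task_type, workspace_name, session_name, observer, and observed fields
    # drive the routing below and the queue.empty webhook payload.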
    async def process_work_unit(self, work_unit_key: str, worker_id: str) -> None:
        """Process all queue items for a specific work unit by routing to the correct handler."""
        logger.debug(f"Starting to process work unit {work_unit_key}")
        work_unit = parse_work_unit_key(work_unit_key)
        async with self.semaphore:
            queue_item_count = 0
            try:
                while not self.shutdown_event.is_set():
                    # Get worker ownership info for verification
                    ownership = self.worker_ownership.get(worker_id)
                    if not ownership or ownership.work_unit_key != work_unit_key:
                        logger.warning(
                            f"Worker {worker_id} lost ownership of work unit {work_unit_key}, stopping processing"
                        )
                        break
                    try:
                        if work_unit.task_type == "representation":
                            (
                                messages_context,
                                items_to_process,
                                message_level_configuration,
                            ) = await self.get_queue_item_batch(
                                work_unit.task_type, work_unit_key, ownership.aqs_id
                            )
                            logger.debug(
                                f"Worker {worker_id} retrieved {len(messages_context)} messages and {len(items_to_process)} queue items for work unit {work_unit_key} (AQS ID: {ownership.aqs_id})"
                            )
                            if not items_to_process:
                                logger.debug(
                                    f"No more queue items to process for work unit {work_unit_key} for worker {worker_id}"
                                )
                                break
                            try:
                                # Extract observers from the payload (handle both old and new format)
                                payload = items_to_process[0].payload
                                observers = payload.get("observers")
                                if observers is None:
                                    # Legacy format: single observer string
                                    legacy_observer = payload.get("observer")
                                    if legacy_observer:
                                        observers = [legacy_observer]
                                    else:
                                        observers = []
                                queue_item_message_ids = [
                                    item.message_id
                                    for item in items_to_process
                                    if item.message_id is not None
                                ]
                                # Bounded by DERIVER.WORK_UNIT_TIMEOUT_SECONDS so a hung
                                # LLM call (CF Gateway streaming a Gemini response that
                                # never terminates) raises TimeoutError instead of holding
                                # the semaphore forever. Without this the worker pool
                                # deadlocks: every slot held by a hung call, no new claims
                                # possible. See cleanup_stale_work_units gating fix above.
                                await asyncio.wait_for(
                                    process_representation_batch(
                                        messages_context,
                                        message_level_configuration,
                                        observers=observers,
                                        observed=work_unit.observed,
                                        queue_item_message_ids=queue_item_message_ids,
                                    ),
                                    timeout=settings.DERIVER.WORK_UNIT_TIMEOUT_SECONDS,
                                )
                                await self.mark_queue_items_as_processed(
                                    items_to_process, work_unit_key
                                )
                                queue_item_count += len(items_to_process)
                            except Exception as e:
                                await self._handle_processing_error(
                                    e,
                                    items_to_process,
                                    work_unit_key,
                                    f"processing {work_unit.task_type} batch",
                                )
                        else:
                            queue_item = await self.get_next_queue_item(
                                work_unit.task_type, work_unit_key, ownership.aqs_id
                            )
                            if not queue_item:
                                logger.debug(
                                    f"No more queue items to process for work unit {work_unit_key} for worker {worker_id}"
                                )
                                break
                            try:
                                # Same WORK_UNIT_TIMEOUT_SECONDS bound as the
                                # representation path; covers summary/dream/webhook
                                # task types so a hung specialist call cannot hold
                                # the semaphore indefinitely.
                                await asyncio.wait_for(
                                    process_item(queue_item),
                                    timeout=settings.DERIVER.WORK_UNIT_TIMEOUT_SECONDS,
                                )
                                await self.mark_queue_items_as_processed(
                                    [queue_item], work_unit_key
                                )
                                queue_item_count += 1
                            except Exception as e:
                                await self._handle_processing_error(
                                    e,
                                    [queue_item],
                                    work_unit_key,
                                    "processing queue item",
                                )
                    except Exception as e:
                        logger.error(
                            f"Error in processing loop for work unit {work_unit_key}: {e}",
                            exc_info=True,
                        )
                        if settings.SENTRY.ENABLED:
                            sentry_sdk.capture_exception(e)

                    # Check for shutdown after processing each batch
                    if self.shutdown_event.is_set():
                        logger.debug(
                            "Shutdown requested, stopping processing for work unit %s",
                            work_unit_key,
                        )
                        break
            finally:
                # Remove work unit from active_queue_sessions when done
                ownership: WorkerOwnership | None = self.worker_ownership.get(worker_id)
                if ownership and ownership.work_unit_key == work_unit_key:
                    removed = await self._cleanup_work_unit(
                        ownership.aqs_id, work_unit_key
                    )
                else:
                    removed = False
                self.untrack_worker_work_unit(worker_id, work_unit_key)

                if removed and queue_item_count > 0:
                    # Only publish webhook if we actually removed an active session
                    try:
                        if (
                            work_unit.task_type in ["representation", "summary"]
                            and work_unit.workspace_name is not None
                        ):
                            logger.debug(
                                f"Publishing queue.empty event for {work_unit_key} in workspace {work_unit.workspace_name}"
                            )
                            await publish_webhook_event(
                                QueueEmptyEvent(
                                    workspace_id=work_unit.workspace_name,
                                    queue_type=work_unit.task_type,
                                    session_id=work_unit.session_name,
                                    observer=work_unit.observer,
                                    observed=work_unit.observed,
                                )
                            )
                    except Exception:
                        logger.exception("Error triggering queue_empty webhook")
                else:
                    logger.debug(
                        f"Work unit {work_unit_key} already cleaned up by another worker, skipping webhook"
                    )

    @sentry_sdk.trace
    async def get_next_queue_item(
        self, task_type: str, work_unit_key: str, aqs_id: str
    ) -> QueueItem | None:
        """Get the next queue item to process for a specific work unit."""
        if task_type == "representation":
            raise ValueError(
                "representation tasks are not supported for get_next_queue_item"
            )
        async with tracked_db("get_next_queue_item") as db:
            # ActiveQueueSession conditions for worker ownership verification
            aqs_conditions = [
                models.ActiveQueueSession.work_unit_key == work_unit_key,
                models.ActiveQueueSession.id == aqs_id,
            ]
            query = (
                select(models.QueueItem)
                .join(
                    models.ActiveQueueSession,
                    models.QueueItem.work_unit_key
                    == models.ActiveQueueSession.work_unit_key,
                )
                .where(models.QueueItem.work_unit_key == work_unit_key)
                .where(~models.QueueItem.processed)
                .where(*aqs_conditions)
                .order_by(models.QueueItem.id)
                .limit(1)
            )
            result = await db.execute(query)
            queue_item = result.scalar_one_or_none()