Skip to content

Commit 716d65a

Browse files
Mark work queue items through pool mark fn; remove rb_gc_register_address
Co-authored-by: Cursor <cursoragent@cursor.com>
1 parent f21bc22 commit 716d65a

1 file changed

Lines changed: 25 additions & 15 deletions

File tree

ext/io/event/worker_pool.c

Lines changed: 25 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -102,6 +102,28 @@ static void worker_pool_mark(void *ptr)
102102
rb_gc_mark(worker->thread); // thread objects are currently pinned anyway
103103
worker = next;
104104
}
105+
106+
// Mark all VALUEs in queued work items. While work is executing, the work
107+
// struct is on the C stack of worker_pool_call (the blocked fiber), so the
108+
// conservative scanner already covers it. But while it is waiting in the
109+
// queue we need to mark from here to keep blocking_operation_value alive
110+
// across any GC that might run during that window.
111+
//
112+
// We use trylock rather than lock: if a worker thread is currently
113+
// dequeuing (which it does while holding the mutex but without the GVL),
114+
// we skip this cycle. The work will still be on the blocked fiber's
115+
// C stack and the conservative scan will keep the VALUEs alive.
116+
if (pthread_mutex_trylock(&pool->mutex) == 0) {
117+
struct IO_Event_WorkerPool_Work *work = pool->work_queue;
118+
while (work) {
119+
rb_gc_mark(work->blocking_operation_value);
120+
rb_gc_mark(work->scheduler);
121+
rb_gc_mark(work->blocker);
122+
rb_gc_mark(work->fiber);
123+
work = work->next;
124+
}
125+
pthread_mutex_unlock(&pool->mutex);
126+
}
105127
}
106128

107129
// Size functions for Ruby GC
@@ -342,11 +364,9 @@ static VALUE worker_pool_call(VALUE self, VALUE _blocking_operation) {
342364
}
343365

344366
// Create work item.
345-
// Keep blocking_operation_value as an explicit GC root: the raw C pointer
346-
// is used by the worker thread without the GVL, and if an exception causes
347-
// the caller's Ruby frames to be unwound, nothing else will keep the VALUE
348-
// alive — which would allow GC to free the embedded state the worker is
349-
// still writing into.
367+
// blocking_operation_value is marked by worker_pool_mark while the item
368+
// sits in the queue. Once dequeued and executing, the work struct lives on
369+
// the blocked fiber's C stack so the conservative scanner covers it.
350370
struct IO_Event_WorkerPool_Work work = {
351371
.blocking_operation = blocking_operation,
352372
.blocking_operation_value = _blocking_operation,
@@ -356,10 +376,6 @@ static VALUE worker_pool_call(VALUE self, VALUE _blocking_operation) {
356376
.fiber = fiber,
357377
.next = NULL
358378
};
359-
rb_gc_register_address(&work.blocking_operation_value);
360-
rb_gc_register_address(&work.scheduler);
361-
rb_gc_register_address(&work.blocker);
362-
rb_gc_register_address(&work.fiber);
363379

364380
// Enqueue work:
365381
pthread_mutex_lock(&pool->mutex);
@@ -394,12 +410,6 @@ static VALUE worker_pool_call(VALUE self, VALUE _blocking_operation) {
394410

395411
if (DEBUG) fprintf(stderr, "<- worker_pool_call:work completed=%d, state=%d\n", work.completed, state);
396412

397-
// Unregister GC roots before returning or re-raising.
398-
rb_gc_unregister_address(&work.blocking_operation_value);
399-
rb_gc_unregister_address(&work.scheduler);
400-
rb_gc_unregister_address(&work.blocker);
401-
rb_gc_unregister_address(&work.fiber);
402-
403413
if (state) {
404414
if (DEBUG) fprintf(stderr, "[worker_pool] exception path: blocking_operation=%p state_tag=%d\n",
405415
(void*)blocking_operation, state);

0 commit comments

Comments
 (0)