Skip to content

Commit 0c3bb5f

Browse files
committed
Parallel sweep: add more fiber pool lock assertions to cont.c
1 parent 603ac42 commit 0c3bb5f

File tree

1 file changed

+24
-7
lines changed

1 file changed

+24
-7
lines changed

cont.c

Lines changed: 24 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -298,24 +298,36 @@ rb_free_shared_fiber_pool(void)
298298

299299
static ID fiber_initialize_keywords[3] = {0};
300300

301+
// We don't use the VM lock to protect the shared fiber pool because the sweep
302+
// thread needs to be able to free fibers and it can't take the VM lock.
301303
rb_nativethread_lock_t fiber_lock;
302304
#ifdef RUBY_THREAD_PTHREAD_H
303305
pthread_t fiber_pool_lock_owner;
304306
#endif
305307

308+
// Returns true when the calling thread currently holds fiber_lock.
// On platforms without RUBY_THREAD_PTHREAD_H the owner cannot be queried,
// so the caller-supplied `fallback` is returned instead — callers pass the
// value that makes their assertion vacuously succeed there.
// MAYBE_UNUSED wraps the whole definition to silence unused-function
// warnings in builds where the assertions compile away.
// NOTE(review): comparing pthread_t values with == is not strictly portable
// per POSIX; pthread_equal() is the portable check — confirm against the
// platforms Ruby supports.
MAYBE_UNUSED(static inline bool
fiber_pool_locked_p(bool fallback))
{
#ifdef RUBY_THREAD_PTHREAD_H
    // fiber_pool_lock_owner is presumably recorded when fiber_lock is
    // acquired and cleared on release — verify against fiber_pool_lock().
    return pthread_self() == fiber_pool_lock_owner;
#else
    return fallback;
#endif
}
317+
306318
// Debug-build assertion that the calling thread holds fiber_lock.
// Compiles to a no-op where RUBY_THREAD_PTHREAD_H is not defined, since
// lock ownership cannot be determined on those platforms (hence the
// `true` fallback passed to fiber_pool_locked_p).
static inline void
ASSERT_fiber_pool_locked(void)
{
#ifdef RUBY_THREAD_PTHREAD_H
    VM_ASSERT(fiber_pool_locked_p(true));
#endif
}
313325

314326
// Debug-build assertion that the calling thread does NOT hold fiber_lock.
// The `false` fallback makes the negated check vacuously pass on platforms
// without RUBY_THREAD_PTHREAD_H, where ownership cannot be queried.
static inline void
ASSERT_fiber_pool_unlocked(void)
{
#ifdef RUBY_THREAD_PTHREAD_H
    VM_ASSERT(!fiber_pool_locked_p(false));
#endif
}
321333

@@ -439,6 +451,7 @@ fiber_pool_vacancy_reset(struct fiber_pool_vacancy * vacancy)
439451
inline static struct fiber_pool_vacancy *
440452
fiber_pool_vacancy_push(struct fiber_pool_vacancy * vacancy, struct fiber_pool_vacancy * head)
441453
{
454+
ASSERT_fiber_pool_locked();
442455
vacancy->next = head;
443456

444457
#ifdef FIBER_POOL_ALLOCATION_FREE
@@ -471,7 +484,7 @@ fiber_pool_vacancy_remove(struct fiber_pool_vacancy * vacancy)
471484
inline static struct fiber_pool_vacancy *
472485
fiber_pool_vacancy_pop(struct fiber_pool * pool)
473486
{
474-
// fiber_pool_lock is acquired
487+
ASSERT_fiber_pool_locked();
475488
struct fiber_pool_vacancy * vacancy = pool->vacancies;
476489

477490
if (vacancy) {
@@ -484,7 +497,7 @@ fiber_pool_vacancy_pop(struct fiber_pool * pool)
484497
inline static struct fiber_pool_vacancy *
485498
fiber_pool_vacancy_pop(struct fiber_pool * pool)
486499
{
487-
// fiber_pool_lock is acquired
500+
ASSERT_fiber_pool_locked();
488501
struct fiber_pool_vacancy * vacancy = pool->vacancies;
489502

490503
if (vacancy) {
@@ -583,7 +596,7 @@ fiber_pool_expand(struct fiber_pool * fiber_pool, size_t count, bool needs_lock,
583596
// must not run after base is mapped, or the region would leak.
584597
struct fiber_pool_allocation * allocation = RB_ALLOC(struct fiber_pool_allocation);
585598

586-
if (needs_lock) fiber_pool_lock();
599+
if (needs_lock) fiber_pool_lock(); // no xmalloc allocations can occur with this lock held
587600
{
588601
STACK_GROW_DIR_DETECTION;
589602

@@ -702,9 +715,11 @@ fiber_pool_expand(struct fiber_pool * fiber_pool, size_t count, bool needs_lock,
702715
static struct fiber_pool_vacancy *
703716
fiber_pool_expand_and_pop(struct fiber_pool * fiber_pool, size_t count, bool needs_lock, bool unlock_before_raise)
704717
{
705-
struct fiber_pool_vacancy *vacancy_out;
718+
RUBY_ASSERT(needs_lock || (!needs_lock && fiber_pool_locked_p(true)));
719+
struct fiber_pool_vacancy *vacancy_out = NULL;
706720
struct fiber_pool_allocation *allocation = fiber_pool_expand(fiber_pool, count, needs_lock, unlock_before_raise, &vacancy_out);
707721
if (allocation) {
722+
RUBY_ASSERT(vacancy_out);
708723
return vacancy_out;
709724
}
710725
else {
@@ -731,7 +746,7 @@ fiber_pool_initialize(struct fiber_pool * fiber_pool, size_t size, size_t minimu
731746
fiber_pool->vm_stack_size = vm_stack_size;
732747

733748
if (fiber_pool->minimum_count > 0) {
734-
if (RB_UNLIKELY(!fiber_pool_expand(fiber_pool, fiber_pool->minimum_count, false, false, NULL))) {
749+
if (RB_UNLIKELY(!fiber_pool_expand(fiber_pool, fiber_pool->minimum_count, true, true, NULL))) {
735750
rb_raise(rb_eFiberError, "can't allocate initial fiber stacks (%"PRIuSIZE" x %"PRIuSIZE" bytes): %s", fiber_pool->minimum_count, fiber_pool->size, strerror(errno));
736751
}
737752
}
@@ -786,6 +801,7 @@ fiber_pool_allocation_free(struct fiber_pool_allocation * allocation)
786801
static size_t
787802
fiber_pool_stack_expand_count(const struct fiber_pool *pool)
788803
{
804+
ASSERT_fiber_pool_locked();
789805
const size_t maximum_allocations = FIBER_POOL_MAXIMUM_ALLOCATIONS;
790806
const size_t minimum_count = FIBER_POOL_MINIMUM_COUNT;
791807

@@ -964,6 +980,7 @@ fiber_pool_stack_free(struct fiber_pool_stack * stack)
964980
static void
965981
fiber_pool_stack_release(struct fiber_pool_stack * stack)
966982
{
983+
ASSERT_fiber_pool_locked();
967984
struct fiber_pool * pool = stack->pool;
968985
struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(stack->base, stack->size);
969986

0 commit comments

Comments
 (0)