@@ -303,19 +303,29 @@ rb_nativethread_lock_t fiber_lock;
 pthread_t fiber_pool_lock_owner;
 #endif
 
+static inline bool
+fiber_pool_locked_p(bool fallback)
+{
+#ifdef RUBY_THREAD_PTHREAD_H
+    return pthread_self() == fiber_pool_lock_owner;
+#else
+    return fallback;
+#endif
+}
+
 static inline void
 ASSERT_fiber_pool_locked(void)
 {
 #ifdef RUBY_THREAD_PTHREAD_H
-    VM_ASSERT(pthread_self() == fiber_pool_lock_owner);
+    VM_ASSERT(fiber_pool_locked_p(true));
 #endif
 }
 
 static inline void
 ASSERT_fiber_pool_unlocked(void)
 {
 #ifdef RUBY_THREAD_PTHREAD_H
-    VM_ASSERT(pthread_self() != fiber_pool_lock_owner);
+    VM_ASSERT(!fiber_pool_locked_p(false));
 #endif
 }
 
@@ -439,6 +449,7 @@ fiber_pool_vacancy_reset(struct fiber_pool_vacancy * vacancy)
 inline static struct fiber_pool_vacancy *
 fiber_pool_vacancy_push(struct fiber_pool_vacancy * vacancy, struct fiber_pool_vacancy * head)
 {
+    ASSERT_fiber_pool_locked();
     vacancy->next = head;
 
 #ifdef FIBER_POOL_ALLOCATION_FREE
@@ -471,7 +482,7 @@ fiber_pool_vacancy_remove(struct fiber_pool_vacancy * vacancy)
 inline static struct fiber_pool_vacancy *
 fiber_pool_vacancy_pop(struct fiber_pool * pool)
 {
-    // fiber_pool_lock is acquired
+    ASSERT_fiber_pool_locked();
     struct fiber_pool_vacancy * vacancy = pool->vacancies;
 
     if (vacancy) {
@@ -484,7 +495,7 @@ fiber_pool_vacancy_pop(struct fiber_pool * pool)
 inline static struct fiber_pool_vacancy *
 fiber_pool_vacancy_pop(struct fiber_pool * pool)
 {
-    // fiber_pool_lock is acquired
+    ASSERT_fiber_pool_locked();
     struct fiber_pool_vacancy * vacancy = pool->vacancies;
 
     if (vacancy) {
@@ -583,7 +594,7 @@ fiber_pool_expand(struct fiber_pool * fiber_pool, size_t count, bool needs_lock,
     // must not run after base is mapped, or the region would leak.
     struct fiber_pool_allocation * allocation = RB_ALLOC(struct fiber_pool_allocation);
 
-    if (needs_lock) fiber_pool_lock();
+    if (needs_lock) fiber_pool_lock(); // no xmalloc allocations can occur with this lock held
     {
         STACK_GROW_DIR_DETECTION;
 
@@ -702,9 +713,11 @@ fiber_pool_expand(struct fiber_pool * fiber_pool, size_t count, bool needs_lock,
 static struct fiber_pool_vacancy *
 fiber_pool_expand_and_pop(struct fiber_pool * fiber_pool, size_t count, bool needs_lock, bool unlock_before_raise)
 {
-    struct fiber_pool_vacancy * vacancy_out;
+    RUBY_ASSERT(needs_lock || (!needs_lock && fiber_pool_locked_p(true)));
+    struct fiber_pool_vacancy * vacancy_out = NULL;
     struct fiber_pool_allocation * allocation = fiber_pool_expand(fiber_pool, count, needs_lock, unlock_before_raise, &vacancy_out);
     if (allocation) {
+        RUBY_ASSERT(vacancy_out);
         return vacancy_out;
     }
     else {
@@ -731,7 +744,7 @@ fiber_pool_initialize(struct fiber_pool * fiber_pool, size_t size, size_t minimu
     fiber_pool->vm_stack_size = vm_stack_size;
 
     if (fiber_pool->minimum_count > 0) {
-        if (RB_UNLIKELY(!fiber_pool_expand(fiber_pool, fiber_pool->minimum_count, false, false, NULL))) {
+        if (RB_UNLIKELY(!fiber_pool_expand(fiber_pool, fiber_pool->minimum_count, true, true, NULL))) {
             rb_raise(rb_eFiberError, "can't allocate initial fiber stacks (%" PRIuSIZE " x %" PRIuSIZE " bytes): %s", fiber_pool->minimum_count, fiber_pool->size, strerror(errno));
         }
     }
@@ -786,6 +799,7 @@ fiber_pool_allocation_free(struct fiber_pool_allocation * allocation)
 static size_t
 fiber_pool_stack_expand_count(const struct fiber_pool * pool)
 {
+    ASSERT_fiber_pool_locked();
     const size_t maximum_allocations = FIBER_POOL_MAXIMUM_ALLOCATIONS;
     const size_t minimum_count = FIBER_POOL_MINIMUM_COUNT;
 
@@ -964,6 +978,7 @@ fiber_pool_stack_free(struct fiber_pool_stack * stack)
 static void
 fiber_pool_stack_release(struct fiber_pool_stack * stack)
 {
+    ASSERT_fiber_pool_locked();
     struct fiber_pool * pool = stack->pool;
     struct fiber_pool_vacancy * vacancy = fiber_pool_vacancy_pointer(stack->base, stack->size);
0 commit comments