@@ -475,6 +475,7 @@ fiber_pool_allocate_memory(size_t * count, size_t stride)
475475 void * base = VirtualAlloc (0 , (* count )* stride , MEM_COMMIT , PAGE_READWRITE );
476476
477477 if (!base ) {
478+ errno = rb_w32_map_errno (GetLastError ());
478479 * count = (* count ) >> 1 ;
479480 }
480481 else {
@@ -506,93 +507,105 @@ fiber_pool_allocate_memory(size_t * count, size_t stride)
506507}
507508
508509// Given an existing fiber pool, expand it by the specified number of stacks.
510+ //
509511// @param count the maximum number of stacks to allocate.
510- // @return the allocated fiber pool.
512+ // @return the new allocation on success, or NULL on failure with errno set.
513+ // @raise NoMemoryError only if allocating the metadata struct (RB_ALLOC) fails; stack memory allocation failure returns NULL instead.
514+ //
515+ // Call from fiber_pool_stack_acquire_expand with VM lock held, or from
516+ // fiber_pool_initialize before the pool is shared across threads.
511517// @sa fiber_pool_allocation_free
512518static struct fiber_pool_allocation *
513519fiber_pool_expand (struct fiber_pool * fiber_pool , size_t count )
514520{
515- struct fiber_pool_allocation * allocation ;
516- RB_VM_LOCK_ENTER ();
517- {
518- STACK_GROW_DIR_DETECTION ;
521+ STACK_GROW_DIR_DETECTION ;
519522
520- size_t size = fiber_pool -> size ;
521- size_t stride = size + RB_PAGE_SIZE ;
523+ size_t size = fiber_pool -> size ;
524+ size_t stride = size + RB_PAGE_SIZE ;
522525
523- // Allocate the memory required for the stacks:
524- void * base = fiber_pool_allocate_memory (& count , stride );
526+ // Allocate metadata before mmap: ruby_xmalloc (RB_ALLOC) raises on failure and
527+ // must not run after base is mapped, or the region would leak.
528+ struct fiber_pool_allocation * allocation = RB_ALLOC (struct fiber_pool_allocation );
525529
526- if (base == NULL ) {
527- rb_raise (rb_eFiberError , "can't alloc machine stack to fiber (%" PRIuSIZE " x %" PRIuSIZE " bytes): %s" , count , size , ERRNOMSG );
528- }
530+ // Allocate the memory required for the stacks:
531+ void * base = fiber_pool_allocate_memory (& count , stride );
529532
530- struct fiber_pool_vacancy * vacancies = fiber_pool -> vacancies ;
531- allocation = RB_ALLOC (struct fiber_pool_allocation );
533+ if (base == NULL ) {
534+ if (!errno ) errno = ENOMEM ;
535+ ruby_xfree (allocation );
536+ return NULL ;
537+ }
538+
539+ struct fiber_pool_vacancy * vacancies = fiber_pool -> vacancies ;
532540
533- // Initialize fiber pool allocation:
534- allocation -> base = base ;
535- allocation -> size = size ;
536- allocation -> stride = stride ;
537- allocation -> count = count ;
541+ // Initialize fiber pool allocation:
542+ allocation -> base = base ;
543+ allocation -> size = size ;
544+ allocation -> stride = stride ;
545+ allocation -> count = count ;
538546#ifdef FIBER_POOL_ALLOCATION_FREE
539- allocation -> used = 0 ;
547+ allocation -> used = 0 ;
540548#endif
541- allocation -> pool = fiber_pool ;
549+ allocation -> pool = fiber_pool ;
542550
543- if (DEBUG ) {
544- fprintf (stderr , "fiber_pool_expand(%" PRIuSIZE "): %p, %" PRIuSIZE "/%" PRIuSIZE " x [%" PRIuSIZE ":%" PRIuSIZE "]\n" ,
545- count , (void * )fiber_pool , fiber_pool -> used , fiber_pool -> count , size , fiber_pool -> vm_stack_size );
546- }
551+ if (DEBUG ) {
552+ fprintf (stderr , "fiber_pool_expand(%" PRIuSIZE "): %p, %" PRIuSIZE "/%" PRIuSIZE " x [%" PRIuSIZE ":%" PRIuSIZE "]\n" ,
553+ count , (void * )fiber_pool , fiber_pool -> used , fiber_pool -> count , size , fiber_pool -> vm_stack_size );
554+ }
547555
548- // Iterate over all stacks, initializing the vacancy list:
549- for (size_t i = 0 ; i < count ; i += 1 ) {
550- void * base = (char * )allocation -> base + (stride * i );
551- void * page = (char * )base + STACK_DIR_UPPER (size , 0 );
556+ // Iterate over all stacks, initializing the vacancy list:
557+ for (size_t i = 0 ; i < count ; i += 1 ) {
558+ void * base = (char * )allocation -> base + (stride * i );
559+ void * page = (char * )base + STACK_DIR_UPPER (size , 0 );
552560#if defined(_WIN32 )
553- DWORD old_protect ;
554-
555- if (!VirtualProtect (page , RB_PAGE_SIZE , PAGE_READWRITE | PAGE_GUARD , & old_protect )) {
556- VirtualFree (allocation -> base , 0 , MEM_RELEASE );
557- rb_raise (rb_eFiberError , "can't set a guard page: %s" , ERRNOMSG );
558- }
561+ DWORD old_protect ;
562+
563+ if (!VirtualProtect (page , RB_PAGE_SIZE , PAGE_READWRITE | PAGE_GUARD , & old_protect )) {
564+ int error = rb_w32_map_errno (GetLastError ());
565+ VirtualFree (allocation -> base , 0 , MEM_RELEASE );
566+ ruby_xfree (allocation );
567+ errno = error ;
568+ return NULL ;
569+ }
559570#elif defined(__wasi__ )
560- // wasi-libc's mprotect emulation doesn't support PROT_NONE.
561- (void )page ;
571+ // wasi-libc's mprotect emulation doesn't support PROT_NONE.
572+ (void )page ;
562573#else
563- if (mprotect (page , RB_PAGE_SIZE , PROT_NONE ) < 0 ) {
564- munmap (allocation -> base , count * stride );
565- rb_raise (rb_eFiberError , "can't set a guard page: %s" , ERRNOMSG );
566- }
574+ if (mprotect (page , RB_PAGE_SIZE , PROT_NONE ) < 0 ) {
575+ int error = errno ;
576+ if (!error ) error = ENOMEM ;
577+ munmap (allocation -> base , count * stride );
578+ ruby_xfree (allocation );
579+ errno = error ;
580+ return NULL ;
581+ }
567582#endif
568583
569- vacancies = fiber_pool_vacancy_initialize (
570- fiber_pool , vacancies ,
571- (char * )base + STACK_DIR_UPPER (0 , RB_PAGE_SIZE ),
572- size
573- );
584+ vacancies = fiber_pool_vacancy_initialize (
585+ fiber_pool , vacancies ,
586+ (char * )base + STACK_DIR_UPPER (0 , RB_PAGE_SIZE ),
587+ size
588+ );
574589
575590#ifdef FIBER_POOL_ALLOCATION_FREE
576- vacancies -> stack .allocation = allocation ;
591+ vacancies -> stack .allocation = allocation ;
577592#endif
578- }
593+ }
579594
580- // Insert the allocation into the head of the pool:
581- allocation -> next = fiber_pool -> allocations ;
595+ // Insert the allocation into the head of the pool:
596+ allocation -> next = fiber_pool -> allocations ;
582597
583598#ifdef FIBER_POOL_ALLOCATION_FREE
584- if (allocation -> next ) {
585- allocation -> next -> previous = allocation ;
586- }
599+ if (allocation -> next ) {
600+ allocation -> next -> previous = allocation ;
601+ }
587602
588- allocation -> previous = NULL ;
603+ allocation -> previous = NULL ;
589604#endif
590605
591- fiber_pool -> allocations = allocation ;
592- fiber_pool -> vacancies = vacancies ;
593- fiber_pool -> count += count ;
594- }
595- RB_VM_LOCK_LEAVE ();
606+ fiber_pool -> allocations = allocation ;
607+ fiber_pool -> vacancies = vacancies ;
608+ fiber_pool -> count += count ;
596609
597610 return allocation ;
598611}
@@ -614,7 +627,9 @@ fiber_pool_initialize(struct fiber_pool * fiber_pool, size_t size, size_t count,
614627
615628 fiber_pool -> vm_stack_size = vm_stack_size ;
616629
617- fiber_pool_expand (fiber_pool , count );
630+ if (RB_UNLIKELY (!fiber_pool_expand (fiber_pool , count ))) {
631+ rb_raise (rb_eFiberError , "can't allocate initial fiber stacks (%" PRIuSIZE " x %" PRIuSIZE " bytes): %s" , count , fiber_pool -> size , strerror (errno ));
632+ }
618633}
619634
620635#ifdef FIBER_POOL_ALLOCATION_FREE
@@ -662,31 +677,79 @@ fiber_pool_allocation_free(struct fiber_pool_allocation * allocation)
662677}
663678#endif
664679
680+ // Number of stacks to request when expanding the pool (clamped to min/max).
681+ static inline size_t
682+ fiber_pool_stack_expand_count (const struct fiber_pool * pool )
683+ {
684+ const size_t maximum = FIBER_POOL_ALLOCATION_MAXIMUM_SIZE ;
685+ const size_t minimum = pool -> initial_count ;
686+
687+ size_t count = pool -> count ;
688+ if (count > maximum ) count = maximum ;
689+ if (count < minimum ) count = minimum ;
690+
691+ return count ;
692+ }
693+
694+ // When the vacancy list is empty, grow the pool (and run GC only if mmap fails). Caller holds the VM lock.
695+ // Returns NULL if expansion failed after GC + retry; errno is set. Otherwise returns a vacancy.
696+ static struct fiber_pool_vacancy *
697+ fiber_pool_stack_acquire_expand (struct fiber_pool * fiber_pool )
698+ {
699+ size_t count = fiber_pool_stack_expand_count (fiber_pool );
700+
701+ if (DEBUG ) fprintf (stderr , "fiber_pool_stack_acquire: expanding fiber pool by %" PRIuSIZE " stacks\n" , count );
702+
703+ struct fiber_pool_vacancy * vacancy = NULL ;
704+
705+ if (RB_LIKELY (fiber_pool_expand (fiber_pool , count ))) {
706+ return fiber_pool_vacancy_pop (fiber_pool );
707+ }
708+ else {
709+ if (DEBUG ) fprintf (stderr , "fiber_pool_stack_acquire: expand failed (%s), collecting garbage\n" , strerror (errno ));
710+
711+ rb_gc ();
712+
713+ // After running GC, the vacancy list may have some stacks:
714+ vacancy = fiber_pool_vacancy_pop (fiber_pool );
715+ if (RB_LIKELY (vacancy )) {
716+ return vacancy ;
717+ }
718+
719+ // Try to expand the fiber pool again:
720+ if (RB_LIKELY (fiber_pool_expand (fiber_pool , count ))) {
721+ return fiber_pool_vacancy_pop (fiber_pool );
722+ }
723+ else {
724+ // Okay, we really failed to acquire a stack. Give up and return NULL with errno set:
725+ return NULL ;
726+ }
727+ }
728+ }
729+
665730// Acquire a stack from the given fiber pool. If none are available, allocate more.
666731static struct fiber_pool_stack
667732fiber_pool_stack_acquire (struct fiber_pool * fiber_pool )
668733{
669- struct fiber_pool_vacancy * vacancy ;
670- RB_VM_LOCK_ENTER ();
734+ struct fiber_pool_vacancy * vacancy ;
735+
736+ unsigned int lev ;
737+ RB_VM_LOCK_ENTER_LEV (& lev );
671738 {
739+ // Fast path: try to acquire a stack from the vacancy list:
672740 vacancy = fiber_pool_vacancy_pop (fiber_pool );
673741
674742 if (DEBUG ) fprintf (stderr , "fiber_pool_stack_acquire: %p used=%" PRIuSIZE "\n" , (void * )fiber_pool -> vacancies , fiber_pool -> used );
675743
676- if (!vacancy ) {
677- const size_t maximum = FIBER_POOL_ALLOCATION_MAXIMUM_SIZE ;
678- const size_t minimum = fiber_pool -> initial_count ;
679-
680- size_t count = fiber_pool -> count ;
681- if (count > maximum ) count = maximum ;
682- if (count < minimum ) count = minimum ;
744+ // Slow path: If the pool has no vacancies, expand first. Only run GC when expansion fails (e.g. mmap), so we can reclaim stacks from dead fibers before retrying:
745+ if (RB_UNLIKELY (!vacancy )) {
746+ vacancy = fiber_pool_stack_acquire_expand (fiber_pool );
683747
684- fiber_pool_expand (fiber_pool , count );
685-
686- // The free list should now contain some stacks:
687- VM_ASSERT (fiber_pool -> vacancies );
688-
689- vacancy = fiber_pool_vacancy_pop (fiber_pool );
748+ // If expansion failed, raise an error:
749+ if (RB_UNLIKELY (!vacancy )) {
750+ RB_VM_LOCK_LEAVE_LEV (& lev );
751+ rb_raise (rb_eFiberError , "can't allocate fiber stack: %s" , strerror (errno ));
752+ }
690753 }
691754
692755 VM_ASSERT (vacancy );
@@ -705,7 +768,7 @@ fiber_pool_stack_acquire(struct fiber_pool * fiber_pool)
705768
706769 fiber_pool_stack_reset (& vacancy -> stack );
707770 }
708- RB_VM_LOCK_LEAVE ( );
771+ RB_VM_LOCK_LEAVE_LEV ( & lev );
709772
710773 return vacancy -> stack ;
711774}
0 commit comments