@@ -509,7 +509,6 @@ typedef struct rb_heap_struct {
509509 rb_atomic_t background_sweep_steps ; // only incremented/checked by sweep thread
510510 rb_nativethread_cond_t sweep_page_cond ; // associated with global sweep lock
511511 rb_nativethread_lock_t swept_pages_lock ;
512- rb_nativethread_lock_t sweeping_page_lock ;
513512 size_t pre_swept_slots_deferred ;
514513 bool is_finished_sweeping ;
515514 bool done_background_sweep ;
@@ -1097,7 +1096,6 @@ typedef struct lock_stats {
10971096
10981097static lock_stats_t sweep_lock_stats = {"objspace->sweep_lock" , {{0 }}, 0 };
10991098static lock_stats_t swept_pages_lock_stats = {"heap->swept_pages_lock" , {{0 }}, 0 };
1100- static lock_stats_t sweeping_page_lock_stats = {"heap->sweeping_page_lock" , {{0 }}, 0 };
11011099
11021100
11031101static lock_callsite_stats_t *
@@ -1149,7 +1147,7 @@ print_lock_stats(void)
11491147 fprintf (stderr , "%-40s %-30s %12s %12s %10s\n" , "Lock Name" , "Callsite" , "Uncontended" , "Contended" , "Ratio" );
11501148 fprintf (stderr , "%-40s %-30s %12s %12s %10s\n" , "---------" , "--------" , "-----------" , "---------" , "-----" );
11511149
1152- lock_stats_t * all_stats [] = {& sweep_lock_stats , & swept_pages_lock_stats , & sweeping_page_lock_stats };
1150+ lock_stats_t * all_stats [] = {& sweep_lock_stats , & swept_pages_lock_stats };
11531151
1154-      for (int i = 0 ; i < 3 ; i ++ ) {
1152+      for (int i = 0 ; i < 2 ; i ++ ) {
11551153 lock_stats_t * stats = all_stats [i ];
@@ -4679,14 +4677,8 @@ gc_sweep_step_worker(rb_objspace_t *objspace, rb_heap_t *heap)
46794677 return ;
46804678 }
46814679 while (1 ) {
4682- #if PSWEEP_LOCK_STATS > 0
4683- instrumented_lock_acquire (& heap -> sweeping_page_lock , & sweeping_page_lock_stats );
4684- #else
4685- rb_native_mutex_lock (& heap -> sweeping_page_lock );
4686- #endif
46874680 struct heap_page * sweep_page = heap -> sweeping_page ;
46884681 if (!sweep_page ) {
4689- rb_native_mutex_unlock (& heap -> sweeping_page_lock );
46904682 GC_ASSERT (!heap -> done_background_sweep );
46914683 GC_ASSERT (objspace -> heaps_done_background_sweep < HEAP_COUNT );
46924684 heap -> done_background_sweep = true;
@@ -4698,7 +4690,6 @@ gc_sweep_step_worker(rb_objspace_t *objspace, rb_heap_t *heap)
46984690 struct heap_page * next = ccan_list_next (& heap -> pages , sweep_page , page_node );
46994691
47004692 if (!next ) {
4701- rb_native_mutex_unlock (& heap -> sweeping_page_lock );
47024693 GC_ASSERT (!heap -> done_background_sweep );
47034694 GC_ASSERT (objspace -> heaps_done_background_sweep < HEAP_COUNT );
47044695 heap -> done_background_sweep = true;
@@ -4709,7 +4700,6 @@ gc_sweep_step_worker(rb_objspace_t *objspace, rb_heap_t *heap)
47094700 }
47104701
47114702 heap -> sweeping_page = next ;
4712- rb_native_mutex_unlock (& heap -> sweeping_page_lock );
47134703 heap -> pre_sweeping_page = sweep_page ;
47144704
47154705 sweep_lock_unlock (& objspace -> sweep_lock );
@@ -4777,7 +4767,7 @@ gc_sweep_thread_func(void *ptr)
47774767 rb_objspace_t * objspace = ptr ;
47784768
47794769#ifdef SET_CURRENT_THREAD_NAME
4780- SET_CURRENT_THREAD_NAME ("Ruby/ VM Sweep" );
4770+ SET_CURRENT_THREAD_NAME ("Ruby VM Sweep" );
47814771#endif
47824772
47834773 psweep_debug (1 , "[sweep] sweep_thread start\n" );
@@ -5111,39 +5101,27 @@ gc_sweep_dequeue_page(rb_objspace_t *objspace, rb_heap_t *heap, bool free_in_use
51115101
51125102 struct heap_page * page = NULL ;
51135103
5114- // Avoid taking the global sweep_lock if we can
51155104 if (rb_native_mutex_trylock (& heap -> swept_pages_lock ) == 0 ) {
51165105#if PSWEEP_LOCK_STATS > 0
51175106 step_contention [current_step_type ].swept_pages_trylock_success ++ ;
51185107#endif
5108+ swept_pages_lock_inner :
51195109 if (heap -> swept_pages ) {
51205110 page = heap -> swept_pages ;
51215111 psweep_debug (0 , "[gc] gc_sweep_dequeue_page: got page:%p from heap(%p)->swept_pages (swept_pages lock) (heap %ld)\n" , page , heap , heap - heaps );
51225112 heap -> swept_pages = page -> free_next ;
51235113 }
51245114 rb_native_mutex_unlock (& heap -> swept_pages_lock );
51255115 }
5126- #if PSWEEP_LOCK_STATS > 0
51275116 else {
5128- step_contention [current_step_type ].swept_pages_trylock_fail ++ ;
5129- }
5130- #endif
5131- if (page ) return page ;
5132-
51335117#if PSWEEP_LOCK_STATS > 0
5134- instrumented_lock_acquire (& heap -> sweeping_page_lock , & sweeping_page_lock_stats );
5135- #else
5136- rb_native_mutex_lock (& heap -> sweeping_page_lock );
5118+ step_contention [current_step_type ].swept_pages_trylock_fail ++ ;
51375119#endif
5138- {
5139- if (heap -> sweeping_page ) {
5140- page = heap -> sweeping_page ; // this could be the last page
5141- heap -> sweeping_page = ccan_list_next (& heap -> pages , page , page_node );
5142- psweep_debug (0 , "[gc] gc_sweep_dequeue_page: dequeued unswept page from heap(%p) (heap %ld)\n" , heap , heap - heaps );
5143- * dequeued_unswept_page = true;
5120+ for (volatile int i = 0 ; i < 100 ; i ++ ) {
51445121 }
5122+ rb_native_mutex_lock (& heap -> swept_pages_lock );
5123+ goto swept_pages_lock_inner ;
51455124 }
5146- rb_native_mutex_unlock (& heap -> sweeping_page_lock );
51475125 if (page ) return page ;
51485126
51495127 sweep_lock_lock (& objspace -> sweep_lock );
@@ -5164,6 +5142,12 @@ gc_sweep_dequeue_page(rb_objspace_t *objspace, rb_heap_t *heap, bool free_in_use
51645142 }
51655143 psweep_debug (0 , "[gc] gc_sweep_dequeue_page: got nil page from heap(%p) (heap %ld) end\n" , heap , heap - heaps );
51665144 }
5145+ else {
5146+ * dequeued_unswept_page = true;
5147+ page = heap -> sweeping_page ;
5148+ heap -> sweeping_page = ccan_list_next (& heap -> pages , page , page_node );
5149+ psweep_debug (0 , "[gc] gc_sweep_dequeue_page: dequeued unswept page from heap(%p) (heap %ld)\n" , heap , heap - heaps );
5150+ }
51675151 GC_ASSERT (!objspace -> background_sweep_mode );
51685152 if (!heap -> sweeping_page && !heap -> pre_sweeping_page && !heap -> swept_pages ) {
51695153 heap -> dequeued_last_page = true;
@@ -11178,7 +11162,6 @@ rb_gc_impl_objspace_free(void *objspace_ptr)
1117811162 for (int i = 0 ; i < HEAP_COUNT ; i ++ ) {
1117911163 rb_heap_t * heap = & heaps [i ];
1118011164 rb_native_mutex_destroy (& heap -> swept_pages_lock );
11181- rb_native_mutex_destroy (& heap -> sweeping_page_lock );
1118211165 rb_native_cond_destroy (& heap -> sweep_page_cond );
1118311166 heap -> total_pages = 0 ;
1118411167 heap -> total_slots = 0 ;
@@ -11258,7 +11241,6 @@ rb_gc_impl_after_fork(void *objspace_ptr, rb_pid_t pid)
1125811241 rb_heap_t * heap = & heaps [i ];
1125911242
1126011243 rb_native_mutex_initialize (& heap -> swept_pages_lock );
11261- rb_native_mutex_initialize (& heap -> sweeping_page_lock );
1126211244 rb_native_cond_initialize (& heap -> sweep_page_cond );
1126311245 heap -> pre_sweeping_page = NULL ;
1126411246 heap -> background_sweep_steps = heap -> foreground_sweep_steps ;
@@ -11371,7 +11353,6 @@ rb_gc_impl_objspace_init(void *objspace_ptr)
1137111353
1137211354 ccan_list_head_init (& heap -> pages );
1137311355 rb_native_mutex_initialize (& heap -> swept_pages_lock );
11374- rb_native_mutex_initialize (& heap -> sweeping_page_lock );
1137511356 rb_native_cond_initialize (& heap -> sweep_page_cond );
1137611357 }
1137711358
0 commit comments