@@ -870,7 +870,6 @@ struct heap_page {
870870 struct heap_page_body * body ;
871871 uintptr_t start ;
872872 struct free_slot * freelist ;
873- struct free_slot * deferred_freelist ;
874873 struct ccan_list_node page_node ;
875874
876875 bits_t wb_unprotected_bits [HEAP_PAGE_BITMAP_LIMIT ];
@@ -905,18 +904,6 @@ asan_unlock_freelist(struct heap_page *page)
905904 asan_unpoison_memory_region (& page -> freelist , sizeof (struct free_slot * ), false);
906905}
907906
908- static void
909- asan_lock_deferred_freelist (struct heap_page * page )
910- {
911- asan_poison_memory_region (& page -> deferred_freelist , sizeof (struct free_list * ));
912- }
913-
914- static void
915- asan_unlock_deferred_freelist (struct heap_page * page )
916- {
917- asan_unpoison_memory_region (& page -> deferred_freelist , sizeof (struct free_list * ), false);
918- }
919-
920907static inline bool
921908heap_page_in_global_empty_pages_pool (rb_objspace_t * objspace , struct heap_page * page )
922909{
@@ -1977,7 +1964,7 @@ static void mark_stack_free_cache(mark_stack_t *);
19771964static void heap_page_free (rb_objspace_t * objspace , struct heap_page * page , bool log );
19781965
19791966static inline void
1980- heap_page_add_freeobj (rb_objspace_t * objspace , struct heap_page * page , VALUE obj )
1967+ heap_page_add_freeobj (rb_objspace_t * objspace , struct heap_page * page , VALUE obj , bool from_sweep_thread )
19811968{
19821969 rb_asan_unpoison_object (obj , false);
19831970
@@ -1989,8 +1976,10 @@ heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj
19891976 page -> freelist = slot ;
19901977 asan_lock_freelist (page );
19911978
1992- // Should have already been reset
1993- GC_ASSERT (RVALUE_AGE_GET (obj ) == 0 );
1979+ if (!from_sweep_thread ) {
1980+ // Should have already been reset
1981+ GC_ASSERT (RVALUE_AGE_GET (obj ) == 0 );
1982+ }
19941983
19951984 if (RGENGC_CHECK_MODE &&
19961985 /* obj should belong to page */
@@ -2004,22 +1993,6 @@ heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj
20041993 gc_report (3 , objspace , "heap_page_add_freeobj: add %p to freelist\n" , (void * )obj );
20051994}
20061995
2007- static inline void
2008- heap_page_add_deferred_freeobj (rb_objspace_t * objspace , struct heap_page * page , VALUE obj )
2009- {
2010- rb_asan_unpoison_object (obj , false);
2011-
2012- struct free_slot * slot = (struct free_slot * )obj ;
2013- slot -> flags = 0 ;
2014- asan_unlock_deferred_freelist (page );
2015- slot -> next = page -> deferred_freelist ;
2016- page -> deferred_freelist = slot ;
2017- asan_lock_deferred_freelist (page );
2018-
2019- rb_asan_poison_object (obj );
2020- gc_report (3 , objspace , "heap_page_add_deferred_freeobj: add %p to deferred_freelist\n" , (void * )obj );
2021- }
2022-
20231996static void
20241997heap_allocatable_bytes_expand (rb_objspace_t * objspace ,
20251998 rb_heap_t * heap , size_t free_slots , size_t total_slots , size_t slot_size )
@@ -2384,17 +2357,14 @@ heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page,
23842357 memset (& page -> age_bits [0 ], 0 , sizeof (page -> age_bits ));
23852358
23862359 asan_unlock_freelist (page );
2387- asan_unlock_deferred_freelist (page );
23882360 page -> freelist = NULL ;
2389- page -> deferred_freelist = NULL ;
23902361 asan_unpoison_memory_region (page -> body , HEAP_PAGE_SIZE , false);
23912362 int i = 0 ;
23922363 for (VALUE p = (VALUE )start ; p < start + (slot_count * heap -> slot_size ); p += heap -> slot_size ) {
23932364 i ++ ;
2394- heap_page_add_freeobj (objspace , page , p );
2365+ heap_page_add_freeobj (objspace , page , p , false );
23952366 }
23962367 GC_ASSERT (i == slot_count );
2397- asan_lock_deferred_freelist (page );
23982368 asan_lock_freelist (page );
23992369
24002370 page -> free_slots = slot_count ;
@@ -3956,7 +3926,7 @@ gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bit
39563926 }
39573927 gc_report (3 , objspace , "page_sweep: %s is added to freelist\n" , rb_obj_info (vp ));
39583928 ctx -> empty_slots ++ ;
3959- heap_page_add_freeobj (objspace , sweep_page , vp );
3929+ heap_page_add_freeobj (objspace , sweep_page , vp , false );
39603930 break ;
39613931 case T_ZOMBIE :
39623932 if (ZOMBIE_NEEDS_FREE_P (vp )) {
@@ -3997,7 +3967,7 @@ gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bit
39973967
39983968 gc_report (3 , objspace , "page_sweep: %s (fast path) added to freelist\n" , rb_obj_info (vp ));
39993969 RVALUE_AGE_SET_BITMAP (vp , 0 );
4000- heap_page_add_freeobj (objspace , sweep_page , vp );
3970+ heap_page_add_freeobj (objspace , sweep_page , vp , false );
40013971 (void )VALGRIND_MAKE_MEM_UNDEFINED ((void * )vp , slot_size );
40023972 ctx -> freed_slots ++ ;
40033973 }
@@ -4010,7 +3980,7 @@ gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bit
40103980 if (rb_gc_obj_free (objspace , vp )) {
40113981 gc_report (3 , objspace , "page_sweep: %s is added to freelist\n" , rb_obj_info (vp ));
40123982 RVALUE_AGE_SET_BITMAP (vp , 0 );
4013- heap_page_add_freeobj (objspace , sweep_page , vp );
3983+ heap_page_add_freeobj (objspace , sweep_page , vp , false );
40143984 (void )VALGRIND_MAKE_MEM_UNDEFINED ((void * )vp , slot_size );
40153985 ctx -> freed_slots ++ ;
40163986 }
@@ -4086,7 +4056,7 @@ deferred_free(rb_objspace_t *objspace, VALUE obj)
40864056 struct heap_page * page = GET_HEAP_PAGE (obj );
40874057 psweep_debug (1 , "[gc] deferred free: page(%p) obj(%p) %s (success)\n" , page , (void * )obj , obj_info );
40884058 RVALUE_AGE_SET_BITMAP (obj , 0 );
4089- heap_page_add_freeobj (objspace , page , obj );
4059+ heap_page_add_freeobj (objspace , page , obj , false );
40904060 (void )VALGRIND_MAKE_MEM_UNDEFINED ((void * )obj , page -> slot_size );
40914061 result = true;
40924062 }
@@ -4322,7 +4292,7 @@ heap_page_freelist_append(struct heap_page *page, struct free_slot *freelist)
43224292 }
43234293}
43244294
4325- static void
4295+ static inline void
43264296sweep_in_ruby_thread (rb_objspace_t * objspace , struct heap_page * page , VALUE obj )
43274297{
43284298 page -> pre_deferred_free_slots += 1 ;
@@ -4374,7 +4344,7 @@ gc_pre_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *p
43744344 switch (BUILTIN_TYPE (vp )) {
43754345 case T_MOVED : {
43764346 empties ++ ;
4377- heap_page_add_deferred_freeobj (objspace , page , vp );
4347+ heap_page_add_freeobj (objspace , page , vp , true );
43784348 (void )VALGRIND_MAKE_MEM_UNDEFINED ((void * )vp , page -> slot_size );
43794349 break ;
43804350 }
@@ -4428,9 +4398,9 @@ gc_pre_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *p
44284398 case imemo_throw_data :
44294399 case imemo_tmpbuf :
44304400 case imemo_fields :
4431- case imemo_iseq :
44324401 goto free ;
44334402 case imemo_callinfo :
4403+ case imemo_iseq : // calls rb_yjit_iseq_free which is not concurrency safe
44344404 case imemo_ment :
44354405 // blacklisted due to vm weak references
44364406 sweep_in_ruby_thread (objspace , page , vp );
@@ -4461,7 +4431,7 @@ gc_pre_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *p
44614431 case T_ICLASS :
44624432 debug_free_check (objspace , vp );
44634433 if (!rb_gc_obj_needs_cleanup_p (vp )) {
4464- heap_page_add_deferred_freeobj (objspace , page , vp );
4434+ heap_page_add_freeobj (objspace , page , vp , true );
44654435 psweep_debug (2 , "[sweep] freed: page(%p), obj(%p)\n" , (void * )page , (void * )vp );
44664436 (void )VALGRIND_MAKE_MEM_UNDEFINED ((void * )vp , page -> slot_size );
44674437 freed ++ ;
@@ -4475,7 +4445,7 @@ gc_pre_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *p
44754445 if (RB_LIKELY (rb_gc_obj_free_whitelisted_vm_weak_references_in_sweep_thread (vp ))) {
44764446 bool can_put_back_on_freelist = rb_gc_obj_free (objspace , vp );
44774447 if (can_put_back_on_freelist ) {
4478- heap_page_add_deferred_freeobj (objspace , page , vp );
4448+ heap_page_add_freeobj (objspace , page , vp , true );
44794449 freed ++ ;
44804450 psweep_debug (2 , "[sweep] freed: page(%p), obj(%p)\n" , (void * )page , (void * )vp );
44814451 (void )VALGRIND_MAKE_MEM_UNDEFINED ((void * )vp , page -> slot_size );
@@ -4637,18 +4607,6 @@ clear_pre_sweep_fields(struct heap_page *page)
46374607 page -> pre_freed_malloc_bytes = 0 ;
46384608}
46394609
4640- // add beginning of b to end of a
4641- static void
4642- merge_freelists (struct free_slot * a , struct free_slot * b )
4643- {
4644- if (a && b ) {
4645- while (a -> next ) {
4646- a = a -> next ;
4647- }
4648- a -> next = b ;
4649- }
4650- }
4651-
46524610// Perform incremental (lazy) sweep on a heap by the background sweep thread.
46534611static void
46544612gc_sweep_step_worker (rb_objspace_t * objspace , rb_heap_t * heap )
@@ -5289,32 +5247,12 @@ gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
52895247 if (free_in_user_thread_p ) {
52905248 GC_ASSERT (sweep_page -> free_slots == free_slots ); // gc_sweep_page() sets sweep_page->free slots
52915249 GC_ASSERT (sweep_page -> heap -> total_freed_objects >= (unsigned long )ctx .freed_slots );
5292- GC_ASSERT (!sweep_page -> deferred_freelist );
52935250 } else {
52945251 sweep_page -> free_slots = free_slots ;
52955252 // NOTE: sweep_page->final slots have already been updated by make_zombie
52965253 GC_ASSERT (sweep_page -> free_slots <= sweep_page -> total_slots );
52975254 GC_ASSERT (sweep_page -> final_slots <= sweep_page -> total_slots );
52985255 sweep_page -> heap -> total_freed_objects += ctx .freed_slots ;
5299- // merge freelists
5300- asan_unlock_freelist (sweep_page );
5301- asan_unlock_deferred_freelist (sweep_page );
5302- struct free_slot * deferred_freelist = sweep_page -> deferred_freelist ;
5303- psweep_debug (1 , "[gc] gc_sweep_step: deferred freelist size:%d, free slots:%d\n" , freelist_size (deferred_freelist ), free_slots );
5304- if (deferred_freelist ) {
5305- struct free_slot * cur_list = sweep_page -> freelist ;
5306- psweep_debug (1 , "[gc] gc_sweep_step: sweep_page->freelist size:%d\n" , freelist_size (cur_list ));
5307- if (cur_list ) {
5308- merge_freelists (deferred_freelist , cur_list );
5309- }
5310- sweep_page -> freelist = deferred_freelist ;
5311- sweep_page -> deferred_freelist = NULL ;
5312- }
5313- else {
5314- GC_ASSERT (sweep_page -> pre_freed_slots == 0 );
5315- }
5316- asan_lock_deferred_freelist (sweep_page );
5317- asan_lock_freelist (sweep_page );
53185256
53195257 if (sweep_page -> pre_freed_malloc_bytes > 0 ) {
53205258 atomic_sub_nounderflow (& malloc_increase , sweep_page -> pre_freed_malloc_bytes );
@@ -5588,7 +5526,7 @@ invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, uintptr_
55885526 struct heap_page * orig_page = GET_HEAP_PAGE (object );
55895527 orig_page -> free_slots ++ ;
55905528 RVALUE_AGE_SET_BITMAP (object , 0 );
5591- heap_page_add_freeobj (objspace , orig_page , object );
5529+ heap_page_add_freeobj (objspace , orig_page , object , false );
55925530
55935531 GC_ASSERT (RVALUE_MARKED (objspace , forwarding_object ));
55945532 GC_ASSERT (BUILTIN_TYPE (forwarding_object ) != T_MOVED );