Skip to content

Commit cb77224

Browse files
etiennebarrie authored and byroot committed
Improve correctness of allocated object counts without increasing contention
Currently the count of allocated objects for a heap is incremented without regard to parallelism, which leads to incorrect counts. By maintaining a local counter in the ractor newobj cache, and only syncing atomically at some granularity, we can improve the correctness without increasing contention. The allocated object count is also synced when the ractor is freed. Co-authored-by: Jean Boussier <jean.boussier@gmail.com>
1 parent 925da36 commit cb77224

1 file changed

Lines changed: 27 additions & 6 deletions

File tree

gc/default/default.c

Lines changed: 27 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -161,6 +161,7 @@
161161
typedef struct ractor_newobj_heap_cache {
162162
struct free_slot *freelist;
163163
struct heap_page *using_page;
164+
size_t allocated_objects_count;
164165
} rb_ractor_newobj_heap_cache_t;
165166

166167
typedef struct ractor_newobj_cache {
@@ -2287,6 +2288,8 @@ rb_gc_impl_size_allocatable_p(size_t size)
22872288
return size <= heap_slot_size(HEAP_COUNT - 1);
22882289
}
22892290

2291+
static const size_t ALLOCATED_COUNT_STEP = 1024;
2292+
22902293
static inline VALUE
22912294
ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache,
22922295
size_t heap_idx)
@@ -2309,6 +2312,22 @@ ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *ca
23092312
VALUE obj = (VALUE)p;
23102313
rb_asan_unpoison_object(obj, true);
23112314
heap_cache->freelist = p->next;
2315+
2316+
if (rb_gc_multi_ractor_p()) {
2317+
heap_cache->allocated_objects_count++;
2318+
rb_heap_t *heap = &heaps[heap_idx];
2319+
if (heap_cache->allocated_objects_count >= ALLOCATED_COUNT_STEP) {
2320+
RUBY_ATOMIC_SIZE_ADD(heap->total_allocated_objects, heap_cache->allocated_objects_count);
2321+
heap_cache->allocated_objects_count = 0;
2322+
}
2323+
}
2324+
else {
2325+
rb_heap_t *heap = &heaps[heap_idx];
2326+
heap->total_allocated_objects++;
2327+
GC_ASSERT(heap->total_slots >=
2328+
(heap->total_allocated_objects - heap->total_freed_objects - heap->final_slots_count));
2329+
}
2330+
23122331
#if RGENGC_CHECK_MODE
23132332
GC_ASSERT(rb_gc_impl_obj_slot_size(obj) == heap_slot_size(heap_idx));
23142333
// zero clear
@@ -2461,12 +2480,6 @@ newobj_alloc(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t he
24612480
obj = newobj_cache_miss(objspace, cache, heap_idx, vm_locked);
24622481
}
24632482

2464-
rb_heap_t *heap = &heaps[heap_idx];
2465-
heap->total_allocated_objects++;
2466-
GC_ASSERT(rb_gc_multi_ractor_p() ||
2467-
heap->total_slots >=
2468-
(heap->total_allocated_objects - heap->total_freed_objects - heap->final_slots_count));
2469-
24702483
return obj;
24712484
}
24722485

@@ -6261,6 +6274,14 @@ rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache)
62616274
rb_objspace_t *objspace = objspace_ptr;
62626275

62636276
objspace->live_ractor_cache_count--;
6277+
rb_ractor_newobj_cache_t *newobj_cache = (rb_ractor_newobj_cache_t *)cache;
6278+
6279+
for (size_t heap_idx = 0; heap_idx < HEAP_COUNT; heap_idx++) {
6280+
rb_heap_t *heap = &heaps[heap_idx];
6281+
rb_ractor_newobj_heap_cache_t *heap_cache = &newobj_cache->heap_caches[heap_idx];
6282+
RUBY_ATOMIC_SIZE_ADD(heap->total_allocated_objects, heap_cache->allocated_objects_count);
6283+
heap_cache->allocated_objects_count = 0;
6284+
}
62646285

62656286
gc_ractor_newobj_cache_clear(cache, NULL);
62666287
free(cache);

0 commit comments

Comments
 (0)