Decouple incremental marking step from page sizes
Currently, the number of incremental marking steps is calculated based on the number of pooled pages available. This means that if we make Ruby heap pages larger, it would run fewer incremental marking steps (which would mean each incremental marking step takes longer). This commit changes incremental marking to run after every INCREMENTAL_MARK_STEP_ALLOCATIONS number of allocations. This means that the behaviour of incremental marking remains the same regardless of the Ruby heap page size.

I've benchmarked against discourse benchmarks and did not get a significant change in response times beyond the margin of error. This is expected as this new incremental marking algorithm behaves very similarly to the previous one.
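For readers skimming the diff below, here is a minimal standalone sketch (hypothetical names and values, not the actual gc.c code) of the mechanism the commit message describes: while incremental marking is in progress, every allocation bumps a per-cache counter, and once INCREMENTAL_MARK_STEP_ALLOCATIONS slots have been handed out the allocator must run one incremental marking step and reset the counter before it may allocate again.

/* sketch_incremental_mark_budget.c -- illustrative only; names ending in _sim
 * are hypothetical and do not exist in gc.c. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define INCREMENTAL_MARK_STEP_ALLOCATIONS 500   /* same default the commit introduces */

struct newobj_cache_sim {
    size_t incremental_mark_step_allocated_slots;   /* counterpart of the new field */
};

static bool incremental_marking_sim = true;   /* pretend a major GC is marking incrementally */

/* Stand-in for one incremental marking step (gc_marks_continue in the real code). */
static void marking_step_sim(void)
{
    printf("run one incremental marking step\n");
}

/* Stand-in for the allocation fast path: returns false when the allocation
 * budget for this marking step is used up, true when a slot may be handed out. */
static bool allocate_slot_sim(struct newobj_cache_sim *cache)
{
    if (incremental_marking_sim) {
        if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
            return false;   /* must run a marking step before allocating again */
        }
        cache->incremental_mark_step_allocated_slots++;
    }
    return true;
}

int main(void)
{
    struct newobj_cache_sim cache = {0};

    for (int i = 0; i < 1200; i++) {
        if (!allocate_slot_sim(&cache)) {
            marking_step_sim();
            cache.incremental_mark_step_allocated_slots = 0;   /* reset the budget */
            allocate_slot_sim(&cache);                         /* then retry the allocation */
        }
    }
    /* Across 1200 allocations, two marking steps run -- independent of any heap page size. */
    return 0;
}

Because the budget is counted in allocations rather than in pooled pages, the number and size of marking steps no longer depend on how many object slots fit on a heap page.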
parent ad808506b3
commit dde164e968
Notes: git 2022-03-30 22:33:47 +09:00
2 changed files with 137 additions and 96 deletions
gc.c (232)
@@ -883,6 +883,10 @@ enum {
#define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
#define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN

#if GC_ENABLE_INCREMENTAL_MARK && !defined(INCREMENTAL_MARK_STEP_ALLOCATIONS)
# define INCREMENTAL_MARK_STEP_ALLOCATIONS 500
#endif

#ifdef HAVE_MMAP
# if HAVE_CONST_PAGE_SIZE
/* If we have the HEAP_PAGE and it is a constant, then we can directly use it. */

@@ -1182,7 +1186,7 @@ static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, u

static void gc_marks(rb_objspace_t *objspace, int full_mark);
static void gc_marks_start(rb_objspace_t *objspace, int full);
static int gc_marks_finish(rb_objspace_t *objspace);
static void gc_marks_finish(rb_objspace_t *objspace);
static void gc_marks_rest(rb_objspace_t *objspace);
static void gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);

@@ -2398,22 +2402,36 @@ rb_gc_size_allocatable_p(size_t size)
}

static inline VALUE
ractor_cached_free_region(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache,
                           size_t size_pool_idx)
{
    rb_ractor_newobj_size_pool_cache_t *cache = &cr->newobj_cache.size_pool_caches[size_pool_idx];
    RVALUE *p = cache->freelist;
    rb_ractor_newobj_size_pool_cache_t *size_pool_cache = &cache->size_pool_caches[size_pool_idx];
    RVALUE *p = size_pool_cache->freelist;

#if GC_ENABLE_INCREMENTAL_MARK
    if (is_incremental_marking(objspace)) {
        // Not allowed to allocate without running an incremental marking step
        if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
            return Qfalse;
        }

        if (p) {
            cache->incremental_mark_step_allocated_slots++;
        }
    }
#endif

    if (p) {
        VALUE obj = (VALUE)p;
        MAYBE_UNUSED(const size_t) stride = size_pool_slot_size(size_pool_idx);
        cache->freelist = p->as.free.next;
        size_pool_cache->freelist = p->as.free.next;
#if USE_RVARGC
        asan_unpoison_memory_region(p, stride, true);
#else
        asan_unpoison_object(obj, true);
#endif
#if RGENGC_CHECK_MODE
        GC_ASSERT(cache->using_page->slot_size == (short)stride);
        GC_ASSERT(rb_gc_obj_slot_size(obj) == stride);
        // zero clear
        MEMZERO((char *)obj, char, stride);
#endif

@@ -2425,14 +2443,14 @@ ractor_cached_free_region(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_
}

static struct heap_page *
heap_next_freepage(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
heap_next_free_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
{
    ASSERT_vm_locking();

    struct heap_page *page;

    while (heap->free_pages == NULL) {
        heap_prepare(objspace, size_pool, heap);
        heap_prepare(objspace, size_pool, heap);
    }
    page = heap->free_pages;
    heap->free_pages = page->free_next;

@@ -2446,31 +2464,25 @@ heap_next_freepage(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t
}

static inline void
ractor_set_cache(rb_ractor_t *cr, struct heap_page *page, size_t size_pool_idx)
ractor_cache_set_page(rb_ractor_newobj_cache_t *cache, size_t size_pool_idx,
                      struct heap_page *page)
{
    gc_report(3, &rb_objspace, "ractor_set_cache: Using page %p\n", (void *)GET_PAGE_BODY(page->start));

    rb_ractor_newobj_size_pool_cache_t *cache = &cr->newobj_cache.size_pool_caches[size_pool_idx];
    rb_ractor_newobj_size_pool_cache_t *size_pool_cache = &cache->size_pool_caches[size_pool_idx];

    cache->using_page = page;
    cache->freelist = page->freelist;
    GC_ASSERT(size_pool_cache->freelist == NULL);
    GC_ASSERT(page->free_slots != 0);
    GC_ASSERT(page->freelist != NULL);

    size_pool_cache->using_page = page;
    size_pool_cache->freelist = page->freelist;
    page->free_slots = 0;
    page->freelist = NULL;

    asan_unpoison_object((VALUE)cache->freelist, false);
    GC_ASSERT(RB_TYPE_P((VALUE)cache->freelist, T_NONE));
    asan_poison_object((VALUE)cache->freelist);
}

static inline void
ractor_cache_slots(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
{
    ASSERT_vm_locking();

    rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
    struct heap_page *page = heap_next_freepage(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));

    ractor_set_cache(cr, page, size_pool_idx);
    asan_unpoison_object((VALUE)size_pool_cache->freelist, false);
    GC_ASSERT(RB_TYPE_P((VALUE)size_pool_cache->freelist, T_NONE));
    asan_poison_object((VALUE)size_pool_cache->freelist);
}

static inline VALUE

@@ -2509,6 +2521,58 @@ size_pool_idx_for_size(size_t size)
#endif
}

static VALUE
newobj_alloc(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx, bool vm_locked)
{
    rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
    rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
    rb_ractor_newobj_cache_t *cache = &cr->newobj_cache;

    VALUE obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);

    if (UNLIKELY(obj == Qfalse)) {
        unsigned int lev;
        bool unlock_vm = false;

        if (!vm_locked) {
            RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
            vm_locked = true;
            unlock_vm = true;
        }

        {
            ASSERT_vm_locking();

#if GC_ENABLE_INCREMENTAL_MARK
            if (is_incremental_marking(objspace)) {
                gc_marks_continue(objspace, size_pool, heap);
                cache->incremental_mark_step_allocated_slots = 0;

                // Retry allocation after resetting incremental_mark_step_allocated_slots
                obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
            }
#endif

            if (obj == Qfalse) {
                // Get next free page (possibly running GC)
                struct heap_page *page = heap_next_free_page(objspace, size_pool, heap);
                ractor_cache_set_page(cache, size_pool_idx, page);

                // Retry allocation after moving to new page
                obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);

                GC_ASSERT(obj != Qfalse);
            }
        }

        if (unlock_vm) {
            RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
        }
    }

    return obj;
}

ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx));

static inline VALUE

@@ -2533,11 +2597,7 @@ newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *
        }
    }

    // allocate new slot
    while ((obj = ractor_cached_free_region(objspace, cr, size_pool_idx)) == Qfalse) {
        ractor_cache_slots(objspace, cr, size_pool_idx);
    }
    GC_ASSERT(obj != 0);
    obj = newobj_alloc(objspace, cr, size_pool_idx, true);
    newobj_init(klass, flags, wb_protected, objspace, obj);

    gc_event_hook_prep(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj, newobj_fill(obj, 0, 0, 0));

@@ -2584,12 +2644,11 @@ newobj_of0(VALUE klass, VALUE flags, int wb_protected, rb_ractor_t *cr, size_t a

    size_t size_pool_idx = size_pool_idx_for_size(alloc_size);

    if ((!UNLIKELY(during_gc ||
                   ruby_gc_stressful ||
                   gc_event_hook_available_p(objspace)) &&
         wb_protected &&
         (obj = ractor_cached_free_region(objspace, cr, size_pool_idx)) != Qfalse)) {

    if (!UNLIKELY(during_gc ||
                  ruby_gc_stressful ||
                  gc_event_hook_available_p(objspace)) &&
            wb_protected) {
        obj = newobj_alloc(objspace, cr, size_pool_idx, false);
        newobj_init(klass, flags, wb_protected, objspace, obj);
    }
    else {

@@ -7936,6 +7995,27 @@ gc_verify_transient_heap_internal_consistency(VALUE dmy)
    return Qnil;
}

#if GC_ENABLE_INCREMENTAL_MARK
static void
heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
{
    if (heap->pooled_pages) {
        if (heap->free_pages) {
            struct heap_page *free_pages_tail = heap->free_pages;
            while (free_pages_tail->free_next) {
                free_pages_tail = free_pages_tail->free_next;
            }
            free_pages_tail->free_next = heap->pooled_pages;
        }
        else {
            heap->free_pages = heap->pooled_pages;
        }

        heap->pooled_pages = NULL;
    }
}
#endif

/* marks */

static void

@@ -7947,7 +8027,8 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark)

    if (full_mark) {
#if GC_ENABLE_INCREMENTAL_MARK
        objspace->rincgc.step_slots = (objspace->marked_slots * 2) / ((objspace->rincgc.pooled_slots / HEAP_PAGE_OBJ_LIMIT) + 1);
        size_t incremental_marking_steps = (objspace->rincgc.pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
        objspace->rincgc.step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;

        if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
                       "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
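To make the new step_slots calculation above concrete (the numbers here are made up for illustration, not taken from the commit): if a major GC starts with objspace->rincgc.pooled_slots = 5000 and objspace->marked_slots = 100000, then

    incremental_marking_steps = 5000 / INCREMENTAL_MARK_STEP_ALLOCATIONS + 1 = 5000 / 500 + 1 = 11
    step_slots = (100000 * 2) / 11 = 18181

so each incremental marking step drains roughly 18,000 objects from the mark stack. The old formula divided by (pooled_slots / HEAP_PAGE_OBJ_LIMIT) + 1 instead, and HEAP_PAGE_OBJ_LIMIT grows with the heap page size, so larger pages meant fewer but longer steps; tying the divisor to INCREMENTAL_MARK_STEP_ALLOCATIONS removes that dependency.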
@@ -7965,7 +8046,10 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark)
        objspace->marked_slots = 0;

        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            rgengc_mark_and_rememberset_clear(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
            rb_size_pool_t *size_pool = &size_pools[i];
            rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
            rgengc_mark_and_rememberset_clear(objspace, heap);
            heap_move_pooled_pages_to_free_pages(heap);
        }
    }
    else {

@@ -8029,48 +8113,21 @@ gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap)

    gc_mark_stacked_objects_all(objspace);
}

static struct heap_page *
heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
{
    struct heap_page *page = heap->pooled_pages;

    if (page) {
        heap->pooled_pages = page->free_next;
        heap_add_freepage(heap, page);
    }

    return page;
}
#endif

static int
static void
gc_marks_finish(rb_objspace_t *objspace)
{
#if GC_ENABLE_INCREMENTAL_MARK
    /* finish incremental GC */
    if (is_incremental_marking(objspace)) {
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
            if (heap->pooled_pages) {
                heap_move_pooled_pages_to_free_pages(heap);
                gc_report(1, objspace, "gc_marks_finish: pooled pages are exists. retry.\n");
                return FALSE; /* continue marking phase */
            }
        }

        if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
            rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
                   mark_stack_size(&objspace->mark_stack));
        }

        gc_mark_roots(objspace, 0);

        if (is_mark_stack_empty(&objspace->mark_stack) == FALSE) {
            gc_report(1, objspace, "gc_marks_finish: not empty (%"PRIdSIZE"). retry.\n",
                      mark_stack_size(&objspace->mark_stack));
            return FALSE;
        }
        while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == false);

#if RGENGC_CHECK_MODE >= 2
        if (gc_verify_heap_pages(objspace) != 0) {

@@ -8185,8 +8242,6 @@ gc_marks_finish(rb_objspace_t *objspace)
    rb_ractor_finish_marking();

    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_MARK, 0);

    return TRUE;
}

#if GC_ENABLE_INCREMENTAL_MARK

@@ -8196,10 +8251,8 @@ gc_marks_step(rb_objspace_t *objspace, size_t slots)
    GC_ASSERT(is_marking(objspace));

    if (gc_mark_stacked_objects_incremental(objspace, slots)) {
        if (gc_marks_finish(objspace)) {
            /* finish */
            gc_sweep(objspace);
        }
        gc_marks_finish(objspace);
        gc_sweep(objspace);
    }
    if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE"\n", objspace->marked_slots);
}

@@ -8217,15 +8270,14 @@ gc_marks_rest(rb_objspace_t *objspace)
#endif

    if (is_incremental_marking(objspace)) {
        do {
            while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
        } while (gc_marks_finish(objspace) == FALSE);
        while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
    }
    else {
        gc_mark_stacked_objects_all(objspace);
        gc_marks_finish(objspace);
    }

    gc_marks_finish(objspace);

    /* move to sweep */
    gc_sweep(objspace);
}

@@ -8239,24 +8291,8 @@ gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t
    unsigned int lock_lev;
    gc_enter(objspace, gc_enter_event_mark_continue, &lock_lev);

    int slots = 0;
    const char *from;

    if (heap->pooled_pages) {
        while (heap->pooled_pages && slots < HEAP_PAGE_OBJ_LIMIT) {
            struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
            slots += page->free_slots;
        }
        from = "pooled-pages";
    }
    else if (heap_increment(objspace, size_pool, heap)) {
        slots = heap->free_pages->free_slots;
        from = "incremented-pages";
    }

    if (slots > 0) {
        gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n",
                  slots, from);
    if (heap->free_pages) {
        gc_report(2, objspace, "gc_marks_continue: has pooled pages");
        gc_marks_step(objspace, objspace->rincgc.step_slots);
    }
    else {

@@ -8771,6 +8807,10 @@ rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
void
rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
{
#if GC_ENABLE_INCREMENTAL_MARK
    newobj_cache->incremental_mark_step_allocated_slots = 0;
#endif

    for (size_t size_pool_idx = 0; size_pool_idx < SIZE_POOL_COUNT; size_pool_idx++) {
        rb_ractor_newobj_size_pool_cache_t *cache = &newobj_cache->size_pool_caches[size_pool_idx];

@@ -79,6 +79,7 @@ typedef struct ractor_newobj_size_pool_cache {
} rb_ractor_newobj_size_pool_cache_t;

typedef struct ractor_newobj_cache {
    size_t incremental_mark_step_allocated_slots;
    rb_ractor_newobj_size_pool_cache_t size_pool_caches[SIZE_POOL_COUNT];
} rb_ractor_newobj_cache_t;