[Feature #18619] Reverse the order of compaction movement
This commit changes the way compaction moves objects and sweeps pages in order to better facilitate object movement between size pools.

Previously we would advance the scan cursor until we found an empty slot, and then decrement the compact cursor until we found something to move into that slot. We would sweep the page containing the scan cursor before trying to fill it.

In this algorithm we first move the compact cursor down until we find an object to move. We then take a free page from the desired destination heap (always the same heap in this current iteration of the code). If there is no free page, we sweep the page at the sweeping_page cursor, add it to the free pages, advance the cursor to the next page, and try again. We sweep one page from each size pool in this way, and then repeat that process until all the size pools are compacted (all the cursors have met), and then we update references and sweep the rest of the heap.
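To make the cursor scheme concrete, here is a minimal, self-contained C sketch of the idea only, not the gc.c implementation: the page array, SLOTS, and take_free_slot are toy stand-ins, and the real collector works per size pool on heap pages with mark/pin bitmaps. The compact cursor starts at the last page and walks toward the front, while destination slots are obtained by sweeping forward from the first page; compaction stops when the two cursors meet.

    /* Toy model of the reversed compaction order. Illustration only: names and
     * structures here are hypothetical, not the ones used in gc.c. */
    #include <stdio.h>
    #include <stdbool.h>

    #define NPAGES 8
    #define SLOTS  4

    typedef struct { bool live[SLOTS]; } page_t;

    static page_t pages[NPAGES];
    static int sweep_cursor = 0;            /* walks forward, yielding destination pages */
    static int compact_cursor = NPAGES - 1; /* walks backward over pages to evacuate     */

    /* Sweep forward from sweep_cursor until a page with a free slot is found,
     * or the cursors meet. */
    static bool take_free_slot(int *page, int *slot)
    {
        while (sweep_cursor < compact_cursor) {
            for (int s = 0; s < SLOTS; s++) {
                if (!pages[sweep_cursor].live[s]) {
                    *page = sweep_cursor;
                    *slot = s;
                    return true;
                }
            }
            sweep_cursor++; /* page is full: advance to the next one */
        }
        return false; /* cursors met: compaction is done */
    }

    int main(void)
    {
        /* Mark some slots live so there is something to move. */
        for (int p = 0; p < NPAGES; p++)
            for (int s = 0; s < SLOTS; s++)
                pages[p].live[s] = ((p + s) % 3 != 0);

        while (compact_cursor > sweep_cursor) {
            for (int s = 0; s < SLOTS; s++) {
                if (!pages[compact_cursor].live[s]) continue;
                int dp, ds;
                if (!take_free_slot(&dp, &ds)) goto done; /* cursors met */
                pages[dp].live[ds] = true;                /* "move" the object */
                pages[compact_cursor].live[s] = false;
                printf("moved %d:%d -> %d:%d\n", compact_cursor, s, dp, ds);
            }
            compact_cursor--; /* page evacuated: move the compact cursor down */
        }
    done:
        return 0;
    }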
This commit is contained in:
parent 7dfea79ebf
commit 76572e5a7f
Notes: git
2022-04-01 21:46:19 +09:00
1 changed file with 233 additions and 60 deletions
gc.c
@@ -709,7 +709,8 @@ typedef struct rb_size_pool_struct {
enum gc_mode {
    gc_mode_none,
    gc_mode_marking,
    gc_mode_sweeping
    gc_mode_sweeping,
    gc_mode_compacting,
};

typedef struct rb_objspace {
@@ -1007,6 +1008,7 @@ gc_mode_verify(enum gc_mode mode)
      case gc_mode_none:
      case gc_mode_marking:
      case gc_mode_sweeping:
      case gc_mode_compacting:
        break;
      default:
        rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
@@ -5040,6 +5042,44 @@ try_move_plane(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page,
    return false;
}

static bool
try_move2(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *free_page, VALUE src)
{
    if (!free_page) {
        return false;
    }

    /* We should return true if either src is successfully moved, or src is
     * unmoveable. A false return will cause the sweeping cursor to be
     * incremented to the next page, and src will attempt to move again */
    if (gc_is_moveable_obj(objspace, src)) {
        GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(src), src));

        VALUE dest = (VALUE)free_page->freelist;
        asan_unpoison_object(dest, false);
        if (!dest) {
            /* if we can't get something from the freelist then the page must be
             * full */
            return false;
        }
        free_page->freelist = RANY(dest)->as.free.next;

        GC_ASSERT(RB_BUILTIN_TYPE(dest) == T_NONE);
        GC_ASSERT(free_page->slot_size == GET_HEAP_PAGE(src)->slot_size);

        objspace->rcompactor.moved_count_table[BUILTIN_TYPE(src)]++;
        objspace->rcompactor.total_moved++;

        //fprintf(stderr, "\t\tMoving objects: %s\n\t\t\t-> %s\n", obj_info(src), obj_info(dest));
        gc_move(objspace, src, dest, free_page->slot_size);
        gc_pin(objspace, src);
        FL_SET(src, FL_FROM_FREELIST);
        free_page->free_slots--;
    }

    return true;
}

static short
try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page, VALUE dest)
{
@@ -5289,8 +5329,10 @@ check_stack_for_moved(rb_objspace_t *objspace)
    each_machine_stack_value(ec, revert_machine_stack_references);
}

static void gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode);

static void
gc_compact_finish(rb_objspace_t *objspace, rb_size_pool_t *pool, rb_heap_t *heap)
gc_compact_finish(rb_objspace_t *objspace)
{
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
@@ -5314,6 +5356,7 @@ gc_compact_finish(rb_objspace_t *objspace, rb_size_pool_t *pool, rb_heap_t *heap
        rb_size_pool_t *size_pool = &size_pools[i];
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
        heap->compact_cursor = NULL;
        heap->free_pages = NULL;
        heap->compact_cursor_index = 0;
    }

@@ -5448,16 +5491,12 @@ gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bit
        }
#endif
        if (obj_free(objspace, vp)) {
            if (heap->compact_cursor) {
                /* We *want* to fill this slot */
                MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(vp), vp);
            }
            else {
                (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, BASE_SLOT_SIZE);
                heap_page_add_freeobj(objspace, sweep_page, vp);
                gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
                ctx->freed_slots++;
            }
            // always add free slots back to the swept pages freelist,
            // so that if we're compacting, we can re-use the slots
            (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, BASE_SLOT_SIZE);
            heap_page_add_freeobj(objspace, sweep_page, vp);
            gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
            ctx->freed_slots++;
        }
        else {
            ctx->final_slots++;
@@ -5486,13 +5525,7 @@ gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bit
            /* already counted */
            break;
          case T_NONE:
            if (heap->compact_cursor) {
                /* We *want* to fill this slot */
                MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(vp), vp);
            }
            else {
                ctx->empty_slots++; /* already freed */
            }
            ctx->empty_slots++; /* already freed */
            break;
        }
    }
@@ -5502,7 +5535,7 @@ gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bit
}

static inline void
gc_sweep_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct gc_sweep_context *ctx)
gc_sweep_page(rb_objspace_t *objspace, rb_heap_t *heap, struct gc_sweep_context *ctx)
{
    struct heap_page *sweep_page = ctx->page;

@@ -5513,27 +5546,18 @@ gc_sweep_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *hea

    gc_report(2, objspace, "page_sweep: start.\n");

    if (heap->compact_cursor) {
        if (sweep_page == heap->compact_cursor) {
            /* The compaction cursor and sweep page met, so we need to quit compacting */
            gc_report(5, objspace, "Quit compacting, mark and compact cursor met\n");
            gc_compact_finish(objspace, size_pool, heap);
        }
        else {
            /* We anticipate filling the page, so NULL out the freelist. */
            asan_unpoison_memory_region(&sweep_page->freelist, sizeof(RVALUE*), false);
            sweep_page->freelist = NULL;
            asan_poison_memory_region(&sweep_page->freelist, sizeof(RVALUE*));
        }
#if RGENGC_CHECK_MODE
    if (!objspace->flags.immediate_sweep) {
        GC_ASSERT(sweep_page->flags.before_sweep == TRUE);
    }

#endif
    sweep_page->flags.before_sweep = FALSE;
    sweep_page->free_slots = 0;

    p = (uintptr_t)sweep_page->start;
    bits = sweep_page->mark_bits;

    int page_rvalue_count = sweep_page->total_slots * (size_pool->slot_size / BASE_SLOT_SIZE);
    int page_rvalue_count = sweep_page->total_slots * (sweep_page->slot_size / BASE_SLOT_SIZE);
    int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
    if (out_of_range_bits != 0) { // sizeof(RVALUE) == 64
        bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
@@ -5555,12 +5579,6 @@ gc_sweep_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *hea
        p += BITS_BITLENGTH * BASE_SLOT_SIZE;
    }

    if (heap->compact_cursor) {
        if (gc_fill_swept_page(objspace, heap, sweep_page, ctx)) {
            gc_compact_finish(objspace, size_pool, heap);
        }
    }

    if (!heap->compact_cursor) {
        gc_setup_mark_bits(sweep_page);
    }
@@ -5626,6 +5644,7 @@ gc_mode_name(enum gc_mode mode)
      case gc_mode_none: return "none";
      case gc_mode_marking: return "marking";
      case gc_mode_sweeping: return "sweeping";
      case gc_mode_compacting: return "compacting";
      default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
    }
}
@@ -5638,7 +5657,8 @@ gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
    switch (prev_mode) {
      case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
      case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
      case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none); break;
      case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none || mode == gc_mode_compacting); break;
      case gc_mode_compacting: GC_ASSERT(mode == gc_mode_none); break;
    }
#endif
    if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
@@ -5677,6 +5697,13 @@ gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
#if GC_ENABLE_INCREMENTAL_MARK
    heap->pooled_pages = NULL;
#endif
    if (!objspace->flags.immediate_sweep) {
        struct heap_page *page = NULL;

        ccan_list_for_each(&heap->pages, page, page_node) {
            page->flags.before_sweep = TRUE;
        }
    }
}

#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
@@ -5822,7 +5849,7 @@ gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *hea
            .freed_slots = 0,
            .empty_slots = 0,
        };
        gc_sweep_page(objspace, size_pool, heap, &ctx);
        gc_sweep_page(objspace, heap, &ctx);
        int free_slots = ctx.freed_slots + ctx.empty_slots;

        heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);
@@ -5995,6 +6022,7 @@ static void
gc_compact_start(rb_objspace_t *objspace)
{
    struct heap_page *page = NULL;
    gc_mode_transition(objspace, gc_mode_compacting);

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
@@ -6018,6 +6046,8 @@ gc_compact_start(rb_objspace_t *objspace)
    install_handlers();
}

static void gc_sweep_compact(rb_objspace_t *objspace);

static void
gc_sweep(rb_objspace_t *objspace)
{
@@ -6025,33 +6055,21 @@ gc_sweep(rb_objspace_t *objspace)

    gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);

    gc_sweep_start(objspace);
    if (objspace->flags.during_compacting) {
        gc_sweep_compact(objspace);
    }

    if (immediate_sweep) {
#if !GC_ENABLE_LAZY_SWEEP
        gc_prof_sweep_timer_start(objspace);
#endif
        gc_sweep_start(objspace);
        if (objspace->flags.during_compacting) {
            gc_compact_start(objspace);
        }

        gc_sweep_rest(objspace);
#if !GC_ENABLE_LAZY_SWEEP
        gc_prof_sweep_timer_stop(objspace);
#endif
    }
    else {
        struct heap_page *page = NULL;
        gc_sweep_start(objspace);

        if (ruby_enable_autocompact && is_full_marking(objspace)) {
            gc_compact_start(objspace);
        }

        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            ccan_list_for_each(&(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages), page, page_node) {
                page->flags.before_sweep = TRUE;
            }
        }

        /* Sweep every size pool. */
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
@@ -8261,6 +8279,155 @@ gc_marks_step(rb_objspace_t *objspace, size_t slots)
}
#endif

static bool
gc_compact_heap_cursors_met_p(rb_heap_t *heap)
{
    return heap->sweeping_page == heap->compact_cursor;
}

static bool
gc_compact_move(rb_objspace_t *objspace, rb_heap_t *heap, VALUE src)
{
    GC_ASSERT(BUILTIN_TYPE(src) != T_MOVED);
    rb_heap_t *dheap = heap;

    if (gc_compact_heap_cursors_met_p(dheap)) {
        return false;
    }
    while (!try_move2(objspace, dheap, dheap->free_pages, src)) {
        struct gc_sweep_context ctx = {
            .page = dheap->sweeping_page,
            .final_slots = 0,
            .freed_slots = 0,
            .empty_slots = 0,
        };
        gc_sweep_page(objspace, heap, &ctx);
        if (dheap->sweeping_page->free_slots > 0) {
            heap_add_freepage(dheap, dheap->sweeping_page);
        };

        dheap->sweeping_page = ccan_list_next(&dheap->pages, dheap->sweeping_page, page_node);
        if (gc_compact_heap_cursors_met_p(dheap)) {
            return false;
        }
    }
    return true;
}

static bool
gc_compact_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct heap_page *page) {
    short slot_size = page->slot_size;
    short slot_bits = slot_size / BASE_SLOT_SIZE;
    GC_ASSERT(slot_bits > 0);

    do {
        VALUE vp = (VALUE)p;
        GC_ASSERT(vp % sizeof(RVALUE) == 0);

        if (bitset & 1) {
            objspace->rcompactor.considered_count_table[BUILTIN_TYPE(vp)]++;

            if (!gc_compact_move(objspace, heap, vp)) {
                // the cursors met. bubble up
                return false;
            }
        }
        p += slot_size;
        bitset >>= slot_bits;
    } while (bitset);

    return true;
}

// Iterate over all the objects in the page, moving them to where they want to go
static bool
gc_compact_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct heap_page *page)
{
    GC_ASSERT(page == heap->compact_cursor);

    bits_t *mark_bits, *pin_bits;
    bits_t bitset;
    uintptr_t p = page->start;

    mark_bits = page->mark_bits;
    pin_bits = page->pinned_bits;

    // objects that can be moved are marked and not pinned
    bitset = (mark_bits[0] & ~pin_bits[0]);
    bitset >>= NUM_IN_PAGE(p);
    if (bitset) {
        if (!gc_compact_plane(objspace, heap, (uintptr_t)p, bitset, page))
            return false;
    }
    p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;

    for (int j = 1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
        bitset = (mark_bits[j] & ~pin_bits[j]);
        if (bitset) {
            if (!gc_compact_plane(objspace, heap, (uintptr_t)p, bitset, page))
                return false;
        }
        p += BITS_BITLENGTH * BASE_SLOT_SIZE;
    }

    return true;
}

static bool
gc_compact_all_compacted_p(rb_objspace_t *objspace) {
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        rb_size_pool_t *size_pool = &size_pools[i];
        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);

        if (heap->total_pages > 0 &&
                !gc_compact_heap_cursors_met_p(heap)) {
            return false;
        }
    }

    return true;
}

static void
gc_sweep_compact(rb_objspace_t *objspace)
{
    gc_compact_start(objspace);
#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif

    while (!gc_compact_all_compacted_p(objspace)) {
        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
            rb_size_pool_t *size_pool = &size_pools[i];
            rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);

            if (gc_compact_heap_cursors_met_p(heap)) {
                continue;
            }

            struct heap_page *start_page = heap->compact_cursor;

            if (!gc_compact_page(objspace, size_pool, heap, start_page)) {
                GC_ASSERT(heap->sweeping_page == heap->compact_cursor);
                lock_page_body(objspace, GET_PAGE_BODY(start_page->start));

                continue;
            }

            // If we get here, we've finished moving all objects on the compact_cursor page,
            // so we can lock it and move the cursor on to the next one.
            lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
            heap->compact_cursor = ccan_list_prev(&heap->pages, heap->compact_cursor, page_node);
        }
    }

    gc_compact_finish(objspace);

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif
}

static void
gc_marks_rest(rb_objspace_t *objspace)
{
@@ -9042,7 +9209,12 @@ gc_start(rb_objspace_t *objspace, unsigned int reason)
    objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);

    /* Explicitly enable compaction (GC.compact) */
    objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
    if (do_full_mark && ruby_enable_autocompact) {
        objspace->flags.during_compacting = TRUE;
    }
    else {
        objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
    }

    if (!heap_allocated_pages) return FALSE; /* heap is not ready */
    if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
@@ -9629,6 +9801,7 @@ gc_sort_heap_by_empty_slots(rb_objspace_t *objspace)
        struct heap_page *page = 0, **page_list = malloc(size);
        size_t i = 0;

        SIZE_POOL_EDEN_HEAP(size_pool)->free_pages = NULL;
        ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
            page_list[i++] = page;
            GC_ASSERT(page);