Add `GC.auto_compact= true/false` and `GC.auto_compact`

* `GC.auto_compact=`, `GC.auto_compact` can be used to control when
  compaction runs.  Setting `auto_compact=` to true will cause
  compaction to occur during major collections.  At the moment,
  compaction adds significant overhead to major collections, so please
  test first!

[Feature #17176]
Aaron Patterson, 2020-11-02 14:40:29 -08:00
commit 67b2c21c32, parent 79b242260b
9 changed files with 810 additions and 376 deletions
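Before the per-file diffs, a minimal usage sketch of the new API. It mirrors the tests added in this commit; the :compact_count key of GC.stat is assumed to exist in builds that include this change.

# Minimal sketch: enable automatic compaction, force a major GC, and confirm
# that the compactor ran, then restore the previous setting (as the tests do).
previous = GC.auto_compact
begin
  GC.auto_compact = true               # compact on every major collection
  before = GC.stat(:compact_count)     # compactions performed so far
  GC.start                             # full (major) collection
  puts "compactions run: #{GC.stat(:compact_count) - before}"
ensure
  GC.auto_compact = previous           # restore the original setting
end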

NEWS.md

@@ -262,6 +262,16 @@ Outstanding ones only.
* Warning#warn now supports a category kwarg.
[[Feature #17122]]
* GC
* New method
* `GC.auto_compact=`, `GC.auto_compact` can be used to control when
compaction runs. Setting `auto_compact=` to true will cause
compaction to occur during major collections. At the moment,
compaction adds significant overhead to major collections, so please
test first!
[[Feature #17176]]
## Stdlib updates
Outstanding ones only.

compile.c

@@ -1304,6 +1304,9 @@ new_insn_send(rb_iseq_t *iseq, int line_no, ID id, VALUE argc, const rb_iseq_t *
VALUE *operands = compile_data_calloc2(iseq, sizeof(VALUE), 2);
operands[0] = (VALUE)new_callinfo(iseq, id, FIX2INT(argc), FIX2INT(flag), keywords, blockiseq != NULL);
operands[1] = (VALUE)blockiseq;
if (blockiseq) {
RB_OBJ_WRITTEN(iseq, Qundef, blockiseq);
}
return new_insn_core(iseq, line_no, BIN(send), 2, operands);
}

gc.c

(File diff suppressed because it is too large.)

gc.rb

@@ -31,11 +31,32 @@ module GC
# are not guaranteed to be future-compatible, and may be ignored if the
# underlying implementation does not support them.
def self.start full_mark: true, immediate_mark: true, immediate_sweep: true
Primitive.gc_start_internal full_mark, immediate_mark, immediate_sweep
Primitive.gc_start_internal full_mark, immediate_mark, immediate_sweep, false
end
def garbage_collect full_mark: true, immediate_mark: true, immediate_sweep: true
Primitive.gc_start_internal full_mark, immediate_mark, immediate_sweep
Primitive.gc_start_internal full_mark, immediate_mark, immediate_sweep, false
end
# call-seq:
# GC.auto_compact -> true or false
#
# Returns whether or not automatic compaction has been enabled.
#
def self.auto_compact
Primitive.gc_get_auto_compact
end
# call-seq:
# GC.auto_compact = flag
#
# Updates automatic compaction mode.
#
# When enabled, the compactor will execute on every major collection.
#
# Enabling compaction will degrade performance on major collections.
def self.auto_compact=(flag)
Primitive.gc_set_auto_compact(flag)
end
# call-seq:
@@ -163,8 +184,23 @@ module GC
Primitive.gc_latest_gc_info hash_or_key
end
# call-seq:
# GC.latest_compact_info -> {:considered=>{:T_CLASS=>11}, :moved=>{:T_CLASS=>11}}
#
# Returns information about objects moved in the most recent GC compaction.
#
# The returned hash has two keys :considered and :moved. The hash for
# :considered lists the number of objects that were considered for movement
# by the compactor, and the :moved hash lists the number of objects that
# were actually moved. Some objects can't be moved (maybe they were pinned)
# so these numbers can be used to calculate compaction efficiency.
def self.latest_compact_info
Primitive.gc_compact_stats
end
def self.compact
Primitive.rb_gc_compact
Primitive.gc_start_internal true, true, true, true
Primitive.gc_compact_stats
end
# call-seq:
@@ -182,13 +218,23 @@ module GC
# object, that object should be pushed on the mark stack, and will
# make a SEGV.
def self.verify_compaction_references(toward: nil, double_heap: false)
Primitive.gc_verify_compaction_references(toward, double_heap)
if double_heap
Primitive.gc_double_heap_size
end
if toward == :empty
Primitive.gc_sort_heap_by_empty_slots
end
Primitive.gc_start_internal true, true, true, true
Primitive.gc_check_references_for_moved
Primitive.gc_compact_stats
end
end
module ObjectSpace
def garbage_collect full_mark: true, immediate_mark: true, immediate_sweep: true
Primitive.gc_start_internal full_mark, immediate_mark, immediate_sweep
Primitive.gc_start_internal full_mark, immediate_mark, immediate_sweep, false
end
module_function :garbage_collect
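To round out the gc.rb changes, two hedged sketches; neither is part of the commit itself. First, the "compaction efficiency" calculation that the GC.latest_compact_info documentation alludes to, with the stats hash layout taken from its call-seq example:

# Sketch: run a manual compaction and report, per object type, how many of the
# objects considered for movement were actually moved.
stats = GC.compact   # same hash as GC.latest_compact_info
stats[:considered].each do |type, considered|
  moved = stats[:moved].fetch(type, 0)
  puts format("%-12s %d/%d moved (%.1f%%)", type, moved, considered,
              100.0 * moved / considered)
end

Second, a sketch of calling the GC.verify_compaction_references rewrite; the keyword arguments match the new Ruby definition, and the crash-on-stale-reference behaviour is what its documentation describes:

# Sketch: stress reference updating. double_heap: true doubles the heap so every
# object has room to move; toward: :empty sorts pages so objects migrate toward
# empty slots. A stale reference to a moved object crashes the process, which is
# the failure mode the documentation warns about.
stats = GC.verify_compaction_references(double_heap: true, toward: :empty)
p stats[:moved]      # how many objects of each type were relocated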

internal/vm.h

@@ -47,6 +47,7 @@ rb_serial_t rb_next_class_serial(void);
/* vm.c */
VALUE rb_obj_is_thread(VALUE obj);
void rb_vm_mark(void *ptr);
void rb_vm_each_stack_value(void *ptr, void (*cb)(VALUE, void*), void *ctx);
PUREFUNC(VALUE rb_vm_top_self(void));
void rb_vm_inc_const_missing_count(void);
const void **rb_vm_get_insns_address_table(void);

proc.c

@@ -67,7 +67,7 @@ block_mark(const struct rb_block *block)
RUBY_MARK_MOVABLE_UNLESS_NULL(captured->self);
RUBY_MARK_MOVABLE_UNLESS_NULL((VALUE)captured->code.val);
if (captured->ep && captured->ep[VM_ENV_DATA_INDEX_ENV] != Qundef /* cfunc_proc_t */) {
RUBY_MARK_MOVABLE_UNLESS_NULL(VM_ENV_ENVVAL(captured->ep));
rb_gc_mark(VM_ENV_ENVVAL(captured->ep));
}
}
break;
@@ -90,9 +90,6 @@ block_compact(struct rb_block *block)
struct rb_captured_block *captured = &block->as.captured;
captured->self = rb_gc_location(captured->self);
captured->code.val = rb_gc_location(captured->code.val);
if (captured->ep && captured->ep[VM_ENV_DATA_INDEX_ENV] != Qundef /* cfunc_proc_t */) {
UPDATE_REFERENCE(captured->ep[VM_ENV_DATA_INDEX_ENV]);
}
}
break;
case block_type_symbol:

test/ruby/test_gc_compact.rb

@@ -3,6 +3,71 @@ require 'test/unit'
require 'fiddle'
class TestGCCompact < Test::Unit::TestCase
def test_enable_autocompact
before = GC.auto_compact
GC.auto_compact = true
assert GC.auto_compact
ensure
GC.auto_compact = before
end
def test_disable_autocompact
before = GC.auto_compact
GC.auto_compact = false
refute GC.auto_compact
ensure
GC.auto_compact = before
end
def test_major_compacts
before = GC.auto_compact
GC.auto_compact = true
compact = GC.stat :compact_count
GC.start
assert_operator GC.stat(:compact_count), :>, compact
ensure
GC.auto_compact = before
end
def test_implicit_compaction_does_something
before = GC.auto_compact
list = []
list2 = []
# Try to make some fragmentation
500.times {
list << Object.new
Object.new
Object.new
}
count = GC.stat :compact_count
GC.auto_compact = true
loop do
break if count < GC.stat(:compact_count)
list2 << Object.new
end
compact_stats = GC.latest_compact_info
refute_predicate compact_stats[:considered], :empty?
refute_predicate compact_stats[:moved], :empty?
ensure
GC.auto_compact = before
end
def test_gc_compact_stats
list = []
list2 = []
# Try to make some fragmentation
500.times {
list << Object.new
Object.new
Object.new
}
compact_stats = GC.compact
refute_predicate compact_stats[:considered], :empty?
refute_predicate compact_stats[:moved], :empty?
end
def memory_location(obj)
(Fiddle.dlwrap(obj) >> 1)
end

transient_heap.c

@@ -864,26 +864,17 @@ blocks_clear_marked_index(struct transient_heap_block* block)
static void
transient_heap_block_update_refs(struct transient_heap* theap, struct transient_heap_block* block)
{
int i=0, n=0;
int marked_index = block->info.last_marked_index;
while (i<block->info.index) {
void *ptr = &block->buff[i];
struct transient_alloc_header *header = ptr;
while (marked_index >= 0) {
struct transient_alloc_header *header = alloc_header(block, marked_index);
asan_unpoison_memory_region(header, sizeof *header, false);
void *poisoned = __asan_region_is_poisoned((void *)header->obj, SIZEOF_VALUE);
asan_unpoison_object(header->obj, false);
header->obj = rb_gc_location(header->obj);
if (poisoned) {
asan_poison_object(header->obj);
}
i += header->size;
marked_index = header->next_marked_index;
asan_poison_memory_region(header, sizeof *header);
n++;
}
}

vm.c

@@ -2494,6 +2494,36 @@ rb_vm_update_references(void *ptr)
}
}
void
rb_vm_each_stack_value(void *ptr, void (*cb)(VALUE, void*), void *ctx)
{
if (ptr) {
rb_vm_t *vm = ptr;
rb_ractor_t *r = 0;
list_for_each(&vm->ractor.set, r, vmlr_node) {
VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
rb_ractor_status_p(r, ractor_running));
if (r->threads.cnt > 0) {
rb_thread_t *th = 0;
list_for_each(&r->threads.set, th, lt_node) {
VM_ASSERT(th != NULL);
rb_execution_context_t * ec = th->ec;
if (ec->vm_stack) {
VALUE *p = ec->vm_stack;
VALUE *sp = ec->cfp->sp;
while (p <= sp) {
if (!rb_special_const_p(*p)) {
cb(*p, ctx);
}
p++;
}
}
}
}
}
}
}
void
rb_vm_mark(void *ptr)
{