Disable read barrier on explicit compaction request
We don't need a read barrier when the user calls `GC.compact` because we don't allow allocations during GC, so all references should be "live".
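(Context, as the hunks below suggest: with auto-compaction the sweep can run lazily, interleaved with mutator code, so the mutator might touch a page whose objects were just moved; the read barrier exists to catch those reads. An explicit `GC.compact` does the whole mark/compact/sweep while the mutator is stopped, so the barrier is pure overhead. The sketches interleaved with the hunks below illustrate both the flag encoding and the barrier mechanism.)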
parent b4dd7310ca
commit 490b57783d
Notes (git): 2020-11-25 05:38:36 +09:00

2 changed files with 29 additions and 5 deletions
gc.c  | 31 ++++++++++++++++++++++++++++---
@@ -682,7 +682,7 @@ typedef struct rb_objspace {
         unsigned int dont_gc : 1;
         unsigned int dont_incremental : 1;
         unsigned int during_gc : 1;
-        unsigned int during_compacting : 1;
+        unsigned int during_compacting : 2;
         unsigned int gc_stressful: 1;
         unsigned int has_hook: 1;
         unsigned int during_minor_gc : 1;
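Widening the field from one bit to two lets it carry two facts at once: bit 0, compaction is happening this cycle (the autocompact path), and bit 1, compaction was explicitly requested (`GC.compact`). A minimal standalone sketch of that encoding as the rest of this diff uses it; the `flags` struct and `reason_has_compact` here are illustrative stand-ins, not Ruby's definitions:

#include <assert.h>

/* Illustrative stand-in for the relevant rb_objspace flags. */
struct flags {
    /* bit 0: compacting this cycle (autocompact),
     * bit 1: compaction explicitly requested (GC.compact) */
    unsigned int during_compacting : 2;
};

int main(void)
{
    struct flags f = {0};

    /* gc_start: an explicit GC.compact request lands in bit 1. */
    unsigned int reason_has_compact = 1;   /* pretend GPR_FLAG_COMPACT was set */
    f.during_compacting = (!!reason_has_compact) << 1;   /* -> 0b10 */

    /* gc_marks_start: autocompact ORs in bit 0, preserving bit 1. */
    f.during_compacting |= 1;              /* -> 0b11 */

    /* lock_page_body / unlock_page_body: skip the barrier iff explicit. */
    assert(f.during_compacting >> 1);

    /* Elsewhere the field is still tested as a plain boolean: nonzero
     * means some compaction is happening this cycle. */
    assert(f.during_compacting != 0);
    return 0;
}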
@@ -4389,6 +4389,11 @@ static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free);
 static void
 lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
 {
+    /* If this is an explicit compaction (GC.compact), we don't need a read
+     * barrier, so just return early. */
+    if (objspace->flags.during_compacting >> 1) {
+        return;
+    }
 #if defined(_WIN32)
     DWORD old_protect;
 
@@ -4405,6 +4410,11 @@ lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
 static void
 unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
 {
+    /* If this is an explicit compaction (GC.compact), we don't need a read
+     * barrier, so just return early. */
+    if (objspace->flags.during_compacting >> 1) {
+        return;
+    }
 #if defined(_WIN32)
     DWORD old_protect;
 
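Both `lock_page_body` and `unlock_page_body` get the same early return: on an explicit request the heap pages are never protected, so the mutator can never fault on a page holding moved objects. Judging from the `DWORD old_protect` context above, the barrier is built on OS page protection (`VirtualProtect` on Windows, `mprotect` elsewhere). A rough POSIX-only sketch of that shape; the globals and types here are illustrative assumptions, not Ruby's actual code:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

/* Illustrative stand-ins: in Ruby these are the objspace flag and the
 * page-aligned heap_page_body; here we fake both. */
struct heap_page_body { unsigned char mem[1]; };
static int explicit_compaction;  /* plays the role of during_compacting >> 1 */

static void
lock_page_body(struct heap_page_body *body, size_t page_size)
{
    /* Explicit compaction: the mutator is stopped for the whole cycle,
     * so skip installing the barrier entirely. */
    if (explicit_compaction) return;

    /* Revoke all access. A later read faults into a SEGV/BUS handler,
     * which can repair moved references on that page before retrying. */
    if (mprotect(body, page_size, PROT_NONE) != 0) {
        perror("mprotect");
        abort();
    }
}

static void
unlock_page_body(struct heap_page_body *body, size_t page_size)
{
    if (explicit_compaction) return;

    /* Restore access once the page has been fixed up. */
    if (mprotect(body, page_size, PROT_READ | PROT_WRITE) != 0) {
        perror("mprotect");
        abort();
    }
}

int main(void)
{
    size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
    struct heap_page_body *body =
        mmap(NULL, page_size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (body == MAP_FAILED) return 1;

    lock_page_body(body, page_size);    /* any access would now fault */
    unlock_page_body(body, page_size);  /* accessible again */

    munmap(body, page_size);
    return 0;
}

In the real collector the fault-handler side is where moved references get repaired; the point of this commit is that an explicit `GC.compact` never needs any of that machinery.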
@@ -7030,7 +7040,7 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark)
 #endif
         objspace->flags.during_minor_gc = FALSE;
         if (ruby_enable_autocompact) {
-            objspace->flags.during_compacting = TRUE;
+            objspace->flags.during_compacting |= TRUE;
         }
         objspace->profile.major_gc_count++;
         objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
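The switch from `=` to `|=` is load-bearing: by the time marking starts, `gc_start` has already stored the explicit-request bit in `during_compacting` (see the next hunk), and a plain assignment of `TRUE` would clobber it, silently re-enabling the read barrier for an explicit `GC.compact` when autocompact is also on.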
@@ -8057,7 +8067,9 @@ gc_start(rb_objspace_t *objspace, int reason)
 
     /* reason may be clobbered, later, so keep set immediate_sweep here */
     objspace->flags.immediate_sweep = !!((unsigned)reason & GPR_FLAG_IMMEDIATE_SWEEP);
-    objspace->flags.during_compacting = !!((unsigned)reason & GPR_FLAG_COMPACT);
+
+    /* Explicitly enable compaction (GC.compact) */
+    objspace->flags.during_compacting = (!!((unsigned)reason & GPR_FLAG_COMPACT) << 1);
 
     if (!heap_allocated_pages) return FALSE; /* heap is not ready */
     if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
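The `!!` normalizes the masked flag to 0 or 1, so the shift stores the explicit request as exactly 0 or 2: bit 1 is set only for a `GPR_FLAG_COMPACT` request, and that is the bit `lock_page_body` and `unlock_page_body` test with `>> 1`.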
@@ -9247,6 +9259,19 @@ heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
     return 0;
 }
 
+static VALUE
+gc_compact(rb_execution_context_t *ec, VALUE self)
+{
+    /* Clear the heap. */
+    gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qfalse);
+
+    /* At this point, all references are live and the mutator is not allowed
+     * to run, so we don't need a read barrier. */
+    gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);
+
+    return gc_compact_stats(ec, self);
+}
+
 static VALUE
 gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE double_heap, VALUE toward_empty)
 {
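The new `gc_compact` primitive runs two full, immediate collections back to back. The first pass ("Clear the heap.") removes garbage so only live objects remain; the second, with the final argument flipped to `Qtrue` (judging from this diff, the parameter that requests compaction), performs the compaction itself. Because that request routes through `GPR_FLAG_COMPACT` and the `<< 1` in `gc_start`, the explicit bit is set and the read barrier is skipped for the compacting pass.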
gc.rb |  3 +--
@@ -199,8 +199,7 @@ module GC
   end
 
   def self.compact
-    Primitive.gc_start_internal true, true, true, true
-    Primitive.gc_compact_stats
+    Primitive.gc_compact
   end
 
   # call-seq:
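On the Ruby side, `GC.compact` shrinks to a single primitive call: `Primitive.gc_compact` performs both collections and already returns the result of `gc_compact_stats`, so the separate `Primitive.gc_compact_stats` call goes away.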