mirror of https://github.com/ruby/ruby.git
synced 2022-11-09 12:17:21 -05:00
Change heap walking to be safe for object allocation

This commit is contained in:
parent f9f13a4f6d
commit b25361f731

Notes: git, 2021-03-25 03:31:33 +09:00

2 changed files with 95 additions and 46 deletions
gc.c                          | 134 (88 additions, 46 deletions)
test/ruby/test_objectspace.rb |   7 (7 additions)
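Summary of the change, as the diff below shows: objspace_each_objects previously walked heap_pages_sorted in place, so a callback that allocated objects could mutate the heap while it was being walked. It now snapshots the eden page list into a malloc'ed buffer, visits only pages present in both the snapshot and the live list, and wraps the walk in rb_ensure so that the buffer is freed and incremental GC is re-enabled even if the callback raises.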
				
			
--- a/gc.c
+++ b/gc.c
@@ -3216,53 +3216,67 @@ Init_gc_stress(void)
 
 typedef int each_obj_callback(void *, void *, size_t, void *);
 
-static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data);
+static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected);
 static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
 
-struct each_obj_args {
+struct each_obj_data {
     rb_objspace_t *objspace;
+    bool reenable_incremental;
+
     each_obj_callback *callback;
     void *data;
+
+    struct heap_page **pages;
+    size_t pages_count;
 };
 
-static void
-objspace_each_objects_without_setup(rb_objspace_t *objspace, each_obj_callback *callback, void *data)
-{
-    size_t i;
-    struct heap_page *page;
-    RVALUE *pstart = NULL, *pend;
-
-    i = 0;
-    while (i < heap_allocated_pages) {
-	while (0 < i && pstart < heap_pages_sorted[i-1]->start)              i--;
-	while (i < heap_allocated_pages && heap_pages_sorted[i]->start <= pstart) i++;
-	if (heap_allocated_pages <= i) break;
-
-	page = heap_pages_sorted[i];
-
-	pstart = page->start;
-	pend = pstart + page->total_slots;
-
-        if ((*callback)(pstart, pend, sizeof(RVALUE), data)) {
-	    break;
-	}
-    }
-}
-
 static VALUE
-objspace_each_objects_protected(VALUE arg)
+objspace_each_objects_ensure(VALUE arg)
 {
-    struct each_obj_args *args = (struct each_obj_args *)arg;
-    objspace_each_objects_without_setup(args->objspace, args->callback, args->data);
+    struct each_obj_data *data = (struct each_obj_data *)arg;
+    rb_objspace_t *objspace = data->objspace;
+
+    /* Reenable incremental GC */
+    if (data->reenable_incremental) {
+        objspace->flags.dont_incremental = FALSE;
+    }
+
+    /* Free pages buffer */
+    struct heap_page **pages = data->pages;
+    GC_ASSERT(pages);
+    free(pages);
+
     return Qnil;
 }
 
 static VALUE
-incremental_enable(VALUE _)
+objspace_each_objects_try(VALUE arg)
 {
-    rb_objspace_t *objspace = &rb_objspace;
+    struct each_obj_data *data = (struct each_obj_data *)arg;
+    rb_objspace_t *objspace = data->objspace;
+    struct heap_page **pages = data->pages;
+    size_t pages_count = data->pages_count;
 
-    objspace->flags.dont_incremental = FALSE;
+    struct heap_page *page = list_top(&heap_eden->pages, struct heap_page, page_node);
+    for (size_t i = 0; i < pages_count; i++) {
+        /* If we have reached the end of the linked list then there are no
+         * more pages, so break. */
+        if (page == NULL) break;
+
+        /* If this page does not match the one in the buffer, then move to
+         * the next page in the buffer. */
+        if (pages[i] != page) continue;
+
+        RVALUE *pstart = page->start;
+        RVALUE *pend = pstart + page->total_slots;
+
+        if ((*data->callback)(pstart, pend, sizeof(RVALUE), data->data)) {
+            break;
+        }
+
+        page = list_next(&heap_eden->pages, page, page_node);
+    }
+
     return Qnil;
 }
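The tandem walk in objspace_each_objects_try is the crux of the change: the callback may allocate, and allocation can add pages to (or GC can remove pages from) heap_eden->pages mid-walk, so the loop only visits pages that appear in both the snapshot buffer and the live list. A minimal standalone sketch of the same technique against a toy linked list follows; every name here is illustrative, and none of it is gc.c code:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy singly linked list standing in for heap_eden->pages. */
    struct node {
        int id;
        struct node *next;
    };

    static struct node *head;

    /* Append a node, as object allocation might append a heap page. */
    static void
    push_tail(int id)
    {
        struct node *n = malloc(sizeof(*n));
        n->id = id;
        n->next = NULL;
        struct node **p = &head;
        while (*p) p = &(*p)->next;
        *p = n;
    }

    int
    main(void)
    {
        for (int i = 0; i < 3; i++) push_tail(i);

        /* Snapshot the current list into a buffer. */
        size_t count = 0;
        for (struct node *n = head; n; n = n->next) count++;
        struct node **snap = malloc(count * sizeof(*snap));
        size_t k = 0;
        for (struct node *n = head; n; n = n->next) snap[k++] = n;

        /* Tandem walk: visit a node only when the live cursor and the
         * snapshot entry agree.  A snapshot entry that vanished from the
         * live list is skipped; nodes appended during the walk are never
         * visited because they are not in the snapshot. */
        struct node *n = head;
        for (size_t i = 0; i < count; i++) {
            if (n == NULL) break;       /* live list exhausted */
            if (snap[i] != n) continue; /* snap[i] was removed; skip it */

            printf("visit %d\n", n->id);
            push_tail(100 + n->id);     /* "allocate" during the walk */

            n = n->next;
        }

        free(snap);
        return 0;
    }

Running this visits only the three original nodes: the nodes appended during the walk are absent from the snapshot, so iteration terminates even though the list keeps growing.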
				
			
@@ -3305,30 +3319,58 @@ incremental_enable(VALUE _)
 void
 rb_objspace_each_objects(each_obj_callback *callback, void *data)
 {
-    objspace_each_objects(&rb_objspace, callback, data);
+    objspace_each_objects(&rb_objspace, callback, data, TRUE);
 }
 
 static void
-objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data)
+objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
 {
-    int prev_dont_incremental = objspace->flags.dont_incremental;
+    /* Disable incremental GC */
+    bool reenable_incremental = FALSE;
+    if (protected) {
+        reenable_incremental = !objspace->flags.dont_incremental;
 
-    gc_rest(objspace);
-    objspace->flags.dont_incremental = TRUE;
+        gc_rest(objspace);
+        objspace->flags.dont_incremental = TRUE;
+    }
 
-    if (prev_dont_incremental) {
-        objspace_each_objects_without_setup(objspace, callback, data);
-    }
-    else {
-        struct each_obj_args args = {objspace, callback, data};
-        rb_ensure(objspace_each_objects_protected, (VALUE)&args, incremental_enable, Qnil);
-    }
+    /* Create pages buffer */
+    size_t size = size_mul_or_raise(heap_allocated_pages, sizeof(struct heap_page *), rb_eRuntimeError);
+    struct heap_page **pages = malloc(size);
+    if (!pages) rb_memerror();
+
+    /* Set up pages buffer by iterating over all pages in the current eden
+     * heap. This will be a snapshot of the state of the heap before we
+     * call the callback over each page that exists in this buffer. Thus it
+     * is safe for the callback to allocate objects without possibly entering
+     * an infinite loop. */
+    struct heap_page *page;
+    size_t pages_count = 0;
+    list_for_each(&heap_eden->pages, page, page_node) {
+        pages[pages_count] = page;
+        pages_count++;
+    }
+    GC_ASSERT(pages_count <= heap_allocated_pages);
+
+    /* Run the callback */
+    struct each_obj_data each_obj_data = {
+        .objspace = objspace,
+        .reenable_incremental = reenable_incremental,
+
+        .callback = callback,
+        .data = data,
+
+        .pages = pages,
+        .pages_count = pages_count
+    };
+    rb_ensure(objspace_each_objects_try, (VALUE)&each_obj_data,
+              objspace_each_objects_ensure, (VALUE)&each_obj_data);
 }
 
 void
 rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
 {
-    objspace_each_objects_without_setup(&rb_objspace, callback, data);
+    objspace_each_objects(&rb_objspace, callback, data, FALSE);
 }
 
 struct os_each_struct {
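For orientation, a hedged sketch of the caller's side of this walk. rb_objspace_each_objects is Ruby-internal (the objspace extension is its main consumer); per the each_obj_callback typedef above, the callback receives one page's slot range plus the slot stride, and a non-zero return stops the walk. The helper names count_slots_i and count_heap_slots are hypothetical, and the flags-based liveness test is an illustrative heuristic rather than the exact check gc.c performs:

    /* Sketch only: count occupied heap slots via rb_objspace_each_objects.
     * Assumes CRuby-internal headers; this is not public C-extension API. */
    static int
    count_slots_i(void *vstart, void *vend, size_t stride, void *data)
    {
        size_t *count = (size_t *)data;

        for (VALUE v = (VALUE)vstart; v != (VALUE)vend; v += stride) {
            /* Free slots have zero flags (heuristic; see gc.c for the
             * authoritative test). */
            if (RBASIC(v)->flags != 0) (*count)++;
        }
        return 0; /* returning non-zero would stop the walk early */
    }

    static size_t
    count_heap_slots(void)
    {
        size_t count = 0;
        /* After this commit the callback may allocate Ruby objects:
         * new pages are absent from the snapshot, so the walk still ends. */
        rb_objspace_each_objects(count_slots_i, &count);
        return count;
    }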
				
			
@@ -7133,7 +7175,7 @@ gc_verify_internal_consistency_(rb_objspace_t *objspace)
 
     /* check relations */
 
-    objspace_each_objects_without_setup(objspace, verify_internal_consistency_i, &data);
+    objspace_each_objects(objspace, verify_internal_consistency_i, &data, FALSE);
 
     if (data.err_count != 0) {
 #if RGENGC_CHECK_MODE >= 5
				
			
@@ -9545,7 +9587,7 @@ gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE do
     gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);
 
     objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
-    objspace_each_objects(objspace, heap_check_moved_i, NULL);
+    objspace_each_objects(objspace, heap_check_moved_i, NULL, TRUE);
 
     return gc_compact_stats(ec, self);
 }
--- a/test/ruby/test_objectspace.rb
+++ b/test/ruby/test_objectspace.rb
@@ -233,4 +233,11 @@ End
     assert_kind_of(meta, sclass)
     assert_include(ObjectSpace.each_object(meta).to_a, sclass)
   end
+
+  def test_each_object_with_allocation
+    assert_normal_exit(<<-End)
+      list = []
+      ObjectSpace.each_object { |o| list << Object.new }
+    End
+  end
 end
		|||