From fca0cf6e6b8ab8882f3403e5909c8eb91c5c351e Mon Sep 17 00:00:00 2001
From: ko1 <ko1@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>
Date: Fri, 8 Jan 2016 22:15:40 +0000
Subject: [PATCH] * gc.c: rename PAGE_* to HEAP_PAGE_* because PAGE_SIZE is
 used in Mac OS X.

* test/ruby/test_gc.rb: catch up this fix.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@53471 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
---
 ChangeLog            |  7 ++++
 gc.c                 | 80 ++++++++++++++++++++++----------------
 test/ruby/test_gc.rb |  4 +--
 3 files changed, 49 insertions(+), 42 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index f27606dc6e..89a21309d7 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+Sat Jan  9 07:13:33 2016  Koichi Sasada  <ko1@atdot.net>
+
+	* gc.c: rename PAGE_* to HEAP_PAGE_* because PAGE_SIZE is used
+	  in Mac OS X.
+
+	* test/ruby/test_gc.rb: catch up this fix.
+
 Sat Jan  9 05:45:40 2016  Koichi Sasada  <ko1@atdot.net>
 
 	* gc.c: PAGE_BITMAP_PLANES (the number of bitmap) is 4, not 3.
diff --git a/gc.c b/gc.c
index 48d9acb9c7..874cb981b3 100644
--- a/gc.c
+++ b/gc.c
@@ -470,7 +470,7 @@ typedef struct rb_heap_struct {
     struct heap_page *pooled_pages;
 #endif
     size_t total_pages;      /* total page count in a heap */
-    size_t total_slots;      /* total slot count (about total_pages * PAGE_OBJ_LIMIT) */
+    size_t total_slots;      /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
 } rb_heap_t;
 
 enum gc_stat {
@@ -625,20 +625,20 @@ typedef struct rb_objspace {
 } rb_objspace_t;
 
 
-#ifndef PAGE_ALIGN_LOG
+#ifndef HEAP_PAGE_ALIGN_LOG
 /* default tiny heap size: 16KB */
-#define PAGE_ALIGN_LOG 14
+#define HEAP_PAGE_ALIGN_LOG 14
 #endif
 #define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
 enum {
-    PAGE_ALIGN = (1UL << PAGE_ALIGN_LOG),
-    PAGE_ALIGN_MASK = (~(~0UL << PAGE_ALIGN_LOG)),
+    HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
+    HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
     REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
-    PAGE_SIZE = (PAGE_ALIGN - REQUIRED_SIZE_BY_MALLOC),
-    PAGE_OBJ_LIMIT = (unsigned int)((PAGE_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)),
-    PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(PAGE_SIZE, sizeof(struct RVALUE)), BITS_BITLENGTH),
-    PAGE_BITMAP_SIZE = (BITS_SIZE * PAGE_BITMAP_LIMIT),
-    PAGE_BITMAP_PLANES = USE_RGENGC ? 4 : 1 /* RGENGC: mark, unprotected, uncollectible, marking */
+    HEAP_PAGE_SIZE = (HEAP_PAGE_ALIGN - REQUIRED_SIZE_BY_MALLOC),
+    HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)),
+    HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, sizeof(struct RVALUE)), BITS_BITLENGTH),
+    HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
+    HEAP_PAGE_BITMAP_PLANES = USE_RGENGC ? 4 : 1 /* RGENGC: mark, unprotected, uncollectible, marking */
 };
 
 struct heap_page {
@@ -659,21 +659,21 @@ struct heap_page {
     struct heap_page *next;
 
 #if USE_RGENGC
-    bits_t wb_unprotected_bits[PAGE_BITMAP_LIMIT];
+    bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
 #endif
     /* the following three bitmaps are cleared at the beginning of full GC */
-    bits_t mark_bits[PAGE_BITMAP_LIMIT];
+    bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
 #if USE_RGENGC
-    bits_t uncollectible_bits[PAGE_BITMAP_LIMIT];
-    bits_t marking_bits[PAGE_BITMAP_LIMIT];
+    bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
+    bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
 #endif
 };
 
-#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(PAGE_ALIGN_MASK)))
+#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
 #define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
 #define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
 
-#define NUM_IN_PAGE(p) (((bits_t)(p) & PAGE_ALIGN_MASK)/sizeof(RVALUE))
+#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
 #define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
 #define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
 #define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
@@ -1440,10 +1440,10 @@ heap_page_allocate(rb_objspace_t *objspace)
     struct heap_page *page;
     struct heap_page_body *page_body = 0;
     size_t hi, lo, mid;
-    int limit = PAGE_OBJ_LIMIT;
+    int limit = HEAP_PAGE_OBJ_LIMIT;
 
     /* assign heap_page body (contains heap_page_header and RVALUEs) */
-    page_body = (struct heap_page_body *)aligned_malloc(PAGE_ALIGN, PAGE_SIZE);
+    page_body = (struct heap_page_body *)aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
     if (page_body == 0) {
	rb_memerror();
     }
@@ -1460,7 +1460,7 @@ heap_page_allocate(rb_objspace_t *objspace)
     if ((VALUE)start % sizeof(RVALUE) != 0) {
	int delta = (int)(sizeof(RVALUE) - ((VALUE)start % sizeof(RVALUE)));
	start = (RVALUE*)((VALUE)start + delta);
-	limit = (PAGE_SIZE - (int)((VALUE)start - (VALUE)page_body))/(int)sizeof(RVALUE);
+	limit = (HEAP_PAGE_SIZE - (int)((VALUE)start - (VALUE)page_body))/(int)sizeof(RVALUE);
     }
     end = start + limit;
 
@@ -1574,7 +1574,7 @@ heap_extend_pages(rb_objspace_t *objspace)
     size_t next_used_limit = (size_t)(used * gc_params.growth_factor);
 
     if (gc_params.growth_max_slots > 0) {
-	size_t max_used_limit = (size_t)(used + gc_params.growth_max_slots/PAGE_OBJ_LIMIT);
+	size_t max_used_limit = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
	if (next_used_limit > max_used_limit) next_used_limit = max_used_limit;
     }
 
@@ -2248,7 +2248,7 @@ Init_heap(void)
     objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
 #endif
 
-    heap_add_pages(objspace, heap_eden, gc_params.heap_init_slots / PAGE_OBJ_LIMIT);
+    heap_add_pages(objspace, heap_eden, gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);
     init_mark_stack(&objspace->mark_stack);
 
 #ifdef USE_SIGALTSTACK
@@ -3327,10 +3327,10 @@ gc_setup_mark_bits(struct heap_page *page)
 {
 #if USE_RGENGC
     /* copy oldgen bitmap to mark bitmap */
-    memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], PAGE_BITMAP_SIZE);
+    memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
 #else
     /* clear mark bitmap */
-    memset(&page->mark_bits[0], 0, PAGE_BITMAP_SIZE);
+    memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
 #endif
 }
 
@@ -3356,7 +3356,7 @@ gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page)
     bits[BITMAP_INDEX(p)] |= BITMAP_BIT(p)-1;
     bits[BITMAP_INDEX(pend)] |= ~(BITMAP_BIT(pend) - 1);
 
-    for (i=0; i < PAGE_BITMAP_LIMIT; i++) {
+    for (i=0; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
	bitset = ~bits[i];
	if (bitset) {
	    p = offset + i * BITS_BITLENGTH;
@@ -3486,7 +3486,7 @@ gc_sweep_start(rb_objspace_t *objspace)
     gc_stat_transition(objspace, gc_stat_sweeping);
 
     /* sometimes heap_allocatable_pages is not 0 */
-    heap_pages_swept_slots = heap_allocatable_pages * PAGE_OBJ_LIMIT;
+    heap_pages_swept_slots = heap_allocatable_pages * HEAP_PAGE_OBJ_LIMIT;
     total_limit_slot = objspace_available_slots(objspace);
 
     heap_pages_min_free_slots = (size_t)(total_limit_slot * GC_HEAP_FREE_SLOTS_MIN_RATIO);
@@ -5173,7 +5173,7 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark)
 #if USE_RGENGC
     if (full_mark) {
 #if GC_ENABLE_INCREMENTAL_MARK
-	objspace->rincgc.step_slots = (objspace->marked_slots * 2) / ((objspace->rincgc.pooled_slots / PAGE_OBJ_LIMIT) + 1);
+	objspace->rincgc.step_slots = (objspace->marked_slots * 2) / ((objspace->rincgc.pooled_slots / HEAP_PAGE_OBJ_LIMIT) + 1);
 
	if (0) fprintf(stderr, "objspace->marked_slots: %d, objspace->rincgc.pooled_page_num: %d, objspace->rincgc.step_slots: %d, \n",
		       (int)objspace->marked_slots, (int)objspace->rincgc.pooled_slots, (int)objspace->rincgc.step_slots);
@@ -5213,7 +5213,7 @@ gc_marks_wb_unprotected_objects(rb_objspace_t *objspace)
	RVALUE *offset = p - NUM_IN_PAGE(p);
	size_t j;
 
-	for (j=0; j<PAGE_BITMAP_LIMIT; j++) {
+	for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
	    bits_t bits = mark_bits[j] & wbun_bits[j];
 
	    if (bits) {
@@ -5283,7 +5283,7 @@ gc_marks_finish(rb_objspace_t *objspace)
	/* decide full GC is needed or not */
	rb_heap_t *heap = heap_eden;
	size_t sweep_slots =
-	  (heap_allocatable_pages * PAGE_OBJ_LIMIT) + /* allocatable slots in empty pages */
+	  (heap_allocatable_pages * HEAP_PAGE_OBJ_LIMIT) + /* allocatable slots in empty pages */
	  (heap->total_slots - objspace->marked_slots); /* will be sweep slots */
 
 #if RGENGC_CHECK_MODE
@@ -5410,7 +5410,7 @@ gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
     PUSH_MARK_FUNC_DATA(NULL);
     {
	if (heap->pooled_pages) {
-	    while (heap->pooled_pages && slots < PAGE_OBJ_LIMIT) {
+	    while (heap->pooled_pages && slots < HEAP_PAGE_OBJ_LIMIT) {
		struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
		slots += page->free_slots;
	    }
@@ -5585,7 +5585,7 @@ rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
	if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
	    RVALUE *p = page->start;
	    RVALUE *offset = p - NUM_IN_PAGE(p);
-	    bits_t bitset, bits[PAGE_BITMAP_LIMIT];
+	    bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
	    bits_t *marking_bits = page->marking_bits;
	    bits_t *uncollectible_bits = page->uncollectible_bits;
	    bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
@@ -5594,13 +5594,13 @@ rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
	    else if (page->flags.has_remembered_objects) has_old++;
	    else if (page->flags.has_uncollectible_shady_objects) has_shady++;
 #endif
-	    for (j=0; j<PAGE_BITMAP_LIMIT; j++) {
+	    for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
		bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
		marking_bits[j] = 0;
	    }
	    page->flags.has_remembered_objects = FALSE;
 
-	    for (j=0; j < PAGE_BITMAP_LIMIT; j++) {
+	    for (j=0; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
		bitset = bits[j];
 
		if (bitset) {
@@ -5645,9 +5645,9 @@ rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
     struct heap_page *page = heap->pages;
 
     while (page) {
-	memset(&page->mark_bits[0], 0, PAGE_BITMAP_SIZE);
-	memset(&page->marking_bits[0], 0, PAGE_BITMAP_SIZE);
-	memset(&page->uncollectible_bits[0], 0, PAGE_BITMAP_SIZE);
+	memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
+	memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
+	memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
	page->flags.has_uncollectible_shady_objects = FALSE;
	page->flags.has_remembered_objects = FALSE;
	page = page->next;
@@ -7240,7 +7240,7 @@ gc_set_initial_pages(void)
     size_t min_pages;
     rb_objspace_t *objspace = &rb_objspace;
 
-    min_pages = gc_params.heap_init_slots / PAGE_OBJ_LIMIT;
+    min_pages = gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT;
     if (min_pages > heap_eden->total_pages) {
	heap_add_pages(objspace, heap_eden, min_pages - heap_eden->total_pages);
     }
@@ -8497,7 +8497,7 @@ gc_prof_set_heap_info(rb_objspace_t *objspace)
     if (gc_prof_enabled(objspace)) {
	gc_profile_record *record = gc_prof_record(objspace);
	size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
-	size_t total = objspace->profile.heap_used_at_gc_start * PAGE_OBJ_LIMIT;
+	size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
 
 #if GC_PROFILE_MORE_DETAIL
	record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
@@ -9293,9 +9293,9 @@ Init_GC(void)
 
     gc_constants = rb_hash_new();
     rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
-    rb_hash_aset(gc_constants, ID2SYM(rb_intern("PAGE_OBJ_LIMIT")), SIZET2NUM(PAGE_OBJ_LIMIT));
-    rb_hash_aset(gc_constants, ID2SYM(rb_intern("PAGE_BITMAP_SIZE")), SIZET2NUM(PAGE_BITMAP_SIZE));
-    rb_hash_aset(gc_constants, ID2SYM(rb_intern("PAGE_BITMAP_PLANES")), SIZET2NUM(PAGE_BITMAP_PLANES));
+    rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
+    rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
+    rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_PLANES")), SIZET2NUM(HEAP_PAGE_BITMAP_PLANES));
     OBJ_FREEZE(gc_constants);
     rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
 
diff --git a/test/ruby/test_gc.rb b/test/ruby/test_gc.rb
index 56ddc710d5..849f740b14 100644
--- a/test/ruby/test_gc.rb
+++ b/test/ruby/test_gc.rb
@@ -126,7 +126,7 @@ class TestGc < Test::Unit::TestCase
   def test_latest_gc_info
     assert_separately %w[--disable-gem], __FILE__, __LINE__, <<-'eom'
       GC.start
-      count = GC.stat(:heap_free_slots) + GC.stat(:heap_allocatable_pages) * GC::INTERNAL_CONSTANTS[:PAGE_OBJ_LIMIT]
+      count = GC.stat(:heap_free_slots) + GC.stat(:heap_allocatable_pages) * GC::INTERNAL_CONSTANTS[:HEAP_PAGE_OBJ_LIMIT]
      count.times{ "a" + "b" }
      assert_equal :newobj, GC.latest_gc_info[:gc_by]
    eom
@@ -301,7 +301,7 @@ class TestGc < Test::Unit::TestCase
   end
 
   def test_gc_internals
-    assert_not_nil GC::INTERNAL_CONSTANTS[:PAGE_OBJ_LIMIT]
+    assert_not_nil GC::INTERNAL_CONSTANTS[:HEAP_PAGE_OBJ_LIMIT]
     assert_not_nil GC::INTERNAL_CONSTANTS[:RVALUE_SIZE]
   end
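
Background on the collision (an illustrative sketch, not part of the patch):
on Mac OS X, PAGE_SIZE is a preprocessor macro supplied by the Mach kernel
headers (e.g. <mach/vm_param.h>, which common system includes pull in
indirectly), so gc.c's enumerator of the same name is rewritten by the
preprocessor before the compiler ever sees it. A minimal reproduction,
assuming a Darwin toolchain; the file name is made up:

    /* page_size_collision.c -- illustrative only */
    #include <mach/vm_param.h>      /* #defines PAGE_SIZE on OS X */

    enum {
        PAGE_SIZE = (1UL << 14)     /* macro expands here -> compile error */
    };

    int main(void) { return 0; }

Renaming the Ruby-side constants into a HEAP_PAGE_* namespace sidesteps the
macro, instead of #undef'ing it and hiding the system value from any code
that legitimately wants it.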
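
The renamed constants also spell out the alignment trick the collector relies
on: heap_page_allocate() obtains each page body from
aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE), so GET_PAGE_BODY() can
recover the owning page from any object pointer by masking off the low
HEAP_PAGE_ALIGN_LOG bits, and NUM_IN_PAGE() turns those low bits into a slot
index. A self-contained sketch of that arithmetic (constants hard-coded and
the pointer value invented for illustration; the real macros divide by
sizeof(RVALUE)):

    #include <stdio.h>
    #include <stdint.h>

    #define HEAP_PAGE_ALIGN_LOG  14   /* 16KB pages, as in gc.c */
    #define HEAP_PAGE_ALIGN      (1UL << HEAP_PAGE_ALIGN_LOG)
    #define HEAP_PAGE_ALIGN_MASK (~(~0UL << HEAP_PAGE_ALIGN_LOG))
    #define SLOT_SIZE            40   /* stand-in for sizeof(RVALUE) */

    /* the same masking gc.c does in GET_PAGE_BODY() and NUM_IN_PAGE() */
    #define GET_PAGE_BODY(p) ((uintptr_t)(p) & ~HEAP_PAGE_ALIGN_MASK)
    #define NUM_IN_PAGE(p)   (((uintptr_t)(p) & HEAP_PAGE_ALIGN_MASK) / SLOT_SIZE)

    int main(void)
    {
        uintptr_t body = 0x7f0000004000UL;      /* pretend aligned_malloc() result */
        uintptr_t obj  = body + 5 * SLOT_SIZE;  /* object sitting in slot 5 */

        printf("page body:  %#lx\n", (unsigned long)GET_PAGE_BODY(obj)); /* 0x7f0000004000 */
        printf("slot index: %lu\n",  (unsigned long)NUM_IN_PAGE(obj));   /* 5 */
        return 0;
    }

HEAP_PAGE_SIZE being HEAP_PAGE_ALIGN minus REQUIRED_SIZE_BY_MALLOC presumably
leaves an aligned_malloc built on plain malloc some slack for its bookkeeping,
so the request does not spill past a single aligned block.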