1
0
Fork 0
mirror of https://github.com/ruby/ruby.git synced 2022-11-09 12:17:21 -05:00

* gc.c: rename constant names HEAP_* to PAGE_*.

Keys of GC::INTERNAL_CONSTANTS are also renamed.

* test/ruby/test_gc.rb: catch up this fix.


git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@53468 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
This commit is contained in:
ko1 2016-01-08 20:45:02 +00:00
parent 066b825400
commit f8827cef6a
3 changed files with 50 additions and 42 deletions

View file

@@ -1,3 +1,11 @@
Sat Jan 9 05:42:57 2016 Koichi Sasada <ko1@atdot.net>
* gc.c: rename constant names HEAP_* to PAGE_*.
Keys of GC::INTERNAL_CONSTANTS are also renamed.
* test/ruby/test_gc.rb: catch up this fix.
Fri Jan 8 22:30:06 2016 Akinori MUSHA <knu@iDaemons.org>
* doc/regexp.rdoc: [DOC] Elaborate on the \G anchor. [ci skip]

80
gc.c
View file

@ -470,7 +470,7 @@ typedef struct rb_heap_struct {
struct heap_page *pooled_pages; struct heap_page *pooled_pages;
#endif #endif
size_t total_pages; /* total page count in a heap */ size_t total_pages; /* total page count in a heap */
size_t total_slots; /* total slot count (about total_pages * HEAP_OBJ_LIMIT) */ size_t total_slots; /* total slot count (about total_pages * PAGE_OBJ_LIMIT) */
} rb_heap_t; } rb_heap_t;
enum gc_stat { enum gc_stat {
@ -625,20 +625,20 @@ typedef struct rb_objspace {
} rb_objspace_t; } rb_objspace_t;
#ifndef HEAP_ALIGN_LOG #ifndef PAGE_ALIGN_LOG
/* default tiny heap size: 16KB */ /* default tiny heap size: 16KB */
#define HEAP_ALIGN_LOG 14 #define PAGE_ALIGN_LOG 14
#endif #endif
#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod)) #define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
enum { enum {
HEAP_ALIGN = (1UL << HEAP_ALIGN_LOG), PAGE_ALIGN = (1UL << PAGE_ALIGN_LOG),
HEAP_ALIGN_MASK = (~(~0UL << HEAP_ALIGN_LOG)), PAGE_ALIGN_MASK = (~(~0UL << PAGE_ALIGN_LOG)),
REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5), REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
HEAP_SIZE = (HEAP_ALIGN - REQUIRED_SIZE_BY_MALLOC), PAGE_SIZE = (PAGE_ALIGN - REQUIRED_SIZE_BY_MALLOC),
HEAP_OBJ_LIMIT = (unsigned int)((HEAP_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)), PAGE_OBJ_LIMIT = (unsigned int)((PAGE_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)),
HEAP_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_SIZE, sizeof(struct RVALUE)), BITS_BITLENGTH), PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(PAGE_SIZE, sizeof(struct RVALUE)), BITS_BITLENGTH),
HEAP_BITMAP_SIZE = ( BITS_SIZE * HEAP_BITMAP_LIMIT), PAGE_BITMAP_SIZE = (BITS_SIZE * PAGE_BITMAP_LIMIT),
HEAP_BITMAP_PLANES = USE_RGENGC ? 3 : 1 /* RGENGC: mark bits, rememberset bits and oldgen bits */ PAGE_BITMAP_PLANES = USE_RGENGC ? 3 : 1 /* RGENGC: mark bits, rememberset bits and oldgen bits */
}; };
struct heap_page { struct heap_page {
@ -659,21 +659,21 @@ struct heap_page {
struct heap_page *next; struct heap_page *next;
#if USE_RGENGC #if USE_RGENGC
bits_t wb_unprotected_bits[HEAP_BITMAP_LIMIT]; bits_t wb_unprotected_bits[PAGE_BITMAP_LIMIT];
#endif #endif
/* the following three bitmaps are cleared at the beginning of full GC */ /* the following three bitmaps are cleared at the beginning of full GC */
bits_t mark_bits[HEAP_BITMAP_LIMIT]; bits_t mark_bits[PAGE_BITMAP_LIMIT];
#if USE_RGENGC #if USE_RGENGC
bits_t uncollectible_bits[HEAP_BITMAP_LIMIT]; bits_t uncollectible_bits[PAGE_BITMAP_LIMIT];
bits_t marking_bits[HEAP_BITMAP_LIMIT]; bits_t marking_bits[PAGE_BITMAP_LIMIT];
#endif #endif
}; };
#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_ALIGN_MASK))) #define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(PAGE_ALIGN_MASK)))
#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header) #define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page) #define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_ALIGN_MASK)/sizeof(RVALUE)) #define NUM_IN_PAGE(p) (((bits_t)(p) & PAGE_ALIGN_MASK)/sizeof(RVALUE))
#define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH ) #define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1)) #define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
#define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p)) #define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
@ -1440,10 +1440,10 @@ heap_page_allocate(rb_objspace_t *objspace)
struct heap_page *page; struct heap_page *page;
struct heap_page_body *page_body = 0; struct heap_page_body *page_body = 0;
size_t hi, lo, mid; size_t hi, lo, mid;
int limit = HEAP_OBJ_LIMIT; int limit = PAGE_OBJ_LIMIT;
/* assign heap_page body (contains heap_page_header and RVALUEs) */ /* assign heap_page body (contains heap_page_header and RVALUEs) */
page_body = (struct heap_page_body *)aligned_malloc(HEAP_ALIGN, HEAP_SIZE); page_body = (struct heap_page_body *)aligned_malloc(PAGE_ALIGN, PAGE_SIZE);
if (page_body == 0) { if (page_body == 0) {
rb_memerror(); rb_memerror();
} }
@ -1460,7 +1460,7 @@ heap_page_allocate(rb_objspace_t *objspace)
if ((VALUE)start % sizeof(RVALUE) != 0) { if ((VALUE)start % sizeof(RVALUE) != 0) {
int delta = (int)(sizeof(RVALUE) - ((VALUE)start % sizeof(RVALUE))); int delta = (int)(sizeof(RVALUE) - ((VALUE)start % sizeof(RVALUE)));
start = (RVALUE*)((VALUE)start + delta); start = (RVALUE*)((VALUE)start + delta);
limit = (HEAP_SIZE - (int)((VALUE)start - (VALUE)page_body))/(int)sizeof(RVALUE); limit = (PAGE_SIZE - (int)((VALUE)start - (VALUE)page_body))/(int)sizeof(RVALUE);
} }
end = start + limit; end = start + limit;
@ -1574,7 +1574,7 @@ heap_extend_pages(rb_objspace_t *objspace)
size_t next_used_limit = (size_t)(used * gc_params.growth_factor); size_t next_used_limit = (size_t)(used * gc_params.growth_factor);
if (gc_params.growth_max_slots > 0) { if (gc_params.growth_max_slots > 0) {
size_t max_used_limit = (size_t)(used + gc_params.growth_max_slots/HEAP_OBJ_LIMIT); size_t max_used_limit = (size_t)(used + gc_params.growth_max_slots/PAGE_OBJ_LIMIT);
if (next_used_limit > max_used_limit) next_used_limit = max_used_limit; if (next_used_limit > max_used_limit) next_used_limit = max_used_limit;
} }
@ -2248,7 +2248,7 @@ Init_heap(void)
objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min; objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
#endif #endif
heap_add_pages(objspace, heap_eden, gc_params.heap_init_slots / HEAP_OBJ_LIMIT); heap_add_pages(objspace, heap_eden, gc_params.heap_init_slots / PAGE_OBJ_LIMIT);
init_mark_stack(&objspace->mark_stack); init_mark_stack(&objspace->mark_stack);
#ifdef USE_SIGALTSTACK #ifdef USE_SIGALTSTACK
@ -3327,10 +3327,10 @@ gc_setup_mark_bits(struct heap_page *page)
{ {
#if USE_RGENGC #if USE_RGENGC
/* copy oldgen bitmap to mark bitmap */ /* copy oldgen bitmap to mark bitmap */
memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_BITMAP_SIZE); memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], PAGE_BITMAP_SIZE);
#else #else
/* clear mark bitmap */ /* clear mark bitmap */
memset(&page->mark_bits[0], 0, HEAP_BITMAP_SIZE); memset(&page->mark_bits[0], 0, PAGE_BITMAP_SIZE);
#endif #endif
} }
@ -3356,7 +3356,7 @@ gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_
bits[BITMAP_INDEX(p)] |= BITMAP_BIT(p)-1; bits[BITMAP_INDEX(p)] |= BITMAP_BIT(p)-1;
bits[BITMAP_INDEX(pend)] |= ~(BITMAP_BIT(pend) - 1); bits[BITMAP_INDEX(pend)] |= ~(BITMAP_BIT(pend) - 1);
for (i=0; i < HEAP_BITMAP_LIMIT; i++) { for (i=0; i < PAGE_BITMAP_LIMIT; i++) {
bitset = ~bits[i]; bitset = ~bits[i];
if (bitset) { if (bitset) {
p = offset + i * BITS_BITLENGTH; p = offset + i * BITS_BITLENGTH;
@ -3486,7 +3486,7 @@ gc_sweep_start(rb_objspace_t *objspace)
gc_stat_transition(objspace, gc_stat_sweeping); gc_stat_transition(objspace, gc_stat_sweeping);
/* sometimes heap_allocatable_pages is not 0 */ /* sometimes heap_allocatable_pages is not 0 */
heap_pages_swept_slots = heap_allocatable_pages * HEAP_OBJ_LIMIT; heap_pages_swept_slots = heap_allocatable_pages * PAGE_OBJ_LIMIT;
total_limit_slot = objspace_available_slots(objspace); total_limit_slot = objspace_available_slots(objspace);
heap_pages_min_free_slots = (size_t)(total_limit_slot * GC_HEAP_FREE_SLOTS_MIN_RATIO); heap_pages_min_free_slots = (size_t)(total_limit_slot * GC_HEAP_FREE_SLOTS_MIN_RATIO);
@ -5173,7 +5173,7 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark)
#if USE_RGENGC #if USE_RGENGC
if (full_mark) { if (full_mark) {
#if GC_ENABLE_INCREMENTAL_MARK #if GC_ENABLE_INCREMENTAL_MARK
objspace->rincgc.step_slots = (objspace->marked_slots * 2) / ((objspace->rincgc.pooled_slots / HEAP_OBJ_LIMIT) + 1); objspace->rincgc.step_slots = (objspace->marked_slots * 2) / ((objspace->rincgc.pooled_slots / PAGE_OBJ_LIMIT) + 1);
if (0) fprintf(stderr, "objspace->marked_slots: %d, objspace->rincgc.pooled_page_num: %d, objspace->rincgc.step_slots: %d, \n", if (0) fprintf(stderr, "objspace->marked_slots: %d, objspace->rincgc.pooled_page_num: %d, objspace->rincgc.step_slots: %d, \n",
(int)objspace->marked_slots, (int)objspace->rincgc.pooled_slots, (int)objspace->rincgc.step_slots); (int)objspace->marked_slots, (int)objspace->rincgc.pooled_slots, (int)objspace->rincgc.step_slots);
@ -5213,7 +5213,7 @@ gc_marks_wb_unprotected_objects(rb_objspace_t *objspace)
RVALUE *offset = p - NUM_IN_PAGE(p); RVALUE *offset = p - NUM_IN_PAGE(p);
size_t j; size_t j;
for (j=0; j<HEAP_BITMAP_LIMIT; j++) { for (j=0; j<PAGE_BITMAP_LIMIT; j++) {
bits_t bits = mark_bits[j] & wbun_bits[j]; bits_t bits = mark_bits[j] & wbun_bits[j];
if (bits) { if (bits) {
@ -5310,7 +5310,7 @@ gc_marks_finish(rb_objspace_t *objspace)
{ /* decide full GC is needed or not */ { /* decide full GC is needed or not */
rb_heap_t *heap = heap_eden; rb_heap_t *heap = heap_eden;
size_t sweep_slots = size_t sweep_slots =
(heap_allocatable_pages * HEAP_OBJ_LIMIT) + /* allocatable slots in empty pages */ (heap_allocatable_pages * PAGE_OBJ_LIMIT) + /* allocatable slots in empty pages */
(heap->total_slots - objspace->marked_slots); /* will be sweep slots */ (heap->total_slots - objspace->marked_slots); /* will be sweep slots */
#if RGENGC_CHECK_MODE #if RGENGC_CHECK_MODE
@ -5410,7 +5410,7 @@ gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
PUSH_MARK_FUNC_DATA(NULL); PUSH_MARK_FUNC_DATA(NULL);
{ {
if (heap->pooled_pages) { if (heap->pooled_pages) {
while (heap->pooled_pages && slots < HEAP_OBJ_LIMIT) { while (heap->pooled_pages && slots < PAGE_OBJ_LIMIT) {
struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap); struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
slots += page->free_slots; slots += page->free_slots;
} }
@ -5585,7 +5585,7 @@ rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) { if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
RVALUE *p = page->start; RVALUE *p = page->start;
RVALUE *offset = p - NUM_IN_PAGE(p); RVALUE *offset = p - NUM_IN_PAGE(p);
bits_t bitset, bits[HEAP_BITMAP_LIMIT]; bits_t bitset, bits[PAGE_BITMAP_LIMIT];
bits_t *marking_bits = page->marking_bits; bits_t *marking_bits = page->marking_bits;
bits_t *uncollectible_bits = page->uncollectible_bits; bits_t *uncollectible_bits = page->uncollectible_bits;
bits_t *wb_unprotected_bits = page->wb_unprotected_bits; bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
@ -5594,13 +5594,13 @@ rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
else if (page->flags.has_remembered_objects) has_old++; else if (page->flags.has_remembered_objects) has_old++;
else if (page->flags.has_uncollectible_shady_objects) has_shady++; else if (page->flags.has_uncollectible_shady_objects) has_shady++;
#endif #endif
for (j=0; j<HEAP_BITMAP_LIMIT; j++) { for (j=0; j<PAGE_BITMAP_LIMIT; j++) {
bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]); bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
marking_bits[j] = 0; marking_bits[j] = 0;
} }
page->flags.has_remembered_objects = FALSE; page->flags.has_remembered_objects = FALSE;
for (j=0; j < HEAP_BITMAP_LIMIT; j++) { for (j=0; j < PAGE_BITMAP_LIMIT; j++) {
bitset = bits[j]; bitset = bits[j];
if (bitset) { if (bitset) {
@ -5645,9 +5645,9 @@ rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
struct heap_page *page = heap->pages; struct heap_page *page = heap->pages;
while (page) { while (page) {
memset(&page->mark_bits[0], 0, HEAP_BITMAP_SIZE); memset(&page->mark_bits[0], 0, PAGE_BITMAP_SIZE);
memset(&page->marking_bits[0], 0, HEAP_BITMAP_SIZE); memset(&page->marking_bits[0], 0, PAGE_BITMAP_SIZE);
memset(&page->uncollectible_bits[0], 0, HEAP_BITMAP_SIZE); memset(&page->uncollectible_bits[0], 0, PAGE_BITMAP_SIZE);
page->flags.has_uncollectible_shady_objects = FALSE; page->flags.has_uncollectible_shady_objects = FALSE;
page->flags.has_remembered_objects = FALSE; page->flags.has_remembered_objects = FALSE;
page = page->next; page = page->next;
@ -7240,7 +7240,7 @@ gc_set_initial_pages(void)
size_t min_pages; size_t min_pages;
rb_objspace_t *objspace = &rb_objspace; rb_objspace_t *objspace = &rb_objspace;
min_pages = gc_params.heap_init_slots / HEAP_OBJ_LIMIT; min_pages = gc_params.heap_init_slots / PAGE_OBJ_LIMIT;
if (min_pages > heap_eden->total_pages) { if (min_pages > heap_eden->total_pages) {
heap_add_pages(objspace, heap_eden, min_pages - heap_eden->total_pages); heap_add_pages(objspace, heap_eden, min_pages - heap_eden->total_pages);
} }
@ -8497,7 +8497,7 @@ gc_prof_set_heap_info(rb_objspace_t *objspace)
if (gc_prof_enabled(objspace)) { if (gc_prof_enabled(objspace)) {
gc_profile_record *record = gc_prof_record(objspace); gc_profile_record *record = gc_prof_record(objspace);
size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects; size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
size_t total = objspace->profile.heap_used_at_gc_start * HEAP_OBJ_LIMIT; size_t total = objspace->profile.heap_used_at_gc_start * PAGE_OBJ_LIMIT;
#if GC_PROFILE_MORE_DETAIL #if GC_PROFILE_MORE_DETAIL
record->heap_use_pages = objspace->profile.heap_used_at_gc_start; record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
@ -9293,9 +9293,9 @@ Init_GC(void)
gc_constants = rb_hash_new(); gc_constants = rb_hash_new();
rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE))); rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_OBJ_LIMIT")), SIZET2NUM(HEAP_OBJ_LIMIT)); rb_hash_aset(gc_constants, ID2SYM(rb_intern("PAGE_OBJ_LIMIT")), SIZET2NUM(PAGE_OBJ_LIMIT));
rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_BITMAP_SIZE")), SIZET2NUM(HEAP_BITMAP_SIZE)); rb_hash_aset(gc_constants, ID2SYM(rb_intern("PAGE_BITMAP_SIZE")), SIZET2NUM(PAGE_BITMAP_SIZE));
rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_BITMAP_PLANES")), SIZET2NUM(HEAP_BITMAP_PLANES)); rb_hash_aset(gc_constants, ID2SYM(rb_intern("PAGE_BITMAP_PLANES")), SIZET2NUM(PAGE_BITMAP_PLANES));
OBJ_FREEZE(gc_constants); OBJ_FREEZE(gc_constants);
rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants); rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);

View file

@ -126,7 +126,7 @@ class TestGc < Test::Unit::TestCase
def test_latest_gc_info def test_latest_gc_info
assert_separately %w[--disable-gem], __FILE__, __LINE__, <<-'eom' assert_separately %w[--disable-gem], __FILE__, __LINE__, <<-'eom'
GC.start GC.start
count = GC.stat(:heap_free_slots) + GC.stat(:heap_allocatable_pages) * GC::INTERNAL_CONSTANTS[:HEAP_OBJ_LIMIT] count = GC.stat(:heap_free_slots) + GC.stat(:heap_allocatable_pages) * GC::INTERNAL_CONSTANTS[:PAGE_OBJ_LIMIT]
count.times{ "a" + "b" } count.times{ "a" + "b" }
assert_equal :newobj, GC.latest_gc_info[:gc_by] assert_equal :newobj, GC.latest_gc_info[:gc_by]
eom eom
@ -301,7 +301,7 @@ class TestGc < Test::Unit::TestCase
end end
def test_gc_internals def test_gc_internals
assert_not_nil GC::INTERNAL_CONSTANTS[:HEAP_OBJ_LIMIT] assert_not_nil GC::INTERNAL_CONSTANTS[:PAGE_OBJ_LIMIT]
assert_not_nil GC::INTERNAL_CONSTANTS[:RVALUE_SIZE] assert_not_nil GC::INTERNAL_CONSTANTS[:RVALUE_SIZE]
end end