Mirror of https://github.com/ruby/ruby.git (synced 2022-11-09 12:17:21 -05:00)
partially merge revision(s) 119697f61e2b2b157816a8aa33aada5863959900, 4a627dbdfd1165022fa9e716ba845e937b03773d: [Backport #18014]

[Bug #18014] Fix rb_gc_force_recycle unmark before sweep

If we force recycle an object before the page is swept, we should clear
it in the mark bitmap. If we don't clear it in the bitmap, then during
sweeping we won't account for this free slot, so the `free_slots` count
of the page will be incorrect.

---
 gc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

[Bug #18014] Fix memory leak in GC when using Ractors

When a Ractor is removed, the freelist in the Ractor cache is not
returned to the GC, leaving the freelist permanently lost. This commit
recycles the freelist when the Ractor is destroyed, preventing a memory
leak from occurring.

---
 gc.c          | 116 +++++++++++++++++++++++++++-------------------------
 internal/gc.h |   6 +++
 ractor.c      |   3 ++
 ractor_core.h |   5 +--
 4 files changed, 64 insertions(+), 66 deletions(-)

Co-authored-by: Peter Zhu <peter@peterzhu.ca>
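A note on the Ractor fix before the diff: the leak is closed by splicing a Ractor's cached freelist back onto its page's freelist before the cache is dropped. The standalone sketch below illustrates only that splice, using hypothetical stand-in types and names (free_slot, page, newobj_cache, cache_clear) rather than the real RVALUE, heap_page and Ractor cache structures; the ASAN poisoning and VM locking of the real code are omitted.

    #include <stdio.h>

    /* Hypothetical stand-ins for RVALUE / heap_page / the Ractor newobj cache. */
    struct free_slot { struct free_slot *next; };

    struct page { struct free_slot *freelist; };      /* slots owned by the page */

    struct newobj_cache {
        struct page *using_page;                      /* page the cache allocates from */
        struct free_slot *freelist;                   /* slots still held by the cache */
    };

    /* Give the cached slots back to their page so they are not lost when the
     * cache's owner goes away (a Ractor, in the real code). */
    static void cache_clear(struct newobj_cache *cache)
    {
        struct page *page = cache->using_page;
        struct free_slot *freelist = cache->freelist;

        if (page && freelist) {
            if (page->freelist) {
                /* Walk to the tail of the page's list, then append the cached list. */
                struct free_slot *p = page->freelist;
                while (p->next) p = p->next;
                p->next = freelist;
            }
            else {
                page->freelist = freelist;
            }
        }
        cache->using_page = NULL;
        cache->freelist = NULL;
    }

    int main(void)
    {
        struct free_slot page_slots[2]   = { { &page_slots[1] },   { NULL } };
        struct free_slot cached_slots[2] = { { &cached_slots[1] }, { NULL } };
        struct page pg = { page_slots };
        struct newobj_cache cache = { &pg, cached_slots };

        cache_clear(&cache);

        int n = 0;
        for (struct free_slot *p = pg.freelist; p; p = p->next) n++;
        printf("slots on the page freelist after clearing the cache: %d\n", n); /* 4 */
        return 0;
    }

Appending at the tail preserves the slots already on the page's list; the new rb_gc_ractor_newobj_cache_clear in the diff below does the same walk, wrapped in ASAN unpoison/poison calls so the free slots can be traversed safely.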
This commit is contained in:
parent c4b602e80f
commit a215c6d044

5 changed files with 64 additions and 54 deletions
gc.c (102 changed lines)
@@ -1713,45 +1713,34 @@ heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
     gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
 }
 
-static inline bool
+static inline void
 heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
 {
     asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
+    GC_ASSERT(page->free_slots != 0);
+    GC_ASSERT(page->freelist != NULL);
 
-    if (page->freelist) {
-        page->free_next = heap->free_pages;
-        heap->free_pages = page;
+    page->free_next = heap->free_pages;
+    heap->free_pages = page;
 
-        RUBY_DEBUG_LOG("page:%p freelist:%p", page, page->freelist);
+    RUBY_DEBUG_LOG("page:%p freelist:%p", page, page->freelist);
 
-        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
-        return true;
-    }
-    else {
-        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
-
-        return false;
-    }
+    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
 }
 
 #if GC_ENABLE_INCREMENTAL_MARK
-static inline int
+static inline void
 heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
 {
     asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
-    if (page->freelist) {
-        page->free_next = heap->pooled_pages;
-        heap->pooled_pages = page;
-        objspace->rincgc.pooled_slots += page->free_slots;
-        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
+    GC_ASSERT(page->free_slots != 0);
+    GC_ASSERT(page->freelist != NULL);
 
-        return TRUE;
-    }
-    else {
-        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
+    page->free_next = heap->pooled_pages;
+    heap->pooled_pages = page;
+    objspace->rincgc.pooled_slots += page->free_slots;
 
-        return FALSE;
-    }
+    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
 }
 #endif
 
@@ -5060,20 +5049,7 @@ gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
 
     rb_ractor_t *r = NULL;
     list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
-        struct heap_page *page = r->newobj_cache.using_page;
-        RVALUE *freelist = r->newobj_cache.freelist;
-        RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", page, freelist);
-
-        if (page && freelist) {
-            RVALUE **p = &page->freelist;
-            while (*p) {
-                p = &(*p)->as.free.next;
-            }
-            *p = freelist;
-        }
-
-        r->newobj_cache.using_page = NULL;
-        r->newobj_cache.freelist = NULL;
+        rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
     }
 }
 
@@ -5146,21 +5122,18 @@ gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
         else if (free_slots > 0) {
 #if GC_ENABLE_INCREMENTAL_MARK
             if (need_pool) {
-                if (heap_add_poolpage(objspace, heap, sweep_page)) {
-                    need_pool = FALSE;
-                }
+                heap_add_poolpage(objspace, heap, sweep_page);
+                need_pool = FALSE;
             }
             else {
-                if (heap_add_freepage(heap, sweep_page)) {
-                    swept_slots += free_slots;
-                    if (swept_slots > 2048) {
-                        break;
-                    }
+                heap_add_freepage(heap, sweep_page);
+                swept_slots += free_slots;
+                if (swept_slots > 2048) {
+                    break;
                 }
             }
 #else
             heap_add_freepage(heap, sweep_page);
-            break;
 #endif
         }
         else {
@@ -7978,6 +7951,37 @@ rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
 
 /* GC */
 
+void
+rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
+{
+    struct heap_page *page = newobj_cache->using_page;
+    RVALUE *freelist = newobj_cache->freelist;
+    RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", page, freelist);
+
+    if (page && freelist) {
+        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
+        if (page->freelist) {
+            RVALUE *p = page->freelist;
+            asan_unpoison_object((VALUE)p, false);
+            while (p->as.free.next) {
+                RVALUE *prev = p;
+                p = p->as.free.next;
+                asan_poison_object((VALUE)prev);
+                asan_unpoison_object((VALUE)p, false);
+            }
+            p->as.free.next = freelist;
+            asan_poison_object((VALUE)p);
+        }
+        else {
+            page->freelist = freelist;
+        }
+        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
+    }
+
+    newobj_cache->using_page = NULL;
+    newobj_cache->freelist = NULL;
+}
+
 void
 rb_gc_force_recycle(VALUE obj)
 {
@@ -8007,7 +8011,7 @@ rb_gc_force_recycle(VALUE obj)
     }
     else {
 #endif
-        if (is_old || !GET_HEAP_PAGE(obj)->flags.before_sweep) {
+        if (is_old || GET_HEAP_PAGE(obj)->flags.before_sweep) {
             CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
         }
         CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
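A toy illustration of the accounting problem the first half of the commit message describes: sweep counts a slot as free when its mark bit is clear, so force-recycling a marked slot on a page that has not been swept yet, without clearing its bit, leaves the page's free-slot count one short. The 8-slot bitmap and helper below are hypothetical simplifications, not the real CLEAR_IN_BITMAP/heap_page machinery.

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical 8-slot "page" with a one-byte mark bitmap; the real GC keeps
     * per-page bitmap arrays and clears bits with CLEAR_IN_BITMAP. */
    #define SLOTS 8

    /* Sweep counts a slot as free when its mark bit is not set. */
    static int count_free_slots(uint8_t marks)
    {
        int free_slots = 0;
        for (int i = 0; i < SLOTS; i++) {
            if (!(marks & (1u << i))) free_slots++;
        }
        return free_slots;
    }

    int main(void)
    {
        uint8_t marks = 0x0F;   /* slots 0-3 marked (live), slots 4-7 free */
        int recycled = 2;       /* a marked slot force-recycled before sweep */

        /* Without clearing the bit, sweep still reports 4 free slots even
         * though 5 slots are actually reusable. */
        printf("free_slots, stale mark bit kept : %d\n", count_free_slots(marks));

        /* The fix: clear the recycled slot's mark bit so sweep accounts for it. */
        marks = (uint8_t)(marks & ~(1u << recycled));
        printf("free_slots, mark bit cleared    : %d\n", count_free_slots(marks));
        return 0;
    }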
internal/gc.h (6 changed lines)

@@ -61,6 +61,11 @@ struct rb_objspace; /* in vm_core.h */
     rb_obj_write((VALUE)(a), UNALIGNED_MEMBER_ACCESS((VALUE *)(slot)), \
                  (VALUE)(b), __FILE__, __LINE__)
 
+typedef struct ractor_newobj_cache {
+    struct RVALUE *freelist;
+    struct heap_page *using_page;
+} rb_ractor_newobj_cache_t;
+
 /* gc.c */
 extern VALUE *ruby_initial_gc_stress_ptr;
 extern int ruby_disable_gc;

@@ -100,6 +105,7 @@ void rb_gc_mark_vm_stack_values(long n, const VALUE *values);
 void *ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2));
 void *ruby_sized_xrealloc2(void *ptr, size_t new_count, size_t element_size, size_t old_count) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2, 3));
 void ruby_sized_xfree(void *x, size_t size);
+void rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache);
 RUBY_SYMBOL_EXPORT_END
 
 MJIT_SYMBOL_EXPORT_BEGIN
ractor.c (3 changed lines)

@@ -1443,6 +1443,9 @@ vm_remove_ractor(rb_vm_t *vm, rb_ractor_t *cr)
         }
         vm->ractor.cnt--;
 
+        /* Clear the cached freelist to prevent a memory leak. */
+        rb_gc_ractor_newobj_cache_clear(&cr->newobj_cache);
+
         ractor_status_set(cr, ractor_terminated);
     }
     RB_VM_UNLOCK();
ractor_core.h (5 changed lines)

@@ -138,10 +138,7 @@ struct rb_ractor_struct {
     VALUE verbose;
     VALUE debug;
 
-    struct {
-        struct RVALUE *freelist;
-        struct heap_page *using_page;
-    } newobj_cache;
+    rb_ractor_newobj_cache_t newobj_cache;
 
     // gc.c rb_objspace_reachable_objects_from
     struct gc_mark_func_data_struct {
version.h (2 changed lines)

@@ -12,7 +12,7 @@
 # define RUBY_VERSION_MINOR RUBY_API_VERSION_MINOR
 #define RUBY_VERSION_TEENY 3
 #define RUBY_RELEASE_DATE RUBY_RELEASE_YEAR_STR"-"RUBY_RELEASE_MONTH_STR"-"RUBY_RELEASE_DAY_STR
-#define RUBY_PATCHLEVEL 111
+#define RUBY_PATCHLEVEL 112
 
 #define RUBY_RELEASE_YEAR 2021
 #define RUBY_RELEASE_MONTH 7