diff --git a/class.c b/class.c
index 5fa51df3fa..4c469ffaeb 100644
--- a/class.c
+++ b/class.c
@@ -185,8 +185,7 @@ class_alloc(VALUE flags, VALUE klass)
     RVARGC_NEWOBJ_OF(obj, struct RClass, klass, (flags & T_MASK) | FL_PROMOTED1 /* start from age == 2 */ | (RGENGC_WB_PROTECTED_CLASS ? FL_WB_PROTECTED : 0), payload_size);
 
 #if USE_RVARGC
-    obj->ptr = (rb_classext_t *)rb_rvargc_payload_data_ptr((VALUE)obj + rb_slot_size());
-    RB_OBJ_WRITTEN(obj, Qundef, (VALUE)obj + rb_slot_size());
+    obj->ptr = (rb_classext_t *)rb_gc_rvargc_object_data((VALUE)obj);
 #else
     obj->ptr = ZALLOC(rb_classext_t);
 #endif
diff --git a/gc.c b/gc.c
index 59dcba0a09..4c28c68db4 100644
--- a/gc.c
+++ b/gc.c
@@ -660,6 +660,14 @@ typedef struct mark_stack {
     size_t unused_cache_size;
 } mark_stack_t;
 
+#if USE_RVARGC
+#define SIZE_POOL_COUNT 4
+#else
+#define SIZE_POOL_COUNT 1
+#endif
+#define SIZE_POOL_EDEN_HEAP(size_pool) (&(size_pool)->eden_heap)
+#define SIZE_POOL_TOMB_HEAP(size_pool) (&(size_pool)->tomb_heap)
+
 typedef struct rb_heap_struct {
     struct heap_page *free_pages;
     struct list_head pages;
@@ -673,6 +681,29 @@ typedef struct rb_heap_struct {
     size_t total_slots;      /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
 } rb_heap_t;
 
+typedef struct rb_size_pool_struct {
+#if USE_RVARGC
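+    /* Cached freelist for this pool and the page it was taken from. */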
+    RVALUE *freelist;
+    struct heap_page *using_page;
+#endif
+
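+    /* Width of each slot in this pool, in bytes. */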
+    short slot_size;
+
+    size_t allocatable_pages;
+
+#if USE_RVARGC
+    /* Sweeping statistics */
+    size_t freed_slots;
+    size_t empty_slots;
+
+    /* Global statistics */
+    size_t force_major_gc_count;
+#endif
+
+    rb_heap_t eden_heap;
+    rb_heap_t tomb_heap;
+} rb_size_pool_t;
+
 enum gc_mode {
     gc_mode_none,
     gc_mode_marking,
@@ -708,8 +739,7 @@ typedef struct rb_objspace {
     size_t total_allocated_objects;
     VALUE next_object_id;
 
-    rb_heap_t eden_heap;
-    rb_heap_t tomb_heap; /* heap for zombies and ghosts */
+    rb_size_pool_t size_pools[SIZE_POOL_COUNT];
 
     struct {
 	rb_atomic_t finalizing;
@@ -869,6 +899,8 @@ struct heap_page {
 	unsigned int in_tomb : 1;
     } flags;
 
+    rb_size_pool_t *size_pool;
+
     struct heap_page *free_next;
     RVALUE *start;
     RVALUE *freelist;
@@ -921,12 +953,10 @@ VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
 #define heap_pages_sorted_length objspace->heap_pages.sorted_length
 #define heap_pages_lomem	objspace->heap_pages.range[0]
 #define heap_pages_himem	objspace->heap_pages.range[1]
-#define heap_allocatable_pages	objspace->heap_pages.allocatable_pages
 #define heap_pages_freeable_pages	objspace->heap_pages.freeable_pages
 #define heap_pages_final_slots		objspace->heap_pages.final_slots
 #define heap_pages_deferred_final	objspace->heap_pages.deferred_final
-#define heap_eden               (&objspace->eden_heap)
-#define heap_tomb               (&objspace->tomb_heap)
+#define size_pools              objspace->size_pools
 #define during_gc		objspace->flags.during_gc
 #define finalizing		objspace->atomic_flags.finalizing
 #define finalizer_table 	objspace->finalizer_table
@@ -967,6 +997,69 @@ gc_mode_verify(enum gc_mode mode)
     return mode;
 }
 
+static inline bool
+has_sweeping_pages(rb_objspace_t *objspace)
+{
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        if (size_pools[i].eden_heap.sweeping_page) {
+            return TRUE;
+        }
+    }
+    return FALSE;
+}
+
+static inline size_t
+heap_eden_total_pages(rb_objspace_t *objspace)
+{
+    size_t count = 0;
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        count += size_pools[i].eden_heap.total_pages;
+    }
+    return count;
+}
+
+static inline size_t
+heap_eden_total_slots(rb_objspace_t *objspace)
+{
+    size_t count = 0;
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        count += size_pools[i].eden_heap.total_slots;
+    }
+    return count;
+}
+
+static inline size_t
+heap_tomb_total_pages(rb_objspace_t *objspace)
+{
+    size_t count = 0;
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        count += size_pools[i].tomb_heap.total_pages;
+    }
+    return count;
+}
+
+static inline size_t
+heap_allocatable_pages(rb_objspace_t *objspace)
+{
+    size_t count = 0;
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        count += size_pools[i].allocatable_pages;
+    }
+    return count;
+}
+
+static inline size_t
+heap_allocatable_slots(rb_objspace_t *objspace)
+{
+    size_t count = 0;
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        rb_size_pool_t *size_pool = &size_pools[i];
+        int slot_size_multiple = size_pool->slot_size / sizeof(RVALUE);
+        count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple;
+    }
+    return count;
+}
+
 #define gc_mode(objspace)                gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
 #define gc_mode_set(objspace, mode)      ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
 
@@ -983,8 +1076,7 @@ gc_mode_verify(enum gc_mode mode)
 #else
 #define will_be_incremental_marking(objspace) FALSE
 #endif
-#define has_sweeping_pages(heap)         ((heap)->sweeping_page != 0)
-#define is_lazy_sweeping(heap)           (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap))
+#define is_lazy_sweeping(objspace)           (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
 
 #if SIZEOF_LONG == SIZEOF_VOIDP
 # define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
@@ -1056,14 +1148,14 @@ static void gc_marks(rb_objspace_t *objspace, int full_mark);
 static void gc_marks_start(rb_objspace_t *objspace, int full);
 static int  gc_marks_finish(rb_objspace_t *objspace);
 static void gc_marks_rest(rb_objspace_t *objspace);
-static void gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap);
+static void gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
 
 static void gc_sweep(rb_objspace_t *objspace);
 static void gc_sweep_start(rb_objspace_t *objspace);
 static void gc_sweep_finish(rb_objspace_t *objspace);
-static int  gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap);
+static int  gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
 static void gc_sweep_rest(rb_objspace_t *objspace);
-static void gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap);
+static void gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
 
 static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
 static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
@@ -1285,13 +1377,16 @@ check_rvalue_consistency_force(const VALUE obj, int terminate)
         else if (!is_pointer_to_heap(objspace, (void *)obj)) {
             /* check if it is in tomb_pages */
             struct heap_page *page = NULL;
-            list_for_each(&heap_tomb->pages, page, page_node) {
-                if (&page->start[0] <= (RVALUE *)obj &&
-                    (RVALUE *)obj < &page->start[page->total_slots]) {
-                    fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
-                            (void *)obj, (void *)page);
-                    err++;
-                    goto skip;
+            for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+                rb_size_pool_t *size_pool = &size_pools[i];
+                list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
+                    if (&page->start[0] <= (RVALUE *)obj &&
+                            (intptr_t)obj   <  ((intptr_t)page->start + (page->total_slots * size_pool->slot_size))) {
+                        fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
+                                (void *)obj, (void *)page);
+                        err++;
+                        goto skip;
+                    }
                 }
             }
             bp();
@@ -1622,8 +1717,16 @@ rb_objspace_alloc(void)
 {
     rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
     malloc_limit = gc_params.malloc_limit_min;
-    list_head_init(&objspace->eden_heap.pages);
-    list_head_init(&objspace->tomb_heap.pages);
+
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        rb_size_pool_t *size_pool = &size_pools[i];
+
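+        /* Slot sizes double with each pool: sizeof(RVALUE), 2x, 4x, 8x. */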
+        size_pool->slot_size = sizeof(RVALUE) * (1 << i);
+
+        list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
+        list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
+    }
+
     dont_gc_on();
 
     return objspace;
@@ -1635,7 +1738,7 @@ static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
 void
 rb_objspace_free(rb_objspace_t *objspace)
 {
-    if (is_lazy_sweeping(heap_eden))
+    if (is_lazy_sweeping(objspace))
 	rb_bug("lazy sweeping underway when freeing object space");
 
     if (objspace->profile.records) {
@@ -1661,8 +1764,11 @@ rb_objspace_free(rb_objspace_t *objspace)
 	heap_pages_lomem = 0;
 	heap_pages_himem = 0;
 
-	objspace->eden_heap.total_pages = 0;
-	objspace->eden_heap.total_slots = 0;
+        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+            rb_size_pool_t *size_pool = &size_pools[i];
+            SIZE_POOL_EDEN_HEAP(size_pool)->total_pages = 0;
+            SIZE_POOL_EDEN_HEAP(size_pool)->total_slots = 0;
+        }
     }
     st_free_table(objspace->id_to_obj_tbl);
     st_free_table(objspace->obj_to_id_tbl);
@@ -1702,22 +1808,25 @@ heap_pages_expand_sorted(rb_objspace_t *objspace)
      * however, if there are pages which do not have empty slots, then try to create new pages
      * so that the additional allocatable_pages counts (heap_tomb->total_pages) are added.
      */
-    size_t next_length = heap_allocatable_pages;
-    next_length += heap_eden->total_pages;
-    next_length += heap_tomb->total_pages;
+    size_t next_length = heap_allocatable_pages(objspace);
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        rb_size_pool_t *size_pool = &size_pools[i];
+        next_length += SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
+        next_length += SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
+    }
 
     if (next_length > heap_pages_sorted_length) {
 	heap_pages_expand_sorted_to(objspace, next_length);
     }
 
-    GC_ASSERT(heap_allocatable_pages + heap_eden->total_pages <= heap_pages_sorted_length);
+    GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
     GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
 }
 
 static void
-heap_allocatable_pages_set(rb_objspace_t *objspace, size_t s)
+size_pool_allocatable_pages_set(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t s)
 {
-    heap_allocatable_pages = s;
+    size_pool->allocatable_pages = s;
     heap_pages_expand_sorted(objspace);
 }
 
@@ -1740,7 +1849,7 @@ heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj
     if (RGENGC_CHECK_MODE &&
         /* obj should belong to page */
         !(&page->start[0] <= (RVALUE *)obj &&
-          (RVALUE *)obj   <  &page->start[page->total_slots] &&
+          (intptr_t)obj   <  ((intptr_t)page->start + (page->total_slots * page->size_pool->slot_size)) &&
           obj % sizeof(RVALUE) == 0)) {
         rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
     }
@@ -1804,39 +1913,48 @@ heap_pages_free_unused_pages(rb_objspace_t *objspace)
 {
     size_t i, j;
 
-    if (!list_empty(&heap_tomb->pages)) {
-	for (i = j = 1; j < heap_allocated_pages; i++) {
-	    struct heap_page *page = heap_pages_sorted[i];
+    bool has_pages_in_tomb_heap = FALSE;
+    for (i = 0; i < SIZE_POOL_COUNT; i++) {
+        if (!list_empty(&size_pools[i].tomb_heap.pages)) {
+            has_pages_in_tomb_heap = TRUE;
+            break;
+        }
+    }
 
-	    if (page->flags.in_tomb && page->free_slots == page->total_slots) {
-		heap_unlink_page(objspace, heap_tomb, page);
-		heap_page_free(objspace, page);
-	    }
-	    else {
-		if (i != j) {
-		    heap_pages_sorted[j] = page;
-		}
-		j++;
-	    }
-	}
+    if (has_pages_in_tomb_heap) {
+        for (i = j = 1; j < heap_allocated_pages; i++) {
+            struct heap_page *page = heap_pages_sorted[i];
+
+            if (page->flags.in_tomb && page->free_slots == page->total_slots) {
+                heap_unlink_page(objspace, SIZE_POOL_TOMB_HEAP(page->size_pool), page);
+                heap_page_free(objspace, page);
+            }
+            else {
+                if (i != j) {
+                    heap_pages_sorted[j] = page;
+                }
+                j++;
+            }
+        }
 
         struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
-        RVALUE *himem = hipage->start + hipage->total_slots;
-        GC_ASSERT(himem <= heap_pages_himem);
-        heap_pages_himem = himem;
+        intptr_t himem = (intptr_t)hipage->start + (hipage->total_slots * hipage->size_pool->slot_size);
+        GC_ASSERT(himem <= (intptr_t)heap_pages_himem);
+        heap_pages_himem = (RVALUE *)himem;
 
 	GC_ASSERT(j == heap_allocated_pages);
     }
 }
 
 static struct heap_page *
-heap_page_allocate(rb_objspace_t *objspace)
+heap_page_allocate(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
 {
-    RVALUE *start, *end, *p;
+    intptr_t start, end, p;
     struct heap_page *page;
     struct heap_page_body *page_body = 0;
-    size_t hi, lo, mid;
-    int limit = HEAP_PAGE_OBJ_LIMIT;
+    intptr_t hi, lo, mid;
+    size_t stride = size_pool->slot_size;
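+    /* Number of slots of this pool's size that fit after the page header. */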
+    unsigned int limit = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)))/(int)stride;
 
     /* assign heap_page body (contains heap_page_header and RVALUEs) */
     page_body = (struct heap_page_body *)rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
@@ -1852,26 +1970,39 @@ heap_page_allocate(rb_objspace_t *objspace)
     }
 
     /* adjust obj_limit (object number available in this page) */
-    start = (RVALUE*)((VALUE)page_body + sizeof(struct heap_page_header));
+    start = (intptr_t)((VALUE)page_body + sizeof(struct heap_page_header));
+
     if ((VALUE)start % sizeof(RVALUE) != 0) {
-	int delta = (int)(sizeof(RVALUE) - ((VALUE)start % sizeof(RVALUE)));
-	start = (RVALUE*)((VALUE)start + delta);
-	limit = (HEAP_PAGE_SIZE - (int)((VALUE)start - (VALUE)page_body))/(int)sizeof(RVALUE);
+        int delta = (int)sizeof(RVALUE) - (start % (int)sizeof(RVALUE));
+        start = start + delta;
+        GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);
+
+        /* Find a NUM_IN_PAGE that is evenly divisible by `stride`.
+         * This is to ensure that objects are aligned with bit planes.
+         * In other words, ensure each bit plane covers a whole number
+         * of objects. */
+        if (NUM_IN_PAGE(start) == 1) {
+            start += stride - sizeof(RVALUE);
+        }
+
+        GC_ASSERT(NUM_IN_PAGE(start) * sizeof(RVALUE) % stride == 0);
+
+	limit = (HEAP_PAGE_SIZE - (int)(start - (intptr_t)page_body))/(int)stride;
     }
-    end = start + limit;
+    end = start + (limit * (int)stride);
 
     /* setup heap_pages_sorted */
     lo = 0;
-    hi = heap_allocated_pages;
+    hi = (intptr_t)heap_allocated_pages;
     while (lo < hi) {
 	struct heap_page *mid_page;
 
 	mid = (lo + hi) / 2;
 	mid_page = heap_pages_sorted[mid];
-	if (mid_page->start < start) {
+	if ((intptr_t)mid_page->start < start) {
 	    lo = mid + 1;
 	}
-	else if (mid_page->start > start) {
+	else if ((intptr_t)mid_page->start > start) {
 	    hi = mid;
 	}
 	else {
@@ -1879,7 +2010,7 @@ heap_page_allocate(rb_objspace_t *objspace)
 	}
     }
 
-    if (hi < heap_allocated_pages) {
+    if (hi < (intptr_t)heap_allocated_pages) {
 	MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header*, heap_allocated_pages - hi);
     }
 
@@ -1887,8 +2018,8 @@ heap_page_allocate(rb_objspace_t *objspace)
 
     heap_allocated_pages++;
 
-    GC_ASSERT(heap_eden->total_pages + heap_allocatable_pages <= heap_pages_sorted_length);
-    GC_ASSERT(heap_eden->total_pages + heap_tomb->total_pages == heap_allocated_pages - 1);
+    GC_ASSERT(heap_eden_total_pages(objspace) + heap_allocatable_pages(objspace) <= heap_pages_sorted_length);
+    GC_ASSERT(heap_eden_total_pages(objspace) + heap_tomb_total_pages(objspace) == heap_allocated_pages - 1);
     GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
 
     objspace->profile.total_allocated_pages++;
@@ -1898,14 +2029,15 @@ heap_page_allocate(rb_objspace_t *objspace)
 	       heap_allocated_pages, heap_pages_sorted_length);
     }
 
-    if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
-    if (heap_pages_himem < end) heap_pages_himem = end;
+    if (heap_pages_lomem == 0 || (intptr_t)heap_pages_lomem > start) heap_pages_lomem = (RVALUE *)start;
+    if ((intptr_t)heap_pages_himem < end) heap_pages_himem = (RVALUE *)end;
 
-    page->start = start;
+    page->start = (RVALUE *)start;
     page->total_slots = limit;
+    page->size_pool = size_pool;
     page_body->header.page = page;
 
-    for (p = start; p != end; p++) {
+    for (p = start; p != end; p += stride) {
 	gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
 	heap_page_add_freeobj(objspace, page, (VALUE)p);
     }
@@ -1916,14 +2048,14 @@ heap_page_allocate(rb_objspace_t *objspace)
 }
 
 static struct heap_page *
-heap_page_resurrect(rb_objspace_t *objspace)
+heap_page_resurrect(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
 {
     struct heap_page *page = 0, *next;
 
-    list_for_each_safe(&heap_tomb->pages, page, next, page_node) {
+    list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
         asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
 	if (page->freelist != NULL) {
-	    heap_unlink_page(objspace, heap_tomb, page);
+	    heap_unlink_page(objspace, &size_pool->tomb_heap, page);
             asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
 	    return page;
 	}
@@ -1933,65 +2065,65 @@ heap_page_resurrect(rb_objspace_t *objspace)
 }
 
 static struct heap_page *
-heap_page_create(rb_objspace_t *objspace)
+heap_page_create(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
 {
     struct heap_page *page;
     const char *method = "recycle";
 
-    heap_allocatable_pages--;
+    size_pool->allocatable_pages--;
 
-    page = heap_page_resurrect(objspace);
+    page = heap_page_resurrect(objspace, size_pool);
 
     if (page == NULL) {
-	page = heap_page_allocate(objspace);
+	page = heap_page_allocate(objspace, size_pool);
 	method = "allocate";
     }
     if (0) fprintf(stderr, "heap_page_create: %s - %p, "
                    "heap_allocated_pages: %"PRIdSIZE", "
                    "heap_allocated_pages: %"PRIdSIZE", "
                    "tomb->total_pages: %"PRIdSIZE"\n",
-                   method, (void *)page, heap_pages_sorted_length, heap_allocated_pages, heap_tomb->total_pages);
+                   method, (void *)page, heap_pages_sorted_length, heap_allocated_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
     return page;
 }
 
 static void
-heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
+heap_add_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct heap_page *page)
 {
     /* Adding to eden heap during incremental sweeping is forbidden */
-    GC_ASSERT(!(heap == heap_eden && heap->sweeping_page));
-    page->flags.in_tomb = (heap == heap_tomb);
+    GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
+    GC_ASSERT(page->size_pool == size_pool);
+    page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
     list_add_tail(&heap->pages, &page->page_node);
     heap->total_pages++;
     heap->total_slots += page->total_slots;
 }
 
 static void
-heap_assign_page(rb_objspace_t *objspace, rb_heap_t *heap)
+heap_assign_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
 {
-    struct heap_page *page = heap_page_create(objspace);
-    heap_add_page(objspace, heap, page);
+    struct heap_page *page = heap_page_create(objspace, size_pool);
+    heap_add_page(objspace, size_pool, heap, page);
     heap_add_freepage(heap, page);
 }
 
 static void
-heap_add_pages(rb_objspace_t *objspace, rb_heap_t *heap, size_t add)
+heap_add_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, size_t add)
 {
     size_t i;
 
-    heap_allocatable_pages_set(objspace, add);
+    size_pool_allocatable_pages_set(objspace, size_pool, add);
 
     for (i = 0; i < add; i++) {
-	heap_assign_page(objspace, heap);
+	heap_assign_page(objspace, size_pool, heap);
     }
 
-    GC_ASSERT(heap_allocatable_pages == 0);
+    GC_ASSERT(size_pool->allocatable_pages == 0);
 }
 
 static size_t
-heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots)
+heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots, size_t used)
 {
     double goal_ratio = gc_params.heap_free_slots_goal_ratio;
-    size_t used = heap_allocated_pages + heap_allocatable_pages;
     size_t next_used;
 
     if (goal_ratio == 0.0) {
@@ -2031,36 +2163,36 @@ heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots
 }
 
 static int
-heap_increment(rb_objspace_t *objspace, rb_heap_t *heap)
+heap_increment(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
 {
-    if (heap_allocatable_pages > 0) {
+    if (size_pool->allocatable_pages > 0) {
 	gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %"PRIdSIZE", "
                   "heap_pages_inc: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
-		  heap_pages_sorted_length, heap_allocatable_pages, heap->total_pages);
+		  heap_pages_sorted_length, size_pool->allocatable_pages, heap->total_pages);
 
-	GC_ASSERT(heap_allocatable_pages + heap_eden->total_pages <= heap_pages_sorted_length);
+	GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
 	GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
 
-	heap_assign_page(objspace, heap);
+	heap_assign_page(objspace, size_pool, heap);
 	return TRUE;
     }
     return FALSE;
 }
 
 static void
-heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap)
+heap_prepare(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
 {
     GC_ASSERT(heap->free_pages == NULL);
 
-    if (is_lazy_sweeping(heap)) {
-	gc_sweep_continue(objspace, heap);
+    if (is_lazy_sweeping(objspace)) {
+	gc_sweep_continue(objspace, size_pool, heap);
     }
     else if (is_incremental_marking(objspace)) {
-	gc_marks_continue(objspace, heap);
+	gc_marks_continue(objspace, size_pool, heap);
     }
 
     if (heap->free_pages == NULL &&
-	(will_be_incremental_marking(objspace) || heap_increment(objspace, heap) == FALSE) &&
+        (will_be_incremental_marking(objspace) || heap_increment(objspace, size_pool, heap) == FALSE) &&
 	gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
 	rb_memerror();
     }
@@ -2189,19 +2321,24 @@ newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace,
 }
 
 static inline void heap_add_freepage(rb_heap_t *heap, struct heap_page *page);
-static struct heap_page * heap_next_freepage(rb_objspace_t *objspace, rb_heap_t *heap);
+static struct heap_page *heap_next_freepage(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
 static inline void ractor_set_cache(rb_ractor_t *cr, struct heap_page *page);
 
 #if USE_RVARGC
 void *
-rb_rvargc_payload_data_ptr(VALUE phead)
+rb_gc_rvargc_object_data(VALUE obj)
 {
-    return (void *)(phead + sizeof(struct RPayload));
+    return (void *)(obj + sizeof(RVALUE));
 }
+#endif
 
 static inline VALUE
 ractor_cached_free_region(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size)
 {
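+    /* The per-ractor cache only holds slots of size sizeof(RVALUE); larger
+     * requests are served from the size pools instead. */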
+    if (size != sizeof(RVALUE)) {
+        return Qfalse;
+    }
+
     RVALUE *p = cr->newobj_cache.freelist;
 
     if (p) {
@@ -2216,14 +2353,14 @@ ractor_cached_free_region(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size)
 }
 
 static struct heap_page *
-heap_next_freepage(rb_objspace_t *objspace, rb_heap_t *heap)
+heap_next_freepage(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
 {
     ASSERT_vm_locking();
 
     struct heap_page *page;
 
     while (heap->free_pages == NULL) {
-	heap_prepare(objspace, heap);
+	heap_prepare(objspace, size_pool, heap);
     }
     page = heap->free_pages;
     heap->free_pages = page->free_next;
@@ -2255,7 +2392,8 @@ ractor_cache_slots(rb_objspace_t *objspace, rb_ractor_t *cr)
 {
     ASSERT_vm_locking();
 
-    struct heap_page *page = heap_next_freepage(objspace, heap_eden);
+    rb_size_pool_t *size_pool = &size_pools[0];
+    struct heap_page *page = heap_next_freepage(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
 
     ractor_set_cache(cr, page);
 }
@@ -2270,6 +2408,49 @@ newobj_fill(VALUE obj, VALUE v1, VALUE v2, VALUE v3)
     return obj;
 }
 
+#if USE_RVARGC
+static inline VALUE
+heap_get_freeobj(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
+{
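+    /* Pop the next slot from the pool's cached freelist, refilling the cache
+     * from the next free page when it runs empty. */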
+    RVALUE *p = size_pool->freelist;
+
+    if (UNLIKELY(p == NULL)) {
+        struct heap_page *page = heap_next_freepage(objspace, size_pool, heap);
+        size_pool->using_page = page;
+
+        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
+        p = page->freelist;
+        page->freelist = NULL;
+        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
+        page->free_slots = 0;
+    }
+
+    asan_unpoison_object((VALUE)p, true);
+    size_pool->freelist = p->as.free.next;
+
+    return (VALUE)p;
+}
+
+static inline rb_size_pool_t *
+size_pool_for_size(rb_objspace_t *objspace, size_t size)
+{
+    size_t slot_count = CEILDIV(size, sizeof(RVALUE));
+
+    /* size_pool_idx is ceil(log2(slot_count)) */
+    size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
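+    /* Requests that fit in a single RVALUE are served by the ractor cache,
+     * so the smallest pool is never selected here. */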
+    GC_ASSERT(size_pool_idx > 0);
+    if (size_pool_idx >= SIZE_POOL_COUNT) {
+        rb_bug("size_pool_for_size: allocation size too large");
+    }
+
+    rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
+    GC_ASSERT(size_pool->slot_size >= (short)size);
+    GC_ASSERT(size_pools[size_pool_idx - 1].slot_size < (short)size);
+
+    return size_pool;
+}
+#endif
+
 ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t alloc_size));
 
 static inline VALUE
@@ -2294,9 +2475,22 @@ newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *
             }
         }
 
-        // allocate new slot
-        while ((obj = ractor_cached_free_region(objspace, cr, alloc_size)) == Qfalse) {
-            ractor_cache_slots(objspace, cr);
+        if (alloc_size <= sizeof(RVALUE)) {
+            // allocate new slot
+            while ((obj = ractor_cached_free_region(objspace, cr, alloc_size)) == Qfalse) {
+                ractor_cache_slots(objspace, cr);
+            }
+        }
+        else {
+#if USE_RVARGC
+            rb_size_pool_t *size_pool = size_pool_for_size(objspace, alloc_size);
+
+            obj = heap_get_freeobj(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
+
+            memset((void *)obj, 0, size_pool->slot_size);
+#else
+            rb_bug("unreachable when not using rvargc");
+#endif
         }
         GC_ASSERT(obj != 0);
         newobj_init(klass, flags, wb_protected, objspace, obj);
@@ -2636,13 +2830,15 @@ is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
 	mid = (lo + hi) / 2;
 	page = heap_pages_sorted[mid];
 	if (page->start <= p) {
-	    if (p < page->start + page->total_slots) {
+	    if ((intptr_t)p < ((intptr_t)page->start + (page->total_slots * page->size_pool->slot_size))) {
                 RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
 
                 if (page->flags.in_tomb) {
                     return FALSE;
                 }
                 else {
+                    if ((NUM_IN_PAGE(p) * sizeof(RVALUE)) % page->size_pool->slot_size != 0) return FALSE;
+
                     return TRUE;
                 }
 	    }
@@ -3275,7 +3471,16 @@ Init_heap(void)
     objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
 #endif
 
-    heap_add_pages(objspace, heap_eden, gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);
+    heap_add_pages(objspace, &size_pools[0], &size_pools[0].eden_heap, gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);
+
+    /* Give other size pools allocatable pages. */
+    for (int i = 1; i < SIZE_POOL_COUNT; i++) {
+        rb_size_pool_t *size_pool = &size_pools[i];
+        int multiple = size_pool->slot_size / sizeof(RVALUE);
+        size_pool->allocatable_pages = gc_params.heap_init_slots * multiple / HEAP_PAGE_OBJ_LIMIT;
+    }
+    heap_pages_expand_sorted(objspace);
+
     init_mark_stack(&objspace->mark_stack);
 
     objspace->profile.invoke_time = getrusage_time();
@@ -3302,8 +3507,8 @@ struct each_obj_data {
     each_obj_callback *callback;
     void *data;
 
-    struct heap_page **pages;
-    size_t pages_count;
+    struct heap_page **pages[SIZE_POOL_COUNT];
+    size_t pages_counts[SIZE_POOL_COUNT];
 };
 
 static VALUE
@@ -3317,10 +3522,14 @@ objspace_each_objects_ensure(VALUE arg)
         objspace->flags.dont_incremental = FALSE;
     }
 
-    /* Free pages buffer */
-    struct heap_page **pages = data->pages;
-    GC_ASSERT(pages);
-    free(pages);
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        struct heap_page **pages = data->pages[i];
+        /* pages could be NULL if an error was raised during setup (e.g.
+         * malloc failed due to out of memory). */
+        if (pages) {
+            free(pages);
+        }
+    }
 
     return Qnil;
 }
@@ -3330,35 +3539,55 @@ objspace_each_objects_try(VALUE arg)
 {
     struct each_obj_data *data = (struct each_obj_data *)arg;
     rb_objspace_t *objspace = data->objspace;
-    struct heap_page **pages = data->pages;
-    size_t pages_count = data->pages_count;
 
-    struct heap_page *page = list_top(&heap_eden->pages, struct heap_page, page_node);
-    for (size_t i = 0; i < pages_count; i++) {
-        /* If we have reached the end of the linked list then there are no
-         * more pages, so break. */
-        if (page == NULL) break;
+    /* Copy pages from all size_pools to their respective buffers. */
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        rb_size_pool_t *size_pool = &size_pools[i];
+        size_t size = size_mul_or_raise(SIZE_POOL_EDEN_HEAP(size_pool)->total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
 
-        /* If this page does not match the one in the buffer, then move to
-         * the next page in the buffer. */
-        if (pages[i] != page) continue;
+        struct heap_page **pages = malloc(size);
+        if (!pages) rb_memerror();
 
-        RVALUE *pstart = page->start;
-        RVALUE *pend = pstart + page->total_slots;
-        RVALUE *cursor_end = pstart;
+        /* Set up pages buffer by iterating over all pages in the current eden
+         * heap. This will be a snapshot of the state of the heap before we
+         * call the callback over each page that exists in this buffer. Thus it
+         * is safe for the callback to allocate objects without possibly entering
+         * an infinite loop. */
+        struct heap_page *page = 0;
+        size_t pages_count = 0;
+        list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
+            pages[pages_count] = page;
+            pages_count++;
+        }
+        data->pages[i] = pages;
+        data->pages_counts[i] = pages_count;
+        GC_ASSERT(pages_count == SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
+    }
 
-        while (cursor_end < pend) {
-            int payload_len = 0;
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        rb_size_pool_t *size_pool = &size_pools[i];
+        size_t pages_count = data->pages_counts[i];
+        struct heap_page **pages = data->pages[i];
 
-            cursor_end = pend;
+        struct heap_page *page = list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
+        for (size_t i = 0; i < pages_count; i++) {
+            /* If we have reached the end of the linked list then there are no
+             * more pages, so break. */
+            if (page == NULL) break;
 
-            if ((*data->callback)(pstart, cursor_end, sizeof(RVALUE), data->data)) {
+            /* If this page does not match the one in the buffer, then move to
+             * the next page in the buffer. */
+            if (pages[i] != page) continue;
+
+            intptr_t pstart = (intptr_t)page->start;
+            intptr_t pend = pstart + (page->total_slots * size_pool->slot_size);
+
+            if ((*data->callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) {
                 break;
             }
 
+            page = list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
         }
-
-        page = list_next(&heap_eden->pages, page, page_node);
     }
 
     return Qnil;
@@ -3418,25 +3647,6 @@ objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void
         objspace->flags.dont_incremental = TRUE;
     }
 
-    /* Create pages buffer */
-    size_t size = size_mul_or_raise(heap_eden->total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
-    struct heap_page **pages = malloc(size);
-    if (!pages) rb_memerror();
-
-    /* Set up pages buffer by iterating over all pages in the current eden
-     * heap. This will be a snapshot of the state of the heap before we
-     * call the callback over each page that exists in this buffer. Thus it
-     * is safe for the callback to allocate objects without possibly entering
-     * an infinite loop. */
-    struct heap_page *page = 0;
-    size_t pages_count = 0;
-    list_for_each(&heap_eden->pages, page, page_node) {
-        pages[pages_count] = page;
-        pages_count++;
-    }
-    GC_ASSERT(pages_count == heap_eden->total_pages);
-
-    /* Run the callback */
     struct each_obj_data each_obj_data = {
         .objspace = objspace,
         .reenable_incremental = reenable_incremental,
@@ -3444,8 +3654,8 @@ objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void
         .callback = callback,
         .data = data,
 
-        .pages = pages,
-        .pages_count = pages_count
+        .pages = {NULL},
+        .pages_counts = {0},
     };
     rb_ensure(objspace_each_objects_try, (VALUE)&each_obj_data,
               objspace_each_objects_ensure, (VALUE)&each_obj_data);
@@ -3932,7 +4142,6 @@ bool rb_obj_is_main_ractor(VALUE gv);
 void
 rb_objspace_call_finalizer(rb_objspace_t *objspace)
 {
-    RVALUE *p, *pend;
     size_t i;
 
 #if RGENGC_CHECK_MODE >= 2
@@ -3973,8 +4182,12 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
 
     /* run data/file object's finalizers */
     for (i = 0; i < heap_allocated_pages; i++) {
-	p = heap_pages_sorted[i]->start; pend = p + heap_pages_sorted[i]->total_slots;
-	while (p < pend) {
+        struct heap_page *page = heap_pages_sorted[i];
+        short stride = page->size_pool->slot_size;
+
+        uintptr_t p = (uintptr_t)page->start;
+        uintptr_t pend = p + page->total_slots * stride;
+        for (; p < pend; p += stride) {
             VALUE vp = (VALUE)p;
             void *poisoned = asan_poisoned_object_p(vp);
             asan_unpoison_object(vp, false);
@@ -3988,7 +4201,7 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
                 if (RTYPEDDATA_P(vp)) {
 		    RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
 		}
-                p->as.free.flags = 0;
+                RANY(p)->as.free.flags = 0;
 		if (RANY(p)->as.data.dfree == RUBY_DEFAULT_FREE) {
 		    xfree(DATA_PTR(p));
 		}
@@ -4008,7 +4221,6 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
                 GC_ASSERT(BUILTIN_TYPE(vp) == T_NONE);
                 asan_poison_object(vp);
             }
-	    p++;
 	}
     }
 
@@ -4034,7 +4246,7 @@ is_id_value(rb_objspace_t *objspace, VALUE ptr)
 }
 
 static inline int
-heap_is_swept_object(rb_objspace_t *objspace, rb_heap_t *heap, VALUE ptr)
+heap_is_swept_object(rb_objspace_t *objspace, VALUE ptr)
 {
     struct heap_page *page = GET_HEAP_PAGE(ptr);
     return page->flags.before_sweep ? FALSE : TRUE;
@@ -4043,7 +4255,7 @@ heap_is_swept_object(rb_objspace_t *objspace, rb_heap_t *heap, VALUE ptr)
 static inline int
 is_swept_object(rb_objspace_t *objspace, VALUE ptr)
 {
-    if (heap_is_swept_object(objspace, heap_eden, ptr)) {
+    if (heap_is_swept_object(objspace, ptr)) {
 	return TRUE;
     }
     else {
@@ -4055,7 +4267,7 @@ is_swept_object(rb_objspace_t *objspace, VALUE ptr)
 static inline int
 is_garbage_object(rb_objspace_t *objspace, VALUE ptr)
 {
-    if (!is_lazy_sweeping(heap_eden) ||
+    if (!is_lazy_sweeping(objspace) ||
 	is_swept_object(objspace, ptr) ||
 	MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {
 
@@ -4577,17 +4789,17 @@ count_objects(int argc, VALUE *argv, VALUE os)
 
     for (i = 0; i < heap_allocated_pages; i++) {
 	struct heap_page *page = heap_pages_sorted[i];
-	RVALUE *p, *pend;
-        int stride = 1;
+        short stride = page->size_pool->slot_size;
 
-	p = page->start; pend = p + page->total_slots;
+        uintptr_t p = (uintptr_t)page->start;
+        uintptr_t pend = p + page->total_slots * stride;
         for (;p < pend; p += stride) {
-            stride = 1;
             VALUE vp = (VALUE)p;
+            GC_ASSERT((NUM_IN_PAGE(vp) * sizeof(RVALUE)) % page->size_pool->slot_size == 0);
 
             void *poisoned = asan_poisoned_object_p(vp);
             asan_unpoison_object(vp, false);
-            if (p->as.basic.flags) {
+            if (RANY(p)->as.basic.flags) {
                 counts[BUILTIN_TYPE(vp)]++;
 	    }
 	    else {
@@ -4628,7 +4840,13 @@ count_objects(int argc, VALUE *argv, VALUE os)
 static size_t
 objspace_available_slots(rb_objspace_t *objspace)
 {
-    return heap_eden->total_slots + heap_tomb->total_slots;
+    size_t total_slots = 0;
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        rb_size_pool_t *size_pool = &size_pools[i];
+        total_slots += SIZE_POOL_EDEN_HEAP(size_pool)->total_slots;
+        total_slots += SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
+    }
+    return total_slots;
 }
 
 static size_t
@@ -4651,7 +4869,7 @@ gc_setup_mark_bits(struct heap_page *page)
 }
 
 static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
-static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free);
+static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t slot_size);
 
 static void
 lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
@@ -4688,7 +4906,7 @@ unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
 }
 
 static inline bool
-try_move_in_plane(rb_objspace_t *objspace, rb_heap_t *heap, intptr_t p, bits_t bits, VALUE dest)
+try_move_in_plane(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page, intptr_t p, bits_t bits, VALUE dest)
 {
     if (bits) {
         do {
@@ -4707,7 +4925,7 @@ try_move_in_plane(rb_objspace_t *objspace, rb_heap_t *heap, intptr_t p, bits_t b
                         from_freelist = true;
                     }
 
-                    gc_move(objspace, (VALUE)p, dest);
+                    gc_move(objspace, (VALUE)p, dest, page->size_pool->slot_size);
                     gc_pin(objspace, (VALUE)p);
                     heap->compact_cursor_index = (RVALUE *)p;
                     if (from_freelist) {
@@ -4756,7 +4974,7 @@ try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page,
         bits_t bits = mark_bits[index] & ~pin_bits[index];
 
         bits >>= NUM_IN_PAGE(p);
-        if (try_move_in_plane(objspace, heap, (intptr_t)p, bits, dest)) return 1;
+        if (try_move_in_plane(objspace, heap, sweep_page, (intptr_t)p, bits, dest)) return 1;
 
         if (index == 0) {
             p = cursor->start + (BITS_BITLENGTH - NUM_IN_PAGE(cursor->start));
@@ -4769,7 +4987,8 @@ try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page,
          * marked, so we iterate using the marking bitmap */
         for (size_t i = index + 1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
             bits_t bits = mark_bits[i] & ~pin_bits[i];
-            if (try_move_in_plane(objspace, heap, (intptr_t)p, bits, dest)) return 1;
+
+            if (try_move_in_plane(objspace, heap, sweep_page, (intptr_t)p, bits, dest)) return 1;
             p += BITS_BITLENGTH;
         }
 
@@ -4810,7 +5029,7 @@ gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
     }
 }
 
-static void gc_update_references(rb_objspace_t * objspace, rb_heap_t *heap);
+static void gc_update_references(rb_objspace_t * objspace);
 static void invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page);
 
 static void
@@ -4959,11 +5178,14 @@ check_stack_for_moved(rb_objspace_t *objspace)
 }
 
 static void
-gc_compact_finish(rb_objspace_t *objspace, rb_heap_t *heap)
+gc_compact_finish(rb_objspace_t *objspace, rb_size_pool_t *pool, rb_heap_t *heap)
 {
-    GC_ASSERT(heap->sweeping_page == heap->compact_cursor);
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        rb_size_pool_t *size_pool = &size_pools[i];
+        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
+        gc_unprotect_pages(objspace, heap);
+    }
 
-    gc_unprotect_pages(objspace, heap);
     uninstall_handlers();
 
     /* The mutator is allowed to run during incremental sweeping. T_MOVED
@@ -4973,10 +5195,16 @@ gc_compact_finish(rb_objspace_t *objspace, rb_heap_t *heap)
      * then revert any moved objects that made it to the stack. */
     check_stack_for_moved(objspace);
 
-    gc_update_references(objspace, heap);
-    heap->compact_cursor = NULL;
-    heap->compact_cursor_index = 0;
+    gc_update_references(objspace);
     objspace->profile.compact_count++;
+
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        rb_size_pool_t *size_pool = &size_pools[i];
+        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
+        heap->compact_cursor = NULL;
+        heap->compact_cursor_index = 0;
+    }
+
     if (gc_prof_enabled(objspace)) {
         gc_profile_record *record = gc_prof_record(objspace);
         record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
@@ -4998,6 +5226,9 @@ gc_fill_swept_page_plane(rb_objspace_t *objspace, rb_heap_t *heap, intptr_t p, b
     struct heap_page * sweep_page = ctx->page;
 
     if (bitset) {
+        short slot_size = sweep_page->size_pool->slot_size;
+        short slot_bits = slot_size / sizeof(RVALUE);
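+        /* Each slot covers slot_bits bits of the bitmap plane, so the bitset
+         * is shifted by slot_bits and the cursor by slot_size per iteration. */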
+
         do {
             if (bitset & 1) {
                 VALUE dest = (VALUE)p;
@@ -5040,8 +5271,8 @@ gc_fill_swept_page_plane(rb_objspace_t *objspace, rb_heap_t *heap, intptr_t p, b
                     }
                 }
             }
-            p += sizeof(RVALUE);
-            bitset >>= 1;
+            p += slot_size;
+            bitset >>= slot_bits;
         } while (bitset);
     }
 }
@@ -5053,12 +5284,12 @@ gc_fill_swept_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *s
     bool finished_compacting = false;
     bits_t *mark_bits, *pin_bits;
     bits_t bitset;
-    RVALUE *p;
+    intptr_t p;
 
     mark_bits = sweep_page->mark_bits;
     pin_bits = sweep_page->pinned_bits;
 
-    p = sweep_page->start;
+    p = (intptr_t)sweep_page->start;
 
     struct heap_page * cursor = heap->compact_cursor;
 
@@ -5067,14 +5298,14 @@ gc_fill_swept_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *s
     /* *Want to move* objects are pinned but not marked. */
     bitset = pin_bits[0] & ~mark_bits[0];
     bitset >>= NUM_IN_PAGE(p); // Skip header / dead space bits
-    gc_fill_swept_page_plane(objspace ,heap, (intptr_t)p, bitset, &finished_compacting, ctx);
-    p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
+    gc_fill_swept_page_plane(objspace, heap, (intptr_t)p, bitset, &finished_compacting, ctx);
+    p += ((BITS_BITLENGTH - NUM_IN_PAGE(p)) * sizeof(RVALUE));
 
     for (int i = 1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
         /* *Want to move* objects are pinned but not marked. */
         bitset = pin_bits[i] & ~mark_bits[i];
-        gc_fill_swept_page_plane(objspace ,heap, (intptr_t)p, bitset, &finished_compacting, ctx);
-        p += BITS_BITLENGTH;
+        gc_fill_swept_page_plane(objspace, heap, (intptr_t)p, bitset, &finished_compacting, ctx);
+        p += ((BITS_BITLENGTH) * sizeof(RVALUE));
     }
 
     lock_page_body(objspace, GET_PAGE_BODY(heap->compact_cursor->start));
@@ -5086,9 +5317,14 @@ static inline void
 gc_plane_sweep(rb_objspace_t *objspace, rb_heap_t *heap, intptr_t p, bits_t bitset, struct gc_sweep_context *ctx)
 {
     struct heap_page * sweep_page = ctx->page;
+    short slot_size = sweep_page->size_pool->slot_size;
+    short slot_bits = slot_size / sizeof(RVALUE);
+    GC_ASSERT(slot_bits > 0);
 
     do {
         VALUE vp = (VALUE)p;
+        GC_ASSERT(vp % sizeof(RVALUE) == 0);
+
         asan_unpoison_object(vp, false);
         if (bitset & 1) {
             switch (BUILTIN_TYPE(vp)) {
@@ -5150,21 +5386,18 @@ gc_plane_sweep(rb_objspace_t *objspace, rb_heap_t *heap, intptr_t p, bits_t bits
                     break;
             }
         }
-        p += sizeof(RVALUE);
-        bitset >>= 1;
+        p += slot_size;
+        bitset >>= slot_bits;
     } while (bitset);
 }
 
-static inline int
-gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page)
+static inline void
+gc_page_sweep(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct gc_sweep_context *ctx)
 {
-    int i;
-    struct gc_sweep_context ctx;
+    struct heap_page *sweep_page = ctx->page;
+    GC_ASSERT(sweep_page->size_pool == size_pool);
 
-    ctx.page = sweep_page;
-    ctx.final_slots = 0;
-    ctx.freed_slots = 0;
-    ctx.empty_slots = 0;
+    int i;
 
     RVALUE *p;
     bits_t *bits, bitset;
@@ -5175,7 +5408,7 @@ gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_
         if (sweep_page == heap->compact_cursor) {
             /* The compaction cursor and sweep page met, so we need to quit compacting */
             gc_report(5, objspace, "Quit compacting, mark and compact cursor met\n");
-            gc_compact_finish(objspace, heap);
+            gc_compact_finish(objspace, size_pool, heap);
         }
         else {
             /* We anticipate filling the page, so NULL out the freelist. */
@@ -5190,30 +5423,31 @@ gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_
     p = sweep_page->start;
     bits = sweep_page->mark_bits;
 
-    int out_of_range_bits = (NUM_IN_PAGE(p) + sweep_page->total_slots) % BITS_BITLENGTH;
+    int page_rvalue_count = sweep_page->total_slots * (size_pool->slot_size / sizeof(RVALUE));
+    int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
     if (out_of_range_bits != 0) { // sizeof(RVALUE) == 64
-        bits[BITMAP_INDEX(p) + sweep_page->total_slots / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
+        bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
     }
 
     // Skip out of range slots at the head of the page
     bitset = ~bits[0];
     bitset >>= NUM_IN_PAGE(p);
     if (bitset) {
-        gc_plane_sweep(objspace, heap, (intptr_t)p, bitset, &ctx);
+        gc_plane_sweep(objspace, heap, (intptr_t)p, bitset, ctx);
     }
     p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
 
     for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
         bitset = ~bits[i];
         if (bitset) {
-            gc_plane_sweep(objspace, heap, (intptr_t)p, bitset, &ctx);
+            gc_plane_sweep(objspace, heap, (intptr_t)p, bitset, ctx);
         }
         p += BITS_BITLENGTH;
     }
 
     if (heap->compact_cursor) {
-        if (gc_fill_swept_page(objspace, heap, sweep_page, &ctx)) {
-            gc_compact_finish(objspace, heap);
+        if (gc_fill_swept_page(objspace, heap, sweep_page, ctx)) {
+            gc_compact_finish(objspace, size_pool, heap);
         }
     }
 
@@ -5224,17 +5458,17 @@ gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_
 #if GC_PROFILE_MORE_DETAIL
     if (gc_prof_enabled(objspace)) {
 	gc_profile_record *record = gc_prof_record(objspace);
-	record->removing_objects += ctx.final_slots + ctx.freed_slots;
-	record->empty_objects += ctx.empty_slots;
+	record->removing_objects += ctx->final_slots + ctx->freed_slots;
+	record->empty_objects += ctx->empty_slots;
     }
 #endif
     if (0) fprintf(stderr, "gc_page_sweep(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
 		   rb_gc_count(),
 		   sweep_page->total_slots,
-		   ctx.freed_slots, ctx.empty_slots, ctx.final_slots);
+		   ctx->freed_slots, ctx->empty_slots, ctx->final_slots);
 
-    sweep_page->free_slots = ctx.freed_slots + ctx.empty_slots;
-    objspace->profile.total_freed_objects += ctx.freed_slots;
+    sweep_page->free_slots = ctx->freed_slots + ctx->empty_slots;
+    objspace->profile.total_freed_objects += ctx->freed_slots;
 
     if (heap_pages_deferred_final && !finalizing) {
         rb_thread_t *th = GET_THREAD();
@@ -5256,22 +5490,24 @@ gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_
 #endif
 
     gc_report(2, objspace, "page_sweep: end.\n");
-
-    return ctx.freed_slots + ctx.empty_slots;
 }
 
+#if !USE_RVARGC
 /* allocate additional minimum page to work */
 static void
-gc_heap_prepare_minimum_pages(rb_objspace_t *objspace, rb_heap_t *heap)
+gc_heap_prepare_minimum_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
 {
-    if (!heap->free_pages && heap_increment(objspace, heap) == FALSE) {
-	/* there is no free after page_sweep() */
-        heap_allocatable_pages_set(objspace, 1);
-	if (!heap_increment(objspace, heap)) { /* can't allocate additional free objects */
-	    rb_memerror();
-	}
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        if (!heap->free_pages && heap_increment(objspace, size_pool, heap) == FALSE) {
+            /* there is no free after page_sweep() */
+            size_pool_allocatable_pages_set(objspace, size_pool, 1);
+            if (!heap_increment(objspace, size_pool, heap)) { /* can't allocate additional free objects */
+                rb_memerror();
+            }
+        }
     }
 }
+#endif
 
 static const char *
 gc_mode_name(enum gc_mode mode)
@@ -5316,18 +5552,69 @@ static void
 gc_sweep_start(rb_objspace_t *objspace)
 {
     gc_mode_transition(objspace, gc_mode_sweeping);
-    gc_sweep_start_heap(objspace, heap_eden);
 
 #if GC_ENABLE_INCREMENTAL_MARK
     objspace->rincgc.pooled_slots = 0;
 #endif
 
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        rb_size_pool_t *size_pool = &size_pools[i];
+
+#if USE_RVARGC
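+        /* Return the pool's cached freelist to the page it was taken from
+         * before sweeping begins. */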
+        if (size_pool->freelist) {
+            size_pool->using_page->freelist = (RVALUE *)size_pool->freelist;
+            size_pool->freelist = 0;
+        }
+
+        size_pool->using_page = NULL;
+#endif
+
+        gc_sweep_start_heap(objspace, SIZE_POOL_EDEN_HEAP(size_pool));
+    }
+
     rb_ractor_t *r = NULL;
     list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
         rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
     }
 }
 
+#if USE_RVARGC
+static void
+gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
+{
+    rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
+    size_t total_slots = heap->total_slots + SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
+    size_t total_pages = heap->total_pages + SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
+    size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;
+
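+    /* When this pool swept fewer free slots than the minimum free ratio
+     * requires, either grow it (when full marking) or flag the need for a
+     * major GC. */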
+    size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
+
+    if (swept_slots < min_free_slots) {
+        if (is_full_marking(objspace)) {
+            size_t extend_page_count = heap_extend_pages(objspace, swept_slots, total_slots, total_pages);
+
+            if (extend_page_count > size_pool->allocatable_pages) {
+                size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
+            }
+
+            heap_increment(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
+        }
+        else {
+            /* The heap is a growth heap if it freed more slots than it had empty slots. */
+            bool is_growth_heap = size_pool->empty_slots == 0 ||
+                                    size_pool->freed_slots > size_pool->empty_slots;
+
+            /* Only growth heaps are allowed to start a major GC. */
+            if (is_growth_heap &&
+                    objspace->profile.count - objspace->rgengc.last_major_gc >= RVALUE_OLD_AGE) {
+                objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
+                size_pool->force_major_gc_count++;
+            }
+        }
+    }
+}
+#endif
+
 static void
 gc_sweep_finish(rb_objspace_t *objspace)
 {
@@ -5336,10 +5623,36 @@ gc_sweep_finish(rb_objspace_t *objspace)
     gc_prof_set_heap_info(objspace);
     heap_pages_free_unused_pages(objspace);
 
-    /* if heap_pages has unused pages, then assign them to increment */
-    if (heap_allocatable_pages < heap_tomb->total_pages) {
-	heap_allocatable_pages_set(objspace, heap_tomb->total_pages);
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        rb_size_pool_t *size_pool = &size_pools[i];
+
+        /* if heap_pages has unused pages, then assign them to increment */
+        size_t tomb_pages = SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
+        if (size_pool->allocatable_pages < tomb_pages) {
+            size_pool->allocatable_pages = tomb_pages;
+        }
+
+#if USE_RVARGC
+        size_pool->freed_slots = 0;
+        size_pool->empty_slots = 0;
+
+#if GC_ENABLE_INCREMENTAL_MARK
+        if (!will_be_incremental_marking(objspace)) {
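+            /* Incremental marking will not run, so pooled pages can go
+             * straight onto the free page list. */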
+            rb_heap_t *eden_heap = SIZE_POOL_EDEN_HEAP(size_pool);
+            struct heap_page *end_page = eden_heap->free_pages;
+            if (end_page) {
+                while (end_page->free_next) end_page = end_page->free_next;
+                end_page->free_next = eden_heap->pooled_pages;
+            }
+            else {
+                eden_heap->free_pages = eden_heap->pooled_pages;
+            }
+            objspace->rincgc.pooled_slots = 0;
+        }
+#endif
+#endif
     }
+    heap_pages_expand_sorted(objspace);
 
     gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_SWEEP, 0);
     gc_mode_transition(objspace, gc_mode_none);
@@ -5350,14 +5663,18 @@ gc_sweep_finish(rb_objspace_t *objspace)
 }
 
 static int
-gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
+gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
 {
     struct heap_page *sweep_page = heap->sweeping_page;
     int unlink_limit = 3;
 
 #if GC_ENABLE_INCREMENTAL_MARK
     int swept_slots = 0;
+#if USE_RVARGC
+    bool need_pool = TRUE;
+#else
     int need_pool = will_be_incremental_marking(objspace) ? TRUE : FALSE;
+#endif
 
     gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
 #else
@@ -5371,8 +5688,19 @@ gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
 #endif
 
     do {
+        GC_ASSERT(sweep_page->size_pool == size_pool);
+
         RUBY_DEBUG_LOG("sweep_page:%p", sweep_page);
-	int free_slots = gc_page_sweep(objspace, heap, sweep_page);
+
+        struct gc_sweep_context ctx = {
+            .page = sweep_page,
+            .final_slots = 0,
+            .freed_slots = 0,
+            .empty_slots = 0,
+        };
+        gc_page_sweep(objspace, size_pool, heap, &ctx);
+        int free_slots = ctx.freed_slots + ctx.empty_slots;
+
         heap->sweeping_page = list_next(&heap->pages, sweep_page, page_node);
 
 	if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
@@ -5382,9 +5710,14 @@ gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
 	    unlink_limit--;
 	    /* there are no living objects -> move this page to tomb heap */
 	    heap_unlink_page(objspace, heap, sweep_page);
-	    heap_add_page(objspace, heap_tomb, sweep_page);
+	    heap_add_page(objspace, size_pool, SIZE_POOL_TOMB_HEAP(size_pool), sweep_page);
 	}
 	else if (free_slots > 0) {
+#if USE_RVARGC
+            size_pool->freed_slots += ctx.freed_slots;
+            size_pool->empty_slots += ctx.empty_slots;
+#endif
+
 #if GC_ENABLE_INCREMENTAL_MARK
 	    if (need_pool) {
                 heap_add_poolpage(objspace, heap, sweep_page);
@@ -5408,42 +5741,67 @@ gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
     } while ((sweep_page = heap->sweeping_page));
 
     if (!heap->sweeping_page) {
-	gc_sweep_finish(objspace);
+#if USE_RVARGC
+        gc_sweep_finish_size_pool(objspace, size_pool);
+#endif
+
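+        /* Sweeping is only finished globally once no size pool has a
+         * sweeping page left. */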
+        if (!has_sweeping_pages(objspace)) {
+            gc_sweep_finish(objspace);
+        }
     }
 
 #if GC_ENABLE_LAZY_SWEEP
     gc_prof_sweep_timer_stop(objspace);
 #endif
 
-    GC_ASSERT(gc_mode(objspace) == gc_mode_sweeping ? heap->free_pages != NULL : 1);
-
     return heap->free_pages != NULL;
 }
 
 static void
 gc_sweep_rest(rb_objspace_t *objspace)
 {
-    rb_heap_t *heap = heap_eden; /* lazy sweep only for eden */
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        rb_size_pool_t *size_pool = &size_pools[i];
 
-    while (has_sweeping_pages(heap)) {
-	gc_sweep_step(objspace, heap);
+        while (SIZE_POOL_EDEN_HEAP(size_pool)->sweeping_page) {
+            gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
+        }
     }
 }
 
 static void
-gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap)
+gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *sweep_size_pool, rb_heap_t *heap)
 {
     GC_ASSERT(dont_gc_val() == FALSE);
     if (!GC_ENABLE_LAZY_SWEEP) return;
 
     unsigned int lock_lev;
     gc_enter(objspace, gc_enter_event_sweep_continue, &lock_lev);
-    gc_sweep_step(objspace, heap);
+
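+    /* Take one sweep step in every size pool; only the pool being allocated
+     * into (sweep_size_pool) gets special handling when its step yields no
+     * free page. */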
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        rb_size_pool_t *size_pool = &size_pools[i];
+        if (!gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool))) {
+#if USE_RVARGC
+            /* sweep_size_pool requires a free slot but sweeping did not yield any. */
+            if (size_pool == sweep_size_pool) {
+                if (size_pool->allocatable_pages > 0) {
+                    heap_increment(objspace, size_pool, heap);
+                }
+                else {
+                    /* Not allowed to create a new page, so finish sweeping instead. */
+                    gc_sweep_rest(objspace);
+                    break;
+                }
+            }
+#endif
+        }
+    }
+
     gc_exit(objspace, gc_enter_event_sweep_continue, &lock_lev);
 }
 
 static void
-invalidate_moved_plane(rb_objspace_t *objspace, intptr_t p, bits_t bitset, struct gc_sweep_context *ctx)
+invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, intptr_t p, bits_t bitset, struct gc_sweep_context *ctx)
 {
     if (bitset) {
         do {
@@ -5466,7 +5824,7 @@ invalidate_moved_plane(rb_objspace_t *objspace, intptr_t p, bits_t bitset, struc
                         ctx->freed_slots++;
                     }
 
-                    gc_move(objspace, object, forwarding_object);
+                    gc_move(objspace, object, forwarding_object, page->size_pool->slot_size);
                     /* forwarding_object is now our actual object, and "object"
                      * is the free slot for the original page */
                     heap_page_add_freeobj(objspace, GET_HEAP_PAGE(object), object);
@@ -5504,7 +5862,7 @@ invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page)
     // Skip out of range slots at the head of the page
     bitset = pin_bits[0] & ~mark_bits[0];
     bitset >>= NUM_IN_PAGE(p);
-    invalidate_moved_plane(objspace, (intptr_t)p, bitset, &ctx);
+    invalidate_moved_plane(objspace, page, (intptr_t)p, bitset, &ctx);
     p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
 
     for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
@@ -5512,7 +5870,7 @@ invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page)
          * to indicate there is a moved object in this slot. */
         bitset = pin_bits[i] & ~mark_bits[i];
 
-        invalidate_moved_plane(objspace, (intptr_t)p, bitset, &ctx);
+        invalidate_moved_plane(objspace, page, (intptr_t)p, bitset, &ctx);
         p += BITS_BITLENGTH;
     }
 
@@ -5521,10 +5879,19 @@ invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page)
 }
 
 static void
-gc_compact_start(rb_objspace_t *objspace, rb_heap_t *heap)
+gc_compact_start(rb_objspace_t *objspace)
 {
-    heap->compact_cursor = list_tail(&heap->pages, struct heap_page, page_node);
-    heap->compact_cursor_index = 0;
+    struct heap_page *page = NULL;
+
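+    /* Flag every eden page of every size pool as unswept and point each
+     * pool's compact cursor at the tail of its page list. */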
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        rb_heap_t *heap = &size_pools[i].eden_heap;
+        list_for_each(&heap->pages, page, page_node) {
+            page->flags.before_sweep = TRUE;
+        }
+
+        heap->compact_cursor = list_tail(&heap->pages, struct heap_page, page_node);
+        heap->compact_cursor_index = 0;
+    }
 
     if (gc_prof_enabled(objspace)) {
         gc_profile_record *record = gc_prof_record(objspace);
@@ -5551,13 +5918,7 @@ gc_sweep(rb_objspace_t *objspace)
 #endif
 	gc_sweep_start(objspace);
         if (objspace->flags.during_compacting) {
-            struct heap_page *page = NULL;
-
-            list_for_each(&heap_eden->pages, page, page_node) {
-                page->flags.before_sweep = TRUE;
-            }
-
-            gc_compact_start(objspace, heap_eden);
+            gc_compact_start(objspace);
         }
 
 	gc_sweep_rest(objspace);
@@ -5570,16 +5931,26 @@ gc_sweep(rb_objspace_t *objspace)
 	gc_sweep_start(objspace);
 
         if (ruby_enable_autocompact && is_full_marking(objspace)) {
-            gc_compact_start(objspace, heap_eden);
+            gc_compact_start(objspace);
         }
 
-        list_for_each(&heap_eden->pages, page, page_node) {
-            page->flags.before_sweep = TRUE;
+        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+            list_for_each(&size_pools[i].eden_heap.pages, page, page_node) {
+                page->flags.before_sweep = TRUE;
+            }
+        }
+
+        /* Sweep every size pool. */
+        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+            rb_size_pool_t *size_pool = &size_pools[i];
+            gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
         }
-	gc_sweep_step(objspace, heap_eden);
     }
 
-    gc_heap_prepare_minimum_pages(objspace, heap_eden);
+#if !USE_RVARGC
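+    /* Without RVARGC there is only a single size pool, so topping up the
+     * first pool's eden heap covers the whole heap. */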
+    rb_size_pool_t *size_pool = &size_pools[0];
+    gc_heap_prepare_minimum_pages(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
+#endif
 }
 
 /* Marking - Marking stack */
@@ -7280,7 +7651,7 @@ gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
     int remembered_old_objects = 0;
     int free_objects = 0;
     int zombie_objects = 0;
-    int stride = 1;
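+    /* page->start is an RVALUE array, so express the pool's slot size as an
+     * RVALUE-sized stride when walking the page. */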
+    int stride = page->size_pool->slot_size / sizeof(RVALUE);
 
     for (i=0; i<page->total_slots; i+=stride) {
 	VALUE val = (VALUE)&page->start[i];
@@ -7367,8 +7738,10 @@ static int
 gc_verify_heap_pages(rb_objspace_t *objspace)
 {
     int remembered_old_objects = 0;
-    remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_eden->pages);
-    remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_tomb->pages);
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        remembered_old_objects += gc_verify_heap_pages_(objspace, &size_pools[i].eden_heap.pages);
+        remembered_old_objects += gc_verify_heap_pages_(objspace, &size_pools[i].tomb_heap.pages);
+    }
     return remembered_old_objects;
 }
 
@@ -7400,7 +7773,12 @@ gc_verify_internal_consistency_(rb_objspace_t *objspace)
     /* check relations */
     for (size_t i = 0; i < heap_allocated_pages; i++) {
         struct heap_page *page = heap_pages_sorted[i];
-        verify_internal_consistency_i(page->start, page->start + page->total_slots, sizeof(RVALUE), &data);
+        short slot_size = page->size_pool->slot_size;
+
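+        /* A page's extent is total_slots * slot_size, which varies per size
+         * pool. */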
+        intptr_t start = (intptr_t)page->start;
+        intptr_t end = start + page->total_slots * slot_size;
+
+        verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
     }
 
     if (data.err_count != 0) {
@@ -7417,7 +7795,7 @@ gc_verify_internal_consistency_(rb_objspace_t *objspace)
 
     /* check counters */
 
-    if (!is_lazy_sweeping(heap_eden) &&
+    if (!is_lazy_sweeping(objspace) &&
         !finalizing &&
         ruby_single_main_ractor != NULL) {
 	if (objspace_live_slots(objspace) != data.live_object_count) {
@@ -7524,14 +7902,20 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark)
 	objspace->rgengc.old_objects = 0;
 	objspace->rgengc.last_major_gc = objspace->profile.count;
 	objspace->marked_slots = 0;
-	rgengc_mark_and_rememberset_clear(objspace, heap_eden);
+
+        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+            rgengc_mark_and_rememberset_clear(objspace, &size_pools[i].eden_heap);
+        }
     }
     else {
 	objspace->flags.during_minor_gc = TRUE;
 	objspace->marked_slots =
 	  objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
 	objspace->profile.minor_gc_count++;
-	rgengc_rememberset_mark(objspace, heap_eden);
+
+        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+            rgengc_rememberset_mark(objspace, &size_pools[i].eden_heap);
+        }
     }
 
     gc_mark_roots(objspace, NULL);
@@ -7605,11 +7989,14 @@ gc_marks_finish(rb_objspace_t *objspace)
 #if GC_ENABLE_INCREMENTAL_MARK
     /* finish incremental GC */
     if (is_incremental_marking(objspace)) {
-	if (heap_eden->pooled_pages) {
-	    heap_move_pooled_pages_to_free_pages(heap_eden);
-	    gc_report(1, objspace, "gc_marks_finish: pooled pages are exists. retry.\n");
-	    return FALSE; /* continue marking phase */
-	}
+        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+            rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
+            if (heap->pooled_pages) {
+                heap_move_pooled_pages_to_free_pages(heap);
+                gc_report(1, objspace, "gc_marks_finish: pooled pages exist. retry.\n");
+                return FALSE; /* continue marking phase */
+            }
+        }
 
 	if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
 	    rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
@@ -7632,7 +8019,9 @@ gc_marks_finish(rb_objspace_t *objspace)
 
 	objspace->flags.during_incremental_marking = FALSE;
 	/* check children of all marked wb-unprotected objects */
-        gc_marks_wb_unprotected_objects(objspace, heap_eden);
+        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+            gc_marks_wb_unprotected_objects(objspace, &size_pools[i].eden_heap);
+        }
     }
 #endif /* GC_ENABLE_INCREMENTAL_MARK */
 
@@ -7655,8 +8044,7 @@ gc_marks_finish(rb_objspace_t *objspace)
 
     {
 	/* decide full GC is needed or not */
-	rb_heap_t *heap = heap_eden;
-	size_t total_slots = heap_allocatable_pages * HEAP_PAGE_OBJ_LIMIT + heap->total_slots;
+        size_t total_slots = heap_allocatable_slots(objspace) + heap_eden_total_slots(objspace);
 	size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
 	size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
 	size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
@@ -7664,7 +8052,7 @@ gc_marks_finish(rb_objspace_t *objspace)
         const int r_cnt = GET_VM()->ractor.cnt;
         const int r_mul = r_cnt > 8 ? 8 : r_cnt; // upto 8
 
-	GC_ASSERT(heap->total_slots >= objspace->marked_slots);
+        GC_ASSERT(heap_eden_total_slots(objspace) >= objspace->marked_slots);
 
 	/* setup free-able page counts */
         if (max_free_slots < gc_params.heap_init_slots * r_mul) {
@@ -7695,12 +8083,17 @@ gc_marks_finish(rb_objspace_t *objspace)
 		    objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
 		}
 	    }
+
+#if !USE_RVARGC
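+            /* Without RVARGC, the single size pool is grown here after a
+             * full mark. */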
             if (full_marking) {
               /* increment: */
 		gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
-                heap_allocatable_pages_set(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
-		heap_increment(objspace, heap);
+                rb_size_pool_t *size_pool = &size_pools[0];
+                size_pool_allocatable_pages_set(objspace, size_pool, heap_extend_pages(objspace, sweep_slots, total_slots, heap_allocated_pages + heap_allocatable_pages(objspace)));
+
+                heap_increment(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
 	    }
+#endif
 	}
 
 	if (full_marking) {
@@ -7723,7 +8116,7 @@ gc_marks_finish(rb_objspace_t *objspace)
 	gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
                   "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
                   "sweep %"PRIdSIZE" slots, increment: %"PRIdSIZE", next GC: %s)\n",
-                  objspace->marked_slots, objspace->rgengc.old_objects, heap->total_slots, sweep_slots, heap_allocatable_pages,
+                  objspace->marked_slots, objspace->rgengc.old_objects, heap_eden_total_slots(objspace), sweep_slots, heap_allocatable_pages(objspace),
 		  objspace->rgengc.need_major_gc ? "major" : "minor");
     }
 
@@ -7757,7 +8150,9 @@ gc_marks_rest(rb_objspace_t *objspace)
     gc_report(1, objspace, "gc_marks_rest\n");
 
 #if GC_ENABLE_INCREMENTAL_MARK
-    heap_eden->pooled_pages = NULL;
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        size_pools[i].eden_heap.pooled_pages = NULL;
+    }
 #endif
 
     if (is_incremental_marking(objspace)) {
@@ -7775,7 +8170,7 @@ gc_marks_rest(rb_objspace_t *objspace)
 }
 
 static void
-gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
+gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
 {
     GC_ASSERT(dont_gc_val() == FALSE);
 #if GC_ENABLE_INCREMENTAL_MARK
@@ -7793,7 +8188,7 @@ gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
         }
         from = "pooled-pages";
     }
-    else if (heap_increment(objspace, heap)) {
+    else if (heap_increment(objspace, size_pool, heap)) {
         slots = heap->free_pages->free_slots;
         from = "incremented-pages";
     }
@@ -7849,7 +8244,7 @@ gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
 	    status = is_full_marking(objspace) ? "+" : "-";
 	}
 	else {
-	    if (is_lazy_sweeping(heap_eden)) {
+	    if (is_lazy_sweeping(objspace)) {
 		status = "S";
 	    }
 	    if (is_incremental_marking(objspace)) {
@@ -8470,13 +8865,13 @@ enum {
     (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
 
 static void
-heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
+heap_ready_to_gc(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
 {
     if (!heap->free_pages) {
-	if (!heap_increment(objspace, heap)) {
-            heap_allocatable_pages_set(objspace, 1);
-	    heap_increment(objspace, heap);
-	}
+        if (!heap_increment(objspace, size_pool, heap)) {
+            size_pool_allocatable_pages_set(objspace, size_pool, 1);
+            heap_increment(objspace, size_pool, heap);
+        }
     }
 }
 
@@ -8484,8 +8879,11 @@ static int
 ready_to_gc(rb_objspace_t *objspace)
 {
     if (dont_gc_val() || during_gc || ruby_disable_gc) {
-	heap_ready_to_gc(objspace, heap_eden);
-	return FALSE;
+        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+            rb_size_pool_t *size_pool = &size_pools[i];
+            heap_ready_to_gc(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
+        }
+        return FALSE;
     }
     else {
 	return TRUE;
@@ -8602,7 +9000,7 @@ gc_start(rb_objspace_t *objspace, unsigned int reason)
     if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
 
     GC_ASSERT(gc_mode(objspace) == gc_mode_none);
-    GC_ASSERT(!is_lazy_sweeping(heap_eden));
+    GC_ASSERT(!is_lazy_sweeping(objspace));
     GC_ASSERT(!is_incremental_marking(objspace));
 
     unsigned int lock_lev;
@@ -8703,7 +9101,7 @@ static void
 gc_rest(rb_objspace_t *objspace)
 {
     int marking = is_incremental_marking(objspace);
-    int sweeping = is_lazy_sweeping(heap_eden);
+    int sweeping = is_lazy_sweeping(objspace);
 
     if (marking || sweeping) {
         unsigned int lock_lev;
@@ -8714,7 +9112,7 @@ gc_rest(rb_objspace_t *objspace)
 	if (is_incremental_marking(objspace)) {
             gc_marks_rest(objspace);
         }
-	if (is_lazy_sweeping(heap_eden)) {
+	if (is_lazy_sweeping(objspace)) {
 	    gc_sweep_rest(objspace);
 	}
 	gc_exit(objspace, gc_enter_event_rest, &lock_lev);
@@ -8739,7 +9137,7 @@ gc_current_status_fill(rb_objspace_t *objspace, char *buff)
     }
     else if (is_sweeping(objspace)) {
 	buff[i++] = 'S';
-	if (is_lazy_sweeping(heap_eden))      buff[i++] = 'L';
+	if (is_lazy_sweeping(objspace))      buff[i++] = 'L';
     }
     else {
 	buff[i++] = 'N';
@@ -8987,7 +9385,7 @@ gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
 }
 
 static VALUE
-gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free)
+gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t slot_size)
 {
     int marked;
     int wb_unprotected;
@@ -9037,8 +9435,8 @@ gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free)
     }
 
     /* Move the object */
-    memcpy(dest, src, sizeof(RVALUE));
-    memset(src, 0, sizeof(RVALUE));
+    memcpy(dest, src, slot_size);
+    memset(src, 0, slot_size);
 
     /* Set bits for object in new location */
     if (marking) {
@@ -9093,33 +9491,37 @@ compare_free_slots(const void *left, const void *right, void *dummy)
 static void
 gc_sort_heap_by_empty_slots(rb_objspace_t *objspace)
 {
-    size_t total_pages = heap_eden->total_pages;
-    size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
-    struct heap_page *page = 0, **page_list = malloc(size);
-    size_t i = 0;
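+    /* Sort each size pool's eden heap independently. */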
+    for (int j = 0; j < SIZE_POOL_COUNT; j++) {
+        rb_size_pool_t *size_pool = &size_pools[j];
 
-    list_for_each(&heap_eden->pages, page, page_node) {
-        page_list[i++] = page;
-        assert(page != NULL);
-    }
-    assert(total_pages > 0);
-    assert((size_t)i == total_pages);
+        size_t total_pages = SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
+        size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
+        struct heap_page *page = 0, **page_list = malloc(size);
+        size_t i = 0;
 
-    /* Sort the heap so "filled pages" are first. `heap_add_page` adds to the
-     * head of the list, so empty pages will end up at the start of the heap */
-    ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_free_slots, NULL);
-
-    /* Reset the eden heap */
-    list_head_init(&objspace->eden_heap.pages);
-
-    for (i = 0; i < total_pages; i++) {
-        list_add(&heap_eden->pages, &page_list[i]->page_node);
-        if (page_list[i]->free_slots != 0) {
-            heap_add_freepage(heap_eden, page_list[i]);
+        list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
+            page_list[i++] = page;
+            GC_ASSERT(page);
         }
-    }
 
-    free(page_list);
+        GC_ASSERT((size_t)i == total_pages);
+
+        /* Sort the heap so "filled pages" are first. `heap_add_page` adds to the
+         * head of the list, so empty pages will end up at the start of the heap */
+        ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_free_slots, NULL);
+
+        /* Reset the eden heap */
+        list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
+
+        for (i = 0; i < total_pages; i++) {
+            list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
+            if (page_list[i]->free_slots != 0) {
+                heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
+            }
+        }
+
+        free(page_list);
+    }
 }
 
 static void
@@ -9752,21 +10154,29 @@ extern rb_symbols_t ruby_global_symbols;
 #define global_symbols ruby_global_symbols
 
 static void
-gc_update_references(rb_objspace_t * objspace, rb_heap_t *heap)
+gc_update_references(rb_objspace_t *objspace)
 {
     rb_execution_context_t *ec = GET_EC();
     rb_vm_t *vm = rb_ec_vm_ptr(ec);
-    short should_set_mark_bits = 1;
 
     struct heap_page *page = NULL;
 
-    list_for_each(&heap->pages, page, page_node) {
-        gc_ref_update(page->start, page->start + page->total_slots, sizeof(RVALUE), objspace, page);
-        if (page == heap->sweeping_page) {
-            should_set_mark_bits = 0;
-        }
-        if (should_set_mark_bits) {
-            gc_setup_mark_bits(page);
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        bool should_set_mark_bits = TRUE;
+        rb_size_pool_t *size_pool = &size_pools[i];
+        rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
+
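+        /* Update references in every eden page, stepping by the pool's slot
+         * size. Mark bits are only rebuilt for pages that come before the
+         * heap's sweeping_page. */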
+        list_for_each(&heap->pages, page, page_node) {
+            intptr_t start = (intptr_t)page->start;
+            intptr_t end = start + (page->total_slots * size_pool->slot_size);
+
+            gc_ref_update((void *)start, (void *)end, size_pool->slot_size, objspace, page);
+            if (page == heap->sweeping_page) {
+                should_set_mark_bits = FALSE;
+            }
+            if (should_set_mark_bits) {
+                gc_setup_mark_bits(page);
+            }
         }
     }
     rb_vm_update_references(vm);
@@ -9894,7 +10304,11 @@ gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE do
         gc_rest(objspace);
 
         if (RTEST(double_heap)) {
-            heap_add_pages(objspace, heap_eden, heap_allocated_pages);
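+            /* Double each size pool's eden heap. */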
+            for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+                rb_size_pool_t *size_pool = &size_pools[i];
+                rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
+                heap_add_pages(objspace, size_pool, heap, heap->total_pages);
+            }
         }
 
         if (RTEST(toward_empty)) {
@@ -10197,14 +10611,14 @@ gc_stat_internal(VALUE hash_or_sym)
     /* implementation dependent counters */
     SET(heap_allocated_pages, heap_allocated_pages);
     SET(heap_sorted_length, heap_pages_sorted_length);
-    SET(heap_allocatable_pages, heap_allocatable_pages);
+    SET(heap_allocatable_pages, heap_allocatable_pages(objspace));
     SET(heap_available_slots, objspace_available_slots(objspace));
     SET(heap_live_slots, objspace_live_slots(objspace));
     SET(heap_free_slots, objspace_free_slots(objspace));
     SET(heap_final_slots, heap_pages_final_slots);
     SET(heap_marked_slots, objspace->marked_slots);
-    SET(heap_eden_pages, heap_eden->total_pages);
-    SET(heap_tomb_pages, heap_tomb->total_pages);
+    SET(heap_eden_pages, heap_eden_total_pages(objspace));
+    SET(heap_tomb_pages, heap_tomb_total_pages(objspace));
     SET(total_allocated_pages, objspace->profile.total_allocated_pages);
     SET(total_freed_pages, objspace->profile.total_freed_pages);
     SET(total_allocated_objects, objspace->total_allocated_objects);
@@ -10507,9 +10921,16 @@ gc_set_initial_pages(void)
     gc_rest(objspace);
 
     min_pages = gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT;
-    if (min_pages > heap_eden->total_pages) {
-	heap_add_pages(objspace, heap_eden, min_pages - heap_eden->total_pages);
+
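+    /* Split the additional pages evenly across the size pools; the integer
+     * division remainder is added to the first pool below. */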
+    size_t pages_per_class = (min_pages - heap_eden_total_pages(objspace)) / SIZE_POOL_COUNT;
+
+    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+        rb_size_pool_t *size_pool = &size_pools[i];
+
+        heap_add_pages(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool), pages_per_class);
     }
+
+    heap_add_pages(objspace, &size_pools[0], &size_pools[0].eden_heap, min_pages - heap_eden_total_pages(objspace));
 }
 
 /*
@@ -10927,7 +11348,7 @@ objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, si
     if (type == MEMOP_TYPE_MALLOC) {
       retry:
 	if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
-	    if (ruby_thread_has_gvl_p() && is_lazy_sweeping(heap_eden)) {
+	    if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
 		gc_rest(objspace); /* gc_rest can reduce malloc_increase */
 		goto retry;
 	    }
@@ -13001,7 +13422,7 @@ rb_gcdebug_print_obj_condition(VALUE obj)
     fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
     fprintf(stderr, "remembered?  : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
 
-    if (is_lazy_sweeping(heap_eden)) {
+    if (is_lazy_sweeping(objspace)) {
         fprintf(stderr, "lazy sweeping?: true\n");
         fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
     }
diff --git a/internal/gc.h b/internal/gc.h
index 7d1efebfca..548ae5568a 100644
--- a/internal/gc.h
+++ b/internal/gc.h
@@ -101,6 +101,7 @@ static inline void *ruby_sized_xrealloc2_inlined(void *ptr, size_t new_count, si
 static inline void ruby_sized_xfree_inlined(void *ptr, size_t size);
 VALUE rb_class_allocate_instance(VALUE klass);
 void rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache);
+void *rb_gc_rvargc_object_data(VALUE obj);
 
 RUBY_SYMBOL_EXPORT_BEGIN
 /* gc.c (export) */
@@ -116,7 +117,6 @@ void rb_gc_mark_vm_stack_values(long n, const VALUE *values);
 void *ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2));
 void *ruby_sized_xrealloc2(void *ptr, size_t new_count, size_t element_size, size_t old_count) RUBY_ATTR_RETURNS_NONNULL RUBY_ATTR_ALLOC_SIZE((2, 3));
 void ruby_sized_xfree(void *x, size_t size);
-int rb_slot_size(void);
 RUBY_SYMBOL_EXPORT_END
 
 MJIT_SYMBOL_EXPORT_BEGIN
diff --git a/misc/lldb_cruby.py b/misc/lldb_cruby.py
index 1dbdc03eee..8461eea8b3 100755
--- a/misc/lldb_cruby.py
+++ b/misc/lldb_cruby.py
@@ -533,14 +533,15 @@ class HeapPageIter:
         self.target = target
         self.start = page.GetChildMemberWithName('start').GetValueAsUnsigned();
         self.num_slots = page.GetChildMemberWithName('total_slots').unsigned
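+        # slot_size comes from the page's size pool (variable-width allocation)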
+        self.slot_size = page.GetChildMemberWithName('size_pool').GetChildMemberWithName('slot_size').unsigned
         self.counter = 0
         self.tRBasic = target.FindFirstType("struct RBasic")
         self.tRValue = target.FindFirstType("struct RVALUE")
 
     def is_valid(self):
         heap_page_header_size = self.target.FindFirstType("struct heap_page_header").GetByteSize()
-        rvalue_size = self.tRValue.GetByteSize()
-        heap_page_obj_limit = int((HEAP_PAGE_SIZE - heap_page_header_size) / rvalue_size)
+        rvalue_size = self.slot_size
+        heap_page_obj_limit = int((HEAP_PAGE_SIZE - heap_page_header_size) / self.slot_size)
 
         return (heap_page_obj_limit - 1) <= self.num_slots <= heap_page_obj_limit
 
@@ -549,7 +550,7 @@ class HeapPageIter:
 
     def __next__(self):
         if self.counter < self.num_slots:
-            obj_addr_i = self.start + (self.counter * self.tRValue.GetByteSize())
+            obj_addr_i = self.start + (self.counter * self.slot_size)
             obj_addr = lldb.SBAddress(obj_addr_i, self.target)
             slot_info = (self.counter, obj_addr_i, self.target.CreateValueFromAddress("object", obj_addr, self.tRBasic))
             self.counter += 1