1
0
Fork 0
mirror of https://github.com/ruby/ruby.git synced 2022-11-09 12:17:21 -05:00

Make sure all threads are scanned on unload_units

This has been a TODO since 79df14c04b. While adcf0316d1 covered the
root_fiber of the initial thread, it didn't cover root_fibers of other
threads. Now it's hooked properly in rb_threadptr_root_fiber_setup.

With regards to "XXX: Is this mjit_cont `mjit_cont_free`d?", when
rb_threadptr_root_fiber_release is called, although I'm not sure when
th->root_fiber is truthy, fiber_free seems to call cont_free and
mjit_cont_free. So mjit_conts of root_fibers seem to be freed properly.
This commit is contained in:
Takashi Kokubun 2020-11-21 19:24:59 -08:00
parent eb3906c6b8
commit e0156bd396
No known key found for this signature in database
GPG key ID: 6FFC433B12EE23DD
3 changed files with 17 additions and 14 deletions

9
cont.c
View file

@@ -945,7 +945,8 @@ cont_free(void *ptr)
     RUBY_FREE_UNLESS_NULL(cont->saved_vm_stack.ptr);
-    if (mjit_enabled && cont->mjit_cont != NULL) {
+    if (mjit_enabled) {
+        VM_ASSERT(cont->mjit_cont != NULL);
         mjit_cont_free(cont->mjit_cont);
     }
     /* free rb_cont_t or rb_fiber_t */
@@ -1155,11 +1156,10 @@ VALUE rb_fiberptr_self(struct rb_fiber_struct *fiber)
     return fiber->cont.self;
 }
 
+// This is used for root_fiber because other fibers call cont_init_mjit_cont through cont_new.
 void
 rb_fiber_init_mjit_cont(struct rb_fiber_struct *fiber)
 {
-    // Currently this function is meant for root_fiber. Others go through cont_new.
-    // XXX: Is this mjit_cont `mjit_cont_free`d?
     cont_init_mjit_cont(&fiber->cont);
 }
@@ -1987,6 +1987,9 @@ rb_threadptr_root_fiber_setup(rb_thread_t *th)
     fiber->blocking = 1;
     fiber_status_set(fiber, FIBER_RESUMED); /* skip CREATED */
     th->ec = &fiber->cont.saved_ec;
+    // This skips mjit_cont_new for the initial thread because mjit_enabled is always false
+    // at this point. mjit_init calls rb_fiber_init_mjit_cont again for this root_fiber.
+    rb_fiber_init_mjit_cont(fiber);
 }
 
 void

20
mjit.c
View file

@@ -262,7 +262,7 @@ mark_ec_units(rb_execution_context_t *ec)
         if (cfp->pc && (iseq = cfp->iseq) != NULL
             && imemo_type((VALUE) iseq) == imemo_iseq
             && (iseq->body->jit_unit) != NULL) {
-            iseq->body->jit_unit->used_code_p = TRUE;
+            iseq->body->jit_unit->used_code_p = true;
         }
 
         if (cfp == ec->cfp)
@@ -275,8 +275,6 @@ mark_ec_units(rb_execution_context_t *ec)
 static void
 unload_units(void)
 {
-    //rb_vm_t *vm = GET_THREAD()->vm;
-    //rb_thread_t *th = NULL;
     struct rb_mjit_unit *unit = 0, *next, *worst;
     struct mjit_cont *cont;
     int delete_num, units_num = active_units.length;
@@ -293,16 +291,14 @@ unload_units(void)
     // Detect units which are in use and can't be unloaded.
     list_for_each(&active_units.head, unit, unode) {
         assert(unit->iseq != NULL && unit->handle != NULL);
-        unit->used_code_p = FALSE;
+        unit->used_code_p = false;
     }
-    // TODO
-    //list_for_each(&vm->living_threads, th, lt_node) {
-    //    mark_ec_units(th->ec);
-    //}
+    // All threads have a root_fiber which has a mjit_cont. Other normal fibers also
+    // have a mjit_cont. Thus we can check ISeqs in use by scanning ec of mjit_conts.
     for (cont = first_cont; cont != NULL; cont = cont->next) {
         mark_ec_units(cont->ec);
     }
-    // TODO: check slale_units and unload unused ones! (note that the unit is not associated to ISeq anymore)
+    // TODO: check stale_units and unload unused ones! (note that the unit is not associated to ISeq anymore)
 
     // Remove 1/10 units more to decrease unloading calls.
     // TODO: Calculate max total_calls in unit_queue and don't unload units
@@ -801,7 +797,11 @@ mjit_init(const struct mjit_options *opts)
     rb_native_cond_initialize(&mjit_worker_wakeup);
     rb_native_cond_initialize(&mjit_gc_wakeup);
 
-    // Make sure root_fiber's saved_ec is scanned by mark_ec_units
+    // Make sure the saved_ec of the initial thread's root_fiber is scanned by mark_ec_units.
+    //
+    // rb_threadptr_root_fiber_setup for the initial thread is called before mjit_init,
+    // meaning mjit_cont_new is skipped for the root_fiber. Therefore we need to call
+    // rb_fiber_init_mjit_cont again with mjit_enabled=true to set the root_fiber's mjit_cont.
     rb_fiber_init_mjit_cont(GET_EC()->fiber_ptr);
 
     // Initialize class_serials cache for compilation

View file

@@ -166,7 +166,7 @@ struct rb_mjit_unit {
     char *so_file;
 #endif
     // Only used by unload_units. Flag to check this unit is currently on stack or not.
-    char used_code_p;
+    bool used_code_p;
     struct list_node unode;
     // mjit_compile's optimization switches
     struct rb_mjit_compile_info compile_info;