Run unload_units in the JIT worker thread

to avoid "Too many JIT code, but skipped unloading units for JIT compaction".
Since unload_units and JIT compaction now run on the same thread, the
`in_compact` locking is no longer needed.

Some functions are moved from mjit.c to mjit_worker.c, since mjit_worker.c
is where functions executed in the JIT worker thread belong.
Takashi Kokubun 2020-11-27 21:28:57 -08:00
parent ddb1736bfc
commit 16dab6b692
3 changed files with 108 additions and 119 deletions
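
The core of the change is a classic flag hand-off between a producer and a
consumer thread: the Ruby thread now only sets `unload_units_p` and broadcasts
on a condition variable, and the worker consumes the flag inside its wait
loop, so the heavy work is always serialized on the worker. A minimal,
self-contained sketch of that shape using pthreads directly (all names here
are illustrative, not MJIT's):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t engine_mutex  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  worker_wakeup = PTHREAD_COND_INITIALIZER;
static bool unload_requested = false; // plays the role of unload_units_p
static bool stop_requested   = false; // plays the role of stop_worker_p

// Client side: request the work and wake the worker. Compare
// mjit_add_iseq_to_process, which now only sets a flag.
static void request_unload(void)
{
    pthread_mutex_lock(&engine_mutex);
    unload_requested = true;
    pthread_cond_broadcast(&worker_wakeup);
    pthread_mutex_unlock(&engine_mutex);
}

// Worker side: the flag is consumed inside the wait loop, so the heavy
// work always runs on this thread. Compare the mjit_worker hunk below.
static void *worker(void *arg)
{
    pthread_mutex_lock(&engine_mutex);
    while (!stop_requested) {
        while (!unload_requested && !stop_requested)
            pthread_cond_wait(&worker_wakeup, &engine_mutex);
        if (unload_requested) {
            unload_requested = false;
            puts("worker: unloading units"); // stands in for unload_units()
        }
    }
    pthread_mutex_unlock(&engine_mutex);
    return NULL;
}

int main(void)
{
    pthread_t th;
    pthread_create(&th, NULL, worker, NULL);
    request_unload();
    pthread_mutex_lock(&engine_mutex);
    stop_requested = true;
    pthread_cond_broadcast(&worker_wakeup);
    pthread_mutex_unlock(&engine_mutex);
    pthread_join(th, NULL);
    return 0;
}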

mjit.c

@@ -166,16 +166,6 @@ free_list(struct rb_mjit_unit_list *list, bool close_handle_p)
list->length = 0;
}
// MJIT info related to an existing continuation.
struct mjit_cont {
rb_execution_context_t *ec; // continuation ec
struct mjit_cont *prev, *next; // used to form lists
};
// Doubly linked list of registered continuations. This is used to detect
// units which are in use in unload_units.
static struct mjit_cont *first_cont;
// Register a new continuation with execution context `ec`. Return MJIT info about
// the continuation.
struct mjit_cont *
@@ -253,87 +243,6 @@ create_unit(const rb_iseq_t *iseq)
iseq->body->jit_unit = unit;
}
// Set up the `used_code_p` field for units whose ISeq is on the stack of `ec`.
static void
mark_ec_units(rb_execution_context_t *ec)
{
const rb_control_frame_t *cfp;
if (ec->vm_stack == NULL)
return;
for (cfp = RUBY_VM_END_CONTROL_FRAME(ec) - 1; ; cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp)) {
const rb_iseq_t *iseq;
if (cfp->pc && (iseq = cfp->iseq) != NULL
&& imemo_type((VALUE) iseq) == imemo_iseq
&& (iseq->body->jit_unit) != NULL) {
iseq->body->jit_unit->used_code_p = true;
}
if (cfp == ec->cfp)
break; // reached the most recent cfp
}
}
// Unload JIT code of some units to satisfy the maximum permitted
// number of units with loaded code.
static void
unload_units(void)
{
struct rb_mjit_unit *unit = 0, *next, *worst;
struct mjit_cont *cont;
int delete_num, units_num = active_units.length;
// For now, we don't unload units when ISeq is GCed. We should
// unload such ISeqs first here.
list_for_each_safe(&active_units.head, unit, next, unode) {
if (unit->iseq == NULL) { // ISeq is GCed.
remove_from_list(unit, &active_units);
free_unit(unit);
}
}
// Detect units which are in use and can't be unloaded.
list_for_each(&active_units.head, unit, unode) {
assert(unit->iseq != NULL && unit->handle != NULL);
unit->used_code_p = false;
}
// All threads have a root_fiber which has a mjit_cont. Other normal fibers also
// have a mjit_cont. Thus we can check ISeqs in use by scanning ec of mjit_conts.
for (cont = first_cont; cont != NULL; cont = cont->next) {
mark_ec_units(cont->ec);
}
// TODO: check stale_units and unload unused ones! (note that the unit is not associated to ISeq anymore)
// Remove an extra 1/10 of the units to reduce how often unloading runs.
// TODO: Calculate max total_calls in unit_queue and don't unload units
// whose total_calls are larger than the max.
delete_num = active_units.length / 10;
for (; active_units.length > mjit_opts.max_cache_size - delete_num;) {
// Find one unit that has the minimum total_calls.
worst = NULL;
list_for_each(&active_units.head, unit, unode) {
if (unit->used_code_p) // We can't unload code on stack.
continue;
if (worst == NULL || worst->iseq->body->total_calls > unit->iseq->body->total_calls) {
worst = unit;
}
}
if (worst == NULL)
break;
// Unload the worst node.
verbose(2, "Unloading unit %d (calls=%lu)", worst->id, worst->iseq->body->total_calls);
assert(worst->handle != NULL);
remove_from_list(worst, &active_units);
free_unit(worst);
}
if (units_num > active_units.length) {
verbose(1, "Too many JIT code -- %d units unloaded", units_num - active_units.length);
}
}
static void
mjit_add_iseq_to_process(const rb_iseq_t *iseq, const struct rb_mjit_compile_info *compile_info)
{
@@ -352,16 +261,7 @@ mjit_add_iseq_to_process(const rb_iseq_t *iseq, const struct rb_mjit_compile_inf
CRITICAL_SECTION_START(3, "in add_iseq_to_process");
add_to_list(iseq->body->jit_unit, &unit_queue);
if (active_units.length >= mjit_opts.max_cache_size) {
if (in_compact) {
verbose(1, "Too many JIT code, but skipped unloading units for JIT compaction");
} else {
RB_DEBUG_COUNTER_INC(mjit_unload_units);
unload_units();
}
if (active_units.length == mjit_opts.max_cache_size && mjit_opts.wait) { // Sometimes all methods may be in use
mjit_opts.max_cache_size++; // avoid infinite loop on `rb_mjit_wait_call`. Note that --jit-wait is just for testing.
verbose(1, "No units can be unloaded -- incremented max-cache-size to %d for --jit-wait", mjit_opts.max_cache_size);
}
unload_units_p = true;
}
verbose(3, "Sending wakeup signal to workers in mjit_add_iseq_to_process");
rb_native_cond_broadcast(&mjit_worker_wakeup);
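
For reference, the eviction policy that unload_units (removed above, re-added
in mjit_worker.c below) implements is: pin every unit whose code is on some
execution context's stack, then repeatedly drop the least-called unpinned unit
until the cache is 1/10 below max_cache_size. A simplified sketch of that
heuristic over a plain array (hypothetical struct and names; the real code
walks a linked list of rb_mjit_unit and reads total_calls off the ISeq):

#include <stdbool.h>
#include <stdio.h>

struct unit {
    long total_calls;  // how often the JIT-ed code was called
    bool used_code_p;  // set by the mark phase; on-stack code is pinned
    bool loaded;       // still occupying a cache slot
};

// Evict least-called, unpinned units until we are delete_num (1/10 of the
// cache) below max_cache_size, mirroring unload_units' loop.
static int unload_units_sketch(struct unit *units, int n, int max_cache_size)
{
    int loaded = 0, unloaded = 0;
    for (int i = 0; i < n; i++)
        if (units[i].loaded) loaded++;

    int delete_num = loaded / 10; // extra slack to amortize future calls
    while (loaded > max_cache_size - delete_num) {
        struct unit *worst = NULL;
        for (int i = 0; i < n; i++) {
            if (!units[i].loaded || units[i].used_code_p) continue;
            if (worst == NULL || worst->total_calls > units[i].total_calls)
                worst = &units[i];
        }
        if (worst == NULL) break; // everything left is pinned
        worst->loaded = false;
        loaded--;
        unloaded++;
    }
    return unloaded;
}

int main(void)
{
    struct unit units[] = {
        { 900, true,  true }, { 5,   false, true }, { 70, false, true },
        { 40,  false, true }, { 300, false, true },
    };
    // With 5 loaded units and a cache size of 4, exactly one unit
    // (the least-called unpinned one, total_calls == 5) is evicted.
    printf("unloaded %d units\n", unload_units_sketch(units, 5, 4));
    return 0;
}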

mjit_worker.c

@@ -225,8 +225,8 @@ static rb_nativethread_cond_t mjit_gc_wakeup;
static int in_gc = 0;
// True when JIT is working.
static bool in_jit = false;
// True when JIT compaction is running.
static bool in_compact = false;
// True when unload_units is requested from Ruby threads.
static bool unload_units_p = false;
// Set to true to stop worker.
static bool stop_worker_p;
// Set to true if worker is stopped.
@@ -973,10 +973,6 @@ compact_all_jit_code(void)
sprint_uniq_filename(c_file, (int)sizeof(c_file), unit->id, MJIT_TMP_PREFIX, c_ext);
sprint_uniq_filename(so_file, (int)sizeof(so_file), unit->id, MJIT_TMP_PREFIX, so_ext);
CRITICAL_SECTION_START(3, "in compact_all_jit_code to guard .c files from unload_units");
in_compact = true;
CRITICAL_SECTION_FINISH(3, "in compact_all_jit_code to guard .c files from unload_units");
bool success = compile_compact_jit_code(c_file);
double start_time = real_ms_time();
if (success) {
@@ -986,10 +982,6 @@ compact_all_jit_code(void)
}
double end_time = real_ms_time();
CRITICAL_SECTION_START(3, "in compact_all_jit_code to release .c files");
in_compact = false;
CRITICAL_SECTION_FINISH(3, "in compact_all_jit_code to release .c files");
if (success) {
void *handle = dlopen(so_file, RTLD_NOW);
if (handle == NULL) {
@@ -1228,6 +1220,97 @@ mjit_capture_cc_entries(const struct rb_iseq_constant_body *compiled_iseq, const
return cc_entries_index;
}
// Set up the `used_code_p` field for units whose ISeq is on the stack of `ec`.
static void
mark_ec_units(rb_execution_context_t *ec)
{
const rb_control_frame_t *cfp;
if (ec->vm_stack == NULL)
return;
for (cfp = RUBY_VM_END_CONTROL_FRAME(ec) - 1; ; cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp)) {
const rb_iseq_t *iseq;
if (cfp->pc && (iseq = cfp->iseq) != NULL
&& imemo_type((VALUE) iseq) == imemo_iseq
&& (iseq->body->jit_unit) != NULL) {
iseq->body->jit_unit->used_code_p = true;
}
if (cfp == ec->cfp)
break; // reached the most recent cfp
}
}
// MJIT info related to an existing continuation.
struct mjit_cont {
rb_execution_context_t *ec; // continuation ec
struct mjit_cont *prev, *next; // used to form lists
};
// Doubly linked list of registered continuations. This is used to detect
// units which are in use in unload_units.
static struct mjit_cont *first_cont;
// Unload JIT code of some units to satisfy the maximum permitted
// number of units with loaded code.
static void
unload_units(void)
{
struct rb_mjit_unit *unit = 0, *next, *worst;
struct mjit_cont *cont;
int delete_num, units_num = active_units.length;
// For now, we don't unload units when ISeq is GCed. We should
// unload such ISeqs first here.
list_for_each_safe(&active_units.head, unit, next, unode) {
if (unit->iseq == NULL) { // ISeq is GCed.
remove_from_list(unit, &active_units);
free_unit(unit);
}
}
// Detect units which are in use and can't be unloaded.
list_for_each(&active_units.head, unit, unode) {
assert(unit->iseq != NULL && unit->handle != NULL);
unit->used_code_p = false;
}
// All threads have a root_fiber which has a mjit_cont. Other normal fibers also
// have a mjit_cont. Thus we can check ISeqs in use by scanning ec of mjit_conts.
for (cont = first_cont; cont != NULL; cont = cont->next) {
mark_ec_units(cont->ec);
}
// TODO: check stale_units and unload unused ones! (note that the unit is not associated to ISeq anymore)
// Remove an extra 1/10 of the units to reduce how often unloading runs.
// TODO: Calculate max total_calls in unit_queue and don't unload units
// whose total_calls are larger than the max.
delete_num = active_units.length / 10;
for (; active_units.length > mjit_opts.max_cache_size - delete_num;) {
// Find one unit that has the minimum total_calls.
worst = NULL;
list_for_each(&active_units.head, unit, unode) {
if (unit->used_code_p) // We can't unload code on stack.
continue;
if (worst == NULL || worst->iseq->body->total_calls > unit->iseq->body->total_calls) {
worst = unit;
}
}
if (worst == NULL)
break;
// Unload the worst node.
verbose(2, "Unloading unit %d (calls=%lu)", worst->id, worst->iseq->body->total_calls);
assert(worst->handle != NULL);
remove_from_list(worst, &active_units);
free_unit(worst);
}
if (units_num > active_units.length) {
verbose(1, "Too many JIT code -- %d units unloaded", units_num - active_units.length);
}
}
// The function implementing a worker. It is executed in a separate
// thread by rb_thread_create_mjit_thread. It compiles precompiled header
// and then compiles requested ISeqs.
@@ -1263,6 +1346,17 @@ mjit_worker(void)
while ((list_empty(&unit_queue.head) || active_units.length >= mjit_opts.max_cache_size) && !stop_worker_p) {
rb_native_cond_wait(&mjit_worker_wakeup, &mjit_engine_mutex);
verbose(3, "Getting wakeup from client");
if (unload_units_p) {
RB_DEBUG_COUNTER_INC(mjit_unload_units);
unload_units();
unload_units_p = false;
if (active_units.length == mjit_opts.max_cache_size && mjit_opts.wait) { // Sometimes all methods may be in use
mjit_opts.max_cache_size++; // avoid infinite loop on `rb_mjit_wait_call`. Note that --jit-wait is just for testing.
verbose(1, "No units can be unloaded -- incremented max-cache-size to %d for --jit-wait", mjit_opts.max_cache_size);
}
}
}
unit = get_from_list(&unit_queue);
CRITICAL_SECTION_FINISH(3, "in worker dequeue");
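
The moved `struct mjit_cont` is a plain doubly-linked list headed by
first_cont, which is what lets unload_units reach every live execution
context during its mark phase. A sketch of the register/unregister
operations such a list needs (illustrative names and an opaque `ec`; the
real counterparts take an rb_execution_context_t and sit next to the struct
in the hunk above):

#include <stdlib.h>

struct cont {
    void *ec;                 // the continuation's execution context
    struct cont *prev, *next; // used to form the list
};

static struct cont *first_cont;

// Register: push onto the head of the list so the worker can find it.
static struct cont *cont_register(void *ec)
{
    struct cont *c = calloc(1, sizeof(*c));
    if (c == NULL) return NULL;
    c->ec = ec;
    c->next = first_cont;
    if (first_cont != NULL)
        first_cont->prev = c;
    first_cont = c;
    return c;
}

// Unregister: unlink in O(1), which is why the list keeps a prev pointer.
static void cont_unregister(struct cont *c)
{
    if (c->prev != NULL) c->prev->next = c->next;
    else                 first_cont = c->next;
    if (c->next != NULL) c->next->prev = c->prev;
    free(c);
}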

test/ruby/test_jit.rb

@@ -690,16 +690,11 @@ class TestJIT < Test::Unit::TestCase
assert_match(/\A#{JIT_SUCCESS_PREFIX}: mjit#{i}@\(eval\):/, errs[i], debug_info)
end
assert_equal("Too many JIT code -- 1 units unloaded\n", errs[10], debug_info)
assert_match(/\A#{JIT_SUCCESS_PREFIX}: mjit10@\(eval\):/, errs[11], debug_info)
# On --jit-wait, when the number of JIT-ed code reaches --jit-max-cache,
# it should trigger compaction.
if RUBY_PLATFORM.match?(/mswin|mingw/) # compaction is not supported on Windows yet
assert_equal("Too many JIT code -- 1 units unloaded\n", errs[10], debug_info)
assert_match(/\A#{JIT_SUCCESS_PREFIX}: mjit10@\(eval\):/, errs[11], debug_info)
else
assert_equal("Too many JIT code, but skipped unloading units for JIT compaction\n", errs[10], debug_info)
assert_equal("No units can be unloaded -- incremented max-cache-size to 11 for --jit-wait\n", errs[11], debug_info)
assert_match(/\A#{JIT_SUCCESS_PREFIX}: mjit10@\(eval\):/, errs[12], debug_info)
unless RUBY_PLATFORM.match?(/mswin|mingw/) # compaction is not supported on Windows yet
assert_equal(3, compactions.size, debug_info)
end