mirror of https://github.com/ruby/ruby.git, synced 2022-11-09 12:17:21 -05:00

parent 64fb3279d2
commit 1162523bae

Notes (git), 2022-06-16 01:41:19 +09:00:
    Merged-By: k0kubun <takashikkbn@gmail.com>

8 changed files with 356 additions and 470 deletions

@@ -9657,6 +9657,8 @@ mjit.$(OBJEXT): {$(VPATH)}mjit_worker.c
 mjit.$(OBJEXT): {$(VPATH)}node.h
 mjit.$(OBJEXT): {$(VPATH)}onigmo.h
 mjit.$(OBJEXT): {$(VPATH)}oniguruma.h
+mjit.$(OBJEXT): {$(VPATH)}ractor.h
+mjit.$(OBJEXT): {$(VPATH)}ractor_core.h
 mjit.$(OBJEXT): {$(VPATH)}ruby_assert.h
 mjit.$(OBJEXT): {$(VPATH)}ruby_atomic.h
 mjit.$(OBJEXT): {$(VPATH)}st.h
@@ -13800,6 +13802,7 @@ signal.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
 signal.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
 signal.$(OBJEXT): $(CCAN_DIR)/list/list.h
 signal.$(OBJEXT): $(CCAN_DIR)/str/str.h
+signal.$(OBJEXT): $(hdrdir)/ruby.h
 signal.$(OBJEXT): $(hdrdir)/ruby/ruby.h
 signal.$(OBJEXT): $(top_srcdir)/internal/array.h
 signal.$(OBJEXT): $(top_srcdir)/internal/compilers.h
@@ -13985,6 +13988,7 @@ signal.$(OBJEXT): {$(VPATH)}internal/warning_push.h
 signal.$(OBJEXT): {$(VPATH)}internal/xmalloc.h
 signal.$(OBJEXT): {$(VPATH)}method.h
 signal.$(OBJEXT): {$(VPATH)}missing.h
+signal.$(OBJEXT): {$(VPATH)}mjit.h
 signal.$(OBJEXT): {$(VPATH)}node.h
 signal.$(OBJEXT): {$(VPATH)}onigmo.h
 signal.$(OBJEXT): {$(VPATH)}oniguruma.h
@@ -14000,6 +14004,7 @@ signal.$(OBJEXT): {$(VPATH)}thread_native.h
 signal.$(OBJEXT): {$(VPATH)}vm_core.h
 signal.$(OBJEXT): {$(VPATH)}vm_debug.h
 signal.$(OBJEXT): {$(VPATH)}vm_opts.h
+signal.$(OBJEXT): {$(VPATH)}yjit.h
 sprintf.$(OBJEXT): $(hdrdir)/ruby/ruby.h
 sprintf.$(OBJEXT): $(top_srcdir)/internal/bignum.h
 sprintf.$(OBJEXT): $(top_srcdir)/internal/bits.h
gc.c (3 changed lines)

@@ -9497,8 +9497,6 @@ gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_
     if (UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
     if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
 
-    mjit_gc_start_hook();
-
     during_gc = TRUE;
     RUBY_DEBUG_LOG("%s (%s)",gc_enter_event_cstr(event), gc_current_status(objspace));
     gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
@@ -9517,7 +9515,6 @@ gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_l
     gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
     during_gc = FALSE;
 
-    mjit_gc_exit_hook();
     gc_exit_clock(objspace, event);
     RB_VM_LOCK_LEAVE_LEV(lock_lev);
 
mjit.c (316 changed lines)

@@ -25,6 +25,7 @@
 #include "internal/hash.h"
 #include "internal/warnings.h"
 #include "vm_sync.h"
+#include "ractor_core.h"
 
 #include "mjit_worker.c"
 
@@ -50,40 +51,6 @@ get_uniq_filename(unsigned long id, const char *prefix, const char *suffix)
     return str;
 }
 
-// Wait until workers don't compile any iseq. It is called at the
-// start of GC.
-void
-mjit_gc_start_hook(void)
-{
-    if (!mjit_enabled)
-        return;
-    CRITICAL_SECTION_START(4, "mjit_gc_start_hook");
-    while (in_jit) {
-        verbose(4, "Waiting wakeup from a worker for GC");
-        rb_native_cond_wait(&mjit_client_wakeup, &mjit_engine_mutex);
-        verbose(4, "Getting wakeup from a worker for GC");
-    }
-    in_gc++;
-    CRITICAL_SECTION_FINISH(4, "mjit_gc_start_hook");
-}
-
-// Send a signal to workers to continue iseq compilations. It is
-// called at the end of GC.
-void
-mjit_gc_exit_hook(void)
-{
-    if (!mjit_enabled)
-        return;
-    CRITICAL_SECTION_START(4, "mjit_gc_exit_hook");
-    in_gc--;
-    RUBY_ASSERT_ALWAYS(in_gc >= 0);
-    if (!in_gc) {
-        verbose(4, "Sending wakeup signal to workers after GC");
-        rb_native_cond_broadcast(&mjit_gc_wakeup);
-    }
-    CRITICAL_SECTION_FINISH(4, "mjit_gc_exit_hook");
-}
-
 // Prohibit calling JIT-ed code and let existing JIT-ed frames exit before the next insn.
 void
 mjit_cancel_all(const char *reason)
@@ -133,9 +100,6 @@ mjit_free_iseq(const rb_iseq_t *iseq)
     if (!mjit_enabled)
         return;
 
-    CRITICAL_SECTION_START(4, "mjit_free_iseq");
-    RUBY_ASSERT_ALWAYS(in_gc);
-    RUBY_ASSERT_ALWAYS(!in_jit);
     if (ISEQ_BODY(iseq)->jit_unit) {
         // jit_unit is not freed here because it may be referred by multiple
         // lists of units. `get_from_list` and `mjit_finish` do the job.
@@ -150,7 +114,6 @@ mjit_free_iseq(const rb_iseq_t *iseq)
             unit->iseq = NULL;
         }
     }
-    CRITICAL_SECTION_FINISH(4, "mjit_free_iseq");
 }
 
 // Free unit list. This should be called only when worker is finished
@@ -245,19 +208,169 @@ finish_conts(void)
     }
 }
 
-// Create unit for `iseq`. This function may be called from an MJIT worker.
+static void mjit_wait(struct rb_iseq_constant_body *body);
+
+// Check the unit queue and start mjit_compile if nothing is in progress.
 static void
+check_unit_queue(void)
+{
+    if (worker_stopped) return;
+    if (current_cc_pid != 0) return; // still compiling
+
+    // Run unload_units after it's requested `max_cache_size / 10` (default: 10) times.
+    // This throttles the call to mitigate locking in unload_units. It also throttles JIT compaction.
+    int throttle_threshold = mjit_opts.max_cache_size / 10;
+    if (unload_requests >= throttle_threshold) {
+        unload_units();
+        unload_requests = 0;
+        if (active_units.length == mjit_opts.max_cache_size && mjit_opts.wait) { // Sometimes all methods may be in use
+            mjit_opts.max_cache_size++; // avoid infinite loop on `rb_mjit_wait_call`. Note that --jit-wait is just for testing.
+            verbose(1, "No units can be unloaded -- incremented max-cache-size to %d for --jit-wait", mjit_opts.max_cache_size);
+        }
+    }
+    if (active_units.length >= mjit_opts.max_cache_size) return; // wait until unload_units makes a progress
+
+    // Dequeue a unit
+    struct rb_mjit_unit *unit = get_from_list(&unit_queue);
+    if (unit == NULL) return;
+
+#ifdef _WIN32
+    // Synchronously compile methods on Windows.
+    // mswin: No SIGCHLD, MinGW: directly compiling .c to .so doesn't work
+    mjit_func_t func = convert_unit_to_func(unit);
+    if ((uintptr_t)func > (uintptr_t)LAST_JIT_ISEQ_FUNC) {
+        add_to_list(unit, &active_units);
+        MJIT_ATOMIC_SET(ISEQ_BODY(unit->iseq)->jit_func, func);
+    }
+#else
+    current_cc_ms = real_ms_time();
+    current_cc_unit = unit;
+    current_cc_pid = start_mjit_compile(unit);
+    // TODO: handle -1
+    if (mjit_opts.wait) {
+        mjit_wait(unit->iseq->body);
+    }
+#endif
+}
+
+// Create unit for `iseq`. This function may be called from an MJIT worker.
+static struct rb_mjit_unit*
 create_unit(const rb_iseq_t *iseq)
 {
-    struct rb_mjit_unit *unit;
-
-    unit = calloc(1, sizeof(struct rb_mjit_unit));
+    // To prevent GC, don't use ZALLOC // TODO: just use ZALLOC
+    struct rb_mjit_unit *unit = calloc(1, sizeof(struct rb_mjit_unit));
     if (unit == NULL)
-        return;
+        return NULL;
 
     unit->id = current_unit_num++;
-    unit->iseq = (rb_iseq_t *)iseq;
-    ISEQ_BODY(iseq)->jit_unit = unit;
+    if (iseq == NULL) { // Compact unit
+        unit->compact_p = true;
+    } else { // Normal unit
+        unit->iseq = (rb_iseq_t *)iseq;
+        ISEQ_BODY(iseq)->jit_unit = unit;
+    }
+    return unit;
+}
+
+// Check if it should compact all JIT code and start it as needed
+static void
+check_compaction(void)
+{
+#if USE_JIT_COMPACTION
+    // Allow only `max_cache_size / 100` times (default: 100) of compaction.
+    // Note: GC of compacted code has not been implemented yet.
+    int max_compact_size = mjit_opts.max_cache_size / 100;
+    if (max_compact_size < 10) max_compact_size = 10;
+
+    // Run unload_units after it's requested `max_cache_size / 10` (default: 10) times.
+    // This throttles the call to mitigate locking in unload_units. It also throttles JIT compaction.
+    int throttle_threshold = mjit_opts.max_cache_size / 10;
+
+    if (compact_units.length < max_compact_size
+        && ((!mjit_opts.wait && unit_queue.length == 0 && active_units.length > 1)
+            || (active_units.length == mjit_opts.max_cache_size && compact_units.length * throttle_threshold <= total_unloads))) { // throttle compaction by total_unloads
+        struct rb_mjit_unit *unit = create_unit(NULL);
+        if (unit != NULL) {
+            // TODO: assert unit is null
+            current_cc_ms = real_ms_time();
+            current_cc_unit = unit;
+            current_cc_pid = start_mjit_compact(unit);
+            // TODO: check -1
+        }
+    }
+#endif
+}
+
+// Check the current CC process if any, and start a next C compiler process as needed.
+void
+mjit_notify_waitpid(int status)
+{
+    // TODO: check current_cc_pid?
+    current_cc_pid = 0;
+
+    // Delete .c file
+    char c_file[MAXPATHLEN];
+    sprint_uniq_filename(c_file, (int)sizeof(c_file), current_cc_unit->id, MJIT_TMP_PREFIX, ".c");
+    if (!mjit_opts.save_temps)
+        remove_file(c_file);
+
+    // Check the result
+    bool success = false;
+    if (WIFEXITED(status)) {
+        success = (WEXITSTATUS(status) == 0);
+    }
+    if (!success) {
+        verbose(2, "Failed to generate so");
+        // TODO: free unit?
+        // TODO: set NOT_COMPILED_JIT_ISEQ_FUNC?
+        return;
+    }
+
+    // Load .so file
+    char so_file[MAXPATHLEN];
+    sprint_uniq_filename(so_file, (int)sizeof(so_file), current_cc_unit->id, MJIT_TMP_PREFIX, DLEXT);
+    if (current_cc_unit->compact_p) { // Compact unit
+#if USE_JIT_COMPACTION
+        load_compact_funcs_from_so(current_cc_unit, c_file, so_file);
+        current_cc_unit = NULL;
+#else
+        RUBY_ASSERT(!current_cc_unit->compact_p);
+#endif
+    }
+    else { // Normal unit
+        // Load the function from so
+        char funcname[MAXPATHLEN];
+        sprint_funcname(funcname, current_cc_unit);
+        void *func = load_func_from_so(so_file, funcname, current_cc_unit);
+
+        // Delete .so file
+        if (!mjit_opts.save_temps)
+            remove_file(so_file);
+
+        // Set the jit_func if successful
+        if ((uintptr_t)func > (uintptr_t)LAST_JIT_ISEQ_FUNC) {
+            rb_iseq_t *iseq = current_cc_unit->iseq;
+            double end_time = real_ms_time();
+            verbose(1, "JIT success (%.1fms): %s@%s:%ld -> %s",
+                    end_time - current_cc_ms, RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
+                    RSTRING_PTR(rb_iseq_path(iseq)), FIX2LONG(ISEQ_BODY(iseq)->location.first_lineno), c_file);
+
+            add_to_list(current_cc_unit, &active_units);
+            MJIT_ATOMIC_SET(ISEQ_BODY(iseq)->jit_func, func);
+        }
+        current_cc_unit = NULL;
+
+        // Run compaction if it should
+        if (!stop_worker_p) {
+            check_compaction();
+        }
+    }
+
+    // Skip further compilation if mjit_finish is trying to stop it
+    if (!stop_worker_p) {
+        // Start the next one as needed
+        check_unit_queue();
+    }
 }
 
 // Return true if given ISeq body should be compiled by MJIT
@@ -273,23 +386,14 @@ mjit_target_iseq_p(struct rb_iseq_constant_body *body)
 static void
 mjit_add_iseq_to_process(const rb_iseq_t *iseq, const struct rb_mjit_compile_info *compile_info, bool recompile_p)
 {
-    if (!mjit_enabled || pch_status == PCH_FAILED)
+    // TODO: Support non-main Ractors
+    if (!mjit_enabled || pch_status == PCH_FAILED || !rb_ractor_main_p())
         return;
     if (!mjit_target_iseq_p(ISEQ_BODY(iseq))) {
         ISEQ_BODY(iseq)->jit_func = (mjit_func_t)NOT_COMPILED_JIT_ISEQ_FUNC; // skip mjit_wait
         return;
     }
 
-    if (!recompile_p) {
-        CRITICAL_SECTION_START(3, "in add_iseq_to_process");
-
-        // This prevents multiple Ractors from enqueueing the same ISeq twice.
-        if (rb_multi_ractor_p() && (uintptr_t)ISEQ_BODY(iseq)->jit_func != NOT_ADDED_JIT_ISEQ_FUNC) {
-            CRITICAL_SECTION_FINISH(3, "in add_iseq_to_process");
-            return;
-        }
-    }
-
     RB_DEBUG_COUNTER_INC(mjit_add_iseq_to_process);
     ISEQ_BODY(iseq)->jit_func = (mjit_func_t)NOT_READY_JIT_ISEQ_FUNC;
     create_unit(iseq);
@@ -302,12 +406,6 @@ mjit_add_iseq_to_process(const rb_iseq_t *iseq, const struct rb_mjit_compile_inf
     if (active_units.length >= mjit_opts.max_cache_size) {
         unload_requests++;
     }
-
-    if (!recompile_p) {
-        verbose(3, "Sending wakeup signal to workers in mjit_add_iseq_to_process");
-        rb_native_cond_broadcast(&mjit_worker_wakeup);
-        CRITICAL_SECTION_FINISH(3, "in add_iseq_to_process");
-    }
 }
 
 // Add ISEQ to be JITed in parallel with the current thread.
@@ -316,6 +414,7 @@ void
 rb_mjit_add_iseq_to_process(const rb_iseq_t *iseq)
 {
     mjit_add_iseq_to_process(iseq, NULL, false);
+    check_unit_queue();
 }
 
 // For this timeout seconds, --jit-wait will wait for JIT compilation finish.
@@ -324,23 +423,21 @@ rb_mjit_add_iseq_to_process(const rb_iseq_t *iseq)
 static void
 mjit_wait(struct rb_iseq_constant_body *body)
 {
+    pid_t initial_pid = current_cc_pid;
     struct timeval tv;
     int tries = 0;
     tv.tv_sec = 0;
     tv.tv_usec = 1000;
-    while (body->jit_func == (mjit_func_t)NOT_READY_JIT_ISEQ_FUNC) {
+    while (body == NULL ? current_cc_pid == initial_pid : body->jit_func == (mjit_func_t)NOT_READY_JIT_ISEQ_FUNC) { // TODO: refactor this
         tries++;
         if (tries / 1000 > MJIT_WAIT_TIMEOUT_SECONDS || pch_status == PCH_FAILED) {
-            CRITICAL_SECTION_START(3, "in rb_mjit_wait_call to set jit_func");
-            body->jit_func = (mjit_func_t)NOT_COMPILED_JIT_ISEQ_FUNC; // JIT worker seems dead. Give up.
-            CRITICAL_SECTION_FINISH(3, "in rb_mjit_wait_call to set jit_func");
+            if (body != NULL) {
+                body->jit_func = (mjit_func_t) NOT_COMPILED_JIT_ISEQ_FUNC; // JIT worker seems dead. Give up.
+            }
             mjit_warning("timed out to wait for JIT finish");
             break;
         }
 
-        CRITICAL_SECTION_START(3, "in rb_mjit_wait_call for a client wakeup");
-        rb_native_cond_broadcast(&mjit_worker_wakeup);
-        CRITICAL_SECTION_FINISH(3, "in rb_mjit_wait_call for a client wakeup");
         rb_thread_wait_for(tv);
     }
 }
@@ -377,24 +474,8 @@ mjit_recompile(const rb_iseq_t *iseq)
             RSTRING_PTR(rb_iseq_path(iseq)), FIX2INT(ISEQ_BODY(iseq)->location.first_lineno));
     assert(ISEQ_BODY(iseq)->jit_unit != NULL);
 
-    if (UNLIKELY(mjit_opts.wait)) {
-        CRITICAL_SECTION_START(3, "in rb_mjit_recompile_iseq");
-        remove_from_list(ISEQ_BODY(iseq)->jit_unit, &active_units);
-        add_to_list(ISEQ_BODY(iseq)->jit_unit, &stale_units);
-        mjit_add_iseq_to_process(iseq, &ISEQ_BODY(iseq)->jit_unit->compile_info, true);
-        CRITICAL_SECTION_FINISH(3, "in rb_mjit_recompile_iseq");
-        mjit_wait(ISEQ_BODY(iseq));
-    }
-    else {
-        // Lazily move active_units to stale_units to avoid race conditions around active_units with compaction.
-        // Also, it's lazily moved to unit_queue as well because otherwise it won't be added to stale_units properly.
-        // It's good to avoid a race condition between mjit_add_iseq_to_process and mjit_compile around jit_unit as well.
-        CRITICAL_SECTION_START(3, "in rb_mjit_recompile_iseq");
-        ISEQ_BODY(iseq)->jit_unit->stale_p = true;
-        ISEQ_BODY(iseq)->jit_func = (mjit_func_t)NOT_READY_JIT_ISEQ_FUNC;
-        pending_stale_p = true;
-        CRITICAL_SECTION_FINISH(3, "in rb_mjit_recompile_iseq");
-    }
+    mjit_add_iseq_to_process(iseq, &ISEQ_BODY(iseq)->jit_unit->compile_info, true);
+    check_unit_queue();
 }
 
 // Recompile iseq, disabling send optimization
@@ -642,17 +723,6 @@ start_worker(void)
 {
     stop_worker_p = false;
     worker_stopped = false;
-
-    if (!rb_thread_create_mjit_thread(mjit_worker)) {
-        mjit_enabled = false;
-        rb_native_mutex_destroy(&mjit_engine_mutex);
-        rb_native_cond_destroy(&mjit_pch_wakeup);
-        rb_native_cond_destroy(&mjit_client_wakeup);
-        rb_native_cond_destroy(&mjit_worker_wakeup);
-        rb_native_cond_destroy(&mjit_gc_wakeup);
-        verbose(1, "Failure in MJIT thread initialization\n");
-        return false;
-    }
     return true;
 }
 
@@ -816,21 +886,21 @@ mjit_init(const struct mjit_options *opts)
 
     // Initialize worker thread
     start_worker();
 
+#ifndef _MSC_VER
+    // TODO: Consider running C compiler asynchronously
+    make_pch();
+#endif
 }
 
 static void
 stop_worker(void)
 {
-    rb_execution_context_t *ec = GET_EC();
-
-    while (!worker_stopped) {
-        verbose(3, "Sending cancel signal to worker");
-        CRITICAL_SECTION_START(3, "in stop_worker");
-        stop_worker_p = true; // Setting this inside loop because RUBY_VM_CHECK_INTS may make this false.
-        rb_native_cond_broadcast(&mjit_worker_wakeup);
-        CRITICAL_SECTION_FINISH(3, "in stop_worker");
-        RUBY_VM_CHECK_INTS(ec);
+    stop_worker_p = true;
+    if (current_cc_unit != NULL) {
+        mjit_wait(current_cc_unit->iseq->body);
     }
+    worker_stopped = true;
 }
 
 // Stop JIT-compiling methods but compiled code is kept available.
@@ -846,15 +916,12 @@ mjit_pause(bool wait_p)
 
     // Flush all queued units with no option or `wait: true`
     if (wait_p) {
-        struct timeval tv;
-        tv.tv_sec = 0;
-        tv.tv_usec = 1000;
-
-        while (unit_queue.length > 0 && active_units.length < mjit_opts.max_cache_size) { // inverse of condition that waits for mjit_worker_wakeup
-            CRITICAL_SECTION_START(3, "in mjit_pause for a worker wakeup");
-            rb_native_cond_broadcast(&mjit_worker_wakeup);
-            CRITICAL_SECTION_FINISH(3, "in mjit_pause for a worker wakeup");
-            rb_thread_wait_for(tv);
+        while (current_cc_unit != NULL) {
+            if (current_cc_unit->compact_p) {
+                mjit_wait(NULL);
+            } else {
+                mjit_wait(current_cc_unit->iseq->body);
+            }
         }
     }
 
@@ -932,21 +999,8 @@ mjit_finish(bool close_handle_p)
     if (!mjit_enabled)
         return;
 
-    // Wait for pch finish
-    verbose(2, "Stopping worker thread");
-    CRITICAL_SECTION_START(3, "in mjit_finish to wakeup from pch");
-    // As our threads are detached, we could just cancel them. But it
-    // is a bad idea because OS processes (C compiler) started by
-    // threads can produce temp files. And even if the temp files are
-    // removed, the used C compiler still complaint about their
-    // absence. So wait for a clean finish of the threads.
-    while (pch_status == PCH_NOT_READY) {
-        verbose(3, "Waiting wakeup from make_pch");
-        rb_native_cond_wait(&mjit_pch_wakeup, &mjit_engine_mutex);
-    }
-    CRITICAL_SECTION_FINISH(3, "in mjit_finish to wakeup from pch");
-
     // Stop worker
+    verbose(2, "Stopping worker thread");
     stop_worker();
 
     rb_native_mutex_destroy(&mjit_engine_mutex);
@@ -1002,7 +1056,6 @@ mjit_mark(void)
     //
     // Because an MJIT worker may modify active_units anytime, we need to convert
     // the linked list to an array to safely loop its ISeqs without keeping a lock.
-    CRITICAL_SECTION_START(4, "mjit_mark");
     int length = 0;
     if (compiling_iseqs != NULL) {
         while (compiling_iseqs[length]) length++;
@@ -1023,7 +1076,6 @@ mjit_mark(void)
         i++;
     }
     assert(i == length);
-    CRITICAL_SECTION_FINISH(4, "mjit_mark");
 
     for (i = 0; i < length; i++) {
         if (iseqs[i] == NULL) // ISeq is GC-ed
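Taken together, the mjit.c hunks above replace the old worker-thread handshake (mjit_worker_wakeup, in_gc/in_jit and the CRITICAL_SECTION locks) with a child-process compile cycle: check_unit_queue() starts one C compiler process via start_mjit_compile(), and mjit_notify_waitpid() later inspects the exit status and loads the resulting .so. The stand-alone C sketch below only illustrates the general fork / SIGCHLD / waitpid shape that this cycle relies on; every identifier in it is invented for the example and the child is a placeholder (/bin/true), not the real compiler invocation.

    /* Hedged sketch (not from the commit): fork a child, learn about its exit via
     * SIGCHLD, reap it with waitpid(), and branch on the exit status -- the same
     * shape the new MJIT compile cycle uses, minus all Ruby specifics. */
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static volatile sig_atomic_t child_exited = 0;

    static void on_sigchld(int sig)
    {
        (void)sig;
        child_exited = 1; /* async-signal-safe: only set a flag */
    }

    int main(void)
    {
        struct sigaction sa;
        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_sigchld;
        sigaction(SIGCHLD, &sa, NULL);

        /* Block SIGCHLD so the flag check and the wait below cannot race. */
        sigset_t block, oldmask;
        sigemptyset(&block);
        sigaddset(&block, SIGCHLD);
        sigprocmask(SIG_BLOCK, &block, &oldmask);

        pid_t pid = fork(); /* stand-in for spawning the C compiler */
        if (pid == 0) {
            execl("/bin/true", "true", (char *)NULL); /* placeholder "compilation" */
            _exit(127);
        }

        while (!child_exited)
            sigsuspend(&oldmask); /* atomically unblock SIGCHLD and wait for it */

        int status;
        if (waitpid(pid, &status, WNOHANG) == pid && WIFEXITED(status) && WEXITSTATUS(status) == 0)
            printf("child succeeded; a real JIT would now load the produced .so\n");
        else
            printf("child failed; a real JIT would skip loading\n");
        return 0;
    }

The real change does not pause the interpreter like this sketch does; Ruby keeps executing, and the exit status reaches mjit_notify_waitpid() through the signal/waitpid plumbing shown in the process.c section further below.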
mjit.h (5 changed lines)

@@ -95,14 +95,13 @@ RUBY_SYMBOL_EXPORT_END
 extern void mjit_cancel_all(const char *reason);
 extern bool mjit_compile(FILE *f, const rb_iseq_t *iseq, const char *funcname, int id);
 extern void mjit_init(const struct mjit_options *opts);
-extern void mjit_gc_start_hook(void);
-extern void mjit_gc_exit_hook(void);
 extern void mjit_free_iseq(const rb_iseq_t *iseq);
 extern void mjit_update_references(const rb_iseq_t *iseq);
 extern void mjit_mark(void);
 extern struct mjit_cont *mjit_cont_new(rb_execution_context_t *ec);
 extern void mjit_cont_free(struct mjit_cont *cont);
 extern void mjit_mark_cc_entries(const struct rb_iseq_constant_body *const body);
+extern void mjit_notify_waitpid(int status);
 
 # ifdef MJIT_HEADER
 NOINLINE(static COLDFUNC VALUE mjit_exec_slowpath(rb_execution_context_t *ec, const rb_iseq_t *iseq, struct rb_iseq_constant_body *body));
@@ -215,8 +214,6 @@ void mjit_finish(bool close_handle_p);
 static inline void mjit_cancel_all(const char *reason){}
 static inline struct mjit_cont *mjit_cont_new(rb_execution_context_t *ec){return NULL;}
 static inline void mjit_cont_free(struct mjit_cont *cont){}
-static inline void mjit_gc_start_hook(void){}
-static inline void mjit_gc_exit_hook(void){}
 static inline void mjit_free_iseq(const rb_iseq_t *iseq){}
 static inline void mjit_mark(void){}
 static inline VALUE mjit_exec(rb_execution_context_t *ec) { return Qundef; /* unreachable */ }
mjit_worker.c (392 changed lines)

@@ -164,8 +164,8 @@ struct rb_mjit_unit {
 #endif
     // Only used by unload_units. Flag to check this unit is currently on stack or not.
     bool used_code_p;
-    // True if this is still in active_units but it's to be lazily removed
-    bool stale_p;
+    // True if it's a unit for JIT compaction
+    bool compact_p;
     // mjit_compile's optimization switches
     struct rb_mjit_compile_info compile_info;
     // captured CC values, they should be marked with iseq.
@@ -191,7 +191,7 @@ extern void rb_native_cond_broadcast(rb_nativethread_cond_t *cond);
 extern void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex);
 
 // process.c
-extern rb_pid_t ruby_waitpid_locked(rb_vm_t *, rb_pid_t, int *status, int options, rb_nativethread_cond_t *cond);
+extern void mjit_add_waiting_pid(rb_vm_t *vm, rb_pid_t pid);
 
 // A copy of MJIT portion of MRI options since MJIT initialization. We
 // need them as MJIT threads still can work when the most MRI data were
@@ -227,12 +227,6 @@ static rb_nativethread_cond_t mjit_client_wakeup;
 static rb_nativethread_cond_t mjit_worker_wakeup;
 // A thread conditional to wake up workers if at the end of GC.
 static rb_nativethread_cond_t mjit_gc_wakeup;
-// Greater than 0 when GC is working.
-static int in_gc = 0;
-// True when JIT is working.
-static bool in_jit = false;
-// True when active_units has at least one stale_p=true unit.
-static bool pending_stale_p = false;
 // The times when unload_units is requested. unload_units is called after some requests.
 static int unload_requests = 0;
 // The total number of unloaded units.
@@ -259,6 +253,13 @@ static rb_pid_t pch_owner_pid;
 // shared by the workers and the pch thread.
 static enum {PCH_NOT_READY, PCH_FAILED, PCH_SUCCESS} pch_status;
 
+// The start timestamp of current compilation
+static double current_cc_ms = 0.0; // TODO: make this part of unit?
+// Currently compiling MJIT unit
+static struct rb_mjit_unit *current_cc_unit = NULL;
+// PID of currently running C compiler process. 0 if nothing is running.
+static pid_t current_cc_pid = 0; // TODO: make this part of unit?
+
 #ifndef _MSC_VER
 // Name of the header file.
 static char *header_file;
@@ -492,12 +493,6 @@ real_ms_time(void)
 static struct rb_mjit_unit *
 get_from_list(struct rb_mjit_unit_list *list)
 {
-    while (in_gc) {
-        verbose(3, "Waiting wakeup from GC");
-        rb_native_cond_wait(&mjit_gc_wakeup, &mjit_engine_mutex);
-    }
-    in_jit = true; // Lock GC
-
     // Find iseq with max total_calls
     struct rb_mjit_unit *unit = NULL, *next, *best = NULL;
     ccan_list_for_each_safe(&list->head, unit, next, unode) {
@@ -512,10 +507,6 @@ get_from_list(struct rb_mjit_unit_list *list)
         }
     }
 
-    in_jit = false; // Unlock GC
-    verbose(3, "Sending wakeup signal to client in a mjit-worker for GC");
-    rb_native_cond_signal(&mjit_client_wakeup);
-
     if (best) {
         remove_from_list(best, list);
     }
@@ -632,18 +623,9 @@ static int
 exec_process(const char *path, char *const argv[])
 {
     int stat, exit_code = -2;
-    rb_vm_t *vm = WAITPID_USE_SIGCHLD ? GET_VM() : 0;
-    rb_nativethread_cond_t cond;
-
-    if (vm) {
-        rb_native_cond_initialize(&cond);
-        rb_native_mutex_lock(&vm->waitpid_lock);
-    }
-
     pid_t pid = start_process(path, argv);
     for (;pid > 0;) {
-        pid_t r = vm ? ruby_waitpid_locked(vm, pid, &stat, 0, &cond)
-                     : waitpid(pid, &stat, 0);
+        pid_t r = waitpid(pid, &stat, 0);
         if (r == -1) {
             if (errno == EINTR) continue;
             fprintf(stderr, "[%"PRI_PIDT_PREFIX"d] waitpid(%lu): %s (SIGCHLD=%d,%u)\n",
@@ -662,11 +644,6 @@ exec_process(const char *path, char *const argv[])
             }
         }
     }
-
-    if (vm) {
-        rb_native_mutex_unlock(&vm->waitpid_lock);
-        rb_native_cond_destroy(&cond);
-    }
     return exit_code;
 }
 
@@ -881,16 +858,13 @@ make_pch(void)
     char **args = form_args(4, cc_common_args, CC_CODEFLAG_ARGS, cc_added_args, rest_args);
     if (args == NULL) {
         mjit_warning("making precompiled header failed on forming args");
-        CRITICAL_SECTION_START(3, "in make_pch");
         pch_status = PCH_FAILED;
-        CRITICAL_SECTION_FINISH(3, "in make_pch");
         return;
     }
 
     int exit_code = exec_process(cc_path, args);
     free(args);
 
-    CRITICAL_SECTION_START(3, "in make_pch");
     if (exit_code == 0) {
         pch_status = PCH_SUCCESS;
     }
@@ -898,14 +872,10 @@ make_pch(void)
         mjit_warning("Making precompiled header failed on compilation. Stopping MJIT worker...");
         pch_status = PCH_FAILED;
     }
-    /* wakeup `mjit_finish` */
-    rb_native_cond_broadcast(&mjit_pch_wakeup);
-    CRITICAL_SECTION_FINISH(3, "in make_pch");
 }
 
-// Compile .c file to .so file. It returns true if it succeeds. (non-mswin)
-static bool
-compile_c_to_so(const char *c_file, const char *so_file)
+static pid_t
+start_compiling_c_to_so(const char *c_file, const char *so_file)
 {
     const char *so_args[] = {
         "-o", so_file,
@@ -919,21 +889,27 @@ compile_c_to_so(const char *c_file, const char *so_file)
     };
     char **args = form_args(7, CC_LDSHARED_ARGS, CC_CODEFLAG_ARGS, cc_added_args,
                             so_args, CC_LIBS, CC_DLDFLAGS_ARGS, CC_LINKER_ARGS);
-    if (args == NULL) return false;
-    int exit_code = exec_process(cc_path, args);
+    if (args == NULL) return -1;
+
+    rb_vm_t *vm = GET_VM();
+    rb_native_mutex_lock(&vm->waitpid_lock);
+
+    pid_t pid = start_process(cc_path, args);
+    mjit_add_waiting_pid(vm, pid);
+
+    rb_native_mutex_unlock(&vm->waitpid_lock);
+
     free(args);
-    if (exit_code != 0) {
-        verbose(2, "compile_c_to_so: failed to compile .c to .so: %d", exit_code);
-    }
-    return exit_code == 0;
+    return pid;
 }
 #endif // _MSC_VER
 
 #if USE_JIT_COMPACTION
 static void compile_prelude(FILE *f);
 
+// Compile all JIT code into a single .c file
 static bool
-compile_compact_jit_code(char* c_file)
+mjit_compact(char* c_file)
 {
     FILE *f;
     int fd = rb_cloexec_open(c_file, c_file_access_mode, 0600);
@@ -946,32 +922,6 @@ mjit_compact(char* c_file)
 
     compile_prelude(f);
 
-    // wait until mjit_gc_exit_hook is called
-    CRITICAL_SECTION_START(3, "before mjit_compile to wait GC finish");
-    while (in_gc) {
-        verbose(3, "Waiting wakeup from GC");
-        rb_native_cond_wait(&mjit_gc_wakeup, &mjit_engine_mutex);
-    }
-    // We need to check again here because we could've waited on GC above
-    bool iseq_gced = false;
-    struct rb_mjit_unit *child_unit = 0, *next;
-    ccan_list_for_each_safe(&active_units.head, child_unit, next, unode) {
-        if (child_unit->iseq == NULL) { // ISeq is GC-ed
-            iseq_gced = true;
-            verbose(1, "JIT compaction: A method for JIT code u%d is obsoleted. Compaction will be skipped.", child_unit->id);
-            remove_from_list(child_unit, &active_units);
-            free_unit(child_unit); // unload it without waiting for throttled unload_units to retry compaction quickly
-        }
-    }
-    in_jit = !iseq_gced;
-    CRITICAL_SECTION_FINISH(3, "before mjit_compile to wait GC finish");
-    if (!in_jit) {
-        fclose(f);
-        if (!mjit_opts.save_temps)
-            remove_file(c_file);
-        return false;
-    }
-
     // This entire loop lock GC so that we do not need to consider a case that
     // ISeq is GC-ed in a middle of re-compilation. It takes 3~4ms with 100 methods
     // on my machine. It's not too bad compared to compilation time of C (7200~8000ms),
@@ -980,10 +930,9 @@ mjit_compact(char* c_file)
     // TODO: Consider using a more granular lock after we implement inlining across
     // compacted functions (not done yet).
     bool success = true;
+    struct rb_mjit_unit *child_unit = 0;
     ccan_list_for_each(&active_units.head, child_unit, unode) {
-        CRITICAL_SECTION_START(3, "before set_compiling_iseqs");
         success &= set_compiling_iseqs(child_unit->iseq);
-        CRITICAL_SECTION_FINISH(3, "after set_compiling_iseqs");
         if (!success) continue;
 
         char funcname[MAXPATHLEN];
@@ -1000,86 +949,68 @@ mjit_compact(char* c_file)
         fprintf(f, "\n/* %s%s%s:%ld */\n", iseq_label, sep, iseq_path, iseq_lineno);
         success &= mjit_compile(f, child_unit->iseq, funcname, child_unit->id);
 
-        CRITICAL_SECTION_START(3, "before compiling_iseqs free");
         free_compiling_iseqs();
-        CRITICAL_SECTION_FINISH(3, "after compiling_iseqs free");
     }
 
-    // release blocking mjit_gc_start_hook
-    CRITICAL_SECTION_START(3, "after mjit_compile to wakeup client for GC");
-    in_jit = false;
-    verbose(3, "Sending wakeup signal to client in a mjit-worker for GC");
-    rb_native_cond_signal(&mjit_client_wakeup);
-    CRITICAL_SECTION_FINISH(3, "in worker to wakeup client for GC");
-
     fclose(f);
     return success;
 }
 
 // Compile all cached .c files and build a single .so file. Reload all JIT func from it.
 // This improves the code locality for better performance in terms of iTLB and iCache.
-static void
-compact_all_jit_code(void)
+static pid_t
+start_mjit_compact(struct rb_mjit_unit *unit)
 {
-    struct rb_mjit_unit *unit, *cur = 0;
     static const char c_ext[] = ".c";
     static const char so_ext[] = DLEXT;
     char c_file[MAXPATHLEN], so_file[MAXPATHLEN];
 
-    // Abnormal use case of rb_mjit_unit that doesn't have ISeq
-    unit = calloc(1, sizeof(struct rb_mjit_unit)); // To prevent GC, don't use ZALLOC
-    if (unit == NULL) return;
-    unit->id = current_unit_num++;
     sprint_uniq_filename(c_file, (int)sizeof(c_file), unit->id, MJIT_TMP_PREFIX, c_ext);
     sprint_uniq_filename(so_file, (int)sizeof(so_file), unit->id, MJIT_TMP_PREFIX, so_ext);
 
-    bool success = compile_compact_jit_code(c_file);
-    double start_time = real_ms_time();
+    bool success = mjit_compact(c_file);
     if (success) {
-        success = compile_c_to_so(c_file, so_file);
-        if (!mjit_opts.save_temps)
-            remove_file(c_file);
+        return start_compiling_c_to_so(c_file, so_file);
     }
+    return -1;
+}
+
+static void
+load_compact_funcs_from_so(struct rb_mjit_unit *unit, char *c_file, char *so_file)
+{
+    struct rb_mjit_unit *cur = 0;
     double end_time = real_ms_time();
 
-    if (success) {
-        void *handle = dlopen(so_file, RTLD_NOW);
-        if (handle == NULL) {
-            mjit_warning("failure in loading code from compacted '%s': %s", so_file, dlerror());
-            free(unit);
-            return;
-        }
-        unit->handle = handle;
-
-        // lazily dlclose handle (and .so file for win32) on `mjit_finish()`.
-        add_to_list(unit, &compact_units);
-
-        if (!mjit_opts.save_temps)
-            remove_so_file(so_file, unit);
-
-        CRITICAL_SECTION_START(3, "in compact_all_jit_code to read list");
-        ccan_list_for_each(&active_units.head, cur, unode) {
-            void *func;
-            char funcname[MAXPATHLEN];
-            sprint_funcname(funcname, cur);
-
-            if ((func = dlsym(handle, funcname)) == NULL) {
-                mjit_warning("skipping to reload '%s' from '%s': %s", funcname, so_file, dlerror());
-                continue;
-            }
-
-            if (cur->iseq) { // Check whether GCed or not
-                // Usage of jit_code might be not in a critical section.
-                MJIT_ATOMIC_SET(ISEQ_BODY(cur->iseq)->jit_func, (mjit_func_t)func);
-            }
-        }
-        CRITICAL_SECTION_FINISH(3, "in compact_all_jit_code to read list");
-        verbose(1, "JIT compaction (%.1fms): Compacted %d methods %s -> %s", end_time - start_time, active_units.length, c_file, so_file);
-    }
-    else {
+    void *handle = dlopen(so_file, RTLD_NOW);
+    if (handle == NULL) {
+        mjit_warning("failure in loading code from compacted '%s': %s", so_file, dlerror());
         free(unit);
-        verbose(1, "JIT compaction failure (%.1fms): Failed to compact methods", end_time - start_time);
+        return;
     }
+    unit->handle = handle;
+
+    // lazily dlclose handle (and .so file for win32) on `mjit_finish()`.
+    add_to_list(unit, &compact_units);
+
+    if (!mjit_opts.save_temps)
+        remove_so_file(so_file, unit);
+
+    ccan_list_for_each(&active_units.head, cur, unode) {
+        void *func;
+        char funcname[MAXPATHLEN];
+        sprint_funcname(funcname, cur);
+
+        if ((func = dlsym(handle, funcname)) == NULL) {
+            mjit_warning("skipping to reload '%s' from '%s': %s", funcname, so_file, dlerror());
+            continue;
+        }
+
+        if (cur->iseq) { // Check whether GCed or not
+            // Usage of jit_code might be not in a critical section.
+            MJIT_ATOMIC_SET(ISEQ_BODY(cur->iseq)->jit_func, (mjit_func_t)func);
+        }
+    }
+    verbose(1, "JIT compaction (%.1fms): Compacted %d methods %s -> %s", end_time - current_cc_ms, active_units.length, c_file, so_file);
 }
 #endif // USE_JIT_COMPACTION
 
@@ -1142,6 +1073,62 @@ compile_prelude(FILE *f)
 #endif
 }
 
+// Compile ISeq in UNIT and return function pointer of JIT-ed code.
+// It may return NOT_COMPILED_JIT_ISEQ_FUNC if something went wrong.
+static pid_t
+start_mjit_compile(struct rb_mjit_unit *unit)
+{
+    static const char c_ext[] = ".c";
+    static const char so_ext[] = DLEXT;
+    char c_file[MAXPATHLEN], so_file[MAXPATHLEN], funcname[MAXPATHLEN];
+
+    sprint_uniq_filename(c_file, (int)sizeof(c_file), unit->id, MJIT_TMP_PREFIX, c_ext);
+    sprint_uniq_filename(so_file, (int)sizeof(so_file), unit->id, MJIT_TMP_PREFIX, so_ext);
+    sprint_funcname(funcname, unit);
+
+    FILE *f;
+    int fd = rb_cloexec_open(c_file, c_file_access_mode, 0600);
+    if (fd < 0 || (f = fdopen(fd, "w")) == NULL) {
+        int e = errno;
+        if (fd >= 0) (void)close(fd);
+        verbose(1, "Failed to fopen '%s', giving up JIT for it (%s)", c_file, strerror(e));
+        return -1;
+    }
+
+    // print #include of MJIT header, etc.
+    compile_prelude(f);
+
+    // This is no longer necessary. TODO: Just reference the ISeq directly in the compiler.
+    if (!set_compiling_iseqs(unit->iseq)) return -1;
+
+    // To make MJIT worker thread-safe against GC.compact, copy ISeq values while `in_jit` is true.
+    long iseq_lineno = 0;
+    if (FIXNUM_P(ISEQ_BODY(unit->iseq)->location.first_lineno))
+        // FIX2INT may fallback to rb_num2long(), which is a method call and dangerous in MJIT worker. So using only FIX2LONG.
+        iseq_lineno = FIX2LONG(ISEQ_BODY(unit->iseq)->location.first_lineno);
+    char *iseq_label = alloca(RSTRING_LEN(ISEQ_BODY(unit->iseq)->location.label) + 1);
+    char *iseq_path = alloca(RSTRING_LEN(rb_iseq_path(unit->iseq)) + 1);
+    strcpy(iseq_label, RSTRING_PTR(ISEQ_BODY(unit->iseq)->location.label));
+    strcpy(iseq_path, RSTRING_PTR(rb_iseq_path(unit->iseq)));
+
+    verbose(2, "start compilation: %s@%s:%ld -> %s", iseq_label, iseq_path, iseq_lineno, c_file);
+    fprintf(f, "/* %s@%s:%ld */\n\n", iseq_label, iseq_path, iseq_lineno);
+    bool success = mjit_compile(f, unit->iseq, funcname, unit->id);
+
+    free_compiling_iseqs();
+
+    fclose(f);
+    if (!success) {
+        if (!mjit_opts.save_temps)
+            remove_file(c_file);
+        verbose(1, "JIT failure: %s@%s:%ld -> %s", iseq_label, iseq_path, iseq_lineno, c_file);
+        return -1;
+    }
+
+    return start_compiling_c_to_so(c_file, so_file);
+}
+
+#ifdef _WIN32
 // Compile ISeq in UNIT and return function pointer of JIT-ed code.
 // It may return NOT_COMPILED_JIT_ISEQ_FUNC if something went wrong.
 static mjit_func_t
@@ -1167,18 +1154,7 @@ convert_unit_to_func(struct rb_mjit_unit *unit)
     // print #include of MJIT header, etc.
     compile_prelude(f);
 
-    // wait until mjit_gc_exit_hook is called
-    CRITICAL_SECTION_START(3, "before mjit_compile to wait GC finish");
-    while (in_gc) {
-        verbose(3, "Waiting wakeup from GC");
-        rb_native_cond_wait(&mjit_gc_wakeup, &mjit_engine_mutex);
-    }
-    // We need to check again here because we could've waited on GC above
-    in_jit = (unit->iseq != NULL);
-    if (in_jit)
-        in_jit &= set_compiling_iseqs(unit->iseq);
-    CRITICAL_SECTION_FINISH(3, "before mjit_compile to wait GC finish");
-    if (!in_jit) {
+    if (!set_compiling_iseqs(unit->iseq)) {
         fclose(f);
         if (!mjit_opts.save_temps)
             remove_file(c_file);
@@ -1200,12 +1176,7 @@ convert_unit_to_func(struct rb_mjit_unit *unit)
     bool success = mjit_compile(f, unit->iseq, funcname, unit->id);
 
     // release blocking mjit_gc_start_hook
-    CRITICAL_SECTION_START(3, "after mjit_compile to wakeup client for GC");
     free_compiling_iseqs();
-    in_jit = false;
-    verbose(3, "Sending wakeup signal to client in a mjit-worker for GC");
-    rb_native_cond_signal(&mjit_client_wakeup);
-    CRITICAL_SECTION_FINISH(3, "in worker to wakeup client for GC");
 
     fclose(f);
     if (!success) {
@@ -1236,6 +1207,7 @@ convert_unit_to_func(struct rb_mjit_unit *unit)
     }
     return (mjit_func_t)func;
 }
+#endif
 
 // To see cc_entries using index returned by `mjit_capture_cc_entries` in mjit_compile.c
 const struct rb_callcache **
@ -1382,119 +1354,3 @@ unload_units(void)
|
||||||
}
|
}
|
||||||
|
|
||||||
static void mjit_add_iseq_to_process(const rb_iseq_t *iseq, const struct rb_mjit_compile_info *compile_info, bool worker_p);
|
static void mjit_add_iseq_to_process(const rb_iseq_t *iseq, const struct rb_mjit_compile_info *compile_info, bool worker_p);
|
||||||
|
|
||||||
// The function implementing a worker. It is executed in a separate
|
|
||||||
// thread by rb_thread_create_mjit_thread. It compiles precompiled header
|
|
||||||
// and then compiles requested ISeqs.
|
|
||||||
void
|
|
||||||
mjit_worker(void)
|
|
||||||
{
|
|
||||||
// Allow only `max_cache_size / 100` times (default: 100) of compaction.
|
|
||||||
// Note: GC of compacted code has not been implemented yet.
|
|
||||||
int max_compact_size = mjit_opts.max_cache_size / 100;
|
|
||||||
if (max_compact_size < 10) max_compact_size = 10;
|
|
||||||
|
|
||||||
// Run unload_units after it's requested `max_cache_size / 10` (default: 10) times.
|
|
||||||
// This throttles the call to mitigate locking in unload_units. It also throttles JIT compaction.
|
|
||||||
int throttle_threshold = mjit_opts.max_cache_size / 10;
|
|
||||||
|
|
||||||
#ifndef _MSC_VER
|
|
||||||
if (pch_status == PCH_NOT_READY) {
|
|
||||||
make_pch();
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
if (pch_status == PCH_FAILED) {
|
|
||||||
mjit_enabled = false;
|
|
||||||
CRITICAL_SECTION_START(3, "in worker to update worker_stopped");
|
|
||||||
worker_stopped = true;
|
|
||||||
verbose(3, "Sending wakeup signal to client in a mjit-worker");
|
|
||||||
rb_native_cond_signal(&mjit_client_wakeup);
|
|
||||||
CRITICAL_SECTION_FINISH(3, "in worker to update worker_stopped");
|
|
||||||
return; // TODO: do the same thing in the latter half of mjit_finish
|
|
||||||
}
|
|
||||||
|
|
||||||
// main worker loop
|
|
||||||
while (!stop_worker_p) {
|
|
||||||
struct rb_mjit_unit *unit;
|
|
||||||
|
|
||||||
// Wait until a unit becomes available
|
|
||||||
CRITICAL_SECTION_START(3, "in worker dequeue");
|
|
||||||
while ((ccan_list_empty(&unit_queue.head) || active_units.length >= mjit_opts.max_cache_size) && !stop_worker_p) {
|
|
||||||
rb_native_cond_wait(&mjit_worker_wakeup, &mjit_engine_mutex);
|
|
||||||
verbose(3, "Getting wakeup from client");
|
|
||||||
|
|
||||||
// Lazily move active_units to stale_units to avoid race conditions around active_units with compaction
|
|
||||||
if (pending_stale_p) {
|
|
||||||
pending_stale_p = false;
|
|
||||||
struct rb_mjit_unit *next;
|
|
||||||
ccan_list_for_each_safe(&active_units.head, unit, next, unode) {
|
|
||||||
if (unit->stale_p) {
|
|
||||||
unit->stale_p = false;
|
|
||||||
remove_from_list(unit, &active_units);
|
|
||||||
add_to_list(unit, &stale_units);
|
|
||||||
// Lazily put it to unit_queue as well to avoid race conditions on jit_unit with mjit_compile.
|
|
||||||
mjit_add_iseq_to_process(unit->iseq, &ISEQ_BODY(unit->iseq)->jit_unit->compile_info, true);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unload some units as needed
|
|
||||||
if (unload_requests >= throttle_threshold) {
|
|
||||||
while (in_gc) {
|
|
||||||
verbose(3, "Waiting wakeup from GC");
|
|
||||||
rb_native_cond_wait(&mjit_gc_wakeup, &mjit_engine_mutex);
|
|
||||||
}
|
|
||||||
in_jit = true; // Lock GC
|
|
||||||
|
|
||||||
RB_DEBUG_COUNTER_INC(mjit_unload_units);
|
|
||||||
unload_units();
|
|
||||||
unload_requests = 0;
|
|
||||||
|
|
||||||
in_jit = false; // Unlock GC
|
|
||||||
verbose(3, "Sending wakeup signal to client in a mjit-worker for GC");
|
|
||||||
rb_native_cond_signal(&mjit_client_wakeup);
|
|
||||||
}
|
|
||||||
if (active_units.length == mjit_opts.max_cache_size && mjit_opts.wait) { // Sometimes all methods may be in use
|
|
||||||
mjit_opts.max_cache_size++; // avoid infinite loop on `rb_mjit_wait_call`. Note that --jit-wait is just for testing.
|
|
||||||
verbose(1, "No units can be unloaded -- incremented max-cache-size to %d for --jit-wait", mjit_opts.max_cache_size);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
unit = get_from_list(&unit_queue);
|
|
||||||
CRITICAL_SECTION_FINISH(3, "in worker dequeue");
|
|
||||||
|
|
||||||
if (unit) {
|
|
||||||
// JIT compile
|
|
||||||
mjit_func_t func = convert_unit_to_func(unit);
|
|
||||||
(void)RB_DEBUG_COUNTER_INC_IF(mjit_compile_failures, func == (mjit_func_t)NOT_COMPILED_JIT_ISEQ_FUNC);
|
|
||||||
|
|
||||||
CRITICAL_SECTION_START(3, "in jit func replace");
|
|
||||||
while (in_gc) { // Make sure we're not GC-ing when touching ISeq
|
|
||||||
verbose(3, "Waiting wakeup from GC");
|
|
||||||
rb_native_cond_wait(&mjit_gc_wakeup, &mjit_engine_mutex);
|
|
||||||
}
|
|
||||||
if (unit->iseq) { // Check whether GCed or not
|
|
||||||
if ((uintptr_t)func > (uintptr_t)LAST_JIT_ISEQ_FUNC) {
|
|
||||||
add_to_list(unit, &active_units);
|
|
||||||
}
|
|
||||||
// Usage of jit_code might be not in a critical section.
|
|
||||||
MJIT_ATOMIC_SET(ISEQ_BODY(unit->iseq)->jit_func, func);
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
free_unit(unit);
|
|
||||||
}
|
|
||||||
CRITICAL_SECTION_FINISH(3, "in jit func replace");
|
|
||||||
|
|
||||||
#if USE_JIT_COMPACTION
|
|
||||||
// Combine .o files to one .so and reload all jit_func to improve memory locality.
|
|
||||||
if (compact_units.length < max_compact_size
|
|
||||||
&& ((!mjit_opts.wait && unit_queue.length == 0 && active_units.length > 1)
|
|
||||||
|| (active_units.length == mjit_opts.max_cache_size && compact_units.length * throttle_threshold <= total_unloads))) { // throttle compaction by total_unloads
|
|
||||||
compact_all_jit_code();
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// To keep mutex unlocked when it is destroyed by mjit_finish, don't wrap CRITICAL_SECTION here.
|
|
||||||
worker_stopped = true;
|
|
||||||
}
|
|
||||||
|
|
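The worker loop removed above derives both of its throttles from the JIT cache size: compaction is capped at `max_cache_size / 100` rounds (never fewer than 10), and `unload_units` only runs once `max_cache_size / 10` unload requests have accumulated. A minimal stand-alone sketch of that arithmetic, not part of the patch (`print_throttle_limits` and the sample cache sizes are illustrative only):

#include <stdio.h>

// Sketch of the throttling arithmetic used by the removed worker loop.
static void print_throttle_limits(int max_cache_size)
{
    int max_compact_size = max_cache_size / 100;       // cap on JIT compaction rounds...
    if (max_compact_size < 10) max_compact_size = 10;  // ...but never below 10
    int throttle_threshold = max_cache_size / 10;      // unload_units runs after this many requests

    printf("max_cache_size=%d -> at most %d compactions, unload every %d requests\n",
           max_cache_size, max_compact_size, throttle_threshold);
}

int main(void)
{
    print_throttle_limits(100);   // compaction capped at 10, unload every 10 requests
    print_throttle_limits(10000); // compaction capped at 100, unload every 1000 requests
    return 0;
}

Both limits scale with the configured cache size, so a larger cache allows more compaction rounds while batching unloads into larger, less frequent passes.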
process.c (84 changes)

@@ -1091,6 +1091,15 @@ void rb_sigwait_sleep(const rb_thread_t *, int fd, const rb_hrtime_t *);
 void rb_sigwait_fd_put(const rb_thread_t *, int fd);
 void rb_thread_sleep_interruptible(void);
 
+#if USE_MJIT
+static struct waitpid_state mjit_waitpid_state;
+
+// variables shared with thread.c
+// TODO: implement the same thing with postponed_job and obviate these variables
+bool mjit_waitpid_finished = false;
+int mjit_waitpid_status = 0;
+#endif
+
 static int
 waitpid_signal(struct waitpid_state *w)
 {
@@ -1098,12 +1107,13 @@ waitpid_signal(struct waitpid_state *w)
         rb_threadptr_interrupt(rb_ec_thread_ptr(w->ec));
         return TRUE;
     }
-    else { /* ruby_waitpid_locked */
-        if (w->cond) {
-            rb_native_cond_signal(w->cond);
-            return TRUE;
-        }
+#if USE_MJIT
+    else if (w == &mjit_waitpid_state && w->ret) { /* mjit_add_waiting_pid */
+        mjit_waitpid_finished = true;
+        mjit_waitpid_status = w->status;
+        return TRUE;
     }
+#endif
     return FALSE;
 }
 
@@ -1199,68 +1209,18 @@ waitpid_state_init(struct waitpid_state *w, rb_pid_t pid, int options)
     w->status = 0;
 }
 
-static const rb_hrtime_t *
-sigwait_sleep_time(void)
-{
-    if (SIGCHLD_LOSSY) {
-        static const rb_hrtime_t busy_wait = 100 * RB_HRTIME_PER_MSEC;
-
-        return &busy_wait;
-    }
-    return 0;
-}
-
+#if USE_MJIT
 /*
  * must be called with vm->waitpid_lock held, this is not interruptible
 */
-rb_pid_t
-ruby_waitpid_locked(rb_vm_t *vm, rb_pid_t pid, int *status, int options,
-                    rb_nativethread_cond_t *cond)
+void
+mjit_add_waiting_pid(rb_vm_t *vm, rb_pid_t pid)
 {
-    struct waitpid_state w;
-
-    assert(!ruby_thread_has_gvl_p() && "must not have GVL");
-
-    waitpid_state_init(&w, pid, options);
-    if (w.pid > 0 || ccan_list_empty(&vm->waiting_pids))
-        w.ret = do_waitpid(w.pid, &w.status, w.options | WNOHANG);
-    if (w.ret) {
-        if (w.ret == -1) w.errnum = errno;
-    }
-    else {
-        int sigwait_fd = -1;
-
-        w.ec = 0;
-        ccan_list_add(w.pid > 0 ? &vm->waiting_pids : &vm->waiting_grps, &w.wnode);
-        do {
-            if (sigwait_fd < 0)
-                sigwait_fd = rb_sigwait_fd_get(0);
-
-            if (sigwait_fd >= 0) {
-                w.cond = 0;
-                rb_native_mutex_unlock(&vm->waitpid_lock);
-                rb_sigwait_sleep(0, sigwait_fd, sigwait_sleep_time());
-                rb_native_mutex_lock(&vm->waitpid_lock);
-            }
-            else {
-                w.cond = cond;
-                rb_native_cond_wait(w.cond, &vm->waitpid_lock);
-            }
-        } while (!w.ret);
-        ccan_list_del(&w.wnode);
-
-        /* we're done, maybe other waitpid callers are not: */
-        if (sigwait_fd >= 0) {
-            rb_sigwait_fd_put(0, sigwait_fd);
-            sigwait_fd_migrate_sleeper(vm);
-        }
-    }
-    if (status) {
-        *status = w.status;
-    }
-    if (w.ret == -1) errno = w.errnum;
-    return w.ret;
+    waitpid_state_init(&mjit_waitpid_state, pid, 0);
+    mjit_waitpid_state.ec = 0; // switch the behavior of waitpid_signal
+    ccan_list_add(&vm->waiting_pids, &mjit_waitpid_state.wnode);
 }
+#endif
 
 static VALUE
 waitpid_sleep(VALUE x)
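With this change, `ruby_waitpid_locked` (which slept on a condition variable under `vm->waitpid_lock`) is replaced for MJIT by `mjit_add_waiting_pid`, which merely registers the compiler's PID; `waitpid_signal` then records the exit status into `mjit_waitpid_finished` / `mjit_waitpid_status`, and thread.c (below) delivers it later. A minimal stand-alone sketch of that flag-based handoff, with plain globals and illustrative names (`record_child_exit`, `deliver_pending_exit`, `print_notify`) standing in for the real, lock-protected code:

#include <stdbool.h>
#include <stdio.h>

// Simplified model of the handoff added in this commit: the waitpid/SIGCHLD
// side only records the result, and a later, safe context delivers it.
static bool child_finished = false; // plays the role of mjit_waitpid_finished
static int  child_status   = 0;     // plays the role of mjit_waitpid_status

// Signal-side half: just set the flags, do no other work here.
static void record_child_exit(int status)
{
    child_status = status;
    child_finished = true;
}

// Main-thread half: consume the flags and notify whoever is interested.
static void deliver_pending_exit(void (*notify)(int status))
{
    if (child_finished) {
        child_finished = false;
        notify(child_status);
    }
}

static void print_notify(int status)
{
    printf("JIT compiler child exited with status %d\n", status);
}

int main(void)
{
    record_child_exit(0);               // e.g. the C compiler child finished
    deliver_pending_exit(print_notify); // picked up outside the signal handler
    return 0;
}

Keeping the signal-side half down to two assignments keeps the SIGCHLD path cheap; the actual `mjit_notify_waitpid` call happens only once a Ruby thread reaches its next interrupt check, as the thread.c hunk below shows.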
@@ -87,7 +87,11 @@ class TestRubyVMMJIT < Test::Unit::TestCase
       print RubyVM::MJIT.pause(wait: false)
     EOS
     assert_equal('truefalse', out)
-    assert_equal(true, err.scan(/#{JITSupport::JIT_SUCCESS_PREFIX}/).size < 10)
+    if RUBY_PLATFORM.match?(/mswin|mingw/) # MJIT synchronously compiles methods on Windows
+      assert_equal(10, err.scan(/#{JITSupport::JIT_SUCCESS_PREFIX}/).size)
+    else
+      assert_equal(true, err.scan(/#{JITSupport::JIT_SUCCESS_PREFIX}/).size < 10)
+    end
   end
 
   def test_resume
thread.c (15 changes)

@@ -2233,6 +2233,12 @@ threadptr_get_interrupts(rb_thread_t *th)
     return interrupt & (rb_atomic_t)~ec->interrupt_mask;
 }
 
+#if USE_MJIT
+// process.c
+extern bool mjit_waitpid_finished;
+extern int mjit_waitpid_status;
+#endif
+
 MJIT_FUNC_EXPORTED int
 rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
 {
@@ -2280,6 +2286,15 @@ rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
             ret |= rb_signal_exec(th, sig);
         }
         th->status = prev_status;
+
+#if USE_MJIT
+        // Handle waitpid_signal for MJIT issued by ruby_sigchld_handler. This needs to be done
+        // outside ruby_sigchld_handler to avoid recursively relying on the SIGCHLD handler.
+        if (mjit_waitpid_finished) {
+            mjit_waitpid_finished = false;
+            mjit_notify_waitpid(mjit_waitpid_status);
+        }
+#endif
     }
 
     /* exception from another thread */