Eliminate IVC sync between JIT and Ruby threads (#3799)
Thanks to Ractor (https://github.com/ruby/ruby/pull/2888 and https://github.com/ruby/ruby/pull/3662), inline caches now support parallel access.
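In short, the MJIT worker used to ask the main Ruby thread to copy is_entries on its behalf; after this commit it takes the snapshot itself. Below is a minimal pthread sketch of the new model, assuming atomically published cache entries; ic_table, ic_slot_t, and jit_worker are illustrative stand-ins, not the actual Ruby structures.

    // Sketch only: with cache entries published atomically (what the Ractor
    // work guarantees), a JIT worker can snapshot them without a handshake.
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IC_SIZE 16

    typedef struct { _Atomic uintptr_t entry; } ic_slot_t; // stand-in for an inline cache
    static ic_slot_t ic_table[IC_SIZE];                    // stand-in for body->is_entries

    static void *jit_worker(void *arg)
    {
        (void)arg;
        uintptr_t snapshot[IC_SIZE];
        // New model: read each slot directly; published entries are immutable,
        // so no cross-thread copy job is needed.
        for (int i = 0; i < IC_SIZE; i++)
            snapshot[i] = atomic_load(&ic_table[i].entry);
        printf("snapshot[0] = %lu\n", (unsigned long)snapshot[0]);
        return NULL;
    }

    int main(void)
    {
        atomic_store(&ic_table[0].entry, 42); // the Ruby thread fills a cache
        pthread_t th;
        pthread_create(&th, NULL, jit_worker, NULL);
        pthread_join(th, NULL);
        return 0;
    }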
This commit is contained in:
parent 4988758e3a
commit 0960f56a1d
Notes (git, 2020-11-21 15:19:02 +09:00):
Merged-By: k0kubun <takashikkbn@gmail.com>
3 changed files with 15 additions and 122 deletions
mjit.c (47 changed lines)
@@ -27,40 +27,6 @@
 #include "mjit_worker.c"
 
-// Copy ISeq's states so that race condition does not happen on compilation.
-static void
-mjit_copy_job_handler(void *data)
-{
-    mjit_copy_job_t *job = data;
-    if (stop_worker_p) { // check if mutex is still alive, before calling CRITICAL_SECTION_START.
-        return;
-    }
-
-    CRITICAL_SECTION_START(3, "in mjit_copy_job_handler");
-    // Make sure that this job is never executed when:
-    //   1. job is being modified
-    //   2. alloca memory inside job is expired
-    //   3. ISeq is GC-ed
-    if (job->finish_p) {
-        CRITICAL_SECTION_FINISH(3, "in mjit_copy_job_handler");
-        return;
-    }
-    else if (job->iseq == NULL) { // ISeq GC is notified in mjit_free_iseq
-        job->finish_p = true;
-        CRITICAL_SECTION_FINISH(3, "in mjit_copy_job_handler");
-        return;
-    }
-
-    const struct rb_iseq_constant_body *body = job->iseq->body;
-    if (job->is_entries) {
-        memcpy(job->is_entries, body->is_entries, sizeof(union iseq_inline_storage_entry) * body->is_size);
-    }
-
-    job->finish_p = true;
-    rb_native_cond_broadcast(&mjit_worker_wakeup);
-    CRITICAL_SECTION_FINISH(3, "in mjit_copy_job_handler");
-}
-
 extern int rb_thread_create_mjit_thread(void (*worker_func)(void));
 
 // Return a unique file name in /tmp with PREFIX and SUFFIX and
@@ -154,9 +120,6 @@ mjit_free_iseq(const rb_iseq_t *iseq)
         return;
 
     CRITICAL_SECTION_START(4, "mjit_free_iseq");
-    if (mjit_copy_job.iseq == iseq) {
-        mjit_copy_job.iseq = NULL;
-    }
     if (iseq->body->jit_unit) {
         // jit_unit is not freed here because it may be referred by multiple
         // lists of units. `get_from_list` and `mjit_finish` do the job.
@@ -1050,19 +1013,11 @@ mjit_mark(void)
         return;
     RUBY_MARK_ENTER("mjit");
 
-    CRITICAL_SECTION_START(4, "mjit_mark");
-    VALUE iseq = (VALUE)mjit_copy_job.iseq;
-    CRITICAL_SECTION_FINISH(4, "mjit_mark");
-
-    // Don't wrap critical section with this. This may trigger GC,
-    // and in that case mjit_gc_start_hook causes deadlock.
-    if (iseq) rb_gc_mark(iseq);
-
     struct rb_mjit_unit *unit = NULL;
     CRITICAL_SECTION_START(4, "mjit_mark");
     list_for_each(&unit_queue.head, unit, unode) {
         if (unit->iseq) { // ISeq is still not GCed
-            iseq = (VALUE)unit->iseq;
+            VALUE iseq = (VALUE)unit->iseq;
             CRITICAL_SECTION_FINISH(4, "mjit_mark rb_gc_mark");
 
             // Don't wrap critical section with this. This may trigger GC,
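One detail preserved by the mjit_mark hunk above: the object pointer is copied while the lock is held, but rb_gc_mark runs outside the critical section, since marking can trigger GC and mjit_gc_start_hook would then wait on the same lock. Here is a hedged sketch of that shape with a plain mutex; node_t, unit_list, and gc_mark_object are invented for illustration.

    #include <pthread.h>
    #include <stddef.h>

    typedef struct node { struct node *next; void *obj; } node_t; // stand-in for rb_mjit_unit
    static node_t *unit_list;                                     // stand-in for unit_queue
    static pthread_mutex_t engine_lock = PTHREAD_MUTEX_INITIALIZER;

    // Stand-in for rb_gc_mark(), which may itself trigger a GC cycle.
    static void gc_mark_object(void *obj) { (void)obj; }

    static void mark_units(void)
    {
        pthread_mutex_lock(&engine_lock);
        for (node_t *n = unit_list; n != NULL; n = n->next) {
            void *obj = n->obj;                 // copy the pointer under the lock
            pthread_mutex_unlock(&engine_lock);
            // Mark outside the lock: if marking triggers GC, a GC hook that
            // takes engine_lock (as mjit_gc_start_hook does) would deadlock.
            if (obj) gc_mark_object(obj);
            pthread_mutex_lock(&engine_lock);   // re-acquire before advancing
        }
        pthread_mutex_unlock(&engine_lock);
    }

    int main(void)
    {
        int x = 1, y = 2;
        node_t b = { NULL, &y }, a = { &b, &x };
        unit_list = &a;
        mark_units();
        return 0;
    }

The real code relies on the surrounding GC machinery to keep the node alive across the unlock window; the sketch only shows the lock discipline.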
mjit_compile.c
@@ -320,8 +320,16 @@ compile_cancel_handler(FILE *f, const struct rb_iseq_constant_body *body, struct
 extern int
 mjit_capture_cc_entries(const struct rb_iseq_constant_body *compiled_iseq, const struct rb_iseq_constant_body *captured_iseq);
 
-extern bool mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq,
-                                             union iseq_inline_storage_entry *is_entries);
+// Copy current is_entries and use it throughout the current compilation consistently.
+// While ic->entry has been immutable since https://github.com/ruby/ruby/pull/3662,
+// we still need this to avoid a race condition between entries and ivar_serial/max_ivar_index.
+static void
+mjit_capture_is_entries(const struct rb_iseq_constant_body *body, union iseq_inline_storage_entry *is_entries)
+{
+    if (is_entries == NULL)
+        return;
+    memcpy(is_entries, body->is_entries, sizeof(union iseq_inline_storage_entry) * body->is_size);
+}
 
 static bool
 mjit_compile_body(FILE *f, const rb_iseq_t *iseq, struct compile_status *status)
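The comment in this hunk carries the core reasoning: each ic->entry is immutable once published, but the compiler derives several related values from the caches, and they must all come from one snapshot. A toy illustration of the snapshot-once idea follows; struct cache and its fields are invented and are not Ruby's layout.

    #include <stdio.h>
    #include <string.h>

    // Invented stand-in: two fields that must be observed from the same generation.
    struct cache { unsigned serial; unsigned max_index; };

    static struct cache shared; // mutated by another thread in the real scenario

    static void compile_with_snapshot(void)
    {
        struct cache snap;
        memcpy(&snap, &shared, sizeof snap);  // one snapshot up front...
        // ...then every later decision uses snap.serial and snap.max_index,
        // so the compiled code cannot mix fields from two different updates.
        printf("compiling against serial=%u max_index=%u\n", snap.serial, snap.max_index);
    }

    int main(void)
    {
        shared.serial = 1;
        shared.max_index = 3;
        compile_with_snapshot();
        return 0;
    }

Copying into snap up front means every later decision sees one generation of the data, which is what mjit_capture_is_entries buys the compiler.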
@@ -429,6 +437,8 @@ inlinable_iseq_p(const struct rb_iseq_constant_body *body)
 static void
 init_ivar_compile_status(const struct rb_iseq_constant_body *body, struct compile_status *status)
 {
+    mjit_capture_is_entries(body, status->is_entries);
+
     int num_ivars = 0;
     unsigned int pos = 0;
     status->max_ivar_index = 0;
@@ -528,8 +538,7 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status
                 .param_size = child_iseq->body->param.size,
                 .local_size = child_iseq->body->local_table_size
             };
-            if ((child_iseq->body->ci_size > 0 && child_status.cc_entries_index == -1)
-                || (child_status.is_entries != NULL && !mjit_copy_cache_from_main_thread(child_iseq, child_status.is_entries))) {
+            if (child_iseq->body->ci_size > 0 && child_status.cc_entries_index == -1) {
                 return false;
             }
+            init_ivar_compile_status(child_iseq->body, &child_status);
@@ -556,8 +565,7 @@ mjit_compile(FILE *f, const rb_iseq_t *iseq, const char *funcname, int id)
 {
     struct compile_status status = { .compiled_iseq = iseq->body, .compiled_id = id };
     INIT_COMPILE_STATUS(status, iseq->body, true);
-    if ((iseq->body->ci_size > 0 && status.cc_entries_index == -1)
-        || (status.is_entries != NULL && !mjit_copy_cache_from_main_thread(iseq, status.is_entries))) {
+    if (iseq->body->ci_size > 0 && status.cc_entries_index == -1) {
         return false;
     }
+    init_ivar_compile_status(iseq->body, &status);
mjit_worker.c
@@ -1205,21 +1205,6 @@ convert_unit_to_func(struct rb_mjit_unit *unit)
     return (mjit_func_t)func;
 }
 
-typedef struct {
-    const rb_iseq_t *iseq;
-    union iseq_inline_storage_entry *is_entries;
-    bool finish_p;
-} mjit_copy_job_t;
-
-// Singleton MJIT copy job. This is made global since it needs to be durable even when MJIT worker thread is stopped.
-// (ex: register job -> MJIT pause -> MJIT resume -> dispatch job. Actually this should be just cancelled by finish_p check)
-static mjit_copy_job_t mjit_copy_job = { .iseq = NULL, .finish_p = true };
-
-static void mjit_copy_job_handler(void *data);
-
-// vm_trace.c
-int rb_workqueue_register(unsigned flags, rb_postponed_job_func_t, void *);
-
 // To see cc_entries using index returned by `mjit_capture_cc_entries` in mjit_compile.c
 const struct rb_callcache **
 mjit_iseq_cc_entries(const struct rb_iseq_constant_body *const body)
@@ -1266,61 +1251,6 @@ mjit_capture_cc_entries(const struct rb_iseq_constant_body *compiled_iseq, const
     return cc_entries_index;
 }
 
-// Copy inline cache values of `iseq` to `cc_entries` and `is_entries`.
-// These buffers should be pre-allocated properly prior to calling this function.
-// Return true if copy succeeds or is not needed.
-//
-// We're lazily copying cache values from main thread because these cache values
-// could be different between ones on enqueue timing and ones on dequeue timing.
-bool
-mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, union iseq_inline_storage_entry *is_entries)
-{
-    mjit_copy_job_t *job = &mjit_copy_job; // just a shorthand
-
-    CRITICAL_SECTION_START(3, "in mjit_copy_cache_from_main_thread");
-    job->finish_p = true; // disable dispatching this job in mjit_copy_job_handler while it's being modified
-    CRITICAL_SECTION_FINISH(3, "in mjit_copy_cache_from_main_thread");
-
-    job->is_entries = is_entries;
-
-    CRITICAL_SECTION_START(3, "in mjit_copy_cache_from_main_thread");
-    job->iseq = iseq; // Prevent GC of this ISeq from here
-    VM_ASSERT(in_jit);
-    in_jit = false; // To avoid deadlock, allow running GC while waiting for copy job
-    rb_native_cond_signal(&mjit_client_wakeup); // Unblock main thread waiting in `mjit_gc_start_hook`
-
-    job->finish_p = false; // allow dispatching this job in mjit_copy_job_handler
-    CRITICAL_SECTION_FINISH(3, "in mjit_copy_cache_from_main_thread");
-
-    if (UNLIKELY(mjit_opts.wait)) {
-        mjit_copy_job_handler((void *)job);
-    }
-    else if (rb_workqueue_register(0, mjit_copy_job_handler, (void *)job)) {
-        CRITICAL_SECTION_START(3, "in MJIT copy job wait");
-        // checking `stop_worker_p` too because `RUBY_VM_CHECK_INTS(ec)` may not
-        // flush mjit_copy_job_handler when EC_EXEC_TAG() is not TAG_NONE, and then
-        // `stop_worker()` could deadlock with this function.
-        while (!job->finish_p && !stop_worker_p) {
-            rb_native_cond_wait(&mjit_worker_wakeup, &mjit_engine_mutex);
-            verbose(3, "Getting wakeup from client");
-        }
-        CRITICAL_SECTION_FINISH(3, "in MJIT copy job wait");
-    }
-
-    CRITICAL_SECTION_START(3, "in mjit_copy_cache_from_main_thread");
-    bool success_p = job->finish_p;
-    // Disable dispatching this job in mjit_copy_job_handler while memory allocated by alloca
-    // could be expired after finishing this function.
-    job->finish_p = true;
-
-    in_jit = true; // Prohibit GC during JIT compilation
-    if (job->iseq == NULL) // ISeq GC is notified in mjit_free_iseq
-        success_p = false;
-    job->iseq = NULL; // Allow future GC of this ISeq from here
-    CRITICAL_SECTION_FINISH(3, "in mjit_copy_cache_from_main_thread");
-    return success_p;
-}
-
 // The function implementing a worker. It is executed in a separate
 // thread by rb_thread_create_mjit_thread. It compiles precompiled header
 // and then compiles requested ISeqs.
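For contrast, here is the shape of the handshake this commit deletes, condensed into a runnable pthread sketch. The names echo the removed code (copy_job, finish_p, the worker-side wait loop), but everything here is a simplified stand-in: the real version went through rb_workqueue_register rather than a polling loop, coordinated with the GC hooks, and could fail when the ISeq was GC'ed or the worker was stopping.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    static pthread_mutex_t engine_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t worker_wakeup = PTHREAD_COND_INITIALIZER;

    typedef struct {
        const int *src;   // main-thread data to copy (the old job's is_entries source)
        int *dst;         // worker-provided buffer (was alloca'd in the real code)
        size_t n;
        bool finish_p;    // doubles as "done" and "do not dispatch"
    } copy_job_t;

    static copy_job_t job = { .finish_p = true }; // singleton, like mjit_copy_job
    static _Atomic bool worker_done;

    // Runs on the MAIN thread; the real code reached this via rb_workqueue_register.
    static void copy_job_handler(void)
    {
        pthread_mutex_lock(&engine_mutex);
        if (!job.finish_p) {                 // skip stale or cancelled dispatches
            memcpy(job.dst, job.src, job.n * sizeof(int));
            job.finish_p = true;
            pthread_cond_broadcast(&worker_wakeup);
        }
        pthread_mutex_unlock(&engine_mutex);
    }

    // Runs on the WORKER thread: publish the job, then sleep until it is done.
    static bool copy_cache_from_main_thread(const int *src, int *dst, size_t n)
    {
        pthread_mutex_lock(&engine_mutex);
        job.src = src; job.dst = dst; job.n = n;
        job.finish_p = false;                // from here the handler may run it
        while (!job.finish_p)                // woken by copy_job_handler
            pthread_cond_wait(&worker_wakeup, &engine_mutex);
        job.finish_p = true;                 // re-latch: dst is about to go away
        pthread_mutex_unlock(&engine_mutex);
        return true; // the real code could also report failure (GC'ed ISeq, shutdown)
    }

    static void *worker(void *arg)
    {
        (void)arg;
        static const int main_cache[4] = { 10, 20, 30, 40 };
        int snapshot[4];
        if (copy_cache_from_main_thread(main_cache, snapshot, 4))
            printf("worker received snapshot[3] = %d\n", snapshot[3]);
        atomic_store(&worker_done, true);
        return NULL;
    }

    int main(void)
    {
        pthread_t th;
        pthread_create(&th, NULL, worker, NULL);
        while (!atomic_load(&worker_done)) // stand-in for the VM draining its workqueue
            copy_job_handler();
        pthread_join(th, NULL);
        return 0;
    }

Both condition variables and the finish_p state machine disappear in the new model; the direct snapshot in mjit_capture_is_entries replaces all of this.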