Drop rb_mjit_unit from mjit_copy_job

and guard the iseq from GC by marking mjit_copy_job.iseq in mjit_mark.

This is a refactoring to prepare for implementing inlining later and
should not fix or introduce any bugs.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@67286 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
k0kubun 2019-03-17 17:12:47 +00:00
parent cebc640790
commit 3fc26f6013
2 changed files with 23 additions and 16 deletions

mjit.c

@@ -25,22 +25,21 @@ static void
 mjit_copy_job_handler(void *data)
 {
     mjit_copy_job_t *job = data;
-    const struct rb_iseq_constant_body *body;
     if (stop_worker_p) { /* check if mutex is still alive, before calling CRITICAL_SECTION_START. */
         return;
     }
 
     CRITICAL_SECTION_START(3, "in mjit_copy_job_handler");
-    /* Make sure that this job is never executed when:
-       1. job is being modified
-       2. alloca memory inside job is expired
-       3. ISeq is GC-ed */
-    if (job->finish_p || job->unit->iseq == NULL) {
+    // Make sure that this job is never executed when:
+    //   1. job is being modified
+    //   2. alloca memory inside job is expired
+    // Note that job->iseq is guarded from GC by `mjit_mark`.
+    if (job->finish_p) {
         CRITICAL_SECTION_FINISH(3, "in mjit_copy_job_handler");
         return;
     }
 
-    body = job->unit->iseq->body;
+    const struct rb_iseq_constant_body *body = job->iseq->body;
     if (job->cc_entries) {
         memcpy(job->cc_entries, body->cc_entries, sizeof(struct rb_call_cache) * (body->ci_size + body->ci_kw_size));
     }
@@ -828,14 +827,23 @@ mjit_finish(bool close_handle_p)
 void
 mjit_mark(void)
 {
-    struct rb_mjit_unit *unit = 0;
     if (!mjit_enabled)
         return;
     RUBY_MARK_ENTER("mjit");
+
+    CRITICAL_SECTION_START(4, "mjit_mark");
+    VALUE iseq = (VALUE)mjit_copy_job.iseq;
+    CRITICAL_SECTION_FINISH(4, "mjit_mark");
+
+    // Don't wrap critical section with this. This may trigger GC,
+    // and in that case mjit_gc_start_hook causes deadlock.
+    if (iseq) rb_gc_mark(iseq);
+
+    struct rb_mjit_unit *unit = NULL;
     CRITICAL_SECTION_START(4, "mjit_mark");
     list_for_each(&unit_queue.head, unit, unode) {
         if (unit->iseq) { /* ISeq is still not GCed */
-            VALUE iseq = (VALUE)unit->iseq;
+            iseq = (VALUE)unit->iseq;
             CRITICAL_SECTION_FINISH(4, "mjit_mark rb_gc_mark");
 
             /* Don't wrap critical section with this. This may trigger GC,
@@ -846,6 +854,7 @@ mjit_mark(void)
         }
     }
     CRITICAL_SECTION_FINISH(4, "mjit_mark");
+
     RUBY_MARK_LEAVE("mjit");
 }

mjit_worker.c

@@ -1126,7 +1126,7 @@ convert_unit_to_func(struct rb_mjit_unit *unit, struct rb_call_cache *cc_entries
 }
 
 typedef struct {
-    struct rb_mjit_unit *unit;
+    const rb_iseq_t *iseq;
     struct rb_call_cache *cc_entries;
     union iseq_inline_storage_entry *is_entries;
     bool finish_p;
@@ -1134,7 +1134,7 @@ typedef struct {
 } mjit_copy_job_t;
 
 /* Singleton MJIT copy job. This is made global since it needs to be durable even when MJIT worker thread is stopped.
    (ex: register job -> MJIT pause -> MJIT resume -> dispatch job. Actually this should be just cancelled by finish_p check) */
-static mjit_copy_job_t mjit_copy_job;
+static mjit_copy_job_t mjit_copy_job = { .iseq = NULL, .finish_p = true };
 
 static void mjit_copy_job_handler(void *data);
@@ -1204,14 +1204,12 @@ mjit_worker(void)
         verbose(3, "Getting wakeup from client");
     }
     unit = get_from_list(&unit_queue);
+    if (unit) job->iseq = unit->iseq;
+    job->finish_p = true; // disable dispatching this job in mjit_copy_job_handler while it's being modified
    CRITICAL_SECTION_FINISH(3, "in worker dequeue");
 
     if (unit) {
-        mjit_func_t func;
         const struct rb_iseq_constant_body *body = unit->iseq->body;
-
-        job->unit = unit;
-
         job->cc_entries = NULL;
         if (body->ci_size > 0 || body->ci_kw_size > 0)
             job->cc_entries = alloca(sizeof(struct rb_call_cache) * (body->ci_size + body->ci_kw_size));
@@ -1226,8 +1224,8 @@ mjit_worker(void)
         }
 
-        /* JIT compile */
-        func = convert_unit_to_func(unit, job->cc_entries, job->is_entries);
+        // JIT compile
+        mjit_func_t func = convert_unit_to_func(unit, job->cc_entries, job->is_entries);
 
         CRITICAL_SECTION_START(3, "in jit func replace");
         while (in_gc) { /* Make sure we're not GC-ing when touching ISeq */