
Unify comment styles across MJIT sources

Since C99 was enabled, I have been writing `//` comments in newer MJIT code
(I write one-line comments more often than multi-line ones, and `//` needs
fewer characters on a single line), so `//` and `/* */` comments are now mixed.

For consistency, and to avoid converting comments again in future changes, let
me finish the rewrite in the MJIT-related code (a short before/after sketch of
the conversion follows the commit metadata below).

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@67533 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
k0kubun 2019-04-14 05:26:46 +00:00
parent 19513c88d5
commit fa13bb1a6f
5 changed files with 321 additions and 327 deletions
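As a quick illustration of the change applied throughout the diff below, here is a minimal sketch (the helper names are hypothetical and not taken from the commit) showing the two comment styles side by side. Both are valid C99; the commit standardizes the MJIT sources on the second form.

/* Old style: a block comment needs four delimiter characters even when
   the note fits on a single line. */
static int add_one_block_style(int n)
{
    return n + 1; /* trailing one-line note */
}

// New style: a line comment needs only two characters and no closing
// delimiter, which is why it is shorter for the common one-line case.
static int add_one_line_style(int n)
{
    return n + 1; // trailing one-line note
}

Multi-line explanations work in either form; the diff converts them to runs of consecutive `//` lines so that new one-line comments do not end up mixed with surrounding `/* */` blocks.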

mjit.c

@ -6,10 +6,10 @@
**********************************************************************/
/* Functions in this file are never executed on MJIT worker thread.
So you can safely use Ruby methods and GC in this file. */
// Functions in this file are never executed on MJIT worker thread.
// So you can safely use Ruby methods and GC in this file.
/* To share variables privately, include mjit_worker.c instead of linking. */
// To share variables privately, include mjit_worker.c instead of linking.
#include "internal.h"
@ -20,12 +20,12 @@
#include "constant.h"
#include "id_table.h"
/* Copy ISeq's states so that race condition does not happen on compilation. */
// Copy ISeq's states so that race condition does not happen on compilation.
static void
mjit_copy_job_handler(void *data)
{
mjit_copy_job_t *job = data;
if (stop_worker_p) { /* check if mutex is still alive, before calling CRITICAL_SECTION_START. */
if (stop_worker_p) { // check if mutex is still alive, before calling CRITICAL_SECTION_START.
return;
}
@ -54,9 +54,9 @@ mjit_copy_job_handler(void *data)
extern int rb_thread_create_mjit_thread(void (*worker_func)(void));
/* Return an unique file name in /tmp with PREFIX and SUFFIX and
number ID. Use getpid if ID == 0. The return file name exists
until the next function call. */
// Return a unique file name in /tmp with PREFIX and SUFFIX and
// number ID. Use getpid if ID == 0. The returned file name exists
// until the next function call.
static char *
get_uniq_filename(unsigned long id, const char *prefix, const char *suffix)
{
@ -74,8 +74,8 @@ get_uniq_filename(unsigned long id, const char *prefix, const char *suffix)
return str;
}
/* Wait until workers don't compile any iseq. It is called at the
start of GC. */
// Wait until workers don't compile any iseq. It is called at the
// start of GC.
void
mjit_gc_start_hook(void)
{
@ -91,8 +91,8 @@ mjit_gc_start_hook(void)
CRITICAL_SECTION_FINISH(4, "mjit_gc_start_hook");
}
/* Send a signal to workers to continue iseq compilations. It is
called at the end of GC. */
// Send a signal to workers to continue iseq compilations. It is
// called at the end of GC.
void
mjit_gc_finish_hook(void)
{
@ -105,8 +105,8 @@ mjit_gc_finish_hook(void)
CRITICAL_SECTION_FINISH(4, "mjit_gc_finish_hook");
}
/* Iseqs can be garbage collected. This function should call when it
happens. It removes iseq from the unit. */
// Iseqs can be garbage collected. This function should be called when that
// happens. It removes the iseq from the unit.
void
mjit_free_iseq(const rb_iseq_t *iseq)
{
@ -114,16 +114,16 @@ mjit_free_iseq(const rb_iseq_t *iseq)
return;
CRITICAL_SECTION_START(4, "mjit_free_iseq");
if (iseq->body->jit_unit) {
/* jit_unit is not freed here because it may be referred by multiple
lists of units. `get_from_list` and `mjit_finish` do the job. */
// jit_unit is not freed here because it may be referred by multiple
// lists of units. `get_from_list` and `mjit_finish` do the job.
iseq->body->jit_unit->iseq = NULL;
}
CRITICAL_SECTION_FINISH(4, "mjit_free_iseq");
}
/* Free unit list. This should be called only when worker is finished
because node of unit_queue and one of active_units may have the same unit
during proceeding unit. */
// Free the unit list. This should be called only when the worker is finished,
// because a node of unit_queue and a node of active_units may hold the same
// unit while a unit is being processed.
static void
free_list(struct rb_mjit_unit_list *list, bool close_handle_p)
{
@ -137,18 +137,18 @@ free_list(struct rb_mjit_unit_list *list, bool close_handle_p)
list->length = 0;
}
/* MJIT info related to an existing continutaion. */
// MJIT info related to an existing continuation.
struct mjit_cont {
rb_execution_context_t *ec; /* continuation ec */
struct mjit_cont *prev, *next; /* used to form lists */
rb_execution_context_t *ec; // continuation ec
struct mjit_cont *prev, *next; // used to form lists
};
/* Double linked list of registered continuations. This is used to detect
units which are in use in unload_units. */
// Double linked list of registered continuations. This is used to detect
// units which are in use in unload_units.
static struct mjit_cont *first_cont;
/* Register a new continuation with thread TH. Return MJIT info about
the continuation. */
// Register a new continuation with execution context `ec`. Return MJIT info about
// the continuation.
struct mjit_cont *
mjit_cont_new(rb_execution_context_t *ec)
{
@ -172,7 +172,7 @@ mjit_cont_new(rb_execution_context_t *ec)
return cont;
}
/* Unregister continuation CONT. */
// Unregister continuation `cont`.
void
mjit_cont_free(struct mjit_cont *cont)
{
@ -192,7 +192,7 @@ mjit_cont_free(struct mjit_cont *cont)
xfree(cont);
}
/* Finish work with continuation info. */
// Finish work with continuation info.
static void
finish_conts(void)
{
@ -204,7 +204,7 @@ finish_conts(void)
}
}
/* Create unit for ISEQ. */
// Create unit for `iseq`.
static void
create_unit(const rb_iseq_t *iseq)
{
@ -219,7 +219,7 @@ create_unit(const rb_iseq_t *iseq)
iseq->body->jit_unit = unit;
}
/* Set up field used_code_p for unit iseqs whose iseq on the stack of ec. */
// Set up field `used_code_p` for unit iseqs whose iseq on the stack of ec.
static void
mark_ec_units(rb_execution_context_t *ec)
{
@ -236,12 +236,12 @@ mark_ec_units(rb_execution_context_t *ec)
}
if (cfp == ec->cfp)
break; /* reached the most recent cfp */
break; // reached the most recent cfp
}
}
/* Unload JIT code of some units to satisfy the maximum permitted
number of units with a loaded code. */
// Unload JIT code of some units to satisfy the maximum permitted
// number of units with a loaded code.
static void
unload_units(void)
{
@ -251,16 +251,16 @@ unload_units(void)
struct mjit_cont *cont;
int delete_num, units_num = active_units.length;
/* For now, we don't unload units when ISeq is GCed. We should
unload such ISeqs first here. */
// For now, we don't unload units when ISeq is GCed. We should
// unload such ISeqs first here.
list_for_each_safe(&active_units.head, unit, next, unode) {
if (unit->iseq == NULL) { /* ISeq is GCed. */
if (unit->iseq == NULL) { // ISeq is GCed.
remove_from_list(unit, &active_units);
free_unit(unit);
}
}
/* Detect units which are in use and can't be unloaded. */
// Detect units which are in use and can't be unloaded.
list_for_each(&active_units.head, unit, unode) {
assert(unit->iseq != NULL && unit->handle != NULL);
unit->used_code_p = FALSE;
@ -272,15 +272,15 @@ unload_units(void)
mark_ec_units(cont->ec);
}
/* Remove 1/10 units more to decrease unloading calls. */
/* TODO: Calculate max total_calls in unit_queue and don't unload units
whose total_calls are larger than the max. */
// Remove 1/10 units more to decrease unloading calls.
// TODO: Calculate max total_calls in unit_queue and don't unload units
// whose total_calls are larger than the max.
delete_num = active_units.length / 10;
for (; active_units.length > mjit_opts.max_cache_size - delete_num;) {
/* Find one unit that has the minimum total_calls. */
// Find one unit that has the minimum total_calls.
worst = NULL;
list_for_each(&active_units.head, unit, unode) {
if (unit->used_code_p) /* We can't unload code on stack. */
if (unit->used_code_p) // We can't unload code on stack.
continue;
if (worst == NULL || worst->iseq->body->total_calls > unit->iseq->body->total_calls) {
@ -290,7 +290,7 @@ unload_units(void)
if (worst == NULL)
break;
/* Unload the worst node. */
// Unload the worst node.
verbose(2, "Unloading unit %d (calls=%lu)", worst->id, worst->iseq->body->total_calls);
assert(worst->handle != NULL);
remove_from_list(worst, &active_units);
@ -310,7 +310,7 @@ mjit_add_iseq_to_process(const rb_iseq_t *iseq, const struct rb_mjit_compile_inf
if (compile_info != NULL)
iseq->body->jit_unit->compile_info = *compile_info;
if (iseq->body->jit_unit == NULL)
/* Failure in creating the unit. */
// Failure in creating the unit.
return;
CRITICAL_SECTION_START(3, "in add_iseq_to_process");
@ -323,15 +323,15 @@ mjit_add_iseq_to_process(const rb_iseq_t *iseq, const struct rb_mjit_compile_inf
CRITICAL_SECTION_FINISH(3, "in add_iseq_to_process");
}
/* Add ISEQ to be JITed in parallel with the current thread.
Unload some JIT codes if there are too many of them. */
// Add ISEQ to be JITed in parallel with the current thread.
// Unload some JIT codes if there are too many of them.
void
rb_mjit_add_iseq_to_process(const rb_iseq_t *iseq)
{
mjit_add_iseq_to_process(iseq, NULL);
}
/* For this timeout seconds, --jit-wait will wait for JIT compilation finish. */
// For this timeout seconds, --jit-wait will wait for JIT compilation finish.
#define MJIT_WAIT_TIMEOUT_SECONDS 60
static void
@ -345,7 +345,7 @@ mjit_wait(struct rb_iseq_constant_body *body)
tries++;
if (tries / 1000 > MJIT_WAIT_TIMEOUT_SECONDS || pch_status == PCH_FAILED) {
CRITICAL_SECTION_START(3, "in mjit_wait_call to set jit_func");
body->jit_func = (mjit_func_t)NOT_COMPILED_JIT_ISEQ_FUNC; /* JIT worker seems dead. Give up. */
body->jit_func = (mjit_func_t)NOT_COMPILED_JIT_ISEQ_FUNC; // JIT worker seems dead. Give up.
CRITICAL_SECTION_FINISH(3, "in mjit_wait_call to set jit_func");
mjit_warning("timed out to wait for JIT finish");
break;
@ -358,8 +358,8 @@ mjit_wait(struct rb_iseq_constant_body *body)
}
}
/* Wait for JIT compilation finish for --jit-wait, and call the function pointer
if the compiled result is not NOT_COMPILED_JIT_ISEQ_FUNC. */
// Wait for JIT compilation finish for --jit-wait, and call the function pointer
// if the compiled result is not NOT_COMPILED_JIT_ISEQ_FUNC.
VALUE
mjit_wait_call(rb_execution_context_t *ec, struct rb_iseq_constant_body *body)
{
@ -406,7 +406,7 @@ init_header_filename(void)
{
int fd;
#ifdef LOAD_RELATIVE
/* Root path of the running ruby process. Equal to RbConfig::TOPDIR. */
// Root path of the running ruby process. Equal to RbConfig::TOPDIR.
VALUE basedir_val;
#endif
const char *basedir = NULL;
@ -429,9 +429,9 @@ init_header_filename(void)
baselen = RSTRING_LEN(basedir_val);
#else
if (getenv("MJIT_SEARCH_BUILD_DIR")) {
/* This path is not intended to be used on production, but using build directory's
header file here because people want to run `make test-all` without running
`make install`. Don't use $MJIT_SEARCH_BUILD_DIR except for test-all. */
// This path is not intended to be used on production, but using build directory's
// header file here because people want to run `make test-all` without running
// `make install`. Don't use $MJIT_SEARCH_BUILD_DIR except for test-all.
struct stat st;
const char *hdr = dlsym(RTLD_DEFAULT, "MJIT_HEADER");
@ -451,10 +451,10 @@ init_header_filename(void)
return FALSE;
}
else {
/* Do not pass PRELOADENV to child processes, on
* multi-arch environment */
// Do not pass PRELOADENV to child processes, on
// multi-arch environment
verbose(3, "PRELOADENV("PRELOADENV")=%s", getenv(PRELOADENV));
/* assume no other PRELOADENV in test-all */
// assume no other PRELOADENV in test-all
unsetenv(PRELOADENV);
verbose(3, "MJIT_HEADER: %s", hdr);
header_file = ruby_strdup(hdr);
@ -465,7 +465,7 @@ init_header_filename(void)
#endif
#ifndef _MSC_VER
{
/* A name of the header file included in any C file generated by MJIT for iseqs. */
// A name of the header file included in any C file generated by MJIT for iseqs.
static const char header_name[] = MJIT_HEADER_INSTALL_DIR "/" MJIT_MIN_HEADER_NAME;
const size_t header_name_len = sizeof(header_name) - 1;
@ -534,7 +534,7 @@ UINT rb_w32_system_tmpdir(WCHAR *path, UINT len);
static char *
system_default_tmpdir(void)
{
/* c.f. ext/etc/etc.c:etc_systmpdir() */
// c.f. ext/etc/etc.c:etc_systmpdir()
#ifdef _WIN32
WCHAR tmppath[_MAX_PATH];
UINT len = rb_w32_system_tmpdir(tmppath, numberof(tmppath));
@ -610,7 +610,7 @@ system_tmpdir(void)
// A default threshold used to add iseq to JIT.
#define DEFAULT_MIN_CALLS_TO_ADD 10000
/* Start MJIT worker. Return TRUE if worker is successfully started. */
// Start MJIT worker. Return TRUE if worker is successfully started.
static bool
start_worker(void)
{
@ -630,9 +630,9 @@ start_worker(void)
return true;
}
/* Initialize MJIT. Start a thread creating the precompiled header and
processing ISeqs. The function should be called first for using MJIT.
If everything is successful, MJIT_INIT_P will be TRUE. */
// Initialize MJIT. Start a thread creating the precompiled header and
// processing ISeqs. The function should be called first for using MJIT.
// If everything is successful, MJIT_INIT_P will be TRUE.
void
mjit_init(struct mjit_options *opts)
{
@ -640,7 +640,7 @@ mjit_init(struct mjit_options *opts)
mjit_enabled = true;
mjit_call_p = true;
/* Normalize options */
// Normalize options
if (mjit_opts.min_calls == 0)
mjit_opts.min_calls = DEFAULT_MIN_CALLS_TO_ADD;
if (mjit_opts.max_cache_size <= 0)
@ -648,9 +648,9 @@ mjit_init(struct mjit_options *opts)
if (mjit_opts.max_cache_size < MIN_CACHE_SIZE)
mjit_opts.max_cache_size = MIN_CACHE_SIZE;
/* Initialize variables for compilation */
// Initialize variables for compilation
#ifdef _MSC_VER
pch_status = PCH_SUCCESS; /* has prebuilt precompiled header */
pch_status = PCH_SUCCESS; // has prebuilt precompiled header
#else
pch_status = PCH_NOT_READY;
#endif
@ -659,11 +659,11 @@ mjit_init(struct mjit_options *opts)
cc_common_args = xmalloc(sizeof(CC_COMMON_ARGS));
memcpy((void *)cc_common_args, CC_COMMON_ARGS, sizeof(CC_COMMON_ARGS));
#if MJIT_CFLAGS_PIPE
{ /* eliminate a flag incompatible with `-pipe` */
{ // eliminate a flag incompatible with `-pipe`
size_t i, j;
for (i = 0, j = 0; i < sizeof(CC_COMMON_ARGS) / sizeof(char *); i++) {
if (CC_COMMON_ARGS[i] && strncmp("-save-temps", CC_COMMON_ARGS[i], strlen("-save-temps")) == 0)
continue; /* skip -save-temps flag */
continue; // skip -save-temps flag
cc_common_args[j] = CC_COMMON_ARGS[i];
j++;
}
@ -680,14 +680,14 @@ mjit_init(struct mjit_options *opts)
}
pch_owner_pid = getpid();
/* Initialize mutex */
// Initialize mutex
rb_native_mutex_initialize(&mjit_engine_mutex);
rb_native_cond_initialize(&mjit_pch_wakeup);
rb_native_cond_initialize(&mjit_client_wakeup);
rb_native_cond_initialize(&mjit_worker_wakeup);
rb_native_cond_initialize(&mjit_gc_wakeup);
/* Initialize class_serials cache for compilation */
// Initialize class_serials cache for compilation
valid_class_serials = rb_hash_new();
rb_obj_hide(valid_class_serials);
rb_gc_register_mark_object(valid_class_serials);
@ -697,7 +697,7 @@ mjit_init(struct mjit_options *opts)
rb_id_table_foreach(RCLASS_CONST_TBL(rb_cObject), valid_class_serials_add_i, NULL);
}
/* Initialize worker thread */
// Initialize worker thread
start_worker();
}
@ -716,7 +716,7 @@ stop_worker(void)
}
}
/* Stop JIT-compiling methods but compiled code is kept available. */
// Stop JIT-compiling methods but compiled code is kept available.
VALUE
mjit_pause(bool wait_p)
{
@ -727,13 +727,13 @@ mjit_pause(bool wait_p)
return Qfalse;
}
/* Flush all queued units with no option or `wait: true` */
// Flush all queued units with no option or `wait: true`
if (wait_p) {
struct timeval tv;
tv.tv_sec = 0;
tv.tv_usec = 1000;
while (unit_queue.length > 0 && active_units.length < mjit_opts.max_cache_size) { /* inverse of condition that waits for mjit_worker_wakeup */
while (unit_queue.length > 0 && active_units.length < mjit_opts.max_cache_size) { // inverse of condition that waits for mjit_worker_wakeup
CRITICAL_SECTION_START(3, "in mjit_pause for a worker wakeup");
rb_native_cond_broadcast(&mjit_worker_wakeup);
CRITICAL_SECTION_FINISH(3, "in mjit_pause for a worker wakeup");
@ -745,7 +745,7 @@ mjit_pause(bool wait_p)
return Qtrue;
}
/* Restart JIT-compiling methods after mjit_pause. */
// Restart JIT-compiling methods after mjit_pause.
VALUE
mjit_resume(void)
{
@ -762,40 +762,39 @@ mjit_resume(void)
return Qtrue;
}
/* Skip calling `clean_object_files` for units which currently exist in the list. */
// Skip calling `clean_object_files` for units which currently exist in the list.
static void
skip_cleaning_object_files(struct rb_mjit_unit_list *list)
{
struct rb_mjit_unit *unit = NULL, *next;
/* No mutex for list, assuming MJIT worker does not exist yet since it's immediately after fork. */
// No mutex for list, assuming MJIT worker does not exist yet since it's immediately after fork.
list_for_each_safe(&list->head, unit, next, unode) {
#ifndef _MSC_VER /* Actually mswin does not reach here since it doesn't have fork */
#ifndef _MSC_VER // Actually mswin does not reach here since it doesn't have fork
if (unit->o_file) unit->o_file_inherited_p = true;
#endif
#if defined(_WIN32) /* mswin doesn't reach here either. This is for MinGW. */
#if defined(_WIN32) // mswin doesn't reach here either. This is for MinGW.
if (unit->so_file) unit->so_file = NULL;
#endif
}
}
/* This is called after fork initiated by Ruby's method to launch MJIT worker thread
for child Ruby process.
In multi-process Ruby applications, child Ruby processes do most of the jobs.
Thus we want child Ruby processes to enqueue ISeqs to MJIT worker's queue and
call the JIT-ed code.
But unfortunately current MJIT-generated code is process-specific. After the fork,
JIT-ed code created by parent Ruby process cannot be used in child Ruby process
because the code could rely on inline cache values (ivar's IC, send's CC) which
may vary between processes after fork or embed some process-specific addresses.
So child Ruby process can't request parent process to JIT an ISeq and use the code.
Instead of that, MJIT worker thread is created for all child Ruby processes, even
while child processes would end up with compiling the same ISeqs.
*/
// This is called after fork initiated by Ruby's method to launch MJIT worker thread
// for child Ruby process.
//
// In multi-process Ruby applications, child Ruby processes do most of the jobs.
// Thus we want child Ruby processes to enqueue ISeqs to MJIT worker's queue and
// call the JIT-ed code.
//
// But unfortunately current MJIT-generated code is process-specific. After the fork,
// JIT-ed code created by parent Ruby process cannot be used in child Ruby process
// because the code could rely on inline cache values (ivar's IC, send's CC) which
// may vary between processes after fork or embed some process-specific addresses.
//
// So child Ruby process can't request parent process to JIT an ISeq and use the code.
// Instead of that, MJIT worker thread is created for all child Ruby processes, even
// while child processes would end up with compiling the same ISeqs.
void
mjit_child_after_fork(void)
{
@ -810,33 +809,33 @@ mjit_child_after_fork(void)
start_worker();
}
/* Finish the threads processing units and creating PCH, finalize
and free MJIT data. It should be called last during MJIT
life.
If close_handle_p is true, it calls dlclose() for JIT-ed code. So it should be false
if the code can still be on stack. ...But it means to leak JIT-ed handle forever (FIXME). */
// Finish the threads processing units and creating PCH, finalize
// and free MJIT data. It should be called last during MJIT
// life.
//
// If close_handle_p is true, it calls dlclose() for JIT-ed code. So it should be false
// if the code can still be on stack. ...But it means to leak JIT-ed handle forever (FIXME).
void
mjit_finish(bool close_handle_p)
{
if (!mjit_enabled)
return;
/* Wait for pch finish */
// Wait for pch finish
verbose(2, "Stopping worker thread");
CRITICAL_SECTION_START(3, "in mjit_finish to wakeup from pch");
/* As our threads are detached, we could just cancel them. But it
is a bad idea because OS processes (C compiler) started by
threads can produce temp files. And even if the temp files are
removed, the used C compiler still complaint about their
absence. So wait for a clean finish of the threads. */
// As our threads are detached, we could just cancel them. But it
// is a bad idea because OS processes (C compiler) started by
// threads can produce temp files. And even if the temp files are
// removed, the used C compiler still complains about their
// absence. So wait for a clean finish of the threads.
while (pch_status == PCH_NOT_READY) {
verbose(3, "Waiting wakeup from make_pch");
rb_native_cond_wait(&mjit_pch_wakeup, &mjit_engine_mutex);
}
CRITICAL_SECTION_FINISH(3, "in mjit_finish to wakeup from pch");
/* Stop worker */
// Stop worker
stop_worker();
rb_native_mutex_destroy(&mjit_engine_mutex);
@ -845,7 +844,7 @@ mjit_finish(bool close_handle_p)
rb_native_cond_destroy(&mjit_worker_wakeup);
rb_native_cond_destroy(&mjit_gc_wakeup);
#ifndef _MSC_VER /* mswin has prebuilt precompiled header */
#ifndef _MSC_VER // mswin has prebuilt precompiled header
if (!mjit_opts.save_temps && getpid() == pch_owner_pid)
remove_file(pch_file);
@ -884,12 +883,12 @@ mjit_mark(void)
struct rb_mjit_unit *unit = NULL;
CRITICAL_SECTION_START(4, "mjit_mark");
list_for_each(&unit_queue.head, unit, unode) {
if (unit->iseq) { /* ISeq is still not GCed */
if (unit->iseq) { // ISeq is still not GCed
iseq = (VALUE)unit->iseq;
CRITICAL_SECTION_FINISH(4, "mjit_mark rb_gc_mark");
/* Don't wrap critical section with this. This may trigger GC,
and in that case mjit_gc_start_hook causes deadlock. */
// Don't wrap critical section with this. This may trigger GC,
// and in that case mjit_gc_start_hook causes deadlock.
rb_gc_mark(iseq);
CRITICAL_SECTION_START(4, "mjit_mark rb_gc_mark");
@ -900,19 +899,19 @@ mjit_mark(void)
RUBY_MARK_LEAVE("mjit");
}
/* A hook to update valid_class_serials. */
// A hook to update valid_class_serials.
void
mjit_add_class_serial(rb_serial_t class_serial)
{
if (!mjit_enabled)
return;
/* Do not wrap CRITICAL_SECTION here. This function is only called in main thread
and guarded by GVL, and `rb_hash_aset` may cause GC and deadlock in it. */
// Do not wrap CRITICAL_SECTION here. This function is only called in main thread
// and guarded by GVL, and `rb_hash_aset` may cause GC and deadlock in it.
rb_hash_aset(valid_class_serials, LONG2FIX(class_serial), Qtrue);
}
/* A hook to update valid_class_serials. */
// A hook to update valid_class_serials.
void
mjit_remove_class_serial(rb_serial_t class_serial)
{

mjit.h

@ -14,44 +14,44 @@
#if USE_MJIT
/* Special address values of a function generated from the
corresponding iseq by MJIT: */
// Special address values of a function generated from the
// corresponding iseq by MJIT:
enum rb_mjit_iseq_func {
/* ISEQ was not queued yet for the machine code generation */
// ISEQ was not queued yet for the machine code generation
NOT_ADDED_JIT_ISEQ_FUNC = 0,
/* ISEQ is already queued for the machine code generation but the
code is not ready yet for the execution */
// ISEQ is already queued for the machine code generation but the
// code is not ready yet for the execution
NOT_READY_JIT_ISEQ_FUNC = 1,
/* ISEQ included not compilable insn, some internal assertion failed
or the unit is unloaded */
// ISEQ included not compilable insn, some internal assertion failed
// or the unit is unloaded
NOT_COMPILED_JIT_ISEQ_FUNC = 2,
/* End mark */
// End mark
LAST_JIT_ISEQ_FUNC = 3
};
/* MJIT options which can be defined on the MRI command line. */
// MJIT options which can be defined on the MRI command line.
struct mjit_options {
/* Converted from "jit" feature flag to tell the enablement
information to ruby_show_version(). */
// Converted from "jit" feature flag to tell the enablement
// information to ruby_show_version().
char on;
/* Save temporary files after MRI finish. The temporary files
include the pre-compiled header, C code file generated for ISEQ,
and the corresponding object file. */
// Save temporary files after MRI finish. The temporary files
// include the pre-compiled header, C code file generated for ISEQ,
// and the corresponding object file.
char save_temps;
/* Print MJIT warnings to stderr. */
// Print MJIT warnings to stderr.
char warnings;
/* Disable compiler optimization and add debug symbols. It can be
very slow. */
// Disable compiler optimization and add debug symbols. It can be
// very slow.
char debug;
/* If not 0, all ISeqs are synchronously compiled. For testing. */
// If not 0, all ISeqs are synchronously compiled. For testing.
unsigned int wait;
/* Number of calls to trigger JIT compilation. For testing. */
// Number of calls to trigger JIT compilation. For testing.
unsigned int min_calls;
/* Force printing info about MJIT work of level VERBOSE or
less. 0=silence, 1=medium, 2=verbose. */
// Force printing info about MJIT work of level VERBOSE or
// less. 0=silence, 1=medium, 2=verbose.
int verbose;
/* Maximal permitted number of iseq JIT codes in a MJIT memory
cache. */
// Maximal permitted number of iseq JIT codes in a MJIT memory
// cache.
int max_cache_size;
};
@ -88,11 +88,11 @@ extern void mjit_cont_free(struct mjit_cont *cont);
extern void mjit_add_class_serial(rb_serial_t class_serial);
extern void mjit_remove_class_serial(rb_serial_t class_serial);
/* A threshold used to reject long iseqs from JITting as such iseqs
takes too much time to be compiled. */
// A threshold used to reject long iseqs from JITting, as such iseqs
// take too much time to be compiled.
#define JIT_ISEQ_SIZE_THRESHOLD 1000
/* Return TRUE if given ISeq body should be compiled by MJIT */
// Return TRUE if given ISeq body should be compiled by MJIT
static inline int
mjit_target_iseq_p(struct rb_iseq_constant_body *body)
{
@ -100,8 +100,8 @@ mjit_target_iseq_p(struct rb_iseq_constant_body *body)
&& body->iseq_size < JIT_ISEQ_SIZE_THRESHOLD;
}
/* Try to execute the current iseq in ec. Use JIT code if it is ready.
If it is not, add ISEQ to the compilation queue and return Qundef. */
// Try to execute the current iseq in ec. Use JIT code if it is ready.
// If it is not, add ISEQ to the compilation queue and return Qundef.
static inline VALUE
mjit_exec(rb_execution_context_t *ec)
{
@ -142,7 +142,7 @@ mjit_exec(rb_execution_context_t *ec)
case NOT_COMPILED_JIT_ISEQ_FUNC:
RB_DEBUG_COUNTER_INC(mjit_exec_not_compiled);
return Qundef;
default: /* to avoid warning with LAST_JIT_ISEQ_FUNC */
default: // to avoid warning with LAST_JIT_ISEQ_FUNC
break;
}
}
@ -158,7 +158,7 @@ mjit_exec(rb_execution_context_t *ec)
void mjit_child_after_fork(void);
#else /* USE_MJIT */
#else // USE_MJIT
static inline struct mjit_cont *mjit_cont_new(rb_execution_context_t *ec){return NULL;}
static inline void mjit_cont_free(struct mjit_cont *cont){}
static inline void mjit_postponed_job_register_start_hook(void){}
@ -172,5 +172,5 @@ static inline void mjit_remove_class_serial(rb_serial_t class_serial){}
static inline VALUE mjit_exec(rb_execution_context_t *ec) { return Qundef; /* unreachable */ }
static inline void mjit_child_after_fork(void){}
#endif /* USE_MJIT */
#endif /* RUBY_MJIT_H */
#endif // USE_MJIT
#endif // RUBY_MJIT_H


@ -6,9 +6,9 @@
**********************************************************************/
/* NOTE: All functions in this file are executed on MJIT worker. So don't
call Ruby methods (C functions that may call rb_funcall) or trigger
GC (using ZALLOC, xmalloc, xfree, etc.) in this file. */
// NOTE: All functions in this file are executed on MJIT worker. So don't
// call Ruby methods (C functions that may call rb_funcall) or trigger
// GC (using ZALLOC, xmalloc, xfree, etc.) in this file.
#include "internal.h"
@ -21,13 +21,13 @@
#include "insns_info.inc"
#include "vm_insnhelper.h"
/* Macros to check if a position is already compiled using compile_status.stack_size_for_pos */
// Macros to check if a position is already compiled using compile_status.stack_size_for_pos
#define NOT_COMPILED_STACK_SIZE -1
#define ALREADY_COMPILED_P(status, pos) (status->stack_size_for_pos[pos] != NOT_COMPILED_STACK_SIZE)
/* Storage to keep compiler's status. This should have information
which is global during one `mjit_compile` call. Ones conditional
in each branch should be stored in `compile_branch`. */
// Storage to keep compiler's status. This should have information
// which is global during one `mjit_compile` call. Ones conditional
// in each branch should be stored in `compile_branch`.
struct compile_status {
bool success; // has true if compilation has had no issue
int *stack_size_for_pos; // stack_size_for_pos[pos] has stack size for the position (otherwise -1)
@ -41,9 +41,9 @@ struct compile_status {
struct rb_mjit_compile_info *compile_info;
};
/* Storage to keep data which is consistent in each conditional branch.
This is created and used for one `compile_insns` call and its values
should be copied for extra `compile_insns` call. */
// Storage to keep data which is consistent in each conditional branch.
// This is created and used for one `compile_insns` call and its values
// should be copied for extra `compile_insns` call.
struct compile_branch {
unsigned int stack_size; // this simulates sp (stack pointer) of YARV
bool finish_p; // if true, compilation in this branch should stop and let another branch to be compiled
@ -92,7 +92,7 @@ compile_case_dispatch_each(VALUE key, VALUE value, VALUE arg)
return ST_CONTINUE;
}
/* Calling rb_id2str in MJIT worker causes random SEGV. So this is disabled by default. */
// Calling rb_id2str in MJIT worker causes random SEGV. So this is disabled by default.
static void
comment_id(FILE *f, ID id)
{
@ -120,12 +120,12 @@ comment_id(FILE *f, ID id)
static void compile_insns(FILE *f, const struct rb_iseq_constant_body *body, unsigned int stack_size,
unsigned int pos, struct compile_status *status);
/* Main function of JIT compilation, vm_exec_core counterpart for JIT. Compile one insn to `f`, may modify
b->stack_size and return next position.
When you add a new instruction to insns.def, it would be nice to have JIT compilation support here but
it's optional. This JIT compiler just ignores ISeq which includes unknown instruction, and ISeq which
does not have it can be compiled as usual. */
// Main function of JIT compilation, vm_exec_core counterpart for JIT. Compile one insn to `f`, may modify
// b->stack_size and return next position.
//
// When you add a new instruction to insns.def, it would be nice to have JIT compilation support here but
// it's optional. This JIT compiler just ignores ISeq which includes unknown instruction, and ISeq which
// does not have it can be compiled as usual.
static unsigned int
compile_insn(FILE *f, const struct rb_iseq_constant_body *body, const int insn, const VALUE *operands,
const unsigned int pos, struct compile_status *status, struct compile_branch *b)
@ -136,12 +136,12 @@ compile_insn(FILE *f, const struct rb_iseq_constant_body *body, const int insn,
#include "mjit_compile.inc"
/*****************/
/* If next_pos is already compiled and this branch is not finished yet,
next instruction won't be compiled in C code next and will need `goto`. */
// If next_pos is already compiled and this branch is not finished yet,
// next instruction won't be compiled in C code next and will need `goto`.
if (!b->finish_p && next_pos < body->iseq_size && ALREADY_COMPILED_P(status, next_pos)) {
fprintf(f, "goto label_%d;\n", next_pos);
/* Verify stack size assumption is the same among multiple branches */
// Verify stack size assumption is the same among multiple branches
if ((unsigned int)status->stack_size_for_pos[next_pos] != b->stack_size) {
if (mjit_opts.warnings || mjit_opts.verbose)
fprintf(stderr, "MJIT warning: JIT stack assumption is not the same between branches (%d != %u)\n",
@ -153,8 +153,8 @@ compile_insn(FILE *f, const struct rb_iseq_constant_body *body, const int insn,
return next_pos;
}
/* Compile one conditional branch. If it has branchXXX insn, this should be
called multiple times for each branch. */
// Compile one conditional branch. If it has branchXXX insn, this should be
// called multiple times for each branch.
static void
compile_insns(FILE *f, const struct rb_iseq_constant_body *body, unsigned int stack_size,
unsigned int pos, struct compile_status *status)
@ -185,7 +185,7 @@ compile_insns(FILE *f, const struct rb_iseq_constant_body *body, unsigned int st
}
}
/* Print the block to cancel JIT execution. */
// Print the block to cancel JIT execution.
static void
compile_cancel_handler(FILE *f, const struct rb_iseq_constant_body *body, struct compile_status *status)
{
@ -222,7 +222,7 @@ mjit_compile(FILE *f, const rb_iseq_t *iseq, const char *funcname)
&& !mjit_copy_cache_from_main_thread(iseq, status.cc_entries, status.is_entries))
return false;
/* For performance, we verify stack size only on compilation time (mjit_compile.inc.erb) without --jit-debug */
// For performance, we verify stack size only on compilation time (mjit_compile.inc.erb) without --jit-debug
if (!mjit_opts.debug) {
fprintf(f, "#undef OPT_CHECKED_RUN\n");
fprintf(f, "#define OPT_CHECKED_RUN 0\n\n");
@ -242,8 +242,8 @@ mjit_compile(FILE *f, const rb_iseq_t *iseq, const char *funcname)
fprintf(f, " static const VALUE *const original_body_iseq = (VALUE *)0x%"PRIxVALUE";\n",
(VALUE)body->iseq_encoded);
/* Simulate `opt_pc` in setup_parameters_complex. Other PCs which may be passed by catch tables
are not considered since vm_exec doesn't call mjit_exec for catch tables. */
// Simulate `opt_pc` in setup_parameters_complex. Other PCs which may be passed by catch tables
// are not considered since vm_exec doesn't call mjit_exec for catch tables.
if (body->param.flags.has_opt) {
int i;
fprintf(f, "\n");
@ -262,4 +262,4 @@ mjit_compile(FILE *f, const rb_iseq_t *iseq, const char *funcname)
return status.success;
}
#endif /* USE_MJIT */
#endif // USE_MJIT


@ -6,9 +6,9 @@
**********************************************************************/
/* NOTE: All functions in this file are executed on MJIT worker. So don't
call Ruby methods (C functions that may call rb_funcall) or trigger
GC (using ZALLOC, xmalloc, xfree, etc.) in this file. */
// NOTE: All functions in this file are executed on MJIT worker. So don't
// call Ruby methods (C functions that may call rb_funcall) or trigger
// GC (using ZALLOC, xmalloc, xfree, etc.) in this file.
/* We utilize widely used C compilers (GCC and LLVM Clang) to
implement MJIT. We feed them a C code generated from ISEQ. The
@ -97,7 +97,7 @@
#include "dln.h"
#include "ruby/util.h"
#undef strdup /* ruby_strdup may trigger GC */
#undef strdup // ruby_strdup may trigger GC
#ifndef MAXPATHLEN
# define MAXPATHLEN 1024
@ -117,41 +117,41 @@
typedef intptr_t pid_t;
#endif
/* Atomically set function pointer if possible. */
// Atomically set function pointer if possible.
#define MJIT_ATOMIC_SET(var, val) (void)ATOMIC_PTR_EXCHANGE(var, val)
#define MJIT_TMP_PREFIX "_ruby_mjit_"
/* The unit structure that holds metadata of ISeq for MJIT. */
// The unit structure that holds metadata of ISeq for MJIT.
struct rb_mjit_unit {
/* Unique order number of unit. */
// Unique order number of unit.
int id;
/* Dlopen handle of the loaded object file. */
// Dlopen handle of the loaded object file.
void *handle;
const rb_iseq_t *iseq;
#ifndef _MSC_VER
/* This value is always set for `compact_all_jit_code`. Also used for lazy deletion. */
// This value is always set for `compact_all_jit_code`. Also used for lazy deletion.
char *o_file;
/* true if it's inherited from parent Ruby process and lazy deletion should be skipped.
`o_file = NULL` can't be used to skip lazy deletion because `o_file` could be used
by child for `compact_all_jit_code`. */
// true if it's inherited from parent Ruby process and lazy deletion should be skipped.
// `o_file = NULL` can't be used to skip lazy deletion because `o_file` could be used
// by child for `compact_all_jit_code`.
bool o_file_inherited_p;
#endif
#if defined(_WIN32)
/* DLL cannot be removed while loaded on Windows. If this is set, it'll be lazily deleted. */
// DLL cannot be removed while loaded on Windows. If this is set, it'll be lazily deleted.
char *so_file;
#endif
/* Only used by unload_units. Flag to check this unit is currently on stack or not. */
// Only used by unload_units. Flag to check this unit is currently on stack or not.
char used_code_p;
struct list_node unode;
// mjit_compile's optimization switches
struct rb_mjit_compile_info compile_info;
};
/* Linked list of struct rb_mjit_unit. */
// Linked list of struct rb_mjit_unit.
struct rb_mjit_unit_list {
struct list_head head;
int length; /* the list length */
int length; // the list length
};
extern void rb_native_mutex_lock(rb_nativethread_lock_t *lock);
@ -165,12 +165,12 @@ extern void rb_native_cond_signal(rb_nativethread_cond_t *cond);
extern void rb_native_cond_broadcast(rb_nativethread_cond_t *cond);
extern void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex);
/* process.c */
// process.c
extern rb_pid_t ruby_waitpid_locked(rb_vm_t *, rb_pid_t, int *status, int options, rb_nativethread_cond_t *cond);
/* A copy of MJIT portion of MRI options since MJIT initialization. We
need them as MJIT threads still can work when the most MRI data were
freed. */
// A copy of the MJIT portion of MRI options, taken at MJIT initialization. We
// need it because MJIT threads can still work after most MRI data has been
// freed.
struct mjit_options mjit_opts;
// true if MJIT is enabled.
@ -179,28 +179,28 @@ bool mjit_enabled = false;
// and `mjit_call_p == false`, any JIT-ed code execution is cancelled as soon as possible.
bool mjit_call_p = false;
/* Priority queue of iseqs waiting for JIT compilation.
This variable is a pointer to head unit of the queue. */
// Priority queue of iseqs waiting for JIT compilation.
// This variable is a pointer to head unit of the queue.
static struct rb_mjit_unit_list unit_queue = { LIST_HEAD_INIT(unit_queue.head) };
/* List of units which are successfully compiled. */
// List of units which are successfully compiled.
static struct rb_mjit_unit_list active_units = { LIST_HEAD_INIT(active_units.head) };
/* List of compacted so files which will be cleaned up by `free_list()` in `mjit_finish()`. */
// List of compacted so files which will be cleaned up by `free_list()` in `mjit_finish()`.
static struct rb_mjit_unit_list compact_units = { LIST_HEAD_INIT(compact_units.head) };
// List of units before recompilation and just waiting for dlclose().
static struct rb_mjit_unit_list stale_units = { LIST_HEAD_INIT(stale_units.head) };
/* The number of so far processed ISEQs, used to generate unique id. */
// The number of so far processed ISEQs, used to generate unique id.
static int current_unit_num;
/* A mutex for conitionals and critical sections. */
// A mutex for conditionals and critical sections.
static rb_nativethread_lock_t mjit_engine_mutex;
/* A thread conditional to wake up `mjit_finish` at the end of PCH thread. */
// A thread conditional to wake up `mjit_finish` at the end of PCH thread.
static rb_nativethread_cond_t mjit_pch_wakeup;
/* A thread conditional to wake up the client if there is a change in
executed unit status. */
// A thread conditional to wake up the client if there is a change in
// executed unit status.
static rb_nativethread_cond_t mjit_client_wakeup;
/* A thread conditional to wake up a worker if there we have something
to add or we need to stop MJIT engine. */
// A thread conditional to wake up a worker if we have something
// to add or we need to stop the MJIT engine.
static rb_nativethread_cond_t mjit_worker_wakeup;
/* A thread conditional to wake up workers if at the end of GC. */
// A thread conditional to wake up workers if at the end of GC.
static rb_nativethread_cond_t mjit_gc_wakeup;
// True when GC is working.
static bool in_gc;
@ -211,31 +211,31 @@ static bool stop_worker_p;
// Set to true if worker is stopped.
static bool worker_stopped;
/* Path of "/tmp", which can be changed to $TMP in MinGW. */
// Path of "/tmp", which can be changed to $TMP in MinGW.
static char *tmp_dir;
/* Hash like { 1 => true, 2 => true, ... } whose keys are valid `class_serial`s.
This is used to invalidate obsoleted CALL_CACHE. */
// Hash like { 1 => true, 2 => true, ... } whose keys are valid `class_serial`s.
// This is used to invalidate obsoleted CALL_CACHE.
static VALUE valid_class_serials;
/* Used C compiler path. */
// Used C compiler path.
static const char *cc_path;
/* Used C compiler flags. */
// Used C compiler flags.
static const char **cc_common_args;
/* Name of the precompiled header file. */
// Name of the precompiled header file.
static char *pch_file;
/* The process id which should delete the pch_file on mjit_finish. */
// The process id which should delete the pch_file on mjit_finish.
static rb_pid_t pch_owner_pid;
/* Status of the precompiled header creation. The status is
shared by the workers and the pch thread. */
// Status of the precompiled header creation. The status is
// shared by the workers and the pch thread.
static enum {PCH_NOT_READY, PCH_FAILED, PCH_SUCCESS} pch_status;
#ifndef _MSC_VER
/* Name of the header file. */
// Name of the header file.
static char *header_file;
#endif
#ifdef _WIN32
/* Linker option to enable libruby. */
// Linker option to enable libruby.
static char *libruby_pathflag;
#endif
@ -256,7 +256,7 @@ static char *libruby_pathflag;
#if defined __GNUC__ && !defined __clang__ && !defined(_WIN32) && !defined(__CYGWIN__) && !defined(_AIX) && !defined(__OpenBSD__)
# define GCC_NOSTDLIB_FLAGS "-nodefaultlibs", "-nostdlib",
#else
# define GCC_NOSTDLIB_FLAGS /* empty */
# define GCC_NOSTDLIB_FLAGS // empty
#endif
static const char *const CC_COMMON_ARGS[] = {
@ -291,8 +291,8 @@ static const char *const CC_LIBS[] = {
#define CC_CODEFLAG_ARGS (mjit_opts.debug ? CC_DEBUG_ARGS : CC_OPTIMIZE_ARGS)
/* Print the arguments according to FORMAT to stderr only if MJIT
verbose option value is more or equal to LEVEL. */
// Print the arguments according to FORMAT to stderr only if MJIT
// verbose option value is more or equal to LEVEL.
PRINTF_ARGS(static void, 2, 3)
verbose(int level, const char *format, ...)
{
@ -301,7 +301,7 @@ verbose(int level, const char *format, ...)
size_t len = strlen(format);
char *full_format = alloca(sizeof(char) * (len + 2));
/* Creating `format + '\n'` to atomically print format and '\n'. */
// Creating `format + '\n'` to atomically print format and '\n'.
memcpy(full_format, format, len);
full_format[len] = '\n';
full_format[len+1] = '\0';
@ -326,8 +326,8 @@ mjit_warning(const char *format, ...)
}
}
/* Add unit node to the tail of doubly linked LIST. It should be not in
the list before. */
// Add a unit node to the tail of the doubly linked `list`. It must not
// already be in the list.
static void
add_to_list(struct rb_mjit_unit *unit, struct rb_mjit_unit_list *list)
{
@ -360,7 +360,7 @@ remove_file(const char *filename)
}
}
/* Lazily delete .o and/or .so files. */
// Lazily delete .o and/or .so files.
static void
clean_object_files(struct rb_mjit_unit *unit)
{
@ -369,8 +369,8 @@ clean_object_files(struct rb_mjit_unit *unit)
char *o_file = unit->o_file;
unit->o_file = NULL;
/* For compaction, unit->o_file is always set when compilation succeeds.
So save_temps needs to be checked here. */
// For compaction, unit->o_file is always set when compilation succeeds.
// So save_temps needs to be checked here.
if (!mjit_opts.save_temps && !unit->o_file_inherited_p)
remove_file(o_file);
free(o_file);
@ -389,30 +389,29 @@ clean_object_files(struct rb_mjit_unit *unit)
#endif
}
/* This is called in the following situations:
1) On dequeue or `unload_units()`, associated ISeq is already GCed.
2) The unit is not called often and unloaded by `unload_units()`.
3) Freeing lists on `mjit_finish()`.
`jit_func` value does not matter for 1 and 3 since the unit won't be used anymore.
For the situation 2, this sets the ISeq's JIT state to NOT_COMPILED_JIT_ISEQ_FUNC
to prevent the situation that the same methods are continuously compiled. */
// This is called in the following situations:
// 1) On dequeue or `unload_units()`, associated ISeq is already GCed.
// 2) The unit is not called often and unloaded by `unload_units()`.
// 3) Freeing lists on `mjit_finish()`.
//
// `jit_func` value does not matter for 1 and 3 since the unit won't be used anymore.
// For the situation 2, this sets the ISeq's JIT state to NOT_COMPILED_JIT_ISEQ_FUNC
// to prevent the situation that the same methods are continuously compiled.
static void
free_unit(struct rb_mjit_unit *unit)
{
if (unit->iseq) { /* ISeq is not GCed */
if (unit->iseq) { // ISeq is not GCed
unit->iseq->body->jit_func = (mjit_func_t)NOT_COMPILED_JIT_ISEQ_FUNC;
unit->iseq->body->jit_unit = NULL;
}
if (unit->handle && dlclose(unit->handle)) { /* handle is NULL if it's in queue */
if (unit->handle && dlclose(unit->handle)) { // handle is NULL if it's in queue
mjit_warning("failed to close handle for u%d: %s", unit->id, dlerror());
}
clean_object_files(unit);
free(unit);
}
/* Start a critical section. Use message MSG to print debug info at
LEVEL. */
// Start a critical section. Use message `msg` to print debug info at `level`.
static inline void
CRITICAL_SECTION_START(int level, const char *msg)
{
@ -421,8 +420,8 @@ CRITICAL_SECTION_START(int level, const char *msg)
verbose(level, "Locked %s", msg);
}
/* Finish the current critical section. Use message MSG to print
debug info at LEVEL. */
// Finish the current critical section. Use message `msg` to print
// debug info at `level`.
static inline void
CRITICAL_SECTION_FINISH(int level, const char *msg)
{
@ -436,7 +435,7 @@ sprint_uniq_filename(char *str, size_t size, unsigned long id, const char *prefi
return snprintf(str, size, "%s/%sp%"PRI_PIDT_PREFIX"uu%lu%s", tmp_dir, prefix, getpid(), id, suffix);
}
/* Return time in milliseconds as a double. */
// Return time in milliseconds as a double.
#ifdef __APPLE__
double ruby_real_ms_time(void);
# define real_ms_time() ruby_real_ms_time()
@ -473,17 +472,17 @@ mjit_valid_class_serial_p(rb_serial_t class_serial)
return found_p;
}
/* Return the best unit from list. The best is the first
high priority unit or the unit whose iseq has the biggest number
of calls so far. */
// Return the best unit from list. The best is the first
// high priority unit or the unit whose iseq has the biggest number
// of calls so far.
static struct rb_mjit_unit *
get_from_list(struct rb_mjit_unit_list *list)
{
struct rb_mjit_unit *unit = NULL, *next, *best = NULL;
/* Find iseq with max total_calls */
// Find iseq with max total_calls
list_for_each_safe(&list->head, unit, next, unode) {
if (unit->iseq == NULL) { /* ISeq is GCed. */
if (unit->iseq == NULL) { // ISeq is GCed.
remove_from_list(unit, list);
free_unit(unit);
continue;
@ -499,8 +498,7 @@ get_from_list(struct rb_mjit_unit_list *list)
return best;
}
/* Return length of NULL-terminated array ARGS excluding the NULL
marker. */
// Return length of NULL-terminated array `args` excluding the NULL marker.
static size_t
args_len(char *const *args)
{
@ -511,9 +509,8 @@ args_len(char *const *args)
return i;
}
/* Concatenate NUM passed NULL-terminated arrays of strings, put the
result (with NULL end marker) into the heap, and return the
result. */
// Concatenate `num` passed NULL-terminated arrays of strings, put the
// result (with NULL end marker) into the heap, and return the result.
static char **
form_args(int num, ...)
{
@ -543,16 +540,14 @@ COMPILER_WARNING_PUSH
#ifdef __GNUC__
COMPILER_WARNING_IGNORED(-Wdeprecated-declarations)
#endif
/* Start an OS process of absolute executable path with arguments ARGV.
Return PID of the process. */
// Start an OS process of absolute executable path with arguments `argv`.
// Return PID of the process.
static pid_t
start_process(const char *abspath, char *const *argv)
{
pid_t pid;
/*
* Not calling non-async-signal-safe functions between vfork
* and execv for safety
*/
// Not calling non-async-signal-safe functions between vfork
// and execv for safety
int dev_null = rb_cloexec_open(ruby_null_device, O_WRONLY, 0);
if (mjit_opts.verbose >= 2) {
@ -569,9 +564,9 @@ start_process(const char *abspath, char *const *argv)
extern HANDLE rb_w32_start_process(const char *abspath, char *const *argv, int out_fd);
int out_fd = 0;
if (mjit_opts.verbose <= 1) {
/* Discard cl.exe's outputs like:
_ruby_mjit_p12u3.c
Creating library C:.../_ruby_mjit_p12u3.lib and object C:.../_ruby_mjit_p12u3.exp */
// Discard cl.exe's outputs like:
// _ruby_mjit_p12u3.c
// Creating library C:.../_ruby_mjit_p12u3.lib and object C:.../_ruby_mjit_p12u3.exp
out_fd = dev_null;
}
@ -585,17 +580,17 @@ start_process(const char *abspath, char *const *argv)
if ((pid = vfork()) == 0) { /* TODO: reuse some function in process.c */
umask(0077);
if (mjit_opts.verbose == 0) {
/* CC can be started in a thread using a file which has been
already removed while MJIT is finishing. Discard the
messages about missing files. */
// CC can be started in a thread using a file which has been
// already removed while MJIT is finishing. Discard the
// messages about missing files.
dup2(dev_null, STDERR_FILENO);
dup2(dev_null, STDOUT_FILENO);
}
(void)close(dev_null);
pid = execv(abspath, argv); /* Pid will be negative on an error */
/* Even if we successfully found CC to compile PCH we still can
fail with loading the CC in very rare cases for some reasons.
Stop the forked process in this case. */
pid = execv(abspath, argv); // Pid will be negative on an error
// Even if we successfully found CC to compile PCH we still can
// fail with loading the CC in very rare cases for some reasons.
// Stop the forked process in this case.
verbose(1, "MJIT: Error in execv: %s", abspath);
_exit(1);
}
@ -605,9 +600,9 @@ start_process(const char *abspath, char *const *argv)
}
COMPILER_WARNING_POP
/* Execute an OS process of executable PATH with arguments ARGV.
Return -1 or -2 if failed to execute, otherwise exit code of the process.
TODO: Use a similar function in process.c */
// Execute an OS process of executable PATH with arguments ARGV.
// Return -1 or -2 if failed to execute, otherwise exit code of the process.
// TODO: Use a similar function in process.c
static int
exec_process(const char *path, char *const argv[])
{
@ -655,8 +650,8 @@ static void
remove_so_file(const char *so_file, struct rb_mjit_unit *unit)
{
#if defined(_WIN32)
/* Windows can't remove files while it's used. */
unit->so_file = strdup(so_file); /* lazily delete on `clean_object_files()` */
// Windows can't remove files while it's used.
unit->so_file = strdup(so_file); // lazily delete on `clean_object_files()`
if (unit->so_file == NULL)
mjit_warning("failed to allocate memory to lazily remove '%s': %s", so_file, strerror(errno));
#else
@ -678,39 +673,39 @@ compile_c_to_so(const char *c_file, const char *so_file)
char **args;
char *p, *obj_file;
/* files[0] = "-Fe*.dll" */
// files[0] = "-Fe*.dll"
files[0] = p = alloca(sizeof(char) * (rb_strlen_lit("-Fe") + strlen(so_file) + 1));
p = append_lit(p, "-Fe");
p = append_str2(p, so_file, strlen(so_file));
*p = '\0';
/* files[1] = "-Fo*.obj" */
/* We don't need .obj file, but it's somehow created to cwd without -Fo and we want to control the output directory. */
// files[1] = "-Fo*.obj"
// We don't need .obj file, but it's somehow created to cwd without -Fo and we want to control the output directory.
files[1] = p = alloca(sizeof(char) * (rb_strlen_lit("-Fo") + strlen(so_file) - rb_strlen_lit(DLEXT) + rb_strlen_lit(".obj") + 1));
obj_file = p = append_lit(p, "-Fo");
p = append_str2(p, so_file, strlen(so_file) - rb_strlen_lit(DLEXT));
p = append_lit(p, ".obj");
*p = '\0';
/* files[2] = "-Yu*.pch" */
// files[2] = "-Yu*.pch"
files[2] = p = alloca(sizeof(char) * (rb_strlen_lit("-Yu") + strlen(pch_file) + 1));
p = append_lit(p, "-Yu");
p = append_str2(p, pch_file, strlen(pch_file));
*p = '\0';
/* files[3] = "C:/.../rb_mjit_header-*.obj" */
// files[3] = "C:/.../rb_mjit_header-*.obj"
files[3] = p = alloca(sizeof(char) * (strlen(pch_file) + 1));
p = append_str2(p, pch_file, strlen(pch_file) - strlen(".pch"));
p = append_lit(p, ".obj");
*p = '\0';
/* files[4] = "-Tc*.c" */
// files[4] = "-Tc*.c"
files[4] = p = alloca(sizeof(char) * (rb_strlen_lit("-Tc") + strlen(c_file) + 1));
p = append_lit(p, "-Tc");
p = append_str2(p, c_file, strlen(c_file));
*p = '\0';
/* files[5] = "-Fd*.pdb" */
// files[5] = "-Fd*.pdb"
files[5] = p = alloca(sizeof(char) * (rb_strlen_lit("-Fd") + strlen(pch_file) + 1));
p = append_lit(p, "-Fd");
p = append_str2(p, pch_file, strlen(pch_file) - rb_strlen_lit(".pch"));
@ -726,7 +721,7 @@ compile_c_to_so(const char *c_file, const char *so_file)
free(args);
if (exit_code == 0) {
/* remove never-used files (.obj, .lib, .exp, .pdb). XXX: Is there any way not to generate this? */
// remove never-used files (.obj, .lib, .exp, .pdb). XXX: Is there any way not to generate this?
if (!mjit_opts.save_temps) {
char *before_dot;
remove_file(obj_file);
@ -742,9 +737,9 @@ compile_c_to_so(const char *c_file, const char *so_file)
}
return exit_code == 0;
}
#else /* _MSC_VER */
#else // _MSC_VER
/* The function producing the pre-compiled header. */
// The function producing the pre-compiled header.
static void
make_pch(void)
{
@ -849,12 +844,12 @@ link_o_to_so(const char **o_files, const char *so_file)
return exit_code == 0;
}
/* Link all cached .o files and build a .so file. Reload all JIT func from it. This
allows to avoid JIT code fragmentation and improve performance to call JIT-ed code. */
// Link all cached .o files and build a .so file. Reload all JIT func from it. This
// allows to avoid JIT code fragmentation and improve performance to call JIT-ed code.
static void
compact_all_jit_code(void)
{
# ifndef _WIN32 /* This requires header transformation but we don't transform header on Windows for now */
# ifndef _WIN32 // This requires header transformation but we don't transform header on Windows for now
struct rb_mjit_unit *unit, *cur = 0;
double start_time, end_time;
static const char so_ext[] = DLEXT;
@ -862,13 +857,13 @@ compact_all_jit_code(void)
const char **o_files;
int i = 0;
/* Abnormal use case of rb_mjit_unit that doesn't have ISeq */
unit = calloc(1, sizeof(struct rb_mjit_unit)); /* To prevent GC, don't use ZALLOC */
// Abnormal use case of rb_mjit_unit that doesn't have ISeq
unit = calloc(1, sizeof(struct rb_mjit_unit)); // To prevent GC, don't use ZALLOC
if (unit == NULL) return;
unit->id = current_unit_num++;
sprint_uniq_filename(so_file, (int)sizeof(so_file), unit->id, MJIT_TMP_PREFIX, so_ext);
/* NULL-ending for form_args */
// NULL-ending for form_args
o_files = alloca(sizeof(char *) * (active_units.length + 1));
o_files[active_units.length] = NULL;
CRITICAL_SECTION_START(3, "in compact_all_jit_code to keep .o files");
@ -881,10 +876,10 @@ compact_all_jit_code(void)
bool success = link_o_to_so(o_files, so_file);
end_time = real_ms_time();
/* TODO: Shrink this big critical section. For now, this is needed to prevent failure by missing .o files.
This assumes that o -> so link doesn't take long time because the bottleneck, which is compiler optimization,
is already done. But actually it takes about 500ms for 5,000 methods on my Linux machine, so it's better to
finish this critical section before link_o_to_so by disabling unload_units. */
// TODO: Shrink this big critical section. For now, this is needed to prevent failure by missing .o files.
// This assumes that o -> so link doesn't take long time because the bottleneck, which is compiler optimization,
// is already done. But actually it takes about 500ms for 5,000 methods on my Linux machine, so it's better to
// finish this critical section before link_o_to_so by disabling unload_units.
CRITICAL_SECTION_FINISH(3, "in compact_all_jit_code to keep .o files");
if (success) {
@ -896,7 +891,7 @@ compact_all_jit_code(void)
}
unit->handle = handle;
/* lazily dlclose handle (and .so file for win32) on `mjit_finish()`. */
// lazily dlclose handle (and .so file for win32) on `mjit_finish()`.
add_to_list(unit, &compact_units);
if (!mjit_opts.save_temps)
@ -905,7 +900,7 @@ compact_all_jit_code(void)
CRITICAL_SECTION_START(3, "in compact_all_jit_code to read list");
list_for_each(&active_units.head, cur, unode) {
void *func;
char funcname[35]; /* TODO: reconsider `35` */
char funcname[35]; // TODO: reconsider `35`
sprintf(funcname, "_mjit%d", cur->id);
if ((func = dlsym(handle, funcname)) == NULL) {
@ -913,8 +908,8 @@ compact_all_jit_code(void)
continue;
}
if (cur->iseq) { /* Check whether GCed or not */
/* Usage of jit_code might be not in a critical section. */
if (cur->iseq) { // Check whether GCed or not
// Usage of jit_code might be not in a critical section.
MJIT_ATOMIC_SET(cur->iseq->body->jit_func, (mjit_func_t)func);
}
}
@ -925,10 +920,10 @@ compact_all_jit_code(void)
free(unit);
verbose(1, "JIT compaction failure (%.1fms): Failed to compact methods", end_time - start_time);
}
# endif /* _WIN32 */
# endif // _WIN32
}
#endif /* _MSC_VER */
#endif // _MSC_VER
static void *
load_func_from_so(const char *so_file, const char *funcname, struct rb_mjit_unit *unit)
@ -959,10 +954,10 @@ static const char *
header_name_end(const char *s)
{
const char *e = s + strlen(s);
# ifdef __GNUC__ /* don't chomp .pch for mswin */
# ifdef __GNUC__ // don't chomp .pch for mswin
static const char suffix[] = ".gch";
/* chomp .gch suffix */
// chomp .gch suffix
if (e > s+sizeof(suffix)-1 && strcmp(e-sizeof(suffix)+1, suffix) == 0) {
e -= sizeof(suffix)-1;
}
@ -971,16 +966,16 @@ header_name_end(const char *s)
}
#endif
/* Print platform-specific prerequisites in generated code. */
// Print platform-specific prerequisites in generated code.
static void
compile_prelude(FILE *f)
{
#ifndef __clang__ /* -include-pch is used for Clang */
#ifndef __clang__ // -include-pch is used for Clang
const char *s = pch_file;
const char *e = header_name_end(s);
fprintf(f, "#include \"");
/* print pch_file except .gch for gcc, but keep .pch for mswin */
// print pch_file except .gch for gcc, but keep .pch for mswin
for (; s < e; s++) {
switch(*s) {
case '\\': case '"':
@ -997,12 +992,12 @@ compile_prelude(FILE *f)
#endif
}
/* Compile ISeq in UNIT and return function pointer of JIT-ed code.
It may return NOT_COMPILED_JIT_ISEQ_FUNC if something went wrong. */
// Compile ISeq in UNIT and return function pointer of JIT-ed code.
// It may return NOT_COMPILED_JIT_ISEQ_FUNC if something went wrong.
static mjit_func_t
convert_unit_to_func(struct rb_mjit_unit *unit)
{
char c_file_buff[MAXPATHLEN], *c_file = c_file_buff, *so_file, funcname[35]; /* TODO: reconsider `35` */
char c_file_buff[MAXPATHLEN], *c_file = c_file_buff, *so_file, funcname[35]; // TODO: reconsider `35`
int fd;
FILE *f;
void *func;
@ -1047,17 +1042,17 @@ convert_unit_to_func(struct rb_mjit_unit *unit)
return (mjit_func_t)NOT_COMPILED_JIT_ISEQ_FUNC;
}
/* print #include of MJIT header, etc. */
// print #include of MJIT header, etc.
compile_prelude(f);
/* wait until mjit_gc_finish_hook is called */
// wait until mjit_gc_finish_hook is called
CRITICAL_SECTION_START(3, "before mjit_compile to wait GC finish");
while (in_gc) {
verbose(3, "Waiting wakeup from GC");
rb_native_cond_wait(&mjit_gc_wakeup, &mjit_engine_mutex);
}
/* We need to check again here because we could've waited on GC above */
// We need to check again here because we could've waited on GC above
if (unit->iseq == NULL) {
fclose(f);
if (!mjit_opts.save_temps)
@ -1083,7 +1078,7 @@ convert_unit_to_func(struct rb_mjit_unit *unit)
}
bool success = mjit_compile(f, unit->iseq, funcname);
/* release blocking mjit_gc_start_hook */
// release blocking mjit_gc_start_hook
CRITICAL_SECTION_START(3, "after mjit_compile to wakeup client for GC");
in_jit = false;
verbose(3, "Sending wakeup signal to client in a mjit-worker for GC");
@ -1102,13 +1097,13 @@ convert_unit_to_func(struct rb_mjit_unit *unit)
#ifdef _MSC_VER
success = compile_c_to_so(c_file, so_file);
#else
/* splitting .c -> .o step and .o -> .so step, to cache .o files in the future */
// splitting .c -> .o step and .o -> .so step, to cache .o files in the future
if ((success = compile_c_to_o(c_file, o_file)) != false) {
const char *o_files[2] = { NULL, NULL };
o_files[0] = o_file;
success = link_o_to_so(o_files, so_file);
/* Always set o_file for compaction. The value is also used for lazy deletion. */
// Always set o_file for compaction. The value is also used for lazy deletion.
unit->o_file = strdup(o_file);
if (unit->o_file == NULL) {
mjit_warning("failed to allocate memory to remember '%s' (%s), removing it...", o_file, strerror(errno));
@ -1146,13 +1141,13 @@ typedef struct {
bool finish_p;
} mjit_copy_job_t;
/* Singleton MJIT copy job. This is made global since it needs to be durable even when MJIT worker thread is stopped.
(ex: register job -> MJIT pause -> MJIT resume -> dispatch job. Actually this should be just cancelled by finish_p check) */
// Singleton MJIT copy job. This is made global since it needs to be durable even when MJIT worker thread is stopped.
// (ex: register job -> MJIT pause -> MJIT resume -> dispatch job. Actually this should be just cancelled by finish_p check)
static mjit_copy_job_t mjit_copy_job = { .iseq = NULL, .finish_p = true };
static void mjit_copy_job_handler(void *data);
/* vm_trace.c */
// vm_trace.c
int rb_workqueue_register(unsigned flags, rb_postponed_job_func_t , void *);
// Copy inline cache values of `iseq` to `cc_entries` and `is_entries`.
@ -1209,9 +1204,9 @@ mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq, struct rb_call_cache *cc
return result;
}
/* The function implementing a worker. It is executed in a separate
thread by rb_thread_create_mjit_thread. It compiles precompiled header
and then compiles requested ISeqs. */
// The function implementing a worker. It is executed in a separate
// thread by rb_thread_create_mjit_thread. It compiles precompiled header
// and then compiles requested ISeqs.
void
mjit_worker(void)
{
@ -1227,14 +1222,14 @@ mjit_worker(void)
verbose(3, "Sending wakeup signal to client in a mjit-worker");
rb_native_cond_signal(&mjit_client_wakeup);
CRITICAL_SECTION_FINISH(3, "in worker to update worker_stopped");
return; /* TODO: do the same thing in the latter half of mjit_finish */
return; // TODO: do the same thing in the latter half of mjit_finish
}
/* main worker loop */
// main worker loop
while (!stop_worker_p) {
struct rb_mjit_unit *unit;
/* wait until unit is available */
// wait until unit is available
CRITICAL_SECTION_START(3, "in worker dequeue");
while ((list_empty(&unit_queue.head) || active_units.length >= mjit_opts.max_cache_size) && !stop_worker_p) {
rb_native_cond_wait(&mjit_worker_wakeup, &mjit_engine_mutex);
@ -1254,18 +1249,18 @@ mjit_worker(void)
break;
CRITICAL_SECTION_START(3, "in jit func replace");
while (in_gc) { /* Make sure we're not GC-ing when touching ISeq */
while (in_gc) { // Make sure we're not GC-ing when touching ISeq
verbose(3, "Waiting wakeup from GC");
rb_native_cond_wait(&mjit_gc_wakeup, &mjit_engine_mutex);
}
if (unit->iseq) { /* Check whether GCed or not */
/* Usage of jit_code might be not in a critical section. */
if (unit->iseq) { // Check whether GCed or not
// Usage of jit_code might be not in a critical section.
MJIT_ATOMIC_SET(unit->iseq->body->jit_func, func);
}
CRITICAL_SECTION_FINISH(3, "in jit func replace");
#ifndef _MSC_VER
/* Combine .o files to one .so and reload all jit_func to improve memory locality */
// Combine .o files to one .so and reload all jit_func to improve memory locality
if ((!mjit_opts.wait && unit_queue.length == 0 && active_units.length > 1)
|| active_units.length == mjit_opts.max_cache_size) {
compact_all_jit_code();


@ -54,4 +54,4 @@
break;
}
}
#endif /* OPT_IC_FOR_IVAR */
#endif // OPT_IC_FOR_IVAR