1
0
Fork 0
mirror of https://github.com/ruby/ruby.git synced 2022-11-09 12:17:21 -05:00

mjit.c: use boolean type for boolean variables

and functions to clarify the intention and make sure it's not used in a
surprising way (like using 2, 3, ... other than 0, 1 even while it seems
to be a boolean).

This is a retry of r66775, which included some typos.

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@66778 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
This commit is contained in:
k0kubun 2019-01-10 14:31:18 +00:00
parent efd99b5331
commit 56bf732aaf
9 changed files with 112 additions and 115 deletions

2
eval.c
View file

@ -233,7 +233,7 @@ ruby_cleanup(volatile int ex)
}
}
mjit_finish(TRUE); /* We still need ISeqs here. */
mjit_finish(true); // We still need ISeqs here.
ruby_finalize_1();

View file

@ -1595,15 +1595,15 @@ VALUE rb_math_sqrt(VALUE);
/* mjit.c */
#if USE_MJIT
extern int mjit_enabled;
VALUE mjit_pause(int wait_p);
extern bool mjit_enabled;
VALUE mjit_pause(bool wait_p);
VALUE mjit_resume(void);
void mjit_finish(int close_handle_p);
void mjit_finish(bool close_handle_p);
#else
#define mjit_enabled 0
static inline VALUE mjit_pause(int wait_p){ return Qnil; } /* unreachable */
static inline VALUE mjit_resume(void){ return Qnil; } /* unreachable */
static inline void mjit_finish(int close_handle_p){}
static inline VALUE mjit_pause(bool wait_p){ return Qnil; } // unreachable
static inline VALUE mjit_resume(void){ return Qnil; } // unreachable
static inline void mjit_finish(bool close_handle_p){}
#endif
/* newline.c */

52
mjit.c
View file

@ -48,7 +48,7 @@ mjit_copy_job_handler(void *data)
memcpy(job->is_entries, body->is_entries, sizeof(union iseq_inline_storage_entry) * body->is_size);
}
job->finish_p = TRUE;
job->finish_p = true;
rb_native_cond_broadcast(&mjit_worker_wakeup);
CRITICAL_SECTION_FINISH(3, "in mjit_copy_job_handler");
}
@ -88,7 +88,7 @@ mjit_gc_start_hook(void)
rb_native_cond_wait(&mjit_client_wakeup, &mjit_engine_mutex);
verbose(4, "Getting wakeup from a worker for GC");
}
in_gc = TRUE;
in_gc = true;
CRITICAL_SECTION_FINISH(4, "mjit_gc_start_hook");
}
@ -100,7 +100,7 @@ mjit_gc_finish_hook(void)
if (!mjit_enabled)
return;
CRITICAL_SECTION_START(4, "mjit_gc_finish_hook");
in_gc = FALSE;
in_gc = false;
verbose(4, "Sending wakeup signal to workers after GC");
rb_native_cond_broadcast(&mjit_gc_wakeup);
CRITICAL_SECTION_FINISH(4, "mjit_gc_finish_hook");
@ -126,7 +126,7 @@ mjit_free_iseq(const rb_iseq_t *iseq)
because node of unit_queue and one of active_units may have the same unit
during proceeding unit. */
static void
free_list(struct rb_mjit_unit_list *list, int close_handle_p)
free_list(struct rb_mjit_unit_list *list, bool close_handle_p)
{
struct rb_mjit_unit *unit = 0, *next;
@ -360,8 +360,8 @@ mjit_wait_call(rb_execution_context_t *ec, struct rb_iseq_constant_body *body)
extern VALUE ruby_archlibdir_path, ruby_prefix_path;
/* Initialize header_file, pch_file, libruby_pathflag. Return TRUE on success. */
static int
// Initialize header_file, pch_file, libruby_pathflag. Return true on success.
static bool
init_header_filename(void)
{
int fd;
@ -418,7 +418,7 @@ init_header_filename(void)
unsetenv(PRELOADENV);
verbose(3, "MJIT_HEADER: %s", hdr);
header_file = ruby_strdup(hdr);
if (!header_file) return FALSE;
if (!header_file) return false;
}
}
else
@ -437,7 +437,7 @@ init_header_filename(void)
verbose(1, "Cannot access header file: %s", header_file);
xfree(header_file);
header_file = NULL;
return FALSE;
return false;
}
(void)close(fd);
}
@ -455,7 +455,7 @@ init_header_filename(void)
verbose(1, "Cannot access precompiled header file: %s", pch_file);
xfree(pch_file);
pch_file = NULL;
return FALSE;
return false;
}
(void)close(fd);
}
@ -471,7 +471,7 @@ init_header_filename(void)
*p = '\0';
#endif
return TRUE;
return true;
}
static enum rb_id_table_iterator_result
@ -572,23 +572,23 @@ system_tmpdir(void)
#define MIN_CACHE_SIZE 10
/* Start MJIT worker. Return TRUE if worker is successfully started. */
static int
static bool
start_worker(void)
{
stop_worker_p = FALSE;
worker_stopped = FALSE;
stop_worker_p = false;
worker_stopped = false;
if (!rb_thread_create_mjit_thread(mjit_worker)) {
mjit_enabled = FALSE;
mjit_enabled = false;
rb_native_mutex_destroy(&mjit_engine_mutex);
rb_native_cond_destroy(&mjit_pch_wakeup);
rb_native_cond_destroy(&mjit_client_wakeup);
rb_native_cond_destroy(&mjit_worker_wakeup);
rb_native_cond_destroy(&mjit_gc_wakeup);
verbose(1, "Failure in MJIT thread initialization\n");
return FALSE;
return false;
}
return TRUE;
return true;
}
/* Initialize MJIT. Start a thread creating the precompiled header and
@ -598,8 +598,8 @@ void
mjit_init(struct mjit_options *opts)
{
mjit_opts = *opts;
mjit_enabled = TRUE;
mjit_call_p = TRUE;
mjit_enabled = true;
mjit_call_p = true;
/* Normalize options */
if (mjit_opts.min_calls == 0)
@ -635,7 +635,7 @@ mjit_init(struct mjit_options *opts)
verbose(2, "MJIT: tmp_dir is %s", tmp_dir);
if (!init_header_filename()) {
mjit_enabled = FALSE;
mjit_enabled = false;
verbose(1, "Failure in MJIT header file name initialization\n");
return;
}
@ -670,7 +670,7 @@ stop_worker(void)
while (!worker_stopped) {
verbose(3, "Sending cancel signal to worker");
CRITICAL_SECTION_START(3, "in stop_worker");
stop_worker_p = TRUE; /* Setting this inside loop because RUBY_VM_CHECK_INTS may make this FALSE. */
stop_worker_p = true; // Setting this inside loop because RUBY_VM_CHECK_INTS may make this false.
rb_native_cond_broadcast(&mjit_worker_wakeup);
CRITICAL_SECTION_FINISH(3, "in stop_worker");
RUBY_VM_CHECK_INTS(ec);
@ -679,7 +679,7 @@ stop_worker(void)
/* Stop JIT-compiling methods but compiled code is kept available. */
VALUE
mjit_pause(int wait_p)
mjit_pause(bool wait_p)
{
if (!mjit_enabled) {
rb_raise(rb_eRuntimeError, "MJIT is not enabled");
@ -732,7 +732,7 @@ skip_cleaning_object_files(struct rb_mjit_unit_list *list)
/* No mutex for list, assuming MJIT worker does not exist yet since it's immediately after fork. */
list_for_each_safe(&list->head, unit, next, unode) {
#ifndef _MSC_VER /* Actually mswin does not reach here since it doesn't have fork */
if (unit->o_file) unit->o_file_inherited_p = TRUE;
if (unit->o_file) unit->o_file_inherited_p = true;
#endif
#if defined(_WIN32) /* mswin doesn't reach here either. This is for MinGW. */
@ -775,10 +775,10 @@ mjit_child_after_fork(void)
and free MJIT data. It should be called last during MJIT
life.
If close_handle_p is TRUE, it calls dlclose() for JIT-ed code. So it should be FALSE
If close_handle_p is true, it calls dlclose() for JIT-ed code. So it should be false
if the code can still be on stack. ...But it means to leak JIT-ed handle forever (FIXME). */
void
mjit_finish(int close_handle_p)
mjit_finish(bool close_handle_p)
{
if (!mjit_enabled)
return;
@ -816,13 +816,13 @@ mjit_finish(int close_handle_p)
xfree(tmp_dir); tmp_dir = NULL;
xfree(pch_file); pch_file = NULL;
mjit_call_p = FALSE;
mjit_call_p = false;
free_list(&unit_queue, close_handle_p);
free_list(&active_units, close_handle_p);
free_list(&compact_units, close_handle_p);
finish_conts();
mjit_enabled = FALSE;
mjit_enabled = false;
verbose(1, "Successful MJIT finish");
}

4
mjit.h
View file

@ -58,13 +58,13 @@ typedef VALUE (*mjit_func_t)(rb_execution_context_t *, rb_control_frame_t *);
RUBY_SYMBOL_EXPORT_BEGIN
RUBY_EXTERN struct mjit_options mjit_opts;
RUBY_EXTERN int mjit_call_p;
RUBY_EXTERN bool mjit_call_p;
extern void mjit_add_iseq_to_process(const rb_iseq_t *iseq);
extern VALUE mjit_wait_call(rb_execution_context_t *ec, struct rb_iseq_constant_body *body);
RUBY_SYMBOL_EXPORT_END
extern int mjit_compile(FILE *f, const struct rb_iseq_constant_body *body, const char *funcname, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries);
extern bool mjit_compile(FILE *f, const struct rb_iseq_constant_body *body, const char *funcname, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries);
extern void mjit_init(struct mjit_options *opts);
extern void mjit_postponed_job_register_start_hook(void);
extern void mjit_postponed_job_register_finish_hook(void);

View file

@ -29,12 +29,12 @@
which is global during one `mjit_compile` call. Ones conditional
in each branch should be stored in `compile_branch`. */
struct compile_status {
int success; /* has TRUE if compilation has had no issue */
int *stack_size_for_pos; /* stack_size_for_pos[pos] has stack size for the position (otherwise -1) */
/* If TRUE, JIT-ed code will use local variables to store pushed values instead of
using VM's stack and moving stack pointer. */
int local_stack_p;
/* Safely-accessible cache entries copied from main thread. */
bool success; // has true if compilation has had no issue
int *stack_size_for_pos; // stack_size_for_pos[pos] has stack size for the position (otherwise -1)
// If true, JIT-ed code will use local variables to store pushed values instead of
// using VM's stack and moving stack pointer.
bool local_stack_p;
// Safely-accessible cache entries copied from main thread.
union iseq_inline_storage_entry *is_entries;
struct rb_call_cache *cc_entries;
};
@ -43,8 +43,8 @@ struct compile_status {
This is created and used for one `compile_insns` call and its values
should be copied for extra `compile_insns` call. */
struct compile_branch {
unsigned int stack_size; /* this simulates sp (stack pointer) of YARV */
int finish_p; /* if TRUE, compilation in this branch should stop and let another branch to be compiled */
unsigned int stack_size; // this simulates sp (stack pointer) of YARV
bool finish_p; // if true, compilation in this branch should stop and let another branch to be compiled
};
struct case_dispatch_var {
@ -53,21 +53,21 @@ struct case_dispatch_var {
VALUE last_value;
};
/* Returns TRUE if call cache is still not obsoleted and cc->me->def->type is available. */
static int
// Returns true if call cache is still not obsoleted and cc->me->def->type is available.
static bool
has_valid_method_type(CALL_CACHE cc)
{
extern int mjit_valid_class_serial_p(rb_serial_t class_serial);
extern bool mjit_valid_class_serial_p(rb_serial_t class_serial);
return GET_GLOBAL_METHOD_STATE() == cc->method_state
&& mjit_valid_class_serial_p(cc->class_serial) && cc->me;
}
/* Returns TRUE if iseq is inlinable, otherwise NULL. This becomes TRUE in the same condition
as CC_SET_FASTPATH (in vm_callee_setup_arg) is called from vm_call_iseq_setup. */
static int
// Returns true if iseq is inlinable, otherwise NULL. This becomes true in the same condition
// as CC_SET_FASTPATH (in vm_callee_setup_arg) is called from vm_call_iseq_setup.
static bool
inlinable_iseq_p(CALL_INFO ci, CALL_CACHE cc, const rb_iseq_t *iseq)
{
extern int rb_simple_iseq_p(const rb_iseq_t *iseq);
extern bool rb_simple_iseq_p(const rb_iseq_t *iseq);
return iseq != NULL
&& rb_simple_iseq_p(iseq) && !(ci->flag & VM_CALL_KW_SPLAT) /* Top of vm_callee_setup_arg. In this case, opt_pc is 0. */
&& (!IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) && !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED)); /* CC_SET_FASTPATH */
@ -143,7 +143,7 @@ compile_insn(FILE *f, const struct rb_iseq_constant_body *body, const int insn,
if (mjit_opts.warnings || mjit_opts.verbose)
fprintf(stderr, "MJIT warning: JIT stack assumption is not the same between branches (%d != %u)\n",
status->stack_size_for_pos[next_pos], b->stack_size);
status->success = FALSE;
status->success = false;
}
}
@ -160,7 +160,7 @@ compile_insns(FILE *f, const struct rb_iseq_constant_body *body, unsigned int st
struct compile_branch branch;
branch.stack_size = stack_size;
branch.finish_p = FALSE;
branch.finish_p = false;
while (pos < body->iseq_size && !ALREADY_COMPILED_P(status, pos) && !branch.finish_p) {
#if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
@ -175,7 +175,7 @@ compile_insns(FILE *f, const struct rb_iseq_constant_body *body, unsigned int st
if (status->success && branch.stack_size > body->stack_max) {
if (mjit_opts.warnings || mjit_opts.verbose)
fprintf(stderr, "MJIT warning: JIT stack size (%d) exceeded its max size (%d)\n", branch.stack_size, body->stack_max);
status->success = FALSE;
status->success = false;
}
if (!status->success)
break;
@ -196,16 +196,16 @@ compile_cancel_handler(FILE *f, const struct rb_iseq_constant_body *body, struct
fprintf(f, " return Qundef;\n");
}
/* Compile ISeq to C code in F. It returns 1 if it succeeds to compile. */
int
// Compile ISeq to C code in `f`. It returns true if it succeeds to compile.
bool
mjit_compile(FILE *f, const struct rb_iseq_constant_body *body, const char *funcname, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries)
{
struct compile_status status;
status.success = TRUE;
status.success = true;
status.local_stack_p = !body->catch_except_p;
status.stack_size_for_pos = (int *)malloc(sizeof(int) * body->iseq_size);
if (status.stack_size_for_pos == NULL)
return FALSE;
return false;
memset(status.stack_size_for_pos, NOT_COMPILED_STACK_SIZE, sizeof(int) * body->iseq_size);
status.cc_entries = cc_entries;
status.is_entries = is_entries;

View file

@ -132,10 +132,10 @@ struct rb_mjit_unit {
#ifndef _MSC_VER
/* This value is always set for `compact_all_jit_code`. Also used for lazy deletion. */
char *o_file;
/* TRUE if it's inherited from parent Ruby process and lazy deletion should be skipped.
/* true if it's inherited from parent Ruby process and lazy deletion should be skipped.
`o_file = NULL` can't be used to skip lazy deletion because `o_file` could be used
by child for `compact_all_jit_code`. */
int o_file_inherited_p;
bool o_file_inherited_p;
#endif
#if defined(_WIN32)
/* DLL cannot be removed while loaded on Windows. If this is set, it'll be lazily deleted. */
@ -171,11 +171,11 @@ extern rb_pid_t ruby_waitpid_locked(rb_vm_t *, rb_pid_t, int *status, int option
freed. */
struct mjit_options mjit_opts;
/* TRUE if MJIT is enabled. */
int mjit_enabled = FALSE;
/* TRUE if JIT-ed code should be called. When `ruby_vm_event_enabled_global_flags & ISEQ_TRACE_EVENTS`
and `mjit_call_p == FALSE`, any JIT-ed code execution is cancelled as soon as possible. */
int mjit_call_p = FALSE;
// true if MJIT is enabled.
bool mjit_enabled = false;
// true if JIT-ed code should be called. When `ruby_vm_event_enabled_global_flags & ISEQ_TRACE_EVENTS`
// and `mjit_call_p == false`, any JIT-ed code execution is cancelled as soon as possible.
bool mjit_call_p = false;
/* Priority queue of iseqs waiting for JIT compilation.
This variable is a pointer to head unit of the queue. */
@ -198,14 +198,14 @@ static rb_nativethread_cond_t mjit_client_wakeup;
static rb_nativethread_cond_t mjit_worker_wakeup;
/* A thread conditional to wake up workers if at the end of GC. */
static rb_nativethread_cond_t mjit_gc_wakeup;
/* True when GC is working. */
static int in_gc;
/* True when JIT is working. */
static int in_jit;
/* Set to TRUE to stop worker. */
static int stop_worker_p;
/* Set to TRUE if worker is stopped. */
static int worker_stopped;
// True when GC is working.
static bool in_gc;
// True when JIT is working.
static bool in_jit;
// Set to TRUE to stop worker.
static bool stop_worker_p;
// Set to TRUE if worker is stopped.
static bool worker_stopped;
/* Path of "/tmp", which can be changed to $TMP in MinGW. */
static char *tmp_dir;
@ -363,7 +363,7 @@ clean_object_files(struct rb_mjit_unit *unit)
char *so_file = unit->so_file;
unit->so_file = NULL;
/* unit->so_file is set only when mjit_opts.save_temps is FALSE. */
// unit->so_file is set only when mjit_opts.save_temps is false.
remove_file(so_file);
free(so_file);
}
@ -444,14 +444,12 @@ real_ms_time(void)
}
#endif
/* Return TRUE if class_serial is not obsoleted. This is used by mjit_compile.c. */
int
// Return true if class_serial is not obsoleted. This is used by mjit_compile.c.
bool
mjit_valid_class_serial_p(rb_serial_t class_serial)
{
int found_p;
CRITICAL_SECTION_START(3, "in valid_class_serial_p");
found_p = rb_hash_stlike_lookup(valid_class_serials, LONG2FIX(class_serial), NULL);
bool found_p = rb_hash_stlike_lookup(valid_class_serials, LONG2FIX(class_serial), NULL);
CRITICAL_SECTION_FINISH(3, "in valid_class_serial_p");
return found_p;
}
@ -652,8 +650,8 @@ remove_so_file(const char *so_file, struct rb_mjit_unit *unit)
#define append_lit(p, str) append_str2(p, str, rb_strlen_lit(str))
#ifdef _MSC_VER
/* Compile C file to so. It returns 1 if it succeeds. (mswin) */
static int
// Compile C file to so. It returns true if it succeeds. (mswin)
static bool
compile_c_to_so(const char *c_file, const char *so_file)
{
int exit_code;
@ -703,7 +701,7 @@ compile_c_to_so(const char *c_file, const char *so_file)
args = form_args(5, CC_LDSHARED_ARGS, CC_CODEFLAG_ARGS,
files, CC_LIBS, CC_DLDFLAGS_ARGS);
if (args == NULL)
return FALSE;
return false;
exit_code = exec_process(cc_path, args);
free(args);
@ -770,8 +768,8 @@ make_pch(void)
CRITICAL_SECTION_FINISH(3, "in make_pch");
}
/* Compile .c file to .o file. It returns 1 if it succeeds. (non-mswin) */
static int
// Compile .c file to .o file. It returns true if it succeeds. (non-mswin)
static bool
compile_c_to_o(const char *c_file, const char *o_file)
{
int exit_code;
@ -791,7 +789,7 @@ compile_c_to_o(const char *c_file, const char *o_file)
# endif
args = form_args(5, cc_common_args, CC_CODEFLAG_ARGS, files, CC_LIBS, CC_DLDFLAGS_ARGS);
if (args == NULL)
return FALSE;
return false;
exit_code = exec_process(cc_path, args);
free(args);
@ -801,8 +799,8 @@ compile_c_to_o(const char *c_file, const char *o_file)
return exit_code == 0;
}
/* Link .o files to .so file. It returns 1 if it succeeds. (non-mswin) */
static int
// Link .o files to .so file. It returns true if it succeeds. (non-mswin)
static bool
link_o_to_so(const char **o_files, const char *so_file)
{
int exit_code;
@ -819,7 +817,7 @@ link_o_to_so(const char **o_files, const char *so_file)
args = form_args(6, CC_LDSHARED_ARGS, CC_CODEFLAG_ARGS,
options, o_files, CC_LIBS, CC_DLDFLAGS_ARGS);
if (args == NULL)
return FALSE;
return false;
exit_code = exec_process(cc_path, args);
free(args);
@ -840,7 +838,7 @@ compact_all_jit_code(void)
static const char so_ext[] = DLEXT;
char so_file[MAXPATHLEN];
const char **o_files;
int i = 0, success;
int i = 0;
/* Abnormal use case of rb_mjit_unit that doesn't have ISeq */
unit = calloc(1, sizeof(struct rb_mjit_unit)); /* To prevent GC, don't use ZALLOC */
@ -858,7 +856,7 @@ compact_all_jit_code(void)
}
start_time = real_ms_time();
success = link_o_to_so(o_files, so_file);
bool success = link_o_to_so(o_files, so_file);
end_time = real_ms_time();
/* TODO: Shrink this big critical section. For now, this is needed to prevent failure by missing .o files.
@ -983,7 +981,6 @@ static mjit_func_t
convert_unit_to_func(struct rb_mjit_unit *unit, struct rb_call_cache *cc_entries, union iseq_inline_storage_entry *is_entries)
{
char c_file_buff[MAXPATHLEN], *c_file = c_file_buff, *so_file, funcname[35]; /* TODO: reconsider `35` */
int success;
int fd;
FILE *f;
void *func;
@ -1044,10 +1041,10 @@ convert_unit_to_func(struct rb_mjit_unit *unit, struct rb_call_cache *cc_entries
if (!mjit_opts.save_temps)
remove_file(c_file);
free_unit(unit);
in_jit = FALSE; /* just being explicit for return */
in_jit = false; // just being explicit for return
}
else {
in_jit = TRUE;
in_jit = true;
}
CRITICAL_SECTION_FINISH(3, "before mjit_compile to wait GC finish");
if (!in_jit) {
@ -1062,11 +1059,11 @@ convert_unit_to_func(struct rb_mjit_unit *unit, struct rb_call_cache *cc_entries
verbose(2, "start compilation: %s@%s:%d -> %s", label, path, lineno, c_file);
fprintf(f, "/* %s@%s:%d */\n\n", label, path, lineno);
}
success = mjit_compile(f, unit->iseq->body, funcname, cc_entries, is_entries);
bool success = mjit_compile(f, unit->iseq->body, funcname, cc_entries, is_entries);
/* release blocking mjit_gc_start_hook */
CRITICAL_SECTION_START(3, "after mjit_compile to wakeup client for GC");
in_jit = FALSE;
in_jit = false;
verbose(3, "Sending wakeup signal to client in a mjit-worker for GC");
rb_native_cond_signal(&mjit_client_wakeup);
CRITICAL_SECTION_FINISH(3, "in worker to wakeup client for GC");
@ -1084,7 +1081,7 @@ convert_unit_to_func(struct rb_mjit_unit *unit, struct rb_call_cache *cc_entries
success = compile_c_to_so(c_file, so_file);
#else
/* splitting .c -> .o step and .o -> .so step, to cache .o files in the future */
if ((success = compile_c_to_o(c_file, o_file)) != 0) {
if ((success = compile_c_to_o(c_file, o_file)) != false) {
const char *o_files[2] = { NULL, NULL };
o_files[0] = o_file;
success = link_o_to_so(o_files, so_file);
@ -1124,7 +1121,7 @@ typedef struct {
struct rb_mjit_unit *unit;
struct rb_call_cache *cc_entries;
union iseq_inline_storage_entry *is_entries;
int finish_p;
bool finish_p;
} mjit_copy_job_t;
/* Singleton MJIT copy job. This is made global since it needs to be durable even when MJIT worker thread is stopped.
@ -1138,12 +1135,12 @@ int rb_workqueue_register(unsigned flags, rb_postponed_job_func_t , void *);
/* We're lazily copying cache values from main thread because these cache values
could be different between ones on enqueue timing and ones on dequeue timing.
Return TRUE if copy succeeds. */
static int
Return true if copy succeeds. */
static bool
copy_cache_from_main_thread(mjit_copy_job_t *job)
{
CRITICAL_SECTION_START(3, "in copy_cache_from_main_thread");
job->finish_p = FALSE; /* allow dispatching this job in mjit_copy_job_handler */
job->finish_p = false; // allow dispatching this job in mjit_copy_job_handler
CRITICAL_SECTION_FINISH(3, "in copy_cache_from_main_thread");
if (UNLIKELY(mjit_opts.wait)) {
@ -1152,7 +1149,7 @@ copy_cache_from_main_thread(mjit_copy_job_t *job)
}
if (!rb_workqueue_register(0, mjit_copy_job_handler, (void *)job))
return FALSE;
return false;
CRITICAL_SECTION_START(3, "in MJIT copy job wait");
/* checking `stop_worker_p` too because `RUBY_VM_CHECK_INTS(ec)` may not
flush mjit_copy_job_handler when EC_EXEC_TAG() is not TAG_NONE, and then
@ -1179,9 +1176,9 @@ mjit_worker(void)
}
#endif
if (pch_status == PCH_FAILED) {
mjit_enabled = FALSE;
mjit_enabled = false;
CRITICAL_SECTION_START(3, "in worker to update worker_stopped");
worker_stopped = TRUE;
worker_stopped = true;
verbose(3, "Sending wakeup signal to client in a mjit-worker");
rb_native_cond_signal(&mjit_client_wakeup);
CRITICAL_SECTION_FINISH(3, "in worker to update worker_stopped");
@ -1199,7 +1196,7 @@ mjit_worker(void)
verbose(3, "Getting wakeup from client");
}
unit = get_from_list(&unit_queue);
job->finish_p = TRUE; /* disable dispatching this job in mjit_copy_job_handler while it's being modified */
job->finish_p = true; // disable dispatching this job in mjit_copy_job_handler while it's being modified
CRITICAL_SECTION_FINISH(3, "in worker dequeue");
if (unit) {
@ -1216,7 +1213,7 @@ mjit_worker(void)
/* Copy ISeq's inline caches values to avoid race condition. */
if (job->cc_entries != NULL || job->is_entries != NULL) {
if (copy_cache_from_main_thread(job) == FALSE) {
if (copy_cache_from_main_thread(job) == false) {
continue; /* retry postponed_job failure, or stop worker */
}
}
@ -1245,10 +1242,10 @@ mjit_worker(void)
}
}
/* Disable dispatching this job in mjit_copy_job_handler while memory allocated by alloca
could be expired after finishing this function. */
job->finish_p = TRUE;
// Disable dispatching this job in mjit_copy_job_handler while memory allocated by alloca
// could be expired after finishing this function.
job->finish_p = true;
/* To keep mutex unlocked when it is destroyed by mjit_finish, don't wrap CRITICAL_SECTION here. */
worker_stopped = TRUE;
// To keep mutex unlocked when it is destroyed by mjit_finish, don't wrap CRITICAL_SECTION here.
worker_stopped = true;
}

View file

@ -2947,7 +2947,7 @@ rb_f_exec(int argc, const VALUE *argv)
execarg_obj = rb_execarg_new(argc, argv, TRUE, FALSE);
eargp = rb_execarg_get(execarg_obj);
if (mjit_enabled) mjit_finish(FALSE); /* avoid leaking resources, and do not leave files. XXX: JIT-ed handle can leak after exec error is rescued. */
if (mjit_enabled) mjit_finish(false); // avoid leaking resources, and do not leave files. XXX: JIT-ed handle can leak after exec error is rescued.
before_exec(); /* stop timer thread before redirects */
rb_execarg_parent_start(execarg_obj);
fail_str = eargp->use_shell ? eargp->invoke.sh.shell_script : eargp->invoke.cmd.command_name;
@ -4045,7 +4045,7 @@ rb_fork_ruby(int *status)
while (1) {
prefork();
if (mjit_enabled) mjit_pause(FALSE); /* Don't leave locked mutex to child. Note: child_handler must be enabled to pause MJIT. */
if (mjit_enabled) mjit_pause(false); // Don't leave locked mutex to child. Note: child_handler must be enabled to pause MJIT.
disable_child_handler_before_fork(&old);
before_fork_ruby();
pid = fork();
@ -6502,7 +6502,7 @@ rb_daemon(int nochdir, int noclose)
{
int err = 0;
#ifdef HAVE_DAEMON
if (mjit_enabled) mjit_pause(FALSE); /* Don't leave locked mutex to child. */
if (mjit_enabled) mjit_pause(false); // Don't leave locked mutex to child.
before_fork_ruby();
err = daemon(nochdir, noclose);
after_fork_ruby();

View file

@ -58,7 +58,7 @@ switch (insn) {
if (b->stack_size != 1) {
if (mjit_opts.warnings || mjit_opts.verbose)
fprintf(stderr, "MJIT warning: Unexpected JIT stack_size on leave: %d\n", b->stack_size);
status->success = FALSE;
status->success = false;
}
% end
%
@ -72,6 +72,6 @@ switch (insn) {
default:
if (mjit_opts.warnings || mjit_opts.verbose)
fprintf(stderr, "MJIT warning: Skipped to compile unsupported instruction: %s\n", insn_name(insn));
status->success = FALSE;
status->success = false;
break;
}

View file

@ -1643,7 +1643,7 @@ vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t
return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, 0, param, local);
}
MJIT_STATIC int
MJIT_STATIC bool
rb_simple_iseq_p(const rb_iseq_t *iseq)
{
return iseq->body->param.flags.has_opt == FALSE &&