mirror of https://github.com/ruby/ruby.git

Make mjit_cont sharable with YJIT (#6556)

* Make mjit_cont sharable with YJIT

* Update dependencies

* Update YJIT binding
Takashi Kokubun, 2022-10-17 09:27:59 -07:00, committed by GitHub
parent 07a93b1e37
commit e7c71c6c92
Notes: git 2022-10-17 16:28:19 +00:00
Merged-By: k0kubun <takashikkbn@gmail.com>
10 changed files with 160 additions and 127 deletions
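In outline, the commit replaces MJIT-private continuation bookkeeping (struct mjit_cont in mjit.c) with a JIT-neutral list owned by cont.c. A condensed view of the new surface, with every name taken from the hunks below:

    // Condensed from cont.c and internal/cont.h in this commit:
    #define jit_cont_enabled mjit_enabled   // YJIT is to be added to this condition later

    struct rb_jit_cont {                    // one node per continuation/fiber
        rb_execution_context_t *ec;         // continuation ec
        struct rb_jit_cont *prev, *next;    // doubly linked list of all nodes
    };

    void rb_fiber_init_jit_cont(struct rb_fiber_struct *fiber);  // attach a jit_cont
    void rb_jit_cont_each_iseq(rb_iseq_callback callback);       // walk on-stack ISEQs
    void rb_jit_cont_finish(void);                               // free the list at shutdown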

common.mk

@@ -3564,6 +3564,7 @@ cont.$(OBJEXT): {$(VPATH)}internal/value_type.h
cont.$(OBJEXT): {$(VPATH)}internal/variable.h
cont.$(OBJEXT): {$(VPATH)}internal/warning_push.h
cont.$(OBJEXT): {$(VPATH)}internal/xmalloc.h
cont.$(OBJEXT): {$(VPATH)}iseq.h
cont.$(OBJEXT): {$(VPATH)}method.h
cont.$(OBJEXT): {$(VPATH)}missing.h
cont.$(OBJEXT): {$(VPATH)}mjit.h
@@ -3580,6 +3581,8 @@ cont.$(OBJEXT): {$(VPATH)}thread_native.h
cont.$(OBJEXT): {$(VPATH)}vm_core.h
cont.$(OBJEXT): {$(VPATH)}vm_debug.h
cont.$(OBJEXT): {$(VPATH)}vm_opts.h
cont.$(OBJEXT): {$(VPATH)}vm_sync.h
cont.$(OBJEXT): {$(VPATH)}yjit.h
debug.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
debug.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
debug.$(OBJEXT): $(CCAN_DIR)/list/list.h
@@ -6262,6 +6265,7 @@ eval.$(OBJEXT): $(hdrdir)/ruby/ruby.h
eval.$(OBJEXT): $(top_srcdir)/internal/array.h
eval.$(OBJEXT): $(top_srcdir)/internal/class.h
eval.$(OBJEXT): $(top_srcdir)/internal/compilers.h
eval.$(OBJEXT): $(top_srcdir)/internal/cont.h
eval.$(OBJEXT): $(top_srcdir)/internal/error.h
eval.$(OBJEXT): $(top_srcdir)/internal/eval.h
eval.$(OBJEXT): $(top_srcdir)/internal/gc.h
@@ -6902,6 +6906,7 @@ gc.$(OBJEXT): {$(VPATH)}internal/variable.h
gc.$(OBJEXT): {$(VPATH)}internal/warning_push.h
gc.$(OBJEXT): {$(VPATH)}internal/xmalloc.h
gc.$(OBJEXT): {$(VPATH)}io.h
gc.$(OBJEXT): {$(VPATH)}iseq.h
gc.$(OBJEXT): {$(VPATH)}method.h
gc.$(OBJEXT): {$(VPATH)}missing.h
gc.$(OBJEXT): {$(VPATH)}mjit.h
@@ -7124,6 +7129,10 @@ goruby.$(OBJEXT): {$(VPATH)}thread_native.h
goruby.$(OBJEXT): {$(VPATH)}vm_core.h
goruby.$(OBJEXT): {$(VPATH)}vm_debug.h
goruby.$(OBJEXT): {$(VPATH)}vm_opts.h
hash.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
hash.$(OBJEXT): $(CCAN_DIR)/container_of/container_of.h
hash.$(OBJEXT): $(CCAN_DIR)/list/list.h
hash.$(OBJEXT): $(CCAN_DIR)/str/str.h
hash.$(OBJEXT): $(hdrdir)/ruby/ruby.h
hash.$(OBJEXT): $(top_srcdir)/internal/array.h
hash.$(OBJEXT): $(top_srcdir)/internal/bignum.h
@@ -7134,6 +7143,7 @@ hash.$(OBJEXT): $(top_srcdir)/internal/cont.h
hash.$(OBJEXT): $(top_srcdir)/internal/error.h
hash.$(OBJEXT): $(top_srcdir)/internal/gc.h
hash.$(OBJEXT): $(top_srcdir)/internal/hash.h
hash.$(OBJEXT): $(top_srcdir)/internal/imemo.h
hash.$(OBJEXT): $(top_srcdir)/internal/object.h
hash.$(OBJEXT): $(top_srcdir)/internal/proc.h
hash.$(OBJEXT): $(top_srcdir)/internal/serial.h
@@ -7146,6 +7156,7 @@ hash.$(OBJEXT): $(top_srcdir)/internal/variable.h
hash.$(OBJEXT): $(top_srcdir)/internal/vm.h
hash.$(OBJEXT): $(top_srcdir)/internal/warnings.h
hash.$(OBJEXT): {$(VPATH)}assert.h
hash.$(OBJEXT): {$(VPATH)}atomic.h
hash.$(OBJEXT): {$(VPATH)}backward/2/assume.h
hash.$(OBJEXT): {$(VPATH)}backward/2/attributes.h
hash.$(OBJEXT): {$(VPATH)}backward/2/bool.h
@@ -7314,21 +7325,28 @@ hash.$(OBJEXT): {$(VPATH)}internal/value_type.h
hash.$(OBJEXT): {$(VPATH)}internal/variable.h
hash.$(OBJEXT): {$(VPATH)}internal/warning_push.h
hash.$(OBJEXT): {$(VPATH)}internal/xmalloc.h
hash.$(OBJEXT): {$(VPATH)}iseq.h
hash.$(OBJEXT): {$(VPATH)}method.h
hash.$(OBJEXT): {$(VPATH)}missing.h
hash.$(OBJEXT): {$(VPATH)}node.h
hash.$(OBJEXT): {$(VPATH)}onigmo.h
hash.$(OBJEXT): {$(VPATH)}oniguruma.h
hash.$(OBJEXT): {$(VPATH)}probes.dmyh
hash.$(OBJEXT): {$(VPATH)}probes.h
hash.$(OBJEXT): {$(VPATH)}ractor.h
hash.$(OBJEXT): {$(VPATH)}ruby_assert.h
hash.$(OBJEXT): {$(VPATH)}ruby_atomic.h
hash.$(OBJEXT): {$(VPATH)}shape.h
hash.$(OBJEXT): {$(VPATH)}st.h
hash.$(OBJEXT): {$(VPATH)}subst.h
hash.$(OBJEXT): {$(VPATH)}symbol.h
hash.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
hash.$(OBJEXT): {$(VPATH)}thread_native.h
hash.$(OBJEXT): {$(VPATH)}transient_heap.h
hash.$(OBJEXT): {$(VPATH)}util.h
hash.$(OBJEXT): {$(VPATH)}vm_core.h
hash.$(OBJEXT): {$(VPATH)}vm_debug.h
hash.$(OBJEXT): {$(VPATH)}vm_opts.h
hash.$(OBJEXT): {$(VPATH)}vm_sync.h
inits.$(OBJEXT): $(hdrdir)/ruby.h
inits.$(OBJEXT): $(hdrdir)/ruby/ruby.h
@@ -13408,6 +13426,7 @@ ruby.$(OBJEXT): $(top_srcdir)/internal/array.h
ruby.$(OBJEXT): $(top_srcdir)/internal/class.h
ruby.$(OBJEXT): $(top_srcdir)/internal/cmdlineopt.h
ruby.$(OBJEXT): $(top_srcdir)/internal/compilers.h
ruby.$(OBJEXT): $(top_srcdir)/internal/cont.h
ruby.$(OBJEXT): $(top_srcdir)/internal/error.h
ruby.$(OBJEXT): $(top_srcdir)/internal/file.h
ruby.$(OBJEXT): $(top_srcdir)/internal/gc.h
@@ -13598,6 +13617,7 @@ ruby.$(OBJEXT): {$(VPATH)}internal/variable.h
ruby.$(OBJEXT): {$(VPATH)}internal/warning_push.h
ruby.$(OBJEXT): {$(VPATH)}internal/xmalloc.h
ruby.$(OBJEXT): {$(VPATH)}io.h
ruby.$(OBJEXT): {$(VPATH)}iseq.h
ruby.$(OBJEXT): {$(VPATH)}method.h
ruby.$(OBJEXT): {$(VPATH)}missing.h
ruby.$(OBJEXT): {$(VPATH)}mjit.h
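The new cont.$(OBJEXT) prerequisites mirror new includes: per the cont.c hunk below, cont.c now pulls in yjit.h and vm_sync.h, and iseq.h reaches cont.o via internal/cont.h, which now includes it for the rb_iseq_callback type. In effect:

    #include "yjit.h"     // cont.c: declarations shared with YJIT
    #include "vm_sync.h"  // cont.c: RB_VM_LOCK_ENTER/RB_VM_LOCK_LEAVE for the jit_cont list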

cont.c (133 changed lines)

@@ -34,7 +34,9 @@ extern int madvise(caddr_t, size_t, int);
#include "internal/warnings.h"
#include "ruby/fiber/scheduler.h"
#include "mjit.h"
#include "yjit.h"
#include "vm_core.h"
#include "vm_sync.h"
#include "id_table.h"
#include "ractor_core.h"
@@ -67,6 +69,8 @@ static VALUE rb_cFiberPool;
#define FIBER_POOL_ALLOCATION_FREE
#endif
#define jit_cont_enabled mjit_enabled // To be used by YJIT later
enum context_type {
CONTINUATION_CONTEXT = 0,
FIBER_CONTEXT = 1
@@ -195,6 +199,15 @@ struct fiber_pool {
size_t vm_stack_size;
};
// Continuation contexts used by JITs
struct rb_jit_cont {
rb_execution_context_t *ec; // continuation ec
struct rb_jit_cont *prev, *next; // used to form lists
};
// Doubly linked list for enumerating all on-stack ISEQs.
static struct rb_jit_cont *first_jit_cont;
typedef struct rb_context_struct {
enum context_type type;
int argc;
@@ -212,8 +225,7 @@ typedef struct rb_context_struct {
rb_execution_context_t saved_ec;
rb_jmpbuf_t jmpbuf;
rb_ensure_entry_t *ensure_array;
/* Pointer to MJIT info about the continuation. */
struct mjit_cont *mjit_cont;
struct rb_jit_cont *jit_cont; // Continuation contexts for JITs
} rb_context_t;
@@ -1000,6 +1012,8 @@ fiber_is_root_p(const rb_fiber_t *fiber)
}
#endif
static void jit_cont_free(struct rb_jit_cont *cont);
static void
cont_free(void *ptr)
{
@@ -1020,9 +1034,9 @@ cont_free(void *ptr)
RUBY_FREE_UNLESS_NULL(cont->saved_vm_stack.ptr);
if (mjit_enabled) {
VM_ASSERT(cont->mjit_cont != NULL);
mjit_cont_free(cont->mjit_cont);
if (jit_cont_enabled) {
VM_ASSERT(cont->jit_cont != NULL);
jit_cont_free(cont->jit_cont);
}
/* free rb_cont_t or rb_fiber_t */
ruby_xfree(ptr);
@@ -1187,12 +1201,98 @@ cont_save_thread(rb_context_t *cont, rb_thread_t *th)
sec->machine.stack_end = NULL;
}
static void
cont_init_mjit_cont(rb_context_t *cont)
// Register a new continuation with execution context `ec`. Return JIT info about
// the continuation.
static struct rb_jit_cont *
jit_cont_new(rb_execution_context_t *ec)
{
VM_ASSERT(cont->mjit_cont == NULL);
if (mjit_enabled) {
cont->mjit_cont = mjit_cont_new(&(cont->saved_ec));
struct rb_jit_cont *cont;
// We need to use calloc instead of something like ZALLOC to avoid triggering GC here.
// When this function is called from rb_thread_alloc through rb_threadptr_root_fiber_setup,
// the thread is still being prepared and marking it causes SEGV.
cont = calloc(1, sizeof(struct rb_jit_cont));
if (cont == NULL)
rb_memerror();
cont->ec = ec;
RB_VM_LOCK_ENTER();
if (first_jit_cont == NULL) {
cont->next = cont->prev = NULL;
}
else {
cont->prev = NULL;
cont->next = first_jit_cont;
first_jit_cont->prev = cont;
}
first_jit_cont = cont;
RB_VM_LOCK_LEAVE();
return cont;
}
// Unregister continuation `cont`.
static void
jit_cont_free(struct rb_jit_cont *cont)
{
RB_VM_LOCK_ENTER();
if (cont == first_jit_cont) {
first_jit_cont = cont->next;
if (first_jit_cont != NULL)
first_jit_cont->prev = NULL;
}
else {
cont->prev->next = cont->next;
if (cont->next != NULL)
cont->next->prev = cont->prev;
}
RB_VM_LOCK_LEAVE();
free(cont);
}
// Call a given callback against all on-stack ISEQs.
void
rb_jit_cont_each_iseq(rb_iseq_callback callback)
{
struct rb_jit_cont *cont;
for (cont = first_jit_cont; cont != NULL; cont = cont->next) {
if (cont->ec->vm_stack == NULL)
continue;
const rb_control_frame_t *cfp;
for (cfp = RUBY_VM_END_CONTROL_FRAME(cont->ec) - 1; ; cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp)) {
const rb_iseq_t *iseq;
if (cfp->pc && (iseq = cfp->iseq) != NULL && imemo_type((VALUE)iseq) == imemo_iseq) {
callback(iseq);
}
if (cfp == cont->ec->cfp)
break; // reached the most recent cfp
}
}
}
// Finish working with continuation info.
void
rb_jit_cont_finish(void)
{
if (!jit_cont_enabled)
return;
struct rb_jit_cont *cont, *next;
for (cont = first_jit_cont; cont != NULL; cont = next) {
next = cont->next;
xfree(cont);
}
}
static void
cont_init_jit_cont(rb_context_t *cont)
{
VM_ASSERT(cont->jit_cont == NULL);
if (jit_cont_enabled) {
cont->jit_cont = jit_cont_new(&(cont->saved_ec));
}
}
@@ -1211,7 +1311,7 @@ cont_init(rb_context_t *cont, rb_thread_t *th)
cont->saved_ec.local_storage = NULL;
cont->saved_ec.local_storage_recursive_hash = Qnil;
cont->saved_ec.local_storage_recursive_hash_for_trace = Qnil;
cont_init_mjit_cont(cont);
cont_init_jit_cont(cont);
}
static rb_context_t *
@@ -1242,9 +1342,9 @@ rb_fiberptr_blocking(struct rb_fiber_struct *fiber)
// This is used for root_fiber because other fibers call cont_init_mjit_cont through cont_new.
void
rb_fiber_init_mjit_cont(struct rb_fiber_struct *fiber)
rb_fiber_init_jit_cont(struct rb_fiber_struct *fiber)
{
cont_init_mjit_cont(&fiber->cont);
cont_init_jit_cont(&fiber->cont);
}
#if 0
@@ -2187,9 +2287,10 @@ rb_threadptr_root_fiber_setup(rb_thread_t *th)
fiber->blocking = 1;
fiber_status_set(fiber, FIBER_RESUMED); /* skip CREATED */
th->ec = &fiber->cont.saved_ec;
// This skips mjit_cont_new for the initial thread because mjit_enabled is always false
// at this point. mjit_init calls rb_fiber_init_mjit_cont again for this root_fiber.
rb_fiber_init_mjit_cont(fiber);
// This skips jit_cont_new for the initial thread because rb_yjit_enabled_p() and
// mjit_enabled are false at this point. ruby_opt_init will call rb_fiber_init_jit_cont
// again for this root_fiber.
rb_fiber_init_jit_cont(fiber);
}
void
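jit_cont_new and jit_cont_free above are a textbook head-insert and unlink on a doubly linked list, now guarded by the VM lock. The same pattern in isolation (standalone sketch with hypothetical names, no VM types):

    struct node {
        struct node *prev, *next;
    };

    static struct node *head;

    // Head-insert, as in jit_cont_new: the new node becomes the head.
    static void
    list_push(struct node *n)
    {
        n->prev = NULL;
        n->next = head;
        if (head != NULL)
            head->prev = n;
        head = n;
    }

    // Unlink, as in jit_cont_free: special-case the head, otherwise repair neighbors.
    static void
    list_remove(struct node *n)
    {
        if (n == head) {
            head = n->next;
            if (head != NULL)
                head->prev = NULL;
        }
        else {
            n->prev->next = n->next;
            if (n->next != NULL)
                n->next->prev = n->prev;
        }
    }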

eval.c (4 changed lines)

@@ -21,6 +21,7 @@
#include "gc.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/eval.h"
#include "internal/hash.h"
@@ -251,7 +252,8 @@ rb_ec_cleanup(rb_execution_context_t *ec, int ex0)
}
}
mjit_finish(true); // We still need ISeqs here.
mjit_finish(true); // We still need ISeqs here, so it's before rb_ec_finalize().
rb_jit_cont_finish();
rb_ec_finalize(ec);
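The ordering in rb_ec_cleanup is deliberate; sketched (rb_ec_cleanup does much more than this):

    mjit_finish(true);      // "We still need ISeqs here", so it precedes rb_ec_finalize()
    rb_jit_cont_finish();   // then tear down the shared jit_cont list
    rb_ec_finalize(ec);     // finalization runs only after the JITs are done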

internal/cont.h

@@ -9,6 +9,7 @@
* @brief Internal header for Fiber.
*/
#include "ruby/ruby.h" /* for VALUE */
#include "iseq.h"
struct rb_thread_struct; /* in vm_core.h */
struct rb_fiber_struct; /* in cont.c */
@@ -17,7 +18,9 @@ struct rb_execution_context_struct; /* in vm_core.c */
/* cont.c */
void rb_fiber_reset_root_local_storage(struct rb_thread_struct *);
void ruby_register_rollback_func_for_ensure(VALUE (*ensure_func)(VALUE), VALUE (*rollback_func)(VALUE));
void rb_fiber_init_mjit_cont(struct rb_fiber_struct *fiber);
void rb_fiber_init_jit_cont(struct rb_fiber_struct *fiber);
void rb_jit_cont_each_iseq(rb_iseq_callback callback);
void rb_jit_cont_finish(void);
VALUE rb_fiberptr_self(struct rb_fiber_struct *fiber);
unsigned int rb_fiberptr_blocking(struct rb_fiber_struct *fiber);
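Any function matching rb_iseq_callback can be handed to the new walk; a minimal hypothetical consumer (mark_iseq_units in mjit.c below is the real first user):

    // Hypothetical callback: count ISEQs live on some saved stack.
    static size_t on_stack_iseqs;

    static void
    count_iseq(const rb_iseq_t *iseq)
    {
        (void)iseq;        // a real JIT would flag iseq's generated code as in use
        on_stack_iseqs++;
    }

    // ... later, e.g. before unloading generated code:
    on_stack_iseqs = 0;
    rb_jit_cont_each_iseq(count_iseq);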

iseq.h (1 changed line)

@@ -31,6 +31,7 @@ RUBY_EXTERN const int ruby_api_version[];
typedef struct rb_iseq_struct rb_iseq_t;
#define rb_iseq_t rb_iseq_t
#endif
typedef void (*rb_iseq_callback)(const rb_iseq_t *);
extern const ID rb_iseq_shared_exc_local_tbl[];

mjit.c (103 changed lines)

@@ -951,42 +951,19 @@ mjit_capture_cc_entries(const struct rb_iseq_constant_body *compiled_iseq, const
// Set up field `used_code_p` for unit iseqs whose iseq on the stack of ec.
static void
mark_ec_units(rb_execution_context_t *ec)
mark_iseq_units(const rb_iseq_t *iseq)
{
const rb_control_frame_t *cfp;
if (ec->vm_stack == NULL)
return;
for (cfp = RUBY_VM_END_CONTROL_FRAME(ec) - 1; ; cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp)) {
const rb_iseq_t *iseq;
if (cfp->pc && (iseq = cfp->iseq) != NULL
&& imemo_type((VALUE) iseq) == imemo_iseq
&& (ISEQ_BODY(iseq)->jit_unit) != NULL) {
ISEQ_BODY(iseq)->jit_unit->used_code_p = true;
}
if (cfp == ec->cfp)
break; // reached the most recent cfp
if (ISEQ_BODY(iseq)->jit_unit != NULL) {
ISEQ_BODY(iseq)->jit_unit->used_code_p = true;
}
}
// MJIT info related to an existing continuation.
struct mjit_cont {
rb_execution_context_t *ec; // continuation ec
struct mjit_cont *prev, *next; // used to form lists
};
// Doubly linked list of registered continuations. This is used to detect
// units which are in use in unload_units.
static struct mjit_cont *first_cont;
// Unload JIT code of some units to satisfy the maximum permitted
// number of units with a loaded code.
static void
unload_units(void)
{
struct rb_mjit_unit *unit = 0, *next;
struct mjit_cont *cont;
int units_num = active_units.length;
// For now, we don't unload units when ISeq is GCed. We should
@@ -1005,9 +982,7 @@ unload_units(void)
}
// All threads have a root_fiber which has a mjit_cont. Other normal fibers also
// have a mjit_cont. Thus we can check ISeqs in use by scanning ec of mjit_conts.
for (cont = first_cont; cont != NULL; cont = cont->next) {
mark_ec_units(cont->ec);
}
rb_jit_cont_each_iseq(mark_iseq_units);
// TODO: check stale_units and unload unused ones! (note that the unit is not associated to ISeq anymore)
// Unload units whose total_calls is smaller than any total_calls in unit_queue.
@@ -1163,68 +1138,6 @@ free_list(struct rb_mjit_unit_list *list, bool close_handle_p)
list->length = 0;
}
// Register a new continuation with execution context `ec`. Return MJIT info about
// the continuation.
struct mjit_cont *
mjit_cont_new(rb_execution_context_t *ec)
{
struct mjit_cont *cont;
// We need to use calloc instead of something like ZALLOC to avoid triggering GC here.
// When this function is called from rb_thread_alloc through rb_threadptr_root_fiber_setup,
// the thread is still being prepared and marking it causes SEGV.
cont = calloc(1, sizeof(struct mjit_cont));
if (cont == NULL)
rb_memerror();
cont->ec = ec;
CRITICAL_SECTION_START(3, "in mjit_cont_new");
if (first_cont == NULL) {
cont->next = cont->prev = NULL;
}
else {
cont->prev = NULL;
cont->next = first_cont;
first_cont->prev = cont;
}
first_cont = cont;
CRITICAL_SECTION_FINISH(3, "in mjit_cont_new");
return cont;
}
// Unregister continuation `cont`.
void
mjit_cont_free(struct mjit_cont *cont)
{
CRITICAL_SECTION_START(3, "in mjit_cont_new");
if (cont == first_cont) {
first_cont = cont->next;
if (first_cont != NULL)
first_cont->prev = NULL;
}
else {
cont->prev->next = cont->next;
if (cont->next != NULL)
cont->next->prev = cont->prev;
}
CRITICAL_SECTION_FINISH(3, "in mjit_cont_new");
free(cont);
}
// Finish work with continuation info.
static void
finish_conts(void)
{
struct mjit_cont *cont, *next;
for (cont = first_cont; cont != NULL; cont = next) {
next = cont->next;
xfree(cont);
}
}
static void mjit_wait(struct rb_iseq_constant_body *body);
// Check the unit queue and start mjit_compile if nothing is in progress.
@@ -1889,13 +1802,6 @@ mjit_init(const struct mjit_options *opts)
rb_native_cond_initialize(&mjit_worker_wakeup);
rb_native_cond_initialize(&mjit_gc_wakeup);
// Make sure the saved_ec of the initial thread's root_fiber is scanned by mark_ec_units.
//
// rb_threadptr_root_fiber_setup for the initial thread is called before mjit_init,
// meaning mjit_cont_new is skipped for the root_fiber. Therefore we need to call
// rb_fiber_init_mjit_cont again with mjit_enabled=true to set the root_fiber's mjit_cont.
rb_fiber_init_mjit_cont(GET_EC()->fiber_ptr);
// If --mjit=pause is given, lazily start MJIT when RubyVM::MJIT.resume is called.
// You can use it to control MJIT warmup, or to customize the JIT implementation.
if (!mjit_opts.pause) {
@@ -2052,7 +1958,6 @@ mjit_finish(bool close_handle_p)
free_list(&active_units, close_handle_p);
free_list(&compact_units, close_handle_p);
free_list(&stale_units, close_handle_p);
finish_conts();
mjit_enabled = false;
verbose(1, "Successful MJIT finish");
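Two things move here: the control-frame walk itself relocates to cont.c, and list mutation switches from MJIT's CRITICAL_SECTION macros to RB_VM_LOCK_ENTER/LEAVE, presumably so the list no longer depends on MJIT's worker-thread machinery. MJIT's mark phase in unload_units thus shrinks to one call (condensed from the hunk above):

    // Flag every unit whose ISEQ sits on some saved stack before choosing what to unload:
    rb_jit_cont_each_iseq(mark_iseq_units);   // sets unit->used_code_p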

mjit.h (4 changed lines)

@@ -101,8 +101,6 @@ extern void mjit_init(const struct mjit_options *opts);
extern void mjit_free_iseq(const rb_iseq_t *iseq);
extern void mjit_update_references(const rb_iseq_t *iseq);
extern void mjit_mark(void);
extern struct mjit_cont *mjit_cont_new(rb_execution_context_t *ec);
extern void mjit_cont_free(struct mjit_cont *cont);
extern void mjit_mark_cc_entries(const struct rb_iseq_constant_body *const body);
extern void mjit_notify_waitpid(int exit_code);
@@ -120,8 +118,6 @@ void mjit_finish(bool close_handle_p);
# else // USE_MJIT
static inline void mjit_cancel_all(const char *reason){}
static inline struct mjit_cont *mjit_cont_new(rb_execution_context_t *ec){return NULL;}
static inline void mjit_cont_free(struct mjit_cont *cont){}
static inline void mjit_free_iseq(const rb_iseq_t *iseq){}
static inline void mjit_mark(void){}
static inline VALUE jit_exec(rb_execution_context_t *ec) { return Qundef; /* unreachable */ }
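mjit.h keeps its usual two-sided shape: real declarations under USE_MJIT, empty inline stubs otherwise; the mjit_cont pair simply disappears from both sides. One representative pair that remains, for illustration:

    #if USE_MJIT
    extern void mjit_mark(void);
    #else
    static inline void mjit_mark(void) {}   // no-op when MJIT is compiled out
    #endif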

ruby.c (7 changed lines)

@@ -44,6 +44,7 @@
#include "eval_intern.h"
#include "internal.h"
#include "internal/cmdlineopt.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/file.h"
#include "internal/inits.h"
@@ -1618,6 +1619,12 @@ ruby_opt_init(ruby_cmdline_options_t *opt)
rb_call_builtin_inits();
ruby_init_prelude();
// Make sure the saved_ec of the initial thread's root_fiber is scanned by rb_jit_cont_each_iseq.
//
// rb_threadptr_root_fiber_setup for the initial thread is called before rb_yjit_enabled_p()
// or mjit_enabled becomes true, meaning jit_cont_new is skipped for the root_fiber.
// Therefore we need to call this again here to set the root_fiber's jit_cont.
rb_fiber_init_jit_cont(GET_EC()->fiber_ptr);
#if USE_MJIT
// mjit_init is safe only after rb_call_builtin_inits defines RubyVM::MJIT::Compiler
if (opt->mjit.on)

yjit.c (6 changed lines)

@@ -894,13 +894,11 @@ rb_assert_cme_handle(VALUE handle)
RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(handle, imemo_ment));
}
typedef void (*iseq_callback)(const rb_iseq_t *);
// Heap-walking callback for rb_yjit_for_each_iseq().
static int
for_each_iseq_i(void *vstart, void *vend, size_t stride, void *data)
{
const iseq_callback callback = (iseq_callback)data;
const rb_iseq_callback callback = (rb_iseq_callback)data;
VALUE v = (VALUE)vstart;
for (; v != (VALUE)vend; v += stride) {
void *ptr = asan_poisoned_object_p(v);
@@ -919,7 +917,7 @@ for_each_iseq_i(void *vstart, void *vend, size_t stride, void *data)
// Iterate through the whole GC heap and invoke a callback for each iseq.
// Used for global code invalidation.
void
rb_yjit_for_each_iseq(iseq_callback callback)
rb_yjit_for_each_iseq(rb_iseq_callback callback)
{
rb_objspace_each_objects(for_each_iseq_i, (void *)callback);
}
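With the typedef shared, YJIT's heap walk and the new continuation walk take the same callback type, so one function can serve both passes; a hypothetical sketch:

    static void
    touch_iseq(const rb_iseq_t *iseq)
    {
        (void)iseq;   // e.g. patch or re-verify this iseq's generated code
    }

    // ...
    rb_yjit_for_each_iseq(touch_iseq);   // every iseq in the GC heap
    rb_jit_cont_each_iseq(touch_iseq);   // only iseqs on saved continuation stacks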

yjit/src/cruby_bindings.inc.rs

@@ -1244,6 +1244,7 @@ pub const YARVINSN_trace_putobject_INT2FIX_0_: ruby_vminsn_type = 200;
pub const YARVINSN_trace_putobject_INT2FIX_1_: ruby_vminsn_type = 201;
pub const VM_INSTRUCTION_SIZE: ruby_vminsn_type = 202;
pub type ruby_vminsn_type = u32;
pub type rb_iseq_callback = ::std::option::Option<unsafe extern "C" fn(arg1: *const rb_iseq_t)>;
extern "C" {
pub fn rb_vm_insn_addr2opcode(addr: *const ::std::os::raw::c_void) -> ::std::os::raw::c_int;
}
@@ -1538,9 +1539,8 @@ extern "C" {
extern "C" {
pub fn rb_assert_cme_handle(handle: VALUE);
}
pub type iseq_callback = ::std::option::Option<unsafe extern "C" fn(arg1: *const rb_iseq_t)>;
extern "C" {
pub fn rb_yjit_for_each_iseq(callback: iseq_callback);
pub fn rb_yjit_for_each_iseq(callback: rb_iseq_callback);
}
extern "C" {
pub fn rb_yjit_obj_written(