1
0
Fork 0
mirror of https://github.com/ruby/ruby.git synced 2022-11-09 12:17:21 -05:00

Revert "Finer-grained inline constant cache invalidation"

This reverts commits for [Feature #18589]:
* 8008fb7352
  "Update formatting per feedback"
* 8f6eaca2e1
  "Delete ID from constant cache table if it becomes empty on ISEQ free"
* 629908586b
  "Finer-grained inline constant cache invalidation"

MSWin builds on AppVeyor have been crashing since the merge.
This commit is contained in:
Nobuyoshi Nakada 2022-03-25 20:29:09 +09:00 committed by GitHub
parent 7ee26740e4
commit 69967ee64e
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
Notes: git 2022-03-25 20:29:29 +09:00
Merged: https://github.com/ruby/ruby/pull/5715

Merged-By: nobu <nobu@ruby-lang.org>
18 changed files with 82 additions and 466 deletions

View file

@ -1,22 +0,0 @@
$VERBOSE = nil
CONSTANT1 = 1
CONSTANT2 = 1
CONSTANT3 = 1
CONSTANT4 = 1
CONSTANT5 = 1
def constants
[CONSTANT1, CONSTANT2, CONSTANT3, CONSTANT4, CONSTANT5]
end
500_000.times do
constants
# With previous behavior, this would cause all of the constant caches
# associated with the constant lookups listed above to invalidate, meaning
# they would all have to be fetched again. With current behavior, it only
# invalidates when a name matches, so the following constant set shouldn't
# impact the constant lookups listed above.
INVALIDATE = true
end

View file

@ -1,187 +0,0 @@
# Constant lookup is cached.
assert_equal '1', %q{
CONST = 1
def const
CONST
end
const
const
}
# Invalidate when a constant is set.
assert_equal '2', %q{
CONST = 1
def const
CONST
end
const
CONST = 2
const
}
# Invalidate when a constant of the same name is set.
assert_equal '1', %q{
CONST = 1
def const
CONST
end
const
class Container
CONST = 2
end
const
}
# Invalidate when a constant is removed.
assert_equal 'missing', %q{
class Container
CONST = 1
def const
CONST
end
def self.const_missing(name)
'missing'
end
new.const
remove_const :CONST
end
Container.new.const
}
# Invalidate when a constant's visibility changes.
assert_equal 'missing', %q{
class Container
CONST = 1
def self.const_missing(name)
'missing'
end
end
def const
Container::CONST
end
const
Container.private_constant :CONST
const
}
# Invalidate when a constant's visibility changes even if the call to the
# visibility change method fails.
assert_equal 'missing', %q{
class Container
CONST1 = 1
def self.const_missing(name)
'missing'
end
end
def const1
Container::CONST1
end
const1
begin
Container.private_constant :CONST1, :CONST2
rescue NameError
end
const1
}
# Invalidate when a module is included.
assert_equal 'INCLUDE', %q{
module Include
CONST = :INCLUDE
end
class Parent
CONST = :PARENT
end
class Child < Parent
def const
CONST
end
new.const
include Include
end
Child.new.const
}
# Invalidate when const_missing is hit.
assert_equal '2', %q{
module Container
Foo = 1
Bar = 2
class << self
attr_accessor :count
def const_missing(name)
@count += 1
@count == 1 ? Foo : Bar
end
end
@count = 0
end
def const
Container::Baz
end
const
const
}
# Invalidate when the iseq gets cleaned up.
assert_equal '2', %q{
CONSTANT = 1
iseq = RubyVM::InstructionSequence.compile(<<~RUBY)
CONSTANT
RUBY
iseq.eval
iseq = nil
GC.start
CONSTANT = 2
}
# Invalidate when the iseq gets cleaned up even if it was never in the cache.
assert_equal '2', %q{
CONSTANT = 1
iseq = RubyVM::InstructionSequence.compile(<<~RUBY)
CONSTANT
RUBY
iseq = nil
GC.start
CONSTANT = 2
}

16
class.c
View file

@ -1169,20 +1169,11 @@ module_in_super_chain(const VALUE klass, VALUE module)
return false;
}
// For each ID key in the class constant table, we're going to clear the VM's
// inline constant caches associated with it.
static enum rb_id_table_iterator_result
clear_constant_cache_i(ID id, VALUE value, void *data)
{
rb_clear_constant_cache_for_id(id);
return ID_TABLE_CONTINUE;
}
static int
do_include_modules_at(const VALUE klass, VALUE c, VALUE module, int search_super, bool check_cyclic)
{
VALUE p, iclass, origin_stack = 0;
int method_changed = 0, add_subclass;
int method_changed = 0, constant_changed = 0, add_subclass;
long origin_len;
VALUE klass_origin = RCLASS_ORIGIN(klass);
VALUE original_klass = klass;
@ -1275,12 +1266,13 @@ do_include_modules_at(const VALUE klass, VALUE c, VALUE module, int search_super
}
tbl = RCLASS_CONST_TBL(module);
if (tbl && rb_id_table_size(tbl))
rb_id_table_foreach(tbl, clear_constant_cache_i, (void *) 0);
if (tbl && rb_id_table_size(tbl)) constant_changed = 1;
skip:
module = RCLASS_SUPER(module);
}
if (constant_changed) rb_clear_constant_cache();
return method_changed;
}

View file

@ -15,6 +15,8 @@
#define RBIMPL_ATTR_DEPRECATED_INTERNAL(ver) RBIMPL_ATTR_DEPRECATED(("since "#ver", also internal"))
#define RBIMPL_ATTR_DEPRECATED_INTERNAL_ONLY() RBIMPL_ATTR_DEPRECATED(("only for internal use"))
RBIMPL_ATTR_DEPRECATED_INTERNAL_ONLY() void rb_clear_constant_cache(void);
/* from version.c */
#if defined(RUBY_SHOW_COPYRIGHT_TO_DIE) && !!(RUBY_SHOW_COPYRIGHT_TO_DIE+0)
# error RUBY_SHOW_COPYRIGHT_TO_DIE is deprecated

View file

@ -252,13 +252,6 @@ void rb_undef_alloc_func(VALUE klass);
*/
rb_alloc_func_t rb_get_alloc_func(VALUE klass);
/**
* Clears the inline constant caches associated with a particular ID. Extension
* libraries should not bother with such things. Just forget about this API (or
* even, the presence of constant caches).
*/
void rb_clear_constant_cache_for_id(ID id);
/**
* Resembles `alias`.
*

View file

@ -1028,17 +1028,6 @@ opt_getinlinecache
(VALUE val)
{
struct iseq_inline_constant_cache_entry *ice = ic->entry;
// If there isn't an entry, then we're going to walk through the ISEQ
// starting at this instruction until we get to the associated
// opt_setinlinecache and associate this inline cache with every getconstant
// listed in between. We're doing this here instead of when the instructions
// are first compiled because it's possible to turn off inline caches and we
// want this to work in either case.
if (!ice) {
vm_ic_compile(GET_CFP(), ic);
}
if (ice && vm_ic_hit_p(ice, GET_EP())) {
val = ice->value;
JUMP(dst);

View file

@ -47,6 +47,7 @@ VALUE rb_obj_is_thread(VALUE obj);
void rb_vm_mark(void *ptr);
void rb_vm_each_stack_value(void *ptr, void (*cb)(VALUE, void*), void *ctx);
PUREFUNC(VALUE rb_vm_top_self(void));
void rb_vm_inc_const_missing_count(void);
const void **rb_vm_get_insns_address_table(void);
VALUE rb_source_location(int *pline);
const char *rb_source_location_cstr(int *pline);

90
iseq.c
View file

@ -102,69 +102,12 @@ compile_data_free(struct iseq_compile_data *compile_data)
}
}
struct iseq_clear_ic_references_data {
IC ic;
};
// This iterator is used to walk through the instructions and clean any
// references to ICs that are contained within this ISEQ out of the VM's
// constant cache table. It passes around a struct that holds the current IC
// we're looking for, which can be NULL (if we haven't hit an opt_getinlinecache
// instruction yet) or set to an IC (if we've hit an opt_getinlinecache and
// haven't yet hit the associated opt_setinlinecache).
static bool
iseq_clear_ic_references_i(VALUE *code, VALUE insn, size_t index, void *data)
{
struct iseq_clear_ic_references_data *ic_data = (struct iseq_clear_ic_references_data *) data;
switch (insn) {
case BIN(opt_getinlinecache): {
ic_data->ic = (IC) code[index + 2];
return true;
}
case BIN(getconstant): {
ID id = (ID) code[index + 1];
rb_vm_t *vm = GET_VM();
st_table *ics;
if (rb_id_table_lookup(vm->constant_cache, id, (VALUE *) &ics)) {
st_delete(ics, (st_data_t *) &ic_data->ic, (st_data_t *) NULL);
if (ics->num_entries == 0) {
rb_id_table_delete(vm->constant_cache, id);
st_free_table(ics);
}
}
return true;
}
case BIN(opt_setinlinecache): {
ic_data->ic = NULL;
return true;
}
default:
return true;
}
}
// When an ISEQ is being freed, all of its associated ICs are going to go away
// as well. Because of this, we need to walk through the ISEQ, find any
// opt_getinlinecache calls, and clear out the VM's constant cache of associated
// ICs.
static void
iseq_clear_ic_references(const rb_iseq_t *iseq)
{
struct iseq_clear_ic_references_data data = { .ic = NULL };
rb_iseq_each(iseq, 0, iseq_clear_ic_references_i, (void *) &data);
}
void
rb_iseq_free(const rb_iseq_t *iseq)
{
RUBY_FREE_ENTER("iseq");
if (iseq && ISEQ_BODY(iseq)) {
iseq_clear_ic_references(iseq);
struct rb_iseq_constant_body *const body = ISEQ_BODY(iseq);
mjit_free_iseq(iseq); /* Notify MJIT */
rb_yjit_iseq_free(body);
@ -307,39 +250,6 @@ rb_iseq_each_value(const rb_iseq_t *iseq, iseq_value_itr_t * func, void *data)
}
}
// Similar to rb_iseq_each_value, except that this walks through each
// instruction instead of the associated VALUEs. The provided iterator should
// return a boolean that indicates whether or not to continue iterating.
void
rb_iseq_each(const rb_iseq_t *iseq, size_t start_index, rb_iseq_each_i iterator, void *data)
{
unsigned int size;
VALUE *code;
size_t index;
rb_vm_insns_translator_t *const translator =
#if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
(FL_TEST((VALUE)iseq, ISEQ_TRANSLATED)) ? rb_vm_insn_addr2insn2 :
#endif
rb_vm_insn_null_translator;
const struct rb_iseq_constant_body *const body = iseq->body;
size = body->iseq_size;
code = body->iseq_encoded;
for (index = start_index; index < size;) {
void *addr = (void *) code[index];
VALUE insn = translator(addr);
if (!iterator(code, insn, index, data)) {
break;
}
index += insn_len(insn);
}
}
static VALUE
update_each_insn_value(void *ctx, VALUE obj)
{

3
iseq.h
View file

@ -182,9 +182,6 @@ void rb_iseq_build_from_ary(rb_iseq_t *iseq, VALUE misc,
void rb_iseq_mark_insn_storage(struct iseq_compile_data_storage *arena);
/* iseq.c */
typedef bool rb_iseq_each_i(VALUE *code, VALUE insn, size_t index, void *data);
void rb_iseq_each(const rb_iseq_t *iseq, size_t start_index, rb_iseq_each_i iterator, void *data);
VALUE rb_iseq_load(VALUE data, VALUE parent, VALUE opt);
VALUE rb_iseq_parameters(const rb_iseq_t *iseq, int is_proc);
unsigned int rb_iseq_line_no(const rb_iseq_t *iseq, size_t pos);

View file

@ -4,11 +4,11 @@ require 'test/unit'
class TestRubyVM < Test::Unit::TestCase
def test_stat
assert_kind_of Hash, RubyVM.stat
assert_kind_of Integer, RubyVM.stat[:class_serial]
assert_kind_of Integer, RubyVM.stat[:global_constant_state]
RubyVM.stat(stat = {})
assert_not_empty stat
assert_equal stat[:class_serial], RubyVM.stat(:class_serial)
assert_equal stat[:global_constant_state], RubyVM.stat(:global_constant_state)
end
def test_stat_unknown

View file

@ -13,9 +13,9 @@
% # compiler: Capture IC values, locking getinlinecache
struct iseq_inline_constant_cache_entry *ice = ic->entry;
if (ice != NULL && !status->compile_info->disable_const_cache) {
if (ice != NULL && GET_IC_SERIAL(ice) && !status->compile_info->disable_const_cache) {
% # JIT: Inline everything in IC, and cancel the slow path
fprintf(f, " if (vm_inlined_ic_hit_p(0x%"PRIxVALUE", 0x%"PRIxVALUE", (const rb_cref_t *)0x%"PRIxVALUE", reg_cfp->ep)) {", ice->flags, ice->value, (VALUE)ice->ic_cref);
fprintf(f, " if (vm_inlined_ic_hit_p(0x%"PRIxVALUE", 0x%"PRIxVALUE", (const rb_cref_t *)0x%"PRIxVALUE", %"PRI_SERIALT_PREFIX"u, reg_cfp->ep)) {", ice->flags, ice->value, (VALUE)ice->ic_cref, GET_IC_SERIAL(ice));
fprintf(f, " stack[%d] = 0x%"PRIxVALUE";\n", b->stack_size, ice->value);
fprintf(f, " goto label_%d;\n", pos + insn_len(insn) + (int)dst);
fprintf(f, " }");

View file

@ -2848,7 +2848,7 @@ rb_const_remove(VALUE mod, ID id)
undefined_constant(mod, ID2SYM(id));
}
rb_clear_constant_cache_for_id(id);
rb_clear_constant_cache();
val = ce->value;
if (val == Qundef) {
@ -3132,7 +3132,7 @@ rb_const_set(VALUE klass, ID id, VALUE val)
struct rb_id_table *tbl = RCLASS_CONST_TBL(klass);
if (!tbl) {
RCLASS_CONST_TBL(klass) = tbl = rb_id_table_create(0);
rb_clear_constant_cache_for_id(id);
rb_clear_constant_cache();
ce = ZALLOC(rb_const_entry_t);
rb_id_table_insert(tbl, id, (VALUE)ce);
setup_const_entry(ce, klass, val, CONST_PUBLIC);
@ -3210,7 +3210,7 @@ const_tbl_update(struct autoload_const *ac)
struct autoload_data_i *ele = current_autoload_data(klass, id, &ac);
if (ele) {
rb_clear_constant_cache_for_id(id);
rb_clear_constant_cache();
ac->value = val; /* autoload_i is non-WB-protected */
ac->file = rb_source_location(&ac->line);
@ -3238,11 +3238,11 @@ const_tbl_update(struct autoload_const *ac)
"previous definition of %"PRIsVALUE" was here", name);
}
}
rb_clear_constant_cache_for_id(id);
rb_clear_constant_cache();
setup_const_entry(ce, klass, val, visibility);
}
else {
rb_clear_constant_cache_for_id(id);
rb_clear_constant_cache();
ce = ZALLOC(rb_const_entry_t);
rb_id_table_insert(tbl, id, (VALUE)ce);
@ -3297,6 +3297,10 @@ set_const_visibility(VALUE mod, int argc, const VALUE *argv,
VALUE val = argv[i];
id = rb_check_id(&val);
if (!id) {
if (i > 0) {
rb_clear_constant_cache();
}
undefined_constant(mod, val);
}
if ((ce = rb_const_lookup(mod, id))) {
@ -3311,12 +3315,15 @@ set_const_visibility(VALUE mod, int argc, const VALUE *argv,
ac->flag |= flag;
}
}
rb_clear_constant_cache_for_id(id);
}
else {
if (i > 0) {
rb_clear_constant_cache();
}
undefined_constant(mod, ID2SYM(id));
}
}
rb_clear_constant_cache();
}
void

61
vm.c
View file

@ -496,16 +496,6 @@ rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id,
return FALSE;
}
// Iterator function to loop through each entry in the constant cache and add
// its associated size into the given Hash.
static enum rb_id_table_iterator_result
vm_stat_constant_cache_i(ID id, VALUE table, void *constant_cache)
{
st_index_t size = ((st_table *) table)->num_entries;
rb_hash_aset((VALUE) constant_cache, ID2SYM(id), LONG2NUM(size));
return ID_TABLE_CONTINUE;
}
/*
* call-seq:
* RubyVM.stat -> Hash
@ -514,10 +504,10 @@ vm_stat_constant_cache_i(ID id, VALUE table, void *constant_cache)
*
* Returns a Hash containing implementation-dependent counters inside the VM.
*
* This hash includes information about method/constant caches:
* This hash includes information about method/constant cache serials:
*
* {
* :constant_cache=>{:RubyVM=>3},
* :global_constant_state=>481,
* :class_serial=>9029
* }
*
@ -526,10 +516,11 @@ vm_stat_constant_cache_i(ID id, VALUE table, void *constant_cache)
*
* This method is only expected to work on C Ruby.
*/
static VALUE
vm_stat(int argc, VALUE *argv, VALUE self)
{
static VALUE sym_constant_cache, sym_class_serial, sym_global_cvar_state;
static VALUE sym_global_constant_state, sym_class_serial, sym_global_cvar_state;
VALUE arg = Qnil;
VALUE hash = Qnil, key = Qnil;
@ -546,11 +537,13 @@ vm_stat(int argc, VALUE *argv, VALUE self)
hash = rb_hash_new();
}
if (sym_global_constant_state == 0) {
#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
S(constant_cache);
S(global_constant_state);
S(class_serial);
S(global_cvar_state);
#undef S
}
#define SET(name, attr) \
if (key == sym_##name) \
@ -558,25 +551,11 @@ vm_stat(int argc, VALUE *argv, VALUE self)
else if (hash != Qnil) \
rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr));
SET(global_constant_state, ruby_vm_global_constant_state);
SET(class_serial, ruby_vm_class_serial);
SET(global_cvar_state, ruby_vm_global_cvar_state);
#undef SET
// Here we're going to set up the constant cache hash that has key-value
// pairs of { name => count }, where name is a Symbol that represents the
// ID in the cache and count is an Integer representing the number of inline
// constant caches associated with that Symbol.
if (key == sym_constant_cache || hash != Qnil) {
VALUE constant_cache = rb_hash_new();
rb_id_table_foreach(GET_VM()->constant_cache, vm_stat_constant_cache_i, (void *) constant_cache);
if (key == sym_constant_cache) {
return constant_cache;
} else {
rb_hash_aset(hash, sym_constant_cache, constant_cache);
}
}
if (!NIL_P(key)) { /* matched key should return above */
rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
}
@ -2836,26 +2815,6 @@ size_t rb_vm_memsize_workqueue(struct list_head *workqueue); // vm_trace.c
// Used for VM memsize reporting. Returns the size of the at_exit list by
// looping through the linked list and adding up the size of the structs.
static enum rb_id_table_iterator_result
vm_memsize_constant_cache_i(ID id, VALUE ics, void *size)
{
*((size_t *) size) += rb_st_memsize((st_table *) ics);
return ID_TABLE_CONTINUE;
}
// Returns a size_t representing the memory footprint of the VM's constant
// cache, which is the memsize of the table as well as the memsize of all of the
// nested tables.
static size_t
vm_memsize_constant_cache(void)
{
rb_vm_t *vm = GET_VM();
size_t size = rb_id_table_memsize(vm->constant_cache);
rb_id_table_foreach(vm->constant_cache, vm_memsize_constant_cache_i, &size);
return size;
}
static size_t
vm_memsize_at_exit_list(rb_at_exit_list *at_exit)
{
@ -2899,8 +2858,7 @@ vm_memsize(const void *ptr)
rb_st_memsize(vm->frozen_strings) +
vm_memsize_builtin_function_table(vm->builtin_function_table) +
rb_id_table_memsize(vm->negative_cme_table) +
rb_st_memsize(vm->overloaded_cme_table) +
vm_memsize_constant_cache()
rb_st_memsize(vm->overloaded_cme_table)
);
// TODO
@ -3974,7 +3932,6 @@ Init_BareVM(void)
ruby_current_vm_ptr = vm;
vm->negative_cme_table = rb_id_table_create(16);
vm->overloaded_cme_table = st_init_numtable();
vm->constant_cache = rb_id_table_create(0);
Init_native_thread(th);
th->vm = vm;

View file

@ -229,14 +229,44 @@ struct iseq_inline_constant_cache_entry {
VALUE flags;
VALUE value; // v0
VALUE _unused1; // v1
VALUE _unused2; // v2
union ic_serial_entry ic_serial; // v1, v2
const rb_cref_t *ic_cref; // v3
};
STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
(offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
sizeof(const rb_cref_t *)) <= sizeof(struct RObject));
#if SIZEOF_SERIAL_T <= SIZEOF_VALUE
#define GET_IC_SERIAL(ice) (ice)->ic_serial.raw
#define SET_IC_SERIAL(ice, v) (ice)->ic_serial.raw = (v)
#else
static inline rb_serial_t
get_ic_serial(const struct iseq_inline_constant_cache_entry *ice)
{
union ic_serial_entry tmp;
tmp.data[0] = ice->ic_serial.data[0];
tmp.data[1] = ice->ic_serial.data[1];
return tmp.raw;
}
#define GET_IC_SERIAL(ice) get_ic_serial(ice)
static inline void
set_ic_serial(struct iseq_inline_constant_cache_entry *ice, rb_serial_t v)
{
union ic_serial_entry tmp;
tmp.raw = v;
ice->ic_serial.data[0] = tmp.data[0];
ice->ic_serial.data[1] = tmp.data[1];
}
#define SET_IC_SERIAL(ice, v) set_ic_serial((ice), (v))
#endif
struct iseq_inline_constant_cache {
struct iseq_inline_constant_cache_entry *entry;
// For YJIT: the index to the opt_getinlinecache instruction in the same iseq.
@ -692,12 +722,6 @@ typedef struct rb_vm_struct {
struct rb_id_table *negative_cme_table;
st_table *overloaded_cme_table; // cme -> overloaded_cme
// This id table contains a mapping from ID to ICs. It does this with ID
// keys and nested st_tables as values. The nested tables have ICs as keys
// and Qtrue as values. It is used when inline constant caches need to be
// invalidated or ISEQs are being freed.
struct rb_id_table *constant_cache;
#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
#endif

View file

@ -4926,47 +4926,13 @@ vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr)
#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
// For each getconstant, associate the ID that corresponds to the first operand
// to that instruction with the inline cache.
static bool
vm_ic_compile_i(VALUE *code, VALUE insn, size_t index, void *ic)
{
if (insn == BIN(opt_setinlinecache)) {
return false;
}
if (insn == BIN(getconstant)) {
ID id = code[index + 1];
rb_vm_t *vm = GET_VM();
st_table *ics;
if (!rb_id_table_lookup(vm->constant_cache, id, (VALUE *) &ics)) {
ics = st_init_numtable();
rb_id_table_insert(vm->constant_cache, id, (VALUE) ics);
}
st_insert(ics, (st_data_t) ic, (st_data_t) Qtrue);
}
return true;
}
// Loop through the instruction sequences starting at the opt_getinlinecache
// call and gather up every getconstant's ID. Associate that with the VM's
// constant cache so that whenever one of the constants changes the inline cache
// will get busted.
static void
vm_ic_compile(rb_control_frame_t *cfp, IC ic)
{
const rb_iseq_t *iseq = cfp->iseq;
rb_iseq_each(iseq, cfp->pc - iseq->body->iseq_encoded, vm_ic_compile_i, (void *) ic);
}
// For MJIT inlining
static inline bool
vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, const VALUE *reg_ep)
vm_inlined_ic_hit_p(VALUE flags, VALUE value, const rb_cref_t *ic_cref, rb_serial_t ic_serial, const VALUE *reg_ep)
{
if ((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p()) {
if (ic_serial == GET_GLOBAL_CONSTANT_STATE() &&
((flags & IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p())) {
VM_ASSERT((flags & IMEMO_CONST_CACHE_SHAREABLE) ? rb_ractor_shareable_p(value) : true);
return (ic_cref == NULL || // no need to check CREF
@ -4979,7 +4945,7 @@ static bool
vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
{
VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, reg_ep);
return vm_inlined_ic_hit_p(ice->flags, ice->value, ice->ic_cref, GET_IC_SERIAL(ice), reg_ep);
}
// YJIT needs this function to never allocate and never raise
@ -4992,16 +4958,13 @@ rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep)
static void
vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep)
{
if (ruby_vm_const_missing_count > 0) {
ruby_vm_const_missing_count = 0;
ic->entry = NULL;
return;
}
struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)rb_imemo_new(imemo_constcache, 0, 0, 0, 0);
RB_OBJ_WRITE(ice, &ice->value, val);
ice->ic_cref = vm_get_const_key_cref(reg_ep);
SET_IC_SERIAL(ice, GET_GLOBAL_CONSTANT_STATE() - ruby_vm_const_missing_count);
if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
ruby_vm_const_missing_count = 0;
RB_OBJ_WRITE(iseq, &ic->entry, ice);
#ifndef MJIT_HEADER
// MJIT and YJIT can't be on at the same time, so there is no need to

View file

@ -14,6 +14,7 @@
MJIT_SYMBOL_EXPORT_BEGIN
RUBY_EXTERN VALUE ruby_vm_const_missing_count;
RUBY_EXTERN rb_serial_t ruby_vm_global_constant_state;
RUBY_EXTERN rb_serial_t ruby_vm_class_serial;
RUBY_EXTERN rb_serial_t ruby_vm_global_cvar_state;
@ -182,6 +183,8 @@ CC_SET_FASTPATH(const struct rb_callcache *cc, vm_call_handler func, bool enable
#define PREV_CLASS_SERIAL() (ruby_vm_class_serial)
#define NEXT_CLASS_SERIAL() (++ruby_vm_class_serial)
#define GET_GLOBAL_CONSTANT_STATE() (ruby_vm_global_constant_state)
#define INC_GLOBAL_CONSTANT_STATE() (++ruby_vm_global_constant_state)
#define GET_GLOBAL_CVAR_STATE() (ruby_vm_global_cvar_state)
#define INC_GLOBAL_CVAR_STATE() (++ruby_vm_global_cvar_state)

View file

@ -126,27 +126,11 @@ vm_cme_invalidate(rb_callable_method_entry_t *cme)
rb_yjit_cme_invalidate((VALUE)cme);
}
static int
rb_clear_constant_cache_for_id_i(st_data_t ic, st_data_t idx, st_data_t arg)
{
((IC) ic)->entry = NULL;
return ST_CONTINUE;
}
// Here for backward compat.
void rb_clear_constant_cache(void) {}
void
rb_clear_constant_cache_for_id(ID id)
rb_clear_constant_cache(void)
{
rb_vm_t *vm = GET_VM();
st_table *ics;
if (rb_id_table_lookup(vm->constant_cache, id, (VALUE *) &ics)) {
st_foreach(ics, rb_clear_constant_cache_for_id_i, (st_data_t) NULL);
}
rb_yjit_constant_state_changed();
INC_GLOBAL_CONSTANT_STATE();
}
static void

View file

@ -4438,6 +4438,8 @@ gen_leave(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
return YJIT_END_BLOCK;
}
RUBY_EXTERN rb_serial_t ruby_vm_global_constant_state;
static codegen_status_t
gen_getglobal(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
{
@ -4705,7 +4707,8 @@ gen_opt_getinlinecache(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
// See vm_ic_hit_p(). The same conditions are checked in yjit_constant_ic_update().
struct iseq_inline_constant_cache_entry *ice = ic->entry;
if (!ice) {
if (!ice || // cache not filled
GET_IC_SERIAL(ice) != ruby_vm_global_constant_state /* cache out of date */) {
// In these cases, leave a block that unconditionally side exits
// for the interpreter to invalidate.
return YJIT_CANT_COMPILE;