mirror of https://github.com/ruby/ruby.git
enable constant cache on ractors
The constant cache `IC` is accessed in a non-atomic manner and has thread-safety issues, so Ruby 3.0 disables the constant cache on non-main Ractors. This patch enables it by introducing `imemo_constcache` and allocating a fresh entry on every re-fill of the constant cache, like `imemo_callcache`. [Bug #17510] Now `IC` has only one field, `IC::entry`, which points to an `iseq_inline_constant_cache_entry` managed as a T_IMEMO object. Updating `IC` is a single pointer store, so `rb_mjit_before_vm_ic_update()` and `rb_mjit_after_vm_ic_update()` are no longer needed.
commit e7fc353f04 (parent bf21faec15)
Notes (git): 2021-01-05 02:28:27 +09:00
14 changed files with 105 additions and 70 deletions
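As the commit message above explains, the key idea is that a constant-cache entry is now an immutable, GC-managed object (imemo_constcache), and re-filling the cache is a single pointer store into IC::entry, which is what makes the cache safe to read from non-main Ractors without the MJIT lock. The following standalone sketch illustrates that publish-once pattern in isolation; all names in it are illustrative stand-ins, not the committed code. The real code uses iseq_inline_constant_cache(_entry), vm_ic_hit_p() and vm_ic_update() shown in the diff below, relies on the GC rather than free() to reclaim old entries, and publishes with a plain pointer store through RB_OBJ_WRITE rather than explicit C11 atomics.

/* publish_once.c: build a fully-initialized entry, then publish it with one
 * pointer store, so a concurrent reader sees either the old entry or the new
 * one, never a torn mix of fields. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {            /* stands in for iseq_inline_constant_cache_entry */
    unsigned long serial; /* like ic_serial: global constant state at fill time */
    long value;           /* the cached constant value */
};

struct cache {            /* stands in for iseq_inline_constant_cache (IC) */
    _Atomic(struct entry *) entry;
};

static unsigned long global_constant_state = 1; /* bumped when a constant changes */

/* Fill the cache: never mutate a published entry, always allocate a new one. */
static void cache_update(struct cache *ic, long value)
{
    struct entry *e = malloc(sizeof(*e));
    e->serial = global_constant_state;
    e->value  = value;
    atomic_store(&ic->entry, e); /* the single visible write */
    /* The old entry is simply leaked here; in the VM the GC keeps it alive
     * for as long as any reader still holds it and collects it afterwards. */
}

/* Read the cache: load the pointer once, then touch only immutable fields. */
static int cache_hit(struct cache *ic, long *out)
{
    struct entry *e = atomic_load(&ic->entry);
    if (e && e->serial == global_constant_state) {
        *out = e->value;
        return 1;
    }
    return 0;
}

int main(void)
{
    struct cache ic = { NULL };
    long v = 0;
    int hit;

    hit = cache_hit(&ic, &v);
    printf("before fill: hit=%d\n", hit);               /* 0 */

    cache_update(&ic, 42);
    hit = cache_hit(&ic, &v);
    printf("after fill:  hit=%d value=%ld\n", hit, v);  /* 1, 42 */

    global_constant_state++;                             /* a constant was redefined */
    hit = cache_hit(&ic, &v);
    printf("after bump:  hit=%d\n", hit);                /* 0: stale entry rejected */
    return 0;
}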
@@ -967,6 +967,18 @@ assert_equal 'can not access non-shareable objects in constant C::CONST by non-m
 end
 }
 
+# Constant cache should care about non-sharable constants
+assert_equal "can not access non-shareable objects in constant Object::STR by non-main Ractor.", %q{
+  STR = "hello"
+  def str; STR; end
+  s = str() # fill const cache
+  begin
+    Ractor.new{ str() }.take
+  rescue Ractor::RemoteError => e
+    e.cause.message
+  end
+}
+
 # Setting non-shareable objects into constants by other Ractors is not allowed
 assert_equal 'can not set constants with non-shareable objects by non-main Ractors', %q{
   class C
compile.c (14 lines changed)
@@ -2359,11 +2359,8 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
                 }
                 break;
               }
-              case TS_ISE: /* inline storage entry */
-                /* Treated as an IC, but may contain a markable VALUE */
-                FL_SET(iseqv, ISEQ_MARKABLE_ISEQ);
-                /* fall through */
               case TS_IC: /* inline cache */
+              case TS_ISE: /* inline storage entry */
               case TS_IVC: /* inline ivar cache */
                 {
                     unsigned int ic_index = FIX2UINT(operands[j]);
@@ -2375,8 +2372,7 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
                                   ic_index, body->is_size);
                     }
                     generated_iseq[code_index + 1 + j] = (VALUE)ic;
-
-                    if (type == TS_IVC) FL_SET(iseqv, ISEQ_MARKABLE_ISEQ);
+                    FL_SET(iseqv, ISEQ_MARKABLE_ISEQ);
                     break;
                 }
               case TS_CALLDATA:
@@ -9481,14 +9477,13 @@ iseq_build_from_ary_body(rb_iseq_t *iseq, LINK_ANCHOR *const anchor,
                 }
                 break;
               case TS_ISE:
-                FL_SET((VALUE)iseq, ISEQ_MARKABLE_ISEQ);
-                /* fall through */
               case TS_IC:
               case TS_IVC: /* inline ivar cache */
                 argv[j] = op;
                 if (NUM2UINT(op) >= iseq->body->is_size) {
                     iseq->body->is_size = NUM2INT(op) + 1;
                 }
+                FL_SET((VALUE)iseq, ISEQ_MARKABLE_ISEQ);
                 break;
               case TS_CALLDATA:
                 argv[j] = iseq_build_callinfo_from_hash(iseq, op);
@@ -10466,14 +10461,13 @@ ibf_load_code(const struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t bytecod
                 break;
             }
           case TS_ISE:
-            FL_SET(iseqv, ISEQ_MARKABLE_ISEQ);
-            /* fall through */
           case TS_IC:
           case TS_IVC:
             {
                 VALUE op = ibf_load_small_value(load, &reading_pos);
                 code[code_index] = (VALUE)&is_entries[op];
             }
+            FL_SET(iseqv, ISEQ_MARKABLE_ISEQ);
             break;
           case TS_CALLDATA:
             {
@@ -309,6 +309,7 @@ RB_DEBUG_COUNTER(obj_imemo_memo)
 RB_DEBUG_COUNTER(obj_imemo_parser_strterm)
 RB_DEBUG_COUNTER(obj_imemo_callinfo)
 RB_DEBUG_COUNTER(obj_imemo_callcache)
+RB_DEBUG_COUNTER(obj_imemo_constcache)
 
 /* ar_table */
 RB_DEBUG_COUNTER(artable_hint_hit)
@@ -650,6 +650,7 @@ count_imemo_objects(int argc, VALUE *argv, VALUE self)
         imemo_type_ids[10] = rb_intern("imemo_parser_strterm");
         imemo_type_ids[11] = rb_intern("imemo_callinfo");
         imemo_type_ids[12] = rb_intern("imemo_callcache");
+        imemo_type_ids[13] = rb_intern("imemo_constcache");
     }
 
     each_object_with_flags(count_imemo_objects_i, (void *)hash);
gc.c (19 lines changed)
@@ -2398,6 +2398,7 @@ rb_imemo_name(enum imemo_type type)
         IMEMO_NAME(parser_strterm);
         IMEMO_NAME(callinfo);
         IMEMO_NAME(callcache);
+        IMEMO_NAME(constcache);
 #undef IMEMO_NAME
     }
     return "unknown";
@@ -3102,9 +3103,9 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
           case imemo_callcache:
             RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
             break;
-          default:
-            /* unreachable */
-            break;
+          case imemo_constcache:
+            RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
+            break;
         }
         return 0;
 
@@ -6260,6 +6261,12 @@ gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
             gc_mark(objspace, (VALUE)vm_cc_cme(cc));
         }
         return;
+      case imemo_constcache:
+        {
+            const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
+            gc_mark(objspace, ice->value);
+        }
+        return;
 #if VM_CHECK_MODE > 0
       default:
         VM_UNREACHABLE(gc_mark_imemo);
@@ -8985,6 +8992,12 @@ gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
             }
         }
         break;
+      case imemo_constcache:
+        {
+            const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
+            UPDATE_IF_MOVED(objspace, ice->value);
+        }
+        break;
       case imemo_parser_strterm:
       case imemo_tmpbuf:
       case imemo_callinfo:
@@ -1023,9 +1023,10 @@ opt_getinlinecache
 ()
 (VALUE val)
 {
-    if (vm_ic_hit_p(ic->ic_serial, ic->ic_cref, GET_EP())) {
-        val = ic->value;
-        JUMP(dst);
+    struct iseq_inline_constant_cache_entry *ice = ic->entry;
+    if (ice && vm_ic_hit_p(ice, GET_EP())) {
+        val = ice->value;
+        JUMP(dst);
     }
     else {
         val = Qnil;
@@ -1039,7 +1040,7 @@ opt_setinlinecache
 (VALUE val)
 (VALUE val)
 {
-    vm_ic_update(ic, val, GET_EP());
+    vm_ic_update(GET_ISEQ(), ic, val, GET_EP());
 }
 
 /* run iseq only once */
@@ -45,6 +45,7 @@ enum imemo_type {
     imemo_parser_strterm = 10,
     imemo_callinfo = 11,
     imemo_callcache = 12,
+    imemo_constcache = 13,
 };
 
 /* CREF (Class REFerence) is defined in method.h */
iseq.c (11 lines changed)
@@ -182,6 +182,17 @@ iseq_extract_values(VALUE *code, size_t pos, iseq_value_itr_t * func, void *data
                 }
             }
             break;
+          case TS_IC:
+            {
+                IC ic = (IC)code[pos + op_no + 1];
+                if (ic->entry) {
+                    VALUE nv = func(data, (VALUE)ic->entry);
+                    if ((VALUE)ic->entry != nv) {
+                        ic->entry = (void *)nv;
+                    }
+                }
+            }
+            break;
           case TS_IVC:
             {
                 IVC ivc = (IVC)code[pos + op_no + 1];
mjit.c (18 lines changed)
@@ -82,24 +82,6 @@ mjit_gc_exit_hook(void)
     CRITICAL_SECTION_FINISH(4, "mjit_gc_exit_hook");
 }
 
-// Lock setinlinecache
-void
-rb_mjit_before_vm_ic_update(void)
-{
-    if (!mjit_enabled)
-        return;
-    CRITICAL_SECTION_START(3, "before vm_ic_update");
-}
-
-// Unlock setinlinecache
-void
-rb_mjit_after_vm_ic_update(void)
-{
-    if (!mjit_enabled)
-        return;
-    CRITICAL_SECTION_FINISH(3, "after vm_ic_update");
-}
-
 // Deal with ISeq movement from compactor
 void
 mjit_update_references(const rb_iseq_t *iseq)
4
mjit.h
4
mjit.h
|
@ -83,8 +83,6 @@ RUBY_EXTERN bool mjit_call_p;
|
|||
extern void rb_mjit_add_iseq_to_process(const rb_iseq_t *iseq);
|
||||
extern VALUE rb_mjit_wait_call(rb_execution_context_t *ec, struct rb_iseq_constant_body *body);
|
||||
extern struct rb_mjit_compile_info* rb_mjit_iseq_compile_info(const struct rb_iseq_constant_body *body);
|
||||
extern void rb_mjit_before_vm_ic_update(void);
|
||||
extern void rb_mjit_after_vm_ic_update(void);
|
||||
extern void rb_mjit_recompile_send(const rb_iseq_t *iseq);
|
||||
extern void rb_mjit_recompile_ivar(const rb_iseq_t *iseq);
|
||||
extern void rb_mjit_recompile_exivar(const rb_iseq_t *iseq);
|
||||
|
@@ -187,8 +185,6 @@ void mjit_finish(bool close_handle_p);
 
 # else // USE_MJIT
 
-static inline void rb_mjit_before_vm_ic_update(void){}
-static inline void rb_mjit_after_vm_ic_update(void){}
 static inline struct mjit_cont *mjit_cont_new(rb_execution_context_t *ec){return NULL;}
 static inline void mjit_cont_free(struct mjit_cont *cont){}
 static inline void mjit_gc_start_hook(void){}
@@ -92,19 +92,23 @@ class TestGc < Test::Unit::TestCase
     assert_kind_of(Integer, res[:count])
 
     stat, count = {}, {}
-    GC.start
-    GC.stat(stat)
-    ObjectSpace.count_objects(count)
-    # repeat same methods invocation for cache object creation.
-    GC.stat(stat)
-    ObjectSpace.count_objects(count)
+    2.times{ # to ignore const cache imemo creation
+      GC.start
+      GC.stat(stat)
+      ObjectSpace.count_objects(count)
+      # repeat same methods invocation for cache object creation.
+      GC.stat(stat)
+      ObjectSpace.count_objects(count)
+    }
     assert_equal(count[:TOTAL]-count[:FREE], stat[:heap_live_slots])
     assert_equal(count[:FREE], stat[:heap_free_slots])
 
     # measure again without GC.start
-    1000.times{ "a" + "b" }
-    GC.stat(stat)
-    ObjectSpace.count_objects(count)
+    2.times{ # to ignore const cache imemo creation
+      1000.times{ "a" + "b" }
+      GC.stat(stat)
+      ObjectSpace.count_objects(count)
+    }
     assert_equal(count[:FREE], stat[:heap_free_slots])
   end
 
@@ -12,11 +12,13 @@
 % end
 
 % # compiler: Capture IC values, locking getinlinecache
-        rb_mjit_before_vm_ic_update();
-        rb_serial_t ic_serial = ic->ic_serial;
-        const rb_cref_t *ic_cref = ic->ic_cref;
-        VALUE ic_value = ic->value;
-        rb_mjit_after_vm_ic_update();
+        struct iseq_inline_constant_cache_entry *ice = ic->entry;
+        if (ice == NULL) {
+            goto getinlinecache_cancel;
+        }
+        rb_serial_t ic_serial = ice->ic_serial;
+        const rb_cref_t *ic_cref = ice->ic_cref;
+        VALUE ic_value = ice->value;
 
         if (ic_serial && !status->compile_info->disable_const_cache) {
 % # JIT: Inline everything in IC, and cancel the slow path
@@ -34,3 +36,4 @@
         b->stack_size += <%= insn.call_attribute('sp_inc') %>;
         break;
     }
+getinlinecache_cancel:;
vm_core.h (20 lines changed)
@@ -218,10 +218,18 @@ struct rb_control_frame_struct;
 /* iseq data type */
 typedef struct rb_compile_option_struct rb_compile_option_t;
 
-struct iseq_inline_cache_entry {
-    rb_serial_t ic_serial;
-    const rb_cref_t *ic_cref;
-    VALUE value;
+// imemo_constcache
+struct iseq_inline_constant_cache_entry {
+    VALUE flags;
+
+    VALUE value;              // v0
+    const rb_cref_t *ic_cref; // v1
+    rb_serial_t ic_serial;    // v2
+                              // v3
+};
+
+struct iseq_inline_constant_cache {
+    struct iseq_inline_constant_cache_entry *entry;
 };
 
 struct iseq_inline_iv_cache_entry {
@@ -233,7 +241,7 @@ union iseq_inline_storage_entry {
         struct rb_thread_struct *running_thread;
         VALUE value;
     } once;
-    struct iseq_inline_cache_entry cache;
+    struct iseq_inline_constant_cache ic_cache;
     struct iseq_inline_iv_cache_entry iv_cache;
 };
 
@@ -1126,7 +1134,7 @@ enum vm_svar_index {
 };
 
 /* inline cache */
-typedef struct iseq_inline_cache_entry *IC;
+typedef struct iseq_inline_constant_cache *IC;
 typedef struct iseq_inline_iv_cache_entry *IVC;
 typedef union iseq_inline_storage_entry *ISE;
 typedef const struct rb_callinfo *CALL_INFO;
@@ -4611,26 +4611,34 @@ vm_opt_newarray_min(rb_num_t num, const VALUE *ptr)
 
 #undef id_cmp
 
+#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
+
 static int
-vm_ic_hit_p(const rb_serial_t ic_serial, const rb_cref_t *ic_cref, const VALUE *reg_ep)
+vm_ic_hit_p(const struct iseq_inline_constant_cache_entry *ice, const VALUE *reg_ep)
 {
-    if (ic_serial == GET_GLOBAL_CONSTANT_STATE() && rb_ractor_main_p()) {
-        return (ic_cref == NULL || // no need to check CREF
-                ic_cref == vm_get_cref(reg_ep));
+    VM_ASSERT(IMEMO_TYPE_P(ice, imemo_constcache));
+    if (ice->ic_serial == GET_GLOBAL_CONSTANT_STATE() &&
+        (FL_TEST_RAW((VALUE)ice, IMEMO_CONST_CACHE_SHAREABLE) || rb_ractor_main_p())) {
+
+        VM_ASSERT(FL_TEST_RAW((VALUE)ice, IMEMO_CONST_CACHE_SHAREABLE) ? rb_ractor_shareable_p(ice->value) : true);
+
+        return (ice->ic_cref == NULL || // no need to check CREF
+                ice->ic_cref == vm_get_cref(reg_ep));
     }
     return FALSE;
 }
 
 static void
-vm_ic_update(IC ic, VALUE val, const VALUE *reg_ep)
+vm_ic_update(const rb_iseq_t *iseq, IC ic, VALUE val, const VALUE *reg_ep)
 {
-    VM_ASSERT(ic->value != Qundef);
-    rb_mjit_before_vm_ic_update();
-    ic->value = val;
-    ic->ic_serial = GET_GLOBAL_CONSTANT_STATE() - ruby_vm_const_missing_count;
-    ic->ic_cref = vm_get_const_key_cref(reg_ep);
-    rb_mjit_after_vm_ic_update();
+    struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)rb_imemo_new(imemo_constcache, 0, 0, 0, 0);
+    RB_OBJ_WRITE(ice, &ice->value, val);
+    ice->ic_cref = vm_get_const_key_cref(reg_ep);
+    ice->ic_serial = GET_GLOBAL_CONSTANT_STATE() - ruby_vm_const_missing_count;
+    if (rb_ractor_shareable_p(val)) ice->flags |= IMEMO_CONST_CACHE_SHAREABLE;
     ruby_vm_const_missing_count = 0;
+    RB_OBJ_WRITE(iseq, &ic->entry, ice);
 }
 
 static VALUE