
Add direct marking on iseq operands

Directly marking iseq operands allows us to eliminate the "mark array"
stored on ISEQ objects, which will reduce the amount of memory ISEQ
objects consume.  This patch changes the iseq mark function to:

* Directly mark ISEQ operands
* Iterate over and mark child ISEQs (both steps are sketched below)
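
Both steps are visible in the iseq.c hunk further down.  A condensed
sketch of the new mark path (simplified; the coverage, location and
parent-iseq marking that also happens there is left out):

void
rb_iseq_mark(const rb_iseq_t *iseq)
{
    const struct rb_iseq_constant_body *body = iseq->body;

    /* 1. decode the encoded instructions and mark heap operands directly
     *    (guarded by the ISEQ_MARKABLE_ISEQ flag described below) */
    if (FL_TEST(iseq, ISEQ_MARKABLE_ISEQ)) {
        rb_iseq_each_value(iseq, each_insn_value, NULL);
    }

    /* 2. child ISEQs only reachable through the catch table are marked
     *    here; child ISEQs used as operands are covered by step 1 */
    if (body->catch_table) {
        unsigned int i;
        for (i = 0; i < body->catch_table->size; i++) {
            const struct iseq_catch_table_entry *entry = &body->catch_table->entries[i];
            if (entry->iseq) {
                rb_gc_mark((VALUE)entry->iseq);
            }
        }
    }
}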

It also introduces two flags on the ISEQ object.  In order to mark
instruction operands, we have to disassemble the instructions and find
each instruction's operands and their types.  Instructions may also be
translated to jump addresses (direct threading), and instruction
sequences may get marked by the GC *while* they're in flight (still
being compiled).  The `ISEQ_TRANSLATED` flag indicates whether the
instructions have been translated to jump addresses, so that when we
decode the instructions we know whether we need to map each jump
address back to its original instruction.
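
In this patch that check boils down to choosing a translator before the
operands are decoded; condensed from the iseq.c hunk below:

/* With direct threading each code word holds a jump address, so decoding
 * scans the address table to map it back to an instruction number. */
static int
rb_vm_insn_addr2insn2(const void *addr)
{
    const void *const *table = rb_vm_get_insns_address_table();
    int insn;
    for (insn = 0; insn < VM_INSTRUCTION_SIZE; insn++) {
        if (table[insn] == addr) return insn;
    }
    rb_bug("rb_vm_insn_addr2insn: invalid insn address: %p", addr);
}

/* rb_iseq_each_value() then picks the decoder: an untranslated (in-flight)
 * ISEQ uses a null translator that returns the code word unchanged. */
translator = FL_TEST(iseq, ISEQ_TRANSLATED) ? rb_vm_insn_addr2insn2
                                            : rb_vm_insn_null_translator;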

Not every ISEQ has markable objects embedded in its instructions, and
we can detect at compile time whether one does.  If the instructions
contain markable objects, we set the `ISEQ_MARKABLE_ISEQ` flag on the
ISEQ object.  During the mark phase we can then skip decompilation when
the flag is *not* set; in other words, we avoid decompilation if we
know in advance there is nothing to mark.
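
The detection happens while iseq_set_sequence() lays out the encoded
instructions; condensed from the compile.c hunk below, the flag is set
whenever an operand that would need marking is emitted:

case TS_VALUE:    /* CDHASH and ISE operands set the flag unconditionally */
  {
    VALUE v = operands[j];
    generated_iseq[code_index + 1 + j] = v;
    if (!SPECIAL_CONST_P(v)) {
        /* a heap object is embedded in the instructions, so the mark
         * phase will have to decode them */
        FL_SET(iseq, ISEQ_MARKABLE_ISEQ);
    }
    break;
  }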

`once` instructions have an operand that contains the result of a
one-time compilation of a regex.  Before this patch, that operand was
typed as an "inline cache" (IC), even though the struct is actually an
"inline storage".  This patch changes the operand to an "inline
storage" (ISE) so that we can differentiate between caches that need
marking (inline storage) and caches that don't (inline cache).
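
The effect shows up in the vm_insnhelper.c and iseq.c hunks below: the
cached value now lives only in the inline storage entry, kept alive by a
write barrier on store and by the operand walk at mark time, instead of
being pushed onto the mark array.  Roughly:

/* store (vm_once_dispatch): write-barriered assignment into the ISE */
val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);

/* mark (iseq_extract_values): ISE operands are visited when the
 * instructions are decoded */
case TS_ISE:
  {
    union iseq_inline_storage_entry *is =
        (union iseq_inline_storage_entry *)code[pos + op_no + 1];
    if (is->once.value) {
        func(data, is->once.value);    /* rb_gc_mark() at GC time */
    }
    break;
  }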

[ruby-core:84909]

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@62706 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
tenderlove 2018-03-09 20:11:45 +00:00
parent 2dafe8df55
commit 8952964976
7 changed files with 171 additions and 72 deletions

compile.c

@ -562,15 +562,6 @@ APPEND_ELEM(ISEQ_ARG_DECLARE LINK_ANCHOR *const anchor, LINK_ELEMENT *before, LI
#define APPEND_ELEM(anchor, before, elem) APPEND_ELEM(iseq, (anchor), (before), (elem))
#endif
static int
iseq_add_mark_object(const rb_iseq_t *iseq, VALUE v)
{
if (!SPECIAL_CONST_P(v)) {
rb_iseq_add_mark_object(iseq, v);
}
return COMPILE_OK;
}
static int
iseq_add_mark_object_compile_time(const rb_iseq_t *iseq, VALUE v)
{
@ -749,6 +740,7 @@ rb_iseq_translate_threaded_code(rb_iseq_t *iseq)
encoded[i] = (VALUE)table[insn];
i += len;
}
FL_SET(iseq, ISEQ_TRANSLATED);
#endif
return COMPILE_OK;
}
@ -1235,7 +1227,7 @@ new_child_iseq(rb_iseq_t *iseq, const NODE *const node,
rb_iseq_path(iseq), rb_iseq_realpath(iseq),
INT2FIX(line_no), parent, type, ISEQ_COMPILE_DATA(iseq)->option);
debugs("[new_child_iseq]< ---------------------------------------\n");
iseq_add_mark_object(iseq, (VALUE)ret_iseq);
iseq_add_mark_object_compile_time(iseq, (VALUE)ret_iseq);
return ret_iseq;
}
@ -1250,7 +1242,7 @@ new_child_iseq_ifunc(rb_iseq_t *iseq, const struct vm_ifunc *ifunc,
rb_iseq_path(iseq), rb_iseq_realpath(iseq),
INT2FIX(line_no), parent, type, ISEQ_COMPILE_DATA(iseq)->option);
debugs("[new_child_iseq_ifunc]< ---------------------------------------\n");
iseq_add_mark_object(iseq, (VALUE)ret_iseq);
iseq_add_mark_object_compile_time(iseq, (VALUE)ret_iseq);
return ret_iseq;
}
@ -1550,7 +1542,6 @@ iseq_set_arguments_keywords(rb_iseq_t *iseq, LINK_ANCHOR *const optargs,
switch (nd_type(val_node)) {
case NODE_LIT:
dv = val_node->nd_lit;
iseq_add_mark_object(iseq, dv);
break;
case NODE_NIL:
dv = Qnil;
@ -2076,6 +2067,7 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
rb_hash_rehash(map);
freeze_hide_obj(map);
generated_iseq[code_index + 1 + j] = map;
FL_SET(iseq, ISEQ_MARKABLE_ISEQ);
break;
}
case TS_LINDEX:
@ -2086,6 +2078,9 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
{
VALUE v = operands[j];
generated_iseq[code_index + 1 + j] = v;
if (!SPECIAL_CONST_P(v)) {
FL_SET(iseq, ISEQ_MARKABLE_ISEQ);
}
break;
}
case TS_VALUE: /* VALUE */
@ -2093,7 +2088,9 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
VALUE v = operands[j];
generated_iseq[code_index + 1 + j] = v;
/* to mark ruby object */
iseq_add_mark_object(iseq, v);
if (!SPECIAL_CONST_P(v)) {
FL_SET(iseq, ISEQ_MARKABLE_ISEQ);
}
break;
}
case TS_IC: /* inline cache */
@ -2106,6 +2103,17 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
generated_iseq[code_index + 1 + j] = (VALUE)ic;
break;
}
case TS_ISE: /* inline storage entry */
{
unsigned int ic_index = FIX2UINT(operands[j]);
IC ic = (IC)&iseq->body->is_entries[ic_index];
if (UNLIKELY(ic_index >= iseq->body->is_size)) {
rb_bug("iseq_set_sequence: ic_index overflow: index: %d, size: %d", ic_index, iseq->body->is_size);
}
generated_iseq[code_index + 1 + j] = (VALUE)ic;
FL_SET(iseq, ISEQ_MARKABLE_ISEQ);
break;
}
case TS_CALLINFO: /* call info */
{
struct rb_call_info *base_ci = (struct rb_call_info *)operands[j];
@ -2258,11 +2266,6 @@ iseq_set_exception_table(rb_iseq_t *iseq)
entry->end = label_get_position((LABEL *)(ptr[2] & ~1));
entry->iseq = (rb_iseq_t *)ptr[3];
/* register iseq as mark object */
if (entry->iseq != 0) {
iseq_add_mark_object(iseq, (VALUE)entry->iseq);
}
/* stack depth */
if (ptr[4]) {
LABEL *lobj = (LABEL *)(ptr[4] & ~1);
@ -4877,7 +4880,7 @@ compile_case(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const orig_nod
}
if (only_special_literals) {
iseq_add_mark_object(iseq, literals);
iseq_add_mark_object_compile_time(iseq, literals);
ADD_INSN(ret, nd_line(orig_node), dup);
ADD_INSN2(ret, nd_line(orig_node), opt_case_dispatch, literals, elselabel);
@ -7354,6 +7357,7 @@ insn_data_to_s_detail(INSN *iobj)
break;
}
case TS_IC: /* inline cache */
case TS_ISE: /* inline storage entry */
rb_str_catf(str, "<ic:%d>", FIX2INT(OPERAND_AT(iobj, j)));
break;
case TS_CALLINFO: /* call info */
@ -7593,7 +7597,6 @@ iseq_build_load_iseq(const rb_iseq_t *iseq, VALUE op)
}
loaded_iseq = rb_iseqw_to_iseq(iseqw);
iseq_add_mark_object(iseq, (VALUE)loaded_iseq);
return loaded_iseq;
}
@ -7724,7 +7727,6 @@ iseq_build_from_ary_body(rb_iseq_t *iseq, LINK_ANCHOR *const anchor,
break;
case TS_VALUE:
argv[j] = op;
iseq_add_mark_object(iseq, op);
break;
case TS_ISEQ:
{
@ -7741,6 +7743,7 @@ iseq_build_from_ary_body(rb_iseq_t *iseq, LINK_ANCHOR *const anchor,
argv[j] = (VALUE)rb_global_entry(SYM2ID(op));
break;
case TS_IC:
case TS_ISE:
argv[j] = op;
if (NUM2UINT(op) >= iseq->body->is_size) {
iseq->body->is_size = NUM2INT(op) + 1;
@ -7771,7 +7774,6 @@ iseq_build_from_ary_body(rb_iseq_t *iseq, LINK_ANCHOR *const anchor,
}
RB_GC_GUARD(op);
argv[j] = map;
rb_iseq_add_mark_object(iseq, map);
}
break;
case TS_FUNCPTR:
@ -8347,6 +8349,7 @@ ibf_dump_code(struct ibf_dump *dump, const rb_iseq_t *iseq)
code[code_index] = (VALUE)ibf_dump_iseq(dump, (const rb_iseq_t *)op);
break;
case TS_IC:
case TS_ISE:
{
unsigned int i;
for (i=0; i<iseq->body->is_size; i++) {
@ -8412,6 +8415,7 @@ ibf_load_code(const struct ibf_load *load, const rb_iseq_t *iseq, const struct r
code[code_index] = (VALUE)ibf_load_iseq(load, (const rb_iseq_t *)op);
break;
case TS_IC:
case TS_ISE:
code[code_index] = (VALUE)&is_entries[(int)op];
break;
case TS_CALLINFO:
@ -8712,7 +8716,8 @@ ibf_dump_iseq_each(struct ibf_dump *dump, const rb_iseq_t *iseq)
dump_body.is_entries = NULL;
dump_body.ci_entries = ibf_dump_ci_entries(dump, iseq);
dump_body.cc_entries = NULL;
dump_body.mark_ary = ISEQ_FLIP_CNT(iseq);
dump_body.variable.coverage = Qnil;
dump_body.variable.original_iseq = Qnil;
return ibf_dump_write(dump, &dump_body, sizeof(dump_body));
}
@ -8744,7 +8749,9 @@ ibf_load_iseq_each(const struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t of
load_body->ci_kw_size = body->ci_kw_size;
load_body->insns_info.size = body->insns_info.size;
RB_OBJ_WRITE(iseq, &load_body->mark_ary, iseq_mark_ary_create((int)body->mark_ary));
ISEQ_COVERAGE_SET(iseq, Qnil);
ISEQ_ORIGINAL_ISEQ_CLEAR(iseq);
iseq->body->variable.flip_count = (int)body->variable.flip_count;
{
VALUE realpath = Qnil, path = ibf_load_object(load, body->location.pathobj);
@ -9376,7 +9383,6 @@ ibf_load_object(const struct ibf_load *load, VALUE object_index)
rb_ary_store(load->obj_list, (long)object_index, obj);
}
iseq_add_mark_object(load->iseq, obj);
return obj;
}
}
@ -9561,9 +9567,6 @@ ibf_load_iseq(const struct ibf_load *load, const rb_iseq_t *index_iseq)
ibf_load_iseq_complete(iseq);
#endif /* !USE_LAZY_LOAD */
if (load->iseq) {
iseq_add_mark_object(load->iseq, (VALUE)iseq);
}
return iseq;
}
}

insns.def

@ -972,11 +972,11 @@ setinlinecache
/* run iseq only once */
DEFINE_INSN
once
(ISEQ iseq, IC ic)
(ISEQ iseq, ISE ise)
()
(VALUE val)
{
val = vm_once_dispatch(ec, iseq, ic);
val = vm_once_dispatch(ec, iseq, ise);
}
/* case dispatcher, jump by table if possible */

iseq.c

@ -115,6 +115,100 @@ rb_iseq_free(const rb_iseq_t *iseq)
RUBY_FREE_LEAVE("iseq");
}
#if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
static int
rb_vm_insn_addr2insn2(const void *addr)
{
int insn;
const void * const *table = rb_vm_get_insns_address_table();
for (insn = 0; insn < VM_INSTRUCTION_SIZE; insn++) {
if (table[insn] == addr) {
return insn;
}
}
rb_bug("rb_vm_insn_addr2insn: invalid insn address: %p", addr);
}
#endif
static int
rb_vm_insn_null_translator(const void *addr)
{
return (int)addr;
}
typedef void iseq_value_itr_t(void *ctx, VALUE obj);
typedef int rb_vm_insns_translator_t(const void *addr);
static int
iseq_extract_values(const VALUE *code, size_t pos, iseq_value_itr_t * func, void *data, rb_vm_insns_translator_t * translator)
{
VALUE insn = translator((void *)code[pos]);
int len = insn_len(insn);
int op_no;
const char *types = insn_op_types(insn);
for (op_no = 0; types[op_no]; op_no++) {
char type = types[op_no];
switch (type) {
case TS_CDHASH:
case TS_ISEQ:
case TS_VALUE:
{
VALUE op = code[pos + op_no + 1];
if (!SPECIAL_CONST_P(op)) {
func(data, op);
}
break;
}
case TS_ISE:
{
union iseq_inline_storage_entry *const is = (union iseq_inline_storage_entry *)code[pos + op_no + 1];
if (is->once.value) {
func(data, is->once.value);
}
break;
}
default:
break;
}
}
return len;
}
static void
rb_iseq_each_value(const rb_iseq_t *iseq, iseq_value_itr_t * func, void *data)
{
unsigned int size;
const VALUE *code;
size_t n;
rb_vm_insns_translator_t * translator;
size = iseq->body->iseq_size;
code = iseq->body->iseq_encoded;
#if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
if (FL_TEST(iseq, ISEQ_TRANSLATED)) {
translator = rb_vm_insn_addr2insn2;
} else {
translator = rb_vm_insn_null_translator;
}
#else
translator = rb_vm_insn_null_translator;
#endif
for (n = 0; n < size;) {
n += iseq_extract_values(code, n, func, data, translator);
}
}
static void
each_insn_value(void *ctx, VALUE obj)
{
rb_gc_mark(obj);
}
void
rb_iseq_mark(const rb_iseq_t *iseq)
{
@ -123,13 +217,31 @@ rb_iseq_mark(const rb_iseq_t *iseq)
if (iseq->body) {
const struct rb_iseq_constant_body *body = iseq->body;
RUBY_MARK_UNLESS_NULL(body->mark_ary);
if(FL_TEST(iseq, ISEQ_MARKABLE_ISEQ)) {
rb_iseq_each_value(iseq, each_insn_value, NULL);
}
rb_gc_mark(body->variable.coverage);
rb_gc_mark(body->variable.original_iseq);
rb_gc_mark(body->location.label);
rb_gc_mark(body->location.base_label);
rb_gc_mark(body->location.pathobj);
RUBY_MARK_UNLESS_NULL((VALUE)body->parent_iseq);
if (body->catch_table) {
const struct iseq_catch_table *table = body->catch_table;
unsigned int i;
for(i = 0; i < table->size; i++) {
const struct iseq_catch_table_entry *entry;
entry = &table->entries[i];
if (entry->iseq) {
rb_gc_mark((VALUE)entry->iseq);
}
}
}
}
if (FL_TEST(iseq, ISEQ_NOT_LOADED_YET)) {
rb_gc_mark(iseq->aux.loader.obj);
}
@ -299,13 +411,6 @@ set_relation(rb_iseq_t *iseq, const rb_iseq_t *piseq)
}
}
void
rb_iseq_add_mark_object(const rb_iseq_t *iseq, VALUE obj)
{
/* TODO: check dedup */
rb_ary_push(ISEQ_MARK_ARY(iseq), obj);
}
static VALUE
prepare_iseq_build(rb_iseq_t *iseq,
VALUE name, VALUE path, VALUE realpath, VALUE first_lineno, const rb_code_location_t *code_location,
@ -326,7 +431,9 @@ prepare_iseq_build(rb_iseq_t *iseq,
if (iseq != iseq->body->local_iseq) {
RB_OBJ_WRITE(iseq, &iseq->body->location.base_label, iseq->body->local_iseq->body->location.label);
}
RB_OBJ_WRITE(iseq, &iseq->body->mark_ary, iseq_mark_ary_create(0));
ISEQ_COVERAGE_SET(iseq, Qnil);
ISEQ_ORIGINAL_ISEQ_CLEAR(iseq);
iseq->body->variable.flip_count = 0;
ISEQ_COMPILE_DATA_ALLOC(iseq);
RB_OBJ_WRITE(iseq, &ISEQ_COMPILE_DATA(iseq)->err_info, err_info);
@ -1612,6 +1719,7 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
break;
case TS_IC:
case TS_ISE:
ret = rb_sprintf("<is:%"PRIdPTRDIFF">", (union iseq_inline_storage_entry *)op - iseq->body->is_entries);
break;
@ -2356,6 +2464,7 @@ iseq_data_to_ary(const rb_iseq_t *iseq)
}
break;
case TS_IC:
case TS_ISE:
{
union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)*seq;
rb_ary_push(ary, INT2FIX(is - iseq->body->is_entries));

iseq.h

@ -28,44 +28,25 @@ rb_call_info_kw_arg_bytes(int keyword_len)
return sizeof(struct rb_call_info_kw_arg) + sizeof(VALUE) * (keyword_len - 1);
}
enum iseq_mark_ary_index {
ISEQ_MARK_ARY_COVERAGE,
ISEQ_MARK_ARY_FLIP_CNT,
ISEQ_MARK_ARY_ORIGINAL_ISEQ,
ISEQ_MARK_ARY_INITIAL_SIZE
};
static inline VALUE
iseq_mark_ary_create(int flip_cnt)
{
VALUE ary = rb_ary_tmp_new(ISEQ_MARK_ARY_INITIAL_SIZE);
rb_ary_push(ary, Qnil); /* ISEQ_MARK_ARY_COVERAGE */
rb_ary_push(ary, INT2FIX(flip_cnt)); /* ISEQ_MARK_ARY_FLIP_CNT */
rb_ary_push(ary, Qnil); /* ISEQ_MARK_ARY_ORIGINAL_ISEQ */
return ary;
}
#define ISEQ_MARK_ARY(iseq) (iseq)->body->mark_ary
#define ISEQ_COVERAGE(iseq) RARRAY_AREF(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_COVERAGE)
#define ISEQ_COVERAGE_SET(iseq, cov) RARRAY_ASET(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_COVERAGE, cov)
#define ISEQ_COVERAGE(iseq) iseq->body->variable.coverage
#define ISEQ_COVERAGE_SET(iseq, cov) RB_OBJ_WRITE(iseq, &iseq->body->variable.coverage, cov)
#define ISEQ_LINE_COVERAGE(iseq) RARRAY_AREF(ISEQ_COVERAGE(iseq), COVERAGE_INDEX_LINES)
#define ISEQ_BRANCH_COVERAGE(iseq) RARRAY_AREF(ISEQ_COVERAGE(iseq), COVERAGE_INDEX_BRANCHES)
#define ISEQ_FLIP_CNT(iseq) FIX2INT(RARRAY_AREF(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_FLIP_CNT))
#define ISEQ_FLIP_CNT(iseq) (iseq)->body->variable.flip_count
static inline int
ISEQ_FLIP_CNT_INCREMENT(const rb_iseq_t *iseq)
{
int cnt = ISEQ_FLIP_CNT(iseq);
RARRAY_ASET(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_FLIP_CNT, INT2FIX(cnt+1));
int cnt = iseq->body->variable.flip_count;
iseq->body->variable.flip_count += 1;
return cnt;
}
static inline VALUE *
ISEQ_ORIGINAL_ISEQ(const rb_iseq_t *iseq)
{
VALUE str = RARRAY_AREF(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_ORIGINAL_ISEQ);
VALUE str = iseq->body->variable.original_iseq;
if (RTEST(str)) return (VALUE *)RSTRING_PTR(str);
return NULL;
}
@ -73,14 +54,14 @@ ISEQ_ORIGINAL_ISEQ(const rb_iseq_t *iseq)
static inline void
ISEQ_ORIGINAL_ISEQ_CLEAR(const rb_iseq_t *iseq)
{
RARRAY_ASET(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_ORIGINAL_ISEQ, Qnil);
RB_OBJ_WRITE(iseq, &iseq->body->variable.original_iseq, Qnil);
}
static inline VALUE *
ISEQ_ORIGINAL_ISEQ_ALLOC(const rb_iseq_t *iseq, long size)
{
VALUE str = rb_str_tmp_new(size * sizeof(VALUE));
RARRAY_ASET(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_ORIGINAL_ISEQ, str);
RB_OBJ_WRITE(iseq, &iseq->body->variable.original_iseq, str);
return (VALUE *)RSTRING_PTR(str);
}
@ -94,6 +75,8 @@ ISEQ_ORIGINAL_ISEQ_ALLOC(const rb_iseq_t *iseq, long size)
#define ISEQ_NOT_LOADED_YET IMEMO_FL_USER1
#define ISEQ_USE_COMPILE_DATA IMEMO_FL_USER2
#define ISEQ_TRANSLATED IMEMO_FL_USER3
#define ISEQ_MARKABLE_ISEQ IMEMO_FL_USER4
struct iseq_compile_data {
/* GC is needed */
@ -173,7 +156,6 @@ void rb_iseq_build_from_ary(rb_iseq_t *iseq, VALUE misc,
VALUE exception, VALUE body);
/* iseq.c */
void rb_iseq_add_mark_object(const rb_iseq_t *iseq, VALUE obj);
VALUE rb_iseq_load(VALUE data, VALUE parent, VALUE opt);
VALUE rb_iseq_parameters(const rb_iseq_t *iseq, int is_proc);
struct st_table *ruby_insn_make_insn_table(void);


@ -18,6 +18,7 @@ RubyVM::Typemap = {
"GENTRY" => %w[G TS_GENTRY],
"IC" => %w[K TS_IC],
"ID" => %w[I TS_ID],
"ISE" => %w[T TS_ISE],
"ISEQ" => %w[S TS_ISEQ],
"OFFSET" => %w[O TS_OFFSET],
"VALUE" => %w[V TS_VALUE],

vm_core.h

@ -411,7 +411,11 @@ struct rb_iseq_constant_body {
*/
struct rb_call_cache *cc_entries; /* size is ci_size = ci_kw_size */
VALUE mark_ary; /* Array: includes operands which should be GC marked */
struct {
rb_num_t flip_count;
VALUE coverage;
VALUE original_iseq;
} variable;
unsigned int local_table_size;
unsigned int is_size;
@ -1010,6 +1014,7 @@ enum vm_svar_index {
/* inline cache */
typedef struct iseq_inline_cache_entry *IC;
typedef union iseq_inline_storage_entry *ISE;
typedef struct rb_call_info *CALL_INFO;
typedef struct rb_call_cache *CALL_CACHE;

vm_insnhelper.c

@ -3282,11 +3282,10 @@ vm_ic_update(IC ic, VALUE val, const VALUE *reg_ep)
}
static VALUE
vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, IC ic)
vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
{
rb_thread_t *th = rb_ec_thread_ptr(ec);
rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
union iseq_inline_storage_entry *const is = (union iseq_inline_storage_entry *)ic;
again:
if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
@ -3295,10 +3294,10 @@ vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, IC ic)
else if (is->once.running_thread == NULL) {
VALUE val;
is->once.running_thread = th;
val = is->once.value = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
/* is->once.running_thread is cleared by vm_once_clear() */
is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
rb_iseq_add_mark_object(ec->cfp->iseq, val);
return val;
}
else if (is->once.running_thread == th) {