
* vm_core.h: revisit the structure of frame, block and env.

[Bug #12628]

  This patch introduces many changes.

  * Introduce the concept of a "Block Handler (BH)" to represent
    passed blocks.

  * Move rb_control_frame_t::flag into ep[0] (as a special local
    variable). This flag represents not only the frame type, but also
    env flags such as "escaped".

  * Rename `rb_block_t` to `struct rb_block`.

  * Make Proc, Binding and RubyVM::Env objects wb-protected.

  Check [Bug #12628] for more details.
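
  To make the two main ideas above concrete, here is a minimal, self-contained
  C sketch of the layout this commit moves to: the frame/env flags live in
  ep[0], and the passed block travels as a "block handler" in the specval slot
  at ep[-1]. The VM_ENV_DATA_INDEX_* / VM_ENV_FLAG_* / VM_BLOCK_HANDLER_NONE
  names mirror identifiers that appear in the diff below, but the concrete
  values, the toy frame, and main() are assumptions made for illustration only,
  not the real vm_core.h definitions.

  /*
   * Illustrative sketch only -- NOT the real vm_core.h.  The index and flag
   * names mirror identifiers used in the diff; the values are stand-ins.
   */
  #include <stdio.h>

  typedef unsigned long VALUE;

  /* ep[] now carries per-frame data that used to live in rb_control_frame_t */
  #define VM_ENV_DATA_SIZE          3
  #define VM_ENV_DATA_INDEX_ME_CREF (-2)   /* ep[-2]: method entry or cref     */
  #define VM_ENV_DATA_INDEX_SPECVAL (-1)   /* ep[-1]: block handler or prev ep */
  #define VM_ENV_DATA_INDEX_FLAGS   ( 0)   /* ep[ 0]: frame type + env flags   */

  #define VM_FRAME_MAGIC_METHOD  0x11110001UL  /* made-up magic value          */
  #define VM_ENV_FLAG_LOCAL      0x0002UL      /* made-up flag bits            */
  #define VM_ENV_FLAG_ESCAPED    0x0004UL
  #define VM_BLOCK_HANDLER_NONE  0UL           /* "no block was passed"        */

  int
  main(void)
  {
      /* a toy method frame: two locals followed by the three env data slots */
      VALUE stack[2 + VM_ENV_DATA_SIZE] = {0};
      VALUE *ep = stack + 2 + VM_ENV_DATA_SIZE - 1;   /* ep points at FLAGS */

      ep[VM_ENV_DATA_INDEX_ME_CREF] = 0;                       /* no cref/me */
      ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_BLOCK_HANDLER_NONE;   /* no block   */
      ep[VM_ENV_DATA_INDEX_FLAGS]   = VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL;

      /* what used to be a cfp->flag check is now a read of ep[0] ... */
      if (ep[VM_ENV_DATA_INDEX_FLAGS] & VM_ENV_FLAG_LOCAL) {
          printf("local (method-level) frame\n");
      }
      /* ... and "was a block passed?" becomes a block-handler comparison,
         as in the new rb_block_given_p() in eval.c */
      if (ep[VM_ENV_DATA_INDEX_SPECVAL] == VM_BLOCK_HANDLER_NONE) {
          printf("no block handler passed\n");
      }
      return 0;
  }

  In the diff itself, the same pattern shows up as the
  VM_ENV_FLAGS_SET(th->cfp->ep, VM_FRAME_FLAG_PASSED) call in the
  pass_passed_block_handler helper and as the VM_BLOCK_HANDLER_NONE checks in
  eval.c and proc.c; block handlers are then dispatched with
  switch (vm_block_handler_type(block_handler)) over the iseq / ifunc /
  symbol / proc cases.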


git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@55766 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
ko1 2016-07-28 11:02:30 +00:00
parent 683eafd973
commit 9f60791a04
25 changed files with 1884 additions and 1072 deletions

ChangeLog

@ -1,3 +1,23 @@
Thu Jul 28 19:53:21 2016 Koichi Sasada <ko1@atdot.net>
* vm_core.h: revisit the structure of frame, block and env.
[Bug #12628]
This patch introduce many changes.
* Introduce concept of "Block Handler (BH)" to represent
passed blocks.
* move rb_control_frame_t::flag to ep[0] (as a special local
variable). This flags represents not only frame type, but also
env flags such as escaped.
* rename `rb_block_t` to `struct rb_block`.
* Make Proc, Binding and RubyVM::Env objects wb-protected.
Check [Bug #12628] for more details.
Thu Jul 28 15:05:12 2016 Nobuyoshi Nakada <nobu@ruby-lang.org>
* include/ruby/ruby.h (ruby_fl_type): use __extension__ to get rid

111
compile.c

@ -180,6 +180,7 @@ r_value(VALUE value)
#define debug_compile(msg, v) (v)
#endif
#define LVAR_ERRINFO (1)
/* create new label */
#define NEW_LABEL(l) new_label_body(iseq, (l))
@ -264,6 +265,16 @@ r_value(VALUE value)
} \
} while (0)
#define ADD_GETLOCAL(seq, line, idx, level) \
do { \
ADD_INSN2((seq), (line), getlocal, INT2FIX((idx) + VM_ENV_DATA_SIZE - 1), INT2FIX(level)); \
} while (0)
#define ADD_SETLOCAL(seq, line, idx, level) \
do { \
ADD_INSN2((seq), (line), setlocal, INT2FIX((idx) + VM_ENV_DATA_SIZE - 1), INT2FIX(level)); \
} while (0)
/* add label */
#define ADD_LABEL(seq, label) \
ADD_ELEM((seq), (LINK_ELEMENT *) (label))
@ -646,7 +657,7 @@ rb_iseq_compile_node(rb_iseq_t *iseq, NODE *node)
}
if (iseq->body->type == ISEQ_TYPE_RESCUE || iseq->body->type == ISEQ_TYPE_ENSURE) {
ADD_INSN2(ret, 0, getlocal, INT2FIX(2), INT2FIX(0));
ADD_GETLOCAL(ret, 0, LVAR_ERRINFO, 0);
ADD_INSN1(ret, 0, throw, INT2FIX(0) /* continue throw */ );
}
else {
@ -1176,7 +1187,6 @@ iseq_set_exception_local_table(rb_iseq_t *iseq)
CONST_ID(id_dollar_bang, "#$!");
iseq->body->local_table_size = 1;
iseq->body->local_size = iseq->body->local_table_size + 1;
ids[0] = id_dollar_bang;
iseq->body->local_table = ids;
return COMPILE_OK;
@ -1237,7 +1247,7 @@ get_dyna_var_idx(const rb_iseq_t *iseq, ID id, int *level, int *ls)
}
*level = lv;
*ls = iseq->body->local_size;
*ls = iseq->body->local_table_size;
return idx;
}
@ -1467,10 +1477,10 @@ iseq_set_arguments(rb_iseq_t *iseq, LINK_ANCHOR *optargs, NODE *node_args)
static int
iseq_set_local_table(rb_iseq_t *iseq, const ID *tbl)
{
int size;
unsigned int size;
if (tbl) {
size = (int)*tbl;
size = (unsigned int)*tbl;
tbl++;
}
else {
@ -1482,18 +1492,9 @@ iseq_set_local_table(rb_iseq_t *iseq, const ID *tbl)
MEMCPY(ids, tbl, ID, size);
iseq->body->local_table = ids;
}
iseq->body->local_table_size = size;
iseq->body->local_size = iseq->body->local_table_size = size;
iseq->body->local_size += 1;
/*
if (lfp == dfp ) { // top, class, method
dfp[-1]: svar
else { // block
dfp[-1]: cref
}
*/
debugs("iseq_set_local_table: %d, %d\n", iseq->body->local_size, iseq->body->local_table_size);
debugs("iseq_set_local_table: %u\n", iseq->body->local_table_size);
return COMPILE_OK;
}
@ -4470,7 +4471,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
switch (nd_type(narg)) {
case NODE_ARRAY:
while (narg) {
ADD_INSN2(ret, line, getlocal, INT2FIX(2), INT2FIX(0));
ADD_GETLOCAL(ret, line, LVAR_ERRINFO, 0);
COMPILE(ret, "rescue arg", narg->nd_head);
ADD_INSN1(ret, line, checkmatch, INT2FIX(VM_CHECKMATCH_TYPE_RESCUE));
ADD_INSNL(ret, line, branchif, label_hit);
@ -4480,7 +4481,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
case NODE_SPLAT:
case NODE_ARGSCAT:
case NODE_ARGSPUSH:
ADD_INSN2(ret, line, getlocal, INT2FIX(2), INT2FIX(0));
ADD_GETLOCAL(ret, line, LVAR_ERRINFO, 0);
COMPILE(ret, "rescue/cond splat", narg);
ADD_INSN1(ret, line, checkmatch, INT2FIX(VM_CHECKMATCH_TYPE_RESCUE | VM_CHECKMATCH_ARRAY));
ADD_INSNL(ret, line, branchif, label_hit);
@ -4490,7 +4491,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
}
}
else {
ADD_INSN2(ret, line, getlocal, INT2FIX(2), INT2FIX(0));
ADD_GETLOCAL(ret, line, LVAR_ERRINFO, 0);
ADD_INSN1(ret, line, putobject, rb_eStandardError);
ADD_INSN1(ret, line, checkmatch, INT2FIX(VM_CHECKMATCH_TYPE_RESCUE));
ADD_INSNL(ret, line, branchif, label_hit);
@ -4577,7 +4578,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
case NODE_LASGN:{
ID id = node->nd_vid;
int idx = iseq->body->local_iseq->body->local_size - get_local_var_idx(iseq, id);
int idx = iseq->body->local_iseq->body->local_table_size - get_local_var_idx(iseq, id);
debugs("lvar: %"PRIsVALUE" idx: %d\n", rb_id2str(id), idx);
COMPILE(ret, "rvalue", node->nd_value);
@ -4585,8 +4586,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
if (!poped) {
ADD_INSN(ret, line, dup);
}
ADD_INSN2(ret, line, setlocal, INT2FIX(idx), INT2FIX(get_lvar_level(iseq)));
ADD_SETLOCAL(ret, line, idx, get_lvar_level(iseq));
break;
}
case NODE_DASGN:
@ -4605,8 +4605,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
compile_bug(ERROR_ARGS "NODE_DASGN(_CURR): unknown id (%"PRIsVALUE")",
rb_id2str(node->nd_vid));
}
ADD_INSN2(ret, line, setlocal, INT2FIX(ls - idx), INT2FIX(lv));
ADD_SETLOCAL(ret, line, ls - idx, lv);
break;
}
case NODE_GASGN:{
@ -5192,24 +5191,25 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
/* normal arguments */
for (i = 0; i < liseq->body->param.lead_num; i++) {
int idx = liseq->body->local_size - i;
ADD_INSN2(args, line, getlocal, INT2FIX(idx), INT2FIX(lvar_level));
int idx = liseq->body->local_table_size - i;
ADD_GETLOCAL(args, line, idx, lvar_level);
}
if (liseq->body->param.flags.has_opt) {
/* optional arguments */
int j;
for (j = 0; j < liseq->body->param.opt_num; j++) {
int idx = liseq->body->local_size - (i + j);
ADD_INSN2(args, line, getlocal, INT2FIX(idx), INT2FIX(lvar_level));
int idx = liseq->body->local_table_size - (i + j);
ADD_GETLOCAL(args, line, idx, lvar_level);
}
i += j;
argc = i;
}
if (liseq->body->param.flags.has_rest) {
/* rest argument */
int idx = liseq->body->local_size - liseq->body->param.rest_start;
ADD_INSN2(args, line, getlocal, INT2FIX(idx), INT2FIX(lvar_level));
int idx = liseq->body->local_table_size - liseq->body->param.rest_start;
ADD_GETLOCAL(args, line, idx, lvar_level);
argc = liseq->body->param.rest_start + 1;
flag |= VM_CALL_ARGS_SPLAT;
}
@ -5221,8 +5221,8 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
if (liseq->body->param.flags.has_rest) {
int j;
for (j=0; j<post_len; j++) {
int idx = liseq->body->local_size - (post_start + j);
ADD_INSN2(args, line, getlocal, INT2FIX(idx), INT2FIX(lvar_level));
int idx = liseq->body->local_table_size - (post_start + j);
ADD_GETLOCAL(args, line, idx, lvar_level);
}
ADD_INSN1(args, line, newarray, INT2FIX(j));
ADD_INSN (args, line, concatarray);
@ -5231,21 +5231,22 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
else {
int j;
for (j=0; j<post_len; j++) {
int idx = liseq->body->local_size - (post_start + j);
ADD_INSN2(args, line, getlocal, INT2FIX(idx), INT2FIX(lvar_level));
int idx = liseq->body->local_table_size - (post_start + j);
ADD_GETLOCAL(args, line, idx, lvar_level);
}
argc = post_len + post_start;
}
}
if (liseq->body->param.flags.has_kw) { /* TODO: support keywords */
int local_size = liseq->body->local_size;
int local_size = liseq->body->local_table_size;
argc++;
ADD_INSN1(args, line, putspecialobject, INT2FIX(VM_SPECIAL_OBJECT_VMCORE));
if (liseq->body->param.flags.has_kwrest) {
ADD_INSN2(args, line, getlocal, INT2FIX(liseq->body->local_size - liseq->body->param.keyword->rest_start), INT2FIX(lvar_level));
int idx = liseq->body->local_table_size - liseq->body->param.keyword->rest_start;
ADD_GETLOCAL(args, line, idx, lvar_level);
ADD_SEND (args, line, rb_intern("dup"), INT2FIX(0));
}
else {
@ -5255,7 +5256,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
ID id = liseq->body->param.keyword->table[i];
int idx = local_size - get_local_var_idx(liseq, id);
ADD_INSN1(args, line, putobject, ID2SYM(id));
ADD_INSN2(args, line, getlocal, INT2FIX(idx), INT2FIX(lvar_level));
ADD_GETLOCAL(args, line, idx, lvar_level);
}
ADD_SEND(args, line, id_core_hash_merge_ptr, INT2FIX(i * 2 + 1));
if (liseq->body->param.flags.has_rest) {
@ -5265,7 +5266,9 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
}
}
else if (liseq->body->param.flags.has_kwrest) {
ADD_INSN2(args, line, getlocal, INT2FIX(liseq->body->local_size - liseq->body->param.keyword->rest_start), INT2FIX(lvar_level));
int idx = liseq->body->local_table_size - liseq->body->param.keyword->rest_start;
ADD_GETLOCAL(args, line, idx, lvar_level);
ADD_SEND (args, line, rb_intern("dup"), INT2FIX(0));
if (liseq->body->param.flags.has_rest) {
ADD_INSN1(args, line, newarray, INT2FIX(1));
@ -5406,10 +5409,10 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
case NODE_LVAR:{
if (!poped) {
ID id = node->nd_vid;
int idx = iseq->body->local_iseq->body->local_size - get_local_var_idx(iseq, id);
int idx = iseq->body->local_iseq->body->local_table_size - get_local_var_idx(iseq, id);
debugs("id: %"PRIsVALUE" idx: %d\n", rb_id2str(id), idx);
ADD_INSN2(ret, line, getlocal, INT2FIX(idx), INT2FIX(get_lvar_level(iseq)));
ADD_GETLOCAL(ret, line, idx, get_lvar_level(iseq));
}
break;
}
@ -5422,7 +5425,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
compile_bug(ERROR_ARGS "unknown dvar (%"PRIsVALUE")",
rb_id2str(node->nd_vid));
}
ADD_INSN2(ret, line, getlocal, INT2FIX(ls - idx), INT2FIX(lv));
ADD_GETLOCAL(ret, line, ls - idx, lv);
}
break;
}
@ -5954,7 +5957,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
case NODE_ERRINFO:{
if (!poped) {
if (iseq->body->type == ISEQ_TYPE_RESCUE) {
ADD_INSN2(ret, line, getlocal, INT2FIX(2), INT2FIX(0));
ADD_GETLOCAL(ret, line, LVAR_ERRINFO, 0);
}
else {
const rb_iseq_t *ip = iseq;
@ -5967,7 +5970,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
level++;
}
if (ip) {
ADD_INSN2(ret, line, getlocal, INT2FIX(2), INT2FIX(level));
ADD_GETLOCAL(ret, line, LVAR_ERRINFO, level);
}
else {
ADD_INSN(ret, line, putnil);
@ -6032,10 +6035,10 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
* kw = default_value
* end
*/
int kw_bits_idx = iseq->body->local_size - iseq->body->param.keyword->bits_start;
int kw_bits_idx = iseq->body->local_table_size - iseq->body->param.keyword->bits_start;
int keyword_idx = iseq->body->param.keyword->num;
ADD_INSN2(ret, line, checkkeyword, INT2FIX(kw_bits_idx), INT2FIX(keyword_idx));
ADD_INSN2(ret, line, checkkeyword, INT2FIX(kw_bits_idx + VM_ENV_DATA_SIZE - 1), INT2FIX(keyword_idx));
ADD_INSNL(ret, line, branchif, end_label);
COMPILE_POPED(ret, "keyword default argument", node->nd_body);
ADD_LABEL(ret, end_label);
@ -6779,7 +6782,6 @@ rb_iseq_build_from_ary(rb_iseq_t *iseq, VALUE misc, VALUE locals, VALUE params,
len = RARRAY_LENINT(locals);
iseq->body->local_table_size = len;
iseq->body->local_table = tbl = len > 0 ? (ID *)ALLOC_N(ID, iseq->body->local_table_size) : NULL;
iseq->body->local_size = iseq->body->local_table_size + 1;
for (i = 0; i < len; i++) {
VALUE lv = RARRAY_AREF(locals, i);
@ -6866,11 +6868,11 @@ rb_iseq_build_from_ary(rb_iseq_t *iseq, VALUE misc, VALUE locals, VALUE params,
/* for parser */
int
rb_dvar_defined(ID id, const rb_block_t *base_block)
rb_dvar_defined(ID id, const struct rb_block *base_block)
{
const rb_iseq_t *iseq;
if (base_block && (iseq = base_block->iseq)) {
if (base_block && (iseq = vm_block_iseq(base_block)) != NULL) {
while (iseq->body->type == ISEQ_TYPE_BLOCK ||
iseq->body->type == ISEQ_TYPE_RESCUE ||
iseq->body->type == ISEQ_TYPE_ENSURE ||
@ -6891,13 +6893,13 @@ rb_dvar_defined(ID id, const rb_block_t *base_block)
}
int
rb_local_defined(ID id, const rb_block_t *base_block)
rb_local_defined(ID id, const struct rb_block *base_block)
{
const rb_iseq_t *iseq;
if (base_block && base_block->iseq) {
if (base_block && (iseq = vm_block_iseq(base_block)) != NULL) {
unsigned int i;
iseq = base_block->iseq->body->local_iseq;
iseq = iseq->body->local_iseq;
for (i=0; i<iseq->body->local_table_size; i++) {
if (iseq->body->local_table[i] == id) {
@ -6975,7 +6977,7 @@ for_self_aset(rb_iseq_t *iseq, LINK_ANCHOR *ret, VALUE a)
iseq->body->param.lead_num = 1;
iseq->body->param.size = 1;
ADD_INSN2(ret, line, getlocal, INT2FIX(numberof(vars)-0), INT2FIX(0));
ADD_GETLOCAL(ret, line, numberof(vars)-1, 0);
ADD_INSN1(ret, line, putobject, args->arg);
ADD_INSN1(ret, line, opt_call_c_function, (VALUE)args->func);
ADD_INSN(ret, line, pop);
@ -7415,7 +7417,7 @@ ibf_load_line_info_table(const struct ibf_load *load, const struct rb_iseq_const
static ID *
ibf_dump_local_table(struct ibf_dump *dump, const rb_iseq_t *iseq)
{
const int size = iseq->body->local_size - 1;
const int size = iseq->body->local_table_size;
ID *table = ALLOCA_N(ID, size);
int i;
@ -7429,7 +7431,7 @@ ibf_dump_local_table(struct ibf_dump *dump, const rb_iseq_t *iseq)
static ID *
ibf_load_local_table(const struct ibf_load *load, const struct rb_iseq_constant_body *body)
{
const int size = body->local_size - 1;
const int size = body->local_table_size;
if (size > 0) {
ID *table = IBF_R(body->local_table, ID, size);
@ -7596,7 +7598,6 @@ ibf_load_iseq_each(const struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t of
/* memcpy(load_body, load->buff + offset, sizeof(*load_body)); */
load_body->type = body->type;
load_body->stack_max = body->stack_max;
load_body->local_size = body->local_size;
load_body->iseq_size = body->iseq_size;
load_body->param = body->param;
load_body->local_table_size = body->local_table_size;

54
cont.c

@ -174,6 +174,7 @@ cont_mark(void *ptr)
if (ptr) {
rb_context_t *cont = ptr;
rb_gc_mark(cont->value);
rb_thread_mark(&cont->saved_thread);
rb_gc_mark(cont->saved_thread.self);
@ -490,7 +491,7 @@ cont_capture(volatile int *stat)
cont->vm_stack = ALLOC_N(VALUE, th->stack_size);
MEMCPY(cont->vm_stack, th->stack, VALUE, th->stack_size);
#endif
cont->saved_thread.stack = 0;
cont->saved_thread.stack = NULL;
cont_save_machine_stack(th, cont);
@ -539,7 +540,7 @@ cont_restore_thread(rb_context_t *cont)
th->fiber = sth->fiber;
fib = th->fiber ? th->fiber : th->root_fiber;
if (fib) {
if (fib && fib->cont.saved_thread.stack) {
th->stack_size = fib->cont.saved_thread.stack_size;
th->stack = fib->cont.saved_thread.stack;
}
@ -554,6 +555,7 @@ cont_restore_thread(rb_context_t *cont)
else {
/* fiber */
th->stack = sth->stack;
sth->stack = NULL;
th->stack_size = sth->stack_size;
th->local_storage = sth->local_storage;
th->local_storage_recursive_hash = sth->local_storage_recursive_hash;
@ -573,7 +575,6 @@ cont_restore_thread(rb_context_t *cont)
th->root_lep = sth->root_lep;
th->root_svar = sth->root_svar;
th->ensure_list = sth->ensure_list;
}
#if FIBER_USE_NATIVE
@ -727,7 +728,6 @@ fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)
rb_bug("non_root_fiber->context.uc_stac.ss_sp should not be NULL");
}
#endif
/* swap machine context */
#ifdef _WIN32
SwitchToFiber(newfib->fib_handle);
@ -1084,7 +1084,6 @@ rb_cont_call(int argc, VALUE *argv, VALUE contval)
/* restore `tracing' context. see [Feature #4347] */
th->trace_arg = cont->saved_thread.trace_arg;
cont_restore_0(cont, &contval);
return Qnil; /* unreachable */
}
@ -1190,6 +1189,18 @@ fiber_t_alloc(VALUE fibval)
return fib;
}
rb_control_frame_t *
rb_vm_push_frame(rb_thread_t *th,
const rb_iseq_t *iseq,
VALUE type,
VALUE self,
VALUE specval,
VALUE cref_or_me,
const VALUE *pc,
VALUE *sp,
int local_size,
int stack_max);
static VALUE
fiber_init(VALUE fibval, VALUE proc)
{
@ -1201,27 +1212,24 @@ fiber_init(VALUE fibval, VALUE proc)
/* initialize cont */
cont->vm_stack = 0;
th->stack = 0;
th->stack = NULL;
th->stack_size = 0;
th->stack_size = cth->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);
th->stack = ALLOC_N(VALUE, th->stack_size);
th->cfp = (void *)(th->stack + th->stack_size);
th->cfp--;
th->cfp->pc = 0;
th->cfp->sp = th->stack + 2;
#if VM_DEBUG_BP_CHECK
th->cfp->bp_check = 0;
#endif
th->cfp->ep = th->stack + 1;
th->cfp->ep[ 0] = VM_ENVVAL_BLOCK_PTR(0);
th->cfp->ep[-1] = 0;
th->cfp->self = Qnil;
th->cfp->flag = VM_FRAME_MAGIC_DUMMY | VM_FRAME_FLAG_FINISH;
th->cfp->iseq = 0;
th->cfp->proc = 0;
th->cfp->block_iseq = 0;
rb_vm_push_frame(th,
NULL,
VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
Qnil, /* self */
VM_BLOCK_HANDLER_NONE,
0, /* specval */
NULL, /* pc */
th->stack, /* sp */
0, /* local_size */
0);
th->tag = 0;
th->local_storage = st_init_numtable();
th->local_storage_recursive_hash = Qnil;
@ -1268,12 +1276,12 @@ rb_fiber_start(void)
argv = (argc = cont->argc) > 1 ? RARRAY_CONST_PTR(args) : &args;
cont->value = Qnil;
th->errinfo = Qnil;
th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
th->root_lep = rb_vm_ep_local_ep(vm_block_ep(&proc->block));
th->root_svar = Qfalse;
fib->status = RUNNING;
EXEC_EVENT_HOOK(th, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, Qnil);
cont->value = rb_vm_invoke_proc(th, proc, argc, argv, 0);
cont->value = rb_vm_invoke_proc(th, proc, argc, argv, VM_BLOCK_HANDLER_NONE);
}
TH_POP_TAG();

35
eval.c

@ -759,12 +759,11 @@ int
rb_block_given_p(void)
{
rb_thread_t *th = GET_THREAD();
if (rb_vm_control_frame_block_ptr(th->cfp)) {
return TRUE;
if (rb_vm_frame_block_handler(th->cfp) == VM_BLOCK_HANDLER_NONE) {
return FALSE;
}
else {
return FALSE;
return TRUE;
}
}
@ -1236,15 +1235,15 @@ rb_mod_refine(VALUE module, VALUE klass)
id_refined_class, id_defined_at;
VALUE refinements, activated_refinements;
rb_thread_t *th = GET_THREAD();
rb_block_t *block = rb_vm_control_frame_block_ptr(th->cfp);
VALUE block_handler = rb_vm_frame_block_handler(th->cfp);
if (!block) {
rb_raise(rb_eArgError, "no block given");
if (block_handler == VM_BLOCK_HANDLER_NONE) {
rb_raise(rb_eArgError, "no block given");
}
if (block->proc) {
rb_raise(rb_eArgError,
"can't pass a Proc as a block to Module#refine");
if (vm_block_handler_type(block_handler) != block_handler_type_iseq) {
rb_raise(rb_eArgError, "can't pass a Proc as a block to Module#refine");
}
Check_Type(klass, T_CLASS);
CONST_ID(id_refinements, "__refinements__");
refinements = rb_attr_get(module, id_refinements);
@ -1315,7 +1314,7 @@ mod_using(VALUE self, VALUE module)
void
rb_obj_call_init(VALUE obj, int argc, const VALUE *argv)
{
PASS_PASSED_BLOCK();
PASS_PASSED_BLOCK_HANDLER();
rb_funcall2(obj, idInitialize, argc, argv);
}
@ -1448,7 +1447,7 @@ top_using(VALUE self, VALUE module)
return self;
}
static VALUE *
static const VALUE *
errinfo_place(rb_thread_t *th)
{
rb_control_frame_t *cfp = th->cfp;
@ -1457,12 +1456,12 @@ errinfo_place(rb_thread_t *th)
while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp)) {
if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
if (cfp->iseq->body->type == ISEQ_TYPE_RESCUE) {
return &cfp->ep[-2];
return &cfp->ep[VM_ENV_INDEX_LAST_LVAR];
}
else if (cfp->iseq->body->type == ISEQ_TYPE_ENSURE &&
!THROW_DATA_P(cfp->ep[-2]) &&
!FIXNUM_P(cfp->ep[-2])) {
return &cfp->ep[-2];
!THROW_DATA_P(cfp->ep[VM_ENV_INDEX_LAST_LVAR]) &&
!FIXNUM_P(cfp->ep[VM_ENV_INDEX_LAST_LVAR])) {
return &cfp->ep[VM_ENV_INDEX_LAST_LVAR];
}
}
cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
@ -1473,7 +1472,7 @@ errinfo_place(rb_thread_t *th)
static VALUE
get_thread_errinfo(rb_thread_t *th)
{
VALUE *ptr = errinfo_place(th);
const VALUE *ptr = errinfo_place(th);
if (ptr) {
return *ptr;
}
@ -1502,7 +1501,7 @@ errinfo_setter(VALUE val, ID id, VALUE *var)
rb_raise(rb_eTypeError, "assigning non-exception to $!");
}
else {
VALUE *ptr = errinfo_place(GET_THREAD());
const VALUE *ptr = errinfo_place(GET_THREAD());
if (ptr) {
*ptr = val;
}

eval_intern.h

@ -5,13 +5,23 @@
#include "vm_core.h"
static inline void
pass_passed_block(rb_thread_t *th)
vm_passed_block_handler_set(rb_thread_t *th, VALUE block_handler)
{
th->passed_block = rb_vm_control_frame_block_ptr(th->cfp);
th->cfp->flag |= VM_FRAME_FLAG_PASSED;
VM_ASSERT(vm_block_handler_verify(block_handler));
th->passed_block_handler = block_handler;
}
#define PASS_PASSED_BLOCK_TH(th) pass_passed_block(th)
#define PASS_PASSED_BLOCK() pass_passed_block(GET_THREAD())
static inline void
pass_passed_block_handler(rb_thread_t *th)
{
VALUE block_handler = rb_vm_frame_block_handler(th->cfp);
VM_ASSERT(vm_block_handler_verify(block_handler));
vm_passed_block_handler_set(th, block_handler);
VM_ENV_FLAGS_SET(th->cfp->ep, VM_FRAME_FLAG_PASSED);
}
#define PASS_PASSED_BLOCK_HANDLER_TH(th) pass_passed_block_handler(th)
#define PASS_PASSED_BLOCK_HANDLER() pass_passed_block_handler(GET_THREAD())
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
@ -277,7 +287,7 @@ NORETURN(void rb_raise_method_missing(rb_thread_t *th, int argc, const VALUE *ar
VALUE rb_vm_make_jump_tag_but_local_jump(int state, VALUE val);
rb_cref_t *rb_vm_cref(void);
rb_cref_t *rb_vm_cref_replace_with_duplicated_cref(void);
VALUE rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg, const rb_block_t *blockptr, VALUE filename);
VALUE rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg, VALUE block_handler, VALUE filename);
void rb_vm_set_progname(VALUE filename);
void rb_thread_terminate_all(void);
VALUE rb_vm_cbase(void);

13
gc.c

@ -9158,19 +9158,6 @@ rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
}
}
static const rb_iseq_t *
vm_proc_iseq(VALUE procval)
{
rb_proc_t *proc = RTYPEDDATA_DATA(procval);
if (RUBY_VM_NORMAL_ISEQ_P(proc->block.iseq)) {
return proc->block.iseq;
}
else {
return NULL;
}
}
const char *
rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
{

insns.def

@ -58,7 +58,7 @@ getlocal
(VALUE val)
{
int i, lev = (int)level;
VALUE *ep = GET_EP();
const VALUE *ep = GET_EP();
/* optimized insns generated for level == (0|1) in defs/opt_operand.def */
for (i = 0; i < lev; i++) {
@ -81,13 +81,13 @@ setlocal
()
{
int i, lev = (int)level;
VALUE *ep = GET_EP();
const VALUE *ep = GET_EP();
/* optimized insns generated for level == (0|1) in defs/opt_operand.def */
for (i = 0; i < lev; i++) {
ep = GET_PREV_EP(ep);
}
*(ep - idx) = val;
vm_env_write(ep, -(int)idx, val);
}
/**
@ -790,7 +790,7 @@ checkkeyword
ret = (bits & (0x01 << keyword_index)) ? Qfalse : Qtrue;
}
else {
assert(RB_TYPE_P(kw_bits, T_HASH));
VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
ret = rb_hash_has_key(kw_bits, INT2FIX(keyword_index)) ? Qfalse : Qtrue;
}
}
@ -932,11 +932,11 @@ defineclass
rb_iseq_check(class_iseq);
/* enter scope */
vm_push_frame(th, class_iseq, VM_FRAME_MAGIC_CLASS, klass,
VM_ENVVAL_BLOCK_PTR(GET_BLOCK_PTR()),
vm_push_frame(th, class_iseq, VM_FRAME_MAGIC_CLASS | VM_ENV_FLAG_LOCAL, klass,
GET_BLOCK_HANDLER(),
(VALUE)vm_cref_push(th, klass, NULL, FALSE),
class_iseq->body->iseq_encoded, GET_SP(),
class_iseq->body->local_size,
class_iseq->body->local_table_size,
class_iseq->body->stack_max);
RESTORE_REGS();
NEXT_INSN();
@ -1059,7 +1059,7 @@ opt_send_without_block
(VALUE val) // inc += -ci->orig_argc;
{
struct rb_calling_info calling;
calling.blockptr = NULL;
calling.block_handler = VM_BLOCK_HANDLER_NONE;
vm_search_method(ci, cc, calling.recv = TOPN(calling.argc = ci->orig_argc));
CALL_METHOD(&calling, ci, cc);
}
@ -1097,7 +1097,7 @@ invokeblock
{
struct rb_calling_info calling;
calling.argc = ci->orig_argc;
calling.blockptr = NULL;
calling.block_handler = VM_BLOCK_HANDLER_NONE;
calling.recv = GET_SELF();
val = vm_invoke_block(th, GET_CFP(), &calling, ci);

internal.h

@ -894,9 +894,9 @@ int rb_class_has_methods(VALUE c);
VALUE rb_invcmp(VALUE, VALUE);
/* compile.c */
struct rb_block_struct;
int rb_dvar_defined(ID, const struct rb_block_struct *);
int rb_local_defined(ID, const struct rb_block_struct *);
struct rb_block;
int rb_dvar_defined(ID, const struct rb_block *);
int rb_local_defined(ID, const struct rb_block *);
CONSTFUNC(const char * rb_insns_name(int i));
VALUE rb_insns_name_array(void);
@ -1212,7 +1212,7 @@ struct RBasicRaw {
#endif
VALUE rb_parser_get_yydebug(VALUE);
VALUE rb_parser_set_yydebug(VALUE, VALUE);
VALUE rb_parser_set_context(VALUE, const struct rb_block_struct *, int);
VALUE rb_parser_set_context(VALUE, const struct rb_block *, int);
void *rb_parser_load_file(VALUE parser, VALUE name);
int rb_is_const_name(VALUE name);
int rb_is_class_name(VALUE name);
@ -1372,7 +1372,7 @@ VALUE rb_enc_str_scrub(rb_encoding *enc, VALUE str, VALUE repl);
#define is_ascii_string(str) (rb_enc_str_coderange(str) == ENC_CODERANGE_7BIT)
#define is_broken_string(str) (rb_enc_str_coderange(str) == ENC_CODERANGE_BROKEN)
size_t rb_str_memsize(VALUE);
VALUE rb_sym_proc_call(VALUE args, VALUE sym, int argc, const VALUE *argv, VALUE passed_proc);
VALUE rb_sym_proc_call(ID mid, int argc, const VALUE *argv, VALUE passed_proc);
VALUE rb_sym_to_proc(VALUE sym);
/* symbol.c */

21
iseq.c

@ -607,11 +607,11 @@ rb_iseq_load(VALUE data, VALUE parent, VALUE opt)
}
rb_iseq_t *
rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE absolute_path, VALUE line, rb_block_t *base_block, VALUE opt)
rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE absolute_path, VALUE line, const struct rb_block *base_block, VALUE opt)
{
rb_thread_t *th = GET_THREAD();
rb_iseq_t *iseq = NULL;
const rb_iseq_t *const parent = base_block ? base_block->iseq : NULL;
const rb_iseq_t *const parent = base_block ? vm_block_iseq(base_block) : NULL;
rb_compile_option_t option;
const enum iseq_type type = parent ? ISEQ_TYPE_EVAL : ISEQ_TYPE_TOP;
#if !defined(__GNUC__) || (__GNUC__ == 4 && __GNUC_MINOR__ == 8)
@ -661,7 +661,7 @@ rb_iseq_compile(VALUE src, VALUE file, VALUE line)
}
rb_iseq_t *
rb_iseq_compile_on_base(VALUE src, VALUE file, VALUE line, rb_block_t *base_block)
rb_iseq_compile_on_base(VALUE src, VALUE file, VALUE line, const struct rb_block *base_block)
{
return rb_iseq_compile_with_option(src, file, Qnil, line, base_block, Qnil);
}
@ -1263,11 +1263,14 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
if (pnop) {
const rb_iseq_t *diseq = iseq;
VALUE level = *pnop, i;
ID lid;
for (i = 0; i < level; i++) {
diseq = diseq->body->parent_iseq;
}
ret = id_to_name(diseq->body->local_table[diseq->body->local_size - op], INT2FIX('*'));
lid = diseq->body->local_table[diseq->body->local_table_size +
VM_ENV_DATA_SIZE - 1 - op];
ret = id_to_name(lid, INT2FIX('*'));
}
else {
ret = rb_sprintf("%"PRIuVALUE, op);
@ -1520,7 +1523,7 @@ rb_iseq_disasm(const rb_iseq_t *iseq)
rb_str_catf(str,
"local table (size: %d, argc: %d "
"[opts: %d, rest: %d, post: %d, block: %d, kw: %d@%d, kwrest: %d])\n",
iseq->body->local_size,
iseq->body->local_table_size,
iseq->body->param.lead_num,
iseq->body->param.opt_num,
iseq->body->param.flags.has_rest ? iseq->body->param.rest_start : -1,
@ -1553,7 +1556,7 @@ rb_iseq_disasm(const rb_iseq_t *iseq)
(iseq->body->param.flags.has_post && iseq->body->param.post_start <= li && li < iseq->body->param.post_start + iseq->body->param.post_num) ? "Post" : "",
(iseq->body->param.flags.has_block && iseq->body->param.block_start == li) ? "Block" : "");
rb_str_catf(str, "[%2d] ", iseq->body->local_size - i);
rb_str_catf(str, "[%2d] ", iseq->body->local_table_size - i);
width = RSTRING_LEN(str) + 11;
if (name)
rb_str_append(str, name);
@ -1646,9 +1649,7 @@ iseqw_s_of(VALUE klass, VALUE body)
rb_secure(1);
if (rb_obj_is_proc(body)) {
rb_proc_t *proc;
GetProcPtr(body, proc);
iseq = proc->block.iseq;
iseq = vm_proc_iseq(body);
if (!RUBY_VM_NORMAL_ISEQ_P(iseq)) {
iseq = NULL;
@ -2052,7 +2053,7 @@ iseq_data_to_ary(const rb_iseq_t *iseq)
st_free_table(labels_table);
rb_hash_aset(misc, ID2SYM(rb_intern("arg_size")), INT2FIX(iseq->body->param.size));
rb_hash_aset(misc, ID2SYM(rb_intern("local_size")), INT2FIX(iseq->body->local_size));
rb_hash_aset(misc, ID2SYM(rb_intern("local_size")), INT2FIX(iseq->body->local_table_size));
rb_hash_aset(misc, ID2SYM(rb_intern("stack_max")), INT2FIX(iseq->body->stack_max));
/* TODO: compatibility issue */

2
load.c

@ -997,7 +997,7 @@ rb_require_internal(VALUE fname, int safe)
case 's':
handle = (long)rb_vm_call_cfunc(rb_vm_top_self(), load_ext,
path, 0, path);
path, VM_BLOCK_HANDLER_NONE, path);
rb_ary_push(ruby_dln_librefs, LONG2NUM(handle));
break;
}

parse.y

@ -228,8 +228,6 @@ vtable_included(const struct vtable * tbl, ID id)
return 0;
}
typedef struct rb_block_struct rb_block_t;
typedef struct token_info {
const char *token;
int linenum;
@ -316,7 +314,7 @@ struct parser_params {
VALUE error_buffer;
VALUE debug_lines;
VALUE coverage;
const rb_block_t *base_block;
const struct rb_block *base_block;
#else
/* Ripper only */
@ -10893,7 +10891,7 @@ rb_parser_new(void)
}
VALUE
rb_parser_set_context(VALUE vparser, const rb_block_t *base, int main)
rb_parser_set_context(VALUE vparser, const struct rb_block *base, int main)
{
struct parser_params *parser;

521
proc.c

@ -47,35 +47,51 @@ static int method_min_max_arity(VALUE, int *max);
/* Proc */
#define IS_METHOD_PROC_IFUNC(ifunc) ((ifunc)->func == bmcall)
#define IS_METHOD_PROC_ISEQ(iseq) \
(RUBY_VM_IFUNC_P(iseq) && \
IS_METHOD_PROC_IFUNC((struct vm_ifunc *)(iseq)))
static VALUE proc_to_s_(VALUE self, const rb_proc_t *proc);
static void
block_mark(const struct rb_block *block)
{
switch (vm_block_type(block)) {
case block_type_iseq:
case block_type_ifunc:
{
const struct rb_captured_block *captured = &block->as.captured;
RUBY_MARK_UNLESS_NULL(captured->self);
RUBY_MARK_UNLESS_NULL((VALUE)captured->code.val);
if (captured->ep && captured->ep[VM_ENV_DATA_INDEX_ENV] != Qundef /* cfunc_proc_t */) {
RUBY_MARK_UNLESS_NULL(VM_ENV_ENVVAL(captured->ep));
}
}
break;
case block_type_symbol:
RUBY_MARK_UNLESS_NULL(block->as.symbol);
break;
case block_type_proc:
RUBY_MARK_UNLESS_NULL(block->as.proc);
break;
}
}
static void
proc_mark(void *ptr)
{
rb_proc_t *proc = ptr;
RUBY_MARK_UNLESS_NULL(proc->block.proc);
RUBY_MARK_UNLESS_NULL(proc->block.self);
if (proc->block.ep) {
RUBY_MARK_UNLESS_NULL(rb_vm_proc_envval(proc));
}
if (proc->block.iseq && RUBY_VM_IFUNC_P(proc->block.iseq)) {
rb_gc_mark((VALUE)(proc->block.iseq));
}
block_mark(&proc->block);
RUBY_MARK_LEAVE("proc");
}
typedef struct {
rb_proc_t basic;
VALUE env[3]; /* me, specval, envval */
VALUE env[VM_ENV_DATA_SIZE + 1]; /* ..., envval */
} cfunc_proc_t;
static size_t
proc_memsize(const void *ptr)
{
const rb_proc_t *proc = ptr;
if (proc->block.ep == ((const cfunc_proc_t *)ptr)->env+1)
if (proc->block.as.captured.ep == ((const cfunc_proc_t *)ptr)->env+1)
return sizeof(cfunc_proc_t);
return sizeof(rb_proc_t);
}
@ -87,7 +103,7 @@ static const rb_data_type_t proc_data_type = {
RUBY_TYPED_DEFAULT_FREE,
proc_memsize,
},
0, 0, RUBY_TYPED_FREE_IMMEDIATELY
0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
};
VALUE
@ -108,21 +124,20 @@ rb_obj_is_proc(VALUE proc)
}
}
VALUE rb_proc_create(VALUE klass, const struct rb_block *block,
int8_t safe_level, int8_t is_from_method, int8_t is_lambda);
/* :nodoc: */
static VALUE
proc_dup(VALUE self)
{
VALUE procval;
rb_proc_t *src;
rb_proc_t *dst;
GetProcPtr(self, src);
procval = rb_proc_alloc(rb_cProc);
GetProcPtr(procval, dst);
*dst = *src;
dst->block.proc = procval;
procval = rb_proc_create(rb_cProc, &src->block,
src->safe_level, src->is_from_method, src->is_lambda);
RB_GC_GUARD(self); /* for: body = proc_dup(body) */
return procval;
}
@ -266,7 +281,8 @@ binding_mark(void *ptr)
RUBY_MARK_ENTER("binding");
RUBY_MARK_UNLESS_NULL(bind->env);
block_mark(&bind->block);
RUBY_MARK_UNLESS_NULL(bind->path);
RUBY_MARK_LEAVE("binding");
@ -305,7 +321,7 @@ binding_dup(VALUE self)
rb_binding_t *src, *dst;
GetBindingPtr(self, src);
GetBindingPtr(bindval, dst);
dst->env = src->env;
dst->block = src->block;
dst->path = src->path;
dst->first_lineno = src->first_lineno;
return bindval;
@ -375,7 +391,7 @@ bind_eval(int argc, VALUE *argv, VALUE bindval)
return rb_f_eval(argc+1, args, Qnil /* self will be searched in eval */);
}
static VALUE *
static const VALUE *
get_local_variable_ptr(VALUE envval, ID lid)
{
rb_env_t *env;
@ -385,9 +401,9 @@ get_local_variable_ptr(VALUE envval, ID lid)
unsigned int i;
GetEnvPtr(envval, env);
iseq = env->block.iseq;
iseq = env->iseq;
if (RUBY_VM_NORMAL_ISEQ_P(iseq)) {
if (iseq && RUBY_VM_NORMAL_ISEQ_P(iseq)) {
for (i=0; i<iseq->body->local_table_size; i++) {
if (iseq->body->local_table[i] == lid) {
return &env->env[i];
@ -454,7 +470,7 @@ bind_local_variables(VALUE bindval)
const rb_env_t *env;
GetBindingPtr(bindval, bind);
GetEnvPtr(bind->env, env);
GetEnvPtr(VM_ENV_ENVVAL(vm_block_ep(&bind->block)), env);
return rb_vm_env_local_variables(env);
}
@ -487,7 +503,7 @@ bind_local_variable_get(VALUE bindval, VALUE sym)
GetBindingPtr(bindval, bind);
if ((ptr = get_local_variable_ptr(bind->env, lid)) == NULL) {
if ((ptr = get_local_variable_ptr(VM_ENV_ENVVAL(vm_block_ep(&bind->block)), lid)) == NULL) {
sym = ID2SYM(lid);
undefined:
rb_name_err_raise("local variable `%1$s' not defined for %2$s",
@ -526,17 +542,20 @@ bind_local_variable_set(VALUE bindval, VALUE sym, VALUE val)
{
ID lid = check_local_id(bindval, &sym);
rb_binding_t *bind;
VALUE *ptr;
const VALUE *ptr;
VALUE envval;
if (!lid) lid = rb_intern_str(sym);
GetBindingPtr(bindval, bind);
if ((ptr = get_local_variable_ptr(bind->env, lid)) == NULL) {
envval = VM_ENV_ENVVAL(vm_block_ep(&bind->block));
if ((ptr = get_local_variable_ptr(envval, lid)) == NULL) {
/* not found. create new env */
ptr = rb_binding_add_dynavars(bind, 1, &lid);
envval = VM_ENV_ENVVAL(vm_block_ep(&bind->block));
}
*ptr = val;
RB_OBJ_WRITE(envval, ptr, val);
return val;
}
@ -567,7 +586,7 @@ bind_local_variable_defined_p(VALUE bindval, VALUE sym)
if (!lid) return Qfalse;
GetBindingPtr(bindval, bind);
return get_local_variable_ptr(bind->env, lid) ? Qtrue : Qfalse;
return get_local_variable_ptr(VM_ENV_ENVVAL(vm_block_ep(&bind->block)), lid) ? Qtrue : Qfalse;
}
/*
@ -580,11 +599,8 @@ static VALUE
bind_receiver(VALUE bindval)
{
const rb_binding_t *bind;
const rb_env_t *env;
GetBindingPtr(bindval, bind);
GetEnvPtr(bind->env, env);
return env->block.self;
return vm_block_self(&bind->block);
}
static VALUE
@ -593,11 +609,19 @@ cfunc_proc_new(VALUE klass, VALUE ifunc, int8_t is_lambda)
rb_proc_t *proc;
cfunc_proc_t *sproc;
VALUE procval = TypedData_Make_Struct(klass, cfunc_proc_t, &proc_data_type, sproc);
sproc->env[1] = VM_ENVVAL_BLOCK_PTR(0);
VALUE *ep;
proc = &sproc->basic;
proc->block.ep = sproc->env+1;
proc->block.iseq = (rb_iseq_t *)ifunc;
proc->block.proc = procval;
vm_block_type_set(&proc->block, block_type_ifunc);
*(VALUE **)&proc->block.as.captured.ep = ep = sproc->env + VM_ENV_DATA_SIZE-1;
ep[VM_ENV_DATA_INDEX_FLAGS] = VM_FRAME_MAGIC_IFUNC | VM_ENV_FLAG_LOCAL | VM_ENV_FLAG_ESCAPED;
ep[VM_ENV_DATA_INDEX_ME_CREF] = Qfalse;
ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_BLOCK_HANDLER_NONE;
ep[VM_ENV_DATA_INDEX_ENV] = Qundef; /* envval */
/* self? */
RB_OBJ_WRITE(procval, &proc->block.as.captured.code.ifunc, ifunc);
proc->is_lambda = is_lambda;
return procval;
}
@ -605,7 +629,13 @@ cfunc_proc_new(VALUE klass, VALUE ifunc, int8_t is_lambda)
static VALUE
sym_proc_new(VALUE klass, VALUE sym)
{
return cfunc_proc_new(klass, sym, 0);
VALUE procval = rb_proc_alloc(klass);
rb_proc_t *proc;
GetProcPtr(procval, proc);
vm_block_type_set(&proc->block, block_type_symbol);
RB_OBJ_WRITE(procval, &proc->block.as.symbol, sym);
return procval;
}
VALUE
@ -625,16 +655,23 @@ static const char proc_without_block[] = "tried to create Proc object without a
static VALUE
proc_new(VALUE klass, int8_t is_lambda)
{
VALUE procval = Qnil;
VALUE procval;
rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = th->cfp;
rb_block_t *block;
VALUE block_handler;
if (!(block = rb_vm_control_frame_block_ptr(cfp))) {
if ((block_handler = rb_vm_frame_block_handler(cfp)) == VM_BLOCK_HANDLER_NONE) {
#if !PROC_NEW_REQUIRES_BLOCK
cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
if ((block = rb_vm_control_frame_block_ptr(cfp)) != 0) {
if ((block_handler = rb_vm_frame_block_handler(cfp)) != VM_BLOCK_HANDLER_NONE) {
const VALUE *lep = rb_vm_ep_local_ep(cfp->ep);
if (VM_ENV_ESCAPED_P(lep)) {
procval = VM_ENV_PROCVAL(lep);
goto return_existing_proc;
}
if (is_lambda) {
rb_warn(proc_without_block);
}
@ -647,13 +684,13 @@ proc_new(VALUE klass, int8_t is_lambda)
}
}
procval = block->proc;
/* block is in cf */
switch (vm_block_handler_type(block_handler)) {
case block_handler_type_proc:
procval = VM_BH_TO_PROC(block_handler);
if (procval) {
if (SYMBOL_P(procval)) {
return (klass != rb_cProc) ? sym_proc_new(klass, procval) : rb_sym_to_proc(procval);
}
else if (RBASIC_CLASS(procval) == klass) {
return_existing_proc:
if (RBASIC_CLASS(procval) == klass) {
return procval;
}
else {
@ -661,10 +698,20 @@ proc_new(VALUE klass, int8_t is_lambda)
RBASIC_SET_CLASS(newprocval, klass);
return newprocval;
}
}
break;
procval = rb_vm_make_proc_lambda(th, block, klass, is_lambda);
return procval;
case block_handler_type_symbol:
return (klass != rb_cProc) ?
sym_proc_new(klass, VM_BH_TO_SYMBOL(block_handler)) :
rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
break;
case block_handler_type_ifunc:
case block_handler_type_iseq:
return rb_vm_make_proc_lambda(th, VM_BH_TO_CAPT_BLOCK(block_handler), klass, is_lambda);
}
VM_UNREACHABLE(proc_new);
return Qnil;
}
/*
@ -777,27 +824,7 @@ rb_block_lambda(void)
static VALUE
proc_call(int argc, VALUE *argv, VALUE procval)
{
VALUE vret;
const rb_block_t *blockptr = 0;
const rb_iseq_t *iseq;
rb_proc_t *proc;
VALUE passed_procval;
GetProcPtr(procval, proc);
iseq = proc->block.iseq;
if (RUBY_VM_IFUNC_P(iseq) || iseq->body->param.flags.has_block) {
if (rb_block_given_p()) {
rb_proc_t *passed_proc;
passed_procval = rb_block_proc();
GetProcPtr(passed_procval, passed_proc);
blockptr = &passed_proc->block;
}
}
vret = rb_vm_invoke_proc(GET_THREAD(), proc, argc, argv, blockptr);
RB_GC_GUARD(procval);
RB_GC_GUARD(passed_procval);
return vret;
/* removed */
}
#endif
@ -815,44 +842,35 @@ check_argc(long argc)
#define check_argc(argc) (argc)
#endif
static rb_block_t *
passed_block(VALUE pass_procval)
{
if (!NIL_P(pass_procval)) {
rb_proc_t *pass_proc;
if (SYMBOL_P(pass_procval)) {
pass_procval = sym_proc_new(rb_cProc, pass_procval);
}
GetProcPtr(pass_procval, pass_proc);
return &pass_proc->block;
}
return 0;
}
VALUE
rb_proc_call(VALUE self, VALUE args)
{
VALUE vret;
rb_proc_t *proc;
GetProcPtr(self, proc);
vret = rb_vm_invoke_proc(GET_THREAD(), proc, check_argc(RARRAY_LEN(args)), RARRAY_CONST_PTR(args), 0);
vret = rb_vm_invoke_proc(GET_THREAD(), proc,
check_argc(RARRAY_LEN(args)), RARRAY_CONST_PTR(args),
VM_BLOCK_HANDLER_NONE);
RB_GC_GUARD(self);
RB_GC_GUARD(args);
return vret;
}
VALUE
rb_proc_call_with_block(VALUE self, int argc, const VALUE *argv, VALUE pass_procval)
static VALUE
proc_to_block_handler(VALUE procval)
{
return NIL_P(procval) ? VM_BLOCK_HANDLER_NONE : procval;
}
VALUE
rb_proc_call_with_block(VALUE self, int argc, const VALUE *argv, VALUE passed_procval)
{
rb_thread_t *th = GET_THREAD();
VALUE vret;
rb_proc_t *proc;
rb_block_t *block = 0;
GetProcPtr(self, proc);
block = passed_block(pass_procval);
vret = rb_vm_invoke_proc(GET_THREAD(), proc, argc, argv, block);
vret = rb_vm_invoke_proc(th, proc, argc, argv, proc_to_block_handler(passed_procval));
RB_GC_GUARD(self);
RB_GC_GUARD(pass_procval);
return vret;
}
@ -916,21 +934,24 @@ rb_iseq_min_max_arity(const rb_iseq_t *iseq, int *max)
}
static int
rb_block_min_max_arity(rb_block_t *block, int *max)
rb_block_min_max_arity(const struct rb_block *block, int *max)
{
const rb_iseq_t *iseq = block->iseq;
if (iseq) {
if (RUBY_VM_NORMAL_ISEQ_P(iseq)) {
return rb_iseq_min_max_arity(iseq, max);
}
else {
if (IS_METHOD_PROC_ISEQ(iseq)) {
const struct vm_ifunc *ifunc = (struct vm_ifunc *)iseq;
switch (vm_block_type(block)) {
case block_type_iseq:
return rb_iseq_min_max_arity(block->as.captured.code.iseq, max);
case block_type_proc:
return rb_block_min_max_arity(vm_proc_block(block->as.proc), max);
case block_type_ifunc:
{
const struct vm_ifunc *ifunc = block->as.captured.code.ifunc;
if (IS_METHOD_PROC_IFUNC(ifunc)) {
/* e.g. method(:foo).to_proc.arity */
return method_min_max_arity((VALUE)ifunc->data, max);
}
}
/* fall through */
case block_type_symbol:
break;
}
*max = UNLIMITED_ARGUMENTS;
return 0;
@ -946,7 +967,7 @@ static int
rb_proc_min_max_arity(VALUE self, int *max)
{
rb_proc_t *proc;
rb_block_t *block;
const struct rb_block *block;
GetProcPtr(self, proc);
block = &proc->block;
return rb_block_min_max_arity(block, max);
@ -961,57 +982,95 @@ rb_proc_arity(VALUE self)
return (proc->is_lambda ? min == max : max != UNLIMITED_ARGUMENTS) ? min : -min-1;
}
static void
block_setup(struct rb_block *block, VALUE block_handler)
{
switch (vm_block_handler_type(block_handler)) {
case block_handler_type_iseq:
block->type = block_type_iseq;
block->as.captured = *VM_BH_TO_ISEQ_BLOCK(block_handler);
break;
case block_handler_type_ifunc:
block->type = block_type_ifunc;
block->as.captured = *VM_BH_TO_IFUNC_BLOCK(block_handler);
break;
case block_handler_type_symbol:
block->type = block_type_symbol;
block->as.symbol = VM_BH_TO_SYMBOL(block_handler);
break;
case block_handler_type_proc:
block->type = block_type_proc;
block->as.proc = VM_BH_TO_PROC(block_handler);
}
}
int
rb_block_arity(void)
{
int min, max;
rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = th->cfp;
rb_block_t *block = rb_vm_control_frame_block_ptr(cfp);
VALUE proc_value;
VALUE block_handler = rb_vm_frame_block_handler(cfp);
struct rb_block block;
if (!block) rb_raise(rb_eArgError, "no block given");
min = rb_block_min_max_arity(block, &max);
proc_value = block->proc;
if (proc_value) {
if (SYMBOL_P(proc_value)) {
return -1;
}
else {
rb_proc_t *proc;
GetProcPtr(proc_value, proc);
if (proc)
return (proc->is_lambda ? min == max : max != UNLIMITED_ARGUMENTS) ? min : -min-1;
}
if (block_handler == VM_BLOCK_HANDLER_NONE) {
rb_raise(rb_eArgError, "no block given");
}
block_setup(&block, block_handler);
min = rb_block_min_max_arity(&block, &max);
switch (vm_block_type(&block)) {
case block_handler_type_symbol:
return -1;
case block_handler_type_proc:
{
VALUE procval = block_handler;
rb_proc_t *proc;
GetProcPtr(procval, proc);
return (proc->is_lambda ? min == max : max != UNLIMITED_ARGUMENTS) ? min : -min-1;
/* fall through */
}
default:
return max != UNLIMITED_ARGUMENTS ? min : -min-1;
}
return max != UNLIMITED_ARGUMENTS ? min : -min-1;
}
const rb_iseq_t *
rb_proc_get_iseq(VALUE self, int *is_proc)
{
const rb_proc_t *proc;
const rb_iseq_t *iseq;
const struct rb_block *block;
GetProcPtr(self, proc);
iseq = proc->block.iseq;
block = &proc->block;
if (is_proc) *is_proc = !proc->is_lambda;
if (RUBY_VM_IFUNC_P(iseq)) {
const struct vm_ifunc *ifunc = (struct vm_ifunc *)iseq;
iseq = 0;
if (IS_METHOD_PROC_IFUNC(ifunc)) {
/* method(:foo).to_proc */
iseq = rb_method_iseq((VALUE)ifunc->data);
if (is_proc) *is_proc = 0;
switch (vm_block_type(block)) {
case block_type_iseq:
return rb_iseq_check(block->as.captured.code.iseq);
case block_type_proc:
return rb_proc_get_iseq(block->as.proc, is_proc);
case block_type_ifunc:
{
const struct vm_ifunc *ifunc = block->as.captured.code.ifunc;
if (IS_METHOD_PROC_IFUNC(ifunc)) {
/* method(:foo).to_proc */
if (is_proc) *is_proc = 0;
return rb_method_iseq((VALUE)ifunc->data);
}
else {
return NULL;
}
}
return iseq;
}
else if (SYMBOL_P(iseq)) {
case block_type_symbol:
return NULL;
}
else {
return rb_iseq_check(iseq);
}
VM_UNREACHABLE(rb_proc_get_iseq);
return NULL;
}
static VALUE
@ -1090,8 +1149,9 @@ rb_hash_proc(st_index_t hash, VALUE prc)
{
rb_proc_t *proc;
GetProcPtr(prc, proc);
hash = rb_hash_uint(hash, (st_index_t)proc->block.iseq);
return rb_hash_uint(hash, (st_index_t)proc->block.ep >> 16);
hash = rb_hash_uint(hash, (st_index_t)proc->block.as.captured.code.val);
hash = rb_hash_uint(hash, (st_index_t)proc->block.as.captured.self);
return rb_hash_uint(hash, (st_index_t)proc->block.as.captured.ep >> 16);
}
VALUE
@ -1153,34 +1213,40 @@ proc_hash(VALUE self)
*/
static VALUE
proc_to_s(VALUE self)
proc_to_s_(VALUE self, const rb_proc_t *proc)
{
VALUE str = 0;
rb_proc_t *proc;
const char *cname = rb_obj_classname(self);
const rb_iseq_t *iseq;
const struct rb_block *block;
const char *is_lambda;
GetProcPtr(self, proc);
iseq = proc->block.iseq;
block = &proc->block;
is_lambda = proc->is_lambda ? " (lambda)" : "";
if (RUBY_VM_NORMAL_ISEQ_P(iseq) && rb_iseq_check(iseq)) {
int first_lineno = 0;
if (iseq->body->line_info_table) {
first_lineno = FIX2INT(rb_iseq_first_lineno(iseq));
again:
switch (vm_block_type(block)) {
case block_type_proc:
block = vm_proc_block(block->as.proc);
goto again;
case block_type_iseq:
{
const rb_iseq_t *iseq = rb_iseq_check(block->as.captured.code.iseq);
int first_lineno = 0;
if (iseq->body->line_info_table) {
first_lineno = FIX2INT(rb_iseq_first_lineno(iseq));
}
str = rb_sprintf("#<%s:%p@%"PRIsVALUE":%d%s>", cname, (void *)self,
iseq->body->location.path, first_lineno, is_lambda);
}
str = rb_sprintf("#<%s:%p@%"PRIsVALUE":%d%s>", cname, (void *)self,
iseq->body->location.path, first_lineno, is_lambda);
}
else if (SYMBOL_P(iseq)) {
break;
case block_type_symbol:
str = rb_sprintf("#<%s:%p(&%+"PRIsVALUE")%s>", cname, (void *)self,
(VALUE)iseq, is_lambda);
}
else {
str = rb_sprintf("#<%s:%p%s>", cname, (void *)proc->block.iseq,
block->as.symbol, is_lambda);
break;
case block_type_ifunc:
str = rb_sprintf("#<%s:%p%s>", cname, proc->block.as.captured.code.ifunc,
is_lambda);
break;
}
if (OBJ_TAINTED(self)) {
@ -1189,6 +1255,14 @@ proc_to_s(VALUE self)
return str;
}
static VALUE
proc_to_s(VALUE self)
{
const rb_proc_t *proc;
GetProcPtr(self, proc);
return proc_to_s_(self, proc);
}
/*
* call-seq:
* prc.to_proc -> proc
@ -1791,16 +1865,19 @@ rb_mod_define_method(int argc, VALUE *argv, VALUE mod)
body = rb_block_lambda();
#else
rb_thread_t *th = GET_THREAD();
rb_block_t *block = rb_vm_control_frame_block_ptr(th->cfp);
if (!block) rb_raise(rb_eArgError, proc_without_block);
VALUE block_handler = rb_vm_frame_block_handler(th->cfp);
if (block_handler == VM_BLOCK_HANDLER_NONE) rb_raise(rb_eArgError, proc_without_block);
body = block->proc;
if (SYMBOL_P(body)) {
body = rb_sym_to_proc(body);
}
else if (!body) {
body = rb_vm_make_proc_lambda(th, block, rb_cProc, TRUE);
switch (vm_block_handler_type(block_handler)) {
case block_handler_type_proc:
body = VM_BH_TO_PROC(block_handler);
break;
case block_handler_type_symbol:
body = rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
break;
case block_handler_type_iseq:
case block_handler_type_ifunc:
body = rb_vm_make_proc_lambda(th, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc, TRUE);
}
#endif
}
@ -1842,14 +1919,14 @@ rb_mod_define_method(int argc, VALUE *argv, VALUE mod)
RB_GC_GUARD(body);
}
else {
rb_proc_t *proc;
body = proc_dup(body);
GetProcPtr(body, proc);
if (RUBY_VM_NORMAL_ISEQ_P(proc->block.iseq)) {
VALUE procval = proc_dup(body);
if (vm_proc_iseq(procval) != NULL) {
rb_proc_t *proc;
GetProcPtr(procval, proc);
proc->is_lambda = TRUE;
proc->is_from_method = TRUE;
}
rb_add_method(mod, id, VM_METHOD_TYPE_BMETHOD, (void *)body, scope_visi->method_visi);
rb_add_method(mod, id, VM_METHOD_TYPE_BMETHOD, (void *)procval, scope_visi->method_visi);
if (scope_visi->module_func) {
rb_add_method(rb_singleton_class(mod), id, VM_METHOD_TYPE_BMETHOD, (void *)body, METHOD_VISI_PUBLIC);
}
@ -1963,8 +2040,8 @@ method_clone(VALUE self)
VALUE
rb_method_call(int argc, const VALUE *argv, VALUE method)
{
VALUE proc = rb_block_given_p() ? rb_block_proc() : Qnil;
return rb_method_call_with_block(argc, argv, method, proc);
VALUE procval = rb_block_given_p() ? rb_block_proc() : Qnil;
return rb_method_call_with_block(argc, argv, method, procval);
}
static const rb_callable_method_entry_t *
@ -1976,16 +2053,16 @@ method_callable_method_entry(const struct METHOD *data)
static inline VALUE
call_method_data(rb_thread_t *th, const struct METHOD *data,
int argc, const VALUE *argv, VALUE pass_procval)
int argc, const VALUE *argv, VALUE passed_procval)
{
th->passed_block = passed_block(pass_procval);
vm_passed_block_handler_set(th, proc_to_block_handler(passed_procval));
return rb_vm_call(th, data->recv, data->me->called_id, argc, argv,
method_callable_method_entry(data));
}
static VALUE
call_method_data_safe(rb_thread_t *th, const struct METHOD *data,
int argc, const VALUE *argv, VALUE pass_procval,
int argc, const VALUE *argv, VALUE passed_procval,
int safe)
{
VALUE result = Qnil; /* OK */
@ -1995,7 +2072,7 @@ call_method_data_safe(rb_thread_t *th, const struct METHOD *data,
if ((state = TH_EXEC_TAG()) == 0) {
/* result is used only if state == 0, no exceptions is caught. */
/* otherwise it doesn't matter even if clobbered. */
NO_CLOBBERED(result) = call_method_data(th, data, argc, argv, pass_procval);
NO_CLOBBERED(result) = call_method_data(th, data, argc, argv, passed_procval);
}
TH_POP_TAG();
rb_set_safe_level_force(safe);
@ -2005,7 +2082,7 @@ call_method_data_safe(rb_thread_t *th, const struct METHOD *data,
}
VALUE
rb_method_call_with_block(int argc, const VALUE *argv, VALUE method, VALUE pass_procval)
rb_method_call_with_block(int argc, const VALUE *argv, VALUE method, VALUE passed_procval)
{
const struct METHOD *data;
rb_thread_t *const th = GET_THREAD();
@ -2019,10 +2096,10 @@ rb_method_call_with_block(int argc, const VALUE *argv, VALUE method, VALUE pass_
int safe = rb_safe_level();
if (safe < safe_level_to_run) {
rb_set_safe_level_force(safe_level_to_run);
return call_method_data_safe(th, data, argc, argv, pass_procval, safe);
return call_method_data_safe(th, data, argc, argv, passed_procval, safe);
}
}
return call_method_data(th, data, argc, argv, pass_procval);
return call_method_data(th, data, argc, argv, passed_procval);
}
/**********************************************************************
@ -2627,8 +2704,18 @@ localjump_reason(VALUE exc)
rb_cref_t *rb_vm_cref_new_toplevel(void); /* vm.c */
static inline void
env_write(VALUE env, const VALUE *ep, int index, VALUE v)
{
VM_ASSERT(VM_ENV_ESCAPED_P(ep));
VM_ASSERT(env == VM_ENV_ENVVAL(ep));
VM_ASSERT(vm_env_ep(env) == ep);
RB_OBJ_WRITE(env, &ep[index], v);
}
static VALUE
env_clone(VALUE envval, VALUE receiver, const rb_cref_t *cref)
env_clone(VALUE envval, const rb_cref_t *cref)
{
VALUE newenvval = TypedData_Wrap_Struct(RBASIC_CLASS(envval), RTYPEDDATA_TYPE(envval), 0);
rb_env_t *env, *newenv;
@ -2642,9 +2729,11 @@ env_clone(VALUE envval, VALUE receiver, const rb_cref_t *cref)
envsize = sizeof(rb_env_t) + (env->env_size - 1) * sizeof(VALUE);
newenv = xmalloc(envsize);
memcpy(newenv, env, envsize);
VM_ASSERT(env->ep > env->env);
newenv->ep = &newenv->env[env->ep - env->env];
VM_FORCE_WRITE(&newenv->ep[VM_ENV_DATA_INDEX_ENV], newenvval);
RTYPEDDATA_DATA(newenvval) = newenv;
newenv->block.self = receiver;
newenv->block.ep[-1] = (VALUE)cref;
env_write(newenvval, newenv->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)cref);
return newenvval;
}
@ -2666,31 +2755,61 @@ env_clone(VALUE envval, VALUE receiver, const rb_cref_t *cref)
static VALUE
proc_binding(VALUE self)
{
VALUE bindval, envval;
const rb_proc_t *proc;
const rb_iseq_t *iseq;
VALUE bindval, envval = Qundef, binding_self = Qundef;
rb_binding_t *bind;
const rb_proc_t *proc;
const rb_iseq_t *iseq = NULL;
const struct rb_block *block;
const rb_env_t *env;
GetProcPtr(self, proc);
envval = rb_vm_proc_envval(proc);
iseq = proc->block.iseq;
if (SYMBOL_P(iseq)) goto error;
if (RUBY_VM_IFUNC_P(iseq)) {
struct vm_ifunc *ifunc = (struct vm_ifunc *)iseq;
if (IS_METHOD_PROC_IFUNC(ifunc)) {
VALUE method = (VALUE)ifunc->data;
envval = env_clone(envval, method_receiver(method), method_cref(method));
iseq = rb_method_iseq(method);
}
else {
error:
rb_raise(rb_eArgError, "Can't create Binding from C level Proc");
block = &proc->block;
again:
switch (vm_block_type(block)) {
case block_type_iseq:
iseq = block->as.captured.code.iseq;
binding_self = block->as.captured.self;
envval = VM_ENV_ENVVAL(block->as.captured.ep);
break;
case block_type_proc:
GetProcPtr(block->as.proc, proc);
block = &proc->block;
goto again;
case block_type_symbol:
goto error;
case block_type_ifunc:
{
const struct vm_ifunc *ifunc = block->as.captured.code.ifunc;
if (IS_METHOD_PROC_IFUNC(ifunc)) {
VALUE method = (VALUE)ifunc->data;
rb_env_t *newenv;
iseq = rb_method_iseq(method);
envval = VM_ENV_ENVVAL(block->as.captured.ep);
envval = env_clone(envval, method_cref(method));
binding_self = method_receiver(method);
GetEnvPtr(envval, newenv);
/* set empty iseq */
newenv->iseq = rb_iseq_new(NULL, rb_str_new2("<empty iseq>"), rb_str_new2("<empty_iseq>"), Qnil, 0, ISEQ_TYPE_TOP);
break;
}
else {
error:
rb_raise(rb_eArgError, "Can't create Binding from C level Proc");
return Qnil;
}
}
}
bindval = rb_binding_alloc(rb_cBinding);
GetBindingPtr(bindval, bind);
bind->env = envval;
GetEnvPtr(envval, env);
bind->block.as.captured.self = binding_self;
bind->block.as.captured.code.iseq = env->iseq;
bind->block.as.captured.ep = env->ep;
if (iseq) {
rb_iseq_check(iseq);

process.c

@ -3680,7 +3680,6 @@ rb_f_fork(VALUE obj)
rb_thread_atfork();
if (rb_block_given_p()) {
int status;
rb_protect(rb_yield, Qundef, &status);
ruby_stop(status);
}

11
ruby.c

@ -639,13 +639,10 @@ require_libraries(VALUE *req_list)
*req_list = 0;
}
static rb_block_t*
static const struct rb_block*
toplevel_context(rb_binding_t *bind)
{
rb_env_t *env;
GetEnvPtr(bind->env, env);
return &env->block;
return &bind->block;
}
static void
@ -1447,7 +1444,7 @@ process_options(int argc, char **argv, struct cmdline_options *opt)
char fbuf[MAXPATHLEN];
int i = (int)proc_options(argc, argv, opt, 0);
rb_binding_t *toplevel_binding;
rb_block_t *base_block;
const struct rb_block *base_block;
argc -= i;
argv += i;
@ -1700,7 +1697,7 @@ process_options(int argc, char **argv, struct cmdline_options *opt)
path = rb_realpath_internal(Qnil, opt->script_name, 1);
}
base_block = toplevel_context(toplevel_binding);
iseq = rb_iseq_new_main(tree, opt->script_name, path, base_block->iseq);
iseq = rb_iseq_new_main(tree, opt->script_name, path, vm_block_iseq(base_block));
}
if (opt->dump & DUMP_BIT(insns)) {

string.c

@ -9459,7 +9459,7 @@ sym_to_sym(VALUE sym)
}
VALUE
rb_sym_proc_call(VALUE args, VALUE sym, int argc, const VALUE *argv, VALUE passed_proc)
rb_sym_proc_call(ID mid, int argc, const VALUE *argv, VALUE passed_proc)
{
VALUE obj;
@ -9467,7 +9467,7 @@ rb_sym_proc_call(VALUE args, VALUE sym, int argc, const VALUE *argv, VALUE passe
rb_raise(rb_eArgError, "no receiver given");
}
obj = argv[0];
return rb_funcall_with_block(obj, (ID)sym, argc - 1, argv + 1, passed_proc);
return rb_funcall_with_block(obj, mid, argc - 1, argv + 1, passed_proc);
}
#if 0

thread.c

@ -587,10 +587,12 @@ thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_s
if (!th->first_func) {
GetProcPtr(th->first_proc, proc);
th->errinfo = Qnil;
th->root_lep = rb_vm_ep_local_ep(proc->block.ep);
th->root_lep = rb_vm_ep_local_ep(vm_proc_ep(th->first_proc));
th->root_svar = Qfalse;
EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, Qundef);
th->value = rb_vm_invoke_proc(th, proc, (int)RARRAY_LEN(args), RARRAY_CONST_PTR(args), 0);
th->value = rb_vm_invoke_proc(th, proc,
(int)RARRAY_LEN(args), RARRAY_CONST_PTR(args),
VM_BLOCK_HANDLER_NONE);
EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_END, th->self, 0, 0, Qundef);
}
else {

tool/mk_call_iseq_optimized.rb

@ -10,7 +10,7 @@ puts <<EOS
EOS
P = (0..3)
L = (1..6)
L = (0..5)
def fname param, local
"vm_call_iseq_setup_normal_0start_#{param}params_#{local}locals"
@ -48,8 +48,8 @@ vm_call_iseq_setup_func(const struct rb_call_info *ci, const int param_size, con
else {
if (param_size <= #{P.end} &&
local_size <= #{L.end}) {
VM_ASSERT(local_size != 0);
return vm_call_iseq_handlers[param_size][local_size-1];
VM_ASSERT(local_size >= 0);
return vm_call_iseq_handlers[param_size][local_size];
}
return &vm_call_iseq_setup_normal_0start;
}

714
vm.c

File diff suppressed because it is too large.

105
vm_args.c

@ -466,22 +466,22 @@ args_setup_kw_rest_parameter(VALUE keyword_hash, VALUE *locals)
static inline void
args_setup_block_parameter(rb_thread_t *th, struct rb_calling_info *calling, VALUE *locals)
{
VALUE block_handler = calling->block_handler;
VALUE blockval = Qnil;
const rb_block_t *blockptr = calling->blockptr;
if (blockptr) {
/* make Proc object */
if (blockptr->proc == 0) {
rb_proc_t *proc;
blockval = rb_vm_make_proc(th, blockptr, rb_cProc);
GetProcPtr(blockval, proc);
calling->blockptr = &proc->block;
}
else if (SYMBOL_P(blockptr->proc)) {
blockval = rb_sym_to_proc(blockptr->proc);
}
else {
blockval = blockptr->proc;
if (block_handler != VM_BLOCK_HANDLER_NONE) {
switch (vm_block_handler_type(block_handler)) {
case block_handler_type_iseq:
case block_handler_type_ifunc:
blockval = rb_vm_make_proc(th, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
break;
case block_handler_type_symbol:
blockval = rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
break;
case block_handler_type_proc:
blockval = VM_BH_TO_PROC(block_handler);
break;
}
}
*locals = blockval;
@ -698,9 +698,9 @@ raise_argument_error(rb_thread_t *th, const rb_iseq_t *iseq, const VALUE exc)
VALUE at;
if (iseq) {
vm_push_frame(th, iseq, VM_FRAME_MAGIC_DUMMY, Qnil /* self */,
VM_ENVVAL_BLOCK_PTR(0) /* specval*/, Qfalse /* me or cref */,
iseq->body->iseq_encoded, th->cfp->sp, 1 /* local_size (cref/me) */, 0 /* stack_max */);
vm_push_frame(th, iseq, VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL, Qnil /* self */,
VM_BLOCK_HANDLER_NONE /* specval*/, Qfalse /* me or cref */,
iseq->body->iseq_encoded, th->cfp->sp, 0, 0 /* stack_max */);
at = rb_vm_backtrace_object();
rb_vm_pop_frame(th);
}
@ -766,16 +766,23 @@ vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling,
calling->argc -= kw_len - 1;
}
static inline void
vm_caller_setup_proc_as_block(rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling,
VALUE proc)
static VALUE
vm_to_proc(VALUE proc)
{
rb_proc_t *po;
if (UNLIKELY(!rb_obj_is_proc(proc))) {
VALUE b;
b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc");
GetProcPtr(proc, po);
calling->blockptr = &po->block;
RUBY_VM_GET_BLOCK_PTR_IN_CFP(reg_cfp)->proc = proc;
if (NIL_P(b) || !rb_obj_is_proc(b)) {
rb_raise(rb_eTypeError,
"wrong argument type %s (expected Proc)",
rb_obj_classname(proc));
}
return b;
}
else {
return proc;
}
}
static void
@ -783,51 +790,31 @@ vm_caller_setup_arg_block(const rb_thread_t *th, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling, const struct rb_call_info *ci, rb_iseq_t *blockiseq, const int is_super)
{
if (ci->flag & VM_CALL_ARGS_BLOCKARG) {
VALUE proc;
VALUE block_code = *(--reg_cfp->sp);
proc = *(--reg_cfp->sp);
if (NIL_P(proc)) {
calling->blockptr = NULL;
}
else if (SYMBOL_P(proc) &&
rb_method_basic_definition_p(rb_cSymbol, idTo_proc)) {
if (LIKELY(!(ci->flag & VM_CALL_TAILCALL))) {
calling->blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(reg_cfp);
calling->blockptr->iseq = (rb_iseq_t *)proc;
calling->blockptr->proc = proc;
}
else {
proc = rb_sym_to_proc(proc);
vm_caller_setup_proc_as_block(reg_cfp, calling, proc);
}
if (NIL_P(block_code)) {
calling->block_handler = VM_BLOCK_HANDLER_NONE;
}
else {
if (!rb_obj_is_proc(proc)) {
VALUE b;
b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc");
if (NIL_P(b) || !rb_obj_is_proc(b)) {
rb_raise(rb_eTypeError,
"wrong argument type %s (expected Proc)",
rb_obj_classname(proc));
}
proc = b;
if (SYMBOL_P(block_code) && rb_method_basic_definition_p(rb_cSymbol, idTo_proc)) {
calling->block_handler = block_code;
}
else {
calling->block_handler = vm_to_proc(block_code);
}
vm_caller_setup_proc_as_block(reg_cfp, calling, proc);
}
}
else if (blockiseq != 0) { /* likely */
rb_block_t *blockptr = calling->blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(reg_cfp);
blockptr->iseq = blockiseq;
blockptr->proc = 0;
else if (blockiseq != NULL) { /* likely */
struct rb_captured_block *captured = VM_CFP_TO_CAPTURED_BLOCK(reg_cfp);
captured->code.iseq = blockiseq;
calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(captured);
}
else {
if (is_super) {
calling->blockptr = GET_BLOCK_PTR();
calling->block_handler = GET_BLOCK_HANDLER();
}
else {
calling->blockptr = NULL;
calling->block_handler = VM_BLOCK_HANDLER_NONE;
}
}
}
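
In short, a block argument passed with `&blk` is now folded into a single block-handler VALUE before the call is set up. A condensed sketch of the normalization above (the helper name is made up for illustration; vm_to_proc raises on objects that do not convert):

/* condensed sketch of the &blk normalization shown in the hunk above */
static VALUE
normalize_blockarg(VALUE block_code)
{
    if (NIL_P(block_code)) {
        return VM_BLOCK_HANDLER_NONE;                 /* no block */
    }
    else if (SYMBOL_P(block_code) &&
             rb_method_basic_definition_p(rb_cSymbol, idTo_proc)) {
        return block_code;                            /* Symbol kept as-is */
    }
    else {
        return vm_to_proc(block_code);                /* coerce via #to_proc */
    }
}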

530
vm_core.h

@ -47,8 +47,12 @@
#if VM_CHECK_MODE > 0
#define VM_ASSERT(expr) ( \
RUBY_ASSERT_WHEN(VM_CHECK_MODE > 0, expr))
#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
#else
#define VM_ASSERT(expr) ((void)0)
#define VM_UNREACHABLE(func) ((void)0)
#endif
#define RUBY_VM_THREAD_MODEL 2
@ -225,7 +229,7 @@ struct rb_call_info_with_kwarg {
};
struct rb_calling_info {
struct rb_block_struct *blockptr;
VALUE block_handler;
VALUE recv;
int argc;
};
@ -278,10 +282,6 @@ struct rb_iseq_constant_body {
ISEQ_TYPE_DEFINED_GUARD
} type; /* instruction sequence type */
unsigned int stack_max; /* for stack overflow check */
/* sizeof(vars) + 1 */
unsigned int local_size;
unsigned int iseq_size;
const VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
@ -384,6 +384,7 @@ struct rb_iseq_constant_body {
unsigned int ci_size;
unsigned int ci_kw_size;
unsigned int line_info_size;
unsigned int stack_max; /* for stack overflow check */
};
/* T_IMEMO/iseq */
@ -593,28 +594,52 @@ typedef struct rb_vm_struct {
#define VM_DEBUG_VERIFY_METHOD_CACHE (VM_DEBUG_MODE != 0)
#endif
struct rb_captured_block {
VALUE self;
const VALUE *ep;
union {
const rb_iseq_t *iseq;
const struct vm_ifunc *ifunc;
VALUE val;
} code;
};
enum rb_block_handler_type {
block_handler_type_iseq,
block_handler_type_ifunc,
block_handler_type_symbol,
block_handler_type_proc
};
enum rb_block_type {
block_type_iseq,
block_type_ifunc,
block_type_symbol,
block_type_proc
};
struct rb_block {
union {
struct rb_captured_block captured;
VALUE symbol;
VALUE proc;
} as;
enum rb_block_type type;
};
typedef struct rb_control_frame_struct {
const VALUE *pc; /* cfp[0] */
VALUE *sp; /* cfp[1] */
const rb_iseq_t *iseq; /* cfp[2] */
VALUE flag; /* cfp[3] */
VALUE self; /* cfp[4] / block[0] */
VALUE *ep; /* cfp[5] / block[1] */
const rb_iseq_t *block_iseq;/* cfp[6] / block[2] */
VALUE proc; /* cfp[7] / block[3] */
VALUE self; /* cfp[3] / block[0] */
const VALUE *ep; /* cfp[4] / block[1] */
const void *block_code; /* cfp[5] / block[2] */ /* iseq or ifunc */
#if VM_DEBUG_BP_CHECK
VALUE *bp_check; /* cfp[8] */
VALUE *bp_check; /* cfp[6] */
#endif
} rb_control_frame_t;
typedef struct rb_block_struct {
VALUE self; /* share with method frame if it's only block */
VALUE *ep; /* share with method frame if it's only block */
const rb_iseq_t *iseq;
VALUE proc;
} rb_block_t;
extern const rb_data_type_t ruby_threadptr_data_type;
#define GetThreadPtr(obj, ptr) \
@ -690,7 +715,7 @@ typedef struct rb_thread_struct {
int waiting_fd;
/* for rb_iterate */
const rb_block_t *passed_block;
VALUE passed_block_handler;
/* for bmethod */
const rb_callable_method_entry_t *passed_bmethod_me;
@ -703,7 +728,7 @@ typedef struct rb_thread_struct {
VALUE top_wrapper;
/* eval env */
VALUE *root_lep;
const VALUE *root_lep;
VALUE root_svar;
/* thread control */
@ -822,8 +847,8 @@ rb_iseq_t *rb_iseq_new_with_opt(NODE*, VALUE, VALUE, VALUE, VALUE, const rb_iseq
/* src -> iseq */
rb_iseq_t *rb_iseq_compile(VALUE src, VALUE file, VALUE line);
rb_iseq_t *rb_iseq_compile_on_base(VALUE src, VALUE file, VALUE line, rb_block_t *base_block);
rb_iseq_t *rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE absolute_path, VALUE line, rb_block_t *base_block, VALUE opt);
rb_iseq_t *rb_iseq_compile_on_base(VALUE src, VALUE file, VALUE line, const struct rb_block *base_block);
rb_iseq_t *rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE absolute_path, VALUE line, const struct rb_block *base_block, VALUE opt);
VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
@ -841,7 +866,7 @@ RUBY_SYMBOL_EXPORT_END
GetCoreDataFromValue((obj), rb_proc_t, (ptr))
typedef struct {
rb_block_t block;
const struct rb_block block;
int8_t safe_level; /* 0..1 */
int8_t is_from_method; /* bool */
int8_t is_lambda; /* bool */
@ -852,8 +877,9 @@ typedef struct {
typedef struct {
int env_size;
rb_block_t block;
VALUE env[1]; /* flexible array */
const VALUE *ep;
const rb_iseq_t *iseq;
const VALUE env[1]; /* flexible array */
} rb_env_t;
extern const rb_data_type_t ruby_binding_data_type;
@ -862,7 +888,7 @@ extern const rb_data_type_t ruby_binding_data_type;
GetCoreDataFromValue((obj), rb_binding_t, (ptr))
typedef struct {
VALUE env;
struct rb_block block;
VALUE path;
unsigned short first_lineno;
} rb_binding_t;
@ -903,32 +929,6 @@ enum vm_svar_index {
VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
};
#define VM_FRAME_MAGIC_METHOD 0x11
#define VM_FRAME_MAGIC_BLOCK 0x21
#define VM_FRAME_MAGIC_CLASS 0x31
#define VM_FRAME_MAGIC_TOP 0x41
#define VM_FRAME_MAGIC_CFUNC 0x61
#define VM_FRAME_MAGIC_PROC 0x71
#define VM_FRAME_MAGIC_IFUNC 0x81
#define VM_FRAME_MAGIC_EVAL 0x91
#define VM_FRAME_MAGIC_LAMBDA 0xa1
#define VM_FRAME_MAGIC_RESCUE 0xb1
#define VM_FRAME_MAGIC_DUMMY 0xc1
#define VM_FRAME_MAGIC_MASK_BITS 8
#define VM_FRAME_MAGIC_MASK (~(~(VALUE)0<<VM_FRAME_MAGIC_MASK_BITS))
#define VM_FRAME_TYPE(cfp) ((cfp)->flag & VM_FRAME_MAGIC_MASK)
/* other frame flag */
#define VM_FRAME_FLAG_PASSED 0x0100
#define VM_FRAME_FLAG_FINISH 0x0200
#define VM_FRAME_FLAG_BMETHOD 0x0400
#define VM_FRAME_TYPE_FINISH_P(cfp) (((cfp)->flag & VM_FRAME_FLAG_FINISH) != 0)
#define VM_FRAME_TYPE_BMETHOD_P(cfp) (((cfp)->flag & VM_FRAME_FLAG_BMETHOD) != 0)
#define RUBYVM_CFUNC_FRAME_P(cfp) \
(VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
/* inline cache */
typedef struct iseq_inline_cache_entry *IC;
typedef struct rb_call_info *CALL_INFO;
@ -945,31 +945,180 @@ typedef VALUE CDHASH;
typedef rb_control_frame_t *
(FUNC_FASTCALL(*rb_insn_func_t))(rb_thread_t *, rb_control_frame_t *);
#define GC_GUARDED_PTR(p) ((VALUE)((VALUE)(p) | 0x01))
#define GC_GUARDED_PTR_REF(p) ((void *)(((VALUE)(p)) & ~0x03))
#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
/*
* block frame:
* ep[ 0]: prev frame
* ep[-1]: CREF (for *_eval)
*
* method frame:
* ep[ 0]: block pointer (ptr | VM_ENVVAL_BLOCK_PTR_FLAG)
*/
enum {
/* Frame/Environment flag bits:
* MMMM MMMM MMMM MMMM ____ ____ FFFF EEEX (LSB)
*
* X : tag for GC marking (so the word looks like a Fixnum)
* EEE : 3 bits Env flags
* FFFF: 4 bits Frame flags
* MMMM: 16 bits frame magic (to check frame corruption)
*/
#define VM_ENVVAL_BLOCK_PTR_FLAG 0x02
#define VM_ENVVAL_BLOCK_PTR(v) (GC_GUARDED_PTR(v) | VM_ENVVAL_BLOCK_PTR_FLAG)
#define VM_ENVVAL_BLOCK_PTR_P(v) ((v) & VM_ENVVAL_BLOCK_PTR_FLAG)
#define VM_ENVVAL_PREV_EP_PTR(v) ((VALUE)GC_GUARDED_PTR(v))
#define VM_ENVVAL_PREV_EP_PTR_P(v) (!(VM_ENVVAL_BLOCK_PTR_P(v)))
/* frame types */
VM_FRAME_MAGIC_METHOD = 0x11110001,
VM_FRAME_MAGIC_BLOCK = 0x22220001,
VM_FRAME_MAGIC_CLASS = 0x33330001,
VM_FRAME_MAGIC_TOP = 0x44440001,
VM_FRAME_MAGIC_CFUNC = 0x55550001,
VM_FRAME_MAGIC_PROC = 0x66660001,
VM_FRAME_MAGIC_IFUNC = 0x77770001,
VM_FRAME_MAGIC_EVAL = 0x88880001,
VM_FRAME_MAGIC_LAMBDA = 0x99990001,
VM_FRAME_MAGIC_RESCUE = 0xaaaa0001,
VM_FRAME_MAGIC_DUMMY = 0xbbbb0001,
#define VM_EP_PREV_EP(ep) ((VALUE *)GC_GUARDED_PTR_REF((ep)[0]))
#define VM_EP_BLOCK_PTR(ep) ((rb_block_t *)GC_GUARDED_PTR_REF((ep)[0]))
#define VM_EP_LEP_P(ep) VM_ENVVAL_BLOCK_PTR_P((ep)[0])
VM_FRAME_MAGIC_MASK = 0xffff0001,
VALUE *rb_vm_ep_local_ep(VALUE *ep);
rb_block_t *rb_vm_control_frame_block_ptr(const rb_control_frame_t *cfp);
/* frame flag */
VM_FRAME_FLAG_PASSED = 0x0010,
VM_FRAME_FLAG_FINISH = 0x0020,
VM_FRAME_FLAG_BMETHOD = 0x0040,
/* env flag */
VM_ENV_FLAG_LOCAL = 0x0002,
VM_ENV_FLAG_ESCAPED = 0x0004,
VM_ENV_FLAG_WB_REQUIRED = 0x0008
};
static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
#define VM_FRAME_TYPE_FINISH_P(cfp) (VM_ENV_FLAGS((cfp)->ep, VM_FRAME_FLAG_FINISH ) != 0)
#define VM_FRAME_TYPE_BMETHOD_P(cfp) (VM_ENV_FLAGS((cfp)->ep, VM_FRAME_FLAG_BMETHOD) != 0)
#define VM_ENV_DATA_SIZE ( 3)
#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
#define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
#define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */
#define VM_ENV_DATA_INDEX_ENV_PROC ( 2) /* ep[ 2] */
#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
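/* Illustrative summary of the env layout defined above (comment only):
 *
 *   ep[-2]  VM_ENV_DATA_INDEX_ME_CREF   method entry / cref / Qnil
 *   ep[-1]  VM_ENV_DATA_INDEX_SPECVAL   block handler (local env) or
 *                                       GC-guarded prev-ep pointer (block env)
 *   ep[ 0]  VM_ENV_DATA_INDEX_FLAGS     frame magic | frame flags | env flags,
 *                                       stored as a Fixnum-looking word
 *   ep[ 1]  VM_ENV_DATA_INDEX_ENV       env object  -- escaped envs only
 *   ep[ 2]  VM_ENV_DATA_INDEX_ENV_PROC  proc object -- escaped local envs only
 *
 * hence VM_ENV_DATA_SIZE == 3 and the last local variable sits at
 * ep[VM_ENV_INDEX_LAST_LVAR] (== ep[-3]).  VM_ENV_FLAGS(ep, VM_FRAME_MAGIC_MASK)
 * recovers the frame type; VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) tells whether
 * ep[-1] holds a block handler or a prev-ep pointer.
 */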
static inline void
VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
{
VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
VM_ASSERT(FIXNUM_P(flags));
VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
}
static inline void
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
{
VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
VM_ASSERT(FIXNUM_P(flags));
VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
}
static inline long
VM_ENV_FLAGS(const VALUE *ep, long flag)
{
VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
VM_ASSERT(FIXNUM_P(flags));
return flags & flag;
}
static inline long
VM_FRAME_TYPE(const rb_control_frame_t *cfp)
{
return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
}
#define RUBYVM_CFUNC_FRAME_P(cfp) \
(VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
#define VM_BLOCK_HANDLER_NONE 0
static inline int
VM_ENV_LOCAL_P(const VALUE *ep)
{
return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}
static inline const VALUE *
VM_ENV_PREV_EP(const VALUE *ep)
{
VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}
static inline VALUE
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
{
VM_ASSERT(VM_ENV_LOCAL_P(ep));
return ep[VM_ENV_DATA_INDEX_SPECVAL];
}
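/* sketch: how the local ep (rb_vm_ep_local_ep) can be reached with the
 * accessors above -- follow prev-ep links until the env that owns the
 * method-level locals is found (illustrative helper name) */
static inline const VALUE *
ep_local_ep_sketch(const VALUE *ep)
{
    while (!VM_ENV_LOCAL_P(ep)) {
        ep = VM_ENV_PREV_EP(ep);
    }
    return ep;   /* VM_ENV_BLOCK_HANDLER(ep) is valid here */
}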
#if VM_CHECK_MODE > 0
int rb_vm_ep_in_heap_p(const VALUE *ep);
#endif
static inline int
VM_ENV_ESCAPED_P(const VALUE *ep)
{
VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
}
static inline VALUE
VM_ENV_ENVVAL(const VALUE *ep)
{
VM_ASSERT(VM_ENV_ESCAPED_P(ep));
return ep[VM_ENV_DATA_INDEX_ENV];
}
static inline VALUE
VM_ENV_PROCVAL(const VALUE *ep)
{
VM_ASSERT(VM_ENV_ESCAPED_P(ep));
VM_ASSERT(VM_ENV_LOCAL_P(ep));
VM_ASSERT(VM_ENV_BLOCK_HANDLER(ep) != VM_BLOCK_HANDLER_NONE);
return ep[VM_ENV_DATA_INDEX_ENV_PROC];
}
static inline void
VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
{
*((VALUE *)ptr) = v;
}
static inline void
VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
{
VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
VM_FORCE_WRITE(ptr, special_const_value);
}
static inline void
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
{
VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
VM_FORCE_WRITE(&ep[index], v);
}
#if VM_CHECK_MODE > 0
static inline const VALUE *
vm_env_ep(VALUE envval)
{
rb_env_t *env;
GetEnvPtr(envval, env);
return env->ep;
}
#endif
const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
@ -980,13 +1129,230 @@ rb_block_t *rb_vm_control_frame_block_ptr(const rb_control_frame_t *cfp);
#define RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp) \
(!RUBY_VM_VALID_CONTROL_FRAME_P((cfp), RUBY_VM_END_CONTROL_FRAME(th)))
#define RUBY_VM_IFUNC_P(ptr) (RB_TYPE_P((VALUE)(ptr), T_IMEMO) && imemo_type((VALUE)ptr) == imemo_ifunc)
#define RUBY_VM_NORMAL_ISEQ_P(ptr) (RB_TYPE_P((VALUE)(ptr), T_IMEMO) && imemo_type((VALUE)ptr) == imemo_iseq && rb_iseq_check((rb_iseq_t *)ptr))
#define RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp) ((rb_block_t *)(&(cfp)->self))
#define RUBY_VM_GET_CFP_FROM_BLOCK_PTR(b) \
((rb_control_frame_t *)((VALUE *)(b) - 4))
/* magic number `4' is depend on rb_control_frame_t layout. */
static inline int
VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
{
if ((block_handler & 0x03) == 0x01) {
#if VM_CHECK_MODE > 0
struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
VM_ASSERT(RB_TYPE_P(captured->code.val, T_IMEMO));
VM_ASSERT(imemo_type(captured->code.val) == imemo_iseq);
#endif
return 1;
}
else {
return 0;
}
}
static inline VALUE
VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
{
VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
return block_handler;
}
static inline const struct rb_captured_block *
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
{
struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
return captured;
}
static inline int
VM_BH_IFUNC_P(VALUE block_handler)
{
if ((block_handler & 0x03) == 0x03) {
#if VM_CHECK_MODE > 0
struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
VM_ASSERT(RB_TYPE_P(captured->code.val, T_IMEMO));
VM_ASSERT(imemo_type(captured->code.val) == imemo_ifunc);
#endif
return 1;
}
else {
return 0;
}
}
static inline VALUE
VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
{
VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
VM_ASSERT(VM_BH_IFUNC_P(block_handler));
return block_handler;
}
static inline const struct rb_captured_block *
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
{
struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
VM_ASSERT(VM_BH_IFUNC_P(block_handler));
return captured;
}
static inline const struct rb_captured_block *
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
{
struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
return captured;
}
static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)
{
if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
return block_handler_type_iseq;
}
else if (VM_BH_IFUNC_P(block_handler)) {
return block_handler_type_ifunc;
}
else if (SYMBOL_P(block_handler)) {
return block_handler_type_symbol;
}
else {
VM_ASSERT(rb_obj_is_proc(block_handler));
return block_handler_type_proc;
}
}
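/* Illustrative summary of the block-handler encoding checked above:
 *
 *   captured_ptr | 0x01          iseq block   (captured->code.iseq valid)
 *   captured_ptr | 0x03          ifunc block  (captured->code.ifunc valid)
 *   Symbol VALUE                 symbol block handler
 *   Proc VALUE                   proc block handler
 *   0 == VM_BLOCK_HANDLER_NONE   no block given
 *
 * The two tagged forms point at a struct rb_captured_block (typically the one
 * embedded in a control frame), which is why VM_BH_TO_CAPT_BLOCK masks off the
 * low two bits before dereferencing.
 */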
static inline int
vm_block_handler_verify(VALUE block_handler)
{
VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
vm_block_handler_type(block_handler) >= 0);
return 1;
}
static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
{
#if VM_CHECK_MODE > 0
switch (block->type) {
case block_type_iseq:
VM_ASSERT(RB_TYPE_P(block->as.captured.code.val, T_IMEMO));
VM_ASSERT(imemo_type(block->as.captured.code.val) == imemo_iseq);
break;
case block_type_ifunc:
VM_ASSERT(RB_TYPE_P(block->as.captured.code.val, T_IMEMO));
VM_ASSERT(imemo_type(block->as.captured.code.val) == imemo_ifunc);
break;
case block_type_symbol:
VM_ASSERT(SYMBOL_P(block->as.symbol));
break;
case block_type_proc:
VM_ASSERT(rb_obj_is_proc(block->as.proc));
break;
}
#endif
return block->type;
}
static inline void
vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
{
struct rb_block *mb = (struct rb_block *)block;
mb->type = type;
}
static inline const struct rb_block *
vm_proc_block(VALUE procval)
{
rb_proc_t *proc = RTYPEDDATA_DATA(procval);
VM_ASSERT(rb_obj_is_proc(procval));
return &proc->block;
}
static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
static inline const VALUE *vm_block_ep(const struct rb_block *block);
static inline const rb_iseq_t *
vm_proc_iseq(VALUE procval)
{
VM_ASSERT(rb_obj_is_proc(procval));
return vm_block_iseq(vm_proc_block(procval));
}
static inline const VALUE *
vm_proc_ep(VALUE procval)
{
return vm_block_ep(vm_proc_block(procval));
}
static inline const rb_iseq_t *
vm_block_iseq(const struct rb_block *block)
{
switch (vm_block_type(block)) {
case block_type_iseq: return block->as.captured.code.iseq;
case block_type_proc: return vm_proc_iseq(block->as.proc);
case block_type_ifunc:
case block_type_symbol: return NULL;
}
VM_UNREACHABLE(vm_block_iseq);
return NULL;
}
static inline const VALUE *
vm_block_ep(const struct rb_block *block)
{
switch (vm_block_type(block)) {
case block_type_iseq:
case block_type_ifunc: return block->as.captured.ep;
case block_type_proc: return vm_proc_ep(block->as.proc);
case block_type_symbol: return NULL;
}
VM_UNREACHABLE(vm_block_ep);
return NULL;
}
static inline VALUE
vm_block_self(const struct rb_block *block)
{
switch (vm_block_type(block)) {
case block_type_iseq:
case block_type_ifunc:
return block->as.captured.self;
case block_type_proc:
return vm_block_self(vm_proc_block(block->as.proc));
case block_type_symbol:
return Qundef;
}
VM_UNREACHABLE(vm_block_self);
return Qundef;
}
static inline VALUE
VM_BH_TO_SYMBOL(VALUE block_handler)
{
VM_ASSERT(SYMBOL_P(block_handler));
return block_handler;
}
static inline VALUE
VM_BH_FROM_SYMBOL(VALUE symbol)
{
VM_ASSERT(SYMBOL_P(symbol));
return symbol;
}
static inline VALUE
VM_BH_TO_PROC(VALUE block_handler)
{
VM_ASSERT(rb_obj_is_proc(block_handler));
return block_handler;
}
static inline VALUE
VM_BH_FROM_PROC(VALUE procval)
{
VM_ASSERT(rb_obj_is_proc(procval));
return procval;
}
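/* usage sketch (assuming procval is a live Proc object): the accessors above
 * give uniform access to a block regardless of what backs the Proc */
static void
inspect_proc_sketch(VALUE procval)
{
    const struct rb_block *block = vm_proc_block(procval);
    const rb_iseq_t *iseq = vm_block_iseq(block); /* NULL for ifunc/symbol blocks */
    const VALUE *ep = vm_block_ep(block);         /* NULL for symbol blocks */
    VALUE self = vm_block_self(block);            /* Qundef for symbol blocks */
    (void)iseq; (void)ep; (void)self;
}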
/* VM related object allocate functions */
VALUE rb_thread_alloc(VALUE klass);
@ -1010,15 +1376,13 @@ VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
RUBY_SYMBOL_EXPORT_END
int rb_thread_method_id_and_class(rb_thread_t *th, ID *idp, VALUE *klassp);
VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc,
int argc, const VALUE *argv, const rb_block_t *blockptr);
VALUE rb_vm_make_proc_lambda(rb_thread_t *th, const rb_block_t *block, VALUE klass, int8_t is_lambda);
VALUE rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass);
VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, int argc, const VALUE *argv, VALUE block_handler);
VALUE rb_vm_make_proc_lambda(rb_thread_t *th, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
VALUE rb_vm_make_proc(rb_thread_t *th, const struct rb_captured_block *captured, VALUE klass);
VALUE rb_vm_make_binding(rb_thread_t *th, const rb_control_frame_t *src_cfp);
VALUE rb_vm_env_local_variables(const rb_env_t *env);
VALUE rb_vm_env_prev_envval(const rb_env_t *env);
VALUE rb_vm_proc_envval(const rb_proc_t *proc);
VALUE *rb_binding_add_dynavars(rb_binding_t *bind, int dyncount, const ID *dynvars);
const VALUE *rb_binding_add_dynavars(rb_binding_t *bind, int dyncount, const ID *dynvars);
void rb_vm_inc_const_missing_count(void);
void rb_vm_gvl_destroy(rb_vm_t *vm);
VALUE rb_vm_call(rb_thread_t *th, VALUE recv, VALUE id, int argc,


@ -38,10 +38,6 @@ control_frame_dump(rb_thread_t *th, rb_control_frame_t *cfp)
const rb_callable_method_entry_t *me;
if (cfp->block_iseq != 0 && !RUBY_VM_IFUNC_P(cfp->block_iseq)) {
biseq_name = ""; /* RSTRING(cfp->block_iseq->body->location.label)->ptr; */
}
if (ep < 0 || (size_t)ep > th->stack_size) {
ep = (ptrdiff_t)cfp->ep;
ep_in_heap = 'p';
@ -95,6 +91,7 @@ control_frame_dump(rb_thread_t *th, rb_control_frame_t *cfp)
}
if (cfp->iseq != 0) {
#define RUBY_VM_IFUNC_P(ptr) (RB_TYPE_P((VALUE)(ptr), T_IMEMO) && imemo_type((VALUE)ptr) == imemo_ifunc)
if (RUBY_VM_IFUNC_P(cfp->iseq)) {
iseq_name = "<ifunc>";
}
@ -185,7 +182,7 @@ rb_vmdebug_stack_dump_raw_current(void)
}
void
rb_vmdebug_env_dump_raw(rb_env_t *env, VALUE *ep)
rb_vmdebug_env_dump_raw(rb_env_t *env, const VALUE *ep)
{
int i;
fprintf(stderr, "-- env --------------------\n");
@ -215,13 +212,13 @@ rb_vmdebug_proc_dump_raw(rb_proc_t *proc)
{
rb_env_t *env;
char *selfstr;
VALUE val = rb_inspect(proc->block.self);
VALUE val = rb_inspect(vm_block_self(&proc->block));
selfstr = StringValueCStr(val);
fprintf(stderr, "-- proc -------------------\n");
fprintf(stderr, "self: %s\n", selfstr);
GetEnvPtr(rb_vm_proc_envval(proc), env);
rb_vmdebug_env_dump_raw(env, proc->block.ep);
GetEnvPtr(VM_ENV_ENVVAL(vm_block_ep(&proc->block)), env);
rb_vmdebug_env_dump_raw(env, vm_block_ep(&proc->block));
}
void
@ -239,7 +236,7 @@ static VALUE *
vm_base_ptr(rb_control_frame_t *cfp)
{
rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
VALUE *bp = prev_cfp->sp + cfp->iseq->body->local_size + 1;
VALUE *bp = prev_cfp->sp + iseq->body->local_table_size + VM_ENV_DATA_SIZE;
if (cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
bp += 1;

187
vm_eval.c

@ -18,9 +18,9 @@ struct local_var_list {
static inline VALUE method_missing(VALUE obj, ID id, int argc, const VALUE *argv, enum method_missing_reason call_status);
static inline VALUE vm_yield_with_cref(rb_thread_t *th, int argc, const VALUE *argv, const rb_cref_t *cref);
static inline VALUE vm_yield(rb_thread_t *th, int argc, const VALUE *argv);
static inline VALUE vm_yield_with_block(rb_thread_t *th, int argc, const VALUE *argv, const rb_block_t *blockargptr);
static inline VALUE vm_yield_with_block(rb_thread_t *th, int argc, const VALUE *argv, VALUE block_handler);
static VALUE vm_exec(rb_thread_t *th);
static void vm_set_eval_stack(rb_thread_t * th, const rb_iseq_t *iseq, const rb_cref_t *cref, rb_block_t *base_block);
static void vm_set_eval_stack(rb_thread_t * th, const rb_iseq_t *iseq, const rb_cref_t *cref, const struct rb_block *base_block);
static int vm_collect_local_variables_in_heap(rb_thread_t *th, const VALUE *dfp, const struct local_var_list *vars);
static VALUE rb_eUncaughtThrow;
@ -114,16 +114,16 @@ vm_call0_cfunc_with_frame(rb_thread_t* th, struct rb_calling_info *calling, cons
VALUE recv = calling->recv;
int argc = calling->argc;
ID mid = ci->mid;
rb_block_t *blockptr = calling->blockptr;
VALUE block_handler = calling->block_handler;
RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, me->owner, mid);
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, mid, me->owner, Qnil);
{
rb_control_frame_t *reg_cfp = th->cfp;
vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC, recv,
VM_ENVVAL_BLOCK_PTR(blockptr), (VALUE)me,
0, reg_cfp->sp, 1, 0);
vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC | VM_ENV_FLAG_LOCAL, recv,
block_handler, (VALUE)me,
0, reg_cfp->sp, 0, 0);
if (len >= 0) rb_check_arity(argc, len, len);
@ -155,13 +155,7 @@ vm_call0_body(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_
{
VALUE ret;
if (th->passed_block) {
calling->blockptr = (rb_block_t *)th->passed_block;
th->passed_block = 0;
}
else {
calling->blockptr = 0;
}
calling->block_handler = vm_passed_block_handler(th);
again:
switch (cc->me->def->type) {
@ -178,7 +172,7 @@ vm_call0_body(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_
}
vm_call_iseq_setup(th, reg_cfp, calling, ci, cc);
th->cfp->flag |= VM_FRAME_FLAG_FINISH;
VM_ENV_FLAGS_SET(th->cfp->ep, VM_FRAME_FLAG_FINISH);
return vm_exec(th); /* CHECK_INTS in this function */
}
case VM_METHOD_TYPE_NOTIMPLEMENTED:
@ -222,7 +216,7 @@ vm_call0_body(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_
goto again;
case VM_METHOD_TYPE_MISSING:
{
th->passed_block = calling->blockptr;
vm_passed_block_handler_set(th, calling->block_handler);
return method_missing(calling->recv, ci->mid, calling->argc,
argv, MISSING_NOENTRY);
}
@ -235,7 +229,7 @@ vm_call0_body(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_
{
rb_proc_t *proc;
GetProcPtr(calling->recv, proc);
ret = rb_vm_invoke_proc(th, proc, calling->argc, argv, calling->blockptr);
ret = rb_vm_invoke_proc(th, proc, calling->argc, argv, calling->block_handler);
goto success;
}
default:
@ -288,8 +282,9 @@ vm_call_super(rb_thread_t *th, int argc, const VALUE *argv)
VALUE
rb_call_super(int argc, const VALUE *argv)
{
PASS_PASSED_BLOCK();
return vm_call_super(GET_THREAD(), argc, argv);
rb_thread_t *th = GET_THREAD();
PASS_PASSED_BLOCK_HANDLER_TH(th);
return vm_call_super(th, argc, argv);
}
VALUE
@ -747,11 +742,10 @@ method_missing(VALUE obj, ID id, int argc, const VALUE *argv, enum method_missin
{
VALUE *nargv, result, work, klass;
rb_thread_t *th = GET_THREAD();
const rb_block_t *blockptr = th->passed_block;
VALUE block_handler = vm_passed_block_handler(th);
const rb_callable_method_entry_t *me;
th->method_missing_reason = call_status;
th->passed_block = 0;
if (id == idMethodMissing) {
missing:
@ -768,7 +762,7 @@ method_missing(VALUE obj, ID id, int argc, const VALUE *argv, enum method_missin
if (!klass) goto missing;
me = rb_callable_method_entry(klass, idMethodMissing);
if (!me || METHOD_ENTRY_BASIC(me)) goto missing;
th->passed_block = blockptr;
vm_passed_block_handler_set(th, block_handler);
result = vm_call0(th, obj, idMethodMissing, argc, argv, me);
if (work) ALLOCV_END(work);
return result;
@ -778,7 +772,7 @@ void
rb_raise_method_missing(rb_thread_t *th, int argc, const VALUE *argv,
VALUE obj, int call_status)
{
th->passed_block = 0;
vm_passed_block_handler_set(th, VM_BLOCK_HANDLER_NONE);
raise_method_missing(th, argc, argv, obj, call_status | MISSING_MISSING);
}
@ -874,23 +868,16 @@ rb_funcallv_public(VALUE recv, ID mid, int argc, const VALUE *argv)
VALUE
rb_funcall_passing_block(VALUE recv, ID mid, int argc, const VALUE *argv)
{
PASS_PASSED_BLOCK();
PASS_PASSED_BLOCK_HANDLER();
return rb_call(recv, mid, argc, argv, CALL_PUBLIC);
}
VALUE
rb_funcall_with_block(VALUE recv, ID mid, int argc, const VALUE *argv, VALUE pass_procval)
rb_funcall_with_block(VALUE recv, ID mid, int argc, const VALUE *argv, VALUE passed_procval)
{
if (!NIL_P(pass_procval)) {
if (!NIL_P(passed_procval)) {
rb_thread_t *th = GET_THREAD();
rb_block_t *block = 0;
rb_proc_t *pass_proc;
GetProcPtr(pass_procval, pass_proc);
block = &pass_proc->block;
th->passed_block = block;
vm_passed_block_handler_set(th, passed_procval);
}
return rb_call(recv, mid, argc, argv, CALL_PUBLIC);
@ -957,7 +944,7 @@ send_internal(int argc, const VALUE *argv, VALUE recv, call_type scope)
else {
argv++; argc--;
}
PASS_PASSED_BLOCK_TH(th);
PASS_PASSED_BLOCK_HANDLER_TH(th);
ret = rb_call0(recv, id, argc, argv, scope, self);
ALLOCV_END(vargv);
return ret;
@ -1080,13 +1067,8 @@ rb_yield_splat(VALUE values)
VALUE
rb_yield_block(VALUE val, VALUE arg, int argc, const VALUE *argv, VALUE blockarg)
{
const rb_block_t *blockptr = NULL;
if (!NIL_P(blockarg)) {
rb_proc_t *blockproc;
GetProcPtr(blockarg, blockproc);
blockptr = &blockproc->block;
}
return vm_yield_with_block(GET_THREAD(), argc, argv, blockptr);
return vm_yield_with_block(GET_THREAD(), argc, argv,
NIL_P(blockarg) ? VM_BLOCK_HANDLER_NONE : blockarg);
}
static VALUE
@ -1166,16 +1148,17 @@ rb_iterate0(VALUE (* it_proc) (VALUE), VALUE data1,
if (state == 0) {
iter_retry:
{
rb_block_t *blockptr;
VALUE block_handler;
if (ifunc) {
blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
blockptr->iseq = (void *)ifunc;
blockptr->proc = 0;
struct rb_captured_block *captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
captured->code.ifunc = ifunc;
block_handler = VM_BH_FROM_IFUNC_BLOCK(captured);
}
else {
blockptr = VM_CF_BLOCK_PTR(cfp);
block_handler = VM_CF_BLOCK_HANDLER(cfp);
}
th->passed_block = blockptr;
vm_passed_block_handler_set(th, block_handler);
}
retval = (*it_proc) (data1);
}
@ -1294,12 +1277,14 @@ eval_string_with_cref(VALUE self, VALUE src, VALUE scope, rb_cref_t *const cref_
{
int state;
VALUE result = Qundef;
VALUE envval;
rb_thread_t *th = GET_THREAD();
rb_env_t *env = NULL;
rb_block_t block, *base_block;
VALUE file = filename ? filename : rb_source_location(&lineno);
int line = lineno;
struct rb_block block;
const struct rb_block *base_block;
volatile VALUE file;
volatile int line;
file = filename ? filename : rb_source_location(&lineno);
line = lineno;
{
rb_cref_t *cref = cref_arg;
@ -1314,25 +1299,23 @@ eval_string_with_cref(VALUE self, VALUE src, VALUE scope, rb_cref_t *const cref_
if (!NIL_P(scope)) {
bind = Check_TypedStruct(scope, &ruby_binding_data_type);
{
envval = bind->env;
if (NIL_P(absolute_path) && !NIL_P(bind->path)) {
file = bind->path;
line = bind->first_lineno;
absolute_path = rb_current_realfilepath();
}
if (NIL_P(absolute_path) && !NIL_P(bind->path)) {
file = bind->path;
line = bind->first_lineno;
absolute_path = rb_current_realfilepath();
}
GetEnvPtr(envval, env);
base_block = &env->block;
base_block = &bind->block;
}
else {
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
if (cfp != 0) {
block = *RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
block.as.captured = *VM_CFP_TO_CAPTURED_BLOCK(cfp);
block.as.captured.self = self;
block.as.captured.code.iseq = cfp->iseq;
block.type = block_type_iseq;
base_block = &block;
base_block->self = self;
base_block->iseq = cfp->iseq; /* TODO */
}
else {
rb_raise(rb_eRuntimeError, "Can't eval on top of Fiber or Thread");
@ -1355,9 +1338,10 @@ eval_string_with_cref(VALUE self, VALUE src, VALUE scope, rb_cref_t *const cref_
rb_exc_raise(adjust_backtrace_in_eval(th, th->errinfo));
}
if (!cref && base_block->iseq) {
/* TODO: what is this code checking? */
if (!cref && base_block->as.captured.code.val) {
if (NIL_P(scope)) {
rb_cref_t *orig_cref = rb_vm_get_cref(base_block->ep);
rb_cref_t *orig_cref = rb_vm_get_cref(vm_block_ep(base_block));
cref = vm_cref_dup(orig_cref);
}
else {
@ -1373,7 +1357,7 @@ eval_string_with_cref(VALUE self, VALUE src, VALUE scope, rb_cref_t *const cref_
/* save new env */
if (bind && iseq->body->local_table_size > 0) {
bind->env = vm_make_env_object(th, th->cfp);
vm_bind_update_env(bind, vm_make_env_object(th, th->cfp));
}
}
@ -1579,16 +1563,41 @@ static VALUE
yield_under(VALUE under, VALUE self, int argc, const VALUE *argv)
{
rb_thread_t *th = GET_THREAD();
rb_block_t block, *blockptr;
rb_control_frame_t *cfp = th->cfp;
VALUE block_handler = VM_CF_BLOCK_HANDLER(cfp);
VALUE new_block_handler = 0;
const struct rb_captured_block *captured = NULL;
struct rb_captured_block new_captured;
const VALUE *ep = NULL;
rb_cref_t *cref;
if ((blockptr = VM_CF_BLOCK_PTR(th->cfp)) != 0) {
block = *blockptr;
block.self = self;
VM_CF_LEP(th->cfp)[0] = VM_ENVVAL_BLOCK_PTR(&block);
}
cref = vm_cref_push(th, under, blockptr, TRUE);
if (block_handler != VM_BLOCK_HANDLER_NONE) {
again:
switch (vm_block_handler_type(block_handler)) {
case block_handler_type_iseq:
captured = VM_BH_TO_CAPT_BLOCK(block_handler);
new_captured = *captured;
new_block_handler = VM_BH_FROM_ISEQ_BLOCK(&new_captured);
break;
case block_handler_type_ifunc:
captured = VM_BH_TO_CAPT_BLOCK(block_handler);
new_captured = *captured;
new_block_handler = VM_BH_FROM_IFUNC_BLOCK(&new_captured);
break;
case block_handler_type_proc:
block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
goto again;
case block_handler_type_symbol:
return rb_sym_proc_call(SYM2ID(VM_BH_TO_SYMBOL(block_handler)), 1, &self, VM_BLOCK_HANDLER_NONE);
}
new_captured.self = self;
ep = captured->ep;
VM_FORCE_WRITE_SPECIAL_CONST(&VM_CF_LEP(th->cfp)[VM_ENV_DATA_INDEX_SPECVAL], new_block_handler);
}
cref = vm_cref_push(th, under, ep, TRUE);
return vm_yield_with_cref(th, argc, argv, cref);
}
@ -1596,18 +1605,22 @@ VALUE
rb_yield_refine_block(VALUE refinement, VALUE refinements)
{
rb_thread_t *th = GET_THREAD();
rb_block_t block, *blockptr;
rb_cref_t *cref;
VALUE block_handler = VM_CF_BLOCK_HANDLER(th->cfp);
if ((blockptr = VM_CF_BLOCK_PTR(th->cfp)) != 0) {
block = *blockptr;
block.self = refinement;
VM_CF_LEP(th->cfp)[0] = VM_ENVVAL_BLOCK_PTR(&block);
if (vm_block_handler_type(block_handler) != block_handler_type_iseq) {
rb_bug("rb_yield_refine_block: an iseq block is required");
}
else {
const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
struct rb_captured_block new_captured = *captured;
VALUE new_block_handler = VM_BH_FROM_ISEQ_BLOCK(&new_captured);
const VALUE *ep = captured->ep;
rb_cref_t *cref = vm_cref_push(th, refinement, ep, TRUE);
CREF_REFINEMENTS_SET(cref, refinements);
VM_FORCE_WRITE_SPECIAL_CONST(&VM_CF_LEP(th->cfp)[VM_ENV_DATA_INDEX_SPECVAL], new_block_handler);
new_captured.self = refinement;
return vm_yield_with_cref(th, 0, NULL, cref);
}
cref = vm_cref_push(th, refinement, blockptr, TRUE);
CREF_REFINEMENTS_SET(cref, refinements);
return vm_yield_with_cref(th, 0, NULL, cref);
}
/* string eval under the class/module context */
@ -2093,9 +2106,9 @@ rb_f_local_variables(void)
local_var_list_add(&vars, cfp->iseq->body->local_table[i]);
}
}
if (!VM_EP_LEP_P(cfp->ep)) {
if (!VM_ENV_LOCAL_P(cfp->ep)) {
/* block */
VALUE *ep = VM_CF_PREV_EP(cfp);
const VALUE *ep = VM_CF_PREV_EP(cfp);
if (vm_collect_local_variables_in_heap(th, ep, &vars)) {
break;
@ -2142,7 +2155,7 @@ rb_f_block_given_p(void)
rb_control_frame_t *cfp = th->cfp;
cfp = vm_get_ruby_level_caller_cfp(th, RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
if (cfp != 0 && VM_CF_BLOCK_PTR(cfp)) {
if (cfp != NULL && VM_CF_BLOCK_HANDLER(cfp) != VM_BLOCK_HANDLER_NONE) {
return Qtrue;
}
else {


@ -72,7 +72,7 @@ callable_method_entry_p(const rb_callable_method_entry_t *me)
static void
vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me)
{
int magic = (int)(type & VM_FRAME_MAGIC_MASK);
unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
enum imemo_type cref_or_me_type = imemo_none;
if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
@ -82,10 +82,10 @@ vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE
req_me = TRUE;
}
if (req_block && !VM_ENVVAL_BLOCK_PTR_P(specval)) {
if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
}
if (!req_block && VM_ENVVAL_BLOCK_PTR_P(specval)) {
if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
}
@ -125,6 +125,7 @@ vm_check_frame(VALUE type,
VALUE cref_or_me)
{
int magic = (int)(type & VM_FRAME_MAGIC_MASK);
VM_ASSERT(FIXNUM_P(type));
#define CHECK(magic, req_block, req_me, req_cref) case magic: vm_check_frame_detail(type, req_block, req_me, req_cref, specval, cref_or_me); break;
switch (magic) {
@ -165,7 +166,7 @@ vm_push_frame(rb_thread_t *th,
int i;
vm_check_frame(type, specval, cref_or_me);
VM_ASSERT(local_size >= 1);
VM_ASSERT(local_size >= 0);
/* check stack overflow */
CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);
@ -175,23 +176,23 @@ vm_push_frame(rb_thread_t *th,
/* setup new frame */
cfp->pc = (VALUE *)pc;
cfp->iseq = (rb_iseq_t *)iseq;
cfp->flag = type;
cfp->self = self;
cfp->block_iseq = NULL;
cfp->proc = 0;
cfp->block_code = NULL;
/* setup vm value stack */
/* initialize local variables */
for (i=0; i < local_size - 1; i++) {
for (i=0; i < local_size; i++) {
*sp++ = Qnil;
}
/* set special val */
*sp++ = cref_or_me; /* Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
*sp = specval;
/* setup vm control frame stack */
/* setup ep with managing data */
VM_ASSERT(VM_ENV_DATA_INDEX_ME_CREF == -2);
VM_ASSERT(VM_ENV_DATA_INDEX_SPECVAL == -1);
VM_ASSERT(VM_ENV_DATA_INDEX_FLAGS == -0);
*sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
*sp++ = specval /* ep[-1] / block handler or prev env ptr */;
*sp = type; /* ep[-0] / ENV_FLAGS */
cfp->ep = sp;
cfp->sp = sp + 1;
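/* Resulting layout after vm_push_frame() (illustration only):
 *
 *        ...
 *        local_0 ... local_{n-1}       <- initialized to Qnil above
 *        cref_or_me                    <- ep[-2]
 *        specval                       <- ep[-1] (block handler / prev ep)
 *   ep ->type (frame magic + flags)    <- ep[ 0]
 *   sp ->(next value slot)
 *
 * i.e. the frame type no longer lives in cfp->flag; it is read back through
 * VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK).
 */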
@ -207,20 +208,33 @@ vm_push_frame(rb_thread_t *th,
return cfp;
}
static inline int
vm_pop_frame(rb_thread_t *th, rb_control_frame_t *cfp, const VALUE *ep /* we'll use ep soon */)
rb_control_frame_t *
rb_vm_push_frame(rb_thread_t *th,
const rb_iseq_t *iseq,
VALUE type,
VALUE self,
VALUE specval,
VALUE cref_or_me,
const VALUE *pc,
VALUE *sp,
int local_size,
int stack_max)
{
return vm_push_frame(th, iseq, type, self, specval, cref_or_me, pc, sp, local_size, stack_max);
}
/* return TRUE if the frame is finished */
static inline int
vm_pop_frame(rb_thread_t *th, rb_control_frame_t *cfp, const VALUE *ep)
{
VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
if (VM_CHECK_MODE >= 4) rb_gc_verify_internal_consistency();
if (VMDEBUG == 2) SDR();
th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
if (UNLIKELY(VM_FRAME_TYPE_FINISH_P(cfp))) {
return TRUE;
}
else {
return FALSE;
}
return flags & VM_FRAME_FLAG_FINISH;
}
void
@ -252,49 +266,93 @@ rb_error_arity(int argc, int min, int max)
rb_exc_raise(rb_arity_error_new(argc, min, max));
}
/* svar */
/* lvar */
static inline struct vm_svar **
lep_svar_place(rb_thread_t *th, const VALUE *lep)
NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));
static void
vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
{
const VALUE *svar_place;
/* remember env value forcibly */
rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
VM_FORCE_WRITE(&ep[index], v);
VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
}
if (lep && (th == NULL || th->root_lep != lep)) {
svar_place = &lep[-1];
static inline void
vm_env_write(const VALUE *ep, int index, VALUE v)
{
VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
VM_STACK_ENV_WRITE(ep, index, v);
}
else {
svar_place = &th->root_svar;
vm_env_write_slowpath(ep, index, v);
}
}
void
rb_vm_env_write(const VALUE *ep, int index, VALUE v)
{
vm_env_write(ep, index, v);
}
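/* Illustration of the policy above (comment plus a hypothetical call): while
 * an env is still on the VM stack, VM_ENV_FLAG_WB_REQUIRED is clear and
 * vm_env_write() is a plain store; once the env has escaped to the heap and
 * the flag is set, the slow path remembers the env object for the write
 * barrier, stores the value, then clears the flag so later writes take the
 * fast path again.
 *
 *   rb_vm_env_write(ep, VM_ENV_INDEX_LAST_LVAR, val);  // write the last local
 */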
/* svar */
#if VM_CHECK_MODE > 0
{
VALUE svar = *svar_place;
if (svar != Qfalse) {
if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
switch (imemo_type(svar)) {
case imemo_svar:
case imemo_cref:
case imemo_ment:
goto okay;
default:
break; /* fall through */
}
}
rb_bug("lep_svar_place: unknown type: %s", rb_obj_info(svar));
static int
vm_svar_valid_p(VALUE svar)
{
if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
switch (imemo_type(svar)) {
case imemo_svar:
case imemo_cref:
case imemo_ment:
return TRUE;
default:
break;
}
okay:;
}
rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
return FALSE;
}
#endif
return (struct vm_svar **)svar_place;
static inline struct vm_svar *
lep_svar(rb_thread_t *th, const VALUE *lep)
{
VALUE svar;
if (lep && (th == NULL || th->root_lep != lep)) {
svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
}
else {
svar = th->root_svar;
}
VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));
return (struct vm_svar *)svar;
}
static inline void
lep_svar_write(rb_thread_t *th, const VALUE *lep, const struct vm_svar *svar)
{
VM_ASSERT(vm_svar_valid_p((VALUE)svar));
if (lep && (th == NULL || th->root_lep != lep)) {
vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
}
else {
RB_OBJ_WRITE(th->self, &th->root_svar, svar);
}
}
static VALUE
lep_svar_get(rb_thread_t *th, const VALUE *lep, rb_num_t key)
{
struct vm_svar ** const svar_place = lep_svar_place(th, lep);
const struct vm_svar *const svar = *svar_place;
const struct vm_svar *svar = lep_svar(th, lep);
if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;
@ -323,13 +381,12 @@ svar_new(VALUE obj)
}
static void
lep_svar_set(rb_thread_t *th, VALUE *lep, rb_num_t key, VALUE val)
lep_svar_set(rb_thread_t *th, const VALUE *lep, rb_num_t key, VALUE val)
{
struct vm_svar **svar_place = lep_svar_place(th, lep);
struct vm_svar *svar = *svar_place;
struct vm_svar *svar = lep_svar(th, lep);
if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
svar = *svar_place = svar_new((VALUE)svar);
lep_svar_write(th, lep, svar = svar_new((VALUE)svar));
}
switch (key) {
@ -351,7 +408,7 @@ lep_svar_set(rb_thread_t *th, VALUE *lep, rb_num_t key, VALUE val)
}
static inline VALUE
vm_getspecial(rb_thread_t *th, VALUE *lep, rb_num_t key, rb_num_t type)
vm_getspecial(rb_thread_t *th, const VALUE *lep, rb_num_t key, rb_num_t type)
{
VALUE val;
@ -416,15 +473,15 @@ check_method_entry(VALUE obj, int can_be_svar)
const rb_callable_method_entry_t *
rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
{
VALUE *ep = cfp->ep;
const VALUE *ep = cfp->ep;
rb_callable_method_entry_t *me;
while (!VM_EP_LEP_P(ep)) {
if ((me = check_method_entry(ep[-1], FALSE)) != NULL) return me;
ep = VM_EP_PREV_EP(ep);
while (!VM_ENV_LOCAL_P(ep)) {
if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
ep = VM_ENV_PREV_EP(ep);
}
return check_method_entry(ep[-1], TRUE);
return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}
static rb_cref_t *
@ -472,12 +529,12 @@ vm_env_cref(const VALUE *ep)
{
rb_cref_t *cref;
while (!VM_EP_LEP_P(ep)) {
if ((cref = check_cref(ep[-1], FALSE)) != NULL) return cref;
ep = VM_EP_PREV_EP(ep);
while (!VM_ENV_LOCAL_P(ep)) {
if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
ep = VM_ENV_PREV_EP(ep);
}
return check_cref(ep[-1], TRUE);
return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}
static int
@ -499,15 +556,15 @@ is_cref(const VALUE v, int can_be_svar)
static int
vm_env_cref_by_cref(const VALUE *ep)
{
while (!VM_EP_LEP_P(ep)) {
if (is_cref(ep[-1], FALSE)) return TRUE;
ep = VM_EP_PREV_EP(ep);
while (!VM_ENV_LOCAL_P(ep)) {
if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
ep = VM_ENV_PREV_EP(ep);
}
return is_cref(ep[-1], TRUE);
return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}
static rb_cref_t *
cref_replace_with_duplicated_cref_each_frame(VALUE *vptr, int can_be_svar, VALUE parent)
cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
{
const VALUE v = *vptr;
rb_cref_t *cref, *new_cref;
@ -518,16 +575,15 @@ cref_replace_with_duplicated_cref_each_frame(VALUE *vptr, int can_be_svar, VALUE
cref = (rb_cref_t *)v;
new_cref = vm_cref_dup(cref);
if (parent) {
/* this pointer is in svar */
RB_OBJ_WRITE(parent, vptr, new_cref);
}
else {
*vptr = (VALUE)new_cref;
VM_FORCE_WRITE(vptr, (VALUE)new_cref);
}
return (rb_cref_t *)new_cref;
case imemo_svar:
if (can_be_svar) {
return cref_replace_with_duplicated_cref_each_frame((VALUE *)&((struct vm_svar *)v)->cref_or_me, FALSE, v);
return cref_replace_with_duplicated_cref_each_frame((const VALUE *)&((struct vm_svar *)v)->cref_or_me, FALSE, v);
}
case imemo_ment:
rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
@ -543,14 +599,17 @@ vm_cref_replace_with_duplicated_cref(const VALUE *ep)
{
if (vm_env_cref_by_cref(ep)) {
rb_cref_t *cref;
VALUE envval;
while (!VM_EP_LEP_P(ep)) {
if ((cref = cref_replace_with_duplicated_cref_each_frame((VALUE *)&ep[-1], FALSE, Qfalse)) != NULL) {
while (!VM_ENV_LOCAL_P(ep)) {
envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
return cref;
}
ep = VM_EP_PREV_EP(ep);
ep = VM_ENV_PREV_EP(ep);
}
return cref_replace_with_duplicated_cref_each_frame((VALUE *)&ep[-1], TRUE, Qfalse);
envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
}
else {
rb_bug("vm_cref_dup: unreachable");
@ -608,12 +667,12 @@ rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass, rb_cref_t
}
static rb_cref_t *
vm_cref_push(rb_thread_t *th, VALUE klass, rb_block_t *blockptr, int pushed_by_eval)
vm_cref_push(rb_thread_t *th, VALUE klass, const VALUE *ep, int pushed_by_eval)
{
rb_cref_t *prev_cref = NULL;
if (blockptr) {
prev_cref = vm_env_cref(blockptr->ep);
if (ep) {
prev_cref = vm_env_cref(ep);
}
else {
rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(th, th->cfp);
@ -921,7 +980,7 @@ static VALUE
vm_throw_start(rb_thread_t *const th, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
const int flag, const rb_num_t level, const VALUE throwobj)
{
rb_control_frame_t *escape_cfp = NULL;
const rb_control_frame_t *escape_cfp = NULL;
const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(th); /* end of control frame pointer */
if (flag != 0) {
@ -929,7 +988,7 @@ vm_throw_start(rb_thread_t *const th, rb_control_frame_t *const reg_cfp, enum ru
}
else if (state == TAG_BREAK) {
int is_orphan = 1;
VALUE *ep = GET_EP();
const VALUE *ep = GET_EP();
const rb_iseq_t *base_iseq = GET_ISEQ();
escape_cfp = reg_cfp;
@ -940,7 +999,7 @@ vm_throw_start(rb_thread_t *const th, rb_control_frame_t *const reg_cfp, enum ru
base_iseq = escape_cfp->iseq;
}
else {
ep = VM_EP_PREV_EP(ep);
ep = VM_ENV_PREV_EP(ep);
base_iseq = base_iseq->body->parent_iseq;
escape_cfp = rb_vm_search_cf_from_ep(th, escape_cfp, ep);
VM_ASSERT(escape_cfp->iseq == base_iseq);
@ -953,7 +1012,7 @@ vm_throw_start(rb_thread_t *const th, rb_control_frame_t *const reg_cfp, enum ru
state = TAG_RETURN;
}
else {
ep = VM_EP_PREV_EP(ep);
ep = VM_ENV_PREV_EP(ep);
while (escape_cfp < eocfp) {
if (escape_cfp->ep == ep) {
@ -986,22 +1045,22 @@ vm_throw_start(rb_thread_t *const th, rb_control_frame_t *const reg_cfp, enum ru
}
else if (state == TAG_RETRY) {
rb_num_t i;
VALUE *ep = VM_EP_PREV_EP(GET_EP());
const VALUE *ep = VM_ENV_PREV_EP(GET_EP());
for (i = 0; i < level; i++) {
ep = VM_EP_PREV_EP(ep);
ep = VM_ENV_PREV_EP(ep);
}
escape_cfp = rb_vm_search_cf_from_ep(th, reg_cfp, ep);
}
else if (state == TAG_RETURN) {
VALUE *current_ep = GET_EP();
VALUE *target_lep = VM_EP_LEP(current_ep);
const VALUE *current_ep = GET_EP();
const VALUE *target_lep = VM_EP_LEP(current_ep);
int in_class_frame = 0;
escape_cfp = reg_cfp;
while (escape_cfp < eocfp) {
VALUE *lep = VM_CF_LEP(escape_cfp);
const VALUE *lep = VM_CF_LEP(escape_cfp);
if (!target_lep) {
target_lep = lep;
@ -1021,14 +1080,14 @@ vm_throw_start(rb_thread_t *const th, rb_control_frame_t *const reg_cfp, enum ru
goto valid_return;
}
else {
VALUE *tep = current_ep;
const VALUE *tep = current_ep;
while (target_lep != tep) {
if (escape_cfp->ep == tep) {
/* in lambda */
goto valid_return;
}
tep = VM_EP_PREV_EP(tep);
tep = VM_ENV_PREV_EP(tep);
}
}
}
@ -1140,6 +1199,7 @@ vm_search_method(const struct rb_call_info *ci, struct rb_call_cache *cc, VALUE
#if OPT_INLINE_METHOD_CACHE
if (LIKELY(GET_GLOBAL_METHOD_STATE() == cc->method_state && RCLASS_SERIAL(klass) == cc->class_serial)) {
/* cache hit! */
VM_ASSERT(cc->call != NULL);
return;
}
#endif
@ -1287,26 +1347,29 @@ double_cmp_ge(double a, double b)
}
static VALUE *
vm_base_ptr(rb_control_frame_t *cfp)
vm_base_ptr(const rb_control_frame_t *cfp)
{
rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
VALUE *bp = prev_cfp->sp + cfp->iseq->body->local_size + 1;
if (cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
/* adjust `self' */
bp += 1;
}
const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
if (cfp->iseq && RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
VALUE *bp = prev_cfp->sp + cfp->iseq->body->local_table_size + VM_ENV_DATA_SIZE;
if (cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
/* adjust `self' */
bp += 1;
}
#if VM_DEBUG_BP_CHECK
if (bp != cfp->bp_check) {
fprintf(stderr, "bp_check: %ld, bp: %ld\n",
(long)(cfp->bp_check - GET_THREAD()->stack),
(long)(bp - GET_THREAD()->stack));
rb_bug("vm_base_ptr: unreachable");
}
if (bp != cfp->bp_check) {
fprintf(stderr, "bp_check: %ld, bp: %ld\n",
(long)(cfp->bp_check - GET_THREAD()->stack),
(long)(bp - GET_THREAD()->stack));
rb_bug("vm_base_ptr: unreachable");
}
#endif
return bp;
return bp;
}
else {
return NULL;
}
}
/* method call processes with call_info */
@ -1347,7 +1410,7 @@ vm_call_iseq_setup_normal_0start(rb_thread_t *th, rb_control_frame_t *cfp, struc
{
const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
int param = iseq->body->param.size;
int local = iseq->body->local_size;
int local = iseq->body->local_table_size;
return vm_call_iseq_setup_normal(th, cfp, calling, ci, cc, 0, param, local);
}
@ -1390,7 +1453,7 @@ vm_call_iseq_setup(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_i
{
const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
const int param_size = iseq->body->param.size;
const int local_size = iseq->body->local_size;
const int local_size = iseq->body->local_table_size;
const int opt_pc = vm_callee_setup_arg(th, calling, ci, cc, def_iseq_ptr(cc->me->def), cfp->sp - calling->argc, param_size, local_size);
return vm_call_iseq_setup_2(th, cfp, calling, ci, cc, opt_pc, param_size, local_size);
}
@ -1417,8 +1480,8 @@ vm_call_iseq_setup_normal(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_ca
VALUE *sp = argv + param_size;
cfp->sp = argv - 1 /* recv */;
vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD, calling->recv,
VM_ENVVAL_BLOCK_PTR(calling->blockptr), (VALUE)me,
vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
calling->block_handler, (VALUE)me,
iseq->body->iseq_encoded + opt_pc, sp,
local_size - param_size,
iseq->body->stack_max);
@ -1437,6 +1500,18 @@ vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_
VALUE *sp_orig, *sp;
VALUE finish_flag = VM_FRAME_TYPE_FINISH_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
dst_captured->code.val = src_captured->code.val;
if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
}
else {
calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
}
}
vm_pop_frame(th, cfp, cfp->ep);
cfp = th->cfp;
@ -1453,10 +1528,10 @@ vm_call_iseq_setup_tailcall(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_
*sp++ = src_argv[i];
}
vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD | finish_flag,
calling->recv, VM_ENVVAL_BLOCK_PTR(calling->blockptr), (VALUE)me,
vm_push_frame(th, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
calling->recv, calling->block_handler, (VALUE)me,
iseq->body->iseq_encoded + opt_pc, sp,
iseq->body->local_size - iseq->body->param.size,
iseq->body->local_table_size - iseq->body->param.size,
iseq->body->stack_max);
cfp->sp = sp_orig;
@ -1637,15 +1712,15 @@ vm_call_cfunc_with_frame(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb
int len = cfunc->argc;
VALUE recv = calling->recv;
rb_block_t *blockptr = calling->blockptr;
VALUE block_handler = calling->block_handler;
int argc = calling->argc;
RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, me->owner, me->called_id);
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, me->called_id, me->owner, Qundef);
vm_push_frame(th, NULL, VM_FRAME_MAGIC_CFUNC, recv,
VM_ENVVAL_BLOCK_PTR(blockptr), (VALUE)me,
0, th->cfp->sp, 1, 0);
vm_push_frame(th, NULL, VM_FRAME_MAGIC_CFUNC | VM_ENV_FLAG_LOCAL, recv,
block_handler, (VALUE)me,
0, th->cfp->sp, 0, 0);
if (len >= 0) rb_check_arity(argc, len, len);
@ -1733,9 +1808,9 @@ rb_vm_call_cfunc_push_frame(rb_thread_t *th)
const rb_callable_method_entry_t *me = calling->me;
th->passed_ci = 0;
vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC,
calling->recv, VM_ENVVAL_BLOCK_PTR(calling->blockptr), (VALUE)me /* cref */,
0, th->cfp->sp + cc->aux.inc_sp, 1, 0);
vm_push_frame(th, 0, VM_FRAME_MAGIC_CFUNC | VM_ENV_FLAG_LOCAL,
calling->recv, calling->block_handler, (VALUE)me /* cref */,
0, th->cfp->sp + cc->aux.inc_sp, 0, 0);
if (calling->call != vm_call_general) {
calling->call = vm_call_cfunc_with_frame;
@ -1774,7 +1849,7 @@ vm_call_bmethod_body(rb_thread_t *th, struct rb_calling_info *calling, const str
/* control block frame */
th->passed_bmethod_me = cc->me;
GetProcPtr(cc->me->def->body.proc, proc);
val = vm_invoke_bmethod(th, proc, calling->recv, calling->argc, argv, calling->blockptr);
val = vm_invoke_bmethod(th, proc, calling->recv, calling->argc, argv, calling->block_handler);
return val;
}
@ -1786,7 +1861,6 @@ vm_call_bmethod(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_info
int argc;
CALLER_SETUP_ARG(cfp, calling, ci);
argc = calling->argc;
argv = ALLOCA_N(VALUE, argc);
MEMCPY(argv, cfp->sp - argc, VALUE, argc);
@ -1879,7 +1953,7 @@ vm_call_opt_call(rb_thread_t *th, rb_control_frame_t *cfp, struct rb_calling_inf
MEMCPY(argv, cfp->sp - argc, VALUE, argc);
cfp->sp -= argc + 1;
return rb_vm_invoke_proc(th, proc, argc, argv, calling->blockptr);
return rb_vm_invoke_proc(th, proc, argc, argv, calling->block_handler);
}
static VALUE
@ -2305,18 +2379,40 @@ block_proc_is_lambda(const VALUE procval)
}
static VALUE
vm_yield_with_cfunc(rb_thread_t *th, const rb_block_t *block, VALUE self,
int argc, const VALUE *argv,
const rb_block_t *blockargptr)
vm_block_handler_to_proc(rb_thread_t *th, VALUE block_handler)
{
const struct vm_ifunc *ifunc = (struct vm_ifunc *)block->iseq;
VALUE val, arg, blockarg, data;
rb_block_call_func *func;
VALUE blockarg = Qnil;
if (block_handler != VM_BLOCK_HANDLER_NONE) {
switch (vm_block_handler_type(block_handler)) {
case block_handler_type_proc:
blockarg = block_handler;
break;
case block_handler_type_symbol:
blockarg = rb_sym_to_proc(block_handler);
break;
case block_handler_type_iseq:
case block_handler_type_ifunc:
blockarg = rb_vm_make_proc(th, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
break;
}
}
return blockarg;
}
static VALUE
vm_yield_with_cfunc(rb_thread_t *th,
const struct rb_captured_block *captured,
VALUE self, int argc, const VALUE *argv, VALUE block_handler)
{
int is_lambda = FALSE; /* TODO */
VALUE val, arg, blockarg;
const struct vm_ifunc *ifunc = captured->code.ifunc;
const rb_callable_method_entry_t *me = th->passed_bmethod_me;
th->passed_bmethod_me = NULL;
if (!RUBY_VM_IFUNC_P(block->proc) && !SYMBOL_P(block->proc) &&
block_proc_is_lambda(block->proc)) {
if (is_lambda) {
arg = rb_ary_new4(argc, argv);
}
else if (argc == 0) {
@ -2326,36 +2422,26 @@ vm_yield_with_cfunc(rb_thread_t *th, const rb_block_t *block, VALUE self,
arg = argv[0];
}
if (blockargptr) {
if (blockargptr->proc) {
blockarg = blockargptr->proc;
}
else {
blockarg = rb_vm_make_proc(th, blockargptr, rb_cProc);
}
}
else {
blockarg = Qnil;
}
blockarg = vm_block_handler_to_proc(th, block_handler);
vm_push_frame(th, (rb_iseq_t *)ifunc, VM_FRAME_MAGIC_IFUNC,
self, VM_ENVVAL_PREV_EP_PTR(block->ep), (VALUE)me,
0, th->cfp->sp, 1, 0);
if (SYMBOL_P(ifunc)) {
func = rb_sym_proc_call;
data = SYM2ID((VALUE)ifunc);
}
else {
func = (rb_block_call_func *)ifunc->func;
data = (VALUE)ifunc->data;
}
val = (*func)(arg, data, argc, argv, blockarg);
vm_push_frame(th, (const rb_iseq_t *)captured->code.ifunc,
VM_FRAME_MAGIC_IFUNC,
self,
VM_GUARDED_PREV_EP(captured->ep),
(VALUE)me,
0, th->cfp->sp, 0, 0);
val = (*ifunc->func)(arg, ifunc->data, argc, argv, blockarg);
rb_vm_pop_frame(th);
return val;
}
static VALUE
vm_yield_with_symbol(rb_thread_t *th, VALUE symbol, int argc, const VALUE *argv, VALUE block_handler)
{
return rb_sym_proc_call(SYM2ID(symbol), argc, argv, vm_block_handler_to_proc(th, block_handler));
}
static inline int
vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
{
@ -2428,14 +2514,14 @@ vm_callee_setup_block_arg(rb_thread_t *th, struct rb_calling_info *calling, cons
}
static int
vm_yield_setup_args(rb_thread_t *th, const rb_iseq_t *iseq, const int argc, VALUE *argv, const rb_block_t *blockptr, enum arg_setup_type arg_setup_type)
vm_yield_setup_args(rb_thread_t *th, const rb_iseq_t *iseq, const int argc, VALUE *argv, VALUE block_handler, enum arg_setup_type arg_setup_type)
{
struct rb_calling_info calling_entry, *calling;
struct rb_call_info ci_entry, *ci;
calling = &calling_entry;
calling->argc = argc;
calling->blockptr = (rb_block_t *)blockptr;
calling->block_handler = block_handler;
ci_entry.flag = 0;
ci = &ci_entry;
@ -2443,64 +2529,128 @@ vm_yield_setup_args(rb_thread_t *th, const rb_iseq_t *iseq, const int argc, VALU
return vm_callee_setup_block_arg(th, calling, ci, iseq, argv, arg_setup_type);
}
/* ruby iseq -> ruby block iseq */
/* ruby iseq -> ruby block */
static VALUE
vm_invoke_iseq_block(rb_thread_t *th, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling, const struct rb_call_info *ci,
int is_lambda, const struct rb_captured_block *captured)
{
const rb_iseq_t *iseq = captured->code.iseq;
const int arg_size = iseq->body->param.size;
VALUE * const rsp = GET_SP() - calling->argc;
int opt_pc = vm_callee_setup_block_arg(th, calling, ci, iseq, rsp, is_lambda ? arg_setup_lambda : arg_setup_block);
SET_SP(rsp);
vm_push_frame(th, iseq,
is_lambda ? VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK,
captured->self,
VM_GUARDED_PREV_EP(captured->ep), 0,
iseq->body->iseq_encoded + opt_pc,
rsp + arg_size,
iseq->body->local_table_size - arg_size, iseq->body->stack_max);
return Qundef;
}
static VALUE
vm_invoke_symbol_block(rb_thread_t *th, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling, const struct rb_call_info *ci,
VALUE symbol)
{
VALUE val;
int argc;
CALLER_SETUP_ARG(th->cfp, calling, ci);
argc = calling->argc;
val = vm_yield_with_symbol(th, symbol, argc, STACK_ADDR_FROM_TOP(argc), VM_BLOCK_HANDLER_NONE);
POPN(argc);
return val;
}
static VALUE
vm_invoke_ifunc_block(rb_thread_t *th, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling, const struct rb_call_info *ci,
const struct rb_captured_block *captured)
{
VALUE val;
int argc;
CALLER_SETUP_ARG(th->cfp, calling, ci);
argc = calling->argc;
val = vm_yield_with_cfunc(th, captured, captured->self, argc, STACK_ADDR_FROM_TOP(argc), VM_BLOCK_HANDLER_NONE);
POPN(argc); /* TODO: should put before C/yield? */
return val;
}
static VALUE
vm_proc_to_block_handler(VALUE procval)
{
const struct rb_block *block = vm_proc_block(procval);
switch (vm_block_type(block)) {
case block_type_iseq:
return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
case block_type_ifunc:
return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
case block_type_symbol:
return VM_BH_FROM_SYMBOL(block->as.symbol);
case block_type_proc:
return VM_BH_FROM_PROC(block->as.proc);
}
VM_UNREACHABLE(vm_yield_with_proc);
return Qundef;
}
static VALUE
vm_invoke_block(rb_thread_t *th, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci)
{
const rb_block_t *block = VM_CF_BLOCK_PTR(reg_cfp);
VALUE block_handler = VM_CF_BLOCK_HANDLER(reg_cfp);
VALUE type = GET_ISEQ()->body->local_iseq->body->type;
int is_lambda = FALSE;
if ((type != ISEQ_TYPE_METHOD && type != ISEQ_TYPE_CLASS) || block == 0) {
if ((type != ISEQ_TYPE_METHOD && type != ISEQ_TYPE_CLASS) ||
block_handler == VM_BLOCK_HANDLER_NONE) {
rb_vm_localjump_error("no block given (yield)", Qnil, 0);
}
if (RUBY_VM_NORMAL_ISEQ_P(block->iseq)) {
const rb_iseq_t *iseq = block->iseq;
const int arg_size = iseq->body->param.size;
int is_lambda = block_proc_is_lambda(block->proc);
VALUE * const rsp = GET_SP() - calling->argc;
int opt_pc = vm_callee_setup_block_arg(th, calling, ci, iseq, rsp, is_lambda ? arg_setup_lambda : arg_setup_block);
SET_SP(rsp);
vm_push_frame(th, iseq,
is_lambda ? VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK,
block->self,
VM_ENVVAL_PREV_EP_PTR(block->ep), 0,
iseq->body->iseq_encoded + opt_pc,
rsp + arg_size,
iseq->body->local_size - arg_size, iseq->body->stack_max);
return Qundef;
}
else {
VALUE val;
int argc;
CALLER_SETUP_ARG(th->cfp, calling, ci);
argc = calling->argc;
val = vm_yield_with_cfunc(th, block, block->self, argc, STACK_ADDR_FROM_TOP(argc), 0);
POPN(argc); /* TODO: should put before C/yield? */
return val;
again:
switch (vm_block_handler_type(block_handler)) {
case block_handler_type_iseq:
{
const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
return vm_invoke_iseq_block(th, reg_cfp, calling, ci, is_lambda, captured);
}
case block_handler_type_ifunc:
{
const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
return vm_invoke_ifunc_block(th, reg_cfp, calling, ci, captured);
}
case block_handler_type_proc:
is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));
block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
goto again;
case block_handler_type_symbol:
return vm_invoke_symbol_block(th, reg_cfp, calling, ci, VM_BH_TO_SYMBOL(block_handler));
}
VM_UNREACHABLE(vm_invoke_block: unreachable);
return Qnil;
}
static VALUE
vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
{
rb_block_t *blockptr;
rb_thread_t *th = GET_THREAD();
rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
struct rb_captured_block *captured;
if (cfp == 0) {
rb_bug("vm_make_proc_with_iseq: unreachable");
}
blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
blockptr->iseq = blockiseq;
blockptr->proc = 0;
captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
captured->code.iseq = blockiseq;
return rb_vm_make_proc(th, blockptr, rb_cProc);
return rb_vm_make_proc(th, captured, rb_cProc);
}
static VALUE
@ -2619,7 +2769,7 @@ vm_defined(rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE
break;
}
case DEFINED_YIELD:
if (GET_BLOCK_PTR()) {
if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
expr_type = DEFINED_YIELD;
}
break;
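
(Illustrative aside, not part of the diff above.) A minimal, self-contained sketch of the dispatch pattern that vm_invoke_block() now uses: the passed block is a single VALUE-sized handler that is either a tagged pointer to a captured block or an ordinary object, and the yield site switches on its type, as vm_block_handler_type() does. The 2-bit tag scheme and every name below are assumptions invented for this example, not the VM's actual encoding.

/* toy_block_handler.c -- illustrative only, not part of this commit. */
#include <stdio.h>
#include <stdint.h>

typedef uintptr_t bh_value;                         /* stand-in for VALUE */

struct captured_block { const char *code; };        /* stand-in for rb_captured_block */

enum bh_type { BH_ISEQ, BH_IFUNC, BH_OBJECT, BH_NONE };

#define BH_NONE_VALUE ((bh_value)0)
#define BH_TAG_MASK   ((bh_value)0x03)
#define BH_TAG_ISEQ   ((bh_value)0x01)              /* assumed tag for iseq blocks  */
#define BH_TAG_IFUNC  ((bh_value)0x03)              /* assumed tag for ifunc blocks */

static bh_value bh_from_iseq(const struct captured_block *c)  { return (bh_value)c | BH_TAG_ISEQ; }
static bh_value bh_from_ifunc(const struct captured_block *c) { return (bh_value)c | BH_TAG_IFUNC; }

static enum bh_type
bh_type(bh_value bh)
{
    if (bh == BH_NONE_VALUE) return BH_NONE;
    switch (bh & BH_TAG_MASK) {
      case BH_TAG_ISEQ:  return BH_ISEQ;
      case BH_TAG_IFUNC: return BH_IFUNC;
      default:           return BH_OBJECT;          /* untagged: a Proc or Symbol, simplified */
    }
}

static const struct captured_block *
bh_to_captured(bh_value bh)
{
    return (const struct captured_block *)(bh & ~BH_TAG_MASK);
}

int
main(void)
{
    struct captured_block blk = { "puts 'hi'" };
    bh_value handlers[] = { bh_from_iseq(&blk), bh_from_ifunc(&blk), BH_NONE_VALUE };

    for (size_t i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
        switch (bh_type(handlers[i])) {             /* mirrors the switch in vm_invoke_block() */
          case BH_ISEQ:   printf("iseq block:  %s\n", bh_to_captured(handlers[i])->code); break;
          case BH_IFUNC:  printf("ifunc block: %s\n", bh_to_captured(handlers[i])->code); break;
          case BH_OBJECT: printf("proc/symbol block\n"); break;
          case BH_NONE:   printf("no block given (yield)\n"); break;
        }
    }
    return 0;
}

Because the handler fits in one word, it can be stored wherever a VALUE fits, which is what allows the separate rb_block_t pointer in the calling info to be replaced by calling->block_handler in the hunks above.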

View file

@ -104,7 +104,7 @@ enum vm_regan_acttype {
/* deal with variables */
/**********************************************************/
#define GET_PREV_EP(ep) ((VALUE *)((ep)[0] & ~0x03))
#define GET_PREV_EP(ep) ((VALUE *)((ep)[VM_ENV_DATA_INDEX_SPECVAL] & ~0x03))
#define GET_GLOBAL(entry) rb_gvar_get((struct rb_global_entry*)(entry))
#define SET_GLOBAL(entry, val) rb_gvar_set((struct rb_global_entry*)(entry), (val))
@ -148,7 +148,7 @@ enum vm_regan_acttype {
#define CI_SET_FASTPATH(ci, func, enabled) /* do nothing */
#endif
#define GET_BLOCK_PTR() ((rb_block_t *)(GC_GUARDED_PTR_REF(GET_LEP()[0])))
#define GET_BLOCK_HANDLER() (GET_LEP()[VM_ENV_DATA_INDEX_SPECVAL])
/**********************************************************/
/* deal with control flow 3: exception */
@ -173,7 +173,7 @@ enum vm_regan_acttype {
#define CALL_SIMPLE_METHOD(recv_) do { \
struct rb_calling_info calling; \
calling.blockptr = NULL; \
calling.block_handler = VM_BLOCK_HANDLER_NONE; \
calling.argc = ci->orig_argc; \
vm_search_method(ci, cc, calling.recv = (recv_)); \
CALL_METHOD(&calling, ci, cc); \
@ -189,7 +189,7 @@ static VALUE make_no_method_exception(VALUE exc, VALUE format, VALUE obj,
int argc, const VALUE *argv, int priv);
static inline struct vm_throw_data *
THROW_DATA_NEW(VALUE val, rb_control_frame_t *cf, VALUE st)
THROW_DATA_NEW(VALUE val, const rb_control_frame_t *cf, VALUE st)
{
return (struct vm_throw_data *)rb_imemo_new(imemo_throw_data, val, (VALUE)cf, st, 0);
}
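
(Illustrative aside, not part of the diff above.) A sketch of what GET_PREV_EP() and GET_BLOCK_HANDLER() now read: each environment carries a "specval" word holding either the guarded previous ep (for block frames) or the passed block handler (for the local frame), plus a flags word, so the handler is found by hopping to the local ep and reading that slot. Slot indexes, tag bits and all names below are assumptions made for this example, not the VM's real layout.

/* toy_env_specval.c -- illustrative only, not part of this commit. */
#include <stdio.h>
#include <stdint.h>

typedef uintptr_t val;

enum { IDX_SPECVAL = 0, IDX_FLAGS = 1, ENV_DATA_SIZE = 2 };

#define ENV_FLAG_LOCAL     ((val)0x01)   /* set on method/top frames, clear on block frames */
#define GUARD_TAG          ((val)0x01)   /* marks specval as a tagged prev-ep pointer */

#define GET_PREV_EP(ep)    ((val *)((ep)[IDX_SPECVAL] & ~(val)0x03))
#define ENV_LOCAL_P(ep)    (((ep)[IDX_FLAGS] & ENV_FLAG_LOCAL) != 0)

static const val *
get_lep(const val *ep)                   /* like GET_LEP(): hop until the local env */
{
    while (!ENV_LOCAL_P(ep)) ep = GET_PREV_EP(ep);
    return ep;
}

#define GET_BLOCK_HANDLER(ep) (get_lep(ep)[IDX_SPECVAL])

int
main(void)
{
    val method_env[ENV_DATA_SIZE] = { (val)0xB10C, ENV_FLAG_LOCAL };   /* specval = some block handler */
    val block_env[ENV_DATA_SIZE]  = { (val)method_env | GUARD_TAG, 0 }; /* specval = guarded prev ep    */

    printf("handler from the method frame: %#lx\n", (unsigned long)GET_BLOCK_HANDLER(method_env));
    printf("handler seen inside the block: %#lx\n", (unsigned long)GET_BLOCK_HANDLER(block_env));
    return 0;
}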

View file

@ -1871,9 +1871,9 @@ call_method_entry(rb_thread_t *th, VALUE defined_class, VALUE obj, ID id,
{
const rb_callable_method_entry_t *cme =
prepare_callable_method_entry(defined_class, id, me);
const rb_block_t *passed_block = th->passed_block;
VALUE passed_block_handler = vm_passed_block_handler(th);
VALUE result = vm_call0(th, obj, id, argc, argv, cme);
th->passed_block = passed_block;
vm_passed_block_handler_set(th, passed_block_handler);
return result;
}
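
(Illustrative aside, not part of the diff above.) The hunk above keeps the existing save/restore idiom around vm_call0() but stores the passed block as a plain VALUE handler rather than an rb_block_t pointer. A compilable sketch of that idiom, with all names assumed for the example:

/* toy_passed_handler.c -- illustrative only, not part of this commit. */
#include <stdio.h>
#include <stdint.h>

typedef uintptr_t bh_value;
#define BH_NONE ((bh_value)0)

struct toy_thread { bh_value passed_block_handler; };

static bh_value get_passed_bh(struct toy_thread *th)              { return th->passed_block_handler; }
static void     set_passed_bh(struct toy_thread *th, bh_value bh) { th->passed_block_handler = bh; }

/* a callee that may consume (clear) the passed block handler */
static void
call_consuming_block(struct toy_thread *th)
{
    printf("callee saw handler %#lx\n", (unsigned long)get_passed_bh(th));
    set_passed_bh(th, BH_NONE);
}

int
main(void)
{
    struct toy_thread th = { (bh_value)0xB10C };

    bh_value saved = get_passed_bh(&th);   /* save, like call_method_entry() above */
    call_consuming_block(&th);
    set_passed_bh(&th, saved);             /* restore after the call */

    printf("restored handler: %#lx\n", (unsigned long)get_passed_bh(&th));
    return 0;
}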