mirror of https://github.com/ruby/ruby.git synced 2022-11-09 12:17:21 -05:00

* vm_core.h: split rb_call_info_t into several structs.

* rb_call_info (ci) holds fixed information determined at compile time.
  * if ci->flag & VM_CALL_KWARG, then the rb_call_info is
    also an rb_call_info_with_kwarg. This technique saves one word
    in the common, non-kwarg rb_call_info case.
  * rb_calling_info holds temporary data (argc, blockptr, recv)
    for each method dispatch. This data is allocated only on the
    machine stack.
  * rb_call_cache is for the inline method cache.
  Before this patch, only rb_call_info_t data was passed.
  After this patch, the above three structs are passed.
  This patch improves:
  * data locality (rb_call_info is now read-only data).
  * reduced memory consumption (rb_call_info_with_kwarg,
    rb_calling_info).
* compile.c: use above data.
* insns.def: ditto.
* iseq.c: ditto.
* vm_args.c: ditto.
* vm_eval.c: ditto.
* vm_insnhelper.c: ditto.
* vm_insnhelper.h: ditto.
* iseq.h: add iseq_compile_data::ci_index and
  iseq_compile_data::ci_kw_index.
* tool/instruction.rb: introduce TS_CALLCACHE operand type.



git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@51903 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
ko1 committed 2015-09-19 17:59:58 +00:00
commit d5ec9ec308
parent 19499aaeb1
12 changed files with 681 additions and 522 deletions
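
For orientation, a condensed sketch of the four records the ChangeLog entry describes, paraphrased from the vm_core.h hunk further down this page (field lists are abbreviated; ID, VALUE, rb_serial_t and rb_callable_method_entry_t are Ruby-internal types):

/* Fixed at compile time; stored per call site in iseq->body->ci_entries. */
struct rb_call_info {
    ID mid;
    unsigned int flag;
    int orig_argc;
};

/* Keyword-argument table referenced by the wider record below. */
struct rb_call_info_kw_arg {
    int keyword_len;
    VALUE keywords[1];
};

/* Used only when ci->flag & VM_CALL_KWARG is set: the base struct sits first,
 * so a plain call site pays for rb_call_info alone (the "one word" saving). */
struct rb_call_info_with_kwarg {
    struct rb_call_info ci;
    struct rb_call_info_kw_arg *kw_arg;
};

/* Per-dispatch temporaries; built on the machine stack by each call
 * instruction, never stored in the iseq. */
struct rb_calling_info {
    struct rb_block_struct *blockptr;
    VALUE recv;
    int argc;
};

/* Inline method cache; stored per call site in iseq->body->cc_entries. */
struct rb_call_cache {
    rb_serial_t method_state;             /* cache key */
    rb_serial_t class_serial;             /* cache key */
    const rb_callable_method_entry_t *me; /* cached value */
    VALUE (*call)(struct rb_thread_struct *th,
                  struct rb_control_frame_struct *cfp,
                  struct rb_calling_info *calling,
                  const struct rb_call_info *ci,
                  struct rb_call_cache *cc); /* dispatch fast path */
    /* aux union (ivar index / method_missing reason / cfunc sp) omitted */
};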

ChangeLog

@ -1,3 +1,42 @@
Sun Sep 20 02:46:34 2015 Koichi Sasada <ko1@atdot.net>
* vm_core.h: split rb_call_info_t into several structs.
* rb_call_info (ci) holds fixed information determined at compile time.
* if ci->flag & VM_CALL_KWARG, then the rb_call_info is
also an rb_call_info_with_kwarg. This technique saves one word
in the common, non-kwarg rb_call_info case.
* rb_calling_info holds temporary data (argc, blockptr, recv)
for each method dispatch. This data is allocated only on the
machine stack.
* rb_call_cache is for the inline method cache.
Before this patch, only rb_call_info_t data was passed.
After this patch, the above three structs are passed.
This patch improves:
* data locality (rb_call_info is now read-only data).
* reduced memory consumption (rb_call_info_with_kwarg,
rb_calling_info).
* compile.c: use above data.
* insns.def: ditto.
* iseq.c: ditto.
* vm_args.c: ditto.
* vm_eval.c: ditto.
* vm_insnhelper.c: ditto.
* vm_insnhelper.h: ditto.
* iseq.h: add iseq_compile_data::ci_index and
iseq_compile_data::ci_kw_index.
* tool/instruction.rb: introduce TS_CALLCACHE operand type.
Sun Sep 20 02:18:10 2015 Tanaka Akira <akr@fsij.org>
* test/lib/envutil.rb: mkfifo command based File.mkfifo method

133
compile.c

@ -944,45 +944,42 @@ new_insn_body(rb_iseq_t *iseq, int line_no, enum ruby_vminsn_type insn_id, int a
return new_insn_core(iseq, line_no, insn_id, argc, operands);
}
static rb_call_info_t *
new_callinfo(rb_iseq_t *iseq, ID mid, int argc, unsigned int flag, rb_call_info_kw_arg_t *kw_arg, int has_blockiseq)
static struct rb_call_info *
new_callinfo(rb_iseq_t *iseq, ID mid, int argc, unsigned int flag, struct rb_call_info_kw_arg *kw_arg, int has_blockiseq)
{
rb_call_info_t *ci = (rb_call_info_t *)compile_data_alloc(iseq, sizeof(rb_call_info_t));
size_t size = kw_arg != NULL ? sizeof(struct rb_call_info_with_kwarg) : sizeof(struct rb_call_info);
struct rb_call_info *ci = (struct rb_call_info *)compile_data_alloc(iseq, size);
struct rb_call_info_with_kwarg *ci_kw = (struct rb_call_info_with_kwarg *)ci;
ci->mid = mid;
ci->flag = flag;
ci->orig_argc = argc;
ci->argc = argc;
ci->kw_arg = kw_arg;
if (kw_arg) {
ci->argc += kw_arg->keyword_len;
ci->flag |= VM_CALL_KWARG;
ci_kw->kw_arg = kw_arg;
ci->orig_argc += kw_arg->keyword_len;
iseq->body->ci_kw_size++;
}
else {
iseq->body->ci_size++;
}
if (!(ci->flag & (VM_CALL_ARGS_SPLAT | VM_CALL_ARGS_BLOCKARG)) &&
ci->kw_arg == NULL && !has_blockiseq) {
kw_arg == NULL && !has_blockiseq) {
ci->flag |= VM_CALL_ARGS_SIMPLE;
}
ci->method_state = 0;
ci->class_serial = 0;
ci->blockptr = 0;
ci->recv = Qundef;
ci->call = 0; /* TODO: should set default function? */
ci->aux.index = iseq->body->callinfo_size++;
return ci;
}
static INSN *
new_insn_send(rb_iseq_t *iseq, int line_no, ID id, VALUE argc, const rb_iseq_t *blockiseq, VALUE flag, rb_call_info_kw_arg_t *keywords)
new_insn_send(rb_iseq_t *iseq, int line_no, ID id, VALUE argc, const rb_iseq_t *blockiseq, VALUE flag, struct rb_call_info_kw_arg *keywords)
{
VALUE *operands = (VALUE *)compile_data_alloc(iseq, sizeof(VALUE) * 2);
VALUE *operands = (VALUE *)compile_data_alloc(iseq, sizeof(VALUE) * 3);
operands[0] = (VALUE)new_callinfo(iseq, id, FIX2INT(argc), FIX2INT(flag), keywords, blockiseq != NULL);
operands[1] = (VALUE)blockiseq;
return new_insn_core(iseq, line_no, BIN(send), 2, operands);
operands[1] = Qfalse; /* cache */
operands[2] = (VALUE)blockiseq;
return new_insn_core(iseq, line_no, BIN(send), 3, operands);
}
static rb_iseq_t *
@ -1497,8 +1494,11 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *anchor)
generated_iseq = ALLOC_N(VALUE, code_index);
line_info_table = ALLOC_N(struct iseq_line_info_entry, insn_num);
iseq->body->is_entries = ZALLOC_N(union iseq_inline_storage_entry, iseq->body->is_size);
iseq->body->callinfo_entries = ALLOC_N(rb_call_info_t, iseq->body->callinfo_size);
/* MEMZERO(iseq->body->callinfo_entries, rb_call_info_t, iseq->body->callinfo_size); */
iseq->body->ci_entries = (struct rb_call_info *)ruby_xmalloc(sizeof(struct rb_call_info) * iseq->body->ci_size +
sizeof(struct rb_call_info_with_kwarg) * iseq->body->ci_kw_size);
iseq->body->cc_entries = ZALLOC_N(struct rb_call_cache, iseq->body->ci_size + iseq->body->ci_kw_size);
iseq->compile_data->ci_index = iseq->compile_data->ci_kw_index = 0;
list = FIRST_ELEMENT(anchor);
line_info_index = code_index = sp = 0;
@ -1599,16 +1599,31 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *anchor)
}
case TS_CALLINFO: /* call info */
{
rb_call_info_t *base_ci = (rb_call_info_t *)operands[j];
rb_call_info_t *ci = &iseq->body->callinfo_entries[base_ci->aux.index];
*ci = *base_ci;
struct rb_call_info *base_ci = (struct rb_call_info *)operands[j];
struct rb_call_info *ci;
if (UNLIKELY(base_ci->aux.index >= iseq->body->callinfo_size)) {
rb_bug("iseq_set_sequence: ci_index overflow: index: %d, size: %d", base_ci->argc, iseq->body->callinfo_size);
if (base_ci->flag & VM_CALL_KWARG) {
struct rb_call_info_with_kwarg *ci_kw_entries = (struct rb_call_info_with_kwarg *)&iseq->body->ci_entries[iseq->body->ci_size];
struct rb_call_info_with_kwarg *ci_kw = &ci_kw_entries[iseq->compile_data->ci_kw_index++];
*ci_kw = *((struct rb_call_info_with_kwarg *)base_ci);
ci = (struct rb_call_info *)ci_kw;
assert(iseq->compile_data->ci_kw_index <= iseq->body->ci_kw_size);
}
else {
ci = &iseq->body->ci_entries[iseq->compile_data->ci_index++];
*ci = *base_ci;
assert(iseq->compile_data->ci_index <= iseq->body->ci_size);
}
generated_iseq[code_index + 1 + j] = (VALUE)ci;
break;
}
case TS_CALLCACHE:
{
struct rb_call_cache *cc = &iseq->body->cc_entries[iseq->compile_data->ci_index + iseq->compile_data->ci_kw_index - 1];
generated_iseq[code_index + 1 + j] = (VALUE)cc;
break;
}
case TS_ID: /* ID */
generated_iseq[code_index + 1 + j] = SYM2ID(operands[j]);
break;
@ -1948,7 +1963,7 @@ iseq_peephole_optimize(rb_iseq_t *iseq, LINK_ELEMENT *list, const int do_tailcal
enum ruby_vminsn_type previ = piobj->insn_id;
if (previ == BIN(send) || previ == BIN(opt_send_without_block) || previ == BIN(invokesuper)) {
rb_call_info_t *ci = (rb_call_info_t *)piobj->operands[0];
struct rb_call_info *ci = (struct rb_call_info *)piobj->operands[0];
rb_iseq_t *blockiseq = (rb_iseq_t *)piobj->operands[1];
if (blockiseq == 0) {
ci->flag |= VM_CALL_TAILCALL;
@ -1966,9 +1981,12 @@ insn_set_specialized_instruction(rb_iseq_t *iseq, INSN *iobj, int insn_id)
if (insn_id == BIN(opt_neq)) {
VALUE *old_operands = iobj->operands;
iobj->operand_size = 4;
iobj->operands = (VALUE *)compile_data_alloc(iseq, iobj->operand_size * sizeof(VALUE));
iobj->operands[0] = old_operands[0];
iobj->operands[1] = (VALUE)new_callinfo(iseq, idEq, 1, 0, NULL, FALSE);
iobj->operands[1] = Qfalse; /* CALL_CACHE */
iobj->operands[2] = (VALUE)new_callinfo(iseq, idEq, 1, 0, NULL, FALSE);
iobj->operands[3] = Qfalse; /* CALL_CACHE */
}
return COMPILE_OK;
@ -1978,8 +1996,8 @@ static int
iseq_specialized_instruction(rb_iseq_t *iseq, INSN *iobj)
{
if (iobj->insn_id == BIN(send)) {
rb_call_info_t *ci = (rb_call_info_t *)OPERAND_AT(iobj, 0);
const rb_iseq_t *blockiseq = (rb_iseq_t *)OPERAND_AT(iobj, 1);
struct rb_call_info *ci = (struct rb_call_info *)OPERAND_AT(iobj, 0);
const rb_iseq_t *blockiseq = (rb_iseq_t *)OPERAND_AT(iobj, 2);
#define SP_INSN(opt) insn_set_specialized_instruction(iseq, iobj, BIN(opt_##opt))
if (ci->flag & VM_CALL_ARGS_SIMPLE) {
@ -2020,7 +2038,7 @@ iseq_specialized_instruction(rb_iseq_t *iseq, INSN *iobj)
if ((ci->flag & VM_CALL_ARGS_BLOCKARG) == 0 && blockiseq == NULL) {
iobj->insn_id = BIN(opt_send_without_block);
iobj->operand_size = 1;
iobj->operand_size = insn_len(iobj->insn_id) - 1;
}
}
#undef SP_INSN
@ -2402,7 +2420,7 @@ compile_branch_condition(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * cond,
}
static int
compile_array_keyword_arg(rb_iseq_t *iseq, LINK_ANCHOR *ret, const NODE * const root_node, rb_call_info_kw_arg_t ** const kw_arg_ptr)
compile_array_keyword_arg(rb_iseq_t *iseq, LINK_ANCHOR *ret, const NODE * const root_node, struct rb_call_info_kw_arg ** const kw_arg_ptr)
{
if (kw_arg_ptr == NULL) return FALSE;
@ -2427,7 +2445,7 @@ compile_array_keyword_arg(rb_iseq_t *iseq, LINK_ANCHOR *ret, const NODE * const
node = root_node->nd_head;
{
int len = (int)node->nd_alen / 2;
rb_call_info_kw_arg_t *kw_arg = (rb_call_info_kw_arg_t *)ruby_xmalloc(sizeof(rb_call_info_kw_arg_t) + sizeof(VALUE) * (len - 1));
struct rb_call_info_kw_arg *kw_arg = (struct rb_call_info_kw_arg *)ruby_xmalloc(sizeof(struct rb_call_info_kw_arg) + sizeof(VALUE) * (len - 1));
VALUE *keywords = kw_arg->keywords;
int i = 0;
kw_arg->keyword_len = len;
@ -2455,7 +2473,7 @@ enum compile_array_type_t {
static int
compile_array_(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE* node_root,
enum compile_array_type_t type, rb_call_info_kw_arg_t **keywords_ptr, int poped)
enum compile_array_type_t type, struct rb_call_info_kw_arg **keywords_ptr, int poped)
{
NODE *node = node_root;
int line = (int)nd_line(node);
@ -2680,15 +2698,15 @@ compile_massign_lhs(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE *node)
switch (nd_type(node)) {
case NODE_ATTRASGN: {
INSN *iobj;
rb_call_info_t *ci;
struct rb_call_info *ci;
VALUE dupidx;
COMPILE_POPED(ret, "masgn lhs (NODE_ATTRASGN)", node);
POP_ELEMENT(ret); /* pop pop insn */
iobj = (INSN *)POP_ELEMENT(ret); /* pop send insn */
ci = (rb_call_info_t *)iobj->operands[0];
ci->orig_argc += 1; ci->argc = ci->orig_argc;
ci = (struct rb_call_info *)iobj->operands[0];
ci->orig_argc += 1;
dupidx = INT2FIX(ci->orig_argc);
ADD_INSN1(ret, nd_line(node), topn, dupidx);
@ -3228,7 +3246,7 @@ add_ensure_iseq(LINK_ANCHOR *ret, rb_iseq_t *iseq, int is_return)
}
static VALUE
setup_args(rb_iseq_t *iseq, LINK_ANCHOR *args, NODE *argn, unsigned int *flag, rb_call_info_kw_arg_t **keywords)
setup_args(rb_iseq_t *iseq, LINK_ANCHOR *args, NODE *argn, unsigned int *flag, struct rb_call_info_kw_arg **keywords)
{
VALUE argc = INT2FIX(0);
int nsplat = 0;
@ -4504,8 +4522,10 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
VALUE str = rb_fstring(node->nd_args->nd_head->nd_lit);
node->nd_args->nd_head->nd_lit = str;
COMPILE(ret, "recv", node->nd_recv);
ADD_INSN2(ret, line, opt_aref_with,
new_callinfo(iseq, idAREF, 1, 0, NULL, FALSE), str);
ADD_INSN3(ret, line, opt_aref_with,
new_callinfo(iseq, idAREF, 1, 0, NULL, FALSE),
Qnil, /* CALL_CACHE */
str);
if (poped) {
ADD_INSN(ret, line, pop);
}
@ -4523,7 +4543,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
ID mid = node->nd_mid;
VALUE argc;
unsigned int flag = 0;
rb_call_info_kw_arg_t *keywords = NULL;
struct rb_call_info_kw_arg *keywords = NULL;
const rb_iseq_t *parent_block = iseq->compile_data->current_block;
iseq->compile_data->current_block = NULL;
@ -4635,7 +4655,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
DECL_ANCHOR(args);
int argc;
unsigned int flag = 0;
rb_call_info_kw_arg_t *keywords = NULL;
struct rb_call_info_kw_arg *keywords = NULL;
const rb_iseq_t *parent_block = iseq->compile_data->current_block;
INIT_ANCHOR(args);
@ -4742,8 +4762,9 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
/* dummy receiver */
ADD_INSN1(ret, line, putobject, nd_type(node) == NODE_ZSUPER ? Qfalse : Qtrue);
ADD_SEQ(ret, args);
ADD_INSN2(ret, line, invokesuper,
ADD_INSN3(ret, line, invokesuper,
new_callinfo(iseq, 0, argc, flag | VM_CALL_SUPER | VM_CALL_FCALL, keywords, parent_block != NULL),
Qnil, /* CALL_CACHE */
parent_block);
if (poped) {
@ -4839,7 +4860,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
DECL_ANCHOR(args);
VALUE argc;
unsigned int flag = 0;
rb_call_info_kw_arg_t *keywords = NULL;
struct rb_call_info_kw_arg *keywords = NULL;
INIT_ANCHOR(args);
if (iseq->body->type == ISEQ_TYPE_TOP) {
@ -4982,7 +5003,7 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
else {
ADD_SEQ(ret, recv);
ADD_SEQ(ret, val);
ADD_INSN1(ret, line, opt_regexpmatch2, new_callinfo(iseq, idEqTilde, 1, 0, NULL, FALSE));
ADD_INSN2(ret, line, opt_regexpmatch2, new_callinfo(iseq, idEqTilde, 1, 0, NULL, FALSE), Qnil);
}
}
else {
@ -5516,8 +5537,9 @@ iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *ret, NODE * node, int poped)
ADD_INSN(ret, line, swap);
ADD_INSN1(ret, line, topn, INT2FIX(1));
}
ADD_INSN2(ret, line, opt_aset_with,
new_callinfo(iseq, idASET, 2, 0, NULL, FALSE), str);
ADD_INSN3(ret, line, opt_aset_with,
new_callinfo(iseq, idASET, 2, 0, NULL, FALSE),
Qnil/* CALL_CACHE */, str);
ADD_INSN(ret, line, pop);
break;
}
@ -5687,13 +5709,17 @@ insn_data_to_s_detail(INSN *iobj)
break;
case TS_CALLINFO: /* call info */
{
rb_call_info_t *ci = (rb_call_info_t *)OPERAND_AT(iobj, j);
struct rb_call_info *ci = (struct rb_call_info *)OPERAND_AT(iobj, j);
rb_str_cat2(str, "<callinfo:");
if (ci->mid)
rb_str_catf(str, "%"PRIsVALUE, rb_id2str(ci->mid));
if (ci->mid) rb_str_catf(str, "%"PRIsVALUE, rb_id2str(ci->mid));
rb_str_catf(str, ", %d>", ci->orig_argc);
break;
}
case TS_CALLCACHE: /* call cache */
{
rb_str_catf(str, "<call cache>");
break;
}
case TS_CDHASH: /* case/when condition cache */
rb_str_cat2(str, "<ch>");
break;
@ -5911,7 +5937,7 @@ iseq_build_callinfo_from_hash(rb_iseq_t *iseq, VALUE op)
ID mid = 0;
int orig_argc = 0;
unsigned int flag = 0;
rb_call_info_kw_arg_t *kw_arg = 0;
struct rb_call_info_kw_arg *kw_arg = 0;
if (!NIL_P(op)) {
VALUE vmid = rb_hash_aref(op, ID2SYM(rb_intern("mid")));
@ -6030,6 +6056,9 @@ iseq_build_from_ary_body(rb_iseq_t *iseq, LINK_ANCHOR *anchor,
case TS_CALLINFO:
argv[j] = iseq_build_callinfo_from_hash(iseq, op);
break;
case TS_CALLCACHE:
argv[j] = Qfalse;
break;
case TS_ID:
argv[j] = rb_convert_type(op, T_SYMBOL,
"Symbol", "to_sym");

102
insns.def

@ -936,14 +936,15 @@ defineclass
*/
DEFINE_INSN
send
(CALL_INFO ci, ISEQ iseq)
(CALL_INFO ci, CALL_CACHE cc, ISEQ blockiseq)
(...)
(VALUE val) // inc += - (int)(ci->orig_argc + ((ci->flag & VM_CALL_ARGS_BLOCKARG) ? 1 : 0));
{
ci->argc = ci->orig_argc;
vm_caller_setup_arg_block(th, reg_cfp, ci, iseq, FALSE);
vm_search_method(ci, ci->recv = TOPN(ci->argc));
CALL_METHOD(ci);
struct rb_calling_info calling;
vm_caller_setup_arg_block(th, reg_cfp, &calling, ci, blockiseq, FALSE);
vm_search_method(ci, cc, calling.recv = TOPN(calling.argc = ci->orig_argc));
CALL_METHOD(&calling, ci, cc);
}
DEFINE_INSN
@ -967,13 +968,14 @@ opt_str_freeze
*/
DEFINE_INSN
opt_send_without_block
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(...)
(VALUE val) // inc += -ci->orig_argc;
{
ci->argc = ci->orig_argc;
vm_search_method(ci, ci->recv = TOPN(ci->argc));
CALL_METHOD(ci);
struct rb_calling_info calling;
calling.blockptr = NULL;
vm_search_method(ci, cc, calling.recv = TOPN(calling.argc = ci->orig_argc));
CALL_METHOD(&calling, ci, cc);
}
/**
@ -983,15 +985,17 @@ opt_send_without_block
*/
DEFINE_INSN
invokesuper
(CALL_INFO ci, ISEQ iseq)
(CALL_INFO ci, CALL_CACHE cc, ISEQ blockiseq)
(...)
(VALUE val) // inc += - (int)(ci->orig_argc + ((ci->flag & VM_CALL_ARGS_BLOCKARG) ? 1 : 0));
{
ci->argc = ci->orig_argc;
vm_caller_setup_arg_block(th, reg_cfp, ci, iseq, TRUE);
ci->recv = GET_SELF();
vm_search_super_method(th, GET_CFP(), ci);
CALL_METHOD(ci);
struct rb_calling_info calling;
calling.argc = ci->orig_argc;
vm_caller_setup_arg_block(th, reg_cfp, &calling, ci, blockiseq, TRUE);
calling.recv = GET_SELF();
vm_search_super_method(th, GET_CFP(), &calling, ci, cc);
CALL_METHOD(&calling, ci, cc);
}
/**
@ -1005,10 +1009,12 @@ invokeblock
(...)
(VALUE val) // inc += 1 - ci->orig_argc;
{
ci->argc = ci->orig_argc;
ci->blockptr = 0;
ci->recv = GET_SELF();
val = vm_invoke_block(th, GET_CFP(), ci);
struct rb_calling_info calling;
calling.argc = ci->orig_argc;
calling.blockptr = NULL;
calling.recv = GET_SELF();
val = vm_invoke_block(th, GET_CFP(), &calling, ci);
if (val == Qundef) {
RESTORE_REGS();
NEXT_INSN();
@ -1260,7 +1266,7 @@ opt_case_dispatch
*/
DEFINE_INSN
opt_plus
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@ -1323,7 +1329,7 @@ opt_plus
*/
DEFINE_INSN
opt_minus
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@ -1371,7 +1377,7 @@ opt_minus
*/
DEFINE_INSN
opt_mult
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@ -1421,7 +1427,7 @@ opt_mult
*/
DEFINE_INSN
opt_div
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@ -1484,7 +1490,7 @@ opt_div
*/
DEFINE_INSN
opt_mod
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@ -1551,11 +1557,11 @@ opt_mod
*/
DEFINE_INSN
opt_eq
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
val = opt_eq_func(recv, obj, ci);
val = opt_eq_func(recv, obj, ci, cc);
if (val == Qundef) {
/* other */
@ -1572,16 +1578,17 @@ opt_eq
*/
DEFINE_INSN
opt_neq
(CALL_INFO ci, CALL_INFO ci_eq)
(CALL_INFO ci, CALL_CACHE cc, CALL_INFO ci_eq, CALL_CACHE cc_eq)
(VALUE recv, VALUE obj)
(VALUE val)
{
extern VALUE rb_obj_not_equal(VALUE obj1, VALUE obj2);
vm_search_method(ci, recv);
vm_search_method(ci, cc, recv);
val = Qundef;
if (check_cfunc(ci->me, rb_obj_not_equal)) {
val = opt_eq_func(recv, obj, ci_eq);
if (check_cfunc(cc->me, rb_obj_not_equal)) {
val = opt_eq_func(recv, obj, ci_eq, cc_eq);
if (val != Qundef) {
val = RTEST(val) ? Qfalse : Qtrue;
@ -1603,7 +1610,7 @@ opt_neq
*/
DEFINE_INSN
opt_lt
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@ -1647,7 +1654,7 @@ opt_lt
*/
DEFINE_INSN
opt_le
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@ -1682,7 +1689,7 @@ opt_le
*/
DEFINE_INSN
opt_gt
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@ -1726,7 +1733,7 @@ opt_gt
*/
DEFINE_INSN
opt_ge
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@ -1760,7 +1767,7 @@ opt_ge
*/
DEFINE_INSN
opt_ltlt
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@ -1792,7 +1799,7 @@ opt_ltlt
*/
DEFINE_INSN
opt_aref
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj)
(VALUE val)
{
@ -1822,7 +1829,7 @@ opt_aref
*/
DEFINE_INSN
opt_aset
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv, VALUE obj, VALUE set)
(VALUE val)
{
@ -1855,7 +1862,7 @@ opt_aset
*/
DEFINE_INSN
opt_aset_with
(CALL_INFO ci, VALUE key)
(CALL_INFO ci, CALL_CACHE cc, VALUE key)
(VALUE recv, VALUE val)
(VALUE val)
{
@ -1877,7 +1884,7 @@ opt_aset_with
*/
DEFINE_INSN
opt_aref_with
(CALL_INFO ci, VALUE key)
(CALL_INFO ci, CALL_CACHE cc, VALUE key)
(VALUE recv)
(VALUE val)
{
@ -1898,7 +1905,7 @@ opt_aref_with
*/
DEFINE_INSN
opt_length
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv)
(VALUE val)
{
@ -1933,7 +1940,7 @@ opt_length
*/
DEFINE_INSN
opt_size
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv)
(VALUE val)
{
@ -1968,7 +1975,7 @@ opt_size
*/
DEFINE_INSN
opt_empty_p
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv)
(VALUE val)
{
@ -2006,7 +2013,7 @@ opt_empty_p
*/
DEFINE_INSN
opt_succ
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv)
(VALUE val)
{
@ -2049,14 +2056,15 @@ opt_succ
*/
DEFINE_INSN
opt_not
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE recv)
(VALUE val)
{
extern VALUE rb_obj_not(VALUE obj);
vm_search_method(ci, recv);
if (check_cfunc(ci->me, rb_obj_not)) {
vm_search_method(ci, cc, recv);
if (check_cfunc(cc->me, rb_obj_not)) {
val = RTEST(recv) ? Qfalse : Qtrue;
}
else {
@ -2092,7 +2100,7 @@ opt_regexpmatch1
*/
DEFINE_INSN
opt_regexpmatch2
(CALL_INFO ci)
(CALL_INFO ci, CALL_CACHE cc)
(VALUE obj2, VALUE obj1)
(VALUE val)
{

48
iseq.c

@ -74,14 +74,14 @@ rb_iseq_free(const rb_iseq_t *iseq)
ruby_xfree((void *)iseq->body->local_table);
ruby_xfree((void *)iseq->body->is_entries);
if (iseq->body->callinfo_entries) {
if (iseq->body->ci_entries) {
unsigned int i;
for (i=0; i<iseq->body->callinfo_size; i++) {
/* TODO: revisit callinfo data structure */
const rb_call_info_kw_arg_t *kw_arg = iseq->body->callinfo_entries[i].kw_arg;
struct rb_call_info_with_kwarg *ci_kw_entries = (struct rb_call_info_with_kwarg *)&iseq->body->ci_entries[iseq->body->ci_size];
for (i=0; i<iseq->body->ci_kw_size; i++) {
const struct rb_call_info_kw_arg *kw_arg = ci_kw_entries[i].kw_arg;
ruby_xfree((void *)kw_arg);
}
ruby_xfree(iseq->body->callinfo_entries);
ruby_xfree(iseq->body->ci_entries);
}
ruby_xfree((void *)iseq->body->catch_table);
ruby_xfree((void *)iseq->body->param.opt_table);
@ -161,7 +161,7 @@ iseq_memsize(const rb_iseq_t *iseq)
}
if (body) {
rb_call_info_t *ci_entries = body->callinfo_entries;
struct rb_call_info_with_kwarg *ci_kw_entries = (struct rb_call_info_with_kwarg *)&body->ci_entries[body->ci_size];
size += sizeof(struct rb_iseq_constant_body);
size += body->iseq_size * sizeof(VALUE);
@ -173,13 +173,14 @@ iseq_memsize(const rb_iseq_t *iseq)
size += (body->param.opt_num + 1) * sizeof(VALUE);
size += param_keyword_size(body->param.keyword);
size += body->is_size * sizeof(union iseq_inline_storage_entry);
size += body->callinfo_size * sizeof(rb_call_info_t);
size += body->ci_size * sizeof(struct rb_call_info);
size += body->ci_kw_size * sizeof(struct rb_call_info_with_kwarg);
if (ci_entries) {
if (ci_kw_entries) {
unsigned int i;
for (i = 0; i < body->callinfo_size; i++) {
const rb_call_info_kw_arg_t *kw_arg = ci_entries[i].kw_arg;
for (i = 0; i < body->ci_kw_size; i++) {
const struct rb_call_info_kw_arg *kw_arg = ci_kw_entries[i].kw_arg;
if (kw_arg) {
size += rb_call_info_kw_arg_bytes(kw_arg->keyword_len);
@ -1267,7 +1268,7 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
case TS_CALLINFO:
{
rb_call_info_t *ci = (rb_call_info_t *)op;
struct rb_call_info *ci = (struct rb_call_info *)op;
VALUE ary = rb_ary_new();
if (ci->mid) {
@ -1276,8 +1277,8 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
rb_ary_push(ary, rb_sprintf("argc:%d", ci->orig_argc));
if (ci->kw_arg) {
rb_ary_push(ary, rb_sprintf("kw:%d", ci->kw_arg->keyword_len));
if (ci->flag & VM_CALL_KWARG) {
rb_ary_push(ary, rb_sprintf("kw:%d", ((struct rb_call_info_with_kwarg *)ci)->kw_arg->keyword_len));
}
if (ci->flag) {
@ -1288,6 +1289,7 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
if (ci->flag & VM_CALL_VCALL) rb_ary_push(flags, rb_str_new2("VCALL"));
if (ci->flag & VM_CALL_TAILCALL) rb_ary_push(flags, rb_str_new2("TAILCALL"));
if (ci->flag & VM_CALL_SUPER) rb_ary_push(flags, rb_str_new2("SUPER"));
if (ci->flag & VM_CALL_KWARG) rb_ary_push(flags, rb_str_new2("KWARG"));
if (ci->flag & VM_CALL_OPT_SEND) rb_ary_push(flags, rb_str_new2("SEND")); /* maybe not reachable */
if (ci->flag & VM_CALL_ARGS_SIMPLE) rb_ary_push(flags, rb_str_new2("ARGS_SIMPLE")); /* maybe not reachable */
rb_ary_push(ary, rb_ary_join(flags, rb_str_new2("|")));
@ -1296,6 +1298,10 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
}
break;
case TS_CALLCACHE:
ret = rb_str_new2("<callcache>");
break;
case TS_CDHASH:
ret = rb_str_new2("<cdhash>");
break;
@ -1883,20 +1889,21 @@ iseq_data_to_ary(const rb_iseq_t *iseq)
break;
case TS_CALLINFO:
{
rb_call_info_t *ci = (rb_call_info_t *)*seq;
struct rb_call_info *ci = (struct rb_call_info *)*seq;
VALUE e = rb_hash_new();
int orig_argc = ci->orig_argc;
rb_hash_aset(e, ID2SYM(rb_intern("mid")), ci->mid ? ID2SYM(ci->mid) : Qnil);
rb_hash_aset(e, ID2SYM(rb_intern("flag")), UINT2NUM(ci->flag));
if (ci->kw_arg) {
if (ci->flag & VM_CALL_KWARG) {
struct rb_call_info_with_kwarg *ci_kw = (struct rb_call_info_with_kwarg *)ci;
int i;
VALUE kw = rb_ary_new2((long)ci->kw_arg->keyword_len);
VALUE kw = rb_ary_new2((long)ci_kw->kw_arg->keyword_len);
orig_argc -= ci->kw_arg->keyword_len;
for (i = 0; i < ci->kw_arg->keyword_len; i++) {
rb_ary_push(kw, ci->kw_arg->keywords[i]);
orig_argc -= ci_kw->kw_arg->keyword_len;
for (i = 0; i < ci_kw->kw_arg->keyword_len; i++) {
rb_ary_push(kw, ci_kw->kw_arg->keywords[i]);
}
rb_hash_aset(e, ID2SYM(rb_intern("kw_arg")), kw);
}
@ -1906,6 +1913,9 @@ iseq_data_to_ary(const rb_iseq_t *iseq)
rb_ary_push(ary, e);
}
break;
case TS_CALLCACHE:
rb_ary_push(ary, Qfalse);
break;
case TS_ID:
rb_ary_push(ary, ID2SYM(*seq));
break;
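
The rb_insn_operand_intern and iseq_data_to_ary hunks above recover the keyword table by testing VM_CALL_KWARG and casting the rb_call_info to the wider record. A minimal sketch of that read-side pattern, assuming the struct definitions from the vm_core.h hunk below; the helper name ci_kw_arg is ours, for illustration only:

/* Illustrative helper (not part of this commit): recover the keyword-argument
 * table from a call info. The VM_CALL_KWARG bit guarantees that this
 * rb_call_info is the leading member of an rb_call_info_with_kwarg, so the
 * wider record is reachable with a cast. */
static const struct rb_call_info_kw_arg *
ci_kw_arg(const struct rb_call_info *ci)
{
    if (ci->flag & VM_CALL_KWARG) {
        return ((const struct rb_call_info_with_kwarg *)ci)->kw_arg;
    }
    return NULL; /* plain call site: no keyword arguments attached */
}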

4
iseq.h

@ -20,7 +20,7 @@ typedef struct rb_iseq_struct rb_iseq_t;
static inline size_t
rb_call_info_kw_arg_bytes(int keyword_len)
{
return sizeof(rb_call_info_kw_arg_t) + sizeof(VALUE) * (keyword_len - 1);
return sizeof(struct rb_call_info_kw_arg) + sizeof(VALUE) * (keyword_len - 1);
}
RUBY_SYMBOL_EXPORT_BEGIN
@ -142,6 +142,8 @@ struct iseq_compile_data {
int last_coverable_line;
int label_no;
int node_level;
unsigned int ci_index;
unsigned int ci_kw_index;
const rb_compile_option_t *option;
#if SUPPORT_JOKE
st_table *labels_table;

tool/instruction.rb

@ -715,7 +715,7 @@ class RubyVM
# skip make operands when body has no reference to this operand
# TODO: really needed?
re = /\b#{var}\b/n
if re =~ insn.body or re =~ insn.sp_inc or insn.rets.any?{|t, v| re =~ v} or re =~ 'ic' or re =~ 'ci'
if re =~ insn.body or re =~ insn.sp_inc or insn.rets.any?{|t, v| re =~ v} or re =~ 'ic' or re =~ 'ci' or re =~ 'cc'
ops << " #{type} #{var} = (#{type})GET_OPERAND(#{i+1});"
end
@ -949,6 +949,8 @@ class RubyVM
"TS_IC"
when /^CALL_INFO/
"TS_CALLINFO"
when /^CALL_CACHE/
"TS_CALLCACHE"
when /^\.\.\./
"TS_VARIABLE"
when /^CDHASH/
@ -971,6 +973,7 @@ class RubyVM
'TS_GENTRY' => 'G',
'TS_IC' => 'K',
'TS_CALLINFO' => 'C',
'TS_CALLCACHE' => 'E',
'TS_CDHASH' => 'H',
'TS_ISEQ' => 'S',
'TS_VARIABLE' => '.',

2
vm.c

@ -1652,7 +1652,7 @@ vm_exec(rb_thread_t *th)
}
}
if (catch_iseq != 0) { /* found catch table */
if (catch_iseq != NULL) { /* found catch table */
/* enter catch scope */
cfp->sp = vm_base_ptr(cfp) + cont_sp;
cfp->pc = cfp->iseq->body->iseq_encoded + cont_pc;

vm_args.c

@ -15,9 +15,10 @@ VALUE rb_keyword_error_new(const char *error, VALUE keys); /* class.c */
struct args_info {
/* basic args info */
rb_call_info_t *ci;
struct rb_calling_info *calling;
VALUE *argv;
int argc;
const struct rb_call_info_kw_arg *kw_arg;
/* additional args info */
int rest_index;
@ -235,8 +236,9 @@ args_pop_keyword_hash(struct args_info *args, VALUE *kw_hash_ptr, rb_thread_t *t
static int
args_kw_argv_to_hash(struct args_info *args)
{
const VALUE *const passed_keywords = args->ci->kw_arg->keywords;
const int kw_len = args->ci->kw_arg->keyword_len;
const struct rb_call_info_kw_arg *kw_arg = args->kw_arg;
const VALUE *const passed_keywords = kw_arg->keywords;
const int kw_len = kw_arg->keyword_len;
VALUE h = rb_hash_new();
const int kw_start = args->argc - kw_len;
const VALUE * const kw_argv = args->argv + kw_start;
@ -257,8 +259,9 @@ args_stored_kw_argv_to_hash(struct args_info *args)
{
VALUE h = rb_hash_new();
int i;
const VALUE *const passed_keywords = args->ci->kw_arg->keywords;
const int passed_keyword_len = args->ci->kw_arg->keyword_len;
const struct rb_call_info_kw_arg *kw_arg = args->kw_arg;
const VALUE *const passed_keywords = kw_arg->keywords;
const int passed_keyword_len = kw_arg->keyword_len;
for (i=0; i<passed_keyword_len; i++) {
rb_hash_aset(h, passed_keywords[i], args->kw_argv[i]);
@ -462,10 +465,10 @@ args_setup_kw_rest_parameter(VALUE keyword_hash, VALUE *locals)
}
static inline void
args_setup_block_parameter(rb_thread_t *th, rb_call_info_t *ci, VALUE *locals)
args_setup_block_parameter(rb_thread_t *th, struct rb_calling_info *calling, VALUE *locals)
{
VALUE blockval = Qnil;
const rb_block_t *blockptr = ci->blockptr;
const rb_block_t *blockptr = calling->blockptr;
if (blockptr) {
/* make Proc object */
@ -473,7 +476,7 @@ args_setup_block_parameter(rb_thread_t *th, rb_call_info_t *ci, VALUE *locals)
rb_proc_t *proc;
blockval = rb_vm_make_proc(th, blockptr, rb_cProc);
GetProcPtr(blockval, proc);
ci->blockptr = &proc->block;
calling->blockptr = &proc->block;
}
else {
blockval = blockptr->proc;
@ -499,7 +502,9 @@ fill_keys_values(st_data_t key, st_data_t val, st_data_t ptr)
}
static int
setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq, rb_call_info_t * const ci,
setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq,
struct rb_calling_info *const calling,
const struct rb_call_info *ci,
VALUE * const locals, const enum arg_setup_type arg_setup_type)
{
const int min_argc = iseq->body->param.lead_num + iseq->body->param.post_num;
@ -525,20 +530,22 @@ setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq, r
* <- iseq->body->param.size------------>
* ^ locals ^ sp
*/
for (i=ci->argc; i<iseq->body->param.size; i++) {
for (i=calling->argc; i<iseq->body->param.size; i++) {
locals[i] = Qnil;
}
th->cfp->sp = &locals[i];
/* setup args */
args = &args_body;
args->ci = ci;
given_argc = args->argc = ci->argc;
args->calling = calling;
given_argc = args->argc = calling->argc;
args->argv = locals;
if (ci->kw_arg) {
if (ci->flag & VM_CALL_KWARG) {
args->kw_arg = ((struct rb_call_info_with_kwarg *)ci)->kw_arg;
if (iseq->body->param.flags.has_kw) {
int kw_len = ci->kw_arg->keyword_len;
int kw_len = args->kw_arg->keyword_len;
/* copy kw_argv */
args->kw_argv = ALLOCA_N(VALUE, kw_len);
args->argc -= kw_len;
@ -551,6 +558,7 @@ setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq, r
}
}
else {
args->kw_arg = NULL;
args->kw_argv = NULL;
}
@ -642,7 +650,8 @@ setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq, r
VALUE * const klocals = locals + iseq->body->param.keyword->bits_start - iseq->body->param.keyword->num;
if (args->kw_argv != NULL) {
args_setup_kw_parameters(args->kw_argv, args->ci->kw_arg->keyword_len, args->ci->kw_arg->keywords, iseq, klocals);
const struct rb_call_info_kw_arg *kw_arg = args->kw_arg;
args_setup_kw_parameters(args->kw_argv, kw_arg->keyword_len, kw_arg->keywords, iseq, klocals);
}
else if (!NIL_P(keyword_hash)) {
int kw_len = rb_long2int(RHASH_SIZE(keyword_hash));
@ -665,7 +674,7 @@ setup_parameters_complex(rb_thread_t * const th, const rb_iseq_t * const iseq, r
}
if (iseq->body->param.flags.has_block) {
args_setup_block_parameter(th, ci, locals + iseq->body->param.block_start);
args_setup_block_parameter(th, calling, locals + iseq->body->param.block_start);
}
#if 0
@ -717,10 +726,11 @@ argument_kw_error(rb_thread_t *th, const rb_iseq_t *iseq, const char *error, con
}
static inline void
vm_caller_setup_arg_splat(rb_control_frame_t *cfp, rb_call_info_t *ci)
vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
VALUE *argv = cfp->sp - ci->argc;
VALUE ary = argv[ci->argc-1];
int argc = calling->argc;
VALUE *argv = cfp->sp - argc;
VALUE ary = argv[argc-1];
cfp->sp--;
@ -733,15 +743,16 @@ vm_caller_setup_arg_splat(rb_control_frame_t *cfp, rb_call_info_t *ci)
for (i = 0; i < len; i++) {
*cfp->sp++ = ptr[i];
}
ci->argc += i - 1;
calling->argc += i - 1;
}
}
static inline void
vm_caller_setup_arg_kw(rb_control_frame_t *cfp, rb_call_info_t *ci)
vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci)
{
const VALUE *const passed_keywords = ci->kw_arg->keywords;
const int kw_len = ci->kw_arg->keyword_len;
struct rb_call_info_with_kwarg *ci_kw = (struct rb_call_info_with_kwarg *)ci;
const VALUE *const passed_keywords = ci_kw->kw_arg->keywords;
const int kw_len = ci_kw->kw_arg->keyword_len;
const VALUE h = rb_hash_new();
VALUE *sp = cfp->sp;
int i;
@ -752,18 +763,12 @@ vm_caller_setup_arg_kw(rb_control_frame_t *cfp, rb_call_info_t *ci)
(sp-kw_len)[0] = h;
cfp->sp -= kw_len - 1;
ci->argc -= kw_len - 1;
calling->argc -= kw_len - 1;
}
#define SAVE_RESTORE_CI(expr, ci) do { \
int saved_argc = (ci)->argc; rb_block_t *saved_blockptr = (ci)->blockptr; /* save */ \
expr; \
(ci)->argc = saved_argc; (ci)->blockptr = saved_blockptr; /* restore */ \
} while (0)
static void
vm_caller_setup_arg_block(const rb_thread_t *th, rb_control_frame_t *reg_cfp, rb_call_info_t *ci,
rb_iseq_t *blockiseq, const int is_super)
vm_caller_setup_arg_block(const rb_thread_t *th, rb_control_frame_t *reg_cfp,
struct rb_calling_info *calling, const struct rb_call_info *ci, rb_iseq_t *blockiseq, const int is_super)
{
if (ci->flag & VM_CALL_ARGS_BLOCKARG) {
rb_proc_t *po;
@ -774,8 +779,7 @@ vm_caller_setup_arg_block(const rb_thread_t *th, rb_control_frame_t *reg_cfp, rb
if (proc != Qnil) {
if (!rb_obj_is_proc(proc)) {
VALUE b;
SAVE_RESTORE_CI(b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc"), ci);
b = rb_check_convert_type(proc, T_DATA, "Proc", "to_proc");
if (NIL_P(b) || !rb_obj_is_proc(b)) {
rb_raise(rb_eTypeError,
@ -785,32 +789,32 @@ vm_caller_setup_arg_block(const rb_thread_t *th, rb_control_frame_t *reg_cfp, rb
proc = b;
}
GetProcPtr(proc, po);
ci->blockptr = &po->block;
calling->blockptr = &po->block;
RUBY_VM_GET_BLOCK_PTR_IN_CFP(reg_cfp)->proc = proc;
}
else {
ci->blockptr = NULL;
calling->blockptr = NULL;
}
}
else if (blockiseq != 0) { /* likely */
ci->blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(reg_cfp);
ci->blockptr->iseq = blockiseq;
ci->blockptr->proc = 0;
rb_block_t *blockptr = calling->blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(reg_cfp);
blockptr->iseq = blockiseq;
blockptr->proc = 0;
}
else {
if (is_super) {
ci->blockptr = GET_BLOCK_PTR();
calling->blockptr = GET_BLOCK_PTR();
}
else {
ci->blockptr = NULL;
calling->blockptr = NULL;
}
}
}
#define IS_ARGS_SPLAT(ci) ((ci)->flag & VM_CALL_ARGS_SPLAT)
#define IS_ARGS_KEYWORD(ci) ((ci)->kw_arg != NULL)
#define IS_ARGS_SPLAT(ci) ((ci)->flag & VM_CALL_ARGS_SPLAT)
#define IS_ARGS_KEYWORD(ci) ((ci)->flag & VM_CALL_KWARG)
#define CALLER_SETUP_ARG(cfp, ci) do { \
if (UNLIKELY(IS_ARGS_SPLAT(ci))) vm_caller_setup_arg_splat((cfp), (ci)); \
if (UNLIKELY(IS_ARGS_KEYWORD(ci))) vm_caller_setup_arg_kw((cfp), (ci)); \
#define CALLER_SETUP_ARG(cfp, calling, ci) do { \
if (UNLIKELY(IS_ARGS_SPLAT(ci))) vm_caller_setup_arg_splat((cfp), (calling)); \
if (UNLIKELY(IS_ARGS_KEYWORD(ci))) vm_caller_setup_arg_kw((cfp), (calling), (ci)); \
} while (0)

vm_core.h

@ -173,11 +173,6 @@ union iseq_inline_storage_entry {
struct rb_thread_struct;
struct rb_control_frame_struct;
typedef struct rb_call_info_kw_arg_struct {
int keyword_len;
VALUE keywords[1];
} rb_call_info_kw_arg_t;
enum method_missing_reason {
MISSING_NOENTRY = 0x00,
MISSING_PRIVATE = 0x01,
@ -188,14 +183,30 @@ enum method_missing_reason {
MISSING_NONE = 0x20
};
/* rb_call_info_t contains calling information including inline cache */
typedef struct rb_call_info_struct {
struct rb_call_info {
/* fixed at compile time */
ID mid;
unsigned int flag;
int orig_argc;
const rb_call_info_kw_arg_t *kw_arg;
};
struct rb_call_info_kw_arg {
int keyword_len;
VALUE keywords[1];
};
struct rb_call_info_with_kwarg {
struct rb_call_info ci;
struct rb_call_info_kw_arg *kw_arg;
};
struct rb_calling_info {
struct rb_block_struct *blockptr;
VALUE recv;
int argc;
};
struct rb_call_cache {
/* inline cache: keys */
rb_serial_t method_state;
rb_serial_t class_serial;
@ -203,18 +214,14 @@ typedef struct rb_call_info_struct {
/* inline cache: values */
const rb_callable_method_entry_t *me;
/* temporary values for method calling */
struct rb_block_struct *blockptr;
VALUE recv;
int argc;
VALUE (*call)(struct rb_thread_struct *th, struct rb_control_frame_struct *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);
union {
unsigned int index; /* used by ivar */
enum method_missing_reason method_missing_reason; /* used by method_missing */
int inc_sp; /* used by cfunc */
} aux;
VALUE (*call)(struct rb_thread_struct *th, struct rb_control_frame_struct *cfp, struct rb_call_info_struct *ci);
} rb_call_info_t;
};
#if 1
#define GetCoreDataFromValue(obj, type, ptr) do { \
@ -337,12 +344,19 @@ struct rb_iseq_constant_body {
struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
union iseq_inline_storage_entry *is_entries;
rb_call_info_t *callinfo_entries;
struct rb_call_info *ci_entries; /* struct rb_call_info ci_entries[ci_size];
* struct rb_call_info_with_kwarg cikw_entries[ci_kw_size];
* So that:
* struct rb_call_info_with_kwarg *cikw_entries = &body->ci_entries[ci_size];
*/
struct rb_call_cache *cc_entries; /* size is ci_size + ci_kw_size */
const VALUE mark_ary; /* Array: includes operands which should be GC marked */
unsigned int local_table_size;
unsigned int is_size;
unsigned int callinfo_size;
unsigned int ci_size;
unsigned int ci_kw_size;
unsigned int line_info_size;
};
@ -632,7 +646,7 @@ typedef struct rb_thread_struct {
const rb_callable_method_entry_t *passed_bmethod_me;
/* for cfunc */
rb_call_info_t *passed_ci;
struct rb_calling_info *calling;
/* for load(true) */
VALUE top_self;
@ -827,14 +841,16 @@ enum vm_check_match_type {
#define VM_CHECKMATCH_TYPE_MASK 0x03
#define VM_CHECKMATCH_ARRAY 0x04
#define VM_CALL_ARGS_SPLAT (0x01 << 1) /* m(*args) */
#define VM_CALL_ARGS_BLOCKARG (0x01 << 2) /* m(&block) */
#define VM_CALL_FCALL (0x01 << 3) /* m(...) */
#define VM_CALL_VCALL (0x01 << 4) /* m */
#define VM_CALL_TAILCALL (0x01 << 5) /* located at tail position */
#define VM_CALL_SUPER (0x01 << 6) /* super */
#define VM_CALL_OPT_SEND (0x01 << 7) /* internal flag */
#define VM_CALL_ARGS_SIMPLE (0x01 << 8) /* (ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL */
#define VM_CALL_ARGS_SPLAT (0x01 << 0) /* m(*args) */
#define VM_CALL_ARGS_BLOCKARG (0x01 << 1) /* m(&block) */
#define VM_CALL_FCALL (0x01 << 2) /* m(...) */
#define VM_CALL_VCALL (0x01 << 3) /* m */
#define VM_CALL_ARGS_SIMPLE (0x01 << 4) /* (ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL */
#define VM_CALL_BLOCKISEQ (0x01 << 5) /* has blockiseq */
#define VM_CALL_KWARG (0x01 << 6) /* has kwarg */
#define VM_CALL_TAILCALL (0x01 << 7) /* located at tail position */
#define VM_CALL_SUPER (0x01 << 8) /* super */
#define VM_CALL_OPT_SEND (0x01 << 9) /* internal flag */
enum vm_special_object_type {
VM_SPECIAL_OBJECT_VMCORE = 1,
@ -878,7 +894,8 @@ enum vm_svar_index {
/* inline cache */
typedef struct iseq_inline_cache_entry *IC;
typedef rb_call_info_t *CALL_INFO;
typedef struct rb_call_info *CALL_INFO;
typedef struct rb_call_cache *CALL_CACHE;
void rb_vm_change_state(void);
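
The comment on ci_entries above describes one allocation that packs plain and keyword call infos back to back, with a parallel cc_entries array of caches (compare the ruby_xmalloc and ZALLOC_N lines in the compile.c hunk). A short sketch of that layout; the helper name ci_kw_entries mirrors the local variable used in the iseq.c hunks and is illustrative only:

/* ci_entries: [ rb_call_info x ci_size ][ rb_call_info_with_kwarg x ci_kw_size ]
 * cc_entries: [ rb_call_cache x (ci_size + ci_kw_size) ]  (zero-filled)
 *
 * The keyword block therefore starts right past the plain block: */
static struct rb_call_info_with_kwarg *
ci_kw_entries(const struct rb_iseq_constant_body *body)
{
    return (struct rb_call_info_with_kwarg *)&body->ci_entries[body->ci_size];
}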

117
vm_eval.c

@ -39,43 +39,48 @@ typedef enum call_type {
static VALUE send_internal(int argc, const VALUE *argv, VALUE recv, call_type scope);
static VALUE vm_call0_body(rb_thread_t* th, rb_call_info_t *ci, const VALUE *argv);
static VALUE vm_call0_body(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, const VALUE *argv);
static VALUE
vm_call0(rb_thread_t* th, VALUE recv, ID id, int argc, const VALUE *argv, const rb_callable_method_entry_t *me)
{
rb_call_info_t ci_entry, *ci = &ci_entry;
struct rb_calling_info calling_entry, *calling;
struct rb_call_info ci_entry;
struct rb_call_cache cc_entry;
ci->flag = 0;
ci->mid = id;
ci->recv = recv;
ci->argc = argc;
ci->me = me;
ci->kw_arg = NULL;
calling = &calling_entry;
return vm_call0_body(th, ci, argv);
ci_entry.flag = 0;
ci_entry.mid = id;
cc_entry.me = me;
calling->recv = recv;
calling->argc = argc;
return vm_call0_body(th, calling, &ci_entry, &cc_entry, argv);
}
#if OPT_CALL_CFUNC_WITHOUT_FRAME
static VALUE
vm_call0_cfunc(rb_thread_t* th, rb_call_info_t *ci, const VALUE *argv)
vm_call0_cfunc(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, const VALUE *argv)
{
VALUE val;
RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, ci->me->owner, ci->mid);
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, ci->recv, ci->mid, ci->me->owner, Qnil);
RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, cc->me->owner, ci->mid);
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, calling->recv, ci->mid, cc->me->owner, Qnil);
{
rb_control_frame_t *reg_cfp = th->cfp;
const rb_callable_method_entry_t *me = ci->me;
const rb_callable_method_entry_t *me = cc->me;
const rb_method_cfunc_t *cfunc = &me->def->body.cfunc;
int len = cfunc->argc;
VALUE recv = ci->recv;
int argc = ci->argc;
VALUE recv = calling->recv;
int argc = calling->argc;
if (len >= 0) rb_check_arity(ci->argc, len, len);
if (len >= 0) rb_check_arity(argc, len, len);
th->passed_ci = ci;
ci->aux.inc_sp = 0;
cc->aux.inc_sp = 0;
VM_PROFILE_UP(2);
val = (*cfunc->invoker)(cfunc->func, recv, argc, argv);
@ -93,23 +98,23 @@ vm_call0_cfunc(rb_thread_t* th, rb_call_info_t *ci, const VALUE *argv)
vm_pop_frame(th);
}
}
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, ci->recv, ci->mid, ci->me->owner, val);
RUBY_DTRACE_CMETHOD_RETURN_HOOK(th, ci->me->owner, ci->mid);
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, calling->recv, ci->mid, cc->me->owner, val);
RUBY_DTRACE_CMETHOD_RETURN_HOOK(th, cc->me->owner, ci->mid);
return val;
}
#else
static VALUE
vm_call0_cfunc_with_frame(rb_thread_t* th, rb_call_info_t *ci, const VALUE *argv)
vm_call0_cfunc_with_frame(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, const VALUE *argv)
{
VALUE val;
const rb_callable_method_entry_t *me = ci->me;
const rb_callable_method_entry_t *me = cc->me;
const rb_method_cfunc_t *cfunc = &me->def->body.cfunc;
int len = cfunc->argc;
VALUE recv = ci->recv;
int argc = ci->argc;
VALUE recv = calling->recv;
int argc = calling->argc;
ID mid = ci->mid;
rb_block_t *blockptr = ci->blockptr;
rb_block_t *blockptr = calling->blockptr;
RUBY_DTRACE_CMETHOD_ENTRY_HOOK(th, me->owner, mid);
EXEC_EVENT_HOOK(th, RUBY_EVENT_C_CALL, recv, mid, me->owner, Qnil);
@ -138,114 +143,114 @@ vm_call0_cfunc_with_frame(rb_thread_t* th, rb_call_info_t *ci, const VALUE *argv
}
static VALUE
vm_call0_cfunc(rb_thread_t* th, rb_call_info_t *ci, const VALUE *argv)
vm_call0_cfunc(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, const VALUE *argv)
{
return vm_call0_cfunc_with_frame(th, ci, argv);
return vm_call0_cfunc_with_frame(th, calling, ci, cc, argv);
}
#endif
/* `ci' should point to a temporary (on-stack) value */
static VALUE
vm_call0_body(rb_thread_t* th, rb_call_info_t *ci, const VALUE *argv)
vm_call0_body(rb_thread_t* th, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, const VALUE *argv)
{
VALUE ret;
if (th->passed_block) {
ci->blockptr = (rb_block_t *)th->passed_block;
calling->blockptr = (rb_block_t *)th->passed_block;
th->passed_block = 0;
}
else {
ci->blockptr = 0;
calling->blockptr = 0;
}
again:
switch (ci->me->def->type) {
switch (cc->me->def->type) {
case VM_METHOD_TYPE_ISEQ:
{
rb_control_frame_t *reg_cfp = th->cfp;
int i;
CHECK_VM_STACK_OVERFLOW(reg_cfp, ci->argc + 1);
CHECK_VM_STACK_OVERFLOW(reg_cfp, calling->argc + 1);
*reg_cfp->sp++ = ci->recv;
for (i = 0; i < ci->argc; i++) {
*reg_cfp->sp++ = calling->recv;
for (i = 0; i < calling->argc; i++) {
*reg_cfp->sp++ = argv[i];
}
vm_call_iseq_setup(th, reg_cfp, ci);
vm_call_iseq_setup(th, reg_cfp, calling, ci, cc);
th->cfp->flag |= VM_FRAME_FLAG_FINISH;
return vm_exec(th); /* CHECK_INTS in this function */
}
case VM_METHOD_TYPE_NOTIMPLEMENTED:
case VM_METHOD_TYPE_CFUNC:
ret = vm_call0_cfunc(th, ci, argv);
ret = vm_call0_cfunc(th, calling, ci, cc, argv);
goto success;
case VM_METHOD_TYPE_ATTRSET:
rb_check_arity(ci->argc, 1, 1);
ret = rb_ivar_set(ci->recv, ci->me->def->body.attr.id, argv[0]);
rb_check_arity(calling->argc, 1, 1);
ret = rb_ivar_set(calling->recv, cc->me->def->body.attr.id, argv[0]);
goto success;
case VM_METHOD_TYPE_IVAR:
rb_check_arity(ci->argc, 0, 0);
ret = rb_attr_get(ci->recv, ci->me->def->body.attr.id);
rb_check_arity(calling->argc, 0, 0);
ret = rb_attr_get(calling->recv, cc->me->def->body.attr.id);
goto success;
case VM_METHOD_TYPE_BMETHOD:
ret = vm_call_bmethod_body(th, ci, argv);
ret = vm_call_bmethod_body(th, calling, ci, cc, argv);
goto success;
case VM_METHOD_TYPE_ZSUPER:
case VM_METHOD_TYPE_REFINED:
{
const rb_method_type_t type = ci->me->def->type;
const rb_method_type_t type = cc->me->def->type;
VALUE super_class;
if (type == VM_METHOD_TYPE_REFINED && ci->me->def->body.refined.orig_me) {
ci->me = refined_method_callable_without_refinement(ci->me);
if (type == VM_METHOD_TYPE_REFINED && cc->me->def->body.refined.orig_me) {
cc->me = refined_method_callable_without_refinement(cc->me);
goto again;
}
super_class = RCLASS_SUPER(ci->me->defined_class);
super_class = RCLASS_SUPER(cc->me->defined_class);
if (!super_class || !(ci->me = rb_callable_method_entry(super_class, ci->mid))) {
if (!super_class || !(cc->me = rb_callable_method_entry(super_class, ci->mid))) {
enum method_missing_reason ex = (type == VM_METHOD_TYPE_ZSUPER) ? MISSING_SUPER : 0;
ret = method_missing(ci->recv, ci->mid, ci->argc, argv, ex);
ret = method_missing(calling->recv, ci->mid, calling->argc, argv, ex);
goto success;
}
RUBY_VM_CHECK_INTS(th);
goto again;
}
case VM_METHOD_TYPE_ALIAS:
ci->me = aliased_callable_method_entry(ci->me);
cc->me = aliased_callable_method_entry(cc->me);
goto again;
case VM_METHOD_TYPE_MISSING:
{
VALUE new_args = rb_ary_new4(ci->argc, argv);
VALUE new_args = rb_ary_new4(calling->argc, argv);
rb_ary_unshift(new_args, ID2SYM(ci->mid));
th->passed_block = ci->blockptr;
ret = rb_funcall2(ci->recv, idMethodMissing, ci->argc+1,
th->passed_block = calling->blockptr;
ret = rb_funcall2(calling->recv, idMethodMissing, calling->argc+1,
RARRAY_CONST_PTR(new_args));
RB_GC_GUARD(new_args);
return ret;
}
case VM_METHOD_TYPE_OPTIMIZED:
switch (ci->me->def->body.optimize_type) {
switch (cc->me->def->body.optimize_type) {
case OPTIMIZED_METHOD_TYPE_SEND:
ret = send_internal(ci->argc, argv, ci->recv, CALL_FCALL);
ret = send_internal(calling->argc, argv, calling->recv, CALL_FCALL);
goto success;
case OPTIMIZED_METHOD_TYPE_CALL:
{
rb_proc_t *proc;
GetProcPtr(ci->recv, proc);
ret = rb_vm_invoke_proc(th, proc, ci->argc, argv, ci->blockptr);
GetProcPtr(calling->recv, proc);
ret = rb_vm_invoke_proc(th, proc, calling->argc, argv, calling->blockptr);
goto success;
}
default:
rb_bug("vm_call0: unsupported optimized method type (%d)", ci->me->def->body.optimize_type);
rb_bug("vm_call0: unsupported optimized method type (%d)", cc->me->def->body.optimize_type);
}
break;
case VM_METHOD_TYPE_UNDEF:
break;
}
rb_bug("vm_call0: unsupported method type (%d)", ci->me->def->type);
rb_bug("vm_call0: unsupported method type (%d)", cc->me->def->type);
return Qundef;
success:

vm_insnhelper.c: file diff suppressed because it is too large.

vm_insnhelper.h

@ -162,8 +162,8 @@ enum vm_regan_acttype {
} \
} while (0)
#define CALL_METHOD(ci) do { \
VALUE v = (*(ci)->call)(th, GET_CFP(), (ci)); \
#define CALL_METHOD(calling, ci, cc) do { \
VALUE v = (*(cc)->call)(th, GET_CFP(), (calling), (ci), (cc)); \
if (v == Qundef) { \
RESTORE_REGS(); \
NEXT_INSN(); \
@ -182,8 +182,8 @@ enum vm_regan_acttype {
#endif
#if OPT_CALL_FASTPATH
#define CI_SET_FASTPATH(ci, func, enabled) do { \
if (LIKELY(enabled)) ((ci)->call = (func)); \
#define CI_SET_FASTPATH(cc, func, enabled) do { \
if (LIKELY(enabled)) ((cc)->call = (func)); \
} while (0)
#else
#define CI_SET_FASTPATH(ci, func, enabled) /* do nothing */
@ -213,9 +213,11 @@ enum vm_regan_acttype {
#endif
#define CALL_SIMPLE_METHOD(recv_) do { \
ci->blockptr = 0; ci->argc = ci->orig_argc; \
vm_search_method(ci, ci->recv = (recv_)); \
CALL_METHOD(ci); \
struct rb_calling_info calling; \
calling.blockptr = NULL; \
calling.argc = ci->orig_argc; \
vm_search_method(ci, cc, calling.recv = (recv_)); \
CALL_METHOD(&calling, ci, cc); \
} while (0)
#define NEXT_CLASS_SERIAL() (++ruby_vm_class_serial)