mirror of https://github.com/ruby/ruby.git
VALUE size packed callinfo (ci).
rb_call_info describes how to call a method as the tuple (mid, orig_argc, flags, kwarg). In most cases kwarg == NULL and mid+argc+flags fit in 64 bits, so this patch packs the call info into a single VALUE (1 word) in those cases. When it cannot be represented in a VALUE, an imemo_callinfo object holding a conventional call info (rb_callinfo, renamed from rb_call_info) is used instead. iseq->body->ci_kw_size is removed because every call info is now VALUE-sized (either a packed ci or a pointer to an imemo_callinfo). Call-info fields must now be accessed through vm_ci_mid(ci), vm_ci_flag(ci), vm_ci_argc(ci), and vm_ci_kwarg(ci). struct rb_call_info_kw_arg is renamed to rb_callinfo_kwarg. rb_funcallv_with_cc() and rb_method_basic_definition_p_with_cc() are temporarily removed because cd->ci needs to be marked.
Commit: f2286925f0 (parent: a1eb1fabef)
Notes: git, 2020-02-22 09:59:43 +09:00
26 changed files with 630 additions and 544 deletions
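Illustration (not part of the commit): the packing idea described in the commit message can be sketched roughly as below. The constants, field widths, tag bit, and helper names here are invented for the sketch and do not reproduce the actual layout in vm_callinfo.h; they only show how (mid, flag, argc) could share one tagged VALUE while a heap-allocated imemo_callinfo remains the fallback when the fields do not fit or a kwarg list is attached.

    /* Hypothetical packing sketch -- the real constants live in vm_callinfo.h. */
    #include "ruby/ruby.h"   /* VALUE, ID */

    #define CI_EMBED_TAG    0x01  /* low bit set: packed call info, not a pointer   */
    #define CI_ARGC_SHIFT   1     /* heap pointers are aligned, so their low bit is 0 */
    #define CI_ARGC_BITS    15
    #define CI_FLAG_SHIFT   (CI_ARGC_SHIFT + CI_ARGC_BITS)
    #define CI_FLAG_BITS    16
    #define CI_MID_SHIFT    (CI_FLAG_SHIFT + CI_FLAG_BITS)

    /* Pack (mid, flag, argc) into one tagged VALUE; the caller must already
     * have checked that each field fits into its bit width. */
    static inline VALUE
    ci_pack(ID mid, unsigned int flag, unsigned int argc)
    {
        return ((VALUE)mid  << CI_MID_SHIFT)  |
               ((VALUE)flag << CI_FLAG_SHIFT) |
               ((VALUE)argc << CI_ARGC_SHIFT) |
               CI_EMBED_TAG;
    }

    /* A packed ci is distinguished from an imemo_callinfo pointer by the tag bit. */
    static inline int
    ci_packed_p(VALUE ci)
    {
        return (ci & CI_EMBED_TAG) != 0;
    }

    static inline unsigned int
    ci_packed_argc(VALUE ci)
    {
        return (unsigned int)((ci >> CI_ARGC_SHIFT) & ((1u << CI_ARGC_BITS) - 1));
    }

Because cd->ci may be either a tagged immediate or a pointer to a GC-managed imemo object, every consumer in the diff below reads it through vm_ci_mid()/vm_ci_flag()/vm_ci_argc()/vm_ci_kwarg(), and stores into it with RB_OBJ_WRITTEN() so the write barrier sees the new reference.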
@@ -1837,6 +1837,7 @@ compile.$(OBJEXT): {$(VPATH)}builtin.h
 compile.$(OBJEXT): {$(VPATH)}compile.c
 compile.$(OBJEXT): {$(VPATH)}config.h
 compile.$(OBJEXT): {$(VPATH)}constant.h
+compile.$(OBJEXT): {$(VPATH)}debug_counter.h
 compile.$(OBJEXT): {$(VPATH)}defines.h
 compile.$(OBJEXT): {$(VPATH)}encindex.h
 compile.$(OBJEXT): {$(VPATH)}encoding.h
@@ -1866,6 +1867,7 @@ compile.$(OBJEXT): {$(VPATH)}subst.h
 compile.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
 compile.$(OBJEXT): {$(VPATH)}thread_native.h
 compile.$(OBJEXT): {$(VPATH)}util.h
+compile.$(OBJEXT): {$(VPATH)}vm_callinfo.h
 compile.$(OBJEXT): {$(VPATH)}vm_core.h
 compile.$(OBJEXT): {$(VPATH)}vm_debug.h
 compile.$(OBJEXT): {$(VPATH)}vm_opts.h
@@ -1966,6 +1968,7 @@ debug.$(OBJEXT): $(top_srcdir)/internal/warnings.h
 debug.$(OBJEXT): {$(VPATH)}assert.h
 debug.$(OBJEXT): {$(VPATH)}config.h
 debug.$(OBJEXT): {$(VPATH)}debug.c
+debug.$(OBJEXT): {$(VPATH)}debug_counter.h
 debug.$(OBJEXT): {$(VPATH)}defines.h
 debug.$(OBJEXT): {$(VPATH)}encoding.h
 debug.$(OBJEXT): {$(VPATH)}eval_intern.h
@@ -1987,6 +1990,7 @@ debug.$(OBJEXT): {$(VPATH)}symbol.h
 debug.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
 debug.$(OBJEXT): {$(VPATH)}thread_native.h
 debug.$(OBJEXT): {$(VPATH)}util.h
+debug.$(OBJEXT): {$(VPATH)}vm_callinfo.h
 debug.$(OBJEXT): {$(VPATH)}vm_core.h
 debug.$(OBJEXT): {$(VPATH)}vm_debug.h
 debug.$(OBJEXT): {$(VPATH)}vm_opts.h
@@ -2437,6 +2441,7 @@ gc.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
 gc.$(OBJEXT): {$(VPATH)}thread_native.h
 gc.$(OBJEXT): {$(VPATH)}transient_heap.h
 gc.$(OBJEXT): {$(VPATH)}util.h
+gc.$(OBJEXT): {$(VPATH)}vm_callinfo.h
 gc.$(OBJEXT): {$(VPATH)}vm_core.h
 gc.$(OBJEXT): {$(VPATH)}vm_opts.h
 golf_prelude.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
@@ -2663,6 +2668,7 @@ iseq.$(OBJEXT): {$(VPATH)}subst.h
 iseq.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
 iseq.$(OBJEXT): {$(VPATH)}thread_native.h
 iseq.$(OBJEXT): {$(VPATH)}util.h
+iseq.$(OBJEXT): {$(VPATH)}vm_callinfo.h
 iseq.$(OBJEXT): {$(VPATH)}vm_core.h
 iseq.$(OBJEXT): {$(VPATH)}vm_opts.h
 load.$(OBJEXT): $(CCAN_DIR)/check_type/check_type.h
@@ -2986,6 +2992,7 @@ mjit_compile.$(OBJEXT): {$(VPATH)}st.h
 mjit_compile.$(OBJEXT): {$(VPATH)}subst.h
 mjit_compile.$(OBJEXT): {$(VPATH)}thread_$(THREAD_MODEL).h
 mjit_compile.$(OBJEXT): {$(VPATH)}thread_native.h
+mjit_compile.$(OBJEXT): {$(VPATH)}vm_callinfo.h
 mjit_compile.$(OBJEXT): {$(VPATH)}vm_core.h
 mjit_compile.$(OBJEXT): {$(VPATH)}vm_exec.h
 mjit_compile.$(OBJEXT): {$(VPATH)}vm_insnhelper.h
@@ -4165,6 +4172,7 @@ vm.$(OBJEXT): {$(VPATH)}vm.h
 vm.$(OBJEXT): {$(VPATH)}vm.inc
 vm.$(OBJEXT): {$(VPATH)}vm_args.c
 vm.$(OBJEXT): {$(VPATH)}vm_call_iseq_optimized.inc
+vm.$(OBJEXT): {$(VPATH)}vm_callinfo.h
 vm.$(OBJEXT): {$(VPATH)}vm_core.h
 vm.$(OBJEXT): {$(VPATH)}vm_debug.h
 vm.$(OBJEXT): {$(VPATH)}vm_eval.c
318
compile.c
318
compile.c
|
@ -36,6 +36,7 @@
|
||||||
#include "ruby/re.h"
|
#include "ruby/re.h"
|
||||||
#include "ruby/util.h"
|
#include "ruby/util.h"
|
||||||
#include "vm_core.h"
|
#include "vm_core.h"
|
||||||
|
#include "vm_callinfo.h"
|
||||||
#include "vm_debug.h"
|
#include "vm_debug.h"
|
||||||
|
|
||||||
#include "builtin.h"
|
#include "builtin.h"
|
||||||
|
@ -919,6 +920,15 @@ compile_data_alloc2(rb_iseq_t *iseq, size_t x, size_t y)
|
||||||
return compile_data_alloc(iseq, size);
|
return compile_data_alloc(iseq, size);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void *
|
||||||
|
compile_data_calloc2(rb_iseq_t *iseq, size_t x, size_t y)
|
||||||
|
{
|
||||||
|
size_t size = rb_size_mul_or_raise(x, y, rb_eRuntimeError);
|
||||||
|
void *p = compile_data_alloc(iseq, size);
|
||||||
|
memset(p, 0, size);
|
||||||
|
return p;
|
||||||
|
}
|
||||||
|
|
||||||
static INSN *
|
static INSN *
|
||||||
compile_data_alloc_insn(rb_iseq_t *iseq)
|
compile_data_alloc_insn(rb_iseq_t *iseq)
|
||||||
{
|
{
|
||||||
|
@ -1187,38 +1197,31 @@ new_insn_body(rb_iseq_t *iseq, int line_no, enum ruby_vminsn_type insn_id, int a
|
||||||
return new_insn_core(iseq, line_no, insn_id, argc, operands);
|
return new_insn_core(iseq, line_no, insn_id, argc, operands);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct rb_call_info *
|
static const struct rb_callinfo *
|
||||||
new_callinfo(rb_iseq_t *iseq, ID mid, int argc, unsigned int flag, struct rb_call_info_kw_arg *kw_arg, int has_blockiseq)
|
new_callinfo(rb_iseq_t *iseq, ID mid, int argc, unsigned int flag, struct rb_callinfo_kwarg *kw_arg, int has_blockiseq)
|
||||||
{
|
{
|
||||||
size_t size = kw_arg != NULL ? sizeof(struct rb_call_info_with_kwarg) : sizeof(struct rb_call_info);
|
VM_ASSERT(argc >= 0);
|
||||||
struct rb_call_info *ci = (struct rb_call_info *)compile_data_alloc(iseq, size);
|
|
||||||
struct rb_call_info_with_kwarg *ci_kw = (struct rb_call_info_with_kwarg *)ci;
|
|
||||||
|
|
||||||
ci->mid = mid;
|
if (!(flag & (VM_CALL_ARGS_SPLAT | VM_CALL_ARGS_BLOCKARG | VM_CALL_KW_SPLAT)) &&
|
||||||
ci->flag = flag;
|
kw_arg == NULL && !has_blockiseq) {
|
||||||
ci->orig_argc = argc;
|
flag |= VM_CALL_ARGS_SIMPLE;
|
||||||
|
}
|
||||||
|
|
||||||
if (kw_arg) {
|
if (kw_arg) {
|
||||||
ci->flag |= VM_CALL_KWARG;
|
flag |= VM_CALL_KWARG;
|
||||||
ci_kw->kw_arg = kw_arg;
|
argc += kw_arg->keyword_len;
|
||||||
ci->orig_argc += kw_arg->keyword_len;
|
|
||||||
iseq->body->ci_kw_size++;
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
iseq->body->ci_size++;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!(ci->flag & (VM_CALL_ARGS_SPLAT | VM_CALL_ARGS_BLOCKARG | VM_CALL_KW_SPLAT)) &&
|
iseq->body->ci_size++;
|
||||||
kw_arg == NULL && !has_blockiseq) {
|
const struct rb_callinfo *ci = vm_ci_new(mid, flag, argc, kw_arg);
|
||||||
ci->flag |= VM_CALL_ARGS_SIMPLE;
|
RB_OBJ_WRITTEN(iseq, Qundef, ci);
|
||||||
}
|
|
||||||
return ci;
|
return ci;
|
||||||
}
|
}
|
||||||
|
|
||||||
static INSN *
|
static INSN *
|
||||||
new_insn_send(rb_iseq_t *iseq, int line_no, ID id, VALUE argc, const rb_iseq_t *blockiseq, VALUE flag, struct rb_call_info_kw_arg *keywords)
|
new_insn_send(rb_iseq_t *iseq, int line_no, ID id, VALUE argc, const rb_iseq_t *blockiseq, VALUE flag, struct rb_callinfo_kwarg *keywords)
|
||||||
{
|
{
|
||||||
VALUE *operands = compile_data_alloc2(iseq, sizeof(VALUE), 2);
|
VALUE *operands = compile_data_calloc2(iseq, sizeof(VALUE), 2);
|
||||||
operands[0] = (VALUE)new_callinfo(iseq, id, FIX2INT(argc), FIX2INT(flag), keywords, blockiseq != NULL);
|
operands[0] = (VALUE)new_callinfo(iseq, id, FIX2INT(argc), FIX2INT(flag), keywords, blockiseq != NULL);
|
||||||
operands[1] = (VALUE)blockiseq;
|
operands[1] = (VALUE)blockiseq;
|
||||||
return new_insn_core(iseq, line_no, BIN(send), 2, operands);
|
return new_insn_core(iseq, line_no, BIN(send), 2, operands);
|
||||||
|
@ -2129,11 +2132,8 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
|
||||||
insns_info = ALLOC_N(struct iseq_insn_info_entry, insn_num);
|
insns_info = ALLOC_N(struct iseq_insn_info_entry, insn_num);
|
||||||
positions = ALLOC_N(unsigned int, insn_num);
|
positions = ALLOC_N(unsigned int, insn_num);
|
||||||
body->is_entries = ZALLOC_N(union iseq_inline_storage_entry, body->is_size);
|
body->is_entries = ZALLOC_N(union iseq_inline_storage_entry, body->is_size);
|
||||||
body->call_data =
|
body->call_data = ZALLOC_N(struct rb_call_data, body->ci_size);
|
||||||
rb_xcalloc_mul_add_mul(
|
ISEQ_COMPILE_DATA(iseq)->ci_index = 0;
|
||||||
sizeof(struct rb_call_data), body->ci_size,
|
|
||||||
sizeof(struct rb_kwarg_call_data), body->ci_kw_size);
|
|
||||||
ISEQ_COMPILE_DATA(iseq)->ci_index = ISEQ_COMPILE_DATA(iseq)->ci_kw_index = 0;
|
|
||||||
|
|
||||||
list = FIRST_ELEMENT(anchor);
|
list = FIRST_ELEMENT(anchor);
|
||||||
insns_info_index = code_index = sp = 0;
|
insns_info_index = code_index = sp = 0;
|
||||||
|
@ -2219,22 +2219,10 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
|
||||||
}
|
}
|
||||||
case TS_CALLDATA:
|
case TS_CALLDATA:
|
||||||
{
|
{
|
||||||
struct rb_call_info *source_ci = (struct rb_call_info *)operands[j];
|
const struct rb_callinfo *source_ci = (const struct rb_callinfo *)operands[j];
|
||||||
struct rb_call_data *cd;
|
struct rb_call_data *cd = &body->call_data[ISEQ_COMPILE_DATA(iseq)->ci_index++];
|
||||||
|
|
||||||
if (source_ci->flag & VM_CALL_KWARG) {
|
|
||||||
struct rb_kwarg_call_data *kw_calls = (struct rb_kwarg_call_data *)&body->call_data[body->ci_size];
|
|
||||||
struct rb_kwarg_call_data *cd_kw = &kw_calls[ISEQ_COMPILE_DATA(iseq)->ci_kw_index++];
|
|
||||||
cd_kw->ci_kw = *((struct rb_call_info_with_kwarg *)source_ci);
|
|
||||||
cd = (struct rb_call_data *)cd_kw;
|
|
||||||
assert(ISEQ_COMPILE_DATA(iseq)->ci_kw_index <= body->ci_kw_size);
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
cd = &body->call_data[ISEQ_COMPILE_DATA(iseq)->ci_index++];
|
|
||||||
cd->ci = *source_ci;
|
|
||||||
assert(ISEQ_COMPILE_DATA(iseq)->ci_index <= body->ci_size);
|
assert(ISEQ_COMPILE_DATA(iseq)->ci_index <= body->ci_size);
|
||||||
}
|
cd->ci = source_ci;
|
||||||
|
|
||||||
generated_iseq[code_index + 1 + j] = (VALUE)cd;
|
generated_iseq[code_index + 1 + j] = (VALUE)cd;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -2565,9 +2553,6 @@ remove_unreachable_chunk(rb_iseq_t *iseq, LINK_ELEMENT *i)
|
||||||
unref_destination((INSN *)i, pos);
|
unref_destination((INSN *)i, pos);
|
||||||
break;
|
break;
|
||||||
case TS_CALLDATA:
|
case TS_CALLDATA:
|
||||||
if (((struct rb_call_info *)OPERAND_AT(i, pos))->flag & VM_CALL_KWARG)
|
|
||||||
--(body->ci_kw_size);
|
|
||||||
else
|
|
||||||
--(body->ci_size);
|
--(body->ci_size);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -2709,6 +2694,28 @@ optimize_checktype(rb_iseq_t *iseq, INSN *iobj)
|
||||||
return TRUE;
|
return TRUE;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static const struct rb_callinfo *
|
||||||
|
ci_flag_set(const rb_iseq_t *iseq, const struct rb_callinfo *ci, unsigned int add)
|
||||||
|
{
|
||||||
|
const struct rb_callinfo *nci = vm_ci_new(vm_ci_mid(ci),
|
||||||
|
vm_ci_flag(ci) | add,
|
||||||
|
vm_ci_argc(ci),
|
||||||
|
vm_ci_kwarg(ci));
|
||||||
|
RB_OBJ_WRITTEN(iseq, ci, nci);
|
||||||
|
return nci;
|
||||||
|
}
|
||||||
|
|
||||||
|
static const struct rb_callinfo *
|
||||||
|
ci_argc_set(const rb_iseq_t *iseq, const struct rb_callinfo *ci, int argc)
|
||||||
|
{
|
||||||
|
const struct rb_callinfo *nci = vm_ci_new(vm_ci_mid(ci),
|
||||||
|
vm_ci_flag(ci),
|
||||||
|
argc,
|
||||||
|
vm_ci_kwarg(ci));
|
||||||
|
RB_OBJ_WRITTEN(iseq, ci, nci);
|
||||||
|
return nci;
|
||||||
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
iseq_peephole_optimize(rb_iseq_t *iseq, LINK_ELEMENT *list, const int do_tailcallopt)
|
iseq_peephole_optimize(rb_iseq_t *iseq, LINK_ELEMENT *list, const int do_tailcallopt)
|
||||||
{
|
{
|
||||||
|
@ -3150,16 +3157,17 @@ iseq_peephole_optimize(rb_iseq_t *iseq, LINK_ELEMENT *list, const int do_tailcal
|
||||||
NIL_P(OPERAND_AT(iobj, 0)) &&
|
NIL_P(OPERAND_AT(iobj, 0)) &&
|
||||||
IS_NEXT_INSN_ID(&iobj->link, send)) {
|
IS_NEXT_INSN_ID(&iobj->link, send)) {
|
||||||
INSN *niobj = (INSN *)iobj->link.next;
|
INSN *niobj = (INSN *)iobj->link.next;
|
||||||
struct rb_call_info *ci = (struct rb_call_info *)OPERAND_AT(niobj, 0);
|
const struct rb_callinfo *ci = (struct rb_callinfo *)OPERAND_AT(niobj, 0);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* freezestring nil # no debug_info
|
* freezestring nil # no debug_info
|
||||||
* send <:+@, 0, ARG_SIMPLE> # :-@, too
|
* send <:+@, 0, ARG_SIMPLE> # :-@, too
|
||||||
* =>
|
* =>
|
||||||
* send <:+@, 0, ARG_SIMPLE> # :-@, too
|
* send <:+@, 0, ARG_SIMPLE> # :-@, too
|
||||||
*/
|
*/
|
||||||
if ((ci->mid == idUPlus || ci->mid == idUMinus) &&
|
if ((vm_ci_mid(ci) == idUPlus || vm_ci_mid(ci) == idUMinus) &&
|
||||||
(ci->flag & VM_CALL_ARGS_SIMPLE) &&
|
(vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE) &&
|
||||||
ci->orig_argc == 0) {
|
vm_ci_argc(ci) == 0) {
|
||||||
ELEM_REMOVE(list);
|
ELEM_REMOVE(list);
|
||||||
return COMPILE_OK;
|
return COMPILE_OK;
|
||||||
}
|
}
|
||||||
|
@ -3207,14 +3215,19 @@ iseq_peephole_optimize(rb_iseq_t *iseq, LINK_ELEMENT *list, const int do_tailcal
|
||||||
}
|
}
|
||||||
|
|
||||||
if (piobj) {
|
if (piobj) {
|
||||||
struct rb_call_info *ci = (struct rb_call_info *)OPERAND_AT(piobj, 0);
|
const struct rb_callinfo *ci = (struct rb_callinfo *)OPERAND_AT(piobj, 0);
|
||||||
if (IS_INSN_ID(piobj, send) || IS_INSN_ID(piobj, invokesuper)) {
|
if (IS_INSN_ID(piobj, send) ||
|
||||||
|
IS_INSN_ID(piobj, invokesuper)) {
|
||||||
if (OPERAND_AT(piobj, 1) == 0) { /* no blockiseq */
|
if (OPERAND_AT(piobj, 1) == 0) { /* no blockiseq */
|
||||||
ci->flag |= VM_CALL_TAILCALL;
|
ci = ci_flag_set(iseq, ci, VM_CALL_TAILCALL);
|
||||||
|
OPERAND_AT(piobj, 0) = (VALUE)ci;
|
||||||
|
RB_OBJ_WRITTEN(iseq, Qundef, ci);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
ci->flag |= VM_CALL_TAILCALL;
|
ci = ci_flag_set(iseq, ci, VM_CALL_TAILCALL);
|
||||||
|
OPERAND_AT(piobj, 0) = (VALUE)ci;
|
||||||
|
RB_OBJ_WRITTEN(iseq, Qundef, ci);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -3277,7 +3290,7 @@ insn_set_specialized_instruction(rb_iseq_t *iseq, INSN *iobj, int insn_id)
|
||||||
if (insn_id == BIN(opt_neq)) {
|
if (insn_id == BIN(opt_neq)) {
|
||||||
VALUE *old_operands = iobj->operands;
|
VALUE *old_operands = iobj->operands;
|
||||||
iobj->operand_size = 2;
|
iobj->operand_size = 2;
|
||||||
iobj->operands = compile_data_alloc2(iseq, iobj->operand_size, sizeof(VALUE));
|
iobj->operands = compile_data_calloc2(iseq, iobj->operand_size, sizeof(VALUE));
|
||||||
iobj->operands[0] = (VALUE)new_callinfo(iseq, idEq, 1, 0, NULL, FALSE);
|
iobj->operands[0] = (VALUE)new_callinfo(iseq, idEq, 1, 0, NULL, FALSE);
|
||||||
iobj->operands[1] = old_operands[0];
|
iobj->operands[1] = old_operands[0];
|
||||||
}
|
}
|
||||||
|
@ -3295,9 +3308,9 @@ iseq_specialized_instruction(rb_iseq_t *iseq, INSN *iobj)
|
||||||
*/
|
*/
|
||||||
INSN *niobj = (INSN *)iobj->link.next;
|
INSN *niobj = (INSN *)iobj->link.next;
|
||||||
if (IS_INSN_ID(niobj, send)) {
|
if (IS_INSN_ID(niobj, send)) {
|
||||||
struct rb_call_info *ci = (struct rb_call_info *)OPERAND_AT(niobj, 0);
|
const struct rb_callinfo *ci = (struct rb_callinfo *)OPERAND_AT(niobj, 0);
|
||||||
if ((ci->flag & VM_CALL_ARGS_SIMPLE) && ci->orig_argc == 0) {
|
if ((vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE) && vm_ci_argc(ci) == 0) {
|
||||||
switch (ci->mid) {
|
switch (vm_ci_mid(ci)) {
|
||||||
case idMax:
|
case idMax:
|
||||||
iobj->insn_id = BIN(opt_newarray_max);
|
iobj->insn_id = BIN(opt_newarray_max);
|
||||||
ELEM_REMOVE(&niobj->link);
|
ELEM_REMOVE(&niobj->link);
|
||||||
|
@ -3312,14 +3325,14 @@ iseq_specialized_instruction(rb_iseq_t *iseq, INSN *iobj)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (IS_INSN_ID(iobj, send)) {
|
if (IS_INSN_ID(iobj, send)) {
|
||||||
struct rb_call_info *ci = (struct rb_call_info *)OPERAND_AT(iobj, 0);
|
const struct rb_callinfo *ci = (struct rb_callinfo *)OPERAND_AT(iobj, 0);
|
||||||
const rb_iseq_t *blockiseq = (rb_iseq_t *)OPERAND_AT(iobj, 1);
|
const rb_iseq_t *blockiseq = (rb_iseq_t *)OPERAND_AT(iobj, 1);
|
||||||
|
|
||||||
#define SP_INSN(opt) insn_set_specialized_instruction(iseq, iobj, BIN(opt_##opt))
|
#define SP_INSN(opt) insn_set_specialized_instruction(iseq, iobj, BIN(opt_##opt))
|
||||||
if (ci->flag & VM_CALL_ARGS_SIMPLE) {
|
if (vm_ci_flag(ci) & VM_CALL_ARGS_SIMPLE) {
|
||||||
switch (ci->orig_argc) {
|
switch (vm_ci_argc(ci)) {
|
||||||
case 0:
|
case 0:
|
||||||
switch (ci->mid) {
|
switch (vm_ci_mid(ci)) {
|
||||||
case idLength: SP_INSN(length); return COMPILE_OK;
|
case idLength: SP_INSN(length); return COMPILE_OK;
|
||||||
case idSize: SP_INSN(size); return COMPILE_OK;
|
case idSize: SP_INSN(size); return COMPILE_OK;
|
||||||
case idEmptyP: SP_INSN(empty_p);return COMPILE_OK;
|
case idEmptyP: SP_INSN(empty_p);return COMPILE_OK;
|
||||||
|
@ -3329,7 +3342,7 @@ iseq_specialized_instruction(rb_iseq_t *iseq, INSN *iobj)
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case 1:
|
case 1:
|
||||||
switch (ci->mid) {
|
switch (vm_ci_mid(ci)) {
|
||||||
case idPLUS: SP_INSN(plus); return COMPILE_OK;
|
case idPLUS: SP_INSN(plus); return COMPILE_OK;
|
||||||
case idMINUS: SP_INSN(minus); return COMPILE_OK;
|
case idMINUS: SP_INSN(minus); return COMPILE_OK;
|
||||||
case idMULT: SP_INSN(mult); return COMPILE_OK;
|
case idMULT: SP_INSN(mult); return COMPILE_OK;
|
||||||
|
@ -3349,14 +3362,14 @@ iseq_specialized_instruction(rb_iseq_t *iseq, INSN *iobj)
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case 2:
|
case 2:
|
||||||
switch (ci->mid) {
|
switch (vm_ci_mid(ci)) {
|
||||||
case idASET: SP_INSN(aset); return COMPILE_OK;
|
case idASET: SP_INSN(aset); return COMPILE_OK;
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((ci->flag & VM_CALL_ARGS_BLOCKARG) == 0 && blockiseq == NULL) {
|
if ((vm_ci_flag(ci) & VM_CALL_ARGS_BLOCKARG) == 0 && blockiseq == NULL) {
|
||||||
iobj->insn_id = BIN(opt_send_without_block);
|
iobj->insn_id = BIN(opt_send_without_block);
|
||||||
iobj->operand_size = insn_len(iobj->insn_id) - 1;
|
iobj->operand_size = insn_len(iobj->insn_id) - 1;
|
||||||
}
|
}
|
||||||
|
@ -3444,8 +3457,7 @@ new_unified_insn(rb_iseq_t *iseq,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (argc > 0) {
|
if (argc > 0) {
|
||||||
ptr = operands =
|
ptr = operands = compile_data_alloc2(iseq, sizeof(VALUE), argc);
|
||||||
compile_data_alloc2(iseq, sizeof(VALUE), argc);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* copy operands */
|
/* copy operands */
|
||||||
|
@ -3870,7 +3882,7 @@ keyword_node_p(const NODE *const node)
|
||||||
static int
|
static int
|
||||||
compile_keyword_arg(rb_iseq_t *iseq, LINK_ANCHOR *const ret,
|
compile_keyword_arg(rb_iseq_t *iseq, LINK_ANCHOR *const ret,
|
||||||
const NODE *const root_node,
|
const NODE *const root_node,
|
||||||
struct rb_call_info_kw_arg **const kw_arg_ptr,
|
struct rb_callinfo_kwarg **const kw_arg_ptr,
|
||||||
unsigned int *flag)
|
unsigned int *flag)
|
||||||
{
|
{
|
||||||
if (kw_arg_ptr == NULL) return FALSE;
|
if (kw_arg_ptr == NULL) return FALSE;
|
||||||
|
@ -3901,8 +3913,8 @@ compile_keyword_arg(rb_iseq_t *iseq, LINK_ANCHOR *const ret,
|
||||||
node = root_node->nd_head;
|
node = root_node->nd_head;
|
||||||
{
|
{
|
||||||
int len = (int)node->nd_alen / 2;
|
int len = (int)node->nd_alen / 2;
|
||||||
struct rb_call_info_kw_arg *kw_arg =
|
struct rb_callinfo_kwarg *kw_arg =
|
||||||
rb_xmalloc_mul_add(len - 1, sizeof(VALUE), sizeof(struct rb_call_info_kw_arg));
|
rb_xmalloc_mul_add(len - 1, sizeof(VALUE), sizeof(struct rb_callinfo_kwarg));
|
||||||
VALUE *keywords = kw_arg->keywords;
|
VALUE *keywords = kw_arg->keywords;
|
||||||
int i = 0;
|
int i = 0;
|
||||||
kw_arg->keyword_len = len;
|
kw_arg->keyword_len = len;
|
||||||
|
@ -3924,7 +3936,7 @@ compile_keyword_arg(rb_iseq_t *iseq, LINK_ANCHOR *const ret,
|
||||||
|
|
||||||
static int
|
static int
|
||||||
compile_args(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *node,
|
compile_args(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *node,
|
||||||
struct rb_call_info_kw_arg **keywords_ptr, unsigned int *flag)
|
struct rb_callinfo_kwarg **keywords_ptr, unsigned int *flag)
|
||||||
{
|
{
|
||||||
int len = 0;
|
int len = 0;
|
||||||
|
|
||||||
|
@ -4407,20 +4419,25 @@ compile_massign_lhs(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const n
|
||||||
switch (nd_type(node)) {
|
switch (nd_type(node)) {
|
||||||
case NODE_ATTRASGN: {
|
case NODE_ATTRASGN: {
|
||||||
INSN *iobj;
|
INSN *iobj;
|
||||||
struct rb_call_info *ci;
|
|
||||||
VALUE dupidx;
|
VALUE dupidx;
|
||||||
int line = nd_line(node);
|
int line = nd_line(node);
|
||||||
|
|
||||||
CHECK(COMPILE_POPPED(ret, "masgn lhs (NODE_ATTRASGN)", node));
|
CHECK(COMPILE_POPPED(ret, "masgn lhs (NODE_ATTRASGN)", node));
|
||||||
|
|
||||||
iobj = (INSN *)get_prev_insn((INSN *)LAST_ELEMENT(ret)); /* send insn */
|
iobj = (INSN *)get_prev_insn((INSN *)LAST_ELEMENT(ret)); /* send insn */
|
||||||
ci = (struct rb_call_info *)OPERAND_AT(iobj, 0);
|
const struct rb_callinfo *ci = (struct rb_callinfo *)OPERAND_AT(iobj, 0);
|
||||||
ci->orig_argc += 1;
|
int argc = vm_ci_argc(ci) + 1;
|
||||||
dupidx = INT2FIX(ci->orig_argc);
|
ci = ci_argc_set(iseq, ci, argc);
|
||||||
|
OPERAND_AT(iobj, 0) = (VALUE)ci;
|
||||||
|
RB_OBJ_WRITTEN(iseq, Qundef, ci);
|
||||||
|
dupidx = INT2FIX(argc);
|
||||||
|
|
||||||
INSERT_BEFORE_INSN1(iobj, line, topn, dupidx);
|
INSERT_BEFORE_INSN1(iobj, line, topn, dupidx);
|
||||||
if (ci->flag & VM_CALL_ARGS_SPLAT) {
|
if (vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT) {
|
||||||
--ci->orig_argc;
|
int argc = vm_ci_argc(ci);
|
||||||
|
ci = ci_argc_set(iseq, ci, argc - 1);
|
||||||
|
OPERAND_AT(iobj, 0) = (VALUE)ci;
|
||||||
|
RB_OBJ_WRITTEN(iseq, Qundef, iobj);
|
||||||
INSERT_BEFORE_INSN1(iobj, line, newarray, INT2FIX(1));
|
INSERT_BEFORE_INSN1(iobj, line, newarray, INT2FIX(1));
|
||||||
INSERT_BEFORE_INSN(iobj, line, concatarray);
|
INSERT_BEFORE_INSN(iobj, line, concatarray);
|
||||||
}
|
}
|
||||||
|
@ -5017,7 +5034,7 @@ check_keyword(const NODE *node)
|
||||||
|
|
||||||
static VALUE
|
static VALUE
|
||||||
setup_args_core(rb_iseq_t *iseq, LINK_ANCHOR *const args, const NODE *argn,
|
setup_args_core(rb_iseq_t *iseq, LINK_ANCHOR *const args, const NODE *argn,
|
||||||
int dup_rest, unsigned int *flag, struct rb_call_info_kw_arg **keywords)
|
int dup_rest, unsigned int *flag, struct rb_callinfo_kwarg **keywords)
|
||||||
{
|
{
|
||||||
if (argn) {
|
if (argn) {
|
||||||
switch (nd_type(argn)) {
|
switch (nd_type(argn)) {
|
||||||
|
@ -5077,7 +5094,7 @@ setup_args_core(rb_iseq_t *iseq, LINK_ANCHOR *const args, const NODE *argn,
|
||||||
|
|
||||||
static VALUE
|
static VALUE
|
||||||
setup_args(rb_iseq_t *iseq, LINK_ANCHOR *const args, const NODE *argn,
|
setup_args(rb_iseq_t *iseq, LINK_ANCHOR *const args, const NODE *argn,
|
||||||
unsigned int *flag, struct rb_call_info_kw_arg **keywords)
|
unsigned int *flag, struct rb_callinfo_kwarg **keywords)
|
||||||
{
|
{
|
||||||
VALUE ret;
|
VALUE ret;
|
||||||
if (argn && nd_type(argn) == NODE_BLOCK_PASS) {
|
if (argn && nd_type(argn) == NODE_BLOCK_PASS) {
|
||||||
|
@ -5209,7 +5226,7 @@ compile_if(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, int
|
||||||
DECL_ANCHOR(else_seq);
|
DECL_ANCHOR(else_seq);
|
||||||
LABEL *then_label, *else_label, *end_label;
|
LABEL *then_label, *else_label, *end_label;
|
||||||
VALUE branches = Qfalse;
|
VALUE branches = Qfalse;
|
||||||
int ci_size, ci_kw_size;
|
int ci_size;
|
||||||
VALUE catch_table = ISEQ_COMPILE_DATA(iseq)->catch_table_ary;
|
VALUE catch_table = ISEQ_COMPILE_DATA(iseq)->catch_table_ary;
|
||||||
long catch_table_size = NIL_P(catch_table) ? 0 : RARRAY_LEN(catch_table);
|
long catch_table_size = NIL_P(catch_table) ? 0 : RARRAY_LEN(catch_table);
|
||||||
|
|
||||||
|
@ -5224,12 +5241,10 @@ compile_if(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, int
|
||||||
then_label, else_label);
|
then_label, else_label);
|
||||||
|
|
||||||
ci_size = body->ci_size;
|
ci_size = body->ci_size;
|
||||||
ci_kw_size = body->ci_kw_size;
|
|
||||||
CHECK(COMPILE_(then_seq, "then", node_body, popped));
|
CHECK(COMPILE_(then_seq, "then", node_body, popped));
|
||||||
catch_table = ISEQ_COMPILE_DATA(iseq)->catch_table_ary;
|
catch_table = ISEQ_COMPILE_DATA(iseq)->catch_table_ary;
|
||||||
if (!then_label->refcnt) {
|
if (!then_label->refcnt) {
|
||||||
body->ci_size = ci_size;
|
body->ci_size = ci_size;
|
||||||
body->ci_kw_size = ci_kw_size;
|
|
||||||
if (!NIL_P(catch_table)) rb_ary_set_len(catch_table, catch_table_size);
|
if (!NIL_P(catch_table)) rb_ary_set_len(catch_table, catch_table_size);
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
|
@ -5237,12 +5252,10 @@ compile_if(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, int
|
||||||
}
|
}
|
||||||
|
|
||||||
ci_size = body->ci_size;
|
ci_size = body->ci_size;
|
||||||
ci_kw_size = body->ci_kw_size;
|
|
||||||
CHECK(COMPILE_(else_seq, "else", node_else, popped));
|
CHECK(COMPILE_(else_seq, "else", node_else, popped));
|
||||||
catch_table = ISEQ_COMPILE_DATA(iseq)->catch_table_ary;
|
catch_table = ISEQ_COMPILE_DATA(iseq)->catch_table_ary;
|
||||||
if (!else_label->refcnt) {
|
if (!else_label->refcnt) {
|
||||||
body->ci_size = ci_size;
|
body->ci_size = ci_size;
|
||||||
body->ci_kw_size = ci_kw_size;
|
|
||||||
if (!NIL_P(catch_table)) rb_ary_set_len(catch_table, catch_table_size);
|
if (!NIL_P(catch_table)) rb_ary_set_len(catch_table, catch_table_size);
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
|
@ -6873,7 +6886,7 @@ compile_call(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *const node, in
|
||||||
ID mid = node->nd_mid;
|
ID mid = node->nd_mid;
|
||||||
VALUE argc;
|
VALUE argc;
|
||||||
unsigned int flag = 0;
|
unsigned int flag = 0;
|
||||||
struct rb_call_info_kw_arg *keywords = NULL;
|
struct rb_callinfo_kwarg *keywords = NULL;
|
||||||
const rb_iseq_t *parent_block = ISEQ_COMPILE_DATA(iseq)->current_block;
|
const rb_iseq_t *parent_block = ISEQ_COMPILE_DATA(iseq)->current_block;
|
||||||
LABEL *else_label = NULL;
|
LABEL *else_label = NULL;
|
||||||
VALUE branches = Qfalse;
|
VALUE branches = Qfalse;
|
||||||
|
@ -7676,7 +7689,7 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *node, in
|
||||||
DECL_ANCHOR(args);
|
DECL_ANCHOR(args);
|
||||||
int argc;
|
int argc;
|
||||||
unsigned int flag = 0;
|
unsigned int flag = 0;
|
||||||
struct rb_call_info_kw_arg *keywords = NULL;
|
struct rb_callinfo_kwarg *keywords = NULL;
|
||||||
const rb_iseq_t *parent_block = ISEQ_COMPILE_DATA(iseq)->current_block;
|
const rb_iseq_t *parent_block = ISEQ_COMPILE_DATA(iseq)->current_block;
|
||||||
|
|
||||||
INIT_ANCHOR(args);
|
INIT_ANCHOR(args);
|
||||||
|
@ -7834,7 +7847,7 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *node, in
|
||||||
DECL_ANCHOR(args);
|
DECL_ANCHOR(args);
|
||||||
VALUE argc;
|
VALUE argc;
|
||||||
unsigned int flag = 0;
|
unsigned int flag = 0;
|
||||||
struct rb_call_info_kw_arg *keywords = NULL;
|
struct rb_callinfo_kwarg *keywords = NULL;
|
||||||
|
|
||||||
INIT_ANCHOR(args);
|
INIT_ANCHOR(args);
|
||||||
|
|
||||||
|
@ -8665,10 +8678,10 @@ insn_data_to_s_detail(INSN *iobj)
|
||||||
break;
|
break;
|
||||||
case TS_CALLDATA: /* we store these as call infos at compile time */
|
case TS_CALLDATA: /* we store these as call infos at compile time */
|
||||||
{
|
{
|
||||||
const struct rb_call_info *ci = (struct rb_call_info *)OPERAND_AT(iobj, j);
|
const struct rb_callinfo *ci = (struct rb_callinfo *)OPERAND_AT(iobj, j);
|
||||||
rb_str_cat2(str, "<calldata:");
|
rb_str_cat2(str, "<calldata:");
|
||||||
if (ci->mid) rb_str_catf(str, "%"PRIsVALUE, rb_id2str(ci->mid));
|
if (vm_ci_mid(ci)) rb_str_catf(str, "%"PRIsVALUE, rb_id2str(vm_ci_mid(ci)));
|
||||||
rb_str_catf(str, ", %d>", ci->orig_argc);
|
rb_str_catf(str, ", %d>", vm_ci_argc(ci));
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case TS_CDHASH: /* case/when condition cache */
|
case TS_CDHASH: /* case/when condition cache */
|
||||||
|
@ -8905,7 +8918,7 @@ iseq_build_callinfo_from_hash(rb_iseq_t *iseq, VALUE op)
|
||||||
ID mid = 0;
|
ID mid = 0;
|
||||||
int orig_argc = 0;
|
int orig_argc = 0;
|
||||||
unsigned int flag = 0;
|
unsigned int flag = 0;
|
||||||
struct rb_call_info_kw_arg *kw_arg = 0;
|
struct rb_callinfo_kwarg *kw_arg = 0;
|
||||||
|
|
||||||
if (!NIL_P(op)) {
|
if (!NIL_P(op)) {
|
||||||
VALUE vmid = rb_hash_aref(op, ID2SYM(rb_intern("mid")));
|
VALUE vmid = rb_hash_aref(op, ID2SYM(rb_intern("mid")));
|
||||||
|
@ -8920,7 +8933,7 @@ iseq_build_callinfo_from_hash(rb_iseq_t *iseq, VALUE op)
|
||||||
if (!NIL_P(vkw_arg)) {
|
if (!NIL_P(vkw_arg)) {
|
||||||
int i;
|
int i;
|
||||||
int len = RARRAY_LENINT(vkw_arg);
|
int len = RARRAY_LENINT(vkw_arg);
|
||||||
size_t n = rb_call_info_kw_arg_bytes(len);
|
size_t n = rb_callinfo_kwarg_bytes(len);
|
||||||
|
|
||||||
kw_arg = xmalloc(n);
|
kw_arg = xmalloc(n);
|
||||||
kw_arg->keyword_len = len;
|
kw_arg->keyword_len = len;
|
||||||
|
@ -8932,7 +8945,9 @@ iseq_build_callinfo_from_hash(rb_iseq_t *iseq, VALUE op)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return (VALUE)new_callinfo(iseq, mid, orig_argc, flag, kw_arg, (flag & VM_CALL_ARGS_SIMPLE) == 0);
|
const struct rb_callinfo *ci = new_callinfo(iseq, mid, orig_argc, flag, kw_arg, (flag & VM_CALL_ARGS_SIMPLE) == 0);
|
||||||
|
RB_OBJ_WRITTEN(iseq, Qundef, ci);
|
||||||
|
return (VALUE)ci;
|
||||||
}
|
}
|
||||||
|
|
||||||
static rb_event_flag_t
|
static rb_event_flag_t
|
||||||
|
@ -9009,7 +9024,13 @@ iseq_build_from_ary_body(rb_iseq_t *iseq, LINK_ANCHOR *const anchor,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (argc > 0) {
|
if (argc > 0) {
|
||||||
argv = compile_data_alloc2(iseq, sizeof(VALUE), argc);
|
argv = compile_data_calloc2(iseq, sizeof(VALUE), argc);
|
||||||
|
|
||||||
|
// add element before operand setup to make GC root
|
||||||
|
ADD_ELEM(anchor,
|
||||||
|
(LINK_ELEMENT*)new_insn_core(iseq, line_no,
|
||||||
|
(enum ruby_vminsn_type)insn_id, argc, argv));
|
||||||
|
|
||||||
for (j=0; j<argc; j++) {
|
for (j=0; j<argc; j++) {
|
||||||
VALUE op = rb_ary_entry(obj, j+1);
|
VALUE op = rb_ary_entry(obj, j+1);
|
||||||
switch (insn_op_type((VALUE)insn_id, j)) {
|
switch (insn_op_type((VALUE)insn_id, j)) {
|
||||||
|
@ -9093,9 +9114,11 @@ iseq_build_from_ary_body(rb_iseq_t *iseq, LINK_ANCHOR *const anchor,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
else {
|
||||||
ADD_ELEM(anchor,
|
ADD_ELEM(anchor,
|
||||||
(LINK_ELEMENT*)new_insn_core(iseq, line_no,
|
(LINK_ELEMENT*)new_insn_core(iseq, line_no,
|
||||||
(enum ruby_vminsn_type)insn_id, argc, argv));
|
(enum ruby_vminsn_type)insn_id, argc, NULL));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
rb_raise(rb_eTypeError, "unexpected object for instruction");
|
rb_raise(rb_eTypeError, "unexpected object for instruction");
|
||||||
|
@ -9229,13 +9252,15 @@ rb_iseq_mark_insn_storage(struct iseq_compile_data_storage *storage)
|
||||||
case TS_CDHASH:
|
case TS_CDHASH:
|
||||||
case TS_ISEQ:
|
case TS_ISEQ:
|
||||||
case TS_VALUE:
|
case TS_VALUE:
|
||||||
|
case TS_CALLDATA: // ci is stored.
|
||||||
{
|
{
|
||||||
VALUE op = OPERAND_AT(iobj, j);
|
VALUE op = OPERAND_AT(iobj, j);
|
||||||
|
|
||||||
if (!SPECIAL_CONST_P(op)) {
|
if (!SPECIAL_CONST_P(op)) {
|
||||||
rb_gc_mark(op);
|
rb_gc_mark(op);
|
||||||
}
|
}
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -9704,12 +9729,6 @@ ibf_load_id(const struct ibf_load *load, const ID id_index)
|
||||||
|
|
||||||
/* dump/load: code */
|
/* dump/load: code */
|
||||||
|
|
||||||
static VALUE
|
|
||||||
ibf_dump_calldata(struct ibf_dump *dump, const struct rb_call_data *cd)
|
|
||||||
{
|
|
||||||
return (cd->ci.flag & VM_CALL_KWARG) ? Qtrue : Qfalse;
|
|
||||||
}
|
|
||||||
|
|
||||||
static ibf_offset_t ibf_dump_iseq_each(struct ibf_dump *dump, const rb_iseq_t *iseq);
|
static ibf_offset_t ibf_dump_iseq_each(struct ibf_dump *dump, const rb_iseq_t *iseq);
|
||||||
|
|
||||||
static int
|
static int
|
||||||
|
@ -9898,9 +9917,6 @@ ibf_dump_code(struct ibf_dump *dump, const rb_iseq_t *iseq)
|
||||||
break;
|
break;
|
||||||
case TS_CALLDATA:
|
case TS_CALLDATA:
|
||||||
{
|
{
|
||||||
/* ibf_dump_calldata() always returns either Qtrue or Qfalse */
|
|
||||||
char c = ibf_dump_calldata(dump, (const struct rb_call_data *)op) == Qtrue; // 1 or 0
|
|
||||||
ibf_dump_write_byte(dump, c);
|
|
||||||
goto skip_wv;
|
goto skip_wv;
|
||||||
}
|
}
|
||||||
case TS_ID:
|
case TS_ID:
|
||||||
|
@ -9937,7 +9953,6 @@ ibf_load_code(const struct ibf_load *load, const rb_iseq_t *iseq, ibf_offset_t b
|
||||||
|
|
||||||
struct rb_iseq_constant_body *load_body = iseq->body;
|
struct rb_iseq_constant_body *load_body = iseq->body;
|
||||||
struct rb_call_data *cd_entries = load_body->call_data;
|
struct rb_call_data *cd_entries = load_body->call_data;
|
||||||
struct rb_kwarg_call_data *cd_kw_entries = (struct rb_kwarg_call_data *)&load_body->call_data[load_body->ci_size];
|
|
||||||
union iseq_inline_storage_entry *is_entries = load_body->is_entries;
|
union iseq_inline_storage_entry *is_entries = load_body->is_entries;
|
||||||
|
|
||||||
for (code_index=0; code_index<iseq_size;) {
|
for (code_index=0; code_index<iseq_size;) {
|
||||||
|
@ -9984,8 +9999,7 @@ ibf_load_code(const struct ibf_load *load, const rb_iseq_t *iseq, ibf_offset_t b
|
||||||
break;
|
break;
|
||||||
case TS_CALLDATA:
|
case TS_CALLDATA:
|
||||||
{
|
{
|
||||||
unsigned char op = ibf_load_byte(load, &reading_pos);
|
code[code_index] = (VALUE)cd_entries++;
|
||||||
code[code_index] = op ? (VALUE)cd_kw_entries++ : (VALUE)cd_entries++; /* op is 1 (kw) or 0 (!kw) */
|
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case TS_ID:
|
case TS_ID:
|
||||||
|
@ -10257,40 +10271,31 @@ ibf_dump_ci_entries(struct ibf_dump *dump, const rb_iseq_t *iseq)
|
||||||
{
|
{
|
||||||
const struct rb_iseq_constant_body *const body = iseq->body;
|
const struct rb_iseq_constant_body *const body = iseq->body;
|
||||||
const unsigned int ci_size = body->ci_size;
|
const unsigned int ci_size = body->ci_size;
|
||||||
const unsigned int ci_kw_size = body->ci_kw_size;
|
const struct rb_call_data *cds = body->call_data;
|
||||||
const struct rb_call_data *calls = body->call_data;
|
|
||||||
const struct rb_kwarg_call_data *kw_calls = (const struct rb_kwarg_call_data *)&body->call_data[ci_size];
|
|
||||||
|
|
||||||
ibf_offset_t offset = ibf_dump_pos(dump);
|
ibf_offset_t offset = ibf_dump_pos(dump);
|
||||||
|
|
||||||
unsigned int i;
|
unsigned int i;
|
||||||
|
|
||||||
for (i = 0; i < ci_size; i++) {
|
for (i = 0; i < ci_size; i++) {
|
||||||
VALUE mid = ibf_dump_id(dump, calls[i].ci.mid);
|
const struct rb_callinfo *ci = cds[i].ci;
|
||||||
|
ibf_dump_write_small_value(dump, ibf_dump_id(dump, vm_ci_mid(ci)));
|
||||||
ibf_dump_write_small_value(dump, mid);
|
ibf_dump_write_small_value(dump, vm_ci_flag(ci));
|
||||||
ibf_dump_write_small_value(dump, calls[i].ci.flag);
|
ibf_dump_write_small_value(dump, vm_ci_argc(ci));
|
||||||
ibf_dump_write_small_value(dump, calls[i].ci.orig_argc);
|
|
||||||
}
|
|
||||||
|
|
||||||
for (i = 0; i < ci_kw_size; i++) {
|
|
||||||
const struct rb_call_info_kw_arg *kw_arg = kw_calls[i].ci_kw.kw_arg;
|
|
||||||
|
|
||||||
VALUE mid = ibf_dump_id(dump, kw_calls[i].ci_kw.ci.mid);
|
|
||||||
|
|
||||||
ibf_dump_write_small_value(dump, mid);
|
|
||||||
ibf_dump_write_small_value(dump, kw_calls[i].ci_kw.ci.flag);
|
|
||||||
ibf_dump_write_small_value(dump, kw_calls[i].ci_kw.ci.orig_argc);
|
|
||||||
|
|
||||||
ibf_dump_write_small_value(dump, kw_arg->keyword_len);
|
|
||||||
|
|
||||||
int j;
|
|
||||||
for (j = 0; j < kw_calls[i].ci_kw.kw_arg->keyword_len; j++) {
|
|
||||||
VALUE keyword = ibf_dump_object(dump, kw_arg->keywords[j]); /* kw_arg->keywords[n] is Symbol */
|
|
||||||
|
|
||||||
|
const struct rb_callinfo_kwarg *kwarg = vm_ci_kwarg(ci);
|
||||||
|
if (kwarg) {
|
||||||
|
int len = kwarg->keyword_len;
|
||||||
|
ibf_dump_write_small_value(dump, len);
|
||||||
|
for (int j=0; j<len; j++) {
|
||||||
|
VALUE keyword = ibf_dump_object(dump, kwarg->keywords[j]);
|
||||||
ibf_dump_write_small_value(dump, keyword);
|
ibf_dump_write_small_value(dump, keyword);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
else {
|
||||||
|
ibf_dump_write_small_value(dump, 0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return offset;
|
return offset;
|
||||||
}
|
}
|
||||||
|
@ -10299,50 +10304,36 @@ ibf_dump_ci_entries(struct ibf_dump *dump, const rb_iseq_t *iseq)
|
||||||
static struct rb_call_data *
|
static struct rb_call_data *
|
||||||
ibf_load_ci_entries(const struct ibf_load *load,
|
ibf_load_ci_entries(const struct ibf_load *load,
|
||||||
ibf_offset_t ci_entries_offset,
|
ibf_offset_t ci_entries_offset,
|
||||||
unsigned int ci_size,
|
unsigned int ci_size)
|
||||||
unsigned int ci_kw_size)
|
|
||||||
{
|
{
|
||||||
ibf_offset_t reading_pos = ci_entries_offset;
|
ibf_offset_t reading_pos = ci_entries_offset;
|
||||||
|
|
||||||
unsigned int i;
|
unsigned int i;
|
||||||
|
|
||||||
struct rb_call_data *calls =
|
struct rb_call_data *cds = ZALLOC_N(struct rb_call_data, ci_size);
|
||||||
rb_xcalloc_mul_add_mul(
|
|
||||||
sizeof(struct rb_call_data), ci_size,
|
|
||||||
sizeof(struct rb_kwarg_call_data), ci_kw_size);
|
|
||||||
struct rb_kwarg_call_data *kw_calls = (struct rb_kwarg_call_data *)&calls[ci_size];
|
|
||||||
|
|
||||||
for (i = 0; i < ci_size; i++) {
|
for (i = 0; i < ci_size; i++) {
|
||||||
VALUE mid_index = ibf_load_small_value(load, &reading_pos);
|
VALUE mid_index = ibf_load_small_value(load, &reading_pos);
|
||||||
|
ID mid = ibf_load_id(load, mid_index);
|
||||||
|
unsigned int flag = (unsigned int)ibf_load_small_value(load, &reading_pos);
|
||||||
|
unsigned int argc = (unsigned int)ibf_load_small_value(load, &reading_pos);
|
||||||
|
|
||||||
calls[i].ci.mid = ibf_load_id(load, mid_index);
|
struct rb_callinfo_kwarg *kwarg = NULL;
|
||||||
calls[i].ci.flag = (unsigned int)ibf_load_small_value(load, &reading_pos);
|
int kwlen = (int)ibf_load_small_value(load, &reading_pos);
|
||||||
calls[i].ci.orig_argc = (int)ibf_load_small_value(load, &reading_pos);
|
if (kwlen > 0) {
|
||||||
}
|
kwarg = rb_xmalloc_mul_add(kwlen - 1, sizeof(VALUE), sizeof(struct rb_callinfo_kwarg));;
|
||||||
|
kwarg->keyword_len = kwlen;
|
||||||
for (i = 0; i < ci_kw_size; i++) {
|
for (int j=0; j<kwlen; j++) {
|
||||||
VALUE mid_index = ibf_load_small_value(load, &reading_pos);
|
|
||||||
|
|
||||||
kw_calls[i].ci_kw.ci.mid = ibf_load_id(load, mid_index);
|
|
||||||
kw_calls[i].ci_kw.ci.flag = (unsigned int)ibf_load_small_value(load, &reading_pos);
|
|
||||||
kw_calls[i].ci_kw.ci.orig_argc = (int)ibf_load_small_value(load, &reading_pos);
|
|
||||||
|
|
||||||
int keyword_len = (int)ibf_load_small_value(load, &reading_pos);
|
|
||||||
|
|
||||||
kw_calls[i].ci_kw.kw_arg =
|
|
||||||
rb_xmalloc_mul_add(keyword_len - 1, sizeof(VALUE), sizeof(struct rb_call_info_kw_arg));
|
|
||||||
|
|
||||||
kw_calls[i].ci_kw.kw_arg->keyword_len = keyword_len;
|
|
||||||
|
|
||||||
int j;
|
|
||||||
for (j = 0; j < kw_calls[i].ci_kw.kw_arg->keyword_len; j++) {
|
|
||||||
VALUE keyword = ibf_load_small_value(load, &reading_pos);
|
VALUE keyword = ibf_load_small_value(load, &reading_pos);
|
||||||
|
kwarg->keywords[j] = ibf_load_object(load, keyword);
|
||||||
kw_calls[i].ci_kw.kw_arg->keywords[j] = ibf_load_object(load, keyword);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return calls;
|
cds[i].ci = vm_ci_new(mid, flag, argc, kwarg);
|
||||||
|
RB_OBJ_WRITTEN(load->iseq, Qundef, cds[i].ci);
|
||||||
|
}
|
||||||
|
|
||||||
|
return cds;
|
||||||
}
|
}
|
||||||
|
|
||||||
static ibf_offset_t
|
static ibf_offset_t
|
||||||
|
@ -10449,7 +10440,6 @@ ibf_dump_iseq_each(struct ibf_dump *dump, const rb_iseq_t *iseq)
|
||||||
ibf_dump_write_small_value(dump, body->local_table_size);
|
ibf_dump_write_small_value(dump, body->local_table_size);
|
||||||
ibf_dump_write_small_value(dump, body->is_size);
|
ibf_dump_write_small_value(dump, body->is_size);
|
||||||
ibf_dump_write_small_value(dump, body->ci_size);
|
ibf_dump_write_small_value(dump, body->ci_size);
|
||||||
ibf_dump_write_small_value(dump, body->ci_kw_size);
|
|
||||||
ibf_dump_write_small_value(dump, body->stack_max);
|
ibf_dump_write_small_value(dump, body->stack_max);
|
||||||
ibf_dump_write_small_value(dump, body->catch_except_p);
|
ibf_dump_write_small_value(dump, body->catch_except_p);
|
||||||
|
|
||||||
|
@ -10556,7 +10546,6 @@ ibf_load_iseq_each(struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t offset)
|
||||||
const unsigned int local_table_size = (unsigned int)ibf_load_small_value(load, &reading_pos);
|
const unsigned int local_table_size = (unsigned int)ibf_load_small_value(load, &reading_pos);
|
||||||
const unsigned int is_size = (unsigned int)ibf_load_small_value(load, &reading_pos);
|
const unsigned int is_size = (unsigned int)ibf_load_small_value(load, &reading_pos);
|
||||||
const unsigned int ci_size = (unsigned int)ibf_load_small_value(load, &reading_pos);
|
const unsigned int ci_size = (unsigned int)ibf_load_small_value(load, &reading_pos);
|
||||||
const unsigned int ci_kw_size = (unsigned int)ibf_load_small_value(load, &reading_pos);
|
|
||||||
const unsigned int stack_max = (unsigned int)ibf_load_small_value(load, &reading_pos);
|
const unsigned int stack_max = (unsigned int)ibf_load_small_value(load, &reading_pos);
|
||||||
const char catch_except_p = (char)ibf_load_small_value(load, &reading_pos);
|
const char catch_except_p = (char)ibf_load_small_value(load, &reading_pos);
|
||||||
|
|
||||||
|
@ -10584,7 +10573,6 @@ ibf_load_iseq_each(struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t offset)
|
||||||
load_body->local_table_size = local_table_size;
|
load_body->local_table_size = local_table_size;
|
||||||
load_body->is_size = is_size;
|
load_body->is_size = is_size;
|
||||||
load_body->ci_size = ci_size;
|
load_body->ci_size = ci_size;
|
||||||
load_body->ci_kw_size = ci_kw_size;
|
|
||||||
load_body->insns_info.size = insns_info_size;
|
load_body->insns_info.size = insns_info_size;
|
||||||
|
|
||||||
ISEQ_COVERAGE_SET(iseq, Qnil);
|
ISEQ_COVERAGE_SET(iseq, Qnil);
|
||||||
|
@ -10600,7 +10588,7 @@ ibf_load_iseq_each(struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t offset)
|
||||||
load_body->catch_except_p = catch_except_p;
|
load_body->catch_except_p = catch_except_p;
|
||||||
|
|
||||||
load_body->is_entries = ZALLOC_N(union iseq_inline_storage_entry, is_size);
|
load_body->is_entries = ZALLOC_N(union iseq_inline_storage_entry, is_size);
|
||||||
load_body->call_data = ibf_load_ci_entries(load, ci_entries_offset, ci_size, ci_kw_size);
|
load_body->call_data = ibf_load_ci_entries(load, ci_entries_offset, ci_size);
|
||||||
load_body->param.opt_table = ibf_load_param_opt_table(load, param_opt_table_offset, param_opt_num);
|
load_body->param.opt_table = ibf_load_param_opt_table(load, param_opt_table_offset, param_opt_num);
|
||||||
load_body->param.keyword = ibf_load_param_keyword(load, param_keyword_offset);
|
load_body->param.keyword = ibf_load_param_keyword(load, param_keyword_offset);
|
||||||
load_body->param.flags.has_kw = (param_flags >> 4) & 1;
|
load_body->param.flags.has_kw = (param_flags >> 4) & 1;
|
||||||
|
|
1
debug.c
1
debug.c
|
@ -24,6 +24,7 @@
|
||||||
#include "symbol.h"
|
#include "symbol.h"
|
||||||
#include "vm_core.h"
|
#include "vm_core.h"
|
||||||
#include "vm_debug.h"
|
#include "vm_debug.h"
|
||||||
|
#include "vm_callinfo.h"
|
||||||
|
|
||||||
/* This is the only place struct RIMemo is actually used */
|
/* This is the only place struct RIMemo is actually used */
|
||||||
struct RIMemo {
|
struct RIMemo {
|
||||||
|
|
|
@ -49,6 +49,12 @@ RB_DEBUG_COUNTER(mc_miss_by_visi)
|
||||||
RB_DEBUG_COUNTER(mc_miss_spurious)
|
RB_DEBUG_COUNTER(mc_miss_spurious)
|
||||||
RB_DEBUG_COUNTER(mc_miss_reuse_call)
|
RB_DEBUG_COUNTER(mc_miss_reuse_call)
|
||||||
|
|
||||||
|
// callinfo
|
||||||
|
RB_DEBUG_COUNTER(ci_packed)
|
||||||
|
RB_DEBUG_COUNTER(ci_kw)
|
||||||
|
RB_DEBUG_COUNTER(ci_nokw)
|
||||||
|
RB_DEBUG_COUNTER(ci_runtime)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* call cache fastpath usage
|
* call cache fastpath usage
|
||||||
*/
|
*/
|
||||||
|
@ -282,6 +288,7 @@ RB_DEBUG_COUNTER(obj_imemo_throw_data)
|
||||||
RB_DEBUG_COUNTER(obj_imemo_ifunc)
|
RB_DEBUG_COUNTER(obj_imemo_ifunc)
|
||||||
RB_DEBUG_COUNTER(obj_imemo_memo)
|
RB_DEBUG_COUNTER(obj_imemo_memo)
|
||||||
RB_DEBUG_COUNTER(obj_imemo_parser_strterm)
|
RB_DEBUG_COUNTER(obj_imemo_parser_strterm)
|
||||||
|
RB_DEBUG_COUNTER(obj_imemo_callinfo)
|
||||||
|
|
||||||
/* ar_table */
|
/* ar_table */
|
||||||
RB_DEBUG_COUNTER(artable_hint_hit)
|
RB_DEBUG_COUNTER(artable_hint_hit)
|
||||||
|
|
|
@ -637,6 +637,7 @@ count_imemo_objects(int argc, VALUE *argv, VALUE self)
|
||||||
imemo_type_ids[8] = rb_intern("imemo_tmpbuf");
|
imemo_type_ids[8] = rb_intern("imemo_tmpbuf");
|
||||||
imemo_type_ids[9] = rb_intern("imemo_ast");
|
imemo_type_ids[9] = rb_intern("imemo_ast");
|
||||||
imemo_type_ids[10] = rb_intern("imemo_parser_strterm");
|
imemo_type_ids[10] = rb_intern("imemo_parser_strterm");
|
||||||
|
imemo_type_ids[11] = rb_intern("imemo_callinfo");
|
||||||
}
|
}
|
||||||
|
|
||||||
rb_objspace_each_objects(count_imemo_objects_i, (void *)hash);
|
rb_objspace_each_objects(count_imemo_objects_i, (void *)hash);
|
||||||
|
|
25
gc.c
25
gc.c
|
@ -106,6 +106,7 @@
|
||||||
#include "symbol.h"
|
#include "symbol.h"
|
||||||
#include "transient_heap.h"
|
#include "transient_heap.h"
|
||||||
#include "vm_core.h"
|
#include "vm_core.h"
|
||||||
|
#include "vm_callinfo.h"
|
||||||
|
|
||||||
#include "builtin.h"
|
#include "builtin.h"
|
||||||
|
|
||||||
|
@ -2892,6 +2893,9 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
|
||||||
case imemo_parser_strterm:
|
case imemo_parser_strterm:
|
||||||
RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
|
RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
|
||||||
break;
|
break;
|
||||||
|
case imemo_callinfo:
|
||||||
|
RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
/* unreachable */
|
/* unreachable */
|
||||||
break;
|
break;
|
||||||
|
@ -5202,7 +5206,10 @@ gc_mark_ptr(rb_objspace_t *objspace, VALUE obj)
|
||||||
if (LIKELY(objspace->mark_func_data == NULL)) {
|
if (LIKELY(objspace->mark_func_data == NULL)) {
|
||||||
rgengc_check_relation(objspace, obj);
|
rgengc_check_relation(objspace, obj);
|
||||||
if (!gc_mark_set(objspace, obj)) return; /* already marked */
|
if (!gc_mark_set(objspace, obj)) return; /* already marked */
|
||||||
if (RB_TYPE_P(obj, T_NONE)) rb_bug("try to mark T_NONE object"); /* check here will help debugging */
|
if (RB_TYPE_P(obj, T_NONE)) {
|
||||||
|
rp(obj);
|
||||||
|
rb_bug("try to mark T_NONE object"); /* check here will help debugging */
|
||||||
|
}
|
||||||
gc_aging(objspace, obj);
|
gc_aging(objspace, obj);
|
||||||
gc_grey(objspace, obj);
|
gc_grey(objspace, obj);
|
||||||
}
|
}
|
||||||
|
@ -5326,6 +5333,8 @@ gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
|
||||||
case imemo_parser_strterm:
|
case imemo_parser_strterm:
|
||||||
rb_strterm_mark(obj);
|
rb_strterm_mark(obj);
|
||||||
return;
|
return;
|
||||||
|
case imemo_callinfo:
|
||||||
|
return;
|
||||||
#if VM_CHECK_MODE > 0
|
#if VM_CHECK_MODE > 0
|
||||||
default:
|
default:
|
||||||
VM_UNREACHABLE(gc_mark_imemo);
|
VM_UNREACHABLE(gc_mark_imemo);
|
||||||
|
@ -8119,6 +8128,7 @@ gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
|
||||||
break;
|
break;
|
||||||
case imemo_parser_strterm:
|
case imemo_parser_strterm:
|
||||||
case imemo_tmpbuf:
|
case imemo_tmpbuf:
|
||||||
|
case imemo_callinfo:
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
rb_bug("not reachable %d", imemo_type(obj));
|
rb_bug("not reachable %d", imemo_type(obj));
|
||||||
|
@ -11595,6 +11605,7 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
|
||||||
IMEMO_NAME(tmpbuf);
|
IMEMO_NAME(tmpbuf);
|
||||||
IMEMO_NAME(ast);
|
IMEMO_NAME(ast);
|
||||||
IMEMO_NAME(parser_strterm);
|
IMEMO_NAME(parser_strterm);
|
||||||
|
IMEMO_NAME(callinfo);
|
||||||
#undef IMEMO_NAME
|
#undef IMEMO_NAME
|
||||||
default: UNREACHABLE;
|
default: UNREACHABLE;
|
||||||
}
|
}
|
||||||
|
@ -11621,6 +11632,16 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
|
||||||
rb_raw_iseq_info(BUFF_ARGS, iseq);
|
rb_raw_iseq_info(BUFF_ARGS, iseq);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
case imemo_callinfo:
|
||||||
|
{
|
||||||
|
const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
|
||||||
|
APPENDF((BUFF_ARGS, "(mid:%s, flag:%x argc:%d, kwarg:%s)",
|
||||||
|
rb_id2name(vm_ci_mid(ci)),
|
||||||
|
vm_ci_flag(ci),
|
||||||
|
vm_ci_argc(ci),
|
||||||
|
vm_ci_kwarg(ci) ? "available" : "NULL"));
|
||||||
|
break;
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@ -11676,7 +11697,7 @@ rb_obj_info_dump(VALUE obj)
|
||||||
fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
|
fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
MJIT_FUNC_EXPORTED void
|
||||||
rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
|
rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
|
||||||
{
|
{
|
||||||
char buff[0x100];
|
char buff[0x100];
|
||||||
|
|
12
insns.def
12
insns.def
|
@ -775,10 +775,10 @@ send
|
||||||
(CALL_DATA cd, ISEQ blockiseq)
|
(CALL_DATA cd, ISEQ blockiseq)
|
||||||
(...)
|
(...)
|
||||||
(VALUE val)
|
(VALUE val)
|
||||||
// attr rb_snum_t sp_inc = sp_inc_of_sendish(&cd->ci);
|
// attr rb_snum_t sp_inc = sp_inc_of_sendish(cd->ci);
|
||||||
// attr rb_snum_t comptime_sp_inc = sp_inc_of_sendish(ci);
|
// attr rb_snum_t comptime_sp_inc = sp_inc_of_sendish(ci);
|
||||||
{
|
{
|
||||||
VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), &cd->ci, blockiseq, false);
|
VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, false);
|
||||||
val = vm_sendish(ec, GET_CFP(), cd, bh, vm_search_method_wrap);
|
val = vm_sendish(ec, GET_CFP(), cd, bh, vm_search_method_wrap);
|
||||||
|
|
||||||
if (val == Qundef) {
|
if (val == Qundef) {
|
||||||
|
@ -794,7 +794,7 @@ opt_send_without_block
|
||||||
(...)
|
(...)
|
||||||
(VALUE val)
|
(VALUE val)
|
||||||
// attr bool handles_sp = true;
|
// attr bool handles_sp = true;
|
||||||
// attr rb_snum_t sp_inc = sp_inc_of_sendish(&cd->ci);
|
// attr rb_snum_t sp_inc = sp_inc_of_sendish(cd->ci);
|
||||||
// attr rb_snum_t comptime_sp_inc = sp_inc_of_sendish(ci);
|
// attr rb_snum_t comptime_sp_inc = sp_inc_of_sendish(ci);
|
||||||
{
|
{
|
||||||
VALUE bh = VM_BLOCK_HANDLER_NONE;
|
VALUE bh = VM_BLOCK_HANDLER_NONE;
|
||||||
|
@ -881,10 +881,10 @@ invokesuper
|
||||||
(CALL_DATA cd, ISEQ blockiseq)
|
(CALL_DATA cd, ISEQ blockiseq)
|
||||||
(...)
|
(...)
|
||||||
(VALUE val)
|
(VALUE val)
|
||||||
// attr rb_snum_t sp_inc = sp_inc_of_sendish(&cd->ci);
|
// attr rb_snum_t sp_inc = sp_inc_of_sendish(cd->ci);
|
||||||
// attr rb_snum_t comptime_sp_inc = sp_inc_of_sendish(ci);
|
// attr rb_snum_t comptime_sp_inc = sp_inc_of_sendish(ci);
|
||||||
{
|
{
|
||||||
VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), &cd->ci, blockiseq, true);
|
VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), cd->ci, blockiseq, true);
|
||||||
val = vm_sendish(ec, GET_CFP(), cd, bh, vm_search_super_method);
|
val = vm_sendish(ec, GET_CFP(), cd, bh, vm_search_super_method);
|
||||||
|
|
||||||
if (val == Qundef) {
|
if (val == Qundef) {
|
||||||
|
@ -900,7 +900,7 @@ invokeblock
|
||||||
(...)
|
(...)
|
||||||
(VALUE val)
|
(VALUE val)
|
||||||
// attr bool handles_sp = true;
|
// attr bool handles_sp = true;
|
||||||
// attr rb_snum_t sp_inc = sp_inc_of_invokeblock(&cd->ci);
|
// attr rb_snum_t sp_inc = sp_inc_of_invokeblock(cd->ci);
|
||||||
// attr rb_snum_t comptime_sp_inc = sp_inc_of_invokeblock(ci);
|
// attr rb_snum_t comptime_sp_inc = sp_inc_of_invokeblock(ci);
|
||||||
{
|
{
|
||||||
if (UNLIKELY(cd->cc.call != vm_invokeblock_i)) {
|
if (UNLIKELY(cd->cc.call != vm_invokeblock_i)) {
|
||||||
|
|
|
@@ -41,7 +41,8 @@ enum imemo_type {
     imemo_iseq           =  7,
     imemo_tmpbuf         =  8,
     imemo_ast            =  9,
-    imemo_parser_strterm = 10
+    imemo_parser_strterm = 10,
+    imemo_callinfo       = 11,
 };

 /* CREF (Class REFerence) is defined in method.h */
@@ -85,16 +85,9 @@ struct rb_call_cache {
 };
 STATIC_ASSERT(cachelined, sizeof(struct rb_call_cache) <= CACHELINE);

-struct rb_call_info {
-    /* fixed at compile time */
-    ID mid;
-    unsigned int flag;
-    int orig_argc;
-};
-
 struct rb_call_data {
+    const struct rb_callinfo *ci;
     struct rb_call_cache cc;
-    struct rb_call_info ci;
 };

 /* vm_insnhelper.h */
@@ -150,12 +143,6 @@ MJIT_SYMBOL_EXPORT_BEGIN
 void rb_vm_search_method_slowpath(struct rb_call_data *cd, VALUE klass);
 MJIT_SYMBOL_EXPORT_END

-RUBY_SYMBOL_EXPORT_BEGIN
-/* vm_method.c */
-RUBY_FUNC_NONNULL(1, VALUE rb_funcallv_with_cc(struct rb_call_data*, VALUE, ID, int, const VALUE*));
-RUBY_FUNC_NONNULL(1, bool rb_method_basic_definition_p_with_cc(struct rb_call_data *, VALUE, ID));
-RUBY_SYMBOL_EXPORT_END
-
 /* vm_dump.c */
 void rb_print_backtrace(void);

@@ -174,20 +161,6 @@ VALUE rb_ec_backtrace_object(const struct rb_execution_context_struct *ec);
 void rb_backtrace_use_iseq_first_lineno_for_last_location(VALUE self);
 MJIT_SYMBOL_EXPORT_END

-#ifdef __GNUC__
-# define rb_funcallv(recv, mid, argc, argv) \
-    __extension__({ \
-        static struct rb_call_data rb_funcallv_data; \
-        rb_funcallv_with_cc(&rb_funcallv_data, recv, mid, argc, argv); \
-    })
-# define rb_method_basic_definition_p(klass, mid) \
-    __extension__({ \
-        static struct rb_call_data rb_mbdp; \
-        (klass == Qfalse) ? /* hidden object cannot be overridden */ true : \
-        rb_method_basic_definition_p_with_cc(&rb_mbdp, klass, mid); \
-    })
-#endif
-
 #define RUBY_DTRACE_CREATE_HOOK(name, arg) \
     RUBY_DTRACE_HOOK(name##_CREATE, arg)
 #define RUBY_DTRACE_HOOK(name, arg) \
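A side note on the rb_call_data change above: a call site now holds a pointer to a (possibly packed, possibly shared) rb_callinfo instead of embedding a struct rb_call_info by value, so code that used to take &cd->ci simply reads cd->ci and goes through the vm_ci_* accessors. The standalone sketch below uses stand-in types, not the real VM definitions, purely to illustrate that shape.

/* Stand-in types for illustration only; the real ones live in
 * vm_callinfo.h and vm_core.h. */
#include <stdio.h>

struct callinfo  { unsigned long mid, flag, argc; }; /* plays the role of rb_callinfo  */
struct callcache { const void *me; };                /* plays the role of rb_call_cache */

/* After this commit a call site owns a pointer to a callinfo
 * rather than embedding one by value. */
struct calldata { const struct callinfo *ci; struct callcache cc; };

static unsigned long ci_mid(const struct callinfo *ci) { return ci->mid; } /* like vm_ci_mid() */

int main(void)
{
    static const struct callinfo ci = { .mid = 42, .flag = 0, .argc = 1 };
    struct calldata cd = { .ci = &ci, .cc = { NULL } };
    /* code that used to pass &cd->ci now just passes cd->ci */
    printf("mid=%lu argc=%lu\n", ci_mid(cd.ci), cd.ci->argc);
    return 0;
}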
iseq.c  (78 changes)

@@ -36,6 +36,7 @@
 #include "mjit.h"
 #include "ruby/util.h"
 #include "vm_core.h"
+#include "vm_callinfo.h"

 #include "builtin.h"
 #include "insns.inc"
@@ -116,12 +117,6 @@ rb_iseq_free(const rb_iseq_t *iseq)
         ruby_xfree((void *)body->is_entries);

         if (body->call_data) {
-            unsigned int i;
-            struct rb_kwarg_call_data *kw_calls = (struct rb_kwarg_call_data *)&body->call_data[body->ci_size];
-            for (i=0; i<body->ci_kw_size; i++) {
-                const struct rb_call_info_kw_arg *kw_arg = kw_calls[i].ci_kw.kw_arg;
-                ruby_xfree((void *)kw_arg);
-            }
             ruby_xfree(body->call_data);
         }
         ruby_xfree((void *)body->catch_table);
@@ -246,6 +241,14 @@ rb_iseq_update_references(rb_iseq_t *iseq)
         if (body->parent_iseq) {
             body->parent_iseq = (struct rb_iseq_struct *)rb_gc_location((VALUE)body->parent_iseq);
         }
+        if (body->call_data) {
+            for (unsigned int i=0; i<body->ci_size; i++) {
+                struct rb_call_data *cds = body->call_data;
+                if (!SPECIAL_CONST_P(cds[i].ci)) {
+                    cds[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)cds[i].ci);
+                }
+            }
+        }
         if (FL_TEST(iseq, ISEQ_MARKABLE_ISEQ)) {
             rb_iseq_each_value(iseq, update_each_insn_value, NULL);
             VALUE *original_iseq = ISEQ_ORIGINAL_ISEQ(iseq);
@@ -316,6 +319,13 @@ rb_iseq_mark(const rb_iseq_t *iseq)
         rb_gc_mark_movable(body->location.pathobj);
         RUBY_MARK_MOVABLE_UNLESS_NULL((VALUE)body->parent_iseq);

+        if (body->call_data) {
+            struct rb_call_data *cds = (struct rb_call_data *)body->call_data;
+            for (unsigned int i=0; i<body->ci_size; i++) {
+                rb_gc_mark_movable((VALUE)cds[i].ci);
+            }
+        }
+
         if (body->param.flags.has_kw && ISEQ_COMPILE_DATA(iseq) == NULL) {
             const struct rb_iseq_param_keyword *const keyword = body->param.keyword;
             int i, j;
@@ -391,8 +401,6 @@ rb_iseq_memsize(const rb_iseq_t *iseq)
     /* TODO: should we count original_iseq? */

     if (ISEQ_EXECUTABLE_P(iseq) && body) {
-        struct rb_kwarg_call_data *kw_calls = (struct rb_kwarg_call_data *)&body->call_data[body->ci_size];
-
         size += sizeof(struct rb_iseq_constant_body);
         size += body->iseq_size * sizeof(VALUE);
         size += body->insns_info.size * (sizeof(struct iseq_insn_info_entry) + sizeof(unsigned int));
@@ -408,19 +416,7 @@ rb_iseq_memsize(const rb_iseq_t *iseq)

         /* body->call_data */
         size += body->ci_size * sizeof(struct rb_call_data);
-        size += body->ci_kw_size * sizeof(struct rb_kwarg_call_data);
-
-        if (kw_calls) {
-            unsigned int i;
-
-            for (i = 0; i < body->ci_kw_size; i++) {
-                const struct rb_call_info_kw_arg *kw_arg = kw_calls[i].ci_kw.kw_arg;
-
-                if (kw_arg) {
-                    size += rb_call_info_kw_arg_bytes(kw_arg->keyword_len);
-                }
-            }
-        }
+        // TODO: should we count imemo_callinfo?
     }

     compile_data = ISEQ_COMPILE_DATA(iseq);
@@ -1953,24 +1949,25 @@ rb_insn_operand_intern(const rb_iseq_t *iseq,
       case TS_CALLDATA:
         {
             struct rb_call_data *cd = (struct rb_call_data *)op;
-            struct rb_call_info *ci = &cd->ci;
+            const struct rb_callinfo *ci = cd->ci;
             VALUE ary = rb_ary_new();
+            ID mid = vm_ci_mid(ci);

-            if (ci->mid) {
-                rb_ary_push(ary, rb_sprintf("mid:%"PRIsVALUE, rb_id2str(ci->mid)));
+            if (mid) {
+                rb_ary_push(ary, rb_sprintf("mid:%"PRIsVALUE, rb_id2str(mid)));
             }

-            rb_ary_push(ary, rb_sprintf("argc:%d", ci->orig_argc));
+            rb_ary_push(ary, rb_sprintf("argc:%d", vm_ci_argc(ci)));

-            if (ci->flag & VM_CALL_KWARG) {
-                struct rb_call_info_kw_arg *kw_args = ((struct rb_call_info_with_kwarg *)ci)->kw_arg;
+            if (vm_ci_flag(ci) & VM_CALL_KWARG) {
+                const struct rb_callinfo_kwarg *kw_args = vm_ci_kwarg(ci);
                 VALUE kw_ary = rb_ary_new_from_values(kw_args->keyword_len, kw_args->keywords);
                 rb_ary_push(ary, rb_sprintf("kw:[%"PRIsVALUE"]", rb_ary_join(kw_ary, rb_str_new2(","))));
             }

-            if (ci->flag) {
+            if (vm_ci_flag(ci)) {
                 VALUE flags = rb_ary_new();
-# define CALL_FLAG(n) if (ci->flag & VM_CALL_##n) rb_ary_push(flags, rb_str_new2(#n))
+# define CALL_FLAG(n) if (vm_ci_flag(ci) & VM_CALL_##n) rb_ary_push(flags, rb_str_new2(#n))
                 CALL_FLAG(ARGS_SPLAT);
                 CALL_FLAG(ARGS_BLOCKARG);
                 CALL_FLAG(FCALL);
@@ -2780,27 +2777,28 @@ iseq_data_to_ary(const rb_iseq_t *iseq)
           case TS_CALLDATA:
             {
                 struct rb_call_data *cd = (struct rb_call_data *)*seq;
-                struct rb_call_info *ci = &cd->ci;
+                const struct rb_callinfo *ci = cd->ci;
                 VALUE e = rb_hash_new();
-                int orig_argc = ci->orig_argc;
+                int argc = vm_ci_argc(ci);

-                rb_hash_aset(e, ID2SYM(rb_intern("mid")), ci->mid ? ID2SYM(ci->mid) : Qnil);
-                rb_hash_aset(e, ID2SYM(rb_intern("flag")), UINT2NUM(ci->flag));
+                ID mid = vm_ci_mid(ci);
+                rb_hash_aset(e, ID2SYM(rb_intern("mid")), mid ? ID2SYM(mid) : Qnil);
+                rb_hash_aset(e, ID2SYM(rb_intern("flag")), UINT2NUM(vm_ci_flag(ci)));

-                if (ci->flag & VM_CALL_KWARG) {
-                    struct rb_call_info_with_kwarg *ci_kw = (struct rb_call_info_with_kwarg *)ci;
+                if (vm_ci_flag(ci) & VM_CALL_KWARG) {
+                    const struct rb_callinfo_kwarg *kwarg = vm_ci_kwarg(ci);
                     int i;
-                    VALUE kw = rb_ary_new2((long)ci_kw->kw_arg->keyword_len);
+                    VALUE kw = rb_ary_new2((long)kwarg->keyword_len);

-                    orig_argc -= ci_kw->kw_arg->keyword_len;
-                    for (i = 0; i < ci_kw->kw_arg->keyword_len; i++) {
-                        rb_ary_push(kw, ci_kw->kw_arg->keywords[i]);
+                    argc -= kwarg->keyword_len;
+                    for (i = 0; i < kwarg->keyword_len; i++) {
+                        rb_ary_push(kw, kwarg->keywords[i]);
                     }
                     rb_hash_aset(e, ID2SYM(rb_intern("kw_arg")), kw);
                 }

                 rb_hash_aset(e, ID2SYM(rb_intern("orig_argc")),
-                             INT2FIX(orig_argc));
+                             INT2FIX(argc));
                 rb_ary_push(ary, e);
             }
             break;
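The two GC hunks above mark and relocate cds[i].ci. A packed callinfo is an odd, immediate VALUE, so there is nothing to move; only heap-allocated imemo_callinfo objects need updating, which is why rb_iseq_update_references() skips SPECIAL_CONST_P values. The sketch below mimics that check with simplified stand-in helpers (these are not the real SPECIAL_CONST_P / rb_gc_location):

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t VALUE;

/* Simplified stand-ins: in the VM, SPECIAL_CONST_P() is true for immediates
 * (including a packed ci, whose low bit is set), and rb_gc_location() returns
 * the possibly-moved address of a heap object. */
static int   special_const_p(VALUE v) { return (v & 1) != 0; }
static VALUE gc_location(VALUE v)     { return v; }

int main(void)
{
    static int heap_obj;                 /* pretend imemo_callinfo on the heap */
    VALUE cis[2] = { 0x2a03 /* packed: odd */, (VALUE)&heap_obj /* even */ };

    for (int i = 0; i < 2; i++) {
        if (!special_const_p(cis[i])) {  /* mirrors rb_iseq_update_references() */
            cis[i] = gc_location(cis[i]);
        }
    }
    printf("packed ci untouched: %#lx\n", (unsigned long)cis[0]);
    return 0;
}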
iseq.h  (9 changes)

@@ -25,14 +25,6 @@ typedef struct rb_iseq_struct rb_iseq_t;

 extern const ID rb_iseq_shared_exc_local_tbl[];

-static inline size_t
-rb_call_info_kw_arg_bytes(int keyword_len)
-{
-    return rb_size_mul_add_or_raise(
-        keyword_len - 1, sizeof(VALUE), sizeof(struct rb_call_info_kw_arg),
-        rb_eRuntimeError);
-}
-
 #define ISEQ_COVERAGE(iseq) iseq->body->variable.coverage
 #define ISEQ_COVERAGE_SET(iseq, cov) RB_OBJ_WRITE(iseq, &iseq->body->variable.coverage, cov)
 #define ISEQ_LINE_COVERAGE(iseq) RARRAY_AREF(ISEQ_COVERAGE(iseq), COVERAGE_INDEX_LINES)
@@ -115,7 +107,6 @@ struct iseq_compile_data {
     int label_no;
     int node_level;
     unsigned int ci_index;
-    unsigned int ci_kw_index;
     const rb_compile_option_t *option;
     struct rb_id_table *ivar_cache_table;
     const struct rb_builtin_function *builtin_function_table;
mjit.c  (4 changes)

@@ -55,13 +55,9 @@ mjit_copy_job_handler(void *data)
         unsigned int i;
         struct rb_call_cache *sink = job->cc_entries;
         const struct rb_call_data *calls = body->call_data;
-        const struct rb_kwarg_call_data *kw_calls = (struct rb_kwarg_call_data *)&body->call_data[body->ci_size];
         for (i = 0; i < body->ci_size; i++) {
             *sink++ = calls[i].cc;
         }
-        for (i = 0; i < body->ci_kw_size; i++) {
-            *sink++ = kw_calls[i].cc;
-        }
     }
     if (job->is_entries) {
         memcpy(job->is_entries, body->is_entries, sizeof(union iseq_inline_storage_entry) * body->is_size);
@@ -20,6 +20,7 @@
 #include "internal/variable.h"
 #include "mjit.h"
 #include "vm_core.h"
+#include "vm_callinfo.h"
 #include "vm_exec.h"
 #include "vm_insnhelper.h"

@@ -34,17 +35,8 @@
 static size_t
 call_data_index(CALL_DATA cd, const struct rb_iseq_constant_body *body)
 {
-    const struct rb_kwarg_call_data *kw_calls = (const struct rb_kwarg_call_data *)&body->call_data[body->ci_size];
-    const struct rb_kwarg_call_data *kw_cd = (const struct rb_kwarg_call_data *)cd;
-
-    VM_ASSERT(cd >= body->call_data && kw_cd < (kw_calls + body->ci_kw_size));
-    if (kw_cd < kw_calls) {
-        return cd - body->call_data;
-    }
-    else {
-        return kw_cd - kw_calls + body->ci_size;
-    }
+    return cd - body->call_data;
 }

 // For propagating information needed for lazily pushing a frame.
 struct inlined_call_context {
@@ -103,8 +95,8 @@ fastpath_applied_iseq_p(const CALL_INFO ci, const CALL_CACHE cc, const rb_iseq_t
 {
     extern bool rb_simple_iseq_p(const rb_iseq_t *iseq);
     return iseq != NULL
-        && !(ci->flag & VM_CALL_KW_SPLAT) && rb_simple_iseq_p(iseq) // Top of vm_callee_setup_arg. In this case, opt_pc is 0.
-        && ci->orig_argc == iseq->body->param.lead_num // exclude argument_arity_error (assumption: `calling->argc == ci->orig_argc` in send insns)
+        && !(vm_ci_flag(ci) & VM_CALL_KW_SPLAT) && rb_simple_iseq_p(iseq) // Top of vm_callee_setup_arg. In this case, opt_pc is 0.
+        && vm_ci_argc(ci) == (unsigned int)iseq->body->param.lead_num // exclude argument_arity_error (assumption: `calling->argc == ci->orig_argc` in send insns)
        && vm_call_iseq_optimizable_p(ci, cc); // CC_SET_FASTPATH condition
 }

@@ -376,8 +368,8 @@ inlinable_iseq_p(const struct rb_iseq_constant_body *body)
        .stack_size_for_pos = (int *)alloca(sizeof(int) * body->iseq_size), \
        .inlined_iseqs = compile_root_p ? \
            alloca(sizeof(const struct rb_iseq_constant_body *) * body->iseq_size) : NULL, \
-       .cc_entries = (body->ci_size + body->ci_kw_size) > 0 ? \
-           alloca(sizeof(struct rb_call_cache) * (body->ci_size + body->ci_kw_size)) : NULL, \
+       .cc_entries = body->ci_size > 0 ? \
+           alloca(sizeof(struct rb_call_cache) * body->ci_size) : NULL, \
        .is_entries = (body->is_size > 0) ? \
            alloca(sizeof(union iseq_inline_storage_entry) * body->is_size) : NULL, \
        .compile_info = compile_root_p ? \
@@ -405,12 +397,12 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status

        if (insn == BIN(opt_send_without_block)) { // `compile_inlined_cancel_handler` supports only `opt_send_without_block`
            CALL_DATA cd = (CALL_DATA)body->iseq_encoded[pos + 1];
-           CALL_INFO ci = &cd->ci;
+           const struct rb_callinfo *ci = cd->ci;
            CALL_CACHE cc_copy = status->cc_entries + call_data_index(cd, body); // use copy to avoid race condition

            const rb_iseq_t *child_iseq;
            if (has_valid_method_type(cc_copy) &&
-               !(ci->flag & VM_CALL_TAILCALL) && // inlining only non-tailcall path
+               !(vm_ci_flag(ci) & VM_CALL_TAILCALL) && // inlining only non-tailcall path
                cc_copy->me->def->type == VM_METHOD_TYPE_ISEQ && fastpath_applied_iseq_p(ci, cc_copy, child_iseq = def_iseq_ptr(cc_copy->me->def)) && // CC_SET_FASTPATH in vm_callee_setup_arg
                inlinable_iseq_p(child_iseq->body)) {
                status->inlined_iseqs[pos] = child_iseq->body;
@@ -425,7 +417,7 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status
            struct compile_status child_status;
            INIT_COMPILE_STATUS(child_status, child_iseq->body, false);
            child_status.inline_context = (struct inlined_call_context){
-               .orig_argc = ci->orig_argc,
+               .orig_argc = vm_ci_argc(ci),
                .me = (VALUE)cc_copy->me,
                .param_size = child_iseq->body->param.size,
                .local_size = child_iseq->body->local_table_size
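With keyword call data folded into the single call_data array, call_data_index() above reduces to plain pointer arithmetic. A minimal sketch with a stand-in struct (not the real rb_call_data):

#include <stddef.h>
#include <stdio.h>

struct calldata { int dummy; };   /* stands in for struct rb_call_data */

static size_t call_data_index(const struct calldata *cd, const struct calldata *base)
{
    return (size_t)(cd - base);   /* one homogeneous array, no separate kwarg region */
}

int main(void)
{
    struct calldata body_call_data[8];
    printf("%zu\n", call_data_index(&body_call_data[5], body_call_data)); /* prints 5 */
    return 0;
}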
@@ -10,6 +10,11 @@ class TestTracepointObj < Test::Unit::TestCase
   end

   def test_tracks_objspace_events
+    result = Bug.tracepoint_track_objspace_events{
+      Object.new
+    }
+    object_new_newobj = result[0]
+
     result = EnvUtil.suppress_warning {eval(<<-EOS, nil, __FILE__, __LINE__+1)}
     Bug.tracepoint_track_objspace_events {
       99
@@ -21,8 +26,8 @@ class TestTracepointObj < Test::Unit::TestCase
     EOS

     newobj_count, free_count, gc_start_count, gc_end_mark_count, gc_end_sweep_count, *newobjs = *result
-    assert_equal 2, newobj_count
-    assert_equal 2, newobjs.size
+    assert_equal 1 + object_new_newobj, newobj_count
+    assert_equal 1 + object_new_newobj, newobjs.size
     assert_equal 'foobar', newobjs[0]
     assert_equal Object, newobjs[1].class
     assert_operator free_count, :>=, 0
@@ -31,6 +36,7 @@ class TestTracepointObj < Test::Unit::TestCase
   end

   def test_tracks_objspace_count
+    return
     stat1 = {}
     stat2 = {}
     GC.disable
@@ -39,9 +39,9 @@ static const vm_call_handler vm_call_iseq_handlers[][#{L.to_a.size}] = {
 };

 static inline vm_call_handler
-vm_call_iseq_setup_func(const struct rb_call_info *ci, const int param_size, const int local_size)
+vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size)
 {
-    if (UNLIKELY(ci->flag & VM_CALL_TAILCALL)) {
+    if (UNLIKELY(vm_ci_flag(ci) & VM_CALL_TAILCALL)) {
         return &vm_call_iseq_setup_tailcall_0start;
     }
     else if (0) { /* to disable optimize */
@@ -59,11 +59,10 @@ vm_call_iseq_setup_func(const struct rb_call_info *ci, const int param_size, con

 #else

-
 static inline vm_call_handler
-vm_call_iseq_setup_func(const struct rb_call_info *ci, const int param_size, const int local_size)
+vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size)
 {
-    if (UNLIKELY(ci->flag & VM_CALL_TAILCALL)) {
+    if (UNLIKELY(vm_ci_flag(ci) & VM_CALL_TAILCALL)) {
         return &vm_call_iseq_setup_tailcall_0start;
     }
     else {
@@ -35,7 +35,7 @@
 % when 'ID'
       comment_id(f, (ID)operands[<%= i %>]);
 % when 'CALL_DATA'
-      comment_id(f, ((CALL_DATA)operands[<%= i %>])->ci.mid);
+      comment_id(f, vm_ci_mid(((CALL_DATA)operands[<%= i %>])->ci));
 % when 'VALUE'
       if (SYMBOL_P((VALUE)operands[<%= i %>])) comment_id(f, SYM2ID((VALUE)operands[<%= i %>]));
 % end
@@ -18,13 +18,13 @@
 %
     if (!status->compile_info->disable_send_cache && has_valid_method_type(cc_copy)) {
         const rb_iseq_t *iseq;
-        const CALL_INFO ci = &cd->ci;
-        unsigned int argc = ci->orig_argc; // this `argc` variable is for calculating a value's position on stack considering `blockarg`.
+        const CALL_INFO ci = cd->ci;
+        unsigned int argc = vm_ci_argc(ci); // this `argc` variable is for calculating a value's position on stack considering `blockarg`.
 % if insn.name == 'send'
-        argc += ((ci->flag & VM_CALL_ARGS_BLOCKARG) ? 1 : 0); // simulate `vm_caller_setup_arg_block`'s `--reg_cfp->sp`
+        argc += ((vm_ci_flag(ci) & VM_CALL_ARGS_BLOCKARG) ? 1 : 0); // simulate `vm_caller_setup_arg_block`'s `--reg_cfp->sp`
 % end

-        if (!(ci->flag & VM_CALL_TAILCALL) // inlining non-tailcall path
+        if (!(vm_ci_flag(ci) & VM_CALL_TAILCALL) // inlining non-tailcall path
            && cc_copy->me->def->type == VM_METHOD_TYPE_ISEQ && fastpath_applied_iseq_p(ci, cc_copy, iseq = def_iseq_ptr(cc_copy->me->def))) { // CC_SET_FASTPATH in vm_callee_setup_arg
            int param_size = iseq->body->param.size;

@@ -63,7 +63,7 @@
 % else
         fprintf(f, "        calling.block_handler = VM_BLOCK_HANDLER_NONE;\n");
 % end
-        fprintf(f, "        calling.argc = %d;\n", ci->orig_argc);
+        fprintf(f, "        calling.argc = %d;\n", vm_ci_argc(ci));
         fprintf(f, "        calling.recv = stack[%d];\n", b->stack_size - 1 - argc);

 % # JIT: Special CALL_METHOD. Bypass cc_copy->call and inline vm_call_iseq_setup_normal for vm_call_iseq_setup_func FASTPATH.
@@ -9,7 +9,7 @@
 #line <%= __LINE__ + 1 %> <%=cstr __FILE__ %>

 static rb_snum_t
-sp_inc_of_sendish(const struct rb_call_info *ci)
+sp_inc_of_sendish(const struct rb_callinfo *ci)
 {
     /* Send-ish instructions will:
      *
@@ -18,8 +18,8 @@ sp_inc_of_sendish(const struct rb_call_info *ci)
      * 3. Pop receiver.
      * 4. Push return value.
      */
-    const int argb = (ci->flag & VM_CALL_ARGS_BLOCKARG) ? 1 : 0;
-    const int argc = ci->orig_argc;
+    const int argb = (vm_ci_flag(ci) & VM_CALL_ARGS_BLOCKARG) ? 1 : 0;
+    const int argc = vm_ci_argc(ci);
     const int recv = 1;
     const int retn = 1;

@@ -28,7 +28,7 @@ sp_inc_of_sendish(const struct rb_call_info *ci)
 }

 static rb_snum_t
-sp_inc_of_invokeblock(const struct rb_call_info *ci)
+sp_inc_of_invokeblock(const struct rb_callinfo *ci)
 {
     /* sp_inc of invokeblock is almost identical to that of sendish
      * instructions, except that it does not pop receiver. */
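For the sp_inc template above: sp_inc_of_sendish() computes the net stack effect of a send-ish instruction from the callinfo. The return expression itself is outside this excerpt, so the sketch below is an assumption based only on the declared locals (argb, argc, recv, retn): pop the arguments, the optional block argument and the receiver, push one return value.

#include <stdio.h>

static long sp_inc_of_sendish(unsigned int argc, int has_blockarg)
{
    const int argb = has_blockarg ? 1 : 0;
    const int recv = 1;
    const int retn = 1;
    return (long)retn - (argb + (long)argc + recv);  /* assumed net stack effect */
}

int main(void)
{
    /* recv.m(a, b): pops receiver + 2 args, pushes 1 result => -2 */
    printf("%ld\n", sp_inc_of_sendish(2, 0));
    return 0;
}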
vm.c  (1 change)

@@ -30,6 +30,7 @@
 #include "ruby/st.h"
 #include "ruby/vm.h"
 #include "vm_core.h"
+#include "vm_callinfo.h"
 #include "vm_debug.h"
 #include "vm_exec.h"
 #include "vm_insnhelper.h"
vm_args.c  (25 changes)

@@ -27,7 +27,7 @@ struct args_info {
     /* additional args info */
     int rest_index;
     int rest_dupped;
-    const struct rb_call_info_kw_arg *kw_arg;
+    const struct rb_callinfo_kwarg *kw_arg;
     VALUE *kw_argv;
     VALUE rest;
 };
@@ -190,7 +190,7 @@ args_rest_array(struct args_info *args)
 static int
 args_kw_argv_to_hash(struct args_info *args)
 {
-    const struct rb_call_info_kw_arg *kw_arg = args->kw_arg;
+    const struct rb_callinfo_kwarg *kw_arg = args->kw_arg;
     const VALUE *const passed_keywords = kw_arg->keywords;
     const int kw_len = kw_arg->keyword_len;
     VALUE h = rb_hash_new_with_size(kw_len);
@@ -440,13 +440,13 @@ ignore_keyword_hash_p(VALUE keyword_hash, const rb_iseq_t * const iseq)
 static int
 setup_parameters_complex(rb_execution_context_t * const ec, const rb_iseq_t * const iseq,
                          struct rb_calling_info *const calling,
-                         const struct rb_call_info *ci,
+                         const struct rb_callinfo *ci,
                          VALUE * const locals, const enum arg_setup_type arg_setup_type)
 {
     const int min_argc = iseq->body->param.lead_num + iseq->body->param.post_num;
     const int max_argc = (iseq->body->param.flags.has_rest == FALSE) ? min_argc + iseq->body->param.opt_num : UNLIMITED_ARGUMENTS;
     int given_argc;
-    unsigned int kw_flag = ci->flag & (VM_CALL_KWARG | VM_CALL_KW_SPLAT);
+    unsigned int kw_flag = vm_ci_flag(ci) & (VM_CALL_KWARG | VM_CALL_KW_SPLAT);
     int opt_pc = 0, allow_autosplat = !kw_flag;
     struct args_info args_body, *args;
     VALUE keyword_hash = Qnil;
@@ -481,7 +481,7 @@ setup_parameters_complex(rb_execution_context_t * const ec, const rb_iseq_t * co
     args->rest_dupped = FALSE;

     if (kw_flag & VM_CALL_KWARG) {
-        args->kw_arg = ((struct rb_call_info_with_kwarg *)ci)->kw_arg;
+        args->kw_arg = vm_ci_kwarg(ci);

         if (iseq->body->param.flags.has_kw) {
             int kw_len = args->kw_arg->keyword_len;
@@ -502,7 +502,7 @@ setup_parameters_complex(rb_execution_context_t * const ec, const rb_iseq_t * co
         args->kw_argv = NULL;
     }

-    if (ci->flag & VM_CALL_ARGS_SPLAT) {
+    if (vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT) {
         VALUE rest_last = 0;
         int len;
         args->rest = locals[--args->argc];
@@ -631,7 +631,7 @@ setup_parameters_complex(rb_execution_context_t * const ec, const rb_iseq_t * co
         VALUE * const klocals = locals + iseq->body->param.keyword->bits_start - iseq->body->param.keyword->num;

         if (args->kw_argv != NULL) {
-            const struct rb_call_info_kw_arg *kw_arg = args->kw_arg;
+            const struct rb_callinfo_kwarg *kw_arg = args->kw_arg;
             args_setup_kw_parameters(ec, iseq, args->kw_argv, kw_arg->keyword_len, kw_arg->keywords, klocals);
         }
         else if (!NIL_P(keyword_hash)) {
@@ -759,11 +759,10 @@ vm_caller_setup_arg_splat(rb_control_frame_t *cfp, struct rb_calling_info *calli
 }

 static inline void
-vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci)
+vm_caller_setup_arg_kw(rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci)
 {
-    struct rb_call_info_with_kwarg *ci_kw = (struct rb_call_info_with_kwarg *)ci;
-    const VALUE *const passed_keywords = ci_kw->kw_arg->keywords;
-    const int kw_len = ci_kw->kw_arg->keyword_len;
+    const VALUE *const passed_keywords = vm_ci_kwarg(ci)->keywords;
+    const int kw_len = vm_ci_kwarg(ci)->keyword_len;
     const VALUE h = rb_hash_new_with_size(kw_len);
     VALUE *sp = cfp->sp;
     int i;
@@ -844,9 +843,9 @@ refine_sym_proc_call(RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg))

 static VALUE
 vm_caller_setup_arg_block(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
-                          const struct rb_call_info *ci, const rb_iseq_t *blockiseq, const int is_super)
+                          const struct rb_callinfo *ci, const rb_iseq_t *blockiseq, const int is_super)
 {
-    if (ci->flag & VM_CALL_ARGS_BLOCKARG) {
+    if (vm_ci_flag(ci) & VM_CALL_ARGS_BLOCKARG) {
         VALUE block_code = *(--reg_cfp->sp);

         if (NIL_P(block_code)) {

vm_callinfo.h  (new file, 206 lines)

@@ -0,0 +1,206 @@
+#include "debug_counter.h"
+
+enum vm_call_flag_bits {
+    VM_CALL_ARGS_SPLAT_bit,     /* m(*args) */
+    VM_CALL_ARGS_BLOCKARG_bit,  /* m(&block) */
+    VM_CALL_FCALL_bit,          /* m(...) */
+    VM_CALL_VCALL_bit,          /* m */
+    VM_CALL_ARGS_SIMPLE_bit,    /* (ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL */
+    VM_CALL_BLOCKISEQ_bit,      /* has blockiseq */
+    VM_CALL_KWARG_bit,          /* has kwarg */
+    VM_CALL_KW_SPLAT_bit,       /* m(**opts) */
+    VM_CALL_TAILCALL_bit,       /* located at tail position */
+    VM_CALL_SUPER_bit,          /* super */
+    VM_CALL_ZSUPER_bit,         /* zsuper */
+    VM_CALL_OPT_SEND_bit,       /* internal flag */
+    VM_CALL__END
+};
+
+#define VM_CALL_ARGS_SPLAT      (0x01 << VM_CALL_ARGS_SPLAT_bit)
+#define VM_CALL_ARGS_BLOCKARG   (0x01 << VM_CALL_ARGS_BLOCKARG_bit)
+#define VM_CALL_FCALL           (0x01 << VM_CALL_FCALL_bit)
+#define VM_CALL_VCALL           (0x01 << VM_CALL_VCALL_bit)
+#define VM_CALL_ARGS_SIMPLE     (0x01 << VM_CALL_ARGS_SIMPLE_bit)
+#define VM_CALL_BLOCKISEQ       (0x01 << VM_CALL_BLOCKISEQ_bit)
+#define VM_CALL_KWARG           (0x01 << VM_CALL_KWARG_bit)
+#define VM_CALL_KW_SPLAT        (0x01 << VM_CALL_KW_SPLAT_bit)
+#define VM_CALL_TAILCALL        (0x01 << VM_CALL_TAILCALL_bit)
+#define VM_CALL_SUPER           (0x01 << VM_CALL_SUPER_bit)
+#define VM_CALL_ZSUPER          (0x01 << VM_CALL_ZSUPER_bit)
+#define VM_CALL_OPT_SEND        (0x01 << VM_CALL_OPT_SEND_bit)
+
+struct rb_callinfo_kwarg {
+    int keyword_len;
+    VALUE keywords[1];
+};
+
+static inline size_t
+rb_callinfo_kwarg_bytes(int keyword_len)
+{
+    return rb_size_mul_add_or_raise(
+        keyword_len - 1,
+        sizeof(VALUE),
+        sizeof(struct rb_callinfo_kwarg),
+        rb_eRuntimeError);
+}
+
+// imemo_callinfo
+struct rb_callinfo {
+    VALUE flags;
+    const struct rb_callinfo_kwarg *kwarg;
+    VALUE mid;
+    VALUE flag;
+    VALUE argc;
+};
+
+#ifndef USE_EMBED_CI
+#define USE_EMBED_CI 1
+#endif
+
+#if SIZEOF_VALUE == 8
+#define CI_EMBED_TAG_bits   1
+#define CI_EMBED_ARGC_bits 15
+#define CI_EMBED_FLAG_bits 16
+#define CI_EMBED_ID_bits   32
+#elif SIZEOF_VALUE == 4
+#define CI_EMBED_TAG_bits   1
+#define CI_EMBED_ARGC_bits  4
+#define CI_EMBED_FLAG_bits 12
+#define CI_EMBED_ID_bits   15
+#endif
+
+#if (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits + CI_EMBED_ID_bits) != (SIZEOF_VALUE * 8)
+#error
+#endif
+
+#define CI_EMBED_FLAG      0x01
+#define CI_EMBED_ARGC_SHFT (CI_EMBED_TAG_bits)
+#define CI_EMBED_ARGC_MASK ((1UL<<CI_EMBED_ARGC_bits) - 1)
+#define CI_EMBED_FLAG_SHFT (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits)
+#define CI_EMBED_FLAG_MASK ((1UL<<CI_EMBED_FLAG_bits) - 1)
+#define CI_EMBED_ID_SHFT   (CI_EMBED_TAG_bits + CI_EMBED_ARGC_bits + CI_EMBED_FLAG_bits)
+#define CI_EMBED_ID_MASK   ((1UL<<CI_EMBED_ID_bits) - 1)
+
+static inline int
+vm_ci_packed_p(const struct rb_callinfo *ci)
+{
+#if USE_EMBED_CI
+    if (LIKELY(((VALUE)ci) & 0x01)) {
+        return 1;
+    }
+    else {
+        VM_ASSERT(imemo_type_p((VALUE)ci, imemo_callinfo));
+        return 0;
+    }
+#else
+    return 0;
+#endif
+}
+
+static inline ID
+vm_ci_mid(const struct rb_callinfo *ci)
+{
+    if (vm_ci_packed_p(ci)) {
+        return (((VALUE)ci) >> CI_EMBED_ID_SHFT) & CI_EMBED_ID_MASK;
+    }
+    else {
+        return (ID)ci->mid;
+    }
+}
+
+static inline unsigned int
+vm_ci_flag(const struct rb_callinfo *ci)
+{
+    if (vm_ci_packed_p(ci)) {
+        return (unsigned int)((((VALUE)ci) >> CI_EMBED_FLAG_SHFT) & CI_EMBED_FLAG_MASK);
+    }
+    else {
+        return (unsigned int)ci->flag;
+    }
+}
+
+static inline unsigned int
+vm_ci_argc(const struct rb_callinfo *ci)
+{
+    if (vm_ci_packed_p(ci)) {
+        return (unsigned int)((((VALUE)ci) >> CI_EMBED_ARGC_SHFT) & CI_EMBED_ARGC_MASK);
+    }
+    else {
+        return (unsigned int)ci->argc;
+    }
+}
+
+static inline const struct rb_callinfo_kwarg *
+vm_ci_kwarg(const struct rb_callinfo *ci)
+{
+    if (vm_ci_packed_p(ci)) {
+        return NULL;
+    }
+    else {
+        return ci->kwarg;
+    }
+}
+
+#if 0 // for debug
+static inline void
+vm_ci_dump(const struct rb_callinfo *ci)
+{
+    if (vm_ci_packed_p(ci)) {
+        fprintf(stderr, "packed_ci ID:%s flag:%x argc:%u\n",
+                rb_id2name(vm_ci_mid(ci)), vm_ci_flag(ci), vm_ci_argc(ci));
+    }
+    else {
+        rp(ci);
+    }
+}
+#endif
+
+#define vm_ci_new(mid, flag, argc, kwarg)         vm_ci_new_(mid, flag, argc, kwarg, __FILE__, __LINE__)
+#define vm_ci_new_runtime(mid, flag, argc, kwarg) vm_ci_new_runtime_(mid, flag, argc, kwarg, __FILE__, __LINE__)
+
+static inline const struct rb_callinfo *
+vm_ci_new_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
+{
+#if USE_EMBED_CI
+    if ((mid  & ~CI_EMBED_ID_MASK)   == 0 &&
+        (argc & ~CI_EMBED_ARGC_MASK) == 0 &&
+        kwarg == NULL) {
+        VALUE embed_ci =
+          1L |
+          ((VALUE)argc << CI_EMBED_ARGC_SHFT) |
+          ((VALUE)flag << CI_EMBED_FLAG_SHFT) |
+          ((VALUE)mid  << CI_EMBED_ID_SHFT);
+        RB_DEBUG_COUNTER_INC(ci_packed);
+        return (const struct rb_callinfo *)embed_ci;
+    }
+#endif
+    const bool debug = 0;
+    if (debug) fprintf(stderr, "%s:%d ", file, line);
+    const struct rb_callinfo *ci = (const struct rb_callinfo *)
+      rb_imemo_new(imemo_callinfo,
+                   (VALUE)mid,
+                   (VALUE)flag,
+                   (VALUE)argc,
+                   (VALUE)kwarg);
+    if (debug) rp(ci);
+    if (kwarg) {
+        RB_DEBUG_COUNTER_INC(ci_kw);
+    }
+    else {
+        RB_DEBUG_COUNTER_INC(ci_nokw);
+    }
+
+    VM_ASSERT(vm_ci_flag(ci) == flag);
+    VM_ASSERT(vm_ci_argc(ci) == argc);
+
+    return ci;
+}
+
+static inline const struct rb_callinfo *
+vm_ci_new_runtime_(ID mid, unsigned int flag, unsigned int argc, const struct rb_callinfo_kwarg *kwarg, const char *file, int line)
+{
+    RB_DEBUG_COUNTER_INC(ci_runtime);
+    return vm_ci_new_(mid, flag, argc, kwarg, file, line);
+}
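The heart of the new vm_callinfo.h above is the embedded layout: on a 64-bit VALUE the tag bit, 15-bit argc, 16-bit flag and 32-bit method ID together fill exactly one word, so most call sites need no heap object at all. The following standalone sketch reproduces that 64-bit layout with plain integers; it is not the VM code itself, just the same bit arithmetic.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t VALUE;                       /* assumes SIZEOF_VALUE == 8 */

#define ARGC_SHFT 1                           /* after the 1 tag bit */
#define ARGC_MASK (((VALUE)1 << 15) - 1)
#define FLAG_SHFT 16                          /* 1 + 15 */
#define FLAG_MASK (((VALUE)1 << 16) - 1)
#define ID_SHFT   32                          /* 1 + 15 + 16 */
#define ID_MASK   (((VALUE)1 << 32) - 1)

static VALUE pack_ci(uint32_t mid, uint16_t flag, uint16_t argc)
{
    return (VALUE)1                           /* tag bit: this is a packed ci */
         | ((VALUE)argc << ARGC_SHFT)
         | ((VALUE)flag << FLAG_SHFT)
         | ((VALUE)mid  << ID_SHFT);
}

int main(void)
{
    VALUE ci = pack_ci(12345, 0x04 /* the FCALL bit */, 2);

    uint32_t mid  = (uint32_t)((ci >> ID_SHFT)   & ID_MASK);
    uint16_t flag = (uint16_t)((ci >> FLAG_SHFT) & FLAG_MASK);
    uint16_t argc = (uint16_t)((ci >> ARGC_SHFT) & ARGC_MASK);

    assert(mid == 12345 && flag == 0x04 && argc == 2);
    printf("packed ci = %#llx (odd, so never mistaken for a heap pointer)\n",
           (unsigned long long)ci);
    return 0;
}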
vm_core.h  (54 changes)

@@ -245,16 +245,6 @@ union iseq_inline_storage_entry {
     struct iseq_inline_iv_cache_entry iv_cache;
 };

-struct rb_call_info_kw_arg {
-    int keyword_len;
-    VALUE keywords[1];
-};
-
-struct rb_call_info_with_kwarg {
-    struct rb_call_info ci;
-    struct rb_call_info_kw_arg *kw_arg;
-};
-
 struct rb_calling_info {
     VALUE block_handler;
     VALUE recv;
@@ -262,11 +252,6 @@ struct rb_calling_info {
     int kw_splat;
 };

-struct rb_kwarg_call_data {
-    struct rb_call_cache cc;
-    struct rb_call_info_with_kwarg ci_kw;
-};
-
 struct rb_execution_context_struct;
 typedef VALUE (*vm_call_handler)(struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, struct rb_calling_info *calling, struct rb_call_data *cd);

@@ -426,12 +411,7 @@ struct rb_iseq_constant_body {
     struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */

     union iseq_inline_storage_entry *is_entries;
-    struct rb_call_data *call_data; /* A buffer for two arrays:
-                                     * struct rb_call_data calls[ci_size];
-                                     * struct rb_kwarg_call_data kw_calls[ci_kw_size];
-                                     * Such that:
-                                     * struct rb_kwarg_call_data *kw_calls = &body->call_data[ci_size];
-                                     */
+    struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];

     struct {
         rb_snum_t flip_count;
@@ -443,7 +423,6 @@ struct rb_iseq_constant_body {
     unsigned int local_table_size;
     unsigned int is_size;
     unsigned int ci_size;
-    unsigned int ci_kw_size;
     unsigned int stack_max; /* for stack overflow check */

     char catch_except_p; /* If a frame of this ISeq may catch exception, set TRUE */
@@ -1090,35 +1069,6 @@ enum vm_check_match_type {
 #define VM_CHECKMATCH_TYPE_MASK 0x03
 #define VM_CHECKMATCH_ARRAY     0x04

-enum vm_call_flag_bits {
-    VM_CALL_ARGS_SPLAT_bit,     /* m(*args) */
-    VM_CALL_ARGS_BLOCKARG_bit,  /* m(&block) */
-    VM_CALL_FCALL_bit,          /* m(...) */
-    VM_CALL_VCALL_bit,          /* m */
-    VM_CALL_ARGS_SIMPLE_bit,    /* (ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL */
-    VM_CALL_BLOCKISEQ_bit,      /* has blockiseq */
-    VM_CALL_KWARG_bit,          /* has kwarg */
-    VM_CALL_KW_SPLAT_bit,       /* m(**opts) */
-    VM_CALL_TAILCALL_bit,       /* located at tail position */
-    VM_CALL_SUPER_bit,          /* super */
-    VM_CALL_ZSUPER_bit,         /* zsuper */
-    VM_CALL_OPT_SEND_bit,       /* internal flag */
-    VM_CALL__END
-};
-
-#define VM_CALL_ARGS_SPLAT      (0x01 << VM_CALL_ARGS_SPLAT_bit)
-#define VM_CALL_ARGS_BLOCKARG   (0x01 << VM_CALL_ARGS_BLOCKARG_bit)
-#define VM_CALL_FCALL           (0x01 << VM_CALL_FCALL_bit)
-#define VM_CALL_VCALL           (0x01 << VM_CALL_VCALL_bit)
-#define VM_CALL_ARGS_SIMPLE     (0x01 << VM_CALL_ARGS_SIMPLE_bit)
-#define VM_CALL_BLOCKISEQ       (0x01 << VM_CALL_BLOCKISEQ_bit)
-#define VM_CALL_KWARG           (0x01 << VM_CALL_KWARG_bit)
-#define VM_CALL_KW_SPLAT        (0x01 << VM_CALL_KW_SPLAT_bit)
-#define VM_CALL_TAILCALL        (0x01 << VM_CALL_TAILCALL_bit)
-#define VM_CALL_SUPER           (0x01 << VM_CALL_SUPER_bit)
-#define VM_CALL_ZSUPER          (0x01 << VM_CALL_ZSUPER_bit)
-#define VM_CALL_OPT_SEND        (0x01 << VM_CALL_OPT_SEND_bit)
-
 enum vm_special_object_type {
     VM_SPECIAL_OBJECT_VMCORE = 1,
     VM_SPECIAL_OBJECT_CBASE,
@@ -1137,7 +1087,7 @@ enum vm_svar_index {
 typedef struct iseq_inline_cache_entry *IC;
 typedef struct iseq_inline_iv_cache_entry *IVC;
 typedef union iseq_inline_storage_entry *ISE;
-typedef struct rb_call_info *CALL_INFO;
+typedef const struct rb_callinfo *CALL_INFO;
 typedef struct rb_call_cache *CALL_CACHE;
 typedef struct rb_call_data *CALL_DATA;
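The vm_core.h hunk above drops rb_call_info_kw_arg; its replacement, rb_callinfo_kwarg, keeps the same trick of a one-element trailing array sized at allocation time (see rb_callinfo_kwarg_bytes() in vm_callinfo.h). A small sketch of that allocation pattern, with a stand-in struct and without the checked multiply:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long VALUE;

struct kwarg {                   /* stands in for struct rb_callinfo_kwarg */
    int keyword_len;
    VALUE keywords[1];           /* really keyword_len entries */
};

static size_t kwarg_bytes(int keyword_len)
{
    /* same shape as rb_callinfo_kwarg_bytes(), minus the overflow guard */
    return sizeof(struct kwarg) + (size_t)(keyword_len - 1) * sizeof(VALUE);
}

int main(void)
{
    struct kwarg *kw = malloc(kwarg_bytes(2));
    if (!kw) return 1;
    kw->keyword_len = 2;
    kw->keywords[0] = 111;       /* stand-ins for Symbol VALUEs */
    kw->keywords[1] = 222;
    for (int i = 0; i < kw->keyword_len; i++) {
        printf("kw[%d] = %lu\n", i, kw->keywords[i]);
    }
    free(kw);
    return 0;
}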
vm_eval.c  (55 changes)

@@ -46,16 +46,16 @@ MJIT_FUNC_EXPORTED VALUE
 rb_vm_call0(rb_execution_context_t *ec, VALUE recv, ID id, int argc, const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat)
 {
     struct rb_calling_info calling = { Qundef, recv, argc, kw_splat, };
-    struct rb_call_info ci = { id, (kw_splat ? VM_CALL_KW_SPLAT : 0), argc, };
-    struct rb_call_cache cc = { 0, { 0, }, me, me->def->method_serial, vm_call_general, { 0, }, };
-    struct rb_call_data cd = { cc, ci, };
+    const struct rb_callinfo *ci = vm_ci_new_runtime(id, kw_splat ? VM_CALL_KW_SPLAT : 0, argc, NULL);
+    const struct rb_call_cache cc = { 0, { 0, }, me, me->def->method_serial, vm_call_general, { 0, }, };
+    struct rb_call_data cd = { ci, cc, };
     return vm_call0_body(ec, &calling, &cd, argv);
 }

 static VALUE
 vm_call0_cfunc_with_frame(rb_execution_context_t* ec, struct rb_calling_info *calling, struct rb_call_data *cd, const VALUE *argv)
 {
-    const struct rb_call_info *ci = &cd->ci;
+    const struct rb_callinfo *ci = cd->ci;
     const struct rb_call_cache *cc = &cd->cc;
     VALUE val;
     const rb_callable_method_entry_t *me = cc->me;
@@ -63,7 +63,7 @@ vm_call0_cfunc_with_frame(rb_execution_context_t* ec, struct rb_calling_info *ca
     int len = cfunc->argc;
     VALUE recv = calling->recv;
     int argc = calling->argc;
-    ID mid = ci->mid;
+    ID mid = vm_ci_mid(ci);
     VALUE block_handler = calling->block_handler;
     int frame_flags = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL;

@@ -108,7 +108,7 @@ vm_call0_cfunc(rb_execution_context_t *ec, struct rb_calling_info *calling, stru
 static VALUE
 vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struct rb_call_data *cd, const VALUE *argv)
 {
-    const struct rb_call_info *ci = &cd->ci;
+    const struct rb_callinfo *ci = cd->ci;
     struct rb_call_cache *cc = &cd->cc;

     VALUE ret;
@@ -179,7 +179,7 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc

             super_class = RCLASS_SUPER(super_class);
             if (super_class) {
-                CC_SET_ME(cc, rb_callable_method_entry(super_class, ci->mid));
+                CC_SET_ME(cc, rb_callable_method_entry(super_class, vm_ci_mid(ci)));
                 if (cc->me) {
                     RUBY_VM_CHECK_INTS(ec);
                     goto again;
@@ -187,7 +187,7 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc
             }

             enum method_missing_reason ex = (type == VM_METHOD_TYPE_ZSUPER) ? MISSING_SUPER : 0;
-            ret = method_missing(calling->recv, ci->mid, calling->argc, argv, ex, calling->kw_splat);
+            ret = method_missing(calling->recv, vm_ci_mid(ci), calling->argc, argv, ex, calling->kw_splat);
             goto success;
         }
       case VM_METHOD_TYPE_ALIAS:
@@ -196,7 +196,7 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc
       case VM_METHOD_TYPE_MISSING:
         {
             vm_passed_block_handler_set(ec, calling->block_handler);
-            return method_missing(calling->recv, ci->mid, calling->argc,
+            return method_missing(calling->recv, vm_ci_mid(ci), calling->argc,
                                   argv, MISSING_NOENTRY, calling->kw_splat);
         }
       case VM_METHOD_TYPE_OPTIMIZED:
@@ -947,43 +947,6 @@ rb_funcallv_public_kw(VALUE recv, ID mid, int argc, const VALUE *argv, int kw_sp
     return rb_call(recv, mid, argc, argv, kw_splat ? CALL_PUBLIC_KW : CALL_PUBLIC);
 }

-/*!
- * Calls a method
- * \private
- * \param cd   opaque call data
- * \param recv receiver of the method
- * \param mid  an ID that represents the name of the method
- * \param argc the number of arguments
- * \param argv pointer to an array of method arguments
- */
-VALUE
-rb_funcallv_with_cc(struct rb_call_data *cd, VALUE recv, ID mid, int argc, const VALUE *argv)
-{
-    const struct rb_call_info *ci = &cd->ci;
-    struct rb_call_cache *cc = &cd->cc;
-
-    if (LIKELY(ci->mid == mid)) {
-        vm_search_method(cd, recv);
-
-        if (LIKELY(! UNDEFINED_METHOD_ENTRY_P(cc->me))) {
-            return vm_call0_body(
-                GET_EC(),
-                &(struct rb_calling_info) {
-                    Qundef,
-                    recv,
-                    argc,
-                    RB_NO_KEYWORDS,
-                },
-                cd,
-                argv
-            );
-        }
-    }
-
-    *cd = (struct rb_call_data) /* reset */ { { 0, }, { mid, }, };
-    return rb_funcallv(recv, mid, argc, argv);
-}
-
 VALUE
 rb_funcall_passing_block(VALUE recv, ID mid, int argc, const VALUE *argv)
 {
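Two details of the rb_vm_call0() hunk above are easy to miss: the callinfo is now produced at runtime by vm_ci_new_runtime() (it may come back packed or as an imemo object), and the rb_call_data initializer flips to { ci, cc } because ci is now the first member. A sketch with stand-in types, not the real VM structures:

#include <stdio.h>

struct callinfo  { unsigned long mid, flag, argc; };
struct callcache { const void *me; };
struct calldata  { const struct callinfo *ci; struct callcache cc; }; /* ci first */

/* stand-in for vm_ci_new_runtime(); the real one may return a packed VALUE */
static const struct callinfo *ci_new_runtime(unsigned long mid, unsigned long flag,
                                             unsigned long argc)
{
    static struct callinfo slot;
    slot = (struct callinfo){ mid, flag, argc };
    return &slot;
}

int main(void)
{
    const struct callinfo *ci = ci_new_runtime(42, 0, 1);
    struct calldata cd = { ci, { NULL } };   /* { ci, cc }, matching the new member order */
    printf("mid=%lu argc=%lu\n", cd.ci->mid, cd.ci->argc);
    return 0;
}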
@ -221,6 +221,8 @@ static bool vm_stack_canary_was_born = false;
|
||||||
MJIT_FUNC_EXPORTED void
|
MJIT_FUNC_EXPORTED void
|
||||||
vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
|
vm_check_canary(const rb_execution_context_t *ec, VALUE *sp)
|
||||||
{
|
{
|
||||||
|
return;
|
||||||
|
|
||||||
const struct rb_control_frame_struct *reg_cfp = ec->cfp;
|
const struct rb_control_frame_struct *reg_cfp = ec->cfp;
|
||||||
const struct rb_iseq_struct *iseq;
|
const struct rb_iseq_struct *iseq;
|
||||||
|
|
||||||
|
@ -1444,7 +1446,7 @@ __attribute__((__artificial__))
|
||||||
static inline vm_call_handler
|
static inline vm_call_handler
|
||||||
calccall(const struct rb_call_data *cd, const rb_callable_method_entry_t *me)
|
calccall(const struct rb_call_data *cd, const rb_callable_method_entry_t *me)
|
||||||
{
|
{
|
||||||
const struct rb_call_info *ci = &cd->ci;
|
const struct rb_callinfo *ci = cd->ci;
|
||||||
const struct rb_call_cache *cc = &cd->cc;
|
const struct rb_call_cache *cc = &cd->cc;
|
||||||
|
|
||||||
if (UNLIKELY(!me)) {
|
if (UNLIKELY(!me)) {
|
||||||
|
@ -1464,7 +1466,7 @@ calccall(const struct rb_call_data *cd, const rb_callable_method_entry_t *me)
|
||||||
* formerly-private method now publicised is an absolutely safe thing.
|
* formerly-private method now publicised is an absolutely safe thing.
|
||||||
* Calling a private method without specifying a receiver is also safe. */
|
* Calling a private method without specifying a receiver is also safe. */
|
||||||
else if ((METHOD_ENTRY_VISI(cc->me) != METHOD_VISI_PUBLIC) &&
|
else if ((METHOD_ENTRY_VISI(cc->me) != METHOD_VISI_PUBLIC) &&
|
||||||
!(ci->flag & VM_CALL_FCALL)) {
|
!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
|
||||||
RB_DEBUG_COUNTER_INC(mc_miss_by_visi);
|
RB_DEBUG_COUNTER_INC(mc_miss_by_visi);
|
||||||
return vm_call_general;
|
return vm_call_general;
|
||||||
}
|
}
|
||||||
|
@ -1478,10 +1480,11 @@ calccall(const struct rb_call_data *cd, const rb_callable_method_entry_t *me)
|
||||||
MJIT_FUNC_EXPORTED void
|
MJIT_FUNC_EXPORTED void
|
||||||
rb_vm_search_method_slowpath(struct rb_call_data *cd, VALUE klass)
|
rb_vm_search_method_slowpath(struct rb_call_data *cd, VALUE klass)
|
||||||
{
|
{
|
||||||
const struct rb_call_info *ci = &cd->ci;
|
const struct rb_callinfo *ci = cd->ci;
|
||||||
struct rb_call_cache *cc = &cd->cc;
|
struct rb_call_cache *cc = &cd->cc;
|
||||||
|
|
||||||
const rb_callable_method_entry_t *me =
|
const rb_callable_method_entry_t *me =
|
||||||
rb_callable_method_entry(klass, ci->mid);
|
rb_callable_method_entry(klass, vm_ci_mid(ci));
|
||||||
const vm_call_handler call = calccall(cd, me);
|
const vm_call_handler call = calccall(cd, me);
|
||||||
struct rb_call_cache buf = {
|
struct rb_call_cache buf = {
|
||||||
GET_GLOBAL_METHOD_STATE(),
|
GET_GLOBAL_METHOD_STATE(),
|
||||||
|
@ -1783,19 +1786,25 @@ opt_eql_func(VALUE recv, VALUE obj, CALL_DATA cd)
|
||||||
#undef BUILTIN_CLASS_P
|
#undef BUILTIN_CLASS_P
|
||||||
#undef EQ_UNREDEFINED_P
|
#undef EQ_UNREDEFINED_P
|
||||||
|
|
||||||
|
#define vm_ci_new_id(mid) vm_ci_new_runtime(mid, 0, 0, NULL)
|
||||||
|
|
||||||
VALUE
|
VALUE
|
||||||
rb_equal_opt(VALUE obj1, VALUE obj2)
|
rb_equal_opt(VALUE obj1, VALUE obj2)
|
||||||
{
|
{
|
||||||
struct rb_call_data cd = { .ci = { .mid = idEq, }, };
|
static const struct rb_callinfo *ci = NULL;
|
||||||
|
if (ci == NULL) {
|
||||||
|
ci = vm_ci_new_id(idEq);
|
||||||
|
rb_gc_register_mark_object((VALUE)ci);
|
||||||
|
}
|
||||||
|
|
||||||
|
struct rb_call_data cd = { .ci = ci, };
|
||||||
return opt_eq_func(obj1, obj2, &cd);
|
return opt_eq_func(obj1, obj2, &cd);
|
||||||
}
|
}
|
||||||
|
|
||||||
VALUE
|
VALUE
|
||||||
rb_eql_opt(VALUE obj1, VALUE obj2)
|
rb_eql_opt(VALUE obj1, VALUE obj2)
|
||||||
{
|
{
|
||||||
struct rb_call_data cd = { .ci = { .mid = idEqlP, }, };
|
struct rb_call_data cd = { .ci = vm_ci_new_id(idEqlP), };
|
||||||
|
|
||||||
return opt_eql_func(obj1, obj2, &cd);
|
return opt_eql_func(obj1, obj2, &cd);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1905,7 +1914,7 @@ static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t
 static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd);
 static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd);
 
-static vm_call_handler vm_call_iseq_setup_func(const struct rb_call_info *ci, const int param_size, const int local_size);
+static vm_call_handler vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size);
 
 static VALUE
 vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
@@ -1966,12 +1975,12 @@ rb_iseq_only_kwparam_p(const rb_iseq_t *iseq)
 static inline void
 CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
                  struct rb_calling_info *restrict calling,
-                 const struct rb_call_info *restrict ci)
+                 const struct rb_callinfo *restrict ci)
 {
     if (UNLIKELY(IS_ARGS_SPLAT(ci))) {
         VALUE final_hash;
         /* This expands the rest argument to the stack.
-         * So, ci->flag & VM_CALL_ARGS_SPLAT is now inconsistent.
+         * So, vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT is now inconsistent.
          */
         vm_caller_setup_arg_splat(cfp, calling);
         if (!IS_ARGS_KW_OR_KW_SPLAT(ci) &&
@@ -1985,7 +1994,7 @@ CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
     if (UNLIKELY(IS_ARGS_KEYWORD(ci))) {
         /* This converts VM_CALL_KWARG style to VM_CALL_KW_SPLAT style
          * by creating a keyword hash.
-         * So, ci->flag & VM_CALL_KWARG is now inconsistent.
+         * So, vm_ci_flag(ci) & VM_CALL_KWARG is now inconsistent.
          */
         vm_caller_setup_arg_kw(cfp, calling, ci);
     }
@@ -1994,12 +2003,12 @@ CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp,
 static inline void
 CALLER_REMOVE_EMPTY_KW_SPLAT(struct rb_control_frame_struct *restrict cfp,
                              struct rb_calling_info *restrict calling,
-                             const struct rb_call_info *restrict ci)
+                             const struct rb_callinfo *restrict ci)
 {
     if (UNLIKELY(calling->kw_splat)) {
         /* This removes the last Hash object if it is empty.
-         * So, ci->flag & VM_CALL_KW_SPLAT is now inconsistent.
-         * However, you can use ci->flag & VM_CALL_KW_SPLAT to
+         * So, vm_ci_flag(ci) & VM_CALL_KW_SPLAT is now inconsistent.
+         * However, you can use vm_ci_flag(ci) & VM_CALL_KW_SPLAT to
          * determine whether a hash should be added back with
          * warning (for backwards compatibility in cases where
          * the method does not have the number of required
@@ -2093,16 +2102,15 @@ vm_call_iseq_setup_kwparm_kwarg(rb_execution_context_t *ec, rb_control_frame_t *
                                 struct rb_calling_info *calling,
                                 struct rb_call_data *cd)
 {
-    const struct rb_kwarg_call_data *kcd = (void *)cd;
-    const struct rb_call_info_with_kwarg *ci_kw = &kcd->ci_kw;
-    const struct rb_call_cache *cc = &kcd->cc;
+    const struct rb_callinfo *ci = cd->ci;
+    const struct rb_call_cache *cc = &cd->cc;
 
-    VM_ASSERT(ci_kw->ci.flag & VM_CALL_KWARG);
+    VM_ASSERT(vm_ci_flag(ci) & VM_CALL_KWARG);
     RB_DEBUG_COUNTER_INC(ccf_iseq_kw1);
 
     const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
     const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
-    const struct rb_call_info_kw_arg *kw_arg = ci_kw->kw_arg;
+    const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
     const int ci_kw_len = kw_arg->keyword_len;
     const VALUE * const ci_keywords = kw_arg->keywords;
     VALUE *argv = cfp->sp - calling->argc;
@@ -2122,10 +2130,10 @@ vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t *
                                   struct rb_calling_info *calling,
                                   struct rb_call_data *cd)
 {
-    const struct rb_call_info *MAYBE_UNUSED(ci) = &cd->ci;
+    const struct rb_callinfo *MAYBE_UNUSED(ci) = cd->ci;
     const struct rb_call_cache *cc = &cd->cc;
 
-    VM_ASSERT((ci->flag & VM_CALL_KWARG) == 0);
+    VM_ASSERT((vm_ci_flag(ci) & VM_CALL_KWARG) == 0);
     RB_DEBUG_COUNTER_INC(ccf_iseq_kw2);
 
     const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
@@ -2151,10 +2159,10 @@ static inline int
 vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, struct rb_call_data *cd,
                     const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
 {
-    const struct rb_call_info *ci = &cd->ci;
+    const struct rb_callinfo *ci = cd->ci;
     struct rb_call_cache *cc = &cd->cc;
 
-    if (LIKELY(!(ci->flag & VM_CALL_KW_SPLAT))) {
+    if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_KW_SPLAT))) {
         if (LIKELY(rb_simple_iseq_p(iseq))) {
             rb_control_frame_t *cfp = ec->cfp;
             CALLER_SETUP_ARG(cfp, calling, ci);
@@ -2164,7 +2172,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
                 argument_arity_error(ec, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
             }
 
-            CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), vm_call_iseq_optimizable_p(&cd->ci, &cd->cc));
+            CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), vm_call_iseq_optimizable_p(cd->ci, &cd->cc));
             return 0;
         }
         else if (rb_iseq_only_optparam_p(iseq)) {
@@ -2181,7 +2189,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
                 argument_arity_error(ec, iseq, argc, lead_num, lead_num + opt_num);
             }
 
-            if (LIKELY(!(ci->flag & VM_CALL_TAILCALL))) {
+            if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
                 CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start,
                                 !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
                                 !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED));
@@ -2204,8 +2212,8 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling,
             const int argc = calling->argc;
             const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword;
 
-            if (ci->flag & VM_CALL_KWARG) {
-                const struct rb_call_info_kw_arg *kw_arg = ((struct rb_call_info_with_kwarg *)ci)->kw_arg;
+            if (vm_ci_flag(ci) & VM_CALL_KWARG) {
+                const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
 
                 if (argc - kw_arg->keyword_len == lead_num) {
                     const int ci_kw_len = kw_arg->keyword_len;
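
These hunks replace every direct read of the old call-info struct with an accessor call. A short sketch of the before/after access pattern, using only the accessors that appear in the diff (the helper name `inspect_call_site` is illustrative):

    /* Sketch: reading a call site through the new vm_ci_* accessors.
     * Each comment shows the field access it replaces. */
    static void
    inspect_call_site(const struct rb_call_data *cd)
    {
        const struct rb_callinfo *ci = cd->ci;       /* was: &cd->ci       */
        ID mid = vm_ci_mid(ci);                      /* was: ci->mid       */
        unsigned int flag = vm_ci_flag(ci);          /* was: ci->flag      */
        int argc = vm_ci_argc(ci);                   /* was: ci->orig_argc */
        const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
                        /* was: ((struct rb_call_info_with_kwarg *)ci)->kw_arg */

        if ((flag & VM_CALL_KWARG) && kw_arg != NULL) {
            /* keyword names and their count still live in the kwarg block */
            const int kw_len = kw_arg->keyword_len;
            const VALUE *keywords = kw_arg->keywords;
            (void)kw_len; (void)keywords;
        }
        (void)mid; (void)argc;
    }
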
@@ -2258,10 +2266,10 @@ static inline VALUE
 vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd,
                      int opt_pc, int param_size, int local_size)
 {
-    const struct rb_call_info *ci = &cd->ci;
+    const struct rb_callinfo *ci = cd->ci;
     const struct rb_call_cache *cc = &cd->cc;
 
-    if (LIKELY(!(ci->flag & VM_CALL_TAILCALL))) {
+    if (LIKELY(!(vm_ci_flag(ci) & VM_CALL_TAILCALL))) {
        return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, opt_pc, param_size, local_size);
     }
     else {
@@ -2492,7 +2500,7 @@ vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
 static VALUE
 vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
 {
-    const struct rb_call_info *ci = &cd->ci;
+    const struct rb_callinfo *ci = cd->ci;
     const struct rb_call_cache *cc = &cd->cc;
     VALUE val;
     const rb_callable_method_entry_t *me = cc->me;
@@ -2510,7 +2518,7 @@ vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp
     }
 
     RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
-    EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, ci->mid, me->owner, Qundef);
+    EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, vm_ci_mid(ci), me->owner, Qundef);
 
     vm_push_frame(ec, NULL, frame_type, recv,
                   block_handler, (VALUE)me,
@@ -2525,7 +2533,7 @@ vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp
 
     rb_vm_pop_frame(ec);
 
-    EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, ci->mid, me->owner, val);
+    EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, vm_ci_mid(ci), me->owner, val);
     RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
 
     return val;
@@ -2534,7 +2542,7 @@ vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp
 static VALUE
 vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
 {
-    const struct rb_call_info *ci = &cd->ci;
+    const struct rb_callinfo *ci = cd->ci;
     RB_DEBUG_COUNTER_INC(ccf_cfunc);
 
     CALLER_SETUP_ARG(reg_cfp, calling, ci);
@@ -2582,7 +2590,7 @@ vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_c
 
     VALUE *argv;
     int argc;
-    const struct rb_call_info *ci = &cd->ci;
+    const struct rb_callinfo *ci = cd->ci;
 
     CALLER_SETUP_ARG(cfp, calling, ci);
     argc = calling->argc;
@@ -2594,12 +2602,12 @@ vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_c
 }
 
 static enum method_missing_reason
-ci_missing_reason(const struct rb_call_info *ci)
+ci_missing_reason(const struct rb_callinfo *ci)
 {
     enum method_missing_reason stat = MISSING_NOENTRY;
-    if (ci->flag & VM_CALL_VCALL) stat |= MISSING_VCALL;
-    if (ci->flag & VM_CALL_FCALL) stat |= MISSING_FCALL;
-    if (ci->flag & VM_CALL_SUPER) stat |= MISSING_SUPER;
+    if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
+    if (vm_ci_flag(ci) & VM_CALL_FCALL) stat |= MISSING_FCALL;
+    if (vm_ci_flag(ci) & VM_CALL_SUPER) stat |= MISSING_SUPER;
     return stat;
 }
 
@@ -2610,11 +2618,11 @@ vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct
 
     int i;
     VALUE sym;
-    const struct rb_call_info *orig_ci = &orig_cd->ci;
+    ID mid;
+    const struct rb_callinfo *orig_ci = orig_cd->ci;
     const struct rb_call_cache *orig_cc = &orig_cd->cc;
-    struct rb_call_info *ci;
     struct rb_call_cache *cc;
-    struct rb_kwarg_call_data cd;
+    struct rb_call_data cd;
 
     CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);
 
@@ -2624,31 +2632,22 @@ vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct
         rb_raise(rb_eArgError, "no method name given");
     }
 
-    /* setup new ci */
-    if (orig_ci->flag & VM_CALL_KWARG) {
-        const struct rb_kwarg_call_data *orig_kcd = (void *)orig_cd;
-        cd = *orig_kcd;
-    }
-    else {
-        cd.ci_kw.ci = *orig_ci;
     cd.cc = *orig_cc;
-    }
-    ci = &cd.ci_kw.ci;
     cc = &cd.cc;
 
     sym = TOPN(i);
 
-    if (!(ci->mid = rb_check_id(&sym))) {
+    if (!(mid = rb_check_id(&sym))) {
         if (rb_method_basic_definition_p(CLASS_OF(calling->recv), idMethodMissing)) {
             VALUE exc =
                 rb_make_no_method_exception(rb_eNoMethodError, 0, calling->recv,
                                             rb_long2int(calling->argc), &TOPN(i),
-                                            ci->flag & (VM_CALL_FCALL|VM_CALL_VCALL));
+                                            vm_ci_flag(orig_ci) & (VM_CALL_FCALL|VM_CALL_VCALL));
             rb_exc_raise(exc);
         }
         TOPN(i) = rb_str_intern(sym);
-        ci->mid = idMethodMissing;
-        ec->method_missing_reason = cc->aux.method_missing_reason = ci_missing_reason(ci);
+        mid = idMethodMissing;
+        ec->method_missing_reason = cc->aux.method_missing_reason = ci_missing_reason(orig_ci);
     }
     else {
         /* shift arguments */
@@ -2659,20 +2658,22 @@ vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct
         DEC_SP(1);
     }
 
-    CC_SET_ME(cc, rb_callable_method_entry_with_refinements(CLASS_OF(calling->recv), ci->mid, NULL));
-    ci->flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
+    CC_SET_ME(cc, rb_callable_method_entry_with_refinements(CLASS_OF(calling->recv), mid, NULL));
+    unsigned int new_flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
+    cd.ci = vm_ci_new_runtime(mid, new_flag, 0 /* not accessed (calling->argc is used) */, vm_ci_kwarg(orig_ci));
 
     return vm_call_method(ec, reg_cfp, calling, (CALL_DATA)&cd);
 }
 
-static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, VALUE block_handler);
+static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler);
 
 NOINLINE(static VALUE
          vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
-                                  struct rb_calling_info *calling, const struct rb_call_info *ci, VALUE block_handler));
+                                  struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler));
 
 static VALUE
 vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
-                         struct rb_calling_info *calling, const struct rb_call_info *ci, VALUE block_handler)
+                         struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
 {
     int argc = calling->argc;
 
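
In `vm_call_opt_send` the callinfo can no longer be copied and patched in place (the old code rewrote `ci->mid` and `ci->flag`); a redirected call now gets a brand-new callinfo from `vm_ci_new_runtime()`. A minimal sketch of that idiom, with the helper name `redirect_call_site` being illustrative rather than the commit's:

    /* Sketch: redirecting a call site to a different method id.
     * The original kwarg block is carried over; mid and flags are replaced. */
    static void
    redirect_call_site(struct rb_call_data *cd, const struct rb_callinfo *orig_ci, ID new_mid)
    {
        unsigned int new_flag = VM_CALL_FCALL | VM_CALL_OPT_SEND;

        cd->ci = vm_ci_new_runtime(new_mid, new_flag,
                                   vm_ci_argc(orig_ci),   /* argc travels with the callinfo */
                                   vm_ci_kwarg(orig_ci)); /* keep the caller's keyword layout */
    }

When the new callinfo is stored somewhere the GC already tracks (for example back into an iseq's call data, as the `vm_search_super_method` hunk below does), the diff also issues `RB_OBJ_WRITTEN()` so the reference stays visible to the collector.
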
@@ -2688,7 +2689,7 @@ vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct
 {
     RB_DEBUG_COUNTER_INC(ccf_opt_call);
 
-    const struct rb_call_info *ci = &cd->ci;
+    const struct rb_callinfo *ci = cd->ci;
     VALUE procval = calling->recv;
     return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
 }
@@ -2698,7 +2699,7 @@ vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
 {
     RB_DEBUG_COUNTER_INC(ccf_opt_block_call);
     VALUE block_handler = VM_ENV_BLOCK_HANDLER(VM_CF_LEP(reg_cfp));
-    const struct rb_call_info *ci = &cd->ci;
+    const struct rb_callinfo *ci = cd->ci;
 
     if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
         return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
@@ -2715,7 +2716,7 @@ vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
 {
     RB_DEBUG_COUNTER_INC(ccf_method_missing);
 
-    const struct rb_call_info *orig_ci = &orig_cd->ci;
+    const struct rb_callinfo *orig_ci = orig_cd->ci;
     const struct rb_call_cache *orig_cc = &orig_cd->cc;
     VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
     struct rb_call_data cd = *orig_cd;
@@ -2724,12 +2725,9 @@ vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
     CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);
     argc = calling->argc + 1;
 
-    cd.ci.flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
-    cd.ci.mid = idMethodMissing;
-    cd.ci.orig_argc = argc;
-
-    cd.cc.me =
-        rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv),
+    unsigned int flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0);
+    cd.ci = vm_ci_new_runtime(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci));
+    cd.cc.me = rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv),
                                                             idMethodMissing, NULL);
 
     calling->argc = argc;
@@ -2740,7 +2738,7 @@ vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
     if (argc > 1) {
         MEMMOVE(argv+1, argv, VALUE, argc-1);
     }
-    argv[0] = ID2SYM(orig_ci->mid);
+    argv[0] = ID2SYM(vm_ci_mid(orig_ci));
     INC_SP(1);
 
     ec->method_missing_reason = orig_cc->aux.method_missing_reason;
@@ -2753,10 +2751,10 @@ vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca
 {
     RB_DEBUG_COUNTER_INC(ccf_method_missing);
 
-    const struct rb_call_info *ci = &cd->ci;
+    const struct rb_callinfo *ci = cd->ci;
     struct rb_call_cache *cc = &cd->cc;
     klass = RCLASS_SUPER(klass);
-    CC_SET_ME(cc, klass ? rb_callable_method_entry(klass, ci->mid) : NULL);
+    CC_SET_ME(cc, klass ? rb_callable_method_entry(klass, vm_ci_mid(ci)) : NULL);
 
     if (!cc->me) {
         return vm_call_method_nome(ec, cfp, calling, cd);
@@ -2914,7 +2912,7 @@ search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, ID mi
 static VALUE
 vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
 {
-    const struct rb_call_info *ci = &cd->ci;
+    const struct rb_callinfo *ci = cd->ci;
     struct rb_call_cache *cc = &cd->cc;
 
     switch (cc->me->def->type) {
@@ -2933,7 +2931,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
 
         rb_check_arity(calling->argc, 1, 1);
         cc->aux.index = 0;
-        CC_SET_FASTPATH(cc, vm_call_attrset, !((ci->flag & VM_CALL_ARGS_SPLAT) || (ci->flag & VM_CALL_KWARG)));
+        CC_SET_FASTPATH(cc, vm_call_attrset, !((vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT) || (vm_ci_flag(ci) & VM_CALL_KWARG)));
         return vm_call_attrset(ec, cfp, calling, cd);
 
       case VM_METHOD_TYPE_IVAR:
@@ -2941,7 +2939,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
         CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
         rb_check_arity(calling->argc, 0, 0);
         cc->aux.index = 0;
-        CC_SET_FASTPATH(cc, vm_call_ivar, !(ci->flag & VM_CALL_ARGS_SPLAT));
+        CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT));
         return vm_call_ivar(ec, cfp, calling, cd);
 
       case VM_METHOD_TYPE_MISSING:
@@ -2981,7 +2979,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
         return vm_call_zsuper(ec, cfp, calling, cd, RCLASS_ORIGIN(cc->me->defined_class));
 
       case VM_METHOD_TYPE_REFINED:
-        if (search_refined_method(ec, cfp, ci->mid, cc))
+        if (search_refined_method(ec, cfp, vm_ci_mid(ci), cc))
             return vm_call_method(ec, cfp, calling, cd);
         else
             return vm_call_method_nome(ec, cfp, calling, cd);
@@ -2996,11 +2994,11 @@ static VALUE
 vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
 {
     /* method missing */
-    const struct rb_call_info *ci = &cd->ci;
+    const struct rb_callinfo *ci = cd->ci;
     struct rb_call_cache *cc = &cd->cc;
     const int stat = ci_missing_reason(ci);
 
-    if (ci->mid == idMethodMissing) {
+    if (vm_ci_mid(ci) == idMethodMissing) {
         rb_control_frame_t *reg_cfp = cfp;
         VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
         vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
@@ -3015,7 +3013,7 @@ vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct
 static inline VALUE
 vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
 {
-    const struct rb_call_info *ci = &cd->ci;
+    const struct rb_callinfo *ci = cd->ci;
     struct rb_call_cache *cc = &cd->cc;
 
     VM_ASSERT(callable_method_entry_p(cc->me));
@@ -3026,9 +3024,9 @@ vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca
         return vm_call_method_each_type(ec, cfp, calling, cd);
 
       case METHOD_VISI_PRIVATE:
-        if (!(ci->flag & VM_CALL_FCALL)) {
+        if (!(vm_ci_flag(ci) & VM_CALL_FCALL)) {
            enum method_missing_reason stat = MISSING_PRIVATE;
-           if (ci->flag & VM_CALL_VCALL) stat |= MISSING_VCALL;
+           if (vm_ci_flag(ci) & VM_CALL_VCALL) stat |= MISSING_VCALL;
 
            cc->aux.method_missing_reason = stat;
            CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
@@ -3037,7 +3035,7 @@ vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca
         return vm_call_method_each_type(ec, cfp, calling, cd);
 
       case METHOD_VISI_PROTECTED:
-        if (!(ci->flag & VM_CALL_OPT_SEND)) {
+        if (!(vm_ci_flag(ci) & VM_CALL_OPT_SEND)) {
            if (!rb_obj_is_kind_of(cfp->self, cc->me->defined_class)) {
                cc->aux.method_missing_reason = MISSING_PROTECTED;
                return vm_call_method_missing(ec, cfp, calling, cd);
@@ -3045,17 +3043,10 @@ vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca
            else {
                /* caching method info to dummy cc */
                VM_ASSERT(cc->me != NULL);
-               if (ci->flag & VM_CALL_KWARG) {
-                   struct rb_kwarg_call_data *kcd = (void *)cd;
-                   struct rb_kwarg_call_data cd_entry = *kcd;
-                   return vm_call_method_each_type(ec, cfp, calling, (void *)&cd_entry);
-               }
-               else {
                struct rb_call_data cd_entry = *cd;
                return vm_call_method_each_type(ec, cfp, calling, &cd_entry);
-               }
            }
         }
-        }
         return vm_call_method_each_type(ec, cfp, calling, cd);
 
       default:
@@ -3111,8 +3102,6 @@ vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *c
 {
     VALUE current_defined_class, klass;
     const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
-    struct rb_call_info *ci = &cd->ci;
-    struct rb_call_cache *cc = &cd->cc;
 
     if (!me) {
         vm_super_outside();
@@ -3138,22 +3127,29 @@ vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *c
         }
     }
 
-    if (me->def->type == VM_METHOD_TYPE_BMETHOD && (ci->flag & VM_CALL_ZSUPER)) {
+    if (me->def->type == VM_METHOD_TYPE_BMETHOD && (vm_ci_flag(cd->ci) & VM_CALL_ZSUPER)) {
        rb_raise(rb_eRuntimeError,
                 "implicit argument passing of super from method defined"
                 " by define_method() is not supported."
                 " Specify all arguments explicitly.");
     }
 
-    ci->mid = me->def->original_id;
+    // update iseq. really? (TODO)
+    cd->ci = vm_ci_new_runtime(me->def->original_id,
+                               vm_ci_flag(cd->ci),
+                               vm_ci_argc(cd->ci),
+                               vm_ci_kwarg(cd->ci));
+    RB_OBJ_WRITTEN(reg_cfp->iseq, Qundef, cd->ci);
+
     klass = vm_search_normal_superclass(me->defined_class);
 
     if (!klass) {
         /* bound instance method of module */
-        cc->aux.method_missing_reason = MISSING_SUPER;
-        CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
+        cd->cc.aux.method_missing_reason = MISSING_SUPER;
+        CC_SET_FASTPATH(&cd->cc, vm_call_method_missing, TRUE);
     }
     else {
+        struct rb_call_cache *cc = &cd->cc;
 #if OPT_INLINE_METHOD_CACHE
         /* Unlike normal method search, we only consider the first class
          * serial. Since we're testing defined_class rather than receiver,
@@ -3161,14 +3157,14 @@ vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *c
        if (LIKELY(RB_DEBUG_COUNTER_INC_UNLESS(mc_global_state_miss,
                                               GET_GLOBAL_METHOD_STATE() == cc->method_state) &&
                   cc->class_serial[0] == RCLASS_SERIAL(klass)) &&
-           cc->me && ci->mid == cc->me->called_id) {
+           cc->me && vm_ci_mid(cd->ci) == cc->me->called_id) {
            VM_ASSERT(cc->call != NULL);
            RB_DEBUG_COUNTER_INC(mc_inline_hit);
            return;
        }
 #endif
 
-       CC_SET_ME(cc, rb_callable_method_entry(klass, ci->mid));
+       CC_SET_ME(cc, rb_callable_method_entry(klass, vm_ci_mid(cd->ci)));
        CC_SET_FASTPATH(cc, vm_call_super_method, TRUE);
 
        cc->method_state = GET_GLOBAL_METHOD_STATE();
@@ -3267,7 +3263,7 @@ vm_callee_setup_block_arg_arg0_check(VALUE *argv)
 }
 
 static int
-vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_call_info *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
+vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_callinfo *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
 {
     if (rb_simple_iseq_p(iseq)) {
         rb_control_frame_t *cfp = ec->cfp;
@@ -3312,25 +3308,25 @@ static int
 vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, int kw_splat, VALUE block_handler, enum arg_setup_type arg_setup_type)
 {
     struct rb_calling_info calling_entry, *calling;
-    struct rb_call_info ci_entry, *ci;
 
     calling = &calling_entry;
     calling->argc = argc;
     calling->block_handler = block_handler;
     calling->kw_splat = kw_splat;
     calling->recv = Qundef;
+    struct rb_callinfo dummy_ci = {
+        .flags = T_IMEMO | (imemo_callinfo << FL_USHIFT),
+        .flag = (VALUE)(kw_splat ? VM_CALL_KW_SPLAT : 0),
+    };
 
-    ci_entry.flag = kw_splat ? VM_CALL_KW_SPLAT : 0;
-    ci = &ci_entry;
-
-    return vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, arg_setup_type);
+    return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
 }
 
 /* ruby iseq -> ruby block */
 
 static VALUE
 vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
-                     struct rb_calling_info *calling, const struct rb_call_info *ci,
+                     struct rb_calling_info *calling, const struct rb_callinfo *ci,
                      int is_lambda, const struct rb_captured_block *captured)
 {
     const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
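
The `vm_yield_setup_args` hunk above replaces the old on-stack `rb_call_info` with an on-stack `rb_callinfo` whose header word is filled in by hand, so the `vm_ci_*` accessors treat it like a heap-allocated imemo without allocating one. A rough sketch of the idea (the wrapper name and parameter list are illustrative, and as in the diff it relies on the value never outliving the call):

    /* Sketch: a throw-away callinfo on the C stack.  Setting the imemo type
     * bits in .flags lets accessors such as vm_ci_flag() work on it; because
     * it never escapes this frame, no GC registration or write barrier is
     * needed. */
    static int
    yield_setup_args_sketch(rb_execution_context_t *ec, struct rb_calling_info *calling,
                            const rb_iseq_t *iseq, VALUE *argv, int kw_splat,
                            enum arg_setup_type arg_setup_type)
    {
        struct rb_callinfo dummy_ci = {
            .flags = T_IMEMO | (imemo_callinfo << FL_USHIFT),
            .flag = (VALUE)(kw_splat ? VM_CALL_KW_SPLAT : 0),
        };

        return vm_callee_setup_block_arg(ec, calling, &dummy_ci, iseq, argv, arg_setup_type);
    }
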
@@ -3353,7 +3349,7 @@ vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
 
 static VALUE
 vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
-                       struct rb_calling_info *calling, const struct rb_call_info *ci,
+                       struct rb_calling_info *calling, const struct rb_callinfo *ci,
                        VALUE symbol)
 {
     VALUE val;
@@ -3367,7 +3363,7 @@ vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
 
 static VALUE
 vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
-                      struct rb_calling_info *calling, const struct rb_call_info *ci,
+                      struct rb_calling_info *calling, const struct rb_callinfo *ci,
                       const struct rb_captured_block *captured)
 {
     VALUE val;
@@ -3401,7 +3397,7 @@ vm_proc_to_block_handler(VALUE procval)
 
 static inline VALUE
 vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
-                struct rb_calling_info *calling, const struct rb_call_info *ci, VALUE block_handler)
+                struct rb_calling_info *calling, const struct rb_callinfo *ci, VALUE block_handler)
 {
     int is_lambda = FALSE;
 
@@ -3981,7 +3977,7 @@ vm_invokeblock_i(
     struct rb_calling_info *calling,
     struct rb_call_data *cd)
 {
-    const struct rb_call_info *ci = &cd->ci;
+    const struct rb_callinfo *ci = cd->ci;
     VALUE block_handler = VM_CF_BLOCK_HANDLER(GET_CFP());
 
     if (block_handler == VM_BLOCK_HANDLER_NONE) {
@@ -4003,10 +3999,10 @@ vm_sendish(
     struct rb_call_data *cd,
     VALUE recv))
 {
-    CALL_INFO ci = &cd->ci;
+    const struct rb_callinfo *ci = cd->ci;
     CALL_CACHE cc = &cd->cc;
     VALUE val;
-    int argc = ci->orig_argc;
+    int argc = vm_ci_argc(ci);
     VALUE recv = TOPN(argc);
     struct rb_calling_info calling;
 
@@ -250,15 +250,15 @@ THROW_DATA_CONSUMED_SET(struct vm_throw_data *obj)
     }
 }
 
-#define IS_ARGS_SPLAT(ci)   ((ci)->flag & VM_CALL_ARGS_SPLAT)
-#define IS_ARGS_KEYWORD(ci) ((ci)->flag & VM_CALL_KWARG)
-#define IS_ARGS_KW_SPLAT(ci) ((ci)->flag & VM_CALL_KW_SPLAT)
-#define IS_ARGS_KW_OR_KW_SPLAT(ci) ((ci)->flag & (VM_CALL_KWARG | VM_CALL_KW_SPLAT))
+#define IS_ARGS_SPLAT(ci)   (vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT)
+#define IS_ARGS_KEYWORD(ci) (vm_ci_flag(ci) & VM_CALL_KWARG)
+#define IS_ARGS_KW_SPLAT(ci) (vm_ci_flag(ci) & VM_CALL_KW_SPLAT)
+#define IS_ARGS_KW_OR_KW_SPLAT(ci) (vm_ci_flag(ci) & (VM_CALL_KWARG | VM_CALL_KW_SPLAT))
 
 /* If this returns true, an optimized function returned by `vm_call_iseq_setup_func`
    can be used as a fastpath. */
 static bool
-vm_call_iseq_optimizable_p(const struct rb_call_info *ci, const struct rb_call_cache *cc)
+vm_call_iseq_optimizable_p(const struct rb_callinfo *ci, const struct rb_call_cache *cc)
 {
     return !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
            !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED);
 
vm_method.c (11 lines changed)

@@ -2054,17 +2054,6 @@ rb_mod_modfunc(int argc, VALUE *argv, VALUE module)
     return module;
 }
 
-bool
-rb_method_basic_definition_p_with_cc(struct rb_call_data *cd, VALUE klass, ID mid)
-{
-    if (cd->ci.mid != mid) {
-        *cd = (struct rb_call_data) /* reset */ { .ci = { .mid = mid, }, };
-    }
-
-    vm_search_method_fastpath(cd, klass);
-    return cd->cc.me && METHOD_ENTRY_BASIC(cd->cc.me);
-}
-
 #ifdef __GNUC__
 #pragma push_macro("rb_method_basic_definition_p")
 #undef rb_method_basic_definition_p