/**********************************************************************

  vm_insnhelper.c - instruction helper functions.

  $Author$

  Copyright (C) 2007 Koichi Sasada

**********************************************************************/

/* finish iseq array */
#include "insns.inc"
#include <math.h>
#include "constant.h"
#include "internal.h"
#include "probes.h"
#include "probes_helper.h"
#include "ruby/config.h"
#include "debug_counter.h"

/* control stack frame */

static rb_control_frame_t *vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
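
/*
 * Duplicate one of the VM's pre-allocated special exceptions so that
 * per-raise state (such as a backtrace) can be attached to the copy
 * without mutating the shared template object.
 */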
VALUE
ruby_vm_special_exception_copy(VALUE exc)
{
    VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
    rb_obj_copy_ivar(e, exc);
    return e;
}
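
/*
 * Raise the pre-allocated SystemStackError on this execution context.
 * With setup != 0 a fresh copy of the exception is made and the current
 * backtrace is attached to it; otherwise the shared template is raised
 * untouched.
 */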
NORETURN(static void ec_stack_overflow(rb_execution_context_t *ec, int));
static void
ec_stack_overflow(rb_execution_context_t *ec, int setup)
{
    VALUE mesg = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_sysstack];
    ec->raised_flag = RAISED_STACKOVERFLOW;
    if (setup) {
        VALUE at = rb_ec_backtrace_object(ec);
        mesg = ruby_vm_special_exception_copy(mesg);
        rb_ivar_set(mesg, idBt, at);
        rb_ivar_set(mesg, idBt_locations, at);
    }
    ec->errinfo = mesg;
    EC_JUMP_TAG(ec, TAG_RAISE);
}

static void
vm_stackoverflow(void)
{
    ec_stack_overflow(GET_EC(), TRUE);
}
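
/*
 * Exported stack-overflow entry point.  A critical overflow, or one hit
 * while the GC is running, raises the fatal variant right away; otherwise
 * whether a backtrace is built depends on USE_SIGALTSTACK (presumably
 * because the backtrace can only be built safely when an alternate
 * signal stack is available).
 */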
NORETURN(void rb_ec_stack_overflow(rb_execution_context_t *ec, int crit));
void
rb_ec_stack_overflow(rb_execution_context_t *ec, int crit)
{
    if (crit || rb_during_gc()) {
        ec->raised_flag = RAISED_STACKOVERFLOW;
        ec->errinfo = rb_ec_vm_ptr(ec)->special_exceptions[ruby_error_stackfatal];
        EC_JUMP_TAG(ec, TAG_RAISE);
    }
#ifdef USE_SIGALTSTACK
    ec_stack_overflow(ec, TRUE);
#else
    ec_stack_overflow(ec, FALSE);
#endif
}

#if VM_CHECK_MODE > 0
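
/*
 * The helpers below exist only for VM_CHECK_MODE builds: they verify
 * that a method entry about to be pushed on a control frame has a
 * usable (callable) defined_class.
 */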
static int
callable_class_p(VALUE klass)
{
#if VM_CHECK_MODE >= 2
    if (!klass) return FALSE;
    switch (RB_BUILTIN_TYPE(klass)) {
      case T_ICLASS:
        if (!RB_TYPE_P(RCLASS_SUPER(klass), T_MODULE)) break;
      case T_MODULE:
        return TRUE;
    }
    while (klass) {
        if (klass == rb_cBasicObject) {
            return TRUE;
        }
        klass = RCLASS_SUPER(klass);
    }
    return FALSE;
#else
    return klass != 0;
#endif
}

static int
callable_method_entry_p(const rb_callable_method_entry_t *me)
{
    if (me == NULL || callable_class_p(me->defined_class)) {
        return TRUE;
    }
    else {
        return FALSE;
    }
}
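
/*
 * vm_check_frame_detail() asserts, for one frame type, which combination
 * of block handler, method entry and CREF may appear in the frame's ep
 * area; vm_check_frame() below maps every frame magic to the expected
 * combination via the CHECK() table.
 */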
static void
vm_check_frame_detail(VALUE type, int req_block, int req_me, int req_cref, VALUE specval, VALUE cref_or_me, int is_cframe, const rb_iseq_t *iseq)
{
    unsigned int magic = (unsigned int)(type & VM_FRAME_MAGIC_MASK);
    enum imemo_type cref_or_me_type = imemo_env; /* impossible value */

    if (RB_TYPE_P(cref_or_me, T_IMEMO)) {
        cref_or_me_type = imemo_type(cref_or_me);
    }
    if (type & VM_FRAME_FLAG_BMETHOD) {
        req_me = TRUE;
    }

    if (req_block && (type & VM_ENV_FLAG_LOCAL) == 0) {
        rb_bug("vm_push_frame: specval (%p) should be a block_ptr on %x frame", (void *)specval, magic);
    }
    if (!req_block && (type & VM_ENV_FLAG_LOCAL) != 0) {
        rb_bug("vm_push_frame: specval (%p) should not be a block_ptr on %x frame", (void *)specval, magic);
    }

    if (req_me) {
        if (cref_or_me_type != imemo_ment) {
            rb_bug("vm_push_frame: (%s) should be method entry on %x frame", rb_obj_info(cref_or_me), magic);
        }
    }
    else {
        if (req_cref && cref_or_me_type != imemo_cref) {
            rb_bug("vm_push_frame: (%s) should be CREF on %x frame", rb_obj_info(cref_or_me), magic);
        }
        else { /* cref or Qfalse */
            if (cref_or_me != Qfalse && cref_or_me_type != imemo_cref) {
                if (((type & VM_FRAME_FLAG_LAMBDA) || magic == VM_FRAME_MAGIC_IFUNC) && (cref_or_me_type == imemo_ment)) {
                    /* ignore */
                }
                else {
                    rb_bug("vm_push_frame: (%s) should be false or cref on %x frame", rb_obj_info(cref_or_me), magic);
                }
            }
        }
    }

    if (cref_or_me_type == imemo_ment) {
        const rb_callable_method_entry_t *me = (const rb_callable_method_entry_t *)cref_or_me;

        if (!callable_method_entry_p(me)) {
            rb_bug("vm_push_frame: ment (%s) should be callable on %x frame.", rb_obj_info(cref_or_me), magic);
        }
    }

    if ((type & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY) {
        VM_ASSERT(iseq == NULL ||
                  RUBY_VM_NORMAL_ISEQ_P(iseq) /* argument error. it should be fixed */);
    }
    else {
        VM_ASSERT(is_cframe == !RUBY_VM_NORMAL_ISEQ_P(iseq));
    }
}

static void
vm_check_frame(VALUE type,
               VALUE specval,
               VALUE cref_or_me,
               const rb_iseq_t *iseq)
{
    VALUE given_magic = type & VM_FRAME_MAGIC_MASK;
    VM_ASSERT(FIXNUM_P(type));

#define CHECK(magic, req_block, req_me, req_cref, is_cframe) \
    case magic: \
      vm_check_frame_detail(type, req_block, req_me, req_cref, \
                            specval, cref_or_me, is_cframe, iseq); \
      break
    switch (given_magic) {
      /*                             BLK    ME     CREF   CFRAME */
      CHECK(VM_FRAME_MAGIC_METHOD, TRUE,  TRUE,  FALSE, FALSE);
      CHECK(VM_FRAME_MAGIC_CLASS,  TRUE,  FALSE, TRUE,  FALSE);
      CHECK(VM_FRAME_MAGIC_TOP,    TRUE,  FALSE, TRUE,  FALSE);
      CHECK(VM_FRAME_MAGIC_CFUNC,  TRUE,  TRUE,  FALSE, TRUE);
      CHECK(VM_FRAME_MAGIC_BLOCK,  FALSE, FALSE, FALSE, FALSE);
      CHECK(VM_FRAME_MAGIC_IFUNC,  FALSE, FALSE, FALSE, TRUE);
      CHECK(VM_FRAME_MAGIC_EVAL,   FALSE, FALSE, FALSE, FALSE);
      CHECK(VM_FRAME_MAGIC_RESCUE, FALSE, FALSE, FALSE, FALSE);
      CHECK(VM_FRAME_MAGIC_DUMMY,  TRUE,  FALSE, FALSE, FALSE);
      default:
        rb_bug("vm_push_frame: unknown type (%x)", (unsigned int)given_magic);
    }
#undef CHECK
}
#else
#define vm_check_frame(a, b, c, d)
#endif /* VM_CHECK_MODE > 0 */
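
/*
 * vm_push_frame(): claim the next control frame below ec->cfp, nil-fill
 * its locals, and lay out the environment tail on the value stack:
 *   ep[-2]  cref_or_me (Qnil, T_IMEMO(cref) or T_IMEMO(ment))
 *   ep[-1]  specval    (block handler or prev env pointer)
 *   ep[ 0]  type       (frame/env flags)
 * before making the new frame current.
 */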
static inline rb_control_frame_t *
vm_push_frame(rb_execution_context_t *ec,
              const rb_iseq_t *iseq,
              VALUE type,
              VALUE self,
              VALUE specval,
              VALUE cref_or_me,
              const VALUE *pc,
              VALUE *sp,
              int local_size,
              int stack_max)
{
    rb_control_frame_t *const cfp = ec->cfp - 1;
    int i;

    vm_check_frame(type, specval, cref_or_me, iseq);
    VM_ASSERT(local_size >= 0);

    /* check stack overflow */
    CHECK_VM_STACK_OVERFLOW0(cfp, sp, local_size + stack_max);

    ec->cfp = cfp;

    /* setup new frame */
    cfp->pc = (VALUE *)pc;
    cfp->iseq = (rb_iseq_t *)iseq;
    cfp->self = self;
    cfp->block_code = NULL;

    /* setup vm value stack */

    /* initialize local variables */
    for (i=0; i < local_size; i++) {
        *sp++ = Qnil;
    }

    /* setup ep with managing data */
    VM_ASSERT(VM_ENV_DATA_INDEX_ME_CREF == -2);
    VM_ASSERT(VM_ENV_DATA_INDEX_SPECVAL == -1);
    VM_ASSERT(VM_ENV_DATA_INDEX_FLAGS == -0);
    *sp++ = cref_or_me; /* ep[-2] / Qnil or T_IMEMO(cref) or T_IMEMO(ment) */
    *sp++ = specval /* ep[-1] / block handler or prev env ptr */;
    *sp = type; /* ep[-0] / ENV_FLAGS */

    cfp->ep = sp;
    cfp->sp = sp + 1;

#if VM_DEBUG_BP_CHECK
    cfp->bp_check = sp + 1;
#endif

    if (VMDEBUG == 2) {
        SDR();
    }

    return cfp;
}

rb_control_frame_t *
rb_vm_push_frame(rb_execution_context_t *ec,
                 const rb_iseq_t *iseq,
                 VALUE type,
                 VALUE self,
                 VALUE specval,
                 VALUE cref_or_me,
                 const VALUE *pc,
                 VALUE *sp,
                 int local_size,
                 int stack_max)
{
    return vm_push_frame(ec, iseq, type, self, specval, cref_or_me, pc, sp, local_size, stack_max);
}

/* return TRUE if the frame is finished */
static inline int
vm_pop_frame(rb_execution_context_t *ec, rb_control_frame_t *cfp, const VALUE *ep)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];

    if (VM_CHECK_MODE >= 4) rb_gc_verify_internal_consistency();
    if (VMDEBUG == 2) SDR();

    ec->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    return flags & VM_FRAME_FLAG_FINISH;
}

void
rb_vm_pop_frame(rb_execution_context_t *ec)
{
    vm_pop_frame(ec, ec->cfp, ec->cfp->ep);
}

/* method dispatch */
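
/*
 * rb_arity_error_new() builds the standard ArgumentError message, e.g.
 * "wrong number of arguments (given 2, expected 1)",
 * "wrong number of arguments (given 0, expected 1+)" or
 * "wrong number of arguments (given 5, expected 1..3)", depending on
 * whether the expected arity is fixed, open-ended or a range.
 */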
static inline VALUE
rb_arity_error_new(int argc, int min, int max)
{
    VALUE err_mess = 0;
    if (min == max) {
        err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d)", argc, min);
    }
    else if (max == UNLIMITED_ARGUMENTS) {
        err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d+)", argc, min);
    }
    else {
        err_mess = rb_sprintf("wrong number of arguments (given %d, expected %d..%d)", argc, min, max);
    }
    return rb_exc_new3(rb_eArgError, err_mess);
}

void
rb_error_arity(int argc, int min, int max)
{
    rb_exc_raise(rb_arity_error_new(argc, min, max));
}

/* lvar */
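
/*
 * Local variable writes into an environment funnel through
 * vm_env_write(): the fast path stores directly while
 * VM_ENV_FLAG_WB_REQUIRED is clear; otherwise the slow path registers
 * the whole env with the GC via rb_gc_writebarrier_remember() and then
 * clears the flag.
 */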
NOINLINE(static void vm_env_write_slowpath(const VALUE *ep, int index, VALUE v));

static void
vm_env_write_slowpath(const VALUE *ep, int index, VALUE v)
{
    /* remember env value forcibly */
    rb_gc_writebarrier_remember(VM_ENV_ENVVAL(ep));
    VM_FORCE_WRITE(&ep[index], v);
    VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_WB_REQUIRED);
    RB_DEBUG_COUNTER_INC(lvar_set_slowpath);
}

static inline void
vm_env_write(const VALUE *ep, int index, VALUE v)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    if (LIKELY((flags & VM_ENV_FLAG_WB_REQUIRED) == 0)) {
        VM_STACK_ENV_WRITE(ep, index, v);
    }
    else {
        vm_env_write_slowpath(ep, index, v);
    }
}
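
/*
 * Convert a block handler to the Proc object seen by Ruby code (e.g. an
 * explicit &block parameter): captured iseq/ifunc blocks are wrapped in
 * a new Proc, symbols go through Symbol#to_proc, an existing Proc is
 * returned as-is, and "no block" yields nil.
 */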
VALUE
rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler)
{
    if (block_handler == VM_BLOCK_HANDLER_NONE) {
        return Qnil;
    }
    else {
        switch (vm_block_handler_type(block_handler)) {
          case block_handler_type_iseq:
          case block_handler_type_ifunc:
            return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
          case block_handler_type_symbol:
            return rb_sym_to_proc(VM_BH_TO_SYMBOL(block_handler));
          case block_handler_type_proc:
            return VM_BH_TO_PROC(block_handler);
          default:
            VM_UNREACHABLE(rb_vm_bh_to_procval);
        }
    }
}

/* svar */
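
/*
 * An svar (T_IMEMO/imemo_svar) holds a frame's special variables:
 * $_ (lastline), $~ (backref) and, lazily, an array of extra slots.
 * It lives in the ep[VM_ENV_DATA_INDEX_ME_CREF] slot of the local
 * frame, so the accessors below must distinguish it from a cref or
 * method entry stored in the same slot.
 */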
#if VM_CHECK_MODE > 0
static int
vm_svar_valid_p(VALUE svar)
{
    if (RB_TYPE_P((VALUE)svar, T_IMEMO)) {
        switch (imemo_type(svar)) {
          case imemo_svar:
          case imemo_cref:
          case imemo_ment:
            return TRUE;
          default:
            break;
        }
    }
    rb_bug("vm_svar_valid_p: unknown type: %s", rb_obj_info(svar));
    return FALSE;
}
#endif
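
/*
 * lep_svar()/lep_svar_write() locate the svar cell for a local ep
 * ("lep"): ordinary frames keep it in lep[VM_ENV_DATA_INDEX_ME_CREF],
 * while the root frame of an execution context uses ec->root_svar.
 */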
static inline struct vm_svar *
lep_svar(const rb_execution_context_t *ec, const VALUE *lep)
{
    VALUE svar;

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        svar = lep[VM_ENV_DATA_INDEX_ME_CREF];
    }
    else {
        svar = ec->root_svar;
    }

    VM_ASSERT(svar == Qfalse || vm_svar_valid_p(svar));

    return (struct vm_svar *)svar;
}

static inline void
lep_svar_write(const rb_execution_context_t *ec, const VALUE *lep, const struct vm_svar *svar)
{
    VM_ASSERT(vm_svar_valid_p((VALUE)svar));

    if (lep && (ec == NULL || ec->root_lep != lep)) {
        vm_env_write(lep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)svar);
    }
    else {
        RB_OBJ_WRITE(rb_ec_thread_ptr(ec)->self, &ec->root_svar, svar);
    }
}

static VALUE
lep_svar_get(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key)
{
    const struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) return Qnil;

    switch (key) {
      case VM_SVAR_LASTLINE:
        return svar->lastline;
      case VM_SVAR_BACKREF:
        return svar->backref;
      default: {
        const VALUE ary = svar->others;

        if (NIL_P(ary)) {
            return Qnil;
        }
        else {
            return rb_ary_entry(ary, key - VM_SVAR_EXTRA_START);
        }
      }
    }
}

static struct vm_svar *
svar_new(VALUE obj)
{
    return (struct vm_svar *)rb_imemo_new(imemo_svar, Qnil, Qnil, Qnil, obj);
}

static void
lep_svar_set(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, VALUE val)
{
    struct vm_svar *svar = lep_svar(ec, lep);

    if ((VALUE)svar == Qfalse || imemo_type((VALUE)svar) != imemo_svar) {
        lep_svar_write(ec, lep, svar = svar_new((VALUE)svar));
    }

    switch (key) {
      case VM_SVAR_LASTLINE:
        RB_OBJ_WRITE(svar, &svar->lastline, val);
        return;
      case VM_SVAR_BACKREF:
        RB_OBJ_WRITE(svar, &svar->backref, val);
        return;
      default: {
        VALUE ary = svar->others;

        if (NIL_P(ary)) {
            RB_OBJ_WRITE(svar, &svar->others, ary = rb_ary_new());
        }
        rb_ary_store(ary, key - VM_SVAR_EXTRA_START, val);
      }
    }
}
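
/*
 * vm_getspecial() implements reads of special variables: type == 0
 * fetches an svar slot by key; otherwise the value comes from $~: an
 * odd type encodes a named back-reference ('&', '`', '\'' or '+') in
 * its upper bits, an even type encodes the group number for $1, $2, ...
 */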
static inline VALUE
vm_getspecial(const rb_execution_context_t *ec, const VALUE *lep, rb_num_t key, rb_num_t type)
{
    VALUE val;

    if (type == 0) {
        val = lep_svar_get(ec, lep, key);
    }
    else {
        VALUE backref = lep_svar_get(ec, lep, VM_SVAR_BACKREF);

        if (type & 0x01) {
            switch (type >> 1) {
              case '&':
                val = rb_reg_last_match(backref);
                break;
              case '`':
                val = rb_reg_match_pre(backref);
                break;
              case '\'':
                val = rb_reg_match_post(backref);
                break;
              case '+':
                val = rb_reg_match_last(backref);
                break;
              default:
                rb_bug("unexpected back-ref");
            }
        }
        else {
            val = rb_reg_nth_match((int)(type >> 1), backref);
        }
    }
    return val;
}
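
/*
 * rb_vm_frame_method_entry() recovers the callable method entry of a
 * frame by walking the ep chain outwards until the local env is
 * reached; check_method_entry() interprets each
 * ep[VM_ENV_DATA_INDEX_ME_CREF] slot, which may hold a method entry, a
 * cref, an svar wrapping either, or Qfalse.
 */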
PUREFUNC(static rb_callable_method_entry_t *check_method_entry(VALUE obj, int can_be_svar));
static rb_callable_method_entry_t *
check_method_entry(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_method_entry: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return (rb_callable_method_entry_t *)obj;
      case imemo_cref:
        return NULL;
      case imemo_svar:
        if (can_be_svar) {
            return check_method_entry(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_method_entry: svar should not be there:");
#endif
        return NULL;
    }
}

const rb_callable_method_entry_t *
rb_vm_frame_method_entry(const rb_control_frame_t *cfp)
{
    const VALUE *ep = cfp->ep;
    rb_callable_method_entry_t *me;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((me = check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return me;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_method_entry(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}
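
/*
 * Only ISEQ-defined methods carry a CREF (the lexical scope used for
 * constant lookup and refinements) in their definition; for every other
 * method type method_entry_cref() returns NULL.
 */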
static rb_cref_t *
method_entry_cref(rb_callable_method_entry_t *me)
{
    switch (me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        return me->def->body.iseq.cref;
      default:
        return NULL;
    }
}

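/* check_cref: decode one ep[VM_ENV_DATA_INDEX_ME_CREF] slot.  The slot may hold
 * a method entry, a CREF, or (only when can_be_svar is TRUE) an SVAR wrapping
 * either; return the CREF it leads to, or NULL. */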
#if VM_CHECK_MODE == 0
PUREFUNC(static rb_cref_t *check_cref(VALUE, int));
#endif
static rb_cref_t *
check_cref(VALUE obj, int can_be_svar)
{
    if (obj == Qfalse) return NULL;

#if VM_CHECK_MODE > 0
    if (!RB_TYPE_P(obj, T_IMEMO)) rb_bug("check_cref: unknown type: %s", rb_obj_info(obj));
#endif

    switch (imemo_type(obj)) {
      case imemo_ment:
        return method_entry_cref((rb_callable_method_entry_t *)obj);
      case imemo_cref:
        return (rb_cref_t *)obj;
      case imemo_svar:
        if (can_be_svar) {
            return check_cref(((struct vm_svar *)obj)->cref_or_me, FALSE);
        }
      default:
#if VM_CHECK_MODE > 0
        rb_bug("check_method_entry: svar should not be there:");
#endif
        return NULL;
    }
}

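/* vm_env_cref: walk the environment chain from ep up to the local frame and
 * return the first CREF found in the me/cref slots. */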
static inline rb_cref_t *
vm_env_cref(const VALUE *ep)
{
    rb_cref_t *cref;

    while (!VM_ENV_LOCAL_P(ep)) {
        if ((cref = check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) != NULL) return cref;
        ep = VM_ENV_PREV_EP(ep);
    }

    return check_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

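/* is_cref / vm_env_cref_by_cref: test whether the me/cref slot (or any slot on
 * the chain reachable from ep) holds a CREF directly, without resolving method
 * entries. */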
static int
is_cref(const VALUE v, int can_be_svar)
{
    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            return TRUE;
          case imemo_svar:
            if (can_be_svar) return is_cref(((struct vm_svar *)v)->cref_or_me, FALSE);
          default:
            break;
        }
    }
    return FALSE;
}

static int
vm_env_cref_by_cref(const VALUE *ep)
{
    while (!VM_ENV_LOCAL_P(ep)) {
        if (is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE)) return TRUE;
        ep = VM_ENV_PREV_EP(ep);
    }
    return is_cref(ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
}

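/* Duplicate the CREF stored in one me/cref slot (possibly inside an SVAR) and
 * write the copy back, going through RB_OBJ_WRITE when a parent object is known
 * so that the GC write barrier fires. */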
static rb_cref_t *
cref_replace_with_duplicated_cref_each_frame(const VALUE *vptr, int can_be_svar, VALUE parent)
{
    const VALUE v = *vptr;
    rb_cref_t *cref, *new_cref;

    if (RB_TYPE_P(v, T_IMEMO)) {
        switch (imemo_type(v)) {
          case imemo_cref:
            cref = (rb_cref_t *)v;
            new_cref = vm_cref_dup(cref);
            if (parent) {
                RB_OBJ_WRITE(parent, vptr, new_cref);
            }
            else {
                VM_FORCE_WRITE(vptr, (VALUE)new_cref);
            }
            return (rb_cref_t *)new_cref;
          case imemo_svar:
            if (can_be_svar) {
                return cref_replace_with_duplicated_cref_each_frame((const VALUE *)&((struct vm_svar *)v)->cref_or_me, FALSE, v);
            }
          case imemo_ment:
            rb_bug("cref_replace_with_duplicated_cref_each_frame: unreachable");
          default:
            break;
        }
    }
    return FALSE;
}

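/* vm_cref_replace_with_duplicated_cref: locate the frame on the ep chain that
 * owns the current CREF and replace that CREF with a private copy, so the copy
 * can be modified without affecting other users of the original. */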
static rb_cref_t *
vm_cref_replace_with_duplicated_cref(const VALUE *ep)
{
    if (vm_env_cref_by_cref(ep)) {
        rb_cref_t *cref;
        VALUE envval;

        while (!VM_ENV_LOCAL_P(ep)) {
            envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
            if ((cref = cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], FALSE, envval)) != NULL) {
                return cref;
            }
            ep = VM_ENV_PREV_EP(ep);
        }
        envval = VM_ENV_ESCAPED_P(ep) ? VM_ENV_ENVVAL(ep) : Qfalse;
        return cref_replace_with_duplicated_cref_each_frame(&ep[VM_ENV_DATA_INDEX_ME_CREF], TRUE, envval);
    }
    else {
        rb_bug("vm_cref_dup: unreachable");
    }
}

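/* rb_vm_get_cref: like vm_env_cref(), but a missing CREF is treated as a VM bug. */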
static rb_cref_t *
rb_vm_get_cref(const VALUE *ep)
{
    rb_cref_t *cref = vm_env_cref(ep);

    if (cref != NULL) {
        return cref;
    }
    else {
        rb_bug("rb_vm_get_cref: unreachable");
    }
}

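/* vm_get_const_key_cref: return the CREF only when the lexical scope contains a
 * singleton class (such scopes need a scope-specific constant lookup key);
 * otherwise return NULL. */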
static const rb_cref_t *
vm_get_const_key_cref(const VALUE *ep)
{
    const rb_cref_t *cref = rb_vm_get_cref(ep);
    const rb_cref_t *key_cref = cref;

    while (cref) {
        if (FL_TEST(CREF_CLASS(cref), FL_SINGLETON)) {
            return key_cref;
        }
        cref = CREF_NEXT(cref);
    }

    /* does not include singleton class */
    return NULL;
}

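/* rb_vm_rewrite_cref: build a copy of the CREF chain with old_klass replaced by
 * new_klass and store it through new_cref_ptr; the original chain is untouched. */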
void
rb_vm_rewrite_cref(rb_cref_t *cref, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr)
{
    rb_cref_t *new_cref;

    while (cref) {
        if (CREF_CLASS(cref) == old_klass) {
            new_cref = vm_cref_new_use_prev(new_klass, METHOD_VISI_UNDEF, FALSE, cref, FALSE);
            *new_cref_ptr = new_cref;
            return;
        }
        new_cref = vm_cref_new_use_prev(CREF_CLASS(cref), METHOD_VISI_UNDEF, FALSE, cref, FALSE);
        cref = CREF_NEXT(cref);
        *new_cref_ptr = new_cref;
        new_cref_ptr = (rb_cref_t **)&new_cref->next;
    }
    *new_cref_ptr = NULL;
}

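/* vm_cref_push: create a new CREF entry for klass, chained to the CREF found at
 * ep or, when ep is NULL, at the calling Ruby-level frame. */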
static rb_cref_t *
vm_cref_push(const rb_execution_context_t *ec, VALUE klass, const VALUE *ep, int pushed_by_eval)
{
    rb_cref_t *prev_cref = NULL;

    if (ep) {
        prev_cref = vm_env_cref(ep);
    }
    else {
        rb_control_frame_t *cfp = vm_get_ruby_level_caller_cfp(ec, ec->cfp);

        if (cfp) {
            prev_cref = vm_env_cref(cfp->ep);
        }
    }

    return vm_cref_new(klass, METHOD_VISI_PUBLIC, FALSE, prev_cref, pushed_by_eval);
}

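/* vm_get_cbase: return the class of the innermost CREF entry that has a class
 * attached. */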
static inline VALUE
vm_get_cbase(const VALUE *ep)
{
    const rb_cref_t *cref = rb_vm_get_cref(ep);
    VALUE klass = Qundef;

    while (cref) {
        if ((klass = CREF_CLASS(cref)) != 0) {
            break;
        }
        cref = CREF_NEXT(cref);
    }

    return klass;
}

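/* vm_get_const_base: like vm_get_cbase(), but entries pushed by eval are skipped. */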
static inline VALUE
vm_get_const_base(const VALUE *ep)
{
    const rb_cref_t *cref = rb_vm_get_cref(ep);
    VALUE klass = Qundef;

    while (cref) {
        if (!CREF_PUSHED_BY_EVAL(cref) &&
            (klass = CREF_CLASS(cref)) != 0) {
            break;
        }
        cref = CREF_NEXT(cref);
    }

    return klass;
}

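/* vm_check_if_namespace() rejects cbase values that are neither class nor module;
 * vm_ensure_not_refinement_module() warns when self is a refinement module. */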
static inline void
vm_check_if_namespace(VALUE klass)
{
    if (!RB_TYPE_P(klass, T_CLASS) && !RB_TYPE_P(klass, T_MODULE)) {
        rb_raise(rb_eTypeError, "%+"PRIsVALUE" is not a class/module", klass);
    }
}

static inline void
vm_ensure_not_refinement_module(VALUE self)
{
    if (RB_TYPE_P(self, T_MODULE) && FL_TEST(self, RMODULE_IS_REFINEMENT)) {
        rb_warn("not defined at the refinement, but at the outer class/module");
    }
}

static inline VALUE
vm_get_iclass(rb_control_frame_t *cfp, VALUE klass)
{
    return klass;
}

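/* vm_get_ev_const: constant lookup.  With orig_klass == Qnil the lexical scope
 * (CREF chain) is searched first, triggering autoload when an entry is still the
 * Qundef placeholder, and then rb_const_get() is used on the innermost cref class
 * (or on the receiver's class); otherwise a public-only lookup under orig_klass
 * is performed. */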
static inline VALUE
vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, int is_defined)
{
    void rb_const_warn_if_deprecated(const rb_const_entry_t *ce, VALUE klass, ID id);
    VALUE val;

    if (orig_klass == Qnil) {
        /* in current lexical scope */
        const rb_cref_t *root_cref = rb_vm_get_cref(ec->cfp->ep);
        const rb_cref_t *cref;
        VALUE klass = Qnil;

        while (root_cref && CREF_PUSHED_BY_EVAL(root_cref)) {
            root_cref = CREF_NEXT(root_cref);
        }
        cref = root_cref;
        while (cref && CREF_NEXT(cref)) {
            if (CREF_PUSHED_BY_EVAL(cref)) {
                klass = Qnil;
            }
            else {
                klass = CREF_CLASS(cref);
            }
            cref = CREF_NEXT(cref);

            if (!NIL_P(klass)) {
                VALUE av, am = 0;
                rb_const_entry_t *ce;
              search_continue:
                if ((ce = rb_const_lookup(klass, id))) {
                    rb_const_warn_if_deprecated(ce, klass, id);
                    val = ce->value;
                    if (val == Qundef) {
                        if (am == klass) break;
                        am = klass;
                        if (is_defined) return 1;
                        if (rb_autoloading_value(klass, id, &av)) return av;
                        rb_autoload_load(klass, id);
                        goto search_continue;
                    }
                    else {
                        if (is_defined) {
                            return 1;
                        }
                        else {
                            return val;
                        }
                    }
                }
            }
        }

        /* search self */
        if (root_cref && !NIL_P(CREF_CLASS(root_cref))) {
            klass = vm_get_iclass(ec->cfp, CREF_CLASS(root_cref));
        }
        else {
            klass = CLASS_OF(ec->cfp->self);
        }

        if (is_defined) {
            return rb_const_defined(klass, id);
        }
        else {
            return rb_const_get(klass, id);
        }
    }
    else {
        vm_check_if_namespace(orig_klass);
        if (is_defined) {
            return rb_public_const_defined_from(orig_klass, id);
        }
        else {
            return rb_public_const_get_from(orig_klass, id);
        }
    }
}

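/* vm_get_cvar_base: class used for class-variable access: the first CREF entry
 * whose class is neither nil, a singleton class, nor pushed by eval.  Warns when
 * this resolves to the toplevel scope. */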
static inline VALUE
vm_get_cvar_base(const rb_cref_t *cref, rb_control_frame_t *cfp)
{
    VALUE klass;

    if (!cref) {
        rb_bug("vm_get_cvar_base: no cref");
    }

    while (CREF_NEXT(cref) &&
           (NIL_P(CREF_CLASS(cref)) || FL_TEST(CREF_CLASS(cref), FL_SINGLETON) ||
            CREF_PUSHED_BY_EVAL(cref))) {
        cref = CREF_NEXT(cref);
    }
    if (!CREF_NEXT(cref)) {
        rb_warn("class variable access from toplevel");
    }

    klass = vm_get_iclass(cfp, CREF_CLASS(cref));

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class variables available");
    }
    return klass;
}

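/* vm_search_const_defined_class: return the class or module in which the constant
 * is actually defined, checking cbase itself and, for ::Object, its super chain. */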
static VALUE
vm_search_const_defined_class(const VALUE cbase, ID id)
{
    if (rb_const_defined_at(cbase, id)) return cbase;
    if (cbase == rb_cObject) {
        VALUE tmp = RCLASS_SUPER(cbase);
        while (tmp) {
            if (rb_const_defined_at(tmp, id)) return tmp;
            tmp = RCLASS_SUPER(tmp);
        }
    }
    return 0;
}

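/* vm_getivar: instance-variable read with a per-site inline cache.  On a hit
 * (matching class serial for an IC, or an already-filled slot in the call cache
 * for attr_reader) the value is read directly from the object's ivar array at the
 * cached index; on a miss the class's iv index table is consulted and the cache
 * is refilled.  Non-T_OBJECT receivers go through rb_ivar_get(). */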
#ifndef USE_IC_FOR_IVAR
#define USE_IC_FOR_IVAR 1
#endif

ALWAYS_INLINE(static VALUE vm_getivar(VALUE, ID, IC, struct rb_call_cache *, int));
static inline VALUE
vm_getivar(VALUE obj, ID id, IC ic, struct rb_call_cache *cc, int is_attr)
{
#if USE_IC_FOR_IVAR
    if (LIKELY(RB_TYPE_P(obj, T_OBJECT))) {
        VALUE val = Qundef;
        if (LIKELY(is_attr ?
                   RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_unset, cc->aux.index > 0) :
                   RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_serial,
                                               ic->ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass)))) {
            st_index_t index = !is_attr ? ic->ic_value.index : (cc->aux.index - 1);
            if (LIKELY(index < ROBJECT_NUMIV(obj))) {
                val = ROBJECT_IVPTR(obj)[index];
            }
          undef_check:
            if (UNLIKELY(val == Qundef)) {
                if (!is_attr && RTEST(ruby_verbose))
                    rb_warning("instance variable %"PRIsVALUE" not initialized", QUOTE_ID(id));
                val = Qnil;
            }
            RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
            return val;
        }
        else {
            st_data_t index;
            struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);

            if (iv_index_tbl) {
                if (st_lookup(iv_index_tbl, id, &index)) {
                    if (index < ROBJECT_NUMIV(obj)) {
                        val = ROBJECT_IVPTR(obj)[index];
                    }
                    if (!is_attr) {
                        ic->ic_value.index = index;
                        ic->ic_serial = RCLASS_SERIAL(RBASIC(obj)->klass);
                    }
                    else { /* call_info */
                        cc->aux.index = (int)index + 1;
                    }
                }
            }
            goto undef_check;
        }
    }
    else {
        RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_noobject);
    }
#endif /* USE_IC_FOR_IVAR */
    RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);

    if (is_attr)
        return rb_attr_get(obj, id);
    return rb_ivar_get(obj, id);
}

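/* vm_setivar: write-side counterpart of vm_getivar().  On a cache hit the value
 * is stored with RB_OBJ_WRITE at the cached index; otherwise the cache is
 * refilled from the iv index table and the call falls back to rb_ivar_set(). */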
static inline VALUE
vm_setivar(VALUE obj, ID id, VALUE val, IC ic, struct rb_call_cache *cc, int is_attr)
{
#if USE_IC_FOR_IVAR
    rb_check_frozen(obj);

    if (LIKELY(RB_TYPE_P(obj, T_OBJECT))) {
        VALUE klass = RBASIC(obj)->klass;
        st_data_t index;

        if (LIKELY(
            (!is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_serial, ic->ic_serial == RCLASS_SERIAL(klass))) ||
            ( is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_unset, cc->aux.index > 0)))) {
            VALUE *ptr = ROBJECT_IVPTR(obj);
            index = !is_attr ? ic->ic_value.index : cc->aux.index-1;

            if (RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_oorange, index < ROBJECT_NUMIV(obj))) {
                RB_OBJ_WRITE(obj, &ptr[index], val);
                RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
                return val; /* inline cache hit */
            }
        }
        else {
            struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);

            if (iv_index_tbl && st_lookup(iv_index_tbl, (st_data_t)id, &index)) {
                if (!is_attr) {
                    ic->ic_value.index = index;
                    ic->ic_serial = RCLASS_SERIAL(klass);
                }
                else if (index >= INT_MAX) {
                    rb_raise(rb_eArgError, "too many instance variables");
                }
                else {
                    cc->aux.index = (int)(index + 1);
                }
            }
            /* fall through */
        }
    }
    else {
        RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
    }
#endif /* USE_IC_FOR_IVAR */
    RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
    return rb_ivar_set(obj, id, val);
}

static inline VALUE
vm_getinstancevariable(VALUE obj, ID id, IC ic)
{
    return vm_getivar(obj, id, ic, 0, 0);
}

static inline void
vm_setinstancevariable(VALUE obj, ID id, VALUE val, IC ic)
{
    vm_setivar(obj, id, val, ic, 0, 0);
}

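/* throw instruction helpers: vm_throw_continue() propagates an already-created
 * throw object, while vm_throw_start() builds a new one for break/retry/return
 * after locating the control frame that should catch it. */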
static VALUE
vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
{
    /* continue throw */

    if (FIXNUM_P(err)) {
        ec->tag->state = FIX2INT(err);
    }
    else if (SYMBOL_P(err)) {
        ec->tag->state = TAG_THROW;
    }
    else if (THROW_DATA_P(err)) {
        ec->tag->state = THROW_DATA_STATE((struct vm_throw_data *)err);
    }
    else {
        ec->tag->state = TAG_RAISE;
    }
    return err;
}

|
2009-12-03 13:25:57 -05:00
|
|
|
|
2015-01-15 21:54:22 -05:00
|
|
|
static VALUE
|
2017-10-26 21:35:12 -04:00
|
|
|
vm_throw_start(const rb_execution_context_t *ec, rb_control_frame_t *const reg_cfp, enum ruby_tag_type state,
|
2015-07-19 20:08:23 -04:00
|
|
|
const int flag, const rb_num_t level, const VALUE throwobj)
|
2015-01-15 21:54:22 -05:00
|
|
|
{
|
2016-07-28 07:02:30 -04:00
|
|
|
const rb_control_frame_t *escape_cfp = NULL;
|
2017-10-26 21:35:12 -04:00
|
|
|
const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
|
2015-01-15 21:54:22 -05:00
|
|
|
|
|
|
|
if (flag != 0) {
|
|
|
|
/* do nothing */
|
|
|
|
}
|
|
|
|
else if (state == TAG_BREAK) {
|
|
|
|
int is_orphan = 1;
|
2016-07-28 07:02:30 -04:00
|
|
|
const VALUE *ep = GET_EP();
|
2015-07-16 09:13:50 -04:00
|
|
|
const rb_iseq_t *base_iseq = GET_ISEQ();
|
2015-01-15 21:54:22 -05:00
|
|
|
escape_cfp = reg_cfp;
|
|
|
|
|
2015-07-21 18:52:59 -04:00
|
|
|
while (base_iseq->body->type != ISEQ_TYPE_BLOCK) {
|
|
|
|
if (escape_cfp->iseq->body->type == ISEQ_TYPE_CLASS) {
|
2015-01-15 21:54:22 -05:00
|
|
|
escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
|
|
|
|
ep = escape_cfp->ep;
|
|
|
|
base_iseq = escape_cfp->iseq;
|
|
|
|
}
|
|
|
|
else {
|
2016-07-28 07:02:30 -04:00
|
|
|
ep = VM_ENV_PREV_EP(ep);
|
2015-07-21 18:52:59 -04:00
|
|
|
base_iseq = base_iseq->body->parent_iseq;
|
2017-10-26 21:35:12 -04:00
|
|
|
escape_cfp = rb_vm_search_cf_from_ep(ec, escape_cfp, ep);
|
2015-06-10 19:42:01 -04:00
|
|
|
VM_ASSERT(escape_cfp->iseq == base_iseq);
|
2015-01-15 21:54:22 -05:00
|
|
|
}
|
|
|
|
}
|
2008-04-03 06:59:44 -04:00
|
|
|
|
2017-06-03 06:07:44 -04:00
|
|
|
if (VM_FRAME_LAMBDA_P(escape_cfp)) {
|
2015-01-15 21:54:22 -05:00
|
|
|
/* lambda{... break ...} */
|
|
|
|
is_orphan = 0;
|
|
|
|
state = TAG_RETURN;
|
|
|
|
}
|
|
|
|
else {
|
2016-07-28 07:02:30 -04:00
|
|
|
ep = VM_ENV_PREV_EP(ep);
|
2015-01-15 21:54:22 -05:00
|
|
|
|
|
|
|
while (escape_cfp < eocfp) {
|
|
|
|
if (escape_cfp->ep == ep) {
|
2017-06-26 23:45:55 -04:00
|
|
|
const rb_iseq_t *const iseq = escape_cfp->iseq;
|
|
|
|
const VALUE epc = escape_cfp->pc - iseq->body->iseq_encoded;
|
|
|
|
const struct iseq_catch_table *const ct = iseq->body->catch_table;
|
|
|
|
unsigned int i;
|
2015-01-15 21:54:22 -05:00
|
|
|
|
2017-06-26 23:45:55 -04:00
|
|
|
if (!ct) break;
|
|
|
|
for (i=0; i < ct->size; i++) {
|
2016-09-17 09:46:07 -04:00
|
|
|
const struct iseq_catch_table_entry * const entry = &ct->entries[i];
|
2015-01-15 21:54:22 -05:00
|
|
|
|
2017-06-08 00:13:51 -04:00
|
|
|
if (entry->type == CATCH_TYPE_BREAK &&
|
|
|
|
entry->iseq == base_iseq &&
|
|
|
|
entry->start < epc && entry->end >= epc) {
|
2015-01-15 21:54:22 -05:00
|
|
|
if (entry->cont == epc) { /* found! */
|
|
|
|
is_orphan = 0;
|
2012-10-14 15:58:59 -04:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2015-01-15 21:54:22 -05:00
|
|
|
break;
|
2012-10-14 15:58:59 -04:00
|
|
|
}
|
2007-06-24 13:19:22 -04:00
|
|
|
|
2015-01-15 21:54:22 -05:00
|
|
|
escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
|
2011-03-31 05:07:42 -04:00
|
|
|
}
|
2015-01-15 21:54:22 -05:00
|
|
|
}
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2015-01-15 21:54:22 -05:00
|
|
|
if (is_orphan) {
|
|
|
|
rb_vm_localjump_error("break from proc-closure", throwobj, TAG_BREAK);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (state == TAG_RETRY) {
|
|
|
|
rb_num_t i;
|
2016-07-28 07:02:30 -04:00
|
|
|
const VALUE *ep = VM_ENV_PREV_EP(GET_EP());
|
2015-01-15 21:54:22 -05:00
|
|
|
|
|
|
|
for (i = 0; i < level; i++) {
|
2016-07-28 07:02:30 -04:00
|
|
|
ep = VM_ENV_PREV_EP(ep);
|
2015-01-15 21:54:22 -05:00
|
|
|
}
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2017-10-26 21:35:12 -04:00
|
|
|
escape_cfp = rb_vm_search_cf_from_ep(ec, reg_cfp, ep);
|
2015-01-15 21:54:22 -05:00
|
|
|
}
|
|
|
|
else if (state == TAG_RETURN) {
|
2016-07-28 07:02:30 -04:00
|
|
|
const VALUE *current_ep = GET_EP();
|
|
|
|
const VALUE *target_lep = VM_EP_LEP(current_ep);
|
2015-01-15 21:54:22 -05:00
|
|
|
int in_class_frame = 0;
|
2017-06-27 02:57:34 -04:00
|
|
|
int toplevel = 1;
|
2015-01-15 21:54:22 -05:00
|
|
|
escape_cfp = reg_cfp;
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2015-01-15 21:54:22 -05:00
|
|
|
while (escape_cfp < eocfp) {
|
2016-07-28 07:02:30 -04:00
|
|
|
const VALUE *lep = VM_CF_LEP(escape_cfp);
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2015-01-15 21:54:22 -05:00
|
|
|
if (!target_lep) {
|
|
|
|
target_lep = lep;
|
|
|
|
}
|
|
|
|
|
2016-06-06 05:37:59 -04:00
|
|
|
if (lep == target_lep &&
|
2016-08-02 21:50:50 -04:00
|
|
|
VM_FRAME_RUBYFRAME_P(escape_cfp) &&
|
2016-06-06 05:37:59 -04:00
|
|
|
escape_cfp->iseq->body->type == ISEQ_TYPE_CLASS) {
|
2015-01-15 21:54:22 -05:00
|
|
|
in_class_frame = 1;
|
|
|
|
target_lep = 0;
|
|
|
|
}
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2015-01-15 21:54:22 -05:00
|
|
|
if (lep == target_lep) {
|
2017-06-03 06:07:44 -04:00
|
|
|
if (VM_FRAME_LAMBDA_P(escape_cfp)) {
|
2017-06-27 02:57:34 -04:00
|
|
|
toplevel = 0;
|
2015-01-15 21:54:22 -05:00
|
|
|
if (in_class_frame) {
|
|
|
|
/* lambda {class A; ... return ...; end} */
|
2012-10-14 15:58:59 -04:00
|
|
|
goto valid_return;
|
|
|
|
}
|
2015-01-15 21:54:22 -05:00
|
|
|
else {
|
2016-07-28 07:02:30 -04:00
|
|
|
const VALUE *tep = current_ep;
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2015-01-15 21:54:22 -05:00
|
|
|
while (target_lep != tep) {
|
|
|
|
if (escape_cfp->ep == tep) {
|
|
|
|
/* in lambda */
|
|
|
|
goto valid_return;
|
|
|
|
}
|
2016-07-28 07:02:30 -04:00
|
|
|
tep = VM_ENV_PREV_EP(tep);
|
2015-01-15 21:54:22 -05:00
|
|
|
}
|
|
|
|
}
|
2007-06-24 13:19:22 -04:00
|
|
|
}
|
2017-06-27 02:57:34 -04:00
|
|
|
else if (VM_FRAME_RUBYFRAME_P(escape_cfp)) {
|
|
|
|
switch (escape_cfp->iseq->body->type) {
|
|
|
|
case ISEQ_TYPE_TOP:
|
|
|
|
case ISEQ_TYPE_MAIN:
|
|
|
|
if (toplevel) goto valid_return;
|
|
|
|
break;
|
|
|
|
case ISEQ_TYPE_EVAL:
|
|
|
|
case ISEQ_TYPE_CLASS:
|
|
|
|
toplevel = 0;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2012-10-14 15:58:59 -04:00
|
|
|
}
|
2015-01-15 21:54:22 -05:00
|
|
|
|
2015-07-21 18:52:59 -04:00
|
|
|
if (escape_cfp->ep == target_lep && escape_cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
|
2015-01-15 21:54:22 -05:00
|
|
|
goto valid_return;
|
2012-10-14 15:58:59 -04:00
|
|
|
}
|
2015-01-15 21:54:22 -05:00
|
|
|
|
|
|
|
escape_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(escape_cfp);
|
2007-06-24 13:19:22 -04:00
|
|
|
}
|
2015-01-15 21:54:22 -05:00
|
|
|
rb_vm_localjump_error("unexpected return", throwobj, TAG_RETURN);
|
|
|
|
|
|
|
|
valid_return:;
|
|
|
|
/* do nothing */
|
2007-06-24 13:19:22 -04:00
|
|
|
}
|
|
|
|
else {
|
2015-01-15 21:54:22 -05:00
|
|
|
rb_bug("isns(throw): unsupport throw type");
|
|
|
|
}
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2017-10-26 21:35:12 -04:00
|
|
|
ec->tag->state = state;
|
2015-03-11 08:49:27 -04:00
|
|
|
return (VALUE)THROW_DATA_NEW(throwobj, escape_cfp, state);
|
2015-01-15 21:54:22 -05:00
|
|
|
}
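/*
 * Rough sketch of what vm_throw_start resolves (illustrative, not a spec):
 *
 *   [1, 2, 3].each { break 42 }          # TAG_BREAK, caught via the caller's catch table
 *   lambda { break 42 }.call             # TAG_BREAK inside a lambda becomes TAG_RETURN
 *   def m; [1].each { return 1 }; end    # TAG_RETURN, unwinds to the enclosing method frame
 *
 * If the frame a break/return targets has already been popped, the jump is
 * "orphan" and rb_vm_localjump_error() raises a LocalJumpError instead.
 */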

static VALUE
vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
         rb_num_t throw_state, VALUE throwobj)
{
    const int state = (int)(throw_state & VM_THROW_STATE_MASK);
    const int flag = (int)(throw_state & VM_THROW_NO_ESCAPE_FLAG);
    const rb_num_t level = throw_state >> VM_THROW_LEVEL_SHIFT;

    if (state != 0) {
        return vm_throw_start(ec, reg_cfp, state, flag, level, throwobj);
    }
    else {
        return vm_throw_continue(ec, throwobj);
    }
}

static inline void
vm_expandarray(rb_control_frame_t *cfp, VALUE ary, rb_num_t num, int flag)
{
    int is_splat = flag & 0x01;
    rb_num_t space_size = num + is_splat;
    VALUE *base = cfp->sp;
    const VALUE *ptr;
    rb_num_t len;

    if (!RB_TYPE_P(ary, T_ARRAY)) {
        ary = rb_ary_to_ary(ary);
    }

    cfp->sp += space_size;

    ptr = RARRAY_CONST_PTR(ary);
    len = (rb_num_t)RARRAY_LEN(ary);

    if (flag & 0x02) {
        /* post: ..., nil, ary[-1], ..., ary[0..-num] # top */
        rb_num_t i = 0, j;

        if (len < num) {
            for (i=0; i<num-len; i++) {
                *base++ = Qnil;
            }
        }
        for (j=0; i<num; i++, j++) {
            VALUE v = ptr[len - j - 1];
            *base++ = v;
        }
        if (is_splat) {
            *base = rb_ary_new4(len - j, ptr);
        }
    }
    else {
        /* normal: ary[num..-1], ary[num-2], ary[num-3], ..., ary[0] # top */
        rb_num_t i;
        VALUE *bptr = &base[space_size - 1];

        for (i=0; i<num; i++) {
            if (len <= i) {
                for (; i<num; i++) {
                    *bptr-- = Qnil;
                }
                break;
            }
            *bptr-- = ptr[i];
        }
        if (is_splat) {
            if (num > len) {
                *bptr = rb_ary_new();
            }
            else {
                *bptr = rb_ary_new4(len - num, ptr + num);
            }
        }
    }
    RB_GC_GUARD(ary);
}
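/*
 * Worked example (illustrative): `a, b, *c = [1, 2, 3, 4]` expands the array
 * with num = 2 and is_splat = 1 (no 0x02 "post" flag), so three slots are
 * pushed and, from stack bottom to top, they hold:
 *
 *     [3, 4]   -- the rest array, later assigned to c
 *     2        -- b
 *     1        -- a  (stack top, consumed first)
 *
 * With the 0x02 flag the order is reversed for post arguments, and missing
 * elements are padded with nil in both modes.
 */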

static VALUE vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);

static void
vm_search_method(const struct rb_call_info *ci, struct rb_call_cache *cc, VALUE recv)
{
    VALUE klass = CLASS_OF(recv);

#if OPT_INLINE_METHOD_CACHE
    if (LIKELY(RB_DEBUG_COUNTER_INC_UNLESS(mc_global_state_miss,
                                           GET_GLOBAL_METHOD_STATE() == cc->method_state) &&
               RB_DEBUG_COUNTER_INC_UNLESS(mc_class_serial_miss,
                                           RCLASS_SERIAL(klass) == cc->class_serial))) {
        /* cache hit! */
        VM_ASSERT(cc->call != NULL);
        RB_DEBUG_COUNTER_INC(mc_inline_hit);
        return;
    }
    RB_DEBUG_COUNTER_INC(mc_inline_miss);
#endif
    cc->me = rb_callable_method_entry(klass, ci->mid);
    VM_ASSERT(callable_method_entry_p(cc->me));
    cc->call = vm_call_general;
#if OPT_INLINE_METHOD_CACHE
    cc->method_state = GET_GLOBAL_METHOD_STATE();
    cc->class_serial = RCLASS_SERIAL(klass);
#endif
}
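/*
 * Cache validity, roughly: the inline cache above is trusted only while both
 * the process-wide method state and the receiver class' serial still equal
 * the values captured when the cache was filled.  Method definition/removal
 * or other invalidating events bump one of those counters, forcing the slow
 * rb_callable_method_entry() lookup on the next call through this site.
 */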

static inline int
check_cfunc(const rb_callable_method_entry_t *me, VALUE (*func)())
{
    if (me && me->def->type == VM_METHOD_TYPE_CFUNC &&
        me->def->body.cfunc.func == func) {
        return 1;
    }
    else {
        return 0;
    }
}

static inline int
|
|
|
|
vm_method_cfunc_is(CALL_INFO ci, CALL_CACHE cc,
|
|
|
|
VALUE recv, VALUE (*func)())
|
|
|
|
{
|
|
|
|
vm_search_method(ci, cc, recv);
|
|
|
|
return check_cfunc(cc->me, func);
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
opt_equal_fallback(VALUE recv, VALUE obj, CALL_INFO ci, CALL_CACHE cc)
|
|
|
|
{
|
|
|
|
if (vm_method_cfunc_is(ci, cc, recv, rb_obj_equal)) {
|
|
|
|
return recv == obj ? Qtrue : Qfalse;
|
|
|
|
}
|
|
|
|
|
|
|
|
return Qundef;
|
|
|
|
}
|
|
|
|
|
2017-05-25 01:29:35 -04:00
|
|
|
#define BUILTIN_CLASS_P(x, k) (!SPECIAL_CONST_P(x) && RBASIC_CLASS(x) == k)
|
|
|
|
#define EQ_UNREDEFINED_P(t) BASIC_OP_UNREDEFINED_P(BOP_EQ, t##_REDEFINED_OP_FLAG)
|
|
|
|
|
2017-05-26 02:28:38 -04:00
|
|
|
/* 1: compare by identity, 0: not applicable, -1: redefined */
|
|
|
|
static inline int
|
|
|
|
comparable_by_identity(VALUE recv, VALUE obj)
|
|
|
|
{
|
|
|
|
if (FIXNUM_2_P(recv, obj)) {
|
|
|
|
return (EQ_UNREDEFINED_P(INTEGER) != 0) * 2 - 1;
|
|
|
|
}
|
|
|
|
if (FLONUM_2_P(recv, obj)) {
|
|
|
|
return (EQ_UNREDEFINED_P(FLOAT) != 0) * 2 - 1;
|
|
|
|
}
|
|
|
|
if (SYMBOL_P(recv) && SYMBOL_P(obj)) {
|
|
|
|
return (EQ_UNREDEFINED_P(SYMBOL) != 0) * 2 - 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
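/*
 * For reference, the encoding produced above: `(EQ_UNREDEFINED_P(t) != 0) * 2 - 1`
 * is 1 when the relevant #== is still the builtin (compare the VALUEs directly)
 * and -1 when it has been redefined (take the method-call fallback); 0 means
 * the operands are not one of the handled immediate pairs at all.
 */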
|
|
|
|
|
2012-10-14 15:58:59 -04:00
|
|
|
static
|
|
|
|
#ifndef NO_BIG_INLINE
|
|
|
|
inline
|
|
|
|
#endif
|
|
|
|
VALUE
|
2015-09-19 13:59:58 -04:00
|
|
|
opt_eq_func(VALUE recv, VALUE obj, CALL_INFO ci, CALL_CACHE cc)
|
2012-10-14 15:58:59 -04:00
|
|
|
{
|
2017-05-26 02:28:38 -04:00
|
|
|
switch (comparable_by_identity(recv, obj)) {
|
|
|
|
case 1:
|
|
|
|
return (recv == obj) ? Qtrue : Qfalse;
|
|
|
|
case -1:
|
|
|
|
goto fallback;
|
2017-03-06 01:44:11 -05:00
|
|
|
}
|
2017-05-26 02:28:38 -04:00
|
|
|
if (0) {
|
2017-03-06 01:44:11 -05:00
|
|
|
}
|
|
|
|
else if (BUILTIN_CLASS_P(recv, rb_cFloat)) {
|
|
|
|
if (EQ_UNREDEFINED_P(FLOAT)) {
|
|
|
|
return rb_float_equal(recv, obj);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (BUILTIN_CLASS_P(recv, rb_cString)) {
|
|
|
|
if (EQ_UNREDEFINED_P(STRING)) {
|
2012-10-14 15:58:59 -04:00
|
|
|
return rb_str_equal(recv, obj);
|
|
|
|
}
|
|
|
|
}
|
2017-05-25 01:29:35 -04:00
|
|
|
|
2017-05-26 02:28:38 -04:00
|
|
|
fallback:
|
|
|
|
return opt_equal_fallback(recv, obj, ci, cc);
|
2017-05-25 01:29:35 -04:00
|
|
|
}
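/*
 * Illustrative fast paths taken by opt_eq_func for `recv == obj`, assuming the
 * corresponding BOP_EQ redefinition flags are still clear:
 *
 *   Fixnum/Flonum/Symbol pairs  -> identity comparison of the tagged VALUEs
 *   Float (non-flonum) operands -> rb_float_equal()
 *   String operands             -> rb_str_equal()
 *   everything else, or a redefined #==, falls back to opt_equal_fallback(),
 *   i.e. the ordinary method dispatch path.
 */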
|
|
|
|
|
|
|
|
static
|
|
|
|
#ifndef NO_BIG_INLINE
|
|
|
|
inline
|
|
|
|
#endif
|
|
|
|
VALUE
|
|
|
|
opt_eql_func(VALUE recv, VALUE obj, CALL_INFO ci, CALL_CACHE cc)
|
|
|
|
{
|
2017-05-26 02:28:38 -04:00
|
|
|
switch (comparable_by_identity(recv, obj)) {
|
|
|
|
case 1:
|
|
|
|
return (recv == obj) ? Qtrue : Qfalse;
|
|
|
|
case -1:
|
|
|
|
goto fallback;
|
2017-05-25 01:29:35 -04:00
|
|
|
}
|
2017-05-26 02:28:38 -04:00
|
|
|
if (0) {
|
2017-05-25 01:29:35 -04:00
|
|
|
}
|
|
|
|
else if (BUILTIN_CLASS_P(recv, rb_cFloat)) {
|
|
|
|
if (EQ_UNREDEFINED_P(FLOAT)) {
|
|
|
|
return rb_float_eql(recv, obj);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (BUILTIN_CLASS_P(recv, rb_cString)) {
|
|
|
|
if (EQ_UNREDEFINED_P(STRING)) {
|
|
|
|
return rb_str_eql(recv, obj);
|
|
|
|
}
|
|
|
|
}
|
2009-09-06 03:40:24 -04:00
|
|
|
|
2017-05-26 02:28:38 -04:00
|
|
|
fallback:
|
|
|
|
return opt_equal_fallback(recv, obj, ci, cc);
|
2009-09-06 03:40:24 -04:00
|
|
|
}
|
2017-05-25 03:36:47 -04:00
|
|
|
#undef BUILTIN_CLASS_P
|
|
|
|
#undef EQ_UNREDEFINED_P
|
2009-09-06 03:40:24 -04:00
|
|
|
|
2013-08-27 03:46:08 -04:00
|
|
|
VALUE
|
|
|
|
rb_equal_opt(VALUE obj1, VALUE obj2)
|
|
|
|
{
|
2015-09-19 13:59:58 -04:00
|
|
|
struct rb_call_info ci;
|
|
|
|
struct rb_call_cache cc;
|
|
|
|
|
2013-08-27 03:46:08 -04:00
|
|
|
ci.mid = idEq;
|
2015-09-19 13:59:58 -04:00
|
|
|
cc.method_state = 0;
|
|
|
|
cc.class_serial = 0;
|
|
|
|
cc.me = NULL;
|
|
|
|
return opt_eq_func(obj1, obj2, &ci, &cc);
|
2013-08-27 03:46:08 -04:00
|
|
|
}
|
|
|
|
|
2017-05-25 00:25:39 -04:00
|
|
|
VALUE
|
|
|
|
rb_eql_opt(VALUE obj1, VALUE obj2)
|
|
|
|
{
|
|
|
|
struct rb_call_info ci;
|
|
|
|
struct rb_call_cache cc;
|
|
|
|
|
|
|
|
ci.mid = idEqlP;
|
|
|
|
cc.method_state = 0;
|
|
|
|
cc.class_serial = 0;
|
|
|
|
cc.me = NULL;
|
2017-05-25 01:29:35 -04:00
|
|
|
return opt_eql_func(obj1, obj2, &ci, &cc);
|
2017-05-25 00:25:39 -04:00
|
|
|
}
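/*
 * rb_equal_opt()/rb_eql_opt() deliberately start with a zeroed call cache, so
 * each call re-runs the method-state/class-serial check from scratch; they
 * simply let C-level callers reuse the same specialized ==/eql? fast paths
 * that the bytecode instructions use.
 */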
|
|
|
|
|
2017-10-26 22:49:30 -04:00
|
|
|
static VALUE vm_call0(rb_execution_context_t *ec, VALUE, ID, int, const VALUE*, const rb_callable_method_entry_t *);
|
2013-09-07 02:44:31 -04:00
|
|
|
|
2012-10-14 15:58:59 -04:00
|
|
|
static VALUE
|
2017-11-16 01:10:31 -05:00
|
|
|
check_match(rb_execution_context_t *ec, VALUE pattern, VALUE target, enum vm_check_match_type type)
|
2012-10-14 15:58:59 -04:00
|
|
|
{
|
|
|
|
switch (type) {
|
|
|
|
case VM_CHECKMATCH_TYPE_WHEN:
|
|
|
|
return pattern;
|
2013-09-07 02:44:31 -04:00
|
|
|
case VM_CHECKMATCH_TYPE_RESCUE:
|
2012-10-14 15:58:59 -04:00
|
|
|
if (!rb_obj_is_kind_of(pattern, rb_cModule)) {
|
|
|
|
rb_raise(rb_eTypeError, "class or module required for rescue clause");
|
|
|
|
}
|
2013-09-07 02:44:31 -04:00
|
|
|
/* fall through */
|
|
|
|
case VM_CHECKMATCH_TYPE_CASE: {
|
2017-10-06 01:55:11 -04:00
|
|
|
const rb_callable_method_entry_t *me =
|
|
|
|
rb_callable_method_entry_with_refinements(CLASS_OF(pattern), idEqq, NULL);
|
2013-11-29 03:57:02 -05:00
|
|
|
if (me) {
|
2017-11-16 01:10:31 -05:00
|
|
|
return vm_call0(ec, pattern, idEqq, 1, &target, me);
|
2013-11-29 03:57:02 -05:00
|
|
|
}
|
|
|
|
else {
|
2014-02-08 04:20:33 -05:00
|
|
|
/* fallback to funcall (e.g. method_missing) */
|
2016-07-29 07:57:14 -04:00
|
|
|
return rb_funcallv(pattern, idEqq, 1, &target);
|
2013-11-29 03:57:02 -05:00
|
|
|
}
|
2012-10-14 15:58:59 -04:00
|
|
|
}
|
|
|
|
default:
|
|
|
|
rb_bug("check_match: unreachable");
|
|
|
|
}
|
|
|
|
}
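/*
 * Rough mapping of vm_check_match_type to Ruby forms (illustrative):
 *
 *   VM_CHECKMATCH_TYPE_WHEN   -- `case; when cond` with no case operand:
 *                                the pattern's own truthiness decides
 *   VM_CHECKMATCH_TYPE_CASE   -- `case x; when pat`: evaluates pat === x
 *   VM_CHECKMATCH_TYPE_RESCUE -- `rescue SomeClass`: the pattern must be a
 *                                Class/Module, then pat === x as above
 */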
|
2007-08-06 07:36:30 -04:00
|
|
|
|
|
|
|
|
2012-10-14 15:58:59 -04:00
|
|
|
#if defined(_MSC_VER) && _MSC_VER < 1300
|
|
|
|
#define CHECK_CMP_NAN(a, b) if (isnan(a) || isnan(b)) return Qfalse;
|
|
|
|
#else
|
|
|
|
#define CHECK_CMP_NAN(a, b) /* do nothing */
|
|
|
|
#endif
|
2007-08-06 07:36:30 -04:00
|
|
|
|
2012-10-14 15:58:59 -04:00
|
|
|
static inline VALUE
|
|
|
|
double_cmp_lt(double a, double b)
|
|
|
|
{
|
|
|
|
CHECK_CMP_NAN(a, b);
|
|
|
|
return a < b ? Qtrue : Qfalse;
|
|
|
|
}
|
2007-08-06 07:36:30 -04:00
|
|
|
|
2012-10-14 15:58:59 -04:00
|
|
|
static inline VALUE
|
|
|
|
double_cmp_le(double a, double b)
|
|
|
|
{
|
|
|
|
CHECK_CMP_NAN(a, b);
|
|
|
|
return a <= b ? Qtrue : Qfalse;
|
|
|
|
}
|
2007-08-06 07:36:30 -04:00
|
|
|
|
2012-10-14 15:58:59 -04:00
|
|
|
static inline VALUE
|
|
|
|
double_cmp_gt(double a, double b)
|
|
|
|
{
|
|
|
|
CHECK_CMP_NAN(a, b);
|
|
|
|
return a > b ? Qtrue : Qfalse;
|
|
|
|
}
|
2012-06-10 23:14:59 -04:00
|
|
|
|
2012-10-14 15:58:59 -04:00
|
|
|
static inline VALUE
|
|
|
|
double_cmp_ge(double a, double b)
|
|
|
|
{
|
|
|
|
CHECK_CMP_NAN(a, b);
|
|
|
|
return a >= b ? Qtrue : Qfalse;
|
|
|
|
}
|
2012-06-10 23:14:59 -04:00
|
|
|
|
2012-10-14 15:58:59 -04:00
|
|
|
static VALUE *
|
2016-07-28 07:02:30 -04:00
|
|
|
vm_base_ptr(const rb_control_frame_t *cfp)
|
2012-10-14 15:58:59 -04:00
|
|
|
{
|
2016-07-28 07:02:30 -04:00
|
|
|
const rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
|
2008-06-17 15:27:24 -04:00
|
|
|
|
2016-08-02 21:50:50 -04:00
|
|
|
if (cfp->iseq && VM_FRAME_RUBYFRAME_P(cfp)) {
|
2016-07-28 07:02:30 -04:00
|
|
|
VALUE *bp = prev_cfp->sp + cfp->iseq->body->local_table_size + VM_ENV_DATA_SIZE;
|
|
|
|
if (cfp->iseq->body->type == ISEQ_TYPE_METHOD) {
|
|
|
|
/* adjust `self' */
|
|
|
|
bp += 1;
|
|
|
|
}
|
2012-10-14 15:58:59 -04:00
|
|
|
#if VM_DEBUG_BP_CHECK
|
2016-07-28 07:02:30 -04:00
|
|
|
if (bp != cfp->bp_check) {
|
|
|
|
fprintf(stderr, "bp_check: %ld, bp: %ld\n",
|
2017-10-26 10:44:09 -04:00
|
|
|
(long)(cfp->bp_check - GET_EC()->vm_stack),
|
|
|
|
(long)(bp - GET_EC()->vm_stack));
|
2016-07-28 07:02:30 -04:00
|
|
|
rb_bug("vm_base_ptr: unreachable");
|
|
|
|
}
|
2012-10-14 15:58:59 -04:00
|
|
|
#endif
|
2016-07-28 07:02:30 -04:00
|
|
|
return bp;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
return NULL;
|
|
|
|
}
|
2012-10-14 15:58:59 -04:00
|
|
|
}
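/*
 * Layout note (a sketch): for a Ruby-level frame, vm_base_ptr() recomputes the
 * start of the value-stack region owned by this frame -- the previous frame's
 * sp plus this iseq's local table plus the VM_ENV_DATA_SIZE bookkeeping slots,
 * with one extra slot for `self` in method frames.  C frames have no such
 * region, hence NULL.
 */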
|
2008-06-17 15:27:24 -04:00
|
|
|
|
2012-10-14 15:58:59 -04:00
|
|
|
/* method call processes with call_info */

#include "vm_args.c"
|
2012-11-13 03:34:43 -05:00
|
|
|
|
2017-10-26 22:49:30 -04:00
|
|
|
static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, int opt_pc, int param_size, int local_size);
|
|
|
|
static inline VALUE vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, int opt_pc, int param_size, int local_size);
|
|
|
|
static inline VALUE vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, int opt_pc);
|
|
|
|
static VALUE vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);
|
|
|
|
static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);
|
|
|
|
static VALUE vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);
|
|
|
|
static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);
|
2015-10-05 15:44:05 -04:00
|
|
|
|
2015-10-23 13:53:35 -04:00
|
|
|
static vm_call_handler vm_call_iseq_setup_func(const struct rb_call_info *ci, const int param_size, const int local_size);
|
|
|
|
|
2015-10-05 15:44:05 -04:00
|
|
|
static rb_method_definition_t *method_definition_create(rb_method_type_t type, ID mid);
|
|
|
|
static void method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *def, void *opts);
|
|
|
|
static int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2015-10-23 13:53:35 -04:00
|
|
|
static const rb_iseq_t *
|
|
|
|
def_iseq_ptr(rb_method_definition_t *def)
|
|
|
|
{
|
|
|
|
#if VM_CHECK_MODE > 0
|
|
|
|
if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
|
|
|
|
#endif
|
2015-12-08 08:58:50 -05:00
|
|
|
return rb_iseq_check(def->body.iseq.iseqptr);
|
2015-10-23 13:53:35 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_call_iseq_setup_tailcall_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
2015-10-23 13:53:35 -04:00
|
|
|
{
|
2017-10-26 22:49:30 -04:00
|
|
|
return vm_call_iseq_setup_tailcall(ec, cfp, calling, ci, cc, 0);
|
2015-10-23 13:53:35 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_call_iseq_setup_normal_0start(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
2015-10-23 13:53:35 -04:00
|
|
|
{
|
|
|
|
const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
|
|
|
|
int param = iseq->body->param.size;
|
2016-07-28 07:02:30 -04:00
|
|
|
int local = iseq->body->local_table_size;
|
2017-10-26 22:49:30 -04:00
|
|
|
return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, 0, param, local);
|
2015-10-23 13:53:35 -04:00
|
|
|
}
|
|
|
|
|
2015-12-22 06:20:12 -05:00
|
|
|
static inline int
|
|
|
|
simple_iseq_p(const rb_iseq_t *iseq)
|
|
|
|
{
|
|
|
|
return iseq->body->param.flags.has_opt == FALSE &&
|
|
|
|
iseq->body->param.flags.has_rest == FALSE &&
|
|
|
|
iseq->body->param.flags.has_post == FALSE &&
|
|
|
|
iseq->body->param.flags.has_kw == FALSE &&
|
|
|
|
iseq->body->param.flags.has_kwrest == FALSE &&
|
|
|
|
iseq->body->param.flags.has_block == FALSE;
|
|
|
|
}
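/*
 * Illustrative examples: `def m(a, b) end` is "simple" (mandatory positional
 * parameters only), while `def m(a, b = 1)`, `def m(*a)`, `def m(a, k: 1)`,
 * `def m(**kw)` and `def m(&blk)` each set one of the flags above and
 * therefore go through setup_parameters_complex() instead of the fast path.
 */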
|
|
|
|
|
2015-09-11 14:25:54 -04:00
|
|
|
static inline int
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc,
|
2015-10-23 13:53:35 -04:00
|
|
|
const rb_iseq_t *iseq, VALUE *argv, int param_size, int local_size)
|
2014-03-11 22:18:50 -04:00
|
|
|
{
|
2017-08-05 02:58:44 -04:00
|
|
|
if (LIKELY(simple_iseq_p(iseq) && !(ci->flag & VM_CALL_KW_SPLAT))) {
|
2017-10-26 22:49:30 -04:00
|
|
|
        rb_control_frame_t *cfp = ec->cfp;

        CALLER_SETUP_ARG(cfp, calling, ci); /* splat arg */

if (calling->argc != iseq->body->param.lead_num) {
|
2017-10-26 22:49:30 -04:00
|
|
|
argument_arity_error(ec, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
|
2014-03-11 22:18:50 -04:00
|
|
|
}
CI_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size),
|
2015-09-19 13:59:58 -04:00
|
|
|
(!IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
|
|
|
|
!(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED)));
|
2015-09-11 14:25:54 -04:00
|
|
|
return 0;
|
2012-10-14 16:59:21 -04:00
|
|
|
}
|
2014-03-11 22:18:50 -04:00
|
|
|
else {
|
2017-10-26 22:49:30 -04:00
|
|
|
return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_method);
|
2014-03-11 22:18:50 -04:00
|
|
|
}
|
|
|
|
}
|
2012-10-14 16:59:21 -04:00
|
|
|
|
2012-10-14 15:58:59 -04:00
|
|
|
static VALUE
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_call_iseq_setup(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
2007-12-18 07:07:51 -05:00
|
|
|
{
|
2015-10-23 13:53:35 -04:00
|
|
|
const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def);
|
|
|
|
const int param_size = iseq->body->param.size;
|
2016-07-28 07:02:30 -04:00
|
|
|
const int local_size = iseq->body->local_table_size;
|
2017-10-26 22:49:30 -04:00
|
|
|
const int opt_pc = vm_callee_setup_arg(ec, calling, ci, cc, def_iseq_ptr(cc->me->def), cfp->sp - calling->argc, param_size, local_size);
|
|
|
|
return vm_call_iseq_setup_2(ec, cfp, calling, ci, cc, opt_pc, param_size, local_size);
|
2007-12-18 07:07:51 -05:00
|
|
|
}
|
|
|
|
|
2015-09-19 13:59:58 -04:00
|
|
|
static inline VALUE
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc,
|
2015-10-23 13:53:35 -04:00
|
|
|
int opt_pc, int param_size, int local_size)
|
2012-10-16 17:20:11 -04:00
|
|
|
{
|
|
|
|
if (LIKELY(!(ci->flag & VM_CALL_TAILCALL))) {
|
2017-10-26 22:49:30 -04:00
|
|
|
return vm_call_iseq_setup_normal(ec, cfp, calling, ci, cc, opt_pc, param_size, local_size);
|
2012-10-16 17:20:11 -04:00
|
|
|
}
|
|
|
|
else {
|
2017-10-26 22:49:30 -04:00
|
|
|
return vm_call_iseq_setup_tailcall(ec, cfp, calling, ci, cc, opt_pc);
|
2012-10-16 17:20:11 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-10-18 02:14:39 -04:00
|
|
|
static inline VALUE
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_call_iseq_setup_normal(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc,
|
2015-10-23 13:53:35 -04:00
|
|
|
int opt_pc, int param_size, int local_size)
|
2007-12-18 07:07:51 -05:00
|
|
|
{
|
2015-09-19 13:59:58 -04:00
|
|
|
const rb_callable_method_entry_t *me = cc->me;
|
2015-07-21 17:19:02 -04:00
|
|
|
const rb_iseq_t *iseq = def_iseq_ptr(me->def);
|
2015-09-19 13:59:58 -04:00
|
|
|
VALUE *argv = cfp->sp - calling->argc;
|
2015-10-23 13:53:35 -04:00
|
|
|
VALUE *sp = argv + param_size;
|
2015-10-10 17:20:59 -04:00
|
|
|
cfp->sp = argv - 1 /* recv */;
|
2007-12-18 07:07:51 -05:00
|
|
|
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL, calling->recv,
|
2016-07-28 07:02:30 -04:00
|
|
|
calling->block_handler, (VALUE)me,
|
2015-09-12 14:47:43 -04:00
|
|
|
iseq->body->iseq_encoded + opt_pc, sp,
|
2015-10-23 13:53:35 -04:00
|
|
|
local_size - param_size,
|
2015-09-12 14:47:43 -04:00
|
|
|
iseq->body->stack_max);
|
2012-10-16 17:20:11 -04:00
|
|
|
return Qundef;
|
|
|
|
}
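/*
 * Frame shape pushed above (a sketch): argv points at the first argument
 * (sp - argc), sp is advanced past the param_size parameter slots, and
 * vm_push_frame() is told to reserve the remaining local_size - param_size
 * slots for the iseq's other locals.  The caller's sp is rewound to the
 * receiver slot, so the callee's frame sits directly on top of it.
 */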
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2012-10-18 02:14:39 -04:00
|
|
|
static inline VALUE
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_call_iseq_setup_tailcall(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc,
|
2015-10-23 13:53:35 -04:00
|
|
|
int opt_pc)
|
2012-10-16 17:20:11 -04:00
|
|
|
{
|
2015-07-24 17:44:14 -04:00
|
|
|
unsigned int i;
|
2015-09-19 13:59:58 -04:00
|
|
|
VALUE *argv = cfp->sp - calling->argc;
|
|
|
|
const rb_callable_method_entry_t *me = cc->me;
|
2015-07-21 17:19:02 -04:00
|
|
|
const rb_iseq_t *iseq = def_iseq_ptr(me->def);
|
2012-10-16 17:20:11 -04:00
|
|
|
VALUE *src_argv = argv;
|
|
|
|
VALUE *sp_orig, *sp;
|
2016-08-02 20:28:12 -04:00
|
|
|
VALUE finish_flag = VM_FRAME_FINISHED_P(cfp) ? VM_FRAME_FLAG_FINISH : 0;
|
2007-12-18 07:07:51 -05:00
|
|
|
|
2016-07-28 07:02:30 -04:00
|
|
|
if (VM_BH_FROM_CFP_P(calling->block_handler, cfp)) {
|
|
|
|
struct rb_captured_block *dst_captured = VM_CFP_TO_CAPTURED_BLOCK(RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp));
|
|
|
|
const struct rb_captured_block *src_captured = VM_BH_TO_CAPT_BLOCK(calling->block_handler);
|
|
|
|
dst_captured->code.val = src_captured->code.val;
|
|
|
|
if (VM_BH_ISEQ_BLOCK_P(calling->block_handler)) {
|
|
|
|
calling->block_handler = VM_BH_FROM_ISEQ_BLOCK(dst_captured);
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
calling->block_handler = VM_BH_FROM_IFUNC_BLOCK(dst_captured);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_pop_frame(ec, cfp, cfp->ep);
|
|
|
|
cfp = ec->cfp;
|
2009-08-15 14:18:07 -04:00
|
|
|
|
2012-10-16 17:20:11 -04:00
|
|
|
sp_orig = sp = cfp->sp;
|
2007-12-18 07:07:51 -05:00
|
|
|
|
2012-10-16 17:20:11 -04:00
|
|
|
/* push self */
|
2015-09-19 13:59:58 -04:00
|
|
|
sp[0] = calling->recv;
|
2012-10-16 17:20:11 -04:00
|
|
|
sp++;
|
2009-08-12 01:55:06 -04:00
|
|
|
|
2012-10-16 17:20:11 -04:00
|
|
|
/* copy arguments */
|
2015-07-21 18:52:59 -04:00
|
|
|
for (i=0; i < iseq->body->param.size; i++) {
|
2012-10-16 17:20:11 -04:00
|
|
|
*sp++ = src_argv[i];
|
|
|
|
}
|
2012-08-02 07:34:19 -04:00
|
|
|
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_push_frame(ec, iseq, VM_FRAME_MAGIC_METHOD | VM_ENV_FLAG_LOCAL | finish_flag,
|
2016-07-28 07:02:30 -04:00
|
|
|
calling->recv, calling->block_handler, (VALUE)me,
|
2015-09-12 14:47:43 -04:00
|
|
|
iseq->body->iseq_encoded + opt_pc, sp,
|
2016-07-28 07:02:30 -04:00
|
|
|
iseq->body->local_table_size - iseq->body->param.size,
|
2015-09-12 14:47:43 -04:00
|
|
|
iseq->body->stack_max);
|
2012-10-16 17:20:11 -04:00
|
|
|
|
|
|
|
cfp->sp = sp_orig;
|
2017-11-06 02:44:28 -05:00
|
|
|
RUBY_VM_CHECK_INTS(ec);
|
2017-01-08 21:55:39 -05:00
|
|
|
|
2012-10-14 15:58:59 -04:00
|
|
|
return Qundef;
|
2012-08-02 07:34:19 -04:00
|
|
|
}
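/*
 * Tailcall sketch: instead of stacking a new frame on top of the caller, the
 * code above pops the caller's frame (vm_pop_frame), copies the receiver and
 * the callee's parameters down into the reclaimed space, and pushes the
 * callee frame in place, preserving VM_FRAME_FLAG_FINISH.  Compiling with the
 * (assumed) `tailcall_optimization: true` compile option is what makes the
 * VM_CALL_TAILCALL flag appear on a call site in the first place.
 */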
|
|
|
|
|
2012-10-19 06:38:30 -04:00
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_m2(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-08-02 07:34:19 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(recv, rb_ary_new4(argc, argv));
|
2012-10-19 06:38:30 -04:00
|
|
|
}
|
2012-08-02 07:34:19 -04:00
|
|
|
|
2012-10-19 06:38:30 -04:00
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_m1(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-10-19 06:38:30 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(argc, argv, recv);
|
2012-10-19 06:38:30 -04:00
|
|
|
}
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2012-10-19 06:38:30 -04:00
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_0(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-10-19 06:38:30 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(recv);
|
2012-10-19 06:38:30 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_1(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-10-19 06:38:30 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(recv, argv[0]);
|
2012-10-19 06:38:30 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_2(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-10-19 06:38:30 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(recv, argv[0], argv[1]);
|
2012-10-19 06:38:30 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_3(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-10-19 06:38:30 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(recv, argv[0], argv[1], argv[2]);
|
2012-10-19 06:38:30 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_4(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-10-19 06:38:30 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(recv, argv[0], argv[1], argv[2], argv[3]);
|
2012-10-19 06:38:30 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_5(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-10-19 06:38:30 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4]);
|
2012-10-19 06:38:30 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_6(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-10-19 06:38:30 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
|
2012-10-19 06:38:30 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_7(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-10-19 06:38:30 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
|
2012-10-19 06:38:30 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_8(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-10-19 06:38:30 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
|
2012-10-19 06:38:30 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_9(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-10-19 06:38:30 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
|
2012-10-19 06:38:30 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_10(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-10-19 06:38:30 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
|
2012-10-19 06:38:30 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_11(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-10-19 06:38:30 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
|
2012-10-19 06:38:30 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_12(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-10-19 06:38:30 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
|
2012-10-19 06:38:30 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_13(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-10-19 06:38:30 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
|
2012-10-19 06:38:30 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_14(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-10-19 06:38:30 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
|
2012-10-19 06:38:30 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2012-11-13 04:48:08 -05:00
|
|
|
call_cfunc_15(VALUE (*func)(ANYARGS), VALUE recv, int argc, const VALUE *argv)
|
2012-10-19 06:38:30 -04:00
|
|
|
{
|
2012-11-13 04:48:08 -05:00
|
|
|
return (*func)(recv, argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
|
2012-10-19 06:38:30 -04:00
|
|
|
}
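/*
 * These call_cfunc_<N> helpers are thin trampolines selected by the declared
 * C function arity: -2 passes (recv, args_as_Array), -1 passes
 * (argc, argv, recv), and 0..15 unpack argv into that many positional
 * arguments.
 */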
#ifndef VM_PROFILE
|
|
|
|
#define VM_PROFILE 0
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#if VM_PROFILE
|
2015-10-29 02:03:17 -04:00
|
|
|
enum {
|
|
|
|
VM_PROFILE_R2C_CALL,
|
|
|
|
VM_PROFILE_R2C_POPF,
|
|
|
|
VM_PROFILE_C2C_CALL,
|
|
|
|
VM_PROFILE_C2C_POPF,
|
|
|
|
VM_PROFILE_COUNT
|
|
|
|
};
|
|
|
|
static int vm_profile_counter[VM_PROFILE_COUNT];
|
|
|
|
#define VM_PROFILE_UP(x) (vm_profile_counter[VM_PROFILE_##x]++)
#define VM_PROFILE_ATEXIT() atexit(vm_profile_show_result)
|
2013-09-09 01:17:17 -04:00
|
|
|
static void
|
|
|
|
vm_profile_show_result(void)
|
2012-11-06 17:50:30 -05:00
|
|
|
{
fprintf(stderr, "VM Profile results: \n");
|
2015-10-29 02:03:17 -04:00
|
|
|
fprintf(stderr, "r->c call: %d\n", vm_profile_counter[VM_PROFILE_R2C_CALL]);
|
|
|
|
fprintf(stderr, "r->c popf: %d\n", vm_profile_counter[VM_PROFILE_R2C_POPF]);
|
|
|
|
fprintf(stderr, "c->c call: %d\n", vm_profile_counter[VM_PROFILE_C2C_CALL]);
|
|
|
|
fprintf(stderr, "c->c popf: %d\n", vm_profile_counter[VM_PROFILE_C2C_POPF]);
|
2012-08-02 07:34:19 -04:00
|
|
|
}
#else
|
|
|
|
#define VM_PROFILE_UP(x)
|
|
|
|
#define VM_PROFILE_ATEXIT()
|
|
|
|
#endif
|
2012-08-02 07:34:19 -04:00
|
|
|
|
2017-08-18 08:44:30 -04:00
|
|
|
static inline int
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_cfp_consistent_p(rb_execution_context_t *ec, const rb_control_frame_t *reg_cfp)
|
2017-08-18 08:44:30 -04:00
|
|
|
{
|
|
|
|
const int ov_flags = RAISED_STACKOVERFLOW;
|
2017-10-26 22:49:30 -04:00
|
|
|
if (LIKELY(reg_cfp == ec->cfp + 1)) return TRUE;
|
2017-11-07 00:22:09 -05:00
|
|
|
if (rb_ec_raised_p(ec, ov_flags)) {
|
|
|
|
rb_ec_raised_reset(ec, ov_flags);
|
2017-08-18 08:44:30 -04:00
|
|
|
return TRUE;
|
|
|
|
}
|
|
|
|
return FALSE;
|
|
|
|
}
|
|
|
|
|
2017-04-11 00:17:45 -04:00
|
|
|
#define CHECK_CFP_CONSISTENCY(func) \
|
2017-10-26 22:49:30 -04:00
|
|
|
(LIKELY(vm_cfp_consistent_p(ec, reg_cfp)) ? (void)0 : \
|
2018-01-02 01:41:40 -05:00
|
|
|
rb_bug(func ": cfp consistency error (%p, %p)", (void *)reg_cfp, (void *)(ec->cfp+1)))
|
2017-04-11 00:17:45 -04:00
|
|
|
|
2013-09-09 01:17:19 -04:00
|
|
|
static inline
|
|
|
|
const rb_method_cfunc_t *
|
* method.h: introduce rb_callable_method_entry_t to remove
rb_control_frame_t::klass.
[Bug #11278], [Bug #11279]
rb_method_entry_t data belong to modules/classes.
rb_method_entry_t::owner points defined module or class.
module M
def foo; end
end
In this case, owner is M.
rb_callable_method_entry_t data belong to only classes.
For modules, MRI creates corresponding T_ICLASS internally.
rb_callable_method_entry_t can also belong to T_ICLASS.
rb_callable_method_entry_t::defined_class points T_CLASS or
T_ICLASS.
rb_method_entry_t data for classes (not for modules) are also
rb_callable_method_entry_t data because it is completely same data.
In this case, rb_method_entry_t::owner == rb_method_entry_t::defined_class.
For example, if there are classes C and D which include M,
class C; include M; end
class D; include M; end
then, two T_ICLASS objects for C's super class and D's super class
will be created.
When C.new.foo is called, M#foo is searched for and
rb_callable_method_t data is used by the VM to invoke M#foo.
rb_method_entry_t data is only one for M#foo.
However, rb_callable_method_entry_t data are two (and can be more).
It is proportional to the number of including (and prepending)
classes (the number of T_ICLASS which point to the module).
Created rb_callable_method_entry_t data are collected when
the original module M is modified; think of them as a cache.
We need to select what kind of method entry data is needed.
To operate on method definitions, use rb_method_entry_t.
You can access them by the following functions.
* rb_method_entry(VALUE klass, ID id);
* rb_method_entry_with_refinements(VALUE klass, ID id);
* rb_method_entry_without_refinements(VALUE klass, ID id);
* rb_resolve_refined_method(VALUE refinements, const rb_method_entry_t *me);
To invoke methods, use rb_callable_method_entry_t,
which you can get by the following APIs corresponding to the
above listed functions.
* rb_callable_method_entry(VALUE klass, ID id);
* rb_callable_method_entry_with_refinements(VALUE klass, ID id);
* rb_callable_method_entry_without_refinements(VALUE klass, ID id);
* rb_resolve_refined_method_callable(VALUE refinements, const rb_callable_method_entry_t *me);
VM pushes rb_callable_method_entry_t, so that rb_vm_frame_method_entry()
returns rb_callable_method_entry_t.
You can check a super class of current method by
rb_callable_method_entry_t::defined_class.
* method.h: renamed from rb_method_entry_t::klass to
rb_method_entry_t::owner.
* internal.h: add rb_classext_struct::callable_m_tbl to cache
rb_callable_method_entry_t data.
We need to reconsider this field because it is only
active for T_ICLASS.
* class.c (method_entry_i): ditto.
* class.c (rb_define_attr): rb_method_entry() does not take
defined_class_ptr.
* gc.c (mark_method_entry): mark RCLASS_CALLABLE_M_TBL() for T_ICLASS.
* cont.c (fiber_init): rb_control_frame_t::klass is removed.
* proc.c: fix `struct METHOD' data structure because
rb_callable_method_t has all information.
* vm_core.h: remove several fields.
* rb_control_frame_t::klass.
* rb_block_t::klass.
And catch up changes.
* eval.c: catch up changes.
* gc.c: ditto.
* insns.def: ditto.
* vm.c: ditto.
* vm_args.c: ditto.
* vm_backtrace.c: ditto.
* vm_dump.c: ditto.
* vm_eval.c: ditto.
* vm_insnhelper.c: ditto.
* vm_method.c: ditto.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@51126 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2015-07-03 07:24:50 -04:00
|
|
|
vm_method_cfunc_entry(const rb_callable_method_entry_t *me)
|
2013-09-09 01:17:19 -04:00
|
|
|
{
|
|
|
|
#if VM_DEBUG_VERIFY_METHOD_CACHE
|
|
|
|
switch (me->def->type) {
|
|
|
|
case VM_METHOD_TYPE_CFUNC:
|
|
|
|
case VM_METHOD_TYPE_NOTIMPLEMENTED:
|
|
|
|
break;
|
|
|
|
# define METHOD_BUG(t) case VM_METHOD_TYPE_##t: rb_bug("wrong method type: " #t)
|
|
|
|
METHOD_BUG(ISEQ);
|
|
|
|
METHOD_BUG(ATTRSET);
|
|
|
|
METHOD_BUG(IVAR);
|
|
|
|
METHOD_BUG(BMETHOD);
|
|
|
|
METHOD_BUG(ZSUPER);
|
|
|
|
METHOD_BUG(UNDEF);
|
|
|
|
METHOD_BUG(OPTIMIZED);
|
|
|
|
METHOD_BUG(MISSING);
|
|
|
|
METHOD_BUG(REFINED);
|
2015-05-30 14:45:28 -04:00
|
|
|
METHOD_BUG(ALIAS);
|
2013-09-09 01:17:19 -04:00
|
|
|
# undef METHOD_BUG
|
|
|
|
default:
|
|
|
|
rb_bug("wrong method type: %d", me->def->type);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
return &me->def->body.cfunc;
|
|
|
|
}
|
|
|
|
|
2012-08-08 03:52:19 -04:00
|
|
|
static VALUE
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
2012-08-08 03:52:19 -04:00
|
|
|
{
|
2012-10-23 00:22:31 -04:00
|
|
|
VALUE val;
|
2015-09-19 13:59:58 -04:00
|
|
|
const rb_callable_method_entry_t *me = cc->me;
|
2013-09-09 01:17:19 -04:00
|
|
|
const rb_method_cfunc_t *cfunc = vm_method_cfunc_entry(me);
|
2012-10-23 00:22:31 -04:00
|
|
|
int len = cfunc->argc;
|
2012-11-13 04:48:08 -05:00
|
|
|
|
2015-09-19 13:59:58 -04:00
|
|
|
VALUE recv = calling->recv;
|
2016-07-28 07:02:30 -04:00
|
|
|
VALUE block_handler = calling->block_handler;
|
2015-09-19 13:59:58 -04:00
|
|
|
int argc = calling->argc;
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2017-11-07 03:19:25 -05:00
|
|
|
RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
|
2017-10-29 09:19:14 -04:00
|
|
|
EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, ci->mid, me->owner, Qundef);
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_push_frame(ec, NULL, VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL, recv,
|
2016-07-28 07:02:30 -04:00
|
|
|
block_handler, (VALUE)me,
|
2017-10-26 22:49:30 -04:00
|
|
|
0, ec->cfp->sp, 0, 0);
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2012-11-13 04:48:08 -05:00
|
|
|
if (len >= 0) rb_check_arity(argc, len, len);
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2012-11-13 04:48:08 -05:00
|
|
|
reg_cfp->sp -= argc + 1;
|
2015-10-29 02:03:17 -04:00
|
|
|
VM_PROFILE_UP(R2C_CALL);
|
2012-11-13 04:48:08 -05:00
|
|
|
val = (*cfunc->invoker)(cfunc->func, recv, argc, reg_cfp->sp + 1);
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2017-04-11 00:17:45 -04:00
|
|
|
CHECK_CFP_CONSISTENCY("vm_call_cfunc");
|
2012-08-08 03:52:19 -04:00
|
|
|
|
2017-10-26 22:49:30 -04:00
|
|
|
rb_vm_pop_frame(ec);
|
2012-08-23 03:22:40 -04:00
|
|
|
|
2017-10-29 09:19:14 -04:00
|
|
|
EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, ci->mid, me->owner, val);
|
2017-11-07 03:19:25 -05:00
|
|
|
RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
|
2012-10-23 00:22:31 -04:00
|
|
|
|
|
|
|
return val;
|
|
|
|
}
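To make the sequence above concrete, here is a small, self-contained
illustration (not part of vm_insnhelper.c; the extension, class, and method
names are invented): a cfunc registered with rb_define_method() and argc 1
is dispatched through vm_call_cfunc_with_frame(), which pushes a
VM_FRAME_MAGIC_CFUNC frame, enforces the arity via rb_check_arity(argc, 1, 1),
calls the function through cfunc->invoker, and pops the frame with
rb_vm_pop_frame() once it returns.

#include <ruby.h>

/* Illustrative extension method: String#my_pad delegates to String#ljust. */
static VALUE
my_pad(VALUE self, VALUE width)
{
    /* While this runs, the CFUNC frame pushed by vm_call_cfunc_with_frame()
     * is the current frame; rb_funcall() re-enters the VM beneath it. */
    return rb_funcall(self, rb_intern("ljust"), 1, width);
}

void
Init_my_pad(void)
{
    /* argc == 1, so the VM checks rb_check_arity(argc, 1, 1) before calling. */
    rb_define_method(rb_cString, "my_pad", my_pad, 1);
}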
|
|
|
|
|
|
|
|
static VALUE
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
2012-10-23 00:22:31 -04:00
|
|
|
{
|
2015-09-19 13:59:58 -04:00
|
|
|
CALLER_SETUP_ARG(reg_cfp, calling, ci);
|
2017-10-26 22:49:30 -04:00
|
|
|
return vm_call_cfunc_with_frame(ec, reg_cfp, calling, ci, cc);
|
2012-10-23 00:22:31 -04:00
|
|
|
}
|
|
|
|
|
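/* Fast paths for methods defined by attr_reader / attr_writer: the instance
 * variable is read or written directly and the operands are popped from the
 * stack, without pushing a new method frame. */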
2012-10-14 15:58:59 -04:00
|
|
|
static VALUE
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
2012-10-14 15:58:59 -04:00
|
|
|
{
|
|
|
|
cfp->sp -= 1;
|
2016-04-23 08:17:36 -04:00
|
|
|
return vm_getivar(calling->recv, cc->me->def->body.attr.id, NULL, cc, 1);
|
2012-10-14 15:58:59 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
2012-08-23 03:22:40 -04:00
|
|
|
{
|
2016-04-23 08:17:36 -04:00
|
|
|
VALUE val = *(cfp->sp - 1);
|
2012-10-14 15:58:59 -04:00
|
|
|
cfp->sp -= 2;
|
2016-04-23 08:17:36 -04:00
|
|
|
return vm_setivar(calling->recv, cc->me->def->body.attr.id, val, NULL, cc, 1);
|
2012-08-23 03:22:40 -04:00
|
|
|
}
|
|
|
|
|
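/* bmethod: a method whose body is a Proc (e.g. one defined with
 * define_method).  The arguments are copied off the stack and the Proc is
 * run through vm_invoke_bmethod(), with the method entry handed over via
 * ec->passed_bmethod_me. */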
2012-10-18 02:14:39 -04:00
|
|
|
static inline VALUE
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, const VALUE *argv)
|
2012-08-23 03:22:40 -04:00
|
|
|
{
|
2012-10-14 15:58:59 -04:00
|
|
|
rb_proc_t *proc;
|
|
|
|
VALUE val;
|
|
|
|
|
|
|
|
/* control block frame */
|
2017-10-28 06:01:54 -04:00
|
|
|
ec->passed_bmethod_me = cc->me;
|
2015-09-19 13:59:58 -04:00
|
|
|
GetProcPtr(cc->me->def->body.proc, proc);
|
2017-10-27 02:06:31 -04:00
|
|
|
val = vm_invoke_bmethod(ec, proc, calling->recv, calling->argc, argv, calling->block_handler);
|
2012-10-14 15:58:59 -04:00
|
|
|
|
|
|
|
return val;
|
2012-08-23 03:22:40 -04:00
|
|
|
}
|
|
|
|
|
2012-10-14 15:58:59 -04:00
|
|
|
static VALUE
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
2012-08-23 03:22:40 -04:00
|
|
|
{
|
* rewrite method/block parameter fitting logic to optimize
keyword arguments/parameters and a splat argument.
[Feature #10440] (Details are described in this ticket)
Most of the complex part is moved to vm_args.c.
Note that ISeq#to_a has not yet caught up with the new instruction format.
* vm_core.h: change iseq data structures.
* introduce rb_call_info_kw_arg_t to represent keyword arguments.
* add rb_call_info_t::kw_arg.
* rename rb_iseq_t::arg_post_len to rb_iseq_t::arg_post_num.
* rename rb_iseq_t::arg_keywords to arg_keyword_num.
* rename rb_iseq_t::arg_keyword to rb_iseq_t::arg_keyword_bits.
to represent keyword bitmap parameter index.
This bitmap parameter shows which keyword parameters are given
or not given (0 for given).
It is referred to by the `checkkeyword' instruction described below.
* rename rb_iseq_t::arg_keyword_check to rb_iseq_t::arg_keyword_rest
to represent keyword rest parameter index.
* add rb_iseq_t::arg_keyword_default_values to represent default
keyword values.
* rename VM_CALL_ARGS_SKIP_SETUP to VM_CALL_ARGS_SIMPLE
to represent
(ci->flag & (SPLAT|BLOCKARG)) &&
ci->blockiseq == NULL &&
ci->kw_arg == NULL.
* vm_insnhelper.c, vm_args.c: rewrite with refactoring.
* rewrite splat argument code.
* rewrite keyword arguments/parameters code.
* merge method and block parameter fitting code into one code base.
* vm.c, vm_eval.c: catch up these changes.
* compile.c (new_callinfo): callinfo requires kw_arg parameter.
* compile.c (compile_array_): check the last argument Hash object or
not. If Hash object and all keys are Symbol literals, they are
compiled to keyword arguments.
* insns.def (checkkeyword): add new instruction.
This instruction checks the availability of the corresponding keyword.
For example, a method "def foo k1: 'v1'; end" is compiled to the
following instructions.
0000 checkkeyword 2, 0 # check k1 is given.
0003 branchif 9 # if given, jump to address #9
0005 putstring "v1"
0007 setlocal_OP__WC__0 3 # k1 = 'v1'
0009 trace 8
0011 putnil
0012 trace 16
0014 leave
* insns.def (opt_send_simple): removed; added a new instruction
"opt_send_without_block".
* parse.y (new_args_tail_gen): reorder variables.
Before this patch, a method "def foo(k1: 1, kr1:, k2: 2, **krest, &b)"
has parameter variables "k1, kr1, k2, &b, internal_id, krest",
but this patch reorders to "kr1, k1, k2, internal_id, krest, &b".
(the block variable is placed last)
* parse.y (vtable_pop): added.
This function removes the latest `n' variables from vtable.
* iseq.c: catch up iseq data changes.
* proc.c: ditto.
* class.c (keyword_error): export as rb_keyword_error().
* common.mk: depend vm_args.c for vm.o.
* hash.c (rb_hash_has_key): export.
* internal.h: ditto.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@48239 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2014-11-02 13:02:55 -05:00
|
|
|
VALUE *argv;
|
2015-09-19 13:59:58 -04:00
|
|
|
int argc;
|
2014-11-02 13:02:55 -05:00
|
|
|
|
2015-09-19 13:59:58 -04:00
|
|
|
CALLER_SETUP_ARG(cfp, calling, ci);
|
|
|
|
argc = calling->argc;
|
|
|
|
argv = ALLOCA_N(VALUE, argc);
|
|
|
|
MEMCPY(argv, cfp->sp - argc, VALUE, argc);
|
|
|
|
cfp->sp += - argc - 1;
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2017-10-26 22:49:30 -04:00
|
|
|
return vm_call_bmethod_body(ec, calling, ci, cc, argv);
|
2012-08-23 03:22:40 -04:00
|
|
|
}
|
2012-09-28 00:05:36 -04:00
|
|
|
|
2015-06-03 06:42:18 -04:00
|
|
|
static enum method_missing_reason
|
2015-09-19 13:59:58 -04:00
|
|
|
ci_missing_reason(const struct rb_call_info *ci)
|
2015-02-04 22:31:07 -05:00
|
|
|
{
|
2015-06-03 06:42:18 -04:00
|
|
|
enum method_missing_reason stat = MISSING_NOENTRY;
|
2015-06-02 21:39:16 -04:00
|
|
|
if (ci->flag & VM_CALL_VCALL) stat |= MISSING_VCALL;
|
2016-02-27 23:41:38 -05:00
|
|
|
if (ci->flag & VM_CALL_FCALL) stat |= MISSING_FCALL;
|
2015-06-02 21:39:16 -04:00
|
|
|
if (ci->flag & VM_CALL_SUPER) stat |= MISSING_SUPER;
|
2015-02-04 22:31:07 -05:00
|
|
|
return stat;
|
|
|
|
}
|
|
|
|
|
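/* Optimized dispatch for Kernel#send / #__send__: the method-name Symbol is
 * taken from the stack, the call info is rewritten to the target mid, and
 * dispatch continues through vm_call_method().  A name without an interned
 * ID falls back to method_missing, or raises NoMethodError immediately when
 * method_missing is not overridden. */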
2012-10-14 15:58:59 -04:00
|
|
|
static VALUE
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *orig_ci, struct rb_call_cache *orig_cc)
|
2012-09-28 00:05:36 -04:00
|
|
|
{
|
2014-11-02 13:02:55 -05:00
|
|
|
int i;
|
2012-10-14 15:58:59 -04:00
|
|
|
VALUE sym;
|
2015-09-19 13:59:58 -04:00
|
|
|
struct rb_call_info *ci;
|
|
|
|
struct rb_call_info_with_kwarg ci_entry;
|
|
|
|
struct rb_call_cache cc_entry, *cc;
|
2012-09-28 00:05:36 -04:00
|
|
|
|
2015-09-19 13:59:58 -04:00
|
|
|
CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);
|
2014-11-02 13:02:55 -05:00
|
|
|
|
2015-09-19 13:59:58 -04:00
|
|
|
i = calling->argc - 1;
|
2014-11-02 13:02:55 -05:00
|
|
|
|
2015-09-19 13:59:58 -04:00
|
|
|
if (calling->argc == 0) {
|
2012-10-14 15:58:59 -04:00
|
|
|
rb_raise(rb_eArgError, "no method name given");
|
2012-09-28 00:05:36 -04:00
|
|
|
}
|
|
|
|
|
2015-09-19 13:59:58 -04:00
|
|
|
/* setup new ci */
|
|
|
|
if (orig_ci->flag & VM_CALL_KWARG) {
|
|
|
|
ci = (struct rb_call_info *)&ci_entry;
|
|
|
|
ci_entry = *(struct rb_call_info_with_kwarg *)orig_ci;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
ci = &ci_entry.ci;
|
|
|
|
ci_entry.ci = *orig_ci;
|
|
|
|
}
|
|
|
|
ci->flag = ci->flag & ~VM_CALL_KWARG; /* TODO: delegate kw_arg without making a Hash object */
|
|
|
|
|
|
|
|
/* setup new cc */
|
|
|
|
cc_entry = *orig_cc;
|
|
|
|
cc = &cc_entry;
|
2012-10-14 15:58:59 -04:00
|
|
|
|
|
|
|
sym = TOPN(i);
|
|
|
|
|
2015-02-04 14:03:04 -05:00
|
|
|
if (!(ci->mid = rb_check_id(&sym))) {
|
2015-09-19 13:59:58 -04:00
|
|
|
if (rb_method_basic_definition_p(CLASS_OF(calling->recv), idMethodMissing)) {
|
2016-01-13 03:05:07 -05:00
|
|
|
VALUE exc = make_no_method_exception(rb_eNoMethodError, 0, calling->recv,
|
2016-02-27 23:41:38 -05:00
|
|
|
rb_long2int(calling->argc), &TOPN(i),
|
|
|
|
ci->flag & (VM_CALL_FCALL|VM_CALL_VCALL));
|
2012-10-14 15:58:59 -04:00
|
|
|
rb_exc_raise(exc);
|
|
|
|
}
|
2015-02-04 23:41:05 -05:00
|
|
|
TOPN(i) = rb_str_intern(sym);
|
2015-02-04 22:31:07 -05:00
|
|
|
ci->mid = idMethodMissing;
|
2017-11-07 00:01:51 -05:00
|
|
|
ec->method_missing_reason = cc->aux.method_missing_reason = ci_missing_reason(ci);
|
2015-02-04 14:45:16 -05:00
|
|
|
}
|
2015-02-04 22:31:07 -05:00
|
|
|
else {
|
|
|
|
/* shift arguments */
|
|
|
|
if (i > 0) {
|
|
|
|
MEMMOVE(&TOPN(i), &TOPN(i-1), VALUE, i);
|
|
|
|
}
|
2015-09-19 13:59:58 -04:00
|
|
|
calling->argc -= 1;
|
2015-02-04 22:31:07 -05:00
|
|
|
DEC_SP(1);
|
2012-10-14 15:58:59 -04:00
|
|
|
}
|
2012-09-28 00:05:36 -04:00
|
|
|
|
2017-10-06 01:55:11 -04:00
|
|
|
cc->me = rb_callable_method_entry_with_refinements(CLASS_OF(calling->recv), ci->mid, NULL);
|
2012-10-17 03:12:40 -04:00
|
|
|
ci->flag = VM_CALL_FCALL | VM_CALL_OPT_SEND;
|
2017-10-26 22:49:30 -04:00
|
|
|
return vm_call_method(ec, reg_cfp, calling, ci, cc);
|
2012-09-28 00:05:36 -04:00
|
|
|
}
|
|
|
|
|
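/* Optimized block-style call paths: self is removed from the stack and the
 * receiver Proc (vm_call_opt_call) or the frame's block handler
 * (vm_call_opt_block_call) is invoked directly as a block.  If Proc#call
 * has been redefined, vm_call_opt_block_call() falls back to an ordinary
 * method call on the Proc built from the block handler. */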
2018-01-07 14:18:49 -05:00
|
|
|
static inline VALUE vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, VALUE block_handler);
|
|
|
|
|
|
|
|
NOINLINE(static VALUE
|
2018-01-08 04:11:26 -05:00
|
|
|
vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
|
2018-01-07 14:18:49 -05:00
|
|
|
struct rb_calling_info *calling, const struct rb_call_info *ci, VALUE block_handler));
|
|
|
|
|
|
|
|
static VALUE
|
2018-01-08 04:11:26 -05:00
|
|
|
vm_invoke_block_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
|
|
|
|
struct rb_calling_info *calling, const struct rb_call_info *ci, VALUE block_handler)
|
2018-01-07 14:18:49 -05:00
|
|
|
{
|
2018-01-08 04:11:26 -05:00
|
|
|
int argc = calling->argc;
|
|
|
|
|
|
|
|
/* remove self */
|
|
|
|
if (argc > 0) MEMMOVE(&TOPN(argc), &TOPN(argc-1), VALUE, argc);
|
|
|
|
DEC_SP(1);
|
|
|
|
|
2018-01-07 14:18:49 -05:00
|
|
|
return vm_invoke_block(ec, reg_cfp, calling, ci, block_handler);
|
|
|
|
}
|
2018-01-05 12:51:10 -05:00
|
|
|
|
2012-10-14 15:58:59 -04:00
|
|
|
static VALUE
|
2018-01-05 12:51:10 -05:00
|
|
|
vm_call_opt_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
2012-10-14 15:58:59 -04:00
|
|
|
{
|
2018-01-05 12:51:10 -05:00
|
|
|
VALUE procval = calling->recv;
|
2018-01-08 04:11:26 -05:00
|
|
|
return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, VM_BH_FROM_PROC(procval));
|
2018-01-07 14:18:49 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
vm_call_opt_block_call(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
|
|
|
|
{
|
|
|
|
VALUE block_handler = VM_ENV_BLOCK_HANDLER(reg_cfp->ep);
|
|
|
|
|
|
|
|
if (BASIC_OP_UNREDEFINED_P(BOP_CALL, PROC_REDEFINED_OP_FLAG)) {
|
2018-01-08 04:11:26 -05:00
|
|
|
return vm_invoke_block_opt_call(ec, reg_cfp, calling, ci, block_handler);
|
2018-01-07 14:18:49 -05:00
|
|
|
}
|
|
|
|
else {
|
|
|
|
calling->recv = rb_vm_bh_to_procval(ec, block_handler);
|
2018-01-08 04:04:07 -05:00
|
|
|
vm_search_method(ci, cc, calling->recv);
|
2018-01-07 14:18:49 -05:00
|
|
|
return vm_call_general(ec, reg_cfp, calling, ci, cc);
|
|
|
|
}
|
2012-10-14 15:58:59 -04:00
|
|
|
}
|
* insns.def (send, invokesuper, invokeblock, opt_*), vm_core.h:
use only a `ci' (rb_call_info_t) parameter instead of using
parameters such as `op_id', 'op_argc', `blockiseq' and flag.
This information is stored in rb_call_info_t at compile
time.
This technique simplifies parameter passing at related
function calls (~10% speedup for simple method invocation on
my machine).
`rb_call_info_t' also has new function pointer variable `call'.
This `call' variable makes it possible to customize the method (block)
invocation process per call site. However, it always calls
`vm_call_general()' as of this change.
`rb_call_info_t' also has temporary variables for method
(block) invocation.
* vm_core.h, compile.c, insns.def: introduce VM_CALL_ARGS_SKIP_SETUP
VM_CALL macro. This flag indicates that this call can skip
caller_setup (block arg and splat arg).
* compile.c: catch up above changes.
* iseq.c: catch up above changes (especially for TS_CALLINFO).
* tool/instruction.rb: catch up above changes.
* vm_insnhelper.c, vm_insnhelper.h: ditto. Macros and functions
parameters are changed.
* vm_eval.c (vm_call0): ditto (it will be rewritten soon).
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@37180 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2012-10-14 12:59:05 -04:00
|
|
|
|
2012-10-14 15:58:59 -04:00
|
|
|
static VALUE
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *orig_ci, struct rb_call_cache *orig_cc)
|
2012-10-14 15:58:59 -04:00
|
|
|
{
|
2015-09-19 13:59:58 -04:00
|
|
|
VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
|
|
|
|
struct rb_call_info ci_entry;
|
|
|
|
const struct rb_call_info *ci;
|
|
|
|
struct rb_call_cache cc_entry, *cc;
|
|
|
|
unsigned int argc;
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2015-09-19 13:59:58 -04:00
|
|
|
CALLER_SETUP_ARG(reg_cfp, calling, orig_ci);
|
|
|
|
argc = calling->argc+1;
|
2014-11-02 13:02:55 -05:00
|
|
|
|
2012-10-17 03:12:40 -04:00
|
|
|
ci_entry.flag = VM_CALL_FCALL | VM_CALL_OPT_SEND;
|
|
|
|
ci_entry.mid = idMethodMissing;
|
2015-09-19 13:59:58 -04:00
|
|
|
ci_entry.orig_argc = argc;
|
|
|
|
ci = &ci_entry;
|
|
|
|
|
|
|
|
cc_entry = *orig_cc;
|
2015-12-12 10:08:52 -05:00
|
|
|
cc_entry.me =
|
2015-12-12 16:32:36 -05:00
|
|
|
rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv),
|
2017-10-06 01:55:11 -04:00
|
|
|
idMethodMissing, NULL);
|
2015-09-19 13:59:58 -04:00
|
|
|
cc = &cc_entry;
|
|
|
|
|
|
|
|
calling->argc = argc;
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2012-10-17 03:12:40 -04:00
|
|
|
/* shift arguments: m(a, b, c) #=> method_missing(:m, a, b, c) */
|
2012-12-25 04:57:07 -05:00
|
|
|
CHECK_VM_STACK_OVERFLOW(reg_cfp, 1);
|
2015-10-31 00:54:45 -04:00
|
|
|
if (argc > 1) {
|
|
|
|
MEMMOVE(argv+1, argv, VALUE, argc-1);
|
2012-10-17 03:12:40 -04:00
|
|
|
}
|
2015-09-19 13:59:58 -04:00
|
|
|
argv[0] = ID2SYM(orig_ci->mid);
|
2012-10-17 03:12:40 -04:00
|
|
|
INC_SP(1);
|
|
|
|
|
2017-11-07 00:01:51 -05:00
|
|
|
ec->method_missing_reason = orig_cc->aux.method_missing_reason;
|
2017-10-26 22:49:30 -04:00
|
|
|
return vm_call_method(ec, reg_cfp, calling, ci, cc);
|
2015-10-05 15:44:05 -04:00
|
|
|
}
|
|
|
|
|
2016-12-08 00:16:33 -05:00
|
|
|
static const rb_callable_method_entry_t *refined_method_callable_without_refinement(const rb_callable_method_entry_t *me);
|
2015-10-05 15:44:05 -04:00
|
|
|
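/* vm_call_zsuper(): re-dispatch the call to the same method id in the
 * superclass of klass.  A refined entry found there is first replaced by
 * its original (non-refined) method entry before dispatch. */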
static VALUE
|
2017-10-26 22:49:30 -04:00
|
|
|
vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc, VALUE klass)
|
2015-10-05 15:44:05 -04:00
|
|
|
{
|
|
|
|
klass = RCLASS_SUPER(klass);
|
|
|
|
cc->me = klass ? rb_callable_method_entry(klass, ci->mid) : NULL;
|
|
|
|
|
2016-12-08 00:16:33 -05:00
|
|
|
if (!cc->me) {
|
2017-10-26 22:49:30 -04:00
|
|
|
return vm_call_method_nome(ec, cfp, calling, ci, cc);
|
2015-10-05 15:44:05 -04:00
|
|
|
}
|
2017-02-18 20:27:52 -05:00
|
|
|
if (cc->me->def->type == VM_METHOD_TYPE_REFINED &&
|
|
|
|
cc->me->def->body.refined.orig_me) {
|
2016-12-08 00:16:33 -05:00
|
|
|
cc->me = refined_method_callable_without_refinement(cc->me);
|
|
|
|
}
|
2017-10-26 22:49:30 -04:00
|
|
|
return vm_call_method_each_type(ec, cfp, calling, ci, cc);
|
2012-10-14 15:58:59 -04:00
|
|
|
}
|
|
|
|
|
2012-12-07 10:49:21 -05:00
|
|
|
static inline VALUE
|
* revised r37993 to avoid SEGV/ILL in tests. In r37993, a method
entry with VM_METHOD_TYPE_REFINED holds only the original method
definition, so ci->me is set to a method entry allocated on the
stack, and it causes SEGV/ILL. In this commit, a method entry
with VM_METHOD_TYPE_REFINED holds the whole original method entry.
Furthermore, rb_thread_mark() is changed to mark cfp->klass to
avoid GC for iclasses created by copy_refinement_iclass().
* vm_method.c (rb_method_entry_make): add a method entry with
VM_METHOD_TYPE_REFINED to the class refined by the refinement if
the target module is a refinement. When a method entry with
VM_METHOD_TYPE_UNDEF is invoked by vm_call_method(), a method with
the same name is searched in refinements. If such a method is
found, the method is invoked. Otherwise, the original method in
the refined class (rb_method_definition_t::body.orig_me) is
invoked. This change is made to simplify the normal method lookup
and to improve the performance of normal method calls.
* vm_method.c (EXPR1, search_method, rb_method_entry),
vm_eval.c (rb_call0, rb_search_method_entry): do not use
refinements for method lookup.
* vm_insnhelper.c (vm_call_method): search methods in refinements if
ci->me is VM_METHOD_TYPE_REFINED. If the method is called by
super (i.e., ci->call == vm_call_super_method), skip the same
method entry as the current method to avoid infinite call of the
same method.
* class.c (include_modules_at): add a refined method entry for each
method defined in a module included in a refinement.
* class.c (rb_prepend_module): set an empty table to
RCLASS_M_TBL(klass) to add refined method entries, because
refinements should have priority over prepended modules.
* proc.c (mnew): use rb_method_entry_with_refinements() to get
a refined method.
* vm.c (rb_thread_mark): mark cfp->klass for iclasses created by
copy_refinement_iclass().
* vm.c (Init_VM), cont.c (fiber_init): initialize th->cfp->klass.
* test/ruby/test_refinement.rb (test_inline_method_cache): do not skip
the test because it should pass successfully.
* test/ruby/test_refinement.rb (test_redefine_refined_method): new
test for the case a refined method is redefined.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@38236 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2012-12-06 08:08:41 -05:00
|
|
|
find_refinement(VALUE refinements, VALUE klass)
|
|
|
|
{
|
|
|
|
if (NIL_P(refinements)) {
|
|
|
|
return Qnil;
|
|
|
|
}
|
2012-12-07 10:49:21 -05:00
|
|
|
return rb_hash_lookup(refinements, klass);
|
2012-12-06 08:08:41 -05:00
|
|
|
}
|
|
|
|
|
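/* Starting from a block frame, walk up the control frames to the frame
 * whose iseq is the block's local_iseq, i.e. the method-level frame that
 * lexically encloses the block; for non-block frames, return cfp as is. */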
2017-10-26 22:49:30 -04:00
|
|
|
PUREFUNC(static rb_control_frame_t * current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp));
|
2013-02-23 23:36:00 -05:00
|
|
|
static rb_control_frame_t *
|
2017-10-26 22:49:30 -04:00
|
|
|
current_method_entry(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
|
2013-02-23 23:36:00 -05:00
|
|
|
{
|
|
|
|
rb_control_frame_t *top_cfp = cfp;
|
|
|
|
|
2015-07-21 18:52:59 -04:00
|
|
|
if (cfp->iseq && cfp->iseq->body->type == ISEQ_TYPE_BLOCK) {
|
|
|
|
const rb_iseq_t *local_iseq = cfp->iseq->body->local_iseq;
|
|
|
|
|
2013-02-23 23:36:00 -05:00
|
|
|
do {
|
|
|
|
cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
|
2017-10-26 22:49:30 -04:00
|
|
|
if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
|
2013-02-23 23:36:00 -05:00
|
|
|
/* TODO: orphan block */
|
|
|
|
return top_cfp;
|
|
|
|
}
|
|
|
|
} while (cfp->iseq != local_iseq);
|
|
|
|
}
|
|
|
|
return cfp;
|
|
|
|
}
|
|
|
|
|
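/* Given the class a call is made on (current_class) and the module or class
 * that owns the original method (target_owner), walk the superclass chain,
 * unwrapping T_ICLASS wrappers, to find the entry corresponding to that
 * owner.  Used below to complement the defined_class of aliased entries. */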
2015-05-30 14:45:28 -04:00
|
|
|
static VALUE
|
2015-06-01 11:00:17 -04:00
|
|
|
find_defined_class_by_owner(VALUE current_class, VALUE target_owner)
|
2015-05-30 14:45:28 -04:00
|
|
|
{
|
|
|
|
VALUE klass = current_class;
|
|
|
|
|
|
|
|
/* for prepended Module, then start from cover class */
|
|
|
|
if (RB_TYPE_P(klass, T_ICLASS) && FL_TEST(klass, RICLASS_IS_ORIGIN)) klass = RBASIC_CLASS(klass);
|
|
|
|
|
|
|
|
while (RTEST(klass)) {
|
|
|
|
VALUE owner = RB_TYPE_P(klass, T_ICLASS) ? RBASIC_CLASS(klass) : klass;
|
|
|
|
if (owner == target_owner) {
|
|
|
|
return klass;
|
|
|
|
}
|
|
|
|
klass = RCLASS_SUPER(klass);
|
|
|
|
}
|
|
|
|
|
|
|
|
return current_class; /* maybe module function */
|
|
|
|
}
|
|
|
|
|
2015-07-03 07:24:50 -04:00
|
|
|
static const rb_callable_method_entry_t *
|
|
|
|
aliased_callable_method_entry(const rb_callable_method_entry_t *me)
|
|
|
|
{
|
|
|
|
const rb_method_entry_t *orig_me = me->def->body.alias.original_me;
|
|
|
|
const rb_callable_method_entry_t *cme;
|
|
|
|
|
|
|
|
if (orig_me->defined_class == 0) {
|
|
|
|
VALUE defined_class = find_defined_class_by_owner(me->defined_class, orig_me->owner);
|
|
|
|
VM_ASSERT(RB_TYPE_P(orig_me->owner, T_MODULE));
|
2016-11-05 09:15:26 -04:00
|
|
|
cme = rb_method_entry_complement_defined_class(orig_me, me->called_id, defined_class);
|
2015-07-03 07:24:50 -04:00
|
|
|
|
2015-11-18 03:15:51 -05:00
|
|
|
        if (me->def->alias_count + me->def->complemented_count == 0) {
            RB_OBJ_WRITE(me, &me->def->body.alias.original_me, cme);
        }
        else {
            method_definition_set((rb_method_entry_t *)me,
                                  method_definition_create(VM_METHOD_TYPE_ALIAS, me->def->original_id),
                                  (void *)cme);
        }
    }
    else {
        cme = (const rb_callable_method_entry_t *)orig_me;
    }

    VM_ASSERT(callable_method_entry_p(cme));
    return cme;
}
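
/* For a refined method entry, return the callable entry of the original
 * (unrefined) method that would run when no matching refinement is
 * activated.  Returns NULL if that original method is undefined. */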
static const rb_callable_method_entry_t *
refined_method_callable_without_refinement(const rb_callable_method_entry_t *me)
{
    const rb_method_entry_t *orig_me = me->def->body.refined.orig_me;
    const rb_callable_method_entry_t *cme;

    if (orig_me->defined_class == 0) {
        cme = NULL;
        rb_notimplement();
    }
    else {
        cme = (const rb_callable_method_entry_t *)orig_me;
    }

    VM_ASSERT(callable_method_entry_p(cme));

    if (UNDEFINED_METHOD_ENTRY_P(cme)) {
        cme = NULL;
    }
    return cme;
}
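
/* Dispatch a call according to the method definition type.  Each branch
 * installs the matching fast-path handler into the call cache (cc) via
 * CI_SET_FASTPATH so that later calls through the same call site can skip
 * this switch, then performs the call. */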
static VALUE
vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
    switch (cc->me->def->type) {
      case VM_METHOD_TYPE_ISEQ:
        CI_SET_FASTPATH(cc, vm_call_iseq_setup, TRUE);
        return vm_call_iseq_setup(ec, cfp, calling, ci, cc);

      case VM_METHOD_TYPE_NOTIMPLEMENTED:
      case VM_METHOD_TYPE_CFUNC:
        CI_SET_FASTPATH(cc, vm_call_cfunc, TRUE);
        return vm_call_cfunc(ec, cfp, calling, ci, cc);

      case VM_METHOD_TYPE_ATTRSET:
        CALLER_SETUP_ARG(cfp, calling, ci);
        rb_check_arity(calling->argc, 1, 1);
        cc->aux.index = 0;
        CI_SET_FASTPATH(cc, vm_call_attrset, !((ci->flag & VM_CALL_ARGS_SPLAT) || (ci->flag & VM_CALL_KWARG)));
        return vm_call_attrset(ec, cfp, calling, ci, cc);

      case VM_METHOD_TYPE_IVAR:
        CALLER_SETUP_ARG(cfp, calling, ci);
        rb_check_arity(calling->argc, 0, 0);
        cc->aux.index = 0;
        CI_SET_FASTPATH(cc, vm_call_ivar, !(ci->flag & VM_CALL_ARGS_SPLAT));
        return vm_call_ivar(ec, cfp, calling, ci, cc);

      case VM_METHOD_TYPE_MISSING:
        cc->aux.method_missing_reason = 0;
        CI_SET_FASTPATH(cc, vm_call_method_missing, TRUE);
        return vm_call_method_missing(ec, cfp, calling, ci, cc);

      case VM_METHOD_TYPE_BMETHOD:
        CI_SET_FASTPATH(cc, vm_call_bmethod, TRUE);
        return vm_call_bmethod(ec, cfp, calling, ci, cc);

      case VM_METHOD_TYPE_ALIAS:
        cc->me = aliased_callable_method_entry(cc->me);
        VM_ASSERT(cc->me != NULL);
        return vm_call_method_each_type(ec, cfp, calling, ci, cc);

      case VM_METHOD_TYPE_OPTIMIZED:
        switch (cc->me->def->body.optimize_type) {
          case OPTIMIZED_METHOD_TYPE_SEND:
            CI_SET_FASTPATH(cc, vm_call_opt_send, TRUE);
            return vm_call_opt_send(ec, cfp, calling, ci, cc);
          case OPTIMIZED_METHOD_TYPE_CALL:
            CI_SET_FASTPATH(cc, vm_call_opt_call, TRUE);
            return vm_call_opt_call(ec, cfp, calling, ci, cc);
          case OPTIMIZED_METHOD_TYPE_BLOCK_CALL:
            CI_SET_FASTPATH(cc, vm_call_opt_block_call, TRUE);
            return vm_call_opt_block_call(ec, cfp, calling, ci, cc);
          default:
            rb_bug("vm_call_method: unsupported optimized method type (%d)",
                   cc->me->def->body.optimize_type);
        }

      case VM_METHOD_TYPE_UNDEF:
        break;

      case VM_METHOD_TYPE_ZSUPER:
        return vm_call_zsuper(ec, cfp, calling, ci, cc, RCLASS_ORIGIN(cc->me->owner));

      case VM_METHOD_TYPE_REFINED: {
        const rb_cref_t *cref = rb_vm_get_cref(cfp->ep);
        VALUE refinements = cref ? CREF_REFINEMENTS(cref) : Qnil;
        VALUE refinement;
        const rb_callable_method_entry_t *ref_me;

        refinement = find_refinement(refinements, cc->me->owner);

        if (NIL_P(refinement)) {
            goto no_refinement_dispatch;
        }
        ref_me = rb_callable_method_entry(refinement, ci->mid);

        if (ref_me) {
            if (cc->call == vm_call_super_method) {
                const rb_control_frame_t *top_cfp = current_method_entry(ec, cfp);
                const rb_callable_method_entry_t *top_me = rb_vm_frame_method_entry(top_cfp);
                if (top_me && rb_method_definition_eq(ref_me->def, top_me->def)) {
                    goto no_refinement_dispatch;
                }
            }
            cc->me = ref_me;
            if (ref_me->def->type != VM_METHOD_TYPE_REFINED) {
                return vm_call_method(ec, cfp, calling, ci, cc);
            }
        }
        else {
            cc->me = NULL;
            return vm_call_method_nome(ec, cfp, calling, ci, cc);
        }

      no_refinement_dispatch:
        if (cc->me->def->body.refined.orig_me) {
            cc->me = refined_method_callable_without_refinement(cc->me);
        }
        else {
            VALUE klass = RCLASS_SUPER(cc->me->defined_class);
            cc->me = klass ? rb_callable_method_entry(klass, ci->mid) : NULL;
        }
        return vm_call_method(ec, cfp, calling, ci, cc);
      }
    }
rb_bug("vm_call_method: unsupported method type (%d)", cc->me->def->type);
|
|
|
|
}

NORETURN(static void vm_raise_method_missing(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE obj, int call_status));
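
/* Handle a call for which no method entry was found ("no me"): either raise
 * immediately when method_missing itself is missing, or record the reason in
 * the call cache and dispatch to vm_call_method_missing(). */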
static VALUE
vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
    /* method missing */
    const int stat = ci_missing_reason(ci);

    if (ci->mid == idMethodMissing) {
        rb_control_frame_t *reg_cfp = cfp;
        VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc);
        vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat);
    }
    else {
        cc->aux.method_missing_reason = stat;
        CI_SET_FASTPATH(cc, vm_call_method_missing, 1);
        return vm_call_method_missing(ec, cfp, calling, ci, cc);
    }
}
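
/* Top-level dispatcher once the call cache holds a (possibly NULL) callable
 * method entry.  Public methods go straight to vm_call_method_each_type();
 * private methods additionally require an FCALL (receiver-less call), and
 * protected methods require self to be a kind of the defining class. */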
static inline VALUE
vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
    VM_ASSERT(callable_method_entry_p(cc->me));

    if (cc->me != NULL) {
        switch (METHOD_ENTRY_VISI(cc->me)) {
          case METHOD_VISI_PUBLIC: /* likely */
            return vm_call_method_each_type(ec, cfp, calling, ci, cc);

          case METHOD_VISI_PRIVATE:
            if (!(ci->flag & VM_CALL_FCALL)) {
                enum method_missing_reason stat = MISSING_PRIVATE;
                if (ci->flag & VM_CALL_VCALL) stat |= MISSING_VCALL;

                cc->aux.method_missing_reason = stat;
                CI_SET_FASTPATH(cc, vm_call_method_missing, 1);
                return vm_call_method_missing(ec, cfp, calling, ci, cc);
            }
            return vm_call_method_each_type(ec, cfp, calling, ci, cc);

          case METHOD_VISI_PROTECTED:
            if (!(ci->flag & VM_CALL_OPT_SEND)) {
                if (!rb_obj_is_kind_of(cfp->self, cc->me->defined_class)) {
                    cc->aux.method_missing_reason = MISSING_PROTECTED;
                    return vm_call_method_missing(ec, cfp, calling, ci, cc);
                }
                else {
                    /* caching method info to dummy cc */
                    struct rb_call_cache cc_entry;
                    cc_entry = *cc;
                    cc = &cc_entry;

                    VM_ASSERT(cc->me != NULL);
                    return vm_call_method_each_type(ec, cfp, calling, ci, cc);
                }
            }
            return vm_call_method_each_type(ec, cfp, calling, ci, cc);

          default:
            rb_bug("unreachable");
        }
    }
    else {
        return vm_call_method_nome(ec, cfp, calling, ci, cc);
    }
}
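
/* Default `call' function recorded in a freshly initialized call cache; it
 * simply performs full method dispatch via vm_call_method(). */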
static VALUE
vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
    return vm_call_method(ec, reg_cfp, calling, ci, cc);
}
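
/* `call' function used for super calls.  Comparing cc->call against this
 * function pointer is how vm_call_method_each_type() recognizes a super
 * call in the refinement dispatch path. */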
static VALUE
vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc)
{
    /* this check is required to distinguish this function from other call handlers. */
    if (cc->call != vm_call_super_method) rb_bug("bug");
    return vm_call_method(ec, reg_cfp, calling, ci, cc);
}

/* super */
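
/* Compute the class from which a super call should start its lookup:
 * unwrap a refinement's iclass back to the refinement itself, skip the
 * origin (prepended) iclass, then take the superclass. */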
static inline VALUE
vm_search_normal_superclass(VALUE klass)
{
    if (BUILTIN_TYPE(klass) == T_ICLASS &&
        FL_TEST(RBASIC(klass)->klass, RMODULE_IS_REFINEMENT)) {
        klass = RBASIC(klass)->klass;
    }
    klass = RCLASS_ORIGIN(klass);
    return RCLASS_SUPER(klass);
}

static void
vm_super_outside(void)
{
    rb_raise(rb_eNoMethodError, "super called outside of method");
}
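
/* Validate a super call and fill in ci/cc with the method entry found in
 * the superclass of the current method's defined class.  Raises when super
 * is used in an incompatible context (wrong receiver type, or implicit
 * arguments from a define_method() body). */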
static void
vm_search_super_method(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
                       struct rb_calling_info *calling, struct rb_call_info *ci, struct rb_call_cache *cc)
{
    VALUE current_defined_class, klass;
    VALUE sigval = TOPN(calling->argc);
    const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);

    if (!me) {
        vm_super_outside();
    }

    current_defined_class = me->defined_class;

    if (!NIL_P(RCLASS_REFINED_CLASS(current_defined_class))) {
        current_defined_class = RCLASS_REFINED_CLASS(current_defined_class);
    }

    if (BUILTIN_TYPE(current_defined_class) != T_MODULE &&
        BUILTIN_TYPE(current_defined_class) != T_ICLASS && /* bound UnboundMethod */
        !FL_TEST(current_defined_class, RMODULE_INCLUDED_INTO_REFINEMENT) &&
        !rb_obj_is_kind_of(calling->recv, current_defined_class)) {
        VALUE m = RB_TYPE_P(current_defined_class, T_ICLASS) ?
            RBASIC(current_defined_class)->klass : current_defined_class;

        rb_raise(rb_eTypeError,
                 "self has wrong type to call super in this context: "
                 "%"PRIsVALUE" (expected %"PRIsVALUE")",
                 rb_obj_class(calling->recv), m);
    }

    if (me->def->type == VM_METHOD_TYPE_BMETHOD && !sigval) {
        rb_raise(rb_eRuntimeError,
                 "implicit argument passing of super from method defined"
                 " by define_method() is not supported."
                 " Specify all arguments explicitly.");
    }

    ci->mid = me->def->original_id;
    klass = vm_search_normal_superclass(me->defined_class);

    if (!klass) {
        /* bound instance method of module */
        cc->aux.method_missing_reason = MISSING_SUPER;
        CI_SET_FASTPATH(cc, vm_call_method_missing, 1);
    }
    else {
        /* TODO: use inline cache */
        cc->me = rb_callable_method_entry(klass, ci->mid);
        CI_SET_FASTPATH(cc, vm_call_super_method, 1);
    }
}
|
|
|
|
|
|
|
|
/* yield */
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
block_proc_is_lambda(const VALUE procval)
|
|
|
|
{
|
|
|
|
rb_proc_t *proc;
|
|
|
|
|
|
|
|
if (procval) {
|
|
|
|
GetProcPtr(procval, proc);
|
|
|
|
return proc->is_lambda;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-28 07:02:30 -04:00
|
|
|
static VALUE
|
2017-10-27 02:06:31 -04:00
|
|
|
vm_yield_with_cfunc(rb_execution_context_t *ec,
|
2016-07-28 07:02:30 -04:00
|
|
|
const struct rb_captured_block *captured,
|
|
|
|
VALUE self, int argc, const VALUE *argv, VALUE block_handler)
|
2012-10-14 15:58:59 -04:00
|
|
|
{
|
2016-07-28 07:02:30 -04:00
|
|
|
int is_lambda = FALSE; /* TODO */
|
|
|
|
VALUE val, arg, blockarg;
|
|
|
|
const struct vm_ifunc *ifunc = captured->code.ifunc;
|
2017-10-28 06:01:54 -04:00
|
|
|
const rb_callable_method_entry_t *me = ec->passed_bmethod_me;
|
|
|
|
ec->passed_bmethod_me = NULL;
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2016-07-28 07:02:30 -04:00
|
|
|
if (is_lambda) {
|
2012-10-14 15:58:59 -04:00
|
|
|
arg = rb_ary_new4(argc, argv);
|
|
|
|
}
|
|
|
|
else if (argc == 0) {
|
|
|
|
arg = Qnil;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
arg = argv[0];
|
|
|
|
}
|
|
|
|
|
2017-10-27 02:06:31 -04:00
|
|
|
blockarg = rb_vm_bh_to_procval(ec, block_handler);
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2017-10-27 02:06:31 -04:00
|
|
|
vm_push_frame(ec, (const rb_iseq_t *)captured->code.ifunc,
|
2016-08-02 20:16:34 -04:00
|
|
|
VM_FRAME_MAGIC_IFUNC | VM_FRAME_FLAG_CFRAME,
|
2016-07-28 07:02:30 -04:00
|
|
|
self,
|
|
|
|
VM_GUARDED_PREV_EP(captured->ep),
|
|
|
|
(VALUE)me,
|
2017-10-27 02:06:31 -04:00
|
|
|
0, ec->cfp->sp, 0, 0);
|
2016-07-28 07:02:30 -04:00
|
|
|
val = (*ifunc->func)(arg, ifunc->data, argc, argv, blockarg);
|
2017-10-27 02:06:31 -04:00
|
|
|
rb_vm_pop_frame(ec);
|
2012-10-14 15:58:59 -04:00
|
|
|
|
|
|
|
return val;
|
|
|
|
}
|
|
|
|
|
2016-07-28 07:02:30 -04:00
|
|
|
static VALUE
|
2017-10-27 02:06:31 -04:00
|
|
|
vm_yield_with_symbol(rb_execution_context_t *ec, VALUE symbol, int argc, const VALUE *argv, VALUE block_handler)
|
2016-07-28 07:02:30 -04:00
|
|
|
{
|
2017-10-27 02:06:31 -04:00
|
|
|
return rb_sym_proc_call(SYM2ID(symbol), argc, argv, rb_vm_bh_to_procval(ec, block_handler));
|
2016-07-28 07:02:30 -04:00
|
|
|
}
|
|
|
|
|
2015-12-22 06:20:12 -05:00
|
|
|
static inline int
|
|
|
|
vm_callee_setup_block_arg_arg0_splat(rb_control_frame_t *cfp, const rb_iseq_t *iseq, VALUE *argv, VALUE ary)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
long len = RARRAY_LEN(ary);
|
|
|
|
|
|
|
|
CHECK_VM_STACK_OVERFLOW(cfp, iseq->body->param.lead_num);
|
|
|
|
|
|
|
|
for (i=0; i<len && i<iseq->body->param.lead_num; i++) {
|
|
|
|
argv[i] = RARRAY_AREF(ary, i);
|
|
|
|
}
|
|
|
|
|
|
|
|
return i;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline VALUE
|
|
|
|
vm_callee_setup_block_arg_arg0_check(VALUE *argv)
|
|
|
|
{
|
|
|
|
VALUE ary, arg0 = argv[0];
|
|
|
|
ary = rb_check_array_type(arg0);
|
2017-03-18 07:29:35 -04:00
|
|
|
#if 0
|
2015-12-22 06:20:12 -05:00
|
|
|
argv[0] = arg0;
|
2017-03-18 07:29:35 -04:00
|
|
|
#else
|
|
|
|
VM_ASSERT(argv[0] == arg0);
|
|
|
|
#endif
|
2015-12-22 06:20:12 -05:00
|
|
|
return ary;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2017-10-27 02:06:31 -04:00
|
|
|
vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_call_info *ci, const rb_iseq_t *iseq, VALUE *argv, const enum arg_setup_type arg_setup_type)
|
2015-12-22 06:20:12 -05:00
|
|
|
{
|
|
|
|
if (simple_iseq_p(iseq)) {
|
2017-10-27 02:06:31 -04:00
|
|
|
rb_control_frame_t *cfp = ec->cfp;
|
2015-12-22 06:20:12 -05:00
|
|
|
VALUE arg0;
|
|
|
|
|
|
|
|
CALLER_SETUP_ARG(cfp, calling, ci); /* splat arg */
|
|
|
|
|
|
|
|
if (arg_setup_type == arg_setup_block &&
|
|
|
|
calling->argc == 1 &&
|
|
|
|
iseq->body->param.flags.has_lead &&
|
|
|
|
!iseq->body->param.flags.ambiguous_param0 &&
|
|
|
|
!NIL_P(arg0 = vm_callee_setup_block_arg_arg0_check(argv))) {
|
|
|
|
calling->argc = vm_callee_setup_block_arg_arg0_splat(cfp, iseq, argv, arg0);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (calling->argc != iseq->body->param.lead_num) {
|
|
|
|
if (arg_setup_type == arg_setup_block) {
|
|
|
|
if (calling->argc < iseq->body->param.lead_num) {
|
|
|
|
int i;
|
|
|
|
CHECK_VM_STACK_OVERFLOW(cfp, iseq->body->param.lead_num);
|
|
|
|
for (i=calling->argc; i<iseq->body->param.lead_num; i++) argv[i] = Qnil;
|
|
|
|
calling->argc = iseq->body->param.lead_num; /* fill rest parameters */
|
|
|
|
}
|
|
|
|
else if (calling->argc > iseq->body->param.lead_num) {
|
|
|
|
calling->argc = iseq->body->param.lead_num; /* simply truncate arguments */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else {
|
2017-10-27 02:06:31 -04:00
|
|
|
argument_arity_error(ec, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num);
|
2015-12-22 06:20:12 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
else {
|
2017-10-27 02:06:31 -04:00
|
|
|
return setup_parameters_complex(ec, iseq, calling, ci, argv, arg_setup_type);
|
2015-12-22 06:20:12 -05:00
|
|
|
}
|
|
|
|
}
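As a quick illustration of the simple-iseq path above, here is a standalone sketch (toy code, an assumed simplification of the real argv/lead_num handling): extra block arguments are truncated and missing ones are filled with nil, mirroring arg_setup_block.
#include <stdio.h>

#define TOY_NIL (-1)  /* stand-in for Qnil in this toy model */

/* Fit argc given values into lead_num block parameters, as arg_setup_block does:
 * copy what fits (silently truncating extras) and nil-fill the remainder. */
static int toy_fit_block_args(const int *argv, int argc, int *params, int lead_num)
{
    int i;
    for (i = 0; i < argc && i < lead_num; i++) params[i] = argv[i];
    for (; i < lead_num; i++) params[i] = TOY_NIL;
    return lead_num;
}

int main(void)
{
    int params[3];
    int args[] = { 10, 20, 30, 40 };

    toy_fit_block_args(args, 4, params, 3);  /* like { |a, b, c| ... } yielded 4 values */
    printf("%d %d %d\n", params[0], params[1], params[2]);  /* 10 20 30: truncated */

    toy_fit_block_args(args, 2, params, 3);  /* yielded only 2 values */
    printf("%d %d %d\n", params[0], params[1], params[2]);  /* 10 20 -1: nil-filled */
    return 0;
}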
|
|
|
|
|
* rewrite method/block parameter fitting logic to optimize
keyword arguments/parameters and a splat argument.
[Feature #10440] (Details are described in this ticket)
Most of the complex part is moved to vm_args.c.
Note that ISeq#to_a does not yet catch up with the new instruction format.
* vm_core.h: change iseq data structures.
* introduce rb_call_info_kw_arg_t to represent keyword arguments.
* add rb_call_info_t::kw_arg.
* rename rb_iseq_t::arg_post_len to rb_iseq_t::arg_post_num.
* rename rb_iseq_t::arg_keywords to arg_keyword_num.
* rename rb_iseq_t::arg_keyword to rb_iseq_t::arg_keyword_bits.
to represent keyword bitmap parameter index.
This bitmap parameter shows which keyword parameters are given
or not given (0 for given).
It is referred to by the `checkkeyword' instruction described below.
* rename rb_iseq_t::arg_keyword_check to rb_iseq_t::arg_keyword_rest
to represent keyword rest parameter index.
* add rb_iseq_t::arg_keyword_default_values to represent default
keyword values.
* rename VM_CALL_ARGS_SKIP_SETUP to VM_CALL_ARGS_SIMPLE
to represent
(ci->flag & (SPLAT|BLOCKARG)) &&
ci->blockiseq == NULL &&
ci->kw_arg == NULL.
* vm_insnhelper.c, vm_args.c: rewrite with refactoring.
* rewrite splat argument code.
* rewrite keyword arguments/parameters code.
* merge method and block parameter fitting code into one code base.
* vm.c, vm_eval.c: catch up these changes.
* compile.c (new_callinfo): callinfo requires kw_arg parameter.
* compile.c (compile_array_): check the last argument Hash object or
not. If Hash object and all keys are Symbol literals, they are
compiled to keyword arguments.
* insns.def (checkkeyword): add new instruction.
This instruction checks the availability of the corresponding keyword.
For example, a method "def foo k1: 'v1'; end" is compiled to the
following instructions.
0000 checkkeyword 2, 0 # check k1 is given.
0003 branchif 9 # if given, jump to address #9
0005 putstring "v1"
0007 setlocal_OP__WC__0 3 # k1 = 'v1'
0009 trace 8
0011 putnil
0012 trace 16
0014 leave
* insns.def (opt_send_simple): removed and added new instruction
"opt_send_without_block".
* parse.y (new_args_tail_gen): reorder variables.
Before this patch, a method "def foo(k1: 1, kr1:, k2: 2, **krest, &b)"
has parameter variables "k1, kr1, k2, &b, internal_id, krest",
but this patch reorders to "kr1, k1, k2, internal_id, krest, &b".
(locate a block variable at last)
* parse.y (vtable_pop): added.
This function removes the latest `n' variables from vtable.
* iseq.c: catch up iseq data changes.
* proc.c: ditto.
* class.c (keyword_error): export as rb_keyword_error().
* common.mk: depend vm_args.c for vm.o.
* hash.c (rb_hash_has_key): export.
* internal.h: ditto.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@48239 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2014-11-02 13:02:55 -05:00
|
|
|
static int
|
2017-10-27 02:06:31 -04:00
|
|
|
vm_yield_setup_args(rb_execution_context_t *ec, const rb_iseq_t *iseq, const int argc, VALUE *argv, VALUE block_handler, enum arg_setup_type arg_setup_type)
|
2012-10-14 15:58:59 -04:00
|
|
|
{
|
2015-09-19 13:59:58 -04:00
|
|
|
struct rb_calling_info calling_entry, *calling;
|
|
|
|
struct rb_call_info ci_entry, *ci;
|
|
|
|
|
|
|
|
calling = &calling_entry;
|
|
|
|
calling->argc = argc;
|
2016-07-28 07:02:30 -04:00
|
|
|
calling->block_handler = block_handler;
|
2015-09-19 13:59:58 -04:00
|
|
|
|
2014-11-02 13:02:55 -05:00
|
|
|
ci_entry.flag = 0;
|
2015-09-19 13:59:58 -04:00
|
|
|
ci = &ci_entry;
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2017-10-27 02:06:31 -04:00
|
|
|
return vm_callee_setup_block_arg(ec, calling, ci, iseq, argv, arg_setup_type);
|
* insns.def (send, invokesuper, invokeblock, opt_*), vm_core.h:
use only a `ci' (rb_call_info_t) parameter instead of using
parameters such as `op_id', 'op_argc', `blockiseq' and flag.
This information is stored in rb_call_info_t at compile
time.
This technique simplifies parameter passing at related
function calls (~10% speedup for simple method invocation on
my machine).
`rb_call_info_t' also has a new function pointer variable `call'.
This `call' variable makes it possible to customize the method (block)
invocation process for each call site. However, it always calls
`vm_call_general()' with this change.
`rb_call_info_t' also has temporary variables for method
(block) invocation.
* vm_core.h, compile.c, insns.def: introduce VM_CALL_ARGS_SKIP_SETUP
VM_CALL macro. This flag indicates that this call can skip
caller_setup (block arg and splat arg).
* compile.c: catch up above changes.
* iseq.c: catch up above changes (especially for TS_CALLINFO).
* tool/instruction.rb: catch up above changes.
* vm_insnhelper.c, vm_insnhelper.h: ditto. Macros and functions
parameters are changed.
* vm_eval.c (vm_call0): ditto (it will be rewritten soon).
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@37180 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2012-10-14 12:59:05 -04:00
|
|
|
}
|
|
|
|
|
2016-07-28 07:02:30 -04:00
|
|
|
/* ruby iseq -> ruby block */
|
2015-10-10 16:32:07 -04:00
|
|
|
|
2012-10-14 12:59:05 -04:00
|
|
|
static VALUE
|
2017-10-27 02:06:31 -04:00
|
|
|
vm_invoke_iseq_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
|
2016-07-28 07:02:30 -04:00
|
|
|
struct rb_calling_info *calling, const struct rb_call_info *ci,
|
|
|
|
int is_lambda, const struct rb_captured_block *captured)
|
2012-10-14 12:59:05 -04:00
|
|
|
{
|
2017-02-16 04:15:26 -05:00
|
|
|
const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
|
2016-07-28 07:02:30 -04:00
|
|
|
const int arg_size = iseq->body->param.size;
|
|
|
|
VALUE * const rsp = GET_SP() - calling->argc;
|
2017-10-27 02:06:31 -04:00
|
|
|
int opt_pc = vm_callee_setup_block_arg(ec, calling, ci, iseq, rsp, is_lambda ? arg_setup_method : arg_setup_block);
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2016-07-28 07:02:30 -04:00
|
|
|
SET_SP(rsp);
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2017-10-27 02:06:31 -04:00
|
|
|
vm_push_frame(ec, iseq,
|
2017-06-03 06:07:44 -04:00
|
|
|
VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0),
|
2016-07-28 07:02:30 -04:00
|
|
|
captured->self,
|
|
|
|
VM_GUARDED_PREV_EP(captured->ep), 0,
|
|
|
|
iseq->body->iseq_encoded + opt_pc,
|
|
|
|
rsp + arg_size,
|
|
|
|
iseq->body->local_table_size - arg_size, iseq->body->stack_max);
|
2014-11-02 13:02:55 -05:00
|
|
|
|
2016-07-28 07:02:30 -04:00
|
|
|
return Qundef;
|
|
|
|
}
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2016-07-28 07:02:30 -04:00
|
|
|
static VALUE
|
2017-10-27 02:06:31 -04:00
|
|
|
vm_invoke_symbol_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
|
2016-07-28 07:02:30 -04:00
|
|
|
struct rb_calling_info *calling, const struct rb_call_info *ci,
|
|
|
|
VALUE symbol)
|
|
|
|
{
|
|
|
|
VALUE val;
|
|
|
|
int argc;
|
2017-10-27 02:06:31 -04:00
|
|
|
CALLER_SETUP_ARG(ec->cfp, calling, ci);
|
2016-07-28 07:02:30 -04:00
|
|
|
argc = calling->argc;
|
2018-01-05 12:51:10 -05:00
|
|
|
val = vm_yield_with_symbol(ec, symbol, argc, STACK_ADDR_FROM_TOP(argc), calling->block_handler);
|
2016-07-28 07:02:30 -04:00
|
|
|
POPN(argc);
|
|
|
|
return val;
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2017-10-27 02:06:31 -04:00
|
|
|
vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
|
2016-07-28 07:02:30 -04:00
|
|
|
struct rb_calling_info *calling, const struct rb_call_info *ci,
|
|
|
|
const struct rb_captured_block *captured)
|
|
|
|
{
|
|
|
|
VALUE val;
|
|
|
|
int argc;
|
2017-10-27 02:06:31 -04:00
|
|
|
CALLER_SETUP_ARG(ec->cfp, calling, ci);
|
2016-07-28 07:02:30 -04:00
|
|
|
argc = calling->argc;
|
2018-01-05 12:51:10 -05:00
|
|
|
val = vm_yield_with_cfunc(ec, captured, captured->self, argc, STACK_ADDR_FROM_TOP(argc), calling->block_handler);
|
2016-07-28 07:02:30 -04:00
|
|
|
POPN(argc); /* TODO: should put before C/yield? */
|
|
|
|
return val;
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
vm_proc_to_block_handler(VALUE procval)
|
|
|
|
{
|
|
|
|
const struct rb_block *block = vm_proc_block(procval);
|
2012-10-14 15:58:59 -04:00
|
|
|
|
2016-07-28 07:02:30 -04:00
|
|
|
switch (vm_block_type(block)) {
|
|
|
|
case block_type_iseq:
|
|
|
|
return VM_BH_FROM_ISEQ_BLOCK(&block->as.captured);
|
|
|
|
case block_type_ifunc:
|
|
|
|
return VM_BH_FROM_IFUNC_BLOCK(&block->as.captured);
|
|
|
|
case block_type_symbol:
|
|
|
|
return VM_BH_FROM_SYMBOL(block->as.symbol);
|
|
|
|
case block_type_proc:
|
|
|
|
return VM_BH_FROM_PROC(block->as.proc);
|
2012-10-14 15:58:59 -04:00
|
|
|
}
|
2016-07-28 07:02:30 -04:00
|
|
|
VM_UNREACHABLE(vm_yield_with_proc);
|
|
|
|
return Qundef;
|
|
|
|
}
|
|
|
|
|
2018-01-07 14:18:49 -05:00
|
|
|
static inline VALUE
|
|
|
|
vm_invoke_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp,
|
|
|
|
struct rb_calling_info *calling, const struct rb_call_info *ci, VALUE block_handler)
|
2016-07-28 07:02:30 -04:00
|
|
|
{
|
|
|
|
int is_lambda = FALSE;
|
|
|
|
|
|
|
|
again:
|
|
|
|
switch (vm_block_handler_type(block_handler)) {
|
|
|
|
case block_handler_type_iseq:
|
|
|
|
{
|
|
|
|
const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
|
2017-10-27 02:06:31 -04:00
|
|
|
return vm_invoke_iseq_block(ec, reg_cfp, calling, ci, is_lambda, captured);
|
2016-07-28 07:02:30 -04:00
|
|
|
}
|
|
|
|
case block_handler_type_ifunc:
|
|
|
|
{
|
|
|
|
const struct rb_captured_block *captured = VM_BH_TO_IFUNC_BLOCK(block_handler);
|
2017-10-27 02:06:31 -04:00
|
|
|
return vm_invoke_ifunc_block(ec, reg_cfp, calling, ci, captured);
|
2016-07-28 07:02:30 -04:00
|
|
|
}
|
|
|
|
case block_handler_type_proc:
|
|
|
|
is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));
|
|
|
|
block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
|
|
|
|
goto again;
|
|
|
|
case block_handler_type_symbol:
|
2017-10-27 02:06:31 -04:00
|
|
|
return vm_invoke_symbol_block(ec, reg_cfp, calling, ci, VM_BH_TO_SYMBOL(block_handler));
|
2012-10-14 15:58:59 -04:00
|
|
|
}
|
2016-07-28 07:02:30 -04:00
|
|
|
VM_UNREACHABLE(vm_invoke_block: unreachable);
|
|
|
|
return Qnil;
|
2012-10-14 12:59:05 -04:00
|
|
|
}
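The dispatch loop above can be summarized with a standalone sketch (toy types only, not the real block handler encoding): proc handlers are unwrapped to the block they carry and dispatch is retried, while iseq, ifunc and symbol handlers are invoked directly.
#include <stdio.h>

enum toy_bh_type { TOY_BH_ISEQ, TOY_BH_IFUNC, TOY_BH_SYMBOL, TOY_BH_PROC };

struct toy_block_handler {
    enum toy_bh_type type;
    const struct toy_block_handler *wrapped;  /* only used for TOY_BH_PROC */
    int is_lambda;                            /* only meaningful for TOY_BH_PROC */
};

static const char *toy_invoke_block(const struct toy_block_handler *bh)
{
    int is_lambda = 0;
  again:
    switch (bh->type) {
      case TOY_BH_ISEQ:   return is_lambda ? "iseq block (lambda)" : "iseq block";
      case TOY_BH_IFUNC:  return "ifunc block";
      case TOY_BH_SYMBOL: return "symbol block";
      case TOY_BH_PROC:
        is_lambda = bh->is_lambda;  /* remember lambda-ness of the proc */
        bh = bh->wrapped;           /* unwrap to the underlying block */
        goto again;                 /* and dispatch again, as vm_invoke_block does */
    }
    return "unreachable";
}

int main(void)
{
    struct toy_block_handler iseq_bh = { TOY_BH_ISEQ, NULL, 0 };
    struct toy_block_handler proc_bh = { TOY_BH_PROC, &iseq_bh, 1 };
    printf("%s\n", toy_invoke_block(&proc_bh));  /* iseq block (lambda) */
    return 0;
}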
|
2013-08-20 13:41:13 -04:00
|
|
|
|
|
|
|
static VALUE
|
2015-07-21 17:28:43 -04:00
|
|
|
vm_make_proc_with_iseq(const rb_iseq_t *blockiseq)
|
2013-08-20 13:41:13 -04:00
|
|
|
{
|
2017-10-26 04:41:34 -04:00
|
|
|
const rb_execution_context_t *ec = GET_EC();
|
|
|
|
const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
|
2016-07-28 07:02:30 -04:00
|
|
|
struct rb_captured_block *captured;
|
2013-08-20 13:41:13 -04:00
|
|
|
|
|
|
|
if (cfp == 0) {
|
2013-09-22 07:57:50 -04:00
|
|
|
rb_bug("vm_make_proc_with_iseq: unreachable");
|
2013-08-20 13:41:13 -04:00
|
|
|
}
|
|
|
|
|
2016-07-28 07:02:30 -04:00
|
|
|
captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
|
|
|
|
captured->code.iseq = blockiseq;
|
2013-08-20 13:41:13 -04:00
|
|
|
|
2017-10-26 04:41:34 -04:00
|
|
|
return rb_vm_make_proc(ec, captured, rb_cProc);
|
2013-08-20 13:41:13 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2014-06-20 02:59:28 -04:00
|
|
|
vm_once_exec(VALUE iseq)
|
2013-08-20 13:41:13 -04:00
|
|
|
{
|
2014-06-20 02:59:28 -04:00
|
|
|
VALUE proc = vm_make_proc_with_iseq((rb_iseq_t *)iseq);
|
2013-08-20 13:41:13 -04:00
|
|
|
return rb_proc_call_with_block(proc, 0, 0, Qnil);
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
vm_once_clear(VALUE data)
|
|
|
|
{
|
|
|
|
union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)data;
|
|
|
|
is->once.running_thread = NULL;
|
|
|
|
return Qnil;
|
|
|
|
}
|
2014-11-02 13:02:55 -05:00
|
|
|
|
2014-12-16 01:18:25 -05:00
|
|
|
rb_control_frame_t *
|
2017-10-27 15:08:31 -04:00
|
|
|
FUNC_FASTCALL(rb_vm_opt_struct_aref)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
|
2014-12-16 01:18:25 -05:00
|
|
|
{
|
|
|
|
TOPN(0) = rb_struct_aref(GET_SELF(), TOPN(0));
|
|
|
|
return reg_cfp;
|
|
|
|
}
|
|
|
|
|
|
|
|
rb_control_frame_t *
|
2017-11-07 03:01:26 -05:00
|
|
|
FUNC_FASTCALL(rb_vm_opt_struct_aset)(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
|
2014-12-16 01:18:25 -05:00
|
|
|
{
|
|
|
|
rb_struct_aset(GET_SELF(), TOPN(0), TOPN(1));
|
|
|
|
return reg_cfp;
|
|
|
|
}
|
2015-06-02 15:15:29 -04:00
|
|
|
|
|
|
|
/* defined insn */
|
|
|
|
|
2015-06-02 15:49:22 -04:00
|
|
|
static enum defined_type
|
2015-06-02 16:03:54 -04:00
|
|
|
check_respond_to_missing(VALUE obj, VALUE v)
|
2015-06-02 15:49:22 -04:00
|
|
|
{
|
|
|
|
VALUE args[2];
|
|
|
|
VALUE r;
|
|
|
|
|
|
|
|
args[0] = obj; args[1] = Qfalse;
|
|
|
|
r = rb_check_funcall(v, idRespond_to_missing, 2, args);
|
|
|
|
if (r != Qundef && RTEST(r)) {
|
|
|
|
return DEFINED_METHOD;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-02 15:15:29 -04:00
|
|
|
static VALUE
|
2017-10-27 01:30:05 -04:00
|
|
|
vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE needstr, VALUE v)
|
2015-06-02 15:15:29 -04:00
|
|
|
{
|
|
|
|
VALUE klass;
|
|
|
|
enum defined_type expr_type = 0;
|
|
|
|
enum defined_type type = (enum defined_type)op_type;
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case DEFINED_IVAR:
|
|
|
|
if (rb_ivar_defined(GET_SELF(), SYM2ID(obj))) {
|
|
|
|
expr_type = DEFINED_IVAR;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case DEFINED_IVAR2:
|
|
|
|
klass = vm_get_cbase(GET_EP());
|
|
|
|
break;
|
|
|
|
case DEFINED_GVAR:
|
|
|
|
if (rb_gvar_defined(rb_global_entry(SYM2ID(obj)))) {
|
|
|
|
expr_type = DEFINED_GVAR;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case DEFINED_CVAR: {
|
|
|
|
const rb_cref_t *cref = rb_vm_get_cref(GET_EP());
|
|
|
|
klass = vm_get_cvar_base(cref, GET_CFP());
|
|
|
|
if (rb_cvar_defined(klass, SYM2ID(obj))) {
|
|
|
|
expr_type = DEFINED_CVAR;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case DEFINED_CONST:
|
|
|
|
klass = v;
|
2017-10-27 01:30:05 -04:00
|
|
|
if (vm_get_ev_const(ec, klass, SYM2ID(obj), 1)) {
|
2015-06-02 15:15:29 -04:00
|
|
|
expr_type = DEFINED_CONST;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case DEFINED_FUNC:
|
|
|
|
klass = CLASS_OF(v);
|
|
|
|
if (rb_method_boundp(klass, SYM2ID(obj), 0)) {
|
|
|
|
expr_type = DEFINED_METHOD;
|
|
|
|
}
|
2015-06-02 15:49:22 -04:00
|
|
|
else {
|
2015-06-02 16:03:54 -04:00
|
|
|
expr_type = check_respond_to_missing(obj, v);
|
2015-06-02 15:49:22 -04:00
|
|
|
}
|
2015-06-02 15:15:29 -04:00
|
|
|
break;
|
|
|
|
case DEFINED_METHOD:{
|
|
|
|
VALUE klass = CLASS_OF(v);
|
2015-07-03 07:24:50 -04:00
|
|
|
const rb_method_entry_t *me = rb_method_entry(klass, SYM2ID(obj));
|
2015-06-02 15:15:29 -04:00
|
|
|
|
|
|
|
if (me) {
|
2015-06-06 06:19:48 -04:00
|
|
|
switch (METHOD_ENTRY_VISI(me)) {
|
2015-06-02 21:39:16 -04:00
|
|
|
case METHOD_VISI_PRIVATE:
|
|
|
|
break;
|
|
|
|
case METHOD_VISI_PROTECTED:
|
|
|
|
if (!rb_obj_is_kind_of(GET_SELF(), rb_class_real(klass))) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case METHOD_VISI_PUBLIC:
|
2015-06-02 15:36:43 -04:00
|
|
|
expr_type = DEFINED_METHOD;
|
2015-06-02 21:39:16 -04:00
|
|
|
break;
|
|
|
|
default:
|
2015-06-06 06:19:48 -04:00
|
|
|
rb_bug("vm_defined: unreachable: %u", (unsigned int)METHOD_ENTRY_VISI(me));
|
2015-06-02 15:15:29 -04:00
|
|
|
}
|
|
|
|
}
|
2015-06-02 15:36:43 -04:00
|
|
|
else {
|
2015-06-02 16:03:54 -04:00
|
|
|
expr_type = check_respond_to_missing(obj, v);
|
2015-06-02 15:15:29 -04:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case DEFINED_YIELD:
|
2016-07-28 07:02:30 -04:00
|
|
|
if (GET_BLOCK_HANDLER() != VM_BLOCK_HANDLER_NONE) {
|
2015-06-02 15:15:29 -04:00
|
|
|
expr_type = DEFINED_YIELD;
|
|
|
|
}
|
|
|
|
break;
|
2015-07-08 22:10:51 -04:00
|
|
|
case DEFINED_ZSUPER:
|
|
|
|
{
|
|
|
|
const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(GET_CFP());
|
|
|
|
|
|
|
|
if (me) {
|
|
|
|
VALUE klass = vm_search_normal_superclass(me->defined_class);
|
|
|
|
ID id = me->def->original_id;
|
|
|
|
|
|
|
|
if (rb_method_boundp(klass, id, 0)) {
|
|
|
|
expr_type = DEFINED_ZSUPER;
|
|
|
|
}
|
2015-06-02 15:15:29 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case DEFINED_REF:{
|
2017-10-27 01:30:05 -04:00
|
|
|
if (vm_getspecial(ec, GET_LEP(), Qfalse, FIX2INT(obj)) != Qnil) {
|
2015-06-02 15:15:29 -04:00
|
|
|
expr_type = DEFINED_GVAR;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
rb_bug("unimplemented defined? type (VM)");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (expr_type != 0) {
|
|
|
|
if (needstr != Qfalse) {
|
|
|
|
return rb_iseq_defined_string(expr_type);
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
return Qtrue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
return Qnil;
|
|
|
|
}
|
|
|
|
}
|
split insns.def into functions
Contemporary C compilers are good at function inlining. They fold
multiple functions into one. However they are not yet smart enough to
unfold a function into several ones. So generally speaking, it is
wiser for a C programmer to manually split C functions whenever
possible. That should make room for compilers to optimize at will.
Before this changeset insns.def was converted into single HUGE
function called vm_exec_core(). By moving each instruction's core
into individual functions, generated C source code is reduced from
3,428 lines to 2,847 lines. Looking at the generated assembly
however, it seems my compiler (gcc 6.2) is extraordinarily smart, so
it inlines almost all functions I introduced in this changeset back
into that vm_exec_core. On my machine, the compiled binary of the
function does not shrink very much in size (28,432 bytes to 26,816
bytes, according to nm(1)).
I believe this change is zero-cost. Several benchmarks I exercised
showed no significant difference beyond the error margin. For instance,
3 repeated runs of optcarrot benchmark on my machine resulted in:
before this: 28.330329285707490, 27.513378371065920, 29.40420215754537
after this: 27.107195867280414, 25.549324021385907, 30.31581919050884
in fps (greater==faster).
----
* internal.h (rb_obj_not_equal): used from vm_insnhelper.c
* insns.def: move vast majority of lines into vm_insnhelper.c
* vm_insnhelper.c: moved here.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@58390 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2017-04-18 06:58:49 -04:00
|
|
|
|
|
|
|
static const VALUE *
|
|
|
|
vm_get_ep(const VALUE *const reg_ep, rb_num_t lv)
|
|
|
|
{
|
|
|
|
rb_num_t i;
|
|
|
|
const VALUE *ep = reg_ep;
|
|
|
|
for (i = 0; i < lv; i++) {
|
|
|
|
ep = GET_PREV_EP(ep);
|
|
|
|
}
|
|
|
|
return ep;
|
|
|
|
}
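A standalone sketch of the loop above (a toy parent-pointer chain in place of the tagged prev-EP slot the VM actually stores):
#include <stdio.h>

struct toy_env { int depth; const struct toy_env *prev; };

/* Walk lv lexical levels up the chain of enclosing environments. */
static const struct toy_env *toy_get_ep(const struct toy_env *ep, unsigned lv)
{
    unsigned i;
    for (i = 0; i < lv; i++) ep = ep->prev;
    return ep;
}

int main(void)
{
    struct toy_env outer = { 0, NULL };
    struct toy_env middle = { 1, &outer };
    struct toy_env inner = { 2, &middle };
    printf("%d\n", toy_get_ep(&inner, 2)->depth);  /* 0: reached the outermost env */
    return 0;
}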
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
vm_get_special_object(const VALUE *const reg_ep,
|
|
|
|
enum vm_special_object_type type)
|
|
|
|
{
|
|
|
|
switch (type) {
|
2017-04-18 09:14:05 -04:00
|
|
|
case VM_SPECIAL_OBJECT_VMCORE:
|
2017-04-18 06:58:49 -04:00
|
|
|
return rb_mRubyVMFrozenCore;
|
2017-04-18 09:14:05 -04:00
|
|
|
case VM_SPECIAL_OBJECT_CBASE:
|
2017-04-18 06:58:49 -04:00
|
|
|
return vm_get_cbase(reg_ep);
|
2017-04-18 09:14:05 -04:00
|
|
|
case VM_SPECIAL_OBJECT_CONST_BASE:
|
2017-04-18 06:58:49 -04:00
|
|
|
return vm_get_const_base(reg_ep);
|
2017-04-18 09:14:05 -04:00
|
|
|
default:
|
2017-04-18 06:58:49 -04:00
|
|
|
rb_bug("putspecialobject insn: unknown value_type %d", type);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
vm_freezestring(VALUE str, VALUE debug)
|
|
|
|
{
|
|
|
|
if (!NIL_P(debug)) {
|
|
|
|
rb_ivar_set(str, id_debug_created_info, debug);
|
|
|
|
}
|
|
|
|
rb_str_freeze(str);
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
vm_concat_array(VALUE ary1, VALUE ary2st)
|
|
|
|
{
|
|
|
|
const VALUE ary2 = ary2st;
|
2017-05-31 08:30:57 -04:00
|
|
|
VALUE tmp1 = rb_check_convert_type_with_id(ary1, T_ARRAY, "Array", idTo_a);
|
|
|
|
VALUE tmp2 = rb_check_convert_type_with_id(ary2, T_ARRAY, "Array", idTo_a);
|
2017-04-18 06:58:49 -04:00
|
|
|
|
|
|
|
if (NIL_P(tmp1)) {
|
|
|
|
tmp1 = rb_ary_new3(1, ary1);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (NIL_P(tmp2)) {
|
|
|
|
tmp2 = rb_ary_new3(1, ary2);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (tmp1 == ary1) {
|
|
|
|
tmp1 = rb_ary_dup(ary1);
|
|
|
|
}
|
|
|
|
return rb_ary_concat(tmp1, tmp2);
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
|
|
|
vm_splat_array(VALUE flag, VALUE ary)
|
|
|
|
{
|
2017-05-31 08:30:57 -04:00
|
|
|
VALUE tmp = rb_check_convert_type_with_id(ary, T_ARRAY, "Array", idTo_a);
|
2017-04-18 06:58:49 -04:00
|
|
|
if (NIL_P(tmp)) {
|
|
|
|
return rb_ary_new3(1, ary);
|
|
|
|
}
|
|
|
|
else if (RTEST(flag)) {
|
|
|
|
return rb_ary_dup(tmp);
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
return tmp;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
|
2017-11-16 01:10:31 -05:00
|
|
|
vm_check_match(rb_execution_context_t *ec, VALUE target, VALUE pattern, rb_num_t flag)
|
2017-04-18 06:58:49 -04:00
|
|
|
{
|
|
|
|
enum vm_check_match_type type = ((int)flag) & VM_CHECKMATCH_TYPE_MASK;
|
|
|
|
|
|
|
|
if (flag & VM_CHECKMATCH_ARRAY) {
|
|
|
|
long i;
|
|
|
|
const long n = RARRAY_LEN(pattern);
|
|
|
|
|
|
|
|
for (i = 0; i < n; i++) {
|
|
|
|
VALUE v = RARRAY_AREF(pattern, i);
|
2017-11-16 01:10:31 -05:00
|
|
|
VALUE c = check_match(ec, v, target, type);
|
2017-04-18 06:58:49 -04:00
|
|
|
|
|
|
|
if (RTEST(c)) {
|
|
|
|
return c;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return Qfalse;
|
|
|
|
}
|
|
|
|
else {
|
2017-11-16 01:10:31 -05:00
|
|
|
return check_match(ec, pattern, target, type);
|
2017-04-18 06:58:49 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static VALUE
vm_check_keyword(lindex_t bits, lindex_t idx, const VALUE *ep)
{
    const VALUE kw_bits = *(ep - bits);

    if (FIXNUM_P(kw_bits)) {
        int b = FIX2INT(kw_bits);
        return (b & (0x01 << idx)) ? Qfalse : Qtrue;
    }
    else {
        VM_ASSERT(RB_TYPE_P(kw_bits, T_HASH));
        return rb_hash_has_key(kw_bits, INT2FIX(idx));
    }
}

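/* Fire the DTrace method-entry/method-return (and C-method) probes that
 * correspond to the given event flag, when any of those probes is enabled. */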
static void
vm_dtrace(rb_event_flag_t flag, rb_execution_context_t *ec)
{
    if (RUBY_DTRACE_METHOD_ENTRY_ENABLED() ||
        RUBY_DTRACE_METHOD_RETURN_ENABLED() ||
        RUBY_DTRACE_CMETHOD_ENTRY_ENABLED() ||
        RUBY_DTRACE_CMETHOD_RETURN_ENABLED()) {

        switch (flag) {
          case RUBY_EVENT_CALL:
            RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, 0, 0);
            return;
          case RUBY_EVENT_C_CALL:
            RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, 0, 0);
            return;
          case RUBY_EVENT_RETURN:
            RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
            return;
          case RUBY_EVENT_C_RETURN:
            RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, 0, 0);
            return;
        }
    }
}

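/* Fetch an existing constant `id` under `cbase` for a class/module definition
 * (public-only lookup when the definition is scoped); returns 0 when no such
 * constant is defined there yet. */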
static VALUE
vm_const_get_under(ID id, rb_num_t flags, VALUE cbase)
{
    VALUE ns;

    if ((ns = vm_search_const_defined_class(cbase, id)) == 0) {
        return ns;
    }
    else if (VM_DEFINECLASS_SCOPED_P(flags)) {
        return rb_public_const_get_at(ns, id);
    }
    else {
        return rb_const_get_at(ns, id);
    }
}

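/* Ensure that a pre-existing constant found by a class definition really is a
 * Class and that an explicitly given superclass matches its real superclass;
 * raises TypeError otherwise. */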
static VALUE
vm_check_if_class(ID id, rb_num_t flags, VALUE super, VALUE klass)
{
    if (!RB_TYPE_P(klass, T_CLASS)) {
        rb_raise(rb_eTypeError, "%"PRIsVALUE" is not a class", rb_id2str(id));
    }
    else if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
        VALUE tmp = rb_class_real(RCLASS_SUPER(klass));

        if (tmp != super) {
            rb_raise(rb_eTypeError,
                     "superclass mismatch for class %"PRIsVALUE"",
                     rb_id2str(id));
        }
        else {
            return klass;
        }
    }
    else {
        return klass;
    }
}

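/* Ensure that a pre-existing constant found by a module definition really is
 * a Module; raises TypeError otherwise. */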
static VALUE
vm_check_if_module(ID id, VALUE mod)
{
    if (!RB_TYPE_P(mod, T_MODULE)) {
        rb_raise(rb_eTypeError, "%"PRIsVALUE" is not a module", rb_id2str(id));
    }
    else {
        return mod;
    }
}

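/* Declare a brand-new class `id` under `cbase`: the superclass defaults to
 * Object, the class path and constant are set, and the inherited hook runs. */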
static VALUE
vm_declare_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
{
    /* new class declaration */
    VALUE s = VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) ? super : rb_cObject;
    VALUE c = rb_define_class_id(id, s);

    rb_set_class_path_string(c, cbase, rb_id2str(id));
    rb_const_set(cbase, id, c);
    rb_class_inherited(s, c);
    return c;
}

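/* Declare a brand-new module `id` under `cbase`, setting its path and
 * registering the constant. */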
static VALUE
vm_declare_module(ID id, VALUE cbase)
{
    /* new module declaration */
    VALUE mod = rb_define_module_id(id);
    rb_set_class_path_string(mod, cbase, rb_id2str(id));
    rb_const_set(cbase, id, mod);
    return mod;
}

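/* defineclass helper for `class Foo < Super`: validates the superclass, then
 * either reuses an existing compatible class constant or declares a new one. */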
static VALUE
vm_define_class(ID id, rb_num_t flags, VALUE cbase, VALUE super)
{
    VALUE klass;

    if (VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) && !RB_TYPE_P(super, T_CLASS)) {
        rb_raise(rb_eTypeError,
                 "superclass must be a Class (%"PRIsVALUE" given)",
                 rb_obj_class(super));
    }

    vm_check_if_namespace(cbase);

    /* find klass */
    rb_autoload_load(cbase, id);
    if ((klass = vm_const_get_under(id, flags, cbase)) != 0) {
        return vm_check_if_class(id, flags, super, klass);
    }
    else {
        return vm_declare_class(id, flags, cbase, super);
    }
}

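/* defineclass helper for `module Foo`: reuses an existing module constant
 * under cbase or declares a new module. */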
static VALUE
vm_define_module(ID id, rb_num_t flags, VALUE cbase)
{
    VALUE mod;

    vm_check_if_namespace(cbase);
    if ((mod = vm_const_get_under(id, flags, cbase)) != 0) {
        return vm_check_if_module(id, mod);
    }
    else {
        return vm_declare_module(id, cbase);
    }
}

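/* Top-level helper for the defineclass instruction: dispatches on the
 * declaration type (class, singleton class, or module). */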
static VALUE
vm_find_or_create_class_by_id(ID id,
                              rb_num_t flags,
                              VALUE cbase,
                              VALUE super)
{
    rb_vm_defineclass_type_t type = VM_DEFINECLASS_TYPE(flags);

    switch (type) {
      case VM_DEFINECLASS_TYPE_CLASS:
        /* classdef returns class scope value */
        return vm_define_class(id, flags, cbase, super);

      case VM_DEFINECLASS_TYPE_SINGLETON_CLASS:
        /* classdef returns class scope value */
        return rb_singleton_class(cbase);

      case VM_DEFINECLASS_TYPE_MODULE:
        /* classdef returns class scope value */
        return vm_define_module(id, flags, cbase);

      default:
        rb_bug("unknown defineclass type: %d", (int)type);
    }
}

/* this macro is mandatory to use OPTIMIZED_CMP. What a design! */
#define id_cmp idCmp

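/* Fast path for the opt_newarray_max instruction: scan the operands on the
 * stack directly instead of allocating an Array, unless Array#max has been
 * redefined. */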
static VALUE
vm_opt_newarray_max(rb_num_t num, const VALUE *ptr)
{
    if (BASIC_OP_UNREDEFINED_P(BOP_MAX, ARRAY_REDEFINED_OP_FLAG)) {
        if (num == 0) {
            return Qnil;
        }
        else {
            struct cmp_opt_data cmp_opt = { 0, 0 };
            VALUE result = *ptr;
            rb_num_t i = num - 1;
            while (i-- > 0) {
                const VALUE v = *++ptr;
                if (OPTIMIZED_CMP(v, result, cmp_opt) > 0) {
                    result = v;
                }
            }
            return result;
        }
    }
    else {
        VALUE ary = rb_ary_new4(num, ptr);
        return rb_funcall(ary, idMax, 0);
    }
}

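/* Same as above for the opt_newarray_min instruction, falling back to
 * Array#min when it has been redefined. */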
static VALUE
vm_opt_newarray_min(rb_num_t num, const VALUE *ptr)
{
    if (BASIC_OP_UNREDEFINED_P(BOP_MIN, ARRAY_REDEFINED_OP_FLAG)) {
        if (num == 0) {
            return Qnil;
        }
        else {
            struct cmp_opt_data cmp_opt = { 0, 0 };
            VALUE result = *ptr;
            rb_num_t i = num - 1;
            while (i-- > 0) {
                const VALUE v = *++ptr;
                if (OPTIMIZED_CMP(v, result, cmp_opt) < 0) {
                    result = v;
                }
            }
            return result;
        }
    }
    else {
        VALUE ary = rb_ary_new4(num, ptr);
        return rb_funcall(ary, idMin, 0);
    }
}

#undef id_cmp

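/* An inline constant cache entry is a hit while the global constant serial is
 * unchanged and, when a cref was recorded, the current cref is the same one. */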
static VALUE
vm_ic_hit_p(IC ic, const VALUE *reg_ep)
{
    if (ic->ic_serial == GET_GLOBAL_CONSTANT_STATE() &&
        (ic->ic_cref == NULL || ic->ic_cref == rb_vm_get_cref(reg_ep))) {
        return ic->ic_value.value;
    }
    else {
        return Qnil;
    }
}

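/* Store a freshly resolved constant value into the inline cache together with
 * the serial and cref needed to validate later hits. */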
static void
vm_ic_update(IC ic, VALUE val, const VALUE *reg_ep)
{
    VM_ASSERT(ic->ic_value.value != Qundef);
    ic->ic_value.value = val;
    ic->ic_serial = GET_GLOBAL_CONSTANT_STATE() - ruby_vm_const_missing_count;
    ic->ic_cref = vm_get_const_key_cref(reg_ep);
    ruby_vm_const_missing_count = 0;
}

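/* Implements the `once` instruction: the body iseq runs at most once and its
 * value is cached; other threads wait for the first run to finish, while a
 * recursive hit on the running thread simply re-executes the body. */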
static VALUE
vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, IC ic)
{
    rb_thread_t *th = rb_ec_thread_ptr(ec);
    rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
    union iseq_inline_storage_entry *const is = (union iseq_inline_storage_entry *)ic;

  again:
    if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
        return is->once.value;
    }
    else if (is->once.running_thread == NULL) {
        VALUE val;
        is->once.running_thread = th;
        val = is->once.value = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
        /* is->once.running_thread is cleared by vm_once_clear() */
        is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
        rb_iseq_add_mark_object(ec->cfp->iseq, val);
        return val;
    }
    else if (is->once.running_thread == th) {
        /* recursive once */
        return vm_once_exec((VALUE)iseq);
    }
    else {
        /* waiting for finish */
        RUBY_VM_CHECK_INTS(ec);
        rb_thread_schedule();
        goto again;
    }
}

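/* opt_case_dispatch: constant-time jump-table lookup for case/when over
 * immediates, Floats, Symbols, Bignums, and Strings, as long as the relevant
 * === methods are unredefined; returns 0 to fall back to sequential checks. */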
static OFFSET
vm_case_dispatch(CDHASH hash, OFFSET else_offset, VALUE key)
{
    switch (OBJ_BUILTIN_TYPE(key)) {
      case -1:
      case T_FLOAT:
      case T_SYMBOL:
      case T_BIGNUM:
      case T_STRING:
        if (BASIC_OP_UNREDEFINED_P(BOP_EQQ,
                                   SYMBOL_REDEFINED_OP_FLAG |
                                   INTEGER_REDEFINED_OP_FLAG |
                                   FLOAT_REDEFINED_OP_FLAG |
                                   NIL_REDEFINED_OP_FLAG |
                                   TRUE_REDEFINED_OP_FLAG |
                                   FALSE_REDEFINED_OP_FLAG |
                                   STRING_REDEFINED_OP_FLAG)) {
            st_data_t val;
            if (RB_FLOAT_TYPE_P(key)) {
                double kval = RFLOAT_VALUE(key);
                if (!isinf(kval) && modf(kval, &kval) == 0.0) {
                    key = FIXABLE(kval) ? LONG2FIX((long)kval) : rb_dbl2big(kval);
                }
            }
            if (st_lookup(RHASH_TBL_RAW(hash), key, &val)) {
                return FIX2INT((VALUE)val);
            }
            else {
                return else_offset;
            }
        }
    }
    return 0;
}

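/* Abort when the stack pointer does not match the expected base pointer at
 * frame teardown; RUBY_DEVEL builds raise a fatal error with a disassembly of
 * the offending iseq, other builds call rb_bug(). */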
NORETURN(static void
         vm_stack_consistency_error(const rb_execution_context_t *ec,
                                    const rb_control_frame_t *,
                                    const VALUE *));
static void
vm_stack_consistency_error(const rb_execution_context_t *ec,
                           const rb_control_frame_t *cfp,
                           const VALUE *bp)
{
    const ptrdiff_t nsp = VM_SP_CNT(ec, cfp->sp);
    const ptrdiff_t nbp = VM_SP_CNT(ec, bp);
    static const char stack_consistency_error[] =
        "Stack consistency error (sp: %"PRIdPTRDIFF", bp: %"PRIdPTRDIFF")";
#if defined RUBY_DEVEL
    VALUE mesg = rb_sprintf(stack_consistency_error, nsp, nbp);
    rb_str_cat_cstr(mesg, "\n");
    rb_str_append(mesg, rb_iseq_disasm(cfp->iseq));
    rb_exc_fatal(rb_exc_new3(rb_eFatal, mesg));
#else
    rb_bug(stack_consistency_error, nsp, nbp);
#endif
}

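/* Fast path for the opt_plus instruction: Fixnum, flonum/Float, String and
 * Array additions are handled inline; Qundef means "fall back to a normal
 * method call". */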
2017-12-11 15:30:37 -05:00
|
|
|
static VALUE
|
|
|
|
vm_opt_plus(VALUE recv, VALUE obj)
|
split insns.def into functions
Contemporary C compilers are good at function inlining. They fold
multiple functions into one. However they are not yet smart enough to
unfold a function into several ones. So generally speaking, it is
wiser for a C programmer to manually split C functions whenever
possible. That should make rooms for compilers to optimize at will.
Before this changeset insns.def was converted into single HUGE
function called vm_exec_core(). By moving each instruction's core
into individual functions, generated C source code is reduced from
3,428 lines to 2,847 lines. Looking at the generated assembly
however, it seems my compiler (gcc 6.2) is extraordinary smart so that
it inlines almost all functions I introduced in this changeset back
into that vm_exec_core. On my machine compiled machine binary of the
function does not shrink very much in size (28,432 bytes to 26,816
bytes, according to nm(1)).
I believe this change is zero-cost. Several benchmarks I exercised
showed no significant difference beyond error mergin. For instance
3 repeated runs of optcarrot benchmark on my machine resulted in:
before this: 28.330329285707490, 27.513378371065920, 29.40420215754537
after this: 27.107195867280414, 25.549324021385907, 30.31581919050884
in fps (greater==faster).
----
* internal.h (rb_obj_not_equal): used from vm_insnhelper.c
* insns.def: move vast majority of lines into vm_insnhelper.c
* vm_insnhelper.c: moved here.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@58390 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2017-04-18 06:58:49 -04:00
|
|
|
{
|
|
|
|
if (FIXNUM_2_P(recv, obj) &&
|
2017-12-11 15:30:37 -05:00
|
|
|
BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
|
|
|
|
return rb_fix_plus_fix(recv, obj);
|
split insns.def into functions
Contemporary C compilers are good at function inlining. They fold
multiple functions into one. However they are not yet smart enough to
unfold a function into several ones. So generally speaking, it is
wiser for a C programmer to manually split C functions whenever
possible. That should make rooms for compilers to optimize at will.
Before this changeset insns.def was converted into single HUGE
function called vm_exec_core(). By moving each instruction's core
into individual functions, generated C source code is reduced from
3,428 lines to 2,847 lines. Looking at the generated assembly
however, it seems my compiler (gcc 6.2) is extraordinary smart so that
it inlines almost all functions I introduced in this changeset back
into that vm_exec_core. On my machine compiled machine binary of the
function does not shrink very much in size (28,432 bytes to 26,816
bytes, according to nm(1)).
I believe this change is zero-cost. Several benchmarks I exercised
showed no significant difference beyond error mergin. For instance
3 repeated runs of optcarrot benchmark on my machine resulted in:
before this: 28.330329285707490, 27.513378371065920, 29.40420215754537
after this: 27.107195867280414, 25.549324021385907, 30.31581919050884
in fps (greater==faster).
----
* internal.h (rb_obj_not_equal): used from vm_insnhelper.c
* insns.def: move vast majority of lines into vm_insnhelper.c
* vm_insnhelper.c: moved here.
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@58390 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
2017-04-18 06:58:49 -04:00
|
|
|
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_PLUS, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) + RFLOAT_VALUE(obj));
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             RBASIC_CLASS(obj) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(BOP_PLUS, STRING_REDEFINED_OP_FLAG)) {
        return rb_str_plus(recv, obj);
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_PLUS, ARRAY_REDEFINED_OP_FLAG)) {
        return rb_ary_plus(recv, obj);
    }
    else {
        return Qundef;
    }
}
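
/*
 * The vm_opt_* helpers below all follow the same pattern: each one is the
 * fast path of an optimized VM instruction and returns Qundef whenever the
 * operand types, or a redefinition of the corresponding core operator
 * (checked with BASIC_OP_UNREDEFINED_P), rule the fast path out.  A Qundef
 * result makes the instruction fall back to an ordinary method call.
 * FIXNUM_2_P()/FLONUM_2_P() cover immediate operands; the RBASIC_CLASS()
 * comparisons cover heap-allocated Float/String/Array/Hash receivers.
 * For example, `3 - 4` is handled entirely by vm_opt_minus, while
 * `3 - 4.0` returns Qundef and goes through regular method dispatch.
 */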

static VALUE
vm_opt_minus(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_MINUS, INTEGER_REDEFINED_OP_FLAG)) {
        return rb_fix_minus_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_MINUS, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) - RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}

static VALUE
vm_opt_mult(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_MULT, INTEGER_REDEFINED_OP_FLAG)) {
        return rb_fix_mul_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_MULT, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) * RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}

static VALUE
vm_opt_div(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_DIV, INTEGER_REDEFINED_OP_FLAG)) {
        return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_div_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) / RFLOAT_VALUE(obj));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_DIV, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(RFLOAT_VALUE(recv) / RFLOAT_VALUE(obj));
    }
    else {
        return Qundef;
    }
}
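
/*
 * Note the zero-divisor guard above: when the Fixnum divisor is 0,
 * vm_opt_div (and vm_opt_mod below) returns Qundef instead of dividing, so
 * the ordinary Integer#/ (or Integer#%) call is made and that path raises
 * ZeroDivisionError as usual.
 */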

static VALUE
vm_opt_mod(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_MOD, INTEGER_REDEFINED_OP_FLAG)) {
        return (FIX2LONG(obj) == 0) ? Qundef : rb_fix_mod_fix(recv, obj);
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_MOD, FLOAT_REDEFINED_OP_FLAG)) {
        return DBL2NUM(ruby_float_mod(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj)));
    }
    else {
        return Qundef;
    }
}
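
/*
 * vm_opt_neq implements `recv != obj` in terms of the optimized `==`:
 * vm_method_cfunc_is() first checks, through the call cache, that #!= on
 * recv still resolves to the default rb_obj_not_equal; if so, the result
 * of opt_eq_func() is negated.  When either the cache check or the
 * equality fast path fails, Qundef is returned and #!= is called normally.
 */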

static VALUE
vm_opt_neq(CALL_INFO ci, CALL_CACHE cc,
           CALL_INFO ci_eq, CALL_CACHE cc_eq,
           VALUE recv, VALUE obj)
{
    if (vm_method_cfunc_is(ci, cc, recv, rb_obj_not_equal)) {
        VALUE val = opt_eq_func(recv, obj, ci_eq, cc_eq);

        if (val != Qundef) {
            return RTEST(val) ? Qfalse : Qtrue;
        }
    }

    return Qundef;
}
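
/*
 * In the comparison helpers below, two Fixnums are compared without
 * untagging: the Fixnum encoding (2n+1) is monotonic, so comparing the
 * tagged VALUEs as SIGNED_VALUEs orders them exactly like the underlying
 * integers.  For heap-allocated Floats, CHECK_CMP_NAN() screens out NaN
 * operands before the raw C comparison is used.
 */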

static VALUE
vm_opt_lt(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_LT, INTEGER_REDEFINED_OP_FLAG)) {
        return (SIGNED_VALUE)recv < (SIGNED_VALUE)obj ? Qtrue : Qfalse;
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
        return RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj) ? Qtrue : Qfalse;
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_LT, FLOAT_REDEFINED_OP_FLAG)) {
        CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
        return RFLOAT_VALUE(recv) < RFLOAT_VALUE(obj) ? Qtrue : Qfalse;
    }
    else {
        return Qundef;
    }
}

static VALUE
vm_opt_le(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_LE, INTEGER_REDEFINED_OP_FLAG)) {
        return (SIGNED_VALUE)recv <= (SIGNED_VALUE)obj ? Qtrue : Qfalse;
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
        return RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj) ? Qtrue : Qfalse;
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_LE, FLOAT_REDEFINED_OP_FLAG)) {
        CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
        return RFLOAT_VALUE(recv) <= RFLOAT_VALUE(obj) ? Qtrue : Qfalse;
    }
    else {
        return Qundef;
    }
}

static VALUE
vm_opt_gt(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_GT, INTEGER_REDEFINED_OP_FLAG)) {
        return (SIGNED_VALUE)recv > (SIGNED_VALUE)obj ? Qtrue : Qfalse;
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
        return RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj) ? Qtrue : Qfalse;
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_GT, FLOAT_REDEFINED_OP_FLAG)) {
        CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
        return RFLOAT_VALUE(recv) > RFLOAT_VALUE(obj) ? Qtrue : Qfalse;
    }
    else {
        return Qundef;
    }
}

static VALUE
vm_opt_ge(VALUE recv, VALUE obj)
{
    if (FIXNUM_2_P(recv, obj) &&
        BASIC_OP_UNREDEFINED_P(BOP_GE, INTEGER_REDEFINED_OP_FLAG)) {
        return (SIGNED_VALUE)recv >= (SIGNED_VALUE)obj ? Qtrue : Qfalse;
    }
    else if (FLONUM_2_P(recv, obj) &&
             BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
        return RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj) ? Qtrue : Qfalse;
    }
    else if (SPECIAL_CONST_P(recv) || SPECIAL_CONST_P(obj)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cFloat &&
             RBASIC_CLASS(obj) == rb_cFloat &&
             BASIC_OP_UNREDEFINED_P(BOP_GE, FLOAT_REDEFINED_OP_FLAG)) {
        CHECK_CMP_NAN(RFLOAT_VALUE(recv), RFLOAT_VALUE(obj));
        return RFLOAT_VALUE(recv) >= RFLOAT_VALUE(obj) ? Qtrue : Qfalse;
    }
    else {
        return Qundef;
    }
}
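
/*
 * vm_opt_ltlt covers the two common uses of the << operator, String append
 * and Array push; any other receiver (or a redefined #<<) falls back to a
 * normal method call via Qundef.
 */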

static VALUE
vm_opt_ltlt(VALUE recv, VALUE obj)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(BOP_LTLT, STRING_REDEFINED_OP_FLAG)) {
        return rb_str_concat(recv, obj);
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_LTLT, ARRAY_REDEFINED_OP_FLAG)) {
        return rb_ary_push(recv, obj);
    }
    else {
        return Qundef;
    }
}
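
/*
 * Fast paths for #[] on Array and Hash receivers.  rb_ary_aref1() is the
 * single-argument form of Array#[], so this helper only applies when
 * exactly one index is given.
 */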

static VALUE
vm_opt_aref(VALUE recv, VALUE obj)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_AREF, ARRAY_REDEFINED_OP_FLAG)) {
        return rb_ary_aref1(recv, obj);
    }
    else if (RBASIC_CLASS(recv) == rb_cHash &&
             BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG)) {
        return rb_hash_aref(recv, obj);
    }
    else {
        return Qundef;
    }
}

static VALUE
vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG) &&
             FIXNUM_P(obj)) {
        rb_ary_store(recv, FIX2LONG(obj), set);
        return set;
    }
    else if (RBASIC_CLASS(recv) == rb_cHash &&
             BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
        rb_hash_aset(recv, obj, set);
        return set;
    }
    else {
        return Qundef;
    }
}
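
/*
 * The *_with variants back the instructions emitted when the key of a Hash
 * element reference or assignment is a string literal.  They only take the
 * fast path for plain Hashes; compare_by_identity hashes are excluded,
 * presumably because reusing one shared key string would change
 * identity-based lookup semantics.
 */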

static VALUE
vm_opt_aref_with(VALUE recv, VALUE key)
{
    if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
        BASIC_OP_UNREDEFINED_P(BOP_AREF, HASH_REDEFINED_OP_FLAG) &&
        rb_hash_compare_by_id_p(recv) == Qfalse) {
        return rb_hash_aref(recv, key);
    }
    else {
        return Qundef;
    }
}

static VALUE
vm_opt_aset_with(VALUE recv, VALUE key, VALUE val)
{
    if (!SPECIAL_CONST_P(recv) && RBASIC_CLASS(recv) == rb_cHash &&
        BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG) &&
        rb_hash_compare_by_id_p(recv) == Qfalse) {
        return rb_hash_aset(recv, key, val);
    }
    else {
        return Qundef;
    }
}

static VALUE
vm_opt_length(VALUE recv, int bop)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(bop, STRING_REDEFINED_OP_FLAG)) {
        if (bop == BOP_EMPTY_P) {
            return LONG2NUM(RSTRING_LEN(recv));
        }
        else {
            return rb_str_length(recv);
        }
    }
    else if (RBASIC_CLASS(recv) == rb_cArray &&
             BASIC_OP_UNREDEFINED_P(bop, ARRAY_REDEFINED_OP_FLAG)) {
        return LONG2NUM(RARRAY_LEN(recv));
    }
    else if (RBASIC_CLASS(recv) == rb_cHash &&
             BASIC_OP_UNREDEFINED_P(bop, HASH_REDEFINED_OP_FLAG)) {
        return INT2FIX(RHASH_SIZE(recv));
    }
    else {
        return Qundef;
    }
}
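
/*
 * vm_opt_empty_p reuses vm_opt_length with BOP_EMPTY_P.  For Strings that
 * bop makes vm_opt_length return the byte length (RSTRING_LEN) rather than
 * the character length, which is cheaper and still answers the only
 * question asked here: whether the length is zero.
 */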

static VALUE
vm_opt_empty_p(VALUE recv)
{
    switch (vm_opt_length(recv, BOP_EMPTY_P)) {
      case Qundef: return Qundef;
      case INT2FIX(0): return Qtrue;
      default: return Qfalse;
    }
}
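
/*
 * Fixnum#succ can be computed on the tagged value directly: INT2FIX(n) is
 * 2n+1, so `recv - 1 + INT2FIX(1)` equals INT2FIX(n+1) with no untagging.
 * The one value that cannot be handled this way is FIXNUM_MAX, whose
 * successor must be promoted with LONG2NUM.
 */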

static VALUE
vm_opt_succ(VALUE recv)
{
    if (FIXNUM_P(recv) &&
        BASIC_OP_UNREDEFINED_P(BOP_SUCC, INTEGER_REDEFINED_OP_FLAG)) {
        /* fixnum + INT2FIX(1) */
        if (recv == LONG2FIX(FIXNUM_MAX)) {
            return LONG2NUM(FIXNUM_MAX + 1);
        }
        else {
            return recv - 1 + INT2FIX(1);
        }
    }
    else if (SPECIAL_CONST_P(recv)) {
        return Qundef;
    }
    else if (RBASIC_CLASS(recv) == rb_cString &&
             BASIC_OP_UNREDEFINED_P(BOP_SUCC, STRING_REDEFINED_OP_FLAG)) {
        return rb_str_succ(recv);
    }
    else {
        return Qundef;
    }
}

static VALUE
vm_opt_not(CALL_INFO ci, CALL_CACHE cc, VALUE recv)
{
    if (vm_method_cfunc_is(ci, cc, recv, rb_obj_not)) {
        return RTEST(recv) ? Qfalse : Qtrue;
    }
    else {
        return Qundef;
    }
}

static VALUE
vm_opt_regexpmatch1(VALUE recv, VALUE obj)
{
    if (BASIC_OP_UNREDEFINED_P(BOP_MATCH, REGEXP_REDEFINED_OP_FLAG)) {
        return rb_reg_match(recv, obj);
    }
    else {
        return rb_funcall(recv, idEqTilde, 1, obj);
    }
}
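
/*
 * vm_opt_regexpmatch1 above handles the match whose receiver is a regexp
 * literal; if Regexp#=~ has been redefined it simply dispatches idEqTilde.
 * vm_opt_regexpmatch2 below is the two-operand form: when the receiver is
 * an unredefined String, the match is delegated to rb_reg_match() with the
 * operands swapped (the other operand is expected to be the Regexp here).
 */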

static VALUE
vm_opt_regexpmatch2(VALUE recv, VALUE obj)
{
    if (CLASS_OF(recv) == rb_cString &&
        BASIC_OP_UNREDEFINED_P(BOP_MATCH, STRING_REDEFINED_OP_FLAG)) {
        return rb_reg_match(obj, recv);
    }
    else {
        return Qundef;
    }
}
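
/*
 * vm_trace is the per-instruction tracing hook.  It looks up the per-PC
 * event flags recorded for the current iseq, bails out quickly when nothing
 * relevant is enabled or a trace hook is already running (ec->trace_arg),
 * and otherwise fires the class/call/line/return hooks.  The pc++/pc--
 * around each hook exists because the source line is calculated from PC-1,
 * as noted below.
 */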

rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);

NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc));

static void
vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc)
{
    rb_event_flag_t vm_event_flags = ruby_vm_event_flags;

    if (vm_event_flags == 0) {
        return;
    }
    else {
        const rb_iseq_t *iseq = reg_cfp->iseq;
        size_t pos = pc - iseq->body->iseq_encoded;
        rb_event_flag_t events = rb_iseq_event_flags(iseq, pos);
        rb_event_flag_t event;

        if ((events & vm_event_flags) == 0) {
#if 0
            /* disable trace */
            rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
#else
            /* do not disable trace because of performance problem
             * (re-enable overhead)
             */
#endif
            return;
        }

        if (ec->trace_arg != NULL) return;

        if (0) {
            fprintf(stderr, "vm_trace>>%4d (%4x) - %s:%d %s\n",
                    (int)pos,
                    (int)events,
                    RSTRING_PTR(rb_iseq_path(iseq)),
                    (int)rb_iseq_line_no(iseq, pos),
                    RSTRING_PTR(rb_iseq_label(iseq)));
        }

        VM_ASSERT(reg_cfp->pc == pc);
        VM_ASSERT(events != 0);
        VM_ASSERT(vm_event_flags & events);

        /* increment PC because source line is calculated with PC-1 */
        if ((event = (events & (RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL))) != 0) {
            VM_ASSERT(event == RUBY_EVENT_CLASS ||
                      event == RUBY_EVENT_CALL ||
                      event == RUBY_EVENT_B_CALL);
            reg_cfp->pc++;
            vm_dtrace(event, ec);
            EXEC_EVENT_HOOK(ec, event, GET_SELF(), 0, 0, 0, Qundef);
            reg_cfp->pc--;
        }
        if (events & RUBY_EVENT_LINE) {
            reg_cfp->pc++;
            vm_dtrace(RUBY_EVENT_LINE, ec);
            EXEC_EVENT_HOOK(ec, RUBY_EVENT_LINE, GET_SELF(), 0, 0, 0, Qundef);
            reg_cfp->pc--;
        }
        if ((event = (events & (RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN))) != 0) {
            VM_ASSERT(event == RUBY_EVENT_END ||
                      event == RUBY_EVENT_RETURN ||
                      event == RUBY_EVENT_B_RETURN);
            reg_cfp->pc++;
            vm_dtrace(event, ec);
            EXEC_EVENT_HOOK(ec, event, GET_SELF(), 0, 0, 0, TOPN(0));
            reg_cfp->pc--;
        }
    }
}