
rb_vm_call0: on-stack call info

This changeset reduces the generated binary of rb_vm_call0 from 281
bytes to 211 bytes on my machine.  Should reduce GC pressure as well.
卜部昌平 2020-06-01 16:01:30 +09:00
parent db406daa60
commit 46728557c1
Notes: git 2020-06-09 09:53:17 +09:00
3 changed files with 35 additions and 44 deletions
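
The gist of the change is that rb_vm_call0 now builds its call info and call cache as C99 compound literals on the caller's stack instead of allocating them through vm_ci_new_runtime and filling a cache with vm_cc_fill. Below is a minimal, self-contained sketch of that compound-literal pattern under illustrative names; sketch_callinfo, SKETCH_CI_ON_STACK, and invoke are stand-ins for this note only, not the actual Ruby VM definitions.

/* Sketch only: illustrative stand-ins, not the real Ruby VM types. */
#include <stdio.h>

struct sketch_callinfo {
    unsigned int flags;
    long mid;     /* method id */
    int argc;
};

/* Compound literal: the object lives in the enclosing block's stack frame,
 * so there is no heap allocation and nothing for a GC to track. */
#define SKETCH_CI_ON_STACK(mid_, flags_, argc_) \
    (struct sketch_callinfo) {                  \
        .flags = flags_,                        \
        .mid   = mid_,                          \
        .argc  = argc_,                         \
    }

static void
invoke(const struct sketch_callinfo *ci)
{
    printf("mid=%ld argc=%d flags=%u\n", ci->mid, ci->argc, ci->flags);
}

int
main(void)
{
    /* Taking the address of the compound literal is valid for the duration
     * of the call, mirroring how rb_vm_call0 now passes &VM_CI_ON_STACK(...)
     * straight into vm_call0_body. */
    invoke(&SKETCH_CI_ON_STACK(42, 0u, 3));
    return 0;
}

Because nothing is heap-allocated, no imemo object has to be created or marked, which is where the "reduce GC pressure" claim in the message comes from.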

View file

@@ -251,6 +251,17 @@ vm_ci_markable(const struct rb_callinfo *ci)
     }
 }
 
+#define VM_CI_ON_STACK(mid_, flags_, argc_, kwarg_) \
+    (struct rb_callinfo) {                          \
+        .flags = T_IMEMO |                          \
+                 (imemo_callinfo << FL_USHIFT) |    \
+                 VM_CALLINFO_NOT_UNDER_GC,          \
+        .mid   = mid_,                              \
+        .flag  = flags_,                            \
+        .argc  = argc_,                             \
+        .kwarg = kwarg_,                            \
+    }
+
 typedef VALUE (*vm_call_handler)(
     struct rb_execution_context_struct *ec,
     struct rb_control_frame_struct *cfp,
@@ -290,22 +301,16 @@ vm_cc_new(VALUE klass,
     return cc;
 }
 
-static inline const struct rb_callcache *
-vm_cc_fill(struct rb_callcache *cc,
-           VALUE klass,
-           const struct rb_callable_method_entry_struct *cme,
-           vm_call_handler call)
-{
-    struct rb_callcache cc_body = {
-        .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
-        .klass = klass,
-        .cme_  = cme,
-        .call_ = call,
-        .aux_.v = 0,
-    };
-    MEMCPY(cc, &cc_body, struct rb_callcache, 1);
-    return cc;
-}
+#define VM_CC_ON_STACK(clazz, call, aux, cme)     \
+    (struct rb_callcache) {                       \
+        .flags = T_IMEMO |                        \
+                 (imemo_callcache << FL_USHIFT) | \
+                 VM_CALLCACHE_UNMARKABLE,         \
+        .klass = clazz,                           \
+        .cme_  = cme,                             \
+        .call_ = call,                            \
+        .aux_  = aux,                             \
+    }
 
 static inline bool
 vm_cc_class_check(const struct rb_callcache *cc, VALUE klass)

View file

@@ -45,12 +45,20 @@ static VALUE vm_call0_body(rb_execution_context_t* ec, struct rb_calling_info *c
 MJIT_FUNC_EXPORTED VALUE
 rb_vm_call0(rb_execution_context_t *ec, VALUE recv, ID id, int argc, const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat)
 {
-    struct rb_calling_info calling = { Qundef, recv, argc, kw_splat, };
-    const struct rb_callinfo *ci = vm_ci_new_runtime(id, kw_splat ? VM_CALL_KW_SPLAT : 0, argc, NULL);
-    struct rb_callcache cc_body;
-    const struct rb_callcache *cc = vm_cc_fill(&cc_body, 0, me, vm_call_general);
-    struct rb_call_data cd = { ci, cc, };
-    return vm_call0_body(ec, &calling, &cd, argv);
+    return vm_call0_body(
+        ec,
+        &(struct rb_calling_info) {
+            .block_handler = VM_BLOCK_HANDLER_NONE,
+            .recv = recv,
+            .argc = argc,
+            .kw_splat = kw_splat,
+        },
+        &(struct rb_call_data) {
+            .ci = &VM_CI_ON_STACK(id, kw_splat ? VM_CALL_KW_SPLAT : 0, argc, NULL),
+            .cc = &VM_CC_ON_STACK(Qfalse, vm_call_general, { 0 }, me),
+        },
+        argv
+    );
 }
 
 static VALUE

View file

@@ -2682,28 +2682,6 @@ aliased_callable_method_entry(const rb_callable_method_entry_t *me)
     return cme;
 }
 
-#define VM_CI_ON_STACK(mid_, flags_, argc_, kwarg_) \
-    (struct rb_callinfo) {                          \
-        .flags = T_IMEMO |                          \
-                 (imemo_callinfo << FL_USHIFT) |    \
-                 VM_CALLINFO_NOT_UNDER_GC,          \
-        .mid   = mid_,                              \
-        .flag  = flags_,                            \
-        .argc  = argc_,                             \
-        .kwarg = kwarg_,                            \
-    }
-
-#define VM_CC_ON_STACK(clazz, call, aux, cme)     \
-    (struct rb_callcache) {                       \
-        .flags = T_IMEMO |                        \
-                 (imemo_callcache << FL_USHIFT) | \
-                 VM_CALLCACHE_UNMARKABLE,         \
-        .klass = clazz,                           \
-        .cme_  = cme,                             \
-        .call_ = call,                            \
-        .aux_  = aux,                             \
-    }
-
 static VALUE
 vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
 {