
Support tracing of attr_reader and attr_writer

In vm_call_method_each_type, check for c_call and c_return events before
dispatching to vm_call_ivar and vm_call_attrset.  With this approach, the
call cache will still dispatch directly to those functions, so this
change only decreases performance for the first (uncached) call, and even
then the decrease is minimal.
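
For illustration, this is the user-visible effect (condensed from the test
added below; a sketch, not part of the commit itself):

    c = Class.new { attr_accessor :x }
    obj = c.new

    events = []
    trace = TracePoint.new(:c_call, :c_return) do |tp|
      # attr_accessor methods are implemented in C, so they are reported
      # as :c_call / :c_return events once this change is in place.
      events << [tp.event, tp.method_id]
    end

    trace.enable do
      obj.x = 1   # previously invisible to :c_call/:c_return tracing
      obj.x
    end

    # Expected, per the test added in this commit:
    # [[:c_call, :x=], [:c_return, :x=], [:c_call, :x], [:c_return, :x]]
    p events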

This approach requires clearing the call caches when tracing is enabled or
disabled.  Currently that means switching every vm_call_ivar and
vm_call_attrset call cache to vm_call_general whenever tracing is toggled,
so it could theoretically slow down code that constantly enables and
disables tracing.
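
As a sketch of that worst case (illustrative only; the class and method
names are made up for the example):

    class Point
      attr_accessor :x
    end
    obj = Point.new

    trace = TracePoint.new(:c_call, :c_return) { |_tp| }

    # Every enable/disable switches the vm_call_ivar / vm_call_attrset call
    # caches back to vm_call_general, so the reader call below has to take
    # the slow path and repopulate its cache on each iteration.
    1_000.times do
      trace.enable
      obj.x
      trace.disable
    end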

This approach does not handle targeted tracepoints, but from my testing,
c_call and c_return events are not supported for targeted tracepoints,
so that shouldn't matter.
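
A sketch of what "targeted" means here (the method name is made up for the
example, and the exact error message may vary):

    def traced_method
      :ok
    end

    tp = TracePoint.new(:c_call) { |_tp| }

    begin
      # A targeted tracepoint only fires for the given method or block.
      # :c_call/:c_return are not accepted for targeted tracepoints, so
      # this is expected to be rejected with an ArgumentError.
      tp.enable(target: method(:traced_method))
    rescue ArgumentError => e
      puts "targeted :c_call rejected: #{e.message}"
    end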

The commit includes a benchmark showing that the performance decrease is
minimal, if detectable at all.

Fixes [Bug #16383]
Fixes [Bug #10470]

Co-authored-by: Takashi Kokubun <takashikkbn@gmail.com>
Jeremy Evans 2021-08-23 15:22:14 -07:00
parent 5f7c2291d6
commit 2d98593bf5
Notes: git 2021-08-29 23:24:05 +09:00
6 changed files with 176 additions and 6 deletions


@@ -0,0 +1,29 @@
+prelude: |
+  class C
+    attr_accessor :x
+    def initialize
+      @x = nil
+    end
+    class_eval <<-END
+      def ar
+        #{'x;'*256}
+      end
+      def aw
+        #{'self.x = nil;'*256}
+      end
+      def arm
+        m = method(:x)
+        #{'m.call;'*256}
+      end
+      def awm
+        m = method(:x=)
+        #{'m.call(nil);'*256}
+      end
+    END
+  end
+  obj = C.new
+benchmark:
+  attr_reader: "obj.ar"
+  attr_writer: "obj.aw"
+  attr_reader_method: "obj.arm"
+  attr_writer_method: "obj.awm"

iseq.c

@@ -3407,6 +3407,32 @@ rb_iseq_trace_set(const rb_iseq_t *iseq, rb_event_flag_t turnon_events)
     }
 }
 
+bool rb_vm_call_ivar_attrset_p(const vm_call_handler ch);
+void rb_vm_cc_general(const struct rb_callcache *cc);
+
+static int
+clear_attr_ccs_i(void *vstart, void *vend, size_t stride, void *data)
+{
+    VALUE v = (VALUE)vstart;
+    for (; v != (VALUE)vend; v += stride) {
+        void *ptr = asan_poisoned_object_p(v);
+        asan_unpoison_object(v, false);
+
+        if (imemo_type_p(v, imemo_callcache) && rb_vm_call_ivar_attrset_p(((const struct rb_callcache *)v)->call_)) {
+            rb_vm_cc_general((struct rb_callcache *)v);
+        }
+
+        asan_poison_object_if(ptr, v);
+    }
+    return 0;
+}
+
+void
+rb_clear_attr_ccs(void)
+{
+    rb_objspace_each_objects(clear_attr_ccs_i, NULL);
+}
+
 static int
 trace_set_i(void *vstart, void *vend, size_t stride, void *data)
 {
@@ -3420,6 +3446,9 @@ trace_set_i(void *vstart, void *vend, size_t stride, void *data)
         if (rb_obj_is_iseq(v)) {
             rb_iseq_trace_set(rb_iseq_check((rb_iseq_t *)v), turnon_events);
         }
+        else if (imemo_type_p(v, imemo_callcache) && rb_vm_call_ivar_attrset_p(((const struct rb_callcache *)v)->call_)) {
+            rb_vm_cc_general((struct rb_callcache *)v);
+        }
 
         asan_poison_object_if(ptr, v);
     }


@@ -831,6 +831,56 @@ class TestSetTraceFunc < Test::Unit::TestCase
     }
   end
 
+  def test_tracepoint_attr
+    c = Class.new do
+      attr_accessor :x
+      alias y x
+      alias y= x=
+    end
+    obj = c.new
+    ar_meth = obj.method(:x)
+    aw_meth = obj.method(:x=)
+    aar_meth = obj.method(:y)
+    aaw_meth = obj.method(:y=)
+    events = []
+    trace = TracePoint.new(:c_call, :c_return){|tp|
+      next if !target_thread?
+      next if tp.path != __FILE__
+      next if tp.method_id == :call
+      case tp.event
+      when :c_call
+        assert_raise(RuntimeError) {tp.return_value}
+        events << [tp.event, tp.method_id, tp.callee_id]
+      when :c_return
+        events << [tp.event, tp.method_id, tp.callee_id, tp.return_value]
+      end
+    }
+    test_proc = proc do
+      obj.x = 1
+      obj.x
+      obj.y = 2
+      obj.y
+      aw_meth.call(1)
+      ar_meth.call
+      aaw_meth.call(2)
+      aar_meth.call
+    end
+    test_proc.call # populate call caches
+    trace.enable(&test_proc)
+    expected = [
+      [:c_call, :x=, :x=],
+      [:c_return, :x=, :x=, 1],
+      [:c_call, :x, :x],
+      [:c_return, :x, :x, 1],
+      [:c_call, :x=, :y=],
+      [:c_return, :x=, :y=, 2],
+      [:c_call, :x, :y],
+      [:c_return, :x, :y, 2],
+    ]
+    assert_equal(expected*2, events)
+  end
+
   class XYZZYException < Exception; end
   def method_test_tracepoint_raised_exception err
     raise err


@@ -190,7 +190,16 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const
         }
         rb_check_arity(calling->argc, 1, 1);
-        ret = rb_ivar_set(calling->recv, vm_cc_cme(cc)->def->body.attr.id, argv[0]);
+        if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv,
+                            vm_cc_cme(cc)->def->original_id, vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef);
+            ret = rb_ivar_set(calling->recv, vm_cc_cme(cc)->def->body.attr.id, argv[0]);
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv,
+                            vm_cc_cme(cc)->def->original_id, vm_ci_mid(ci), vm_cc_cme(cc)->owner, ret);
+        }
+        else {
+            ret = rb_ivar_set(calling->recv, vm_cc_cme(cc)->def->body.attr.id, argv[0]);
+        }
         goto success;
       case VM_METHOD_TYPE_IVAR:
         if (calling->kw_splat &&
@@ -201,7 +210,16 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const
         }
         rb_check_arity(calling->argc, 0, 0);
-        ret = rb_attr_get(calling->recv, vm_cc_cme(cc)->def->body.attr.id);
+        if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv,
+                            vm_cc_cme(cc)->def->original_id, vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef);
+            ret = rb_attr_get(calling->recv, vm_cc_cme(cc)->def->body.attr.id);
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv,
+                            vm_cc_cme(cc)->def->original_id, vm_ci_mid(ci), vm_cc_cme(cc)->owner, ret);
+        }
+        else {
+            ret = rb_attr_get(calling->recv, vm_cc_cme(cc)->def->body.attr.id);
+        }
         goto success;
       case VM_METHOD_TYPE_BMETHOD:
         ret = vm_call_bmethod_body(ec, calling, argv);


@@ -3029,6 +3029,12 @@ vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_c
     return vm_setivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, val, NULL, NULL, cc, 1);
 }
 
+bool
+rb_vm_call_ivar_attrset_p(const vm_call_handler ch)
+{
+    return (ch == vm_call_ivar || ch == vm_call_attrset);
+}
+
 static inline VALUE
 vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv)
 {
@@ -3484,16 +3490,40 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
         rb_check_arity(calling->argc, 1, 1);
         vm_cc_attr_index_set(cc, 0);
-        CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG)));
-        return vm_call_attrset(ec, cfp, calling);
+        if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
+            const struct rb_callinfo *ci = calling->ci;
+            VALUE v;
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id,
+                            vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef);
+            v = vm_call_attrset(ec, cfp, calling);
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id,
+                            vm_ci_mid(ci), vm_cc_cme(cc)->owner, v);
+            return v;
+        }
+        else {
+            CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG)));
+            return vm_call_attrset(ec, cfp, calling);
+        }
 
       case VM_METHOD_TYPE_IVAR:
         CALLER_SETUP_ARG(cfp, calling, ci);
         CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
         rb_check_arity(calling->argc, 0, 0);
         vm_cc_attr_index_set(cc, 0);
-        CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT)));
-        return vm_call_ivar(ec, cfp, calling);
+        if (UNLIKELY(ruby_vm_event_flags & (RUBY_EVENT_C_CALL | RUBY_EVENT_C_RETURN))) {
+            const struct rb_callinfo *ci = calling->ci;
+            VALUE v;
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, calling->recv, vm_cc_cme(cc)->def->original_id,
+                            vm_ci_mid(ci), vm_cc_cme(cc)->owner, Qundef);
+            v = vm_call_ivar(ec, cfp, calling);
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, calling->recv, vm_cc_cme(cc)->def->original_id,
+                            vm_ci_mid(ci), vm_cc_cme(cc)->owner, v);
+            return v;
+        }
+        else {
+            CC_SET_FASTPATH(cc, vm_call_ivar, !(vm_ci_flag(ci) & (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT)));
+            return vm_call_ivar(ec, cfp, calling);
+        }
 
       case VM_METHOD_TYPE_MISSING:
         vm_cc_method_missing_reason_set(cc, 0);
@@ -3615,6 +3645,15 @@ vm_call_general(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct
     return vm_call_method(ec, reg_cfp, calling);
 }
 
+void
+rb_vm_cc_general(const struct rb_callcache *cc)
+{
+    VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
+    VM_ASSERT(cc != vm_cc_empty());
+
+    *(vm_call_handler *)&cc->call_ = vm_call_general;
+}
+
 static VALUE
 vm_call_super_method(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling)
 {


@@ -74,6 +74,8 @@ rb_hook_list_free(rb_hook_list_t *hooks)
 /* ruby_vm_event_flags management */
 
+void rb_clear_attr_ccs(void);
+
 static void
 update_global_event_hook(rb_event_flag_t vm_events)
 {
@@ -91,6 +93,9 @@ update_global_event_hook(rb_event_flag_t vm_events)
         /* write all ISeqs if and only if new events are added */
         rb_iseq_trace_set_all(new_iseq_events | enabled_iseq_events);
     }
+    else {
+        rb_clear_attr_ccs();
+    }
 
     ruby_vm_event_flags = vm_events;
     ruby_vm_event_enabled_global_flags |= vm_events;