
Rework tracing for blocks running as methods

The main impetus for this change is to fix [Bug #13392]. Previously, we
fired the "return" TracePoint event after popping the stack frame for
the block running as a method (BMETHOD). This gave undesirable source
location output, since the return event normally fires right before the
frame goes away.
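
A minimal sketch of the symptom, in the spirit of the
test_return_bmethod_location test added below (names here are
illustrative):

    obj = Object.new
    obj.define_singleton_method(:t) {}  # a bmethod, defined on this line

    tp = TracePoint.new(:return) { |tp| p tp.lineno }
    tp.enable { obj.t }

Before this change, the reported lineno could point outside the method,
because the frame had already been popped; afterwards it reports the
definition line, as it does for ordinary methods.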

The iseq for each block can run both as a block and as a method. To
accommodate that, this commit makes vm_trace() fire call/return events for
instructions that have b_call/b_return events attached when the iseq is
running as a BMETHOD. The logic for rewriting to "trace_*" instructions
is tweaked so that when the user listens for call/return events,
instructions with b_call/b_return become trace variants.
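
Concretely, a single Proc can back both a plain block call and a
bmethod, so the trace_* rewrite has to cover both entry points. A rough
sketch (names are illustrative):

    block = proc { :body }
    obj = Object.new
    obj.define_singleton_method(:m, &block)  # the block's iseq now also runs as a BMETHOD

    TracePoint.new(:call, :return) { |tp| p [tp.event, tp.method_id] }.enable do
      obj.m       # the b_call/b_return instructions fire :call/:return here
      block.call  # same iseq running as a plain block; no :call/:return events
    end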

To continue to provide the return value for non-local returns done using
the "return" or "break" keyword inside BMETHODs, the stack unwinding
code is tweaked. b_return events now provide the same return value as
return events for these non-local cases. A pre-existing test had treated
the missing return value for these b_return events as a known limitation.
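
Condensed from the updated tp_return_value tests below (a sketch, not
the exact test code):

    class Example
      define_method(:f_return) { return :f_return }  # non-local return from a bmethod
    end

    values = []
    tp = TracePoint.new(:b_return, :return) do |tp|
      values << [tp.event, tp.method_id, tp.return_value]
    end
    tp.enable { Example.new.f_return }
    values.pop  # drop the b_return for the block passed to #enable itself
    values # => [[:b_return, :f_return, :f_return], [:return, :f_return, :f_return]]
           #    (the :b_return entry used to carry nil)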

This commit removes the checks for call/return TracePoint events that
previously ran on every call into a BMETHOD, even when no TracePoints
were active. Technically, migrating just the return event is enough to
fix the bug, but migrating both call and return removes our reliance on
`VM_FRAME_FLAG_FINISH` and on re-entering the interpreter when the caller
is already in the interpreter.
Author: Alan Wu 2021-07-13 19:01:09 -04:00
parent 3b2b28d035
commit 9121e57a5f
Notes: git 2021-12-02 07:42:54 +09:00
6 changed files with 209 additions and 59 deletions

iseq.c

@@ -3310,6 +3310,23 @@ rb_iseq_trace_flag_cleared(const rb_iseq_t *iseq, size_t pos)
encoded_iseq_trace_instrument(&iseq_encoded[pos], 0, false);
}
// We need to fire call events on instructions with b_call events if the block
// is running as a method. So, if we are listening for call events, then
// instructions that have b_call events need to become trace variants.
// Use this function when making decisions about recompiling to trace variants.
static inline rb_event_flag_t
add_bmethod_events(rb_event_flag_t events)
{
if (events & RUBY_EVENT_CALL) {
events |= RUBY_EVENT_B_CALL;
}
if (events & RUBY_EVENT_RETURN) {
events |= RUBY_EVENT_B_RETURN;
}
return events;
}
// Note, to support call/return events for bmethods, turnon_events can have more events than tpval.
static int
iseq_add_local_tracepoint(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval, unsigned int target_line)
{
@@ -3365,9 +3382,12 @@ iseq_add_local_tracepoint_i(const rb_iseq_t *iseq, void *p)
}
int
rb_iseq_add_local_tracepoint_recursively(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval, unsigned int target_line)
rb_iseq_add_local_tracepoint_recursively(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval, unsigned int target_line, bool target_bmethod)
{
struct trace_set_local_events_struct data;
if (target_bmethod) {
turnon_events = add_bmethod_events(turnon_events);
}
data.turnon_events = turnon_events;
data.tpval = tpval;
data.target_line = target_line;
@@ -3399,6 +3419,7 @@ iseq_remove_local_tracepoint(const rb_iseq_t *iseq, VALUE tpval)
((rb_iseq_t *)iseq)->aux.exec.local_hooks = NULL;
}
local_events = add_bmethod_events(local_events);
for (pc = 0; pc<body->iseq_size;) {
rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pc);
pc += encoded_iseq_trace_instrument(&iseq_encoded[pc], pc_events & (local_events | iseq->aux.exec.global_trace_events), false);
@@ -3449,7 +3470,7 @@ rb_iseq_trace_set(const rb_iseq_t *iseq, rb_event_flag_t turnon_events)
rb_event_flag_t enabled_events;
rb_event_flag_t local_events = iseq->aux.exec.local_hooks ? iseq->aux.exec.local_hooks->events : 0;
((rb_iseq_t *)iseq)->aux.exec.global_trace_events = turnon_events;
enabled_events = turnon_events | local_events;
enabled_events = add_bmethod_events(turnon_events | local_events);
for (pc=0; pc<body->iseq_size;) {
rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pc);

iseq.h

@@ -160,7 +160,7 @@ const rb_iseq_t *rb_iseq_ibf_load(VALUE str);
const rb_iseq_t *rb_iseq_ibf_load_bytes(const char *cstr, size_t);
VALUE rb_iseq_ibf_load_extra_data(VALUE str);
void rb_iseq_init_trace(rb_iseq_t *iseq);
int rb_iseq_add_local_tracepoint_recursively(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval, unsigned int target_line);
int rb_iseq_add_local_tracepoint_recursively(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval, unsigned int target_line, bool target_bmethod);
int rb_iseq_remove_local_tracepoint_recursively(const rb_iseq_t *iseq, VALUE tpval);
const rb_iseq_t *rb_iseq_load_iseq(VALUE fname);

test/ruby/test_settracefunc.rb

@@ -393,7 +393,7 @@ class TestSetTraceFunc < Test::Unit::TestCase
[["c-return", 3, :set_trace_func, Kernel],
["line", 6, __method__, self.class],
["call", 1, :foobar, FooBar],
["return", 6, :foobar, FooBar],
["return", 1, :foobar, FooBar],
["line", 7, __method__, self.class],
["c-call", 7, :set_trace_func, Kernel]].each{|e|
assert_equal(e, events.shift)
@@ -1951,7 +1951,7 @@ CODE
end
define_method(:f_break_defined) do
return :f_break_defined
break :f_break_defined
end
define_method(:f_raise_defined) do
@@ -1972,27 +1972,44 @@ CODE
tp_return_value(:f_last_defined),
'[Bug #13369]'
assert_equal [[:b_return, :f_return_defined, nil], # current limitation
assert_equal [[:b_return, :f_return_defined, :f_return_defined],
[:return, :f_return_defined, :f_return_defined]],
tp_return_value(:f_return_defined),
'[Bug #13369]'
assert_equal [[:b_return, :f_break_defined, nil],
assert_equal [[:b_return, :f_break_defined, :f_break_defined],
[:return, :f_break_defined, :f_break_defined]],
tp_return_value(:f_break_defined),
'[Bug #13369]'
assert_equal [[:b_return, :f_raise_defined, nil],
assert_equal [[:b_return, :f_raise_defined, f_raise_defined],
[:return, :f_raise_defined, f_raise_defined]],
tp_return_value(:f_raise_defined),
'[Bug #13369]'
assert_equal [[:b_return, :f_break_in_rescue_defined, nil],
assert_equal [[:b_return, :f_break_in_rescue_defined, f_break_in_rescue_defined],
[:return, :f_break_in_rescue_defined, f_break_in_rescue_defined]],
tp_return_value(:f_break_in_rescue_defined),
'[Bug #13369]'
end
define_method(:just_yield) do |&block|
block.call
end
define_method(:unwind_multiple_bmethods) do
just_yield { return :unwind_multiple_bmethods }
end
def test_non_local_return_across_multiple_define_methods
assert_equal [[:b_return, :unwind_multiple_bmethods, nil],
[:b_return, :just_yield, nil],
[:return, :just_yield, nil],
[:b_return, :unwind_multiple_bmethods, :unwind_multiple_bmethods],
[:return, :unwind_multiple_bmethods, :unwind_multiple_bmethods]],
tp_return_value(:unwind_multiple_bmethods)
end
def f_iter
yield
end
@@ -2403,6 +2420,99 @@ CODE
assert_equal Array.new(2){th}, events
end
def test_return_bmethod_location
bug13392 = "[ruby-core:80515] incorrect bmethod return location"
actual = nil
obj = Object.new
expected = __LINE__ + 1
obj.define_singleton_method(:t){}
tp = TracePoint.new(:return) do
next unless target_thread?
actual = tp.lineno
end
tp.enable {obj.t}
assert_equal(expected, actual, bug13392)
end
def test_b_tracepoints_going_away
# test that call and return TracePoints continue to work
# when b_call and b_return TracePoints stop
events = []
record_events = ->(tp) do
next unless target_thread?
events << [tp.event, tp.method_id]
end
call_ret_tp = TracePoint.new(:call, :return, &record_events)
block_call_ret_tp = TracePoint.new(:b_call, :b_return, &record_events)
obj = Object.new
obj.define_singleton_method(:foo) {} # a bmethod
foo = obj.method(:foo)
call_ret_tp.enable(target: foo) do
block_call_ret_tp.enable(target: foo) do
obj.foo
end
obj.foo
end
assert_equal(
[
[:call, :foo],
[:b_call, :foo],
[:b_return, :foo],
[:return, :foo],
[:call, :foo],
[:return, :foo],
],
events,
)
end
def test_target_different_bmethod_same_iseq
# make two bmethods that share the same block iseq
block = Proc.new {}
obj = Object.new
obj.define_singleton_method(:one, &block)
obj.define_singleton_method(:two, &block)
events = []
record_events = ->(tp) do
next unless target_thread?
events << [tp.event, tp.method_id]
end
tp_one = TracePoint.new(:call, :return, &record_events)
tp_two = TracePoint.new(:call, :return, &record_events)
tp_one.enable(target: obj.method(:one)) do
obj.one
obj.two # not targeted
end
assert_equal([[:call, :one], [:return, :one]], events)
events.clear
tp_one.enable(target: obj.method(:one)) do
obj.one
tp_two.enable(target: obj.method(:two)) do
obj.two
end
obj.two
obj.one
end
assert_equal(
[
[:call, :one],
[:return, :one],
[:call, :two],
[:return, :two],
[:call, :one],
[:return, :one],
],
events
)
end
def test_return_event_with_rescue
obj = Object.new
def obj.example

vm.c

@@ -1313,7 +1313,6 @@ invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, co
/* bmethod */
int arg_size = iseq->body->param.size;
VALUE ret;
rb_hook_list_t *hooks;
VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
@@ -1325,24 +1324,9 @@ invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, co
iseq->body->local_table_size - arg_size,
iseq->body->stack_max);
RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
EXEC_EVENT_HOOK(ec, RUBY_EVENT_CALL, self, me->def->original_id, me->called_id, me->owner, Qnil);
if (UNLIKELY((hooks = me->def->body.bmethod.hooks) != NULL) &&
hooks->events & RUBY_EVENT_CALL) {
rb_exec_event_hook_orig(ec, hooks, RUBY_EVENT_CALL, self,
me->def->original_id, me->called_id, me->owner, Qnil, FALSE);
}
VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
ret = vm_exec(ec, true);
EXEC_EVENT_HOOK(ec, RUBY_EVENT_RETURN, self, me->def->original_id, me->called_id, me->owner, ret);
if ((hooks = me->def->body.bmethod.hooks) != NULL &&
hooks->events & RUBY_EVENT_RETURN) {
rb_exec_event_hook_orig(ec, hooks, RUBY_EVENT_RETURN, self,
me->def->original_id, me->called_id, me->owner, ret, FALSE);
}
RUBY_DTRACE_METHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
return ret;
}
@@ -2033,9 +2017,11 @@ frame_name(const rb_control_frame_t *cfp)
}
#endif
// cfp_returning_with_value:
// Whether cfp is the last frame in the unwinding process for a non-local return.
static void
hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp,
int will_finish_vm_exec, int state, struct vm_throw_data *err)
bool cfp_returning_with_value, int state, struct vm_throw_data *err)
{
if (state == TAG_RAISE && RBASIC(err)->klass == rb_eSysStackError) {
return;
@@ -2058,32 +2044,36 @@ hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp,
break;
case VM_FRAME_MAGIC_BLOCK:
if (VM_FRAME_BMETHOD_P(ec->cfp)) {
EXEC_EVENT_HOOK(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
ec->cfp->self, 0, 0, 0, frame_return_value(err), FALSE);
VALUE bmethod_return_value = frame_return_value(err);
if (cfp_returning_with_value) {
// Non-local return terminating at a BMETHOD control frame.
bmethod_return_value = THROW_DATA_VAL(err);
}
if (!will_finish_vm_exec) {
const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(ec->cfp);
/* kick RUBY_EVENT_RETURN at invoke_block_from_c() for bmethod */
EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self,
rb_vm_frame_method_entry(ec->cfp)->def->original_id,
rb_vm_frame_method_entry(ec->cfp)->called_id,
rb_vm_frame_method_entry(ec->cfp)->owner,
frame_return_value(err));
EXEC_EVENT_HOOK(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, bmethod_return_value);
if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
ec->cfp->self, 0, 0, 0, bmethod_return_value, FALSE);
}
VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
local_hooks = me->def->body.bmethod.hooks;
const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(ec->cfp);
if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
rb_vm_frame_method_entry(ec->cfp)->def->original_id,
rb_vm_frame_method_entry(ec->cfp)->called_id,
rb_vm_frame_method_entry(ec->cfp)->owner,
frame_return_value(err), TRUE);
}
EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self,
rb_vm_frame_method_entry(ec->cfp)->def->original_id,
rb_vm_frame_method_entry(ec->cfp)->called_id,
rb_vm_frame_method_entry(ec->cfp)->owner,
bmethod_return_value);
VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
local_hooks = me->def->body.bmethod.hooks;
if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
rb_vm_frame_method_entry(ec->cfp)->def->original_id,
rb_vm_frame_method_entry(ec->cfp)->called_id,
rb_vm_frame_method_entry(ec->cfp)->owner,
bmethod_return_value, TRUE);
}
THROW_DATA_CONSUMED_SET(err);
}
@@ -2284,7 +2274,8 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
if (catch_iseq == NULL) {
ec->errinfo = Qnil;
THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
hook_before_rewind(ec, ec->cfp, TRUE, state, err);
// cfp == escape_cfp here so calling with cfp_returning_with_value = true
hook_before_rewind(ec, ec->cfp, true, state, err);
rb_vm_pop_frame(ec);
return THROW_DATA_VAL(err);
}
@@ -2425,7 +2416,7 @@ vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state,
return Qundef;
}
else {
hook_before_rewind(ec, ec->cfp, FALSE, state, err);
hook_before_rewind(ec, ec->cfp, (cfp == escape_cfp), state, err);
if (VM_FRAME_FINISHED_P(ec->cfp)) {
rb_vm_pop_frame(ec);

vm_insnhelper.c

@@ -5638,6 +5638,7 @@ vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
{
const VALUE *pc = reg_cfp->pc;
rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
rb_event_flag_t global_events = enabled_flags;
if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
return;
@@ -5647,12 +5648,25 @@ vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
size_t pos = pc - iseq->body->iseq_encoded;
rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
rb_event_flag_t local_hook_events = local_hooks != NULL ? local_hooks->events : 0;
enabled_flags |= local_hook_events;
rb_event_flag_t iseq_local_events = local_hooks != NULL ? local_hooks->events : 0;
rb_hook_list_t *bmethod_local_hooks = NULL;
rb_event_flag_t bmethod_local_events = 0;
bool bmethod_frame = VM_FRAME_BMETHOD_P(reg_cfp);
enabled_flags |= iseq_local_events;
VM_ASSERT((local_hook_events & ~ISEQ_TRACE_EVENTS) == 0);
VM_ASSERT((iseq_local_events & ~ISEQ_TRACE_EVENTS) == 0);
if ((pc_events & enabled_flags) == 0) {
if (bmethod_frame) {
const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(reg_cfp);
VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
bmethod_local_hooks = me->def->body.bmethod.hooks;
if (bmethod_local_hooks) {
bmethod_local_events = bmethod_local_hooks->events;
}
}
if ((pc_events & enabled_flags) == 0 && !bmethod_frame) {
#if 0
/* disable trace */
/* TODO: incomplete */
@@ -5670,6 +5684,9 @@ vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
}
else {
rb_hook_list_t *global_hooks = rb_ec_ractor_hooks(ec);
/* Note, not considering iseq local events here since the same
* iseq could be used in multiple bmethods. */
rb_event_flag_t bmethod_events = global_events | bmethod_local_events;
if (0) {
ruby_debug_printf("vm_trace>>%4d (%4x) - %s:%d %s\n",
@@ -5681,17 +5698,25 @@ vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp)
}
VM_ASSERT(reg_cfp->pc == pc);
VM_ASSERT(pc_events != 0);
VM_ASSERT(enabled_flags & pc_events);
/* check traces */
if ((pc_events & RUBY_EVENT_B_CALL) && bmethod_frame && (bmethod_events & RUBY_EVENT_CALL)) {
/* b_call instruction running as a method. Fire call event. */
vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_CALL, RUBY_EVENT_CALL, global_hooks, bmethod_local_hooks, Qundef);
}
VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
if ((pc_events & RUBY_EVENT_B_RETURN) && bmethod_frame && (bmethod_events & RUBY_EVENT_RETURN)) {
/* b_return instruction running as a method. Fire return event. */
vm_trace_hook(ec, reg_cfp, pc, RUBY_EVENT_RETURN, RUBY_EVENT_RETURN, global_hooks, bmethod_local_hooks, TOPN(0));
}
}
}
}
#undef VM_TRACE_HOOK
#if VM_CHECK_MODE > 0
NORETURN( NOINLINE( COLDFUNC

vm_trace.c

@@ -1180,8 +1180,9 @@ rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
{
rb_tp_t *tp = tpptr(tpval);
const rb_iseq_t *iseq = iseq_of(target);
int n;
int n = 0;
unsigned int line = 0;
bool target_bmethod = false;
if (tp->tracing > 0) {
rb_raise(rb_eArgError, "can't nest-enable a targeting TracePoint");
@@ -1199,10 +1200,6 @@ rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
VM_ASSERT(tp->local_target_set == Qfalse);
tp->local_target_set = rb_obj_hide(rb_ident_hash_new());
/* iseq */
n = rb_iseq_add_local_tracepoint_recursively(iseq, tp->events, tpval, line);
rb_hash_aset(tp->local_target_set, (VALUE)iseq, Qtrue);
/* bmethod */
if (rb_obj_is_method(target)) {
rb_method_definition_t *def = (rb_method_definition_t *)rb_method_def(target);
@@ -1211,11 +1208,17 @@ rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
def->body.bmethod.hooks = ZALLOC(rb_hook_list_t);
rb_hook_list_connect_tracepoint(target, def->body.bmethod.hooks, tpval, 0);
rb_hash_aset(tp->local_target_set, target, Qfalse);
target_bmethod = true;
n++;
}
}
/* iseq */
n += rb_iseq_add_local_tracepoint_recursively(iseq, tp->events, tpval, line, target_bmethod);
rb_hash_aset(tp->local_target_set, (VALUE)iseq, Qtrue);
if (n == 0) {
rb_raise(rb_eArgError, "can not enable any hooks");
}