
Support targeting TracePoint [Feature #15289]

* vm_trace.c (rb_tracepoint_enable_for_target): support targeting
  TracePoint. [Feature #15289]

  A targeting TracePoint is enabled only for the specified method,
  proc, and so on, e.g. `tp.enable(target: code)` (see the usage
  sketch below, after the commit metadata).

  `code` should be backed by an InstructionSequence (iseq); that is,
  `RubyVM::InstructionSequence.of(code)` should not return nil.
  If `code` is a tree of iseqs, the TracePoint is enabled on all
  iseqs in the tree.

  An enabled targeting TracePoint can not be enabled again, either
  with or without a target.

* vm_core.h (rb_iseq_t): introduce `rb_iseq_t::local_hooks`
  to store local hooks.
  `rb_iseq_t::aux::trace_events` is renamed to
  `global_trace_events` to contrast with `local_hooks`.

* vm_core.h (rb_hook_list_t): add `rb_hook_list_t::running`
  to represent how many threads/fibers are using this list.
  If this field is 0, nobody is using these hooks and we can
  delete the list.

  This is why we can remove code from cont.c.

* vm_core.h (rb_vm_t): because of the above change, we can
  eliminate the `rb_vm_t::trace_running` field.
  Also renamed `rb_vm_t::event_hooks` to `global_hooks`.

* vm_core.h, vm.c (ruby_vm_event_enabled_global_flags): renamed
  from `ruby_vm_event_enabled_flags`.

* vm_core.h, vm.c (ruby_vm_event_local_num): added to count
  enabled targeting TracePoints.

* vm_core.h, vm_trace.c (rb_exec_event_hooks): now accepts a
  hook list.

* vm_core.h (rb_vm_global_hooks): added for convenience.

* method.h (rb_method_bmethod_t): added to maintain the Proc
  and `rb_hook_list_t` for a bmethod (a method defined by
  define_method).

* prelude.rb (TracePoint#enable): extract the keyword parameter
  in Ruby (because it is easier than doing so in C).
  It calls the internal method `TracePoint#__enable`, written in C.

* vm_insnhelper.c (vm_trace): also check iseq->local_hooks.

* vm.c (invoke_bmethod): check def->body.bmethod.hooks.

* vm.c (hook_before_rewind): check iseq->local_hooks
  and def->body.bmethod.hooks before rewinding on an exception.


git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@66003 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
Author: ko1
Date:   2018-11-26 18:16:39 +00:00
Parent: 2c9259e6e0
Commit: 96990203b7
17 changed files with 755 additions and 255 deletions
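
For orientation, here is a minimal usage sketch of the new API. This is an illustrative example, not part of the commit; it assumes a build with this change applied, where `TracePoint#enable` accepts the `target:` keyword.

def foo
  a = 1
  b = 2
  a + b
end

tp = TracePoint.new(:line) { |t| puts "line #{t.lineno}" }

# The target must be backed by an iseq, i.e.
# RubyVM::InstructionSequence.of(method(:foo)) must not be nil.
tp.enable(target: method(:foo)) do
  foo     # traced: :line events fire only inside foo
  1 + 1   # not traced: outside the target
end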

cont.c

@ -799,15 +799,6 @@ cont_restore_thread(rb_context_t *cont)
th->ec->root_svar = sec->root_svar;
th->ec->ensure_list = sec->ensure_list;
th->ec->errinfo = sec->errinfo;
/* trace on -> trace off */
if (th->ec->trace_arg != NULL && sec->trace_arg == NULL) {
GET_VM()->trace_running--;
}
/* trace off -> trace on */
else if (th->ec->trace_arg == NULL && sec->trace_arg != NULL) {
GET_VM()->trace_running++;
}
th->ec->trace_arg = sec->trace_arg;
VM_ASSERT(th->ec->vm_stack != NULL);

gc.c

@ -4303,7 +4303,8 @@ mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
gc_mark(objspace, def->body.attr.location);
break;
case VM_METHOD_TYPE_BMETHOD:
gc_mark(objspace, def->body.proc);
gc_mark(objspace, def->body.bmethod.proc);
if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
break;
case VM_METHOD_TYPE_ALIAS:
gc_mark(objspace, (VALUE)def->body.alias.original_me);

iseq.c

@ -111,6 +111,11 @@ rb_iseq_free(const rb_iseq_t *iseq)
compile_data_free(ISEQ_COMPILE_DATA(iseq));
ruby_xfree(body);
}
if (iseq->local_hooks) {
rb_hook_list_free(iseq->local_hooks);
}
RUBY_FREE_LEAVE("iseq");
}
@ -247,6 +252,9 @@ rb_iseq_mark(const rb_iseq_t *iseq)
}
}
if (iseq->local_hooks) {
rb_hook_list_mark(iseq->local_hooks);
}
if (FL_TEST(iseq, ISEQ_NOT_LOADED_YET)) {
rb_gc_mark(iseq->aux.loader.obj);
@ -511,9 +519,9 @@ rb_iseq_insns_info_decode_positions(const struct rb_iseq_constant_body *body)
void
rb_iseq_init_trace(rb_iseq_t *iseq)
{
iseq->aux.trace_events = 0;
if (ruby_vm_event_enabled_flags & ISEQ_TRACE_EVENTS) {
rb_iseq_trace_set(iseq, ruby_vm_event_enabled_flags & ISEQ_TRACE_EVENTS);
iseq->aux.global_trace_events = 0;
if (ruby_vm_event_enabled_global_flags & ISEQ_TRACE_EVENTS) {
rb_iseq_trace_set(iseq, ruby_vm_event_enabled_global_flags & ISEQ_TRACE_EVENTS);
}
}
@ -1668,7 +1676,7 @@ rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t res
struct iseq_insn_info_entry *entry = (struct iseq_insn_info_entry *)get_insn_info(iseq, pos);
if (entry) {
entry->events &= ~reset;
if (!(entry->events & iseq->aux.trace_events)) {
if (!(entry->events & iseq->aux.global_trace_events)) {
void rb_iseq_trace_flag_cleared(const rb_iseq_t *iseq, size_t pos);
rb_iseq_trace_flag_cleared(iseq, pos);
}
@ -1939,8 +1947,7 @@ rb_iseq_disasm_insn(VALUE ret, const VALUE *code, size_t pos,
events & RUBY_EVENT_B_CALL ? "Bc" : "",
events & RUBY_EVENT_B_RETURN ? "Br" : "",
events & RUBY_EVENT_COVERAGE_LINE ? "Cli" : "",
events & RUBY_EVENT_COVERAGE_BRANCH ? "Cbr" : ""
);
events & RUBY_EVENT_COVERAGE_BRANCH ? "Cbr" : "");
}
}
@ -2137,47 +2144,6 @@ rb_iseq_disasm(const rb_iseq_t *iseq)
return rb_iseq_disasm_recursive(iseq, rb_str_new(0, 0));
}
static VALUE
rb_iseq_all_children(const rb_iseq_t *iseq)
{
unsigned int i;
VALUE *code = rb_iseq_original_iseq(iseq);
VALUE all_children = rb_obj_hide(rb_ident_hash_new());
VALUE child;
const struct rb_iseq_constant_body *const body = iseq->body;
if (body->catch_table) {
for (i = 0; i < body->catch_table->size; i++) {
const struct iseq_catch_table_entry *entry = &body->catch_table->entries[i];
child = (VALUE)entry->iseq;
if (child) {
rb_hash_aset(all_children, child, Qtrue);
}
}
}
for (i=0; i<body->iseq_size;) {
VALUE insn = code[i];
int len = insn_len(insn);
const char *types = insn_op_types(insn);
int j;
for (j=0; types[j]; j++) {
switch (types[j]) {
case TS_ISEQ:
child = code[i+j+1];
if (child) {
rb_hash_aset(all_children, child, Qtrue);
}
break;
default:
break;
}
}
i += len;
}
return all_children;
}
/*
* call-seq:
* iseq.disasm -> str
@ -2203,10 +2169,58 @@ iseqw_disasm(VALUE self)
}
static int
iseqw_each_child_i(VALUE key, VALUE value, VALUE dummy)
iseq_iterate_children(const rb_iseq_t *iseq, void (*iter_func)(const rb_iseq_t *child_iseq, void *data), void *data)
{
rb_yield(iseqw_new((const rb_iseq_t *)key));
return ST_CONTINUE;
unsigned int i;
VALUE *code = rb_iseq_original_iseq(iseq);
const struct rb_iseq_constant_body *const body = iseq->body;
const rb_iseq_t *child;
VALUE all_children = rb_obj_hide(rb_ident_hash_new());
if (body->catch_table) {
for (i = 0; i < body->catch_table->size; i++) {
const struct iseq_catch_table_entry *entry = &body->catch_table->entries[i];
child = entry->iseq;
if (child) {
if (rb_hash_aref(all_children, (VALUE)child) == Qnil) {
rb_hash_aset(all_children, (VALUE)child, Qtrue);
(*iter_func)(child, data);
}
}
}
}
for (i=0; i<body->iseq_size;) {
VALUE insn = code[i];
int len = insn_len(insn);
const char *types = insn_op_types(insn);
int j;
for (j=0; types[j]; j++) {
switch (types[j]) {
case TS_ISEQ:
child = (const rb_iseq_t *)code[i+j+1];
if (child) {
if (rb_hash_aref(all_children, (VALUE)child) == Qnil) {
rb_hash_aset(all_children, (VALUE)child, Qtrue);
(*iter_func)(child, data);
}
}
break;
default:
break;
}
}
i += len;
}
return RHASH_SIZE(all_children);
}
static void
yield_each_children(const rb_iseq_t *child_iseq, void *data)
{
rb_yield(iseqw_new(child_iseq));
}
/*
@ -2221,8 +2235,7 @@ static VALUE
iseqw_each_child(VALUE self)
{
const rb_iseq_t *iseq = iseqw_check(self);
VALUE all_children = rb_iseq_all_children(iseq);
rb_hash_foreach(all_children, iseqw_each_child_i, Qnil);
iseq_iterate_children(iseq, yield_each_children, NULL);
return self;
}
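
The traversal above also backs `RubyVM::InstructionSequence#each_child`. A small illustrative sketch of the Ruby-visible behavior (example code, not part of the commit):

iseq = RubyVM::InstructionSequence.compile("1.times { |i| p i }")
iseq.each_child do |child|
  p child.label   # => e.g. "block in <compiled>"
end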
@ -2966,12 +2979,111 @@ rb_iseq_trace_flag_cleared(const rb_iseq_t *iseq, size_t pos)
encoded_iseq_trace_instrument(&iseq_encoded[pos], 0);
}
static int
iseq_add_local_tracepoint(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval)
{
unsigned int pc;
int n = 0;
const struct rb_iseq_constant_body *const body = iseq->body;
VALUE *iseq_encoded = (VALUE *)body->iseq_encoded;
VM_ASSERT((iseq->flags & ISEQ_USE_COMPILE_DATA) == 0);
for (pc=0; pc<body->iseq_size;) {
rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pc);
if (pc_events & turnon_events) {
n++;
}
pc += encoded_iseq_trace_instrument(&iseq_encoded[pc], pc_events & (turnon_events | iseq->aux.global_trace_events));
}
if (n > 0) {
if (iseq->local_hooks == NULL) {
((rb_iseq_t *)iseq)->local_hooks = RB_ZALLOC(rb_hook_list_t);
}
rb_hook_list_connect_tracepoint((VALUE)iseq, iseq->local_hooks, tpval);
}
return n;
}
struct trace_set_local_events_struct {
rb_event_flag_t turnon_events;
VALUE tpval;
int n;
};
static void
iseq_add_local_tracepoint_i(const rb_iseq_t *iseq, void *p)
{
struct trace_set_local_events_struct *data = (struct trace_set_local_events_struct *)p;
data->n += iseq_add_local_tracepoint(iseq, data->turnon_events, data->tpval);
iseq_iterate_children(iseq, iseq_add_local_tracepoint_i, p);
}
int
rb_iseq_add_local_tracepoint_recursively(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval)
{
struct trace_set_local_events_struct data = {turnon_events, tpval, 0};
iseq_add_local_tracepoint_i(iseq, (void *)&data);
if (0) rb_funcall(Qnil, rb_intern("puts"), 1, rb_iseq_disasm(iseq)); /* for debug */
return data.n;
}
static int
iseq_remove_local_tracepoint(const rb_iseq_t *iseq, VALUE tpval)
{
int n = 0;
if (iseq->local_hooks) {
unsigned int pc;
const struct rb_iseq_constant_body *const body = iseq->body;
VALUE *iseq_encoded = (VALUE *)body->iseq_encoded;
rb_event_flag_t local_events = 0;
rb_hook_list_remove_tracepoint(iseq->local_hooks, tpval);
local_events = iseq->local_hooks->events;
if (local_events == 0) {
if (iseq->local_hooks->running == 0) {
rb_hook_list_free(iseq->local_hooks);
}
((rb_iseq_t *)iseq)->local_hooks = NULL;
}
for (pc = 0; pc<body->iseq_size;) {
rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pc);
pc += encoded_iseq_trace_instrument(&iseq_encoded[pc], pc_events & (local_events | iseq->aux.global_trace_events));
}
}
return n;
}
struct trace_clear_local_events_struct {
VALUE tpval;
int n;
};
static void
iseq_remove_local_tracepoint_i(const rb_iseq_t *iseq, void *p)
{
struct trace_clear_local_events_struct *data = (struct trace_clear_local_events_struct *)p;
data->n += iseq_remove_local_tracepoint(iseq, data->tpval);
iseq_iterate_children(iseq, iseq_remove_local_tracepoint_i, p);
}
int
rb_iseq_remove_local_tracepoint_recursively(const rb_iseq_t *iseq, VALUE tpval)
{
struct trace_clear_local_events_struct data = {tpval, 0};
iseq_remove_local_tracepoint_i(iseq, (void *)&data);
return data.n;
}
void
rb_iseq_trace_set(const rb_iseq_t *iseq, rb_event_flag_t turnon_events)
{
VM_ASSERT((turnon_events & ~ISEQ_TRACE_EVENTS) == 0);
if (iseq->aux.trace_events == turnon_events) {
if (iseq->aux.global_trace_events == turnon_events) {
return;
}
if (iseq->flags & ISEQ_USE_COMPILE_DATA) {
@ -2979,16 +3091,18 @@ rb_iseq_trace_set(const rb_iseq_t *iseq, rb_event_flag_t turnon_events)
return;
}
else {
unsigned int i;
unsigned int pc;
const struct rb_iseq_constant_body *const body = iseq->body;
VALUE *iseq_encoded = (VALUE *)body->iseq_encoded;
((rb_iseq_t *)iseq)->aux.trace_events = turnon_events;
rb_event_flag_t enabled_events;
rb_event_flag_t local_events = iseq->local_hooks ? iseq->local_hooks->events : 0;
((rb_iseq_t *)iseq)->aux.global_trace_events = turnon_events;
enabled_events = turnon_events | local_events;
for (i=0; i<body->iseq_size;) {
rb_event_flag_t events = rb_iseq_event_flags(iseq, i);
i += encoded_iseq_trace_instrument(&iseq_encoded[i], events & turnon_events);
for (pc=0; pc<body->iseq_size;) {
rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pc);
pc += encoded_iseq_trace_instrument(&iseq_encoded[pc], pc_events & enabled_events);
}
/* clear for debugging: ISEQ_ORIGINAL_ISEQ_CLEAR(iseq); */
}
}
@ -3013,7 +3127,7 @@ rb_iseq_trace_set_all(rb_event_flag_t turnon_events)
}
/* This is exported since Ruby 2.5 but not internally used for now. If you're going to use this, please
update `ruby_vm_event_enabled_flags` and set `mjit_call_p = FALSE` as well to cancel MJIT code. */
update `ruby_vm_event_enabled_global_flags` and set `mjit_call_p = FALSE` as well to cancel MJIT code. */
void
rb_iseq_trace_on_all(void)
{

iseq.h

@ -148,6 +148,8 @@ void rb_ibf_load_iseq_complete(rb_iseq_t *iseq);
const rb_iseq_t *rb_iseq_ibf_load(VALUE str);
VALUE rb_iseq_ibf_load_extra_data(VALUE str);
void rb_iseq_init_trace(rb_iseq_t *iseq);
int rb_iseq_add_local_tracepoint_recursively(const rb_iseq_t *iseq, rb_event_flag_t turnon_events, VALUE tpval);
int rb_iseq_remove_local_tracepoint_recursively(const rb_iseq_t *iseq, VALUE tpval);
#if VM_INSN_INFO_TABLE_IMPL == 2
unsigned int *rb_iseq_insns_info_decode_positions(const struct rb_iseq_constant_body *body);

method.h

@ -147,6 +147,11 @@ typedef struct rb_method_refined_struct {
const VALUE owner;
} rb_method_refined_t;
typedef struct rb_method_bmethod_struct {
const VALUE proc; /* should be marked */
struct rb_hook_list_struct *hooks;
} rb_method_bmethod_t;
enum method_optimized_type {
OPTIMIZED_METHOD_TYPE_SEND,
OPTIMIZED_METHOD_TYPE_CALL,
@ -165,9 +170,9 @@ PACKED_STRUCT_UNALIGNED(struct rb_method_definition_struct {
rb_method_attr_t attr;
rb_method_alias_t alias;
rb_method_refined_t refined;
rb_method_bmethod_t bmethod;
const VALUE proc; /* should be marked */
enum method_optimized_type optimize_type;
enum method_optimized_type optimize_type;
} body;
ID original_id;

mjit.c

@ -173,7 +173,7 @@ struct mjit_options mjit_opts;
/* TRUE if MJIT is enabled. */
int mjit_enabled = FALSE;
/* TRUE if JIT-ed code should be called. When `ruby_vm_event_enabled_flags & ISEQ_TRACE_EVENTS`
/* TRUE if JIT-ed code should be called. When `ruby_vm_event_enabled_global_flags & ISEQ_TRACE_EVENTS`
and `mjit_call_p == FALSE`, any JIT-ed code execution is cancelled as soon as possible. */
int mjit_call_p = FALSE;

prelude.rb

@ -132,6 +132,12 @@ class IO
end
end
class TracePoint
def enable target: nil, &blk
self.__enable target, &blk
end
end
class Binding
# :nodoc:
def irb

proc.c

@ -2342,7 +2342,7 @@ rb_method_entry_min_max_arity(const rb_method_entry_t *me, int *max)
def = def->body.alias.original_me->def;
goto again;
case VM_METHOD_TYPE_BMETHOD:
return rb_proc_min_max_arity(def->body.proc, max);
return rb_proc_min_max_arity(def->body.bmethod.proc, max);
case VM_METHOD_TYPE_ISEQ:
return rb_iseq_min_max_arity(rb_iseq_check(def->body.iseq.iseqptr), max);
case VM_METHOD_TYPE_UNDEF:
@ -2478,8 +2478,8 @@ rb_obj_method_arity(VALUE obj, ID id)
return rb_mod_method_arity(CLASS_OF(obj), id);
}
static inline const rb_method_definition_t *
method_def(VALUE method)
const rb_method_definition_t *
rb_method_def(VALUE method)
{
const struct METHOD *data;
@ -2494,7 +2494,7 @@ method_def_iseq(const rb_method_definition_t *def)
case VM_METHOD_TYPE_ISEQ:
return rb_iseq_check(def->body.iseq.iseqptr);
case VM_METHOD_TYPE_BMETHOD:
return rb_proc_get_iseq(def->body.proc, 0);
return rb_proc_get_iseq(def->body.bmethod.proc, 0);
case VM_METHOD_TYPE_ALIAS:
return method_def_iseq(def->body.alias.original_me->def);
case VM_METHOD_TYPE_CFUNC:
@ -2514,13 +2514,13 @@ method_def_iseq(const rb_method_definition_t *def)
const rb_iseq_t *
rb_method_iseq(VALUE method)
{
return method_def_iseq(method_def(method));
return method_def_iseq(rb_method_def(method));
}
static const rb_cref_t *
method_cref(VALUE method)
{
const rb_method_definition_t *def = method_def(method);
const rb_method_definition_t *def = rb_method_def(method);
again:
switch (def->type) {
@ -2576,7 +2576,7 @@ rb_obj_method_location(VALUE obj, ID id)
VALUE
rb_method_location(VALUE method)
{
return method_def_location(method_def(method));
return method_def_location(rb_method_def(method));
}
/*

test/ruby/test_settracefunc.rb

@ -1914,4 +1914,146 @@ class TestSetTraceFunc < Test::Unit::TestCase
EOF
assert_equal "7\n", actual, '[Bug #14809]'
end
def method_for_enable_target1
a = 1
b = 2
1.times{|i|
x = i
}
c = a + b
end
def method_for_enable_target2
a = 1
b = 2
1.times{|i|
x = i
}
c = a + b
end
def check_with_events *trace_events
all_events = [[:call, :method_for_enable_target1],
[:line, :method_for_enable_target1],
[:line, :method_for_enable_target1],
[:line, :method_for_enable_target1],
[:b_call, :method_for_enable_target1],
[:line, :method_for_enable_target1],
[:b_return, :method_for_enable_target1],
[:line, :method_for_enable_target1],
[:return, :method_for_enable_target1],
# repeat
[:call, :method_for_enable_target1],
[:line, :method_for_enable_target1],
[:line, :method_for_enable_target1],
[:line, :method_for_enable_target1],
[:b_call, :method_for_enable_target1],
[:line, :method_for_enable_target1],
[:b_return, :method_for_enable_target1],
[:line, :method_for_enable_target1],
[:return, :method_for_enable_target1],
]
events = []
TracePoint.new(*trace_events) do |tp|
next unless target_thread?
events << [tp.event, tp.method_id]
end.enable(target: method(:method_for_enable_target1)) do
method_for_enable_target1
method_for_enable_target2
method_for_enable_target1
end
assert_equal all_events.find_all{|(ev, m)| trace_events.include? ev}, events
end
def test_tracepoint_enable_target
check_with_events :line
check_with_events :call, :return
check_with_events :line, :call, :return
check_with_events :call, :return, :b_call, :b_return
check_with_events :line, :call, :return, :b_call, :b_return
end
def test_tracepoint_nested_enabled_with_target
code1 = proc{
a = 1
}
code2 = proc{
b = 2
}
## error
# targetted TP and targetted TP
ex = assert_raise(ArgumentError) do
tp = TracePoint.new(:line){}
tp.enable(target: code1){
tp.enable(target: code2){}
}
end
assert_equal "can't nest-enable a targetting TracePoint", ex.message
# global TP and targetted TP
ex = assert_raise(ArgumentError) do
tp = TracePoint.new(:line){}
tp.enable{
tp.enable(target: code2){}
}
end
assert_equal "can't nest-enable a targetting TracePoint", ex.message
# targetted TP and global TP
ex = assert_raise(ArgumentError) do
tp = TracePoint.new(:line){}
tp.enable(target: code1){
tp.enable{}
}
end
assert_equal "can't nest-enable a targetting TracePoint", ex.message
# targetted TP and disable
ex = assert_raise(ArgumentError) do
tp = TracePoint.new(:line){}
tp.enable(target: code1){
tp.disable{}
}
end
assert_equal "can't disable a targetting TracePoint in a block", ex.message
## success with two nesting targetting tracepoints
events = []
tp1 = TracePoint.new(:line){|tp| events << :tp1}
tp2 = TracePoint.new(:line){|tp| events << :tp2}
tp1.enable(target: code1) do
tp2.enable(target: code1) do
code1.call
events << :___
end
end
assert_equal [:tp2, :tp1, :___], events
# success with two tracepoints (global/targetting)
events = []
tp1 = TracePoint.new(:line){|tp| events << :tp1}
tp2 = TracePoint.new(:line){|tp| events << :tp2}
tp1.enable do
tp2.enable(target: code1) do
code1.call
events << :___
end
end
assert_equal [:tp1, :tp1, :tp1, :tp1, :tp2, :tp1, :___], events
# success with two tracepoints (targetting/global)
events = []
tp1 = TracePoint.new(:line){|tp| events << :tp1}
tp2 = TracePoint.new(:line){|tp| events << :tp2}
tp1.enable(target: code1) do
tp2.enable do
code1.call
events << :___
end
end
assert_equal [:tp2, :tp2, :tp1, :tp2, :___], events
end
end

thread.c

@ -5340,7 +5340,7 @@ rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
break;
}
case VM_METHOD_TYPE_BMETHOD: {
const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.proc, 0);
const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
if (iseq) {
rb_iseq_location_t *loc;
rb_iseq_check(iseq);


@ -61,7 +61,7 @@
%
% # JIT: We should evaluate ISeq modified for TracePoint if it's enabled. Note: This is slow.
% unless insn.always_leaf?
fprintf(f, " if (UNLIKELY(ruby_vm_event_enabled_flags & ISEQ_TRACE_EVENTS)) {\n");
fprintf(f, " if (UNLIKELY(ruby_vm_event_enabled_global_flags & ISEQ_TRACE_EVENTS)) {\n");
fprintf(f, " reg_cfp->sp = (VALUE *)reg_cfp->bp + %d;\n", b->stack_size + (int)<%= insn.call_attribute('sp_inc') %> + 1);
if (!pc_moved_p) {
fprintf(f, " reg_cfp->pc = original_body_iseq + %d;\n", next_pos);


@ -76,7 +76,7 @@
fprintf(f, " }\n");
% # JIT: We should evaluate ISeq modified for TracePoint if it's enabled. Note: This is slow.
fprintf(f, " if (UNLIKELY(ruby_vm_event_enabled_flags & ISEQ_TRACE_EVENTS)) {\n");
fprintf(f, " if (UNLIKELY(ruby_vm_event_enabled_global_flags & ISEQ_TRACE_EVENTS)) {\n");
fprintf(f, " reg_cfp->sp = (VALUE *)reg_cfp->bp + %d;\n", b->stack_size + (int)<%= insn.call_attribute('sp_inc') %> + 1);
if (!pc_moved_p) {
fprintf(f, " reg_cfp->pc = original_body_iseq + %d;\n", next_pos);

vm.c

@ -338,8 +338,11 @@ VALUE rb_mRubyVMFrozenCore;
VALUE ruby_vm_const_missing_count = 0;
rb_vm_t *ruby_current_vm_ptr = NULL;
rb_execution_context_t *ruby_current_execution_context_ptr = NULL;
rb_event_flag_t ruby_vm_event_flags;
rb_event_flag_t ruby_vm_event_enabled_flags;
rb_event_flag_t ruby_vm_event_enabled_global_flags;
unsigned int ruby_vm_event_local_num;
rb_serial_t ruby_vm_global_method_state = 1;
rb_serial_t ruby_vm_global_constant_state = 1;
rb_serial_t ruby_vm_class_serial = 1;
@ -1015,6 +1018,9 @@ invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, co
/* bmethod */
int arg_size = iseq->body->param.size;
VALUE ret;
rb_hook_list_t *hooks;
VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_BMETHOD, self,
VM_GUARDED_PREV_EP(captured->ep),
@ -1026,9 +1032,21 @@ invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, co
RUBY_DTRACE_METHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id);
EXEC_EVENT_HOOK(ec, RUBY_EVENT_CALL, self, me->def->original_id, me->called_id, me->owner, Qnil);
if (UNLIKELY((hooks = me->def->body.bmethod.hooks) != NULL) &&
hooks->events & RUBY_EVENT_CALL) {
rb_exec_event_hook_orig(ec, hooks, RUBY_EVENT_CALL, self,
me->def->original_id, me->called_id, me->owner, Qnil, FALSE);
}
VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
ret = vm_exec(ec, TRUE);
EXEC_EVENT_HOOK(ec, RUBY_EVENT_RETURN, self, me->def->original_id, me->called_id, me->owner, ret);
if ((hooks = me->def->body.bmethod.hooks) != NULL &&
hooks->events & RUBY_EVENT_RETURN) {
rb_exec_event_hook_orig(ec, hooks, RUBY_EVENT_RETURN, self,
me->def->original_id, me->called_id, me->owner, ret, FALSE);
}
RUBY_DTRACE_METHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
return ret;
}
@ -1687,39 +1705,72 @@ frame_name(const rb_control_frame_t *cfp)
#endif
static void
hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp, int will_finish_vm_exec, int state, struct vm_throw_data *err)
hook_before_rewind(rb_execution_context_t *ec, const rb_control_frame_t *cfp,
int will_finish_vm_exec, int state, struct vm_throw_data *err)
{
if (state == TAG_RAISE && RBASIC_CLASS(err) == rb_eSysStackError) {
return;
}
switch (VM_FRAME_TYPE(ec->cfp)) {
case VM_FRAME_MAGIC_METHOD:
RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
THROW_DATA_CONSUMED_SET(err);
break;
case VM_FRAME_MAGIC_BLOCK:
if (VM_FRAME_BMETHOD_P(ec->cfp)) {
EXEC_EVENT_HOOK(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
else {
const rb_iseq_t *iseq = cfp->iseq;
rb_hook_list_t *local_hooks = iseq->local_hooks;
if (!will_finish_vm_exec) {
/* kick RUBY_EVENT_RETURN at invoke_block_from_c() for bmethod */
EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self,
rb_vm_frame_method_entry(ec->cfp)->def->original_id,
rb_vm_frame_method_entry(ec->cfp)->called_id,
rb_vm_frame_method_entry(ec->cfp)->owner,
frame_return_value(err));
}
THROW_DATA_CONSUMED_SET(err);
}
else {
EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
THROW_DATA_CONSUMED_SET(err);
}
break;
case VM_FRAME_MAGIC_CLASS:
EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_END, ec->cfp->self, 0, 0, 0, Qnil);
break;
switch (VM_FRAME_TYPE(ec->cfp)) {
case VM_FRAME_MAGIC_METHOD:
RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN,
ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
}
THROW_DATA_CONSUMED_SET(err);
break;
case VM_FRAME_MAGIC_BLOCK:
if (VM_FRAME_BMETHOD_P(ec->cfp)) {
EXEC_EVENT_HOOK(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
ec->cfp->self, 0, 0, 0, frame_return_value(err), FALSE);
}
if (!will_finish_vm_exec) {
const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(ec->cfp);
/* kick RUBY_EVENT_RETURN at invoke_block_from_c() for bmethod */
EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self,
rb_vm_frame_method_entry(ec->cfp)->def->original_id,
rb_vm_frame_method_entry(ec->cfp)->called_id,
rb_vm_frame_method_entry(ec->cfp)->owner,
frame_return_value(err));
VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
local_hooks = me->def->body.bmethod.hooks;
if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
rb_vm_frame_method_entry(ec->cfp)->def->original_id,
rb_vm_frame_method_entry(ec->cfp)->called_id,
rb_vm_frame_method_entry(ec->cfp)->owner,
frame_return_value(err), TRUE);
}
}
THROW_DATA_CONSUMED_SET(err);
}
else {
EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
}
THROW_DATA_CONSUMED_SET(err);
}
break;
case VM_FRAME_MAGIC_CLASS:
EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_END, ec->cfp->self, 0, 0, 0, Qnil);
break;
}
}
}
@ -2135,8 +2186,6 @@ rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
/* vm */
void rb_vm_trace_mark_event_hooks(rb_hook_list_t *hooks);
void
rb_vm_mark(void *ptr)
{
@ -2165,7 +2214,7 @@ rb_vm_mark(void *ptr)
rb_mark_tbl(vm->loading_table);
}
rb_vm_trace_mark_event_hooks(&vm->event_hooks);
rb_hook_list_mark(&vm->global_hooks);
rb_gc_mark_values(RUBY_NSIG, vm->trap_list.cmd);

vm_core.h

@ -474,7 +474,7 @@ struct rb_iseq_constant_body {
/* typedef rb_iseq_t is in method.h */
struct rb_iseq_struct {
VALUE flags;
VALUE reserved1;
struct rb_hook_list_struct *local_hooks;
struct rb_iseq_constant_body *body;
union { /* 4, 5 words */
@ -485,7 +485,7 @@ struct rb_iseq_struct {
int index;
} loader;
rb_event_flag_t trace_events;
rb_event_flag_t global_trace_events;
} aux;
};
@ -577,7 +577,8 @@ void rb_objspace_free(struct rb_objspace *);
typedef struct rb_hook_list_struct {
struct rb_event_hook_struct *hooks;
rb_event_flag_t events;
int need_clean;
unsigned int need_clean;
unsigned int running;
} rb_hook_list_t;
typedef struct rb_vm_struct {
@ -608,8 +609,6 @@ typedef struct rb_vm_struct {
unsigned int thread_report_on_exception: 1;
unsigned int safe_level_: 1;
int trace_running;
int sleeper;
/* object management */
@ -634,7 +633,7 @@ typedef struct rb_vm_struct {
} trap_list;
/* hook */
rb_hook_list_t event_hooks;
rb_hook_list_t global_hooks;
/* relation table of ensure - rollback for callcc */
struct st_table *ensure_rollback_table;
@ -1694,7 +1693,8 @@ RUBY_SYMBOL_EXPORT_BEGIN
RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
RUBY_EXTERN rb_execution_context_t *ruby_current_execution_context_ptr;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_flags;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
RUBY_EXTERN unsigned int ruby_vm_event_local_num;
RUBY_SYMBOL_EXPORT_END
@ -1805,6 +1805,7 @@ rb_vm_check_ints(rb_execution_context_t *ec)
}
/* tracer */
struct rb_trace_arg_struct {
rb_event_flag_t event;
rb_execution_context_t *ec;
@ -1822,24 +1823,29 @@ struct rb_trace_arg_struct {
VALUE path;
};
void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, int pop_p);
void rb_hook_list_mark(rb_hook_list_t *hooks);
void rb_hook_list_free(rb_hook_list_t *hooks);
void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval);
void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);
#define EXEC_EVENT_HOOK_ORIG(ec_, flag_, vm_flags_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
const rb_event_flag_t flag_arg_ = (flag_); \
if (UNLIKELY(vm_flags_ & (flag_arg_))) { \
/* defer evaluating the other arguments */ \
rb_exec_event_hook_orig(ec_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
rb_hook_list_t *hooks_arg_ = (hooks_); \
if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
/* defer evaluating the other arguments */ \
rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
} \
} while (0)
static inline void
rb_exec_event_hook_orig(rb_execution_context_t *ec, const rb_event_flag_t flag,
VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
{
struct rb_trace_arg_struct trace_arg;
VM_ASSERT(rb_ec_vm_ptr(ec)->event_hooks.events == ruby_vm_event_flags);
VM_ASSERT(rb_ec_vm_ptr(ec)->event_hooks.events & flag);
VM_ASSERT((hooks->events & flag) != 0);
trace_arg.event = flag;
trace_arg.ec = ec;
@ -1851,14 +1857,21 @@ rb_exec_event_hook_orig(rb_execution_context_t *ec, const rb_event_flag_t flag,
trace_arg.data = data;
trace_arg.path = Qundef;
trace_arg.klass_solved = 0;
rb_exec_event_hooks(&trace_arg, pop_p);
rb_exec_event_hooks(&trace_arg, hooks, pop_p);
}
static inline rb_hook_list_t *
rb_vm_global_hooks(const rb_execution_context_t *ec)
{
return &rb_ec_vm_ptr(ec)->global_hooks;
}
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
EXEC_EVENT_HOOK_ORIG(ec_, flag_, ruby_vm_event_flags, self_, id_, called_id_, klass_, data_, 0)
EXEC_EVENT_HOOK_ORIG(ec_, rb_vm_global_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
EXEC_EVENT_HOOK_ORIG(ec_, flag_, ruby_vm_event_flags, self_, id_, called_id_, klass_, data_, 1)
EXEC_EVENT_HOOK_ORIG(ec_, rb_vm_global_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
RUBY_SYMBOL_EXPORT_BEGIN

vm_insnhelper.c

@ -1946,7 +1946,7 @@ vm_call_bmethod_body(rb_execution_context_t *ec, struct rb_calling_info *calling
VALUE val;
/* control block frame */
GetProcPtr(cc->me->def->body.proc, proc);
GetProcPtr(cc->me->def->body.bmethod.proc, proc);
val = rb_vm_invoke_bmethod(ec, proc, calling->recv, calling->argc, argv, calling->block_handler, cc->me);
return val;
@ -3847,23 +3847,62 @@ rb_event_flag_t rb_iseq_event_flags(const rb_iseq_t *iseq, size_t pos);
NOINLINE(static void vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc));
static inline void
vm_trace_hook(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc,
rb_event_flag_t pc_events, rb_event_flag_t target_event,
rb_hook_list_t *global_hooks, rb_hook_list_t *local_hooks, VALUE val)
{
rb_event_flag_t event = pc_events & target_event;
VALUE self = GET_SELF();
VM_ASSERT(rb_popcount64((uint64_t)event) == 1);
if (event & global_hooks->events) {
/* increment PC because source line is calculated with PC-1 */
reg_cfp->pc++;
vm_dtrace(event, ec);
rb_exec_event_hook_orig(ec, global_hooks, event, self, 0, 0, 0 , val, 0);
reg_cfp->pc--;
}
if (local_hooks != NULL) {
if (event & local_hooks->events) {
/* increment PC because source line is calculated with PC-1 */
reg_cfp->pc++;
rb_exec_event_hook_orig(ec, local_hooks, event, self, 0, 0, 0 , val, 0);
reg_cfp->pc--;
}
}
}
#define VM_TRACE_HOOK(target_event, val) do { \
if ((pc_events & (target_event)) & enabled_flags) { \
vm_trace_hook(ec, reg_cfp, pc, pc_events, (target_event), global_hooks, local_hooks, (val)); \
} \
} while (0)
static void
vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *pc)
{
rb_event_flag_t vm_event_flags = ruby_vm_event_flags;
rb_event_flag_t enabled_flags = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
if (vm_event_flags == 0) {
return;
if (enabled_flags == 0 && ruby_vm_event_local_num == 0) {
return;
}
else {
const rb_iseq_t *iseq = reg_cfp->iseq;
size_t pos = pc - iseq->body->iseq_encoded;
rb_event_flag_t events = rb_iseq_event_flags(iseq, pos);
rb_event_flag_t event;
rb_event_flag_t pc_events = rb_iseq_event_flags(iseq, pos);
rb_hook_list_t *local_hooks = iseq->local_hooks;
rb_event_flag_t local_hook_events = local_hooks != NULL ? local_hooks->events : 0;
enabled_flags |= local_hook_events;
if ((events & vm_event_flags) == 0) {
VM_ASSERT((local_hook_events & ~ISEQ_TRACE_EVENTS) == 0);
if ((pc_events & enabled_flags) == 0) {
#if 0
/* disable trace */
/* TODO: incomplete */
rb_iseq_trace_set(iseq, vm_event_flags & ISEQ_TRACE_EVENTS);
#else
/* do not disable trace because of performance problem
@ -3871,60 +3910,33 @@ vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *p
*/
#endif
return;
}
if (ec->trace_arg != NULL) return;
if (0) {
fprintf(stderr, "vm_trace>>%4d (%4x) - %s:%d %s\n",
(int)pos,
(int)events,
RSTRING_PTR(rb_iseq_path(iseq)),
(int)rb_iseq_line_no(iseq, pos),
RSTRING_PTR(rb_iseq_label(iseq)));
}
VM_ASSERT(reg_cfp->pc == pc);
VM_ASSERT(events != 0);
VM_ASSERT(vm_event_flags & events);
/* increment PC because source line is calculated with PC-1 */
if ((event = (events & (RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL))) != 0) {
VM_ASSERT(event == RUBY_EVENT_CLASS ||
event == RUBY_EVENT_CALL ||
event == RUBY_EVENT_B_CALL);
reg_cfp->pc++;
vm_dtrace(event, ec);
EXEC_EVENT_HOOK(ec, event, GET_SELF(), 0, 0, 0, Qundef);
reg_cfp->pc--;
}
if (events & RUBY_EVENT_LINE) {
reg_cfp->pc++;
vm_dtrace(RUBY_EVENT_LINE, ec);
EXEC_EVENT_HOOK(ec, RUBY_EVENT_LINE, GET_SELF(), 0, 0, 0, Qundef);
reg_cfp->pc--;
}
if (events & RUBY_EVENT_COVERAGE_LINE) {
reg_cfp->pc++;
vm_dtrace(RUBY_EVENT_COVERAGE_LINE, ec);
EXEC_EVENT_HOOK(ec, RUBY_EVENT_COVERAGE_LINE, GET_SELF(), 0, 0, 0, Qundef);
reg_cfp->pc--;
}
if (events & RUBY_EVENT_COVERAGE_BRANCH) {
reg_cfp->pc++;
vm_dtrace(RUBY_EVENT_COVERAGE_BRANCH, ec);
EXEC_EVENT_HOOK(ec, RUBY_EVENT_COVERAGE_BRANCH, GET_SELF(), 0, 0, 0, Qundef);
reg_cfp->pc--;
}
if ((event = (events & (RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN))) != 0) {
VM_ASSERT(event == RUBY_EVENT_END ||
event == RUBY_EVENT_RETURN ||
event == RUBY_EVENT_B_RETURN);
reg_cfp->pc++;
vm_dtrace(event, ec);
EXEC_EVENT_HOOK(ec, event, GET_SELF(), 0, 0, 0, TOPN(0));
reg_cfp->pc--;
}
else if (ec->trace_arg != NULL) {
/* already tracing */
return;
}
else {
rb_hook_list_t *global_hooks = rb_vm_global_hooks(ec);
if (0) {
fprintf(stderr, "vm_trace>>%4d (%4x) - %s:%d %s\n",
(int)pos,
(int)pc_events,
RSTRING_PTR(rb_iseq_path(iseq)),
(int)rb_iseq_line_no(iseq, pos),
RSTRING_PTR(rb_iseq_label(iseq)));
}
VM_ASSERT(reg_cfp->pc == pc);
VM_ASSERT(pc_events != 0);
VM_ASSERT(enabled_flags & pc_events);
/* check traces */
VM_TRACE_HOOK(RUBY_EVENT_CLASS | RUBY_EVENT_CALL | RUBY_EVENT_B_CALL, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_LINE, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_LINE, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_COVERAGE_BRANCH, Qundef);
VM_TRACE_HOOK(RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN, TOPN(0));
}
}
}

vm_method.c

@ -152,7 +152,9 @@ rb_method_definition_release(rb_method_definition_t *def, int complemented)
VM_ASSERT(complemented_count >= 0);
if (alias_count + complemented_count == 0) {
if (METHOD_DEBUG) fprintf(stderr, "-%p-%s:%d,%d (remove)\n", (void *)def, rb_id2name(def->original_id), alias_count, complemented_count);
if (METHOD_DEBUG) fprintf(stderr, "-%p-%s:%d,%d (remove)\n", (void *)def,
rb_id2name(def->original_id), alias_count, complemented_count);
VM_ASSERT(def->type == VM_METHOD_TYPE_BMETHOD ? def->body.bmethod.hooks == NULL : TRUE);
xfree(def);
}
else {
@ -277,7 +279,7 @@ rb_method_definition_set(const rb_method_entry_t *me, rb_method_definition_t *de
return;
}
case VM_METHOD_TYPE_BMETHOD:
RB_OBJ_WRITE(me, &def->body.proc, (VALUE)opts);
RB_OBJ_WRITE(me, &def->body.bmethod.proc, (VALUE)opts);
return;
case VM_METHOD_TYPE_NOTIMPLEMENTED:
setup_method_cfunc_struct(UNALIGNED_MEMBER_PTR(def, body.cfunc), rb_f_notimplement, -1);
@ -318,7 +320,9 @@ method_definition_reset(const rb_method_entry_t *me)
RB_OBJ_WRITTEN(me, Qundef, def->body.attr.location);
break;
case VM_METHOD_TYPE_BMETHOD:
RB_OBJ_WRITTEN(me, Qundef, def->body.proc);
RB_OBJ_WRITTEN(me, Qundef, def->body.bmethod.proc);
/* give up to check all in a list */
if (def->body.bmethod.hooks) rb_gc_writebarrier_remember((VALUE)me);
break;
case VM_METHOD_TYPE_REFINED:
RB_OBJ_WRITTEN(me, Qundef, def->body.refined.orig_me);
@ -579,7 +583,7 @@ rb_method_entry_make(VALUE klass, ID mid, VALUE defined_class, rb_method_visibil
iseq = def_iseq_ptr(old_def);
break;
case VM_METHOD_TYPE_BMETHOD:
iseq = rb_proc_get_iseq(old_def->body.proc, 0);
iseq = rb_proc_get_iseq(old_def->body.bmethod.proc, 0);
break;
default:
break;
@ -1521,7 +1525,7 @@ rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_defini
case VM_METHOD_TYPE_IVAR:
return d1->body.attr.id == d2->body.attr.id;
case VM_METHOD_TYPE_BMETHOD:
return RTEST(rb_equal(d1->body.proc, d2->body.proc));
return RTEST(rb_equal(d1->body.bmethod.proc, d2->body.bmethod.proc));
case VM_METHOD_TYPE_MISSING:
return d1->original_id == d2->original_id;
case VM_METHOD_TYPE_ZSUPER:
@ -1555,7 +1559,7 @@ rb_hash_method_definition(st_index_t hash, const rb_method_definition_t *def)
case VM_METHOD_TYPE_IVAR:
return rb_hash_uint(hash, def->body.attr.id);
case VM_METHOD_TYPE_BMETHOD:
return rb_hash_proc(hash, def->body.proc);
return rb_hash_proc(hash, def->body.bmethod.proc);
case VM_METHOD_TYPE_MISSING:
return rb_hash_uint(hash, def->original_id);
case VM_METHOD_TYPE_ZSUPER:

vm_trace.c

@ -47,10 +47,8 @@ typedef void (*rb_event_hook_raw_arg_func_t)(VALUE data, const rb_trace_arg_t *a
#define MAX_EVENT_NUM 32
/* called from vm.c */
void
rb_vm_trace_mark_event_hooks(rb_hook_list_t *hooks)
rb_hook_list_mark(rb_hook_list_t *hooks)
{
rb_event_hook_t *hook = hooks->hooks;
@ -60,13 +58,21 @@ rb_vm_trace_mark_event_hooks(rb_hook_list_t *hooks)
}
}
static void clean_hooks(const rb_execution_context_t *ec, rb_hook_list_t *list);
void
rb_hook_list_free(rb_hook_list_t *hooks)
{
clean_hooks(GET_EC(), hooks);
}
/* ruby_vm_event_flags management */
static void
update_global_event_hook(rb_event_flag_t vm_events)
{
rb_event_flag_t new_iseq_events = vm_events & ISEQ_TRACE_EVENTS;
rb_event_flag_t enabled_iseq_events = ruby_vm_event_enabled_flags & ISEQ_TRACE_EVENTS;
rb_event_flag_t enabled_iseq_events = ruby_vm_event_enabled_global_flags & ISEQ_TRACE_EVENTS;
if (new_iseq_events & ~enabled_iseq_events) {
/* Stop calling all JIT-ed code. Compiling trace insns is not supported for now. */
@ -79,7 +85,7 @@ update_global_event_hook(rb_event_flag_t vm_events)
}
ruby_vm_event_flags = vm_events;
ruby_vm_event_enabled_flags |= vm_events;
ruby_vm_event_enabled_global_flags |= vm_events;
rb_objspace_set_event_hook(vm_events);
}
@ -107,14 +113,26 @@ alloc_event_hook(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data,
}
static void
connect_event_hook(const rb_execution_context_t *ec, rb_event_hook_t *hook)
hook_list_connect(VALUE list_owner, rb_hook_list_t *list, rb_event_hook_t *hook, int global_p)
{
rb_hook_list_t *list = &rb_ec_vm_ptr(ec)->event_hooks;
hook->next = list->hooks;
list->hooks = hook;
list->events |= hook->events;
update_global_event_hook(list->events);
if (global_p) {
/* global hooks are root objects at GC mark. */
update_global_event_hook(list->events);
}
else {
RB_OBJ_WRITTEN(list_owner, Qundef, hook->data);
}
}
static void
connect_event_hook(const rb_execution_context_t *ec, rb_event_hook_t *hook)
{
rb_hook_list_t *list = rb_vm_global_hooks(ec);
hook_list_connect(Qundef, list, hook, TRUE);
}
static void
@ -153,7 +171,7 @@ rb_add_event_hook2(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data
}
static void
clean_hooks(rb_hook_list_t *list)
clean_hooks(const rb_execution_context_t *ec, rb_hook_list_t *list)
{
rb_event_hook_t *hook, **nextp = &list->hooks;
VM_ASSERT(list->need_clean == TRUE);
@ -172,16 +190,25 @@ clean_hooks(rb_hook_list_t *list)
}
}
update_global_event_hook(list->events);
if (list == rb_vm_global_hooks(ec)) {
/* global events */
update_global_event_hook(list->events);
}
else {
/* local events */
if (list->events == 0) {
ruby_xfree(list);
}
}
}
static void
clean_hooks_check(rb_vm_t *vm, rb_hook_list_t *list)
clean_hooks_check(const rb_execution_context_t *ec, rb_hook_list_t *list)
{
if (UNLIKELY(list->need_clean != FALSE)) {
if (vm->trace_running == 0) {
clean_hooks(list);
}
if (list->running == 0) {
clean_hooks(ec, list);
}
}
}
@ -192,7 +219,7 @@ static int
remove_event_hook(const rb_execution_context_t *ec, const rb_thread_t *filter_th, rb_event_hook_func_t func, VALUE data)
{
rb_vm_t *vm = rb_ec_vm_ptr(ec);
rb_hook_list_t *list = &vm->event_hooks;
rb_hook_list_t *list = &vm->global_hooks;
int ret = 0;
rb_event_hook_t *hook = list->hooks;
@ -209,7 +236,7 @@ remove_event_hook(const rb_execution_context_t *ec, const rb_thread_t *filter_th
hook = hook->next;
}
clean_hooks_check(vm, list);
clean_hooks_check(ec, list);
return ret;
}
@ -278,10 +305,10 @@ exec_hooks_body(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb
}
static int
exec_hooks_precheck(const rb_execution_context_t *ec, rb_vm_t *vm, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
exec_hooks_precheck(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
{
if (list->events & trace_arg->event) {
vm->trace_running++;
list->running++;
return TRUE;
}
else {
@ -290,27 +317,27 @@ exec_hooks_precheck(const rb_execution_context_t *ec, rb_vm_t *vm, rb_hook_list_
}
static void
exec_hooks_postcheck(const rb_execution_context_t *ec, rb_vm_t *vm, rb_hook_list_t *list)
exec_hooks_postcheck(const rb_execution_context_t *ec, rb_hook_list_t *list)
{
vm->trace_running--;
clean_hooks_check(vm, list);
list->running--;
clean_hooks_check(ec, list);
}
static void
exec_hooks_unprotected(const rb_execution_context_t *ec, rb_vm_t *vm, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
exec_hooks_unprotected(const rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
{
if (exec_hooks_precheck(ec, vm, list, trace_arg) == 0) return;
if (exec_hooks_precheck(ec, list, trace_arg) == 0) return;
exec_hooks_body(ec, list, trace_arg);
exec_hooks_postcheck(ec, vm, list);
exec_hooks_postcheck(ec, list);
}
static int
exec_hooks_protected(rb_execution_context_t *ec, rb_vm_t *vm, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
exec_hooks_protected(rb_execution_context_t *ec, rb_hook_list_t *list, const rb_trace_arg_t *trace_arg)
{
enum ruby_tag_type state;
volatile int raised;
if (exec_hooks_precheck(ec, vm, list, trace_arg) == 0) return 0;
if (exec_hooks_precheck(ec, list, trace_arg) == 0) return 0;
raised = rb_ec_reset_raised(ec);
@ -322,7 +349,7 @@ exec_hooks_protected(rb_execution_context_t *ec, rb_vm_t *vm, rb_hook_list_t *li
}
EC_POP_TAG();
exec_hooks_postcheck(ec, vm, list);
exec_hooks_postcheck(ec, list);
if (raised) {
rb_ec_set_raised(ec);
@ -332,20 +359,21 @@ exec_hooks_protected(rb_execution_context_t *ec, rb_vm_t *vm, rb_hook_list_t *li
}
MJIT_FUNC_EXPORTED void
rb_exec_event_hooks(rb_trace_arg_t *trace_arg, int pop_p)
rb_exec_event_hooks(rb_trace_arg_t *trace_arg, rb_hook_list_t *hooks, int pop_p)
{
rb_execution_context_t *ec = trace_arg->ec;
rb_vm_t *vm = rb_ec_vm_ptr(ec);
if (trace_arg->event & RUBY_INTERNAL_EVENT_MASK) {
if (ec->trace_arg && (ec->trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
/* skip hooks because this thread doing INTERNAL_EVENT */
if (UNLIKELY(trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
if (ec->trace_arg && (ec->trace_arg->event & RUBY_INTERNAL_EVENT_MASK)) {
/* skip hooks because this thread doing INTERNAL_EVENT */
}
else {
rb_trace_arg_t *prev_trace_arg = ec->trace_arg;
ec->trace_arg = trace_arg;
exec_hooks_unprotected(ec, vm, &vm->event_hooks, trace_arg);
ec->trace_arg = prev_trace_arg;
ec->trace_arg = trace_arg;
/* only global hooks */
exec_hooks_unprotected(ec, rb_vm_global_hooks(ec), trace_arg);
ec->trace_arg = prev_trace_arg;
}
}
else {
@ -355,15 +383,18 @@ rb_exec_event_hooks(rb_trace_arg_t *trace_arg, int pop_p)
const VALUE old_recursive = ec->local_storage_recursive_hash;
int state = 0;
/* setup */
ec->local_storage_recursive_hash = ec->local_storage_recursive_hash_for_trace;
ec->errinfo = Qnil;
ec->trace_arg = trace_arg;
state = exec_hooks_protected(ec, vm, &vm->event_hooks, trace_arg);
if (!state) {
ec->errinfo = errinfo;
}
ec->trace_arg = NULL;
/* kick hooks */
if ((state = exec_hooks_protected(ec, hooks, trace_arg)) == TAG_NONE) {
ec->errinfo = errinfo;
}
/* cleanup */
ec->trace_arg = NULL;
ec->local_storage_recursive_hash_for_trace = ec->local_storage_recursive_hash;
ec->local_storage_recursive_hash = old_recursive;
@ -392,7 +423,6 @@ rb_suppress_tracing(VALUE (*func)(VALUE), VALUE arg)
dummy_trace_arg.event = 0;
if (!ec->trace_arg) {
vm->trace_running++;
ec->trace_arg = &dummy_trace_arg;
}
@ -413,7 +443,6 @@ rb_suppress_tracing(VALUE (*func)(VALUE), VALUE arg)
if (ec->trace_arg == &dummy_trace_arg) {
ec->trace_arg = NULL;
vm->trace_running--;
}
if (state) {
@ -669,6 +698,10 @@ typedef struct rb_tp_struct {
rb_event_flag_t events;
int tracing; /* bool */
rb_thread_t *target_th;
VALUE local_target_set; /* Hash: target ->
* Qtrue (if target is iseq) or
* Qfalse (if target is bmethod)
*/
void (*func)(VALUE tpval, void *data);
void *data;
VALUE proc;
@ -680,6 +713,7 @@ tp_mark(void *ptr)
{
rb_tp_t *tp = ptr;
rb_gc_mark(tp->proc);
rb_gc_mark(tp->local_target_set);
if (tp->target_th) rb_gc_mark(tp->target_th->self);
}
@ -1087,9 +1121,12 @@ VALUE
rb_tracepoint_enable(VALUE tpval)
{
rb_tp_t *tp;
tp = tpptr(tpval);
if (tp->local_target_set != Qfalse) {
rb_raise(rb_eArgError, "can't nest-enable a targetting TracePoint");
}
if (tp->target_th) {
rb_thread_add_event_hook2(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
@ -1102,6 +1139,82 @@ rb_tracepoint_enable(VALUE tpval)
return Qundef;
}
static const rb_iseq_t *
iseq_of(VALUE target)
{
VALUE iseqv = rb_funcall(rb_cISeq, rb_intern("of"), 1, target);
if (NIL_P(iseqv)) {
rb_raise(rb_eArgError, "specified target is not supported");
}
else {
return rb_iseqw_to_iseq(iseqv);
}
}
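
As a consequence of iseq_of() above, a target that is not backed by an iseq (for example a C-implemented method) is rejected. An illustrative sketch (example code, not part of the commit):

tp = TracePoint.new(:line) {}
begin
  # Array#size is a C function, so RubyVM::InstructionSequence.of
  # returns nil for it and the target is rejected.
  tp.enable(target: [].method(:size)) {}
rescue ArgumentError => e
  p e.message   # => "specified target is not supported"
end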
const rb_method_definition_t *rb_method_def(VALUE method); /* proc.c */
static VALUE
rb_tracepoint_enable_for_target(VALUE tpval, VALUE target)
{
rb_tp_t *tp = tpptr(tpval);
const rb_iseq_t *iseq = iseq_of(target);
int n;
if (tp->tracing > 0) {
rb_raise(rb_eArgError, "can't nest-enable a targetting TracePoint");
}
VM_ASSERT(tp->local_target_set == Qfalse);
tp->local_target_set = rb_obj_hide(rb_ident_hash_new());
/* iseq */
n = rb_iseq_add_local_tracepoint_recursively(iseq, tp->events, tpval);
rb_hash_aset(tp->local_target_set, (VALUE)iseq, Qtrue);
/* bmethod */
if (rb_obj_is_method(target)) {
rb_method_definition_t *def = (rb_method_definition_t *)rb_method_def(target);
if (def->type == VM_METHOD_TYPE_BMETHOD &&
(tp->events & (RUBY_EVENT_CALL | RUBY_EVENT_RETURN))) {
def->body.bmethod.hooks = ZALLOC(rb_hook_list_t);
rb_hook_list_connect_tracepoint(target, def->body.bmethod.hooks, tpval);
rb_hash_aset(tp->local_target_set, target, Qfalse);
n++;
}
}
if (n == 0) {
rb_raise(rb_eArgError, "can not enable any hooks");
}
ruby_vm_event_local_num++;
tp->tracing = 1;
return Qnil;
}
static int
disable_local_event_iseq_i(VALUE target, VALUE iseq_p, VALUE tpval)
{
if (iseq_p) {
rb_iseq_remove_local_tracepoint_recursively((rb_iseq_t *)target, tpval);
}
else {
/* bmethod */
rb_method_definition_t *def = (rb_method_definition_t *)rb_method_def(target);
rb_hook_list_t *hooks = def->body.bmethod.hooks;
VM_ASSERT(hooks != NULL);
rb_hook_list_remove_tracepoint(hooks, tpval);
if (hooks->running == 0) {
rb_hook_list_free(def->body.bmethod.hooks);
}
def->body.bmethod.hooks = NULL;
}
return ST_CONTINUE;
}
VALUE
rb_tracepoint_disable(VALUE tpval)
{
@ -1109,16 +1222,52 @@ rb_tracepoint_disable(VALUE tpval)
tp = tpptr(tpval);
if (tp->target_th) {
rb_thread_remove_event_hook_with_data(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tpval);
if (tp->local_target_set) {
rb_hash_foreach(tp->local_target_set, disable_local_event_iseq_i, tpval);
tp->local_target_set = Qfalse;
ruby_vm_event_local_num--;
}
else {
rb_remove_event_hook_with_data((rb_event_hook_func_t)tp_call_trace, tpval);
if (tp->target_th) {
rb_thread_remove_event_hook_with_data(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tpval);
}
else {
rb_remove_event_hook_with_data((rb_event_hook_func_t)tp_call_trace, tpval);
}
}
tp->tracing = 0;
return Qundef;
}
void
rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval)
{
rb_tp_t *tp = tpptr(tpval);
rb_event_hook_t *hook = alloc_event_hook((rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
hook_list_connect(target, list, hook, FALSE);
}
void
rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval)
{
rb_event_hook_t *hook = list->hooks;
rb_event_flag_t events = 0;
while (hook) {
if (hook->data == tpval) {
hook->hook_flags |= RUBY_EVENT_HOOK_FLAG_DELETED;
list->need_clean = TRUE;
}
else {
events |= hook->events;
}
hook = hook->next;
}
list->events = events;
}
/*
* call-seq:
* trace.enable -> true or false
@ -1157,11 +1306,17 @@ rb_tracepoint_disable(VALUE tpval)
*
*/
static VALUE
tracepoint_enable_m(VALUE tpval)
tracepoint_enable_m(VALUE tpval, VALUE target)
{
rb_tp_t *tp = tpptr(tpval);
int previous_tracing = tp->tracing;
rb_tracepoint_enable(tpval);
if (NIL_P(target)) {
rb_tracepoint_enable(tpval);
}
else {
rb_tracepoint_enable_for_target(tpval, target);
}
if (rb_block_given_p()) {
return rb_ensure(rb_yield, Qundef,
@ -1207,19 +1362,25 @@ tracepoint_enable_m(VALUE tpval)
* trace.disable { p tp.lineno }
* #=> RuntimeError: access from outside
*/
static VALUE
tracepoint_disable_m(VALUE tpval)
{
rb_tp_t *tp = tpptr(tpval);
int previous_tracing = tp->tracing;
rb_tracepoint_disable(tpval);
if (rb_block_given_p()) {
return rb_ensure(rb_yield, Qundef,
if (tp->local_target_set != Qfalse) {
rb_raise(rb_eArgError, "can't disable a targetting TracePoint in a block");
}
rb_tracepoint_disable(tpval);
return rb_ensure(rb_yield, Qundef,
previous_tracing ? rb_tracepoint_enable : rb_tracepoint_disable,
tpval);
}
else {
rb_tracepoint_disable(tpval);
return previous_tracing ? Qtrue : Qfalse;
}
}
@ -1464,7 +1625,7 @@ tracepoint_stat_s(VALUE self)
rb_vm_t *vm = GET_VM();
VALUE stat = rb_hash_new();
tracepoint_stat_event_hooks(stat, vm->self, vm->event_hooks.hooks);
tracepoint_stat_event_hooks(stat, vm->self, vm->global_hooks.hooks);
/* TODO: thread local hooks */
return stat;
@ -1545,7 +1706,7 @@ Init_vm_trace(void)
*/
rb_define_singleton_method(rb_cTracePoint, "trace", tracepoint_trace_s, -1);
rb_define_method(rb_cTracePoint, "enable", tracepoint_enable_m, 0);
rb_define_method(rb_cTracePoint, "__enable", tracepoint_enable_m, 1);
rb_define_method(rb_cTracePoint, "disable", tracepoint_disable_m, 0);
rb_define_method(rb_cTracePoint, "enabled?", rb_tracepoint_enabled_p, 0);