
introduce `trace_events' info for iseq.

* vm_core.h (rb_iseq_t::aux): add `trace_events`, which records
  which trace events are currently enabled on this iseq. With this
  information, useless trace-on rewrites of an ISeq can be skipped.

* vm_trace.c (RUBY_EVENTS_TRACE_BY_ISEQ): moved to iseq.h and renamed
  to ISEQ_TRACE_EVENTS.

* iseq.h: introduce the ISEQ_USE_COMPILE_DATA iseq (imemo) flag to
  indicate that COMPILE_DATA is available. Because `aux' is a union,
  iseq->aux.trace_events is not available while this flag is set.
  * ISEQ_COMPILE_DATA() is changed from a macro to an inline function.
  * ISEQ_COMPILE_DATA_ALLOC() is added.
  * ISEQ_COMPILE_DATA_CLEAR() is added.

* iseq.c: use them.
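
The pattern this commit introduces can be sketched in isolation: cache
the event set already wired into an iseq, and make the (relatively
expensive) instruction rewrite a no-op when the requested set is
unchanged. A minimal, self-contained C sketch follows; `fake_iseq` and
`trace_set` are invented stand-ins for `rb_iseq_t` and
`rb_iseq_trace_set`, not the real structures.

#include <stdio.h>

typedef unsigned int event_flag_t;

/* Stand-in for rb_iseq_t: remembers which trace events are
 * currently wired into the encoded instruction sequence. */
struct fake_iseq {
    event_flag_t trace_events;  /* mirrors iseq->aux.trace_events */
};

static void
trace_set(struct fake_iseq *iseq, event_flag_t turnon_events)
{
    if (iseq->trace_events == turnon_events) {
        return;  /* already in the requested state: skip the rewrite */
    }
    iseq->trace_events = turnon_events;
    /* The real code now walks iseq_encoded and swaps every
     * instruction to or from its trace_ variant. */
    printf("rewrote iseq for events %#x\n", turnon_events);
}

int
main(void)
{
    struct fake_iseq iseq = { 0 };
    trace_set(&iseq, 0x1);  /* rewrites */
    trace_set(&iseq, 0x1);  /* cached: no work */
    trace_set(&iseq, 0);    /* rewrites back */
    return 0;
}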


git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@60838 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
Committed by ko1 on 2017-11-18 09:39:41 +00:00
parent 4f83ca015d
commit 26451ab3ba
5 changed files with 110 additions and 76 deletions

compile.c

@@ -9326,7 +9326,7 @@ ibf_load_iseq_complete(rb_iseq_t *iseq)
     rb_iseq_t *prev_src_iseq = load->iseq;
     load->iseq = iseq;
     ibf_load_iseq_each(load, iseq, ibf_iseq_list(load)[iseq->aux.loader.index]);
-    ISEQ_COMPILE_DATA(iseq) = NULL;
+    ISEQ_COMPILE_DATA_CLEAR(iseq);
     FL_UNSET(iseq, ISEQ_NOT_LOADED_YET);
     load->iseq = prev_src_iseq;
 }

iseq.c (68 lines changed)

@@ -120,7 +120,7 @@ rb_iseq_mark(const rb_iseq_t *iseq)
     if (FL_TEST(iseq, ISEQ_NOT_LOADED_YET)) {
         rb_gc_mark(iseq->aux.loader.obj);
     }
-    else if (ISEQ_COMPILE_DATA(iseq) != 0) {
+    else if (ISEQ_COMPILE_DATA(iseq) != NULL) {
         const struct iseq_compile_data *const compile_data = ISEQ_COMPILE_DATA(iseq);
         RUBY_MARK_UNLESS_NULL(compile_data->mark_ary);
         RUBY_MARK_UNLESS_NULL(compile_data->err_info);
@@ -305,7 +305,7 @@ prepare_iseq_build(rb_iseq_t *iseq,
     }
     RB_OBJ_WRITE(iseq, &iseq->body->mark_ary, iseq_mark_ary_create(0));
-    ISEQ_COMPILE_DATA(iseq) = ZALLOC(struct iseq_compile_data);
+    ISEQ_COMPILE_DATA_ALLOC(iseq);
     RB_OBJ_WRITE(iseq, &ISEQ_COMPILE_DATA(iseq)->err_info, err_info);
     RB_OBJ_WRITE(iseq, &ISEQ_COMPILE_DATA(iseq)->mark_ary, rb_ary_tmp_new(3));
@@ -341,7 +341,7 @@ finish_iseq_build(rb_iseq_t *iseq)
 {
     struct iseq_compile_data *data = ISEQ_COMPILE_DATA(iseq);
     VALUE err = data->err_info;
-    ISEQ_COMPILE_DATA(iseq) = 0;
+    ISEQ_COMPILE_DATA_CLEAR(iseq);
     compile_data_free(data);
     if (RTEST(err)) {
@@ -351,8 +351,9 @@ finish_iseq_build(rb_iseq_t *iseq)
         rb_exc_raise(err);
     }
-    if (ruby_vm_event_flags) {
-        rb_iseq_trace_set(iseq, ruby_vm_event_flags);
+    iseq->aux.trace_events = 0;
+    if (ruby_vm_event_flags & ISEQ_TRACE_EVENTS) {
+        rb_iseq_trace_set(iseq, ruby_vm_event_flags & ISEQ_TRACE_EVENTS);
     }
     return Qtrue;
 }
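
(Note: because `aux' is a union that held `compile_data' until the
ISEQ_COMPILE_DATA_CLEAR just above, `trace_events' starts out holding
whatever bits the compile-data pointer left behind; zeroing it first
gives rb_iseq_trace_set a trustworthy baseline for its new early-return
comparison.)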
@@ -2321,34 +2322,41 @@ rb_iseq_defined_string(enum defined_type type)
 void
 rb_iseq_trace_set(const rb_iseq_t *iseq, rb_event_flag_t turnon_events)
 {
-    unsigned int i;
-    VALUE *iseq_encoded = (VALUE *)iseq->body->iseq_encoded;
-#if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
-    VALUE *code = rb_iseq_original_iseq(iseq);
-    const void * const *table = rb_vm_get_insns_address_table();
-#else
-    const VALUE *code = iseq->body->iseq_encoded;
-#endif
+    VM_ASSERT((turnon_events & ~ISEQ_TRACE_EVENTS) == 0);
-    for (i=0; i<iseq->body->iseq_size;) {
-        int insn = (int)code[i];
-        rb_event_flag_t events = rb_iseq_event_flags(iseq, i);
-        /* code represents before transformation */
-        VM_ASSERT(insn < VM_INSTRUCTION_SIZE/2);
-        if (events & turnon_events) {
-            if (!TRACE_INSN_P(insn, iseq_encoded[i])) {
-                iseq_encoded[i] = INSN_CODE(insn + VM_INSTRUCTION_SIZE/2);
-            }
-        }
-        else if (TRACE_INSN_P(insn, iseq_encoded[i])) {
-            iseq_encoded[i] = INSN_CODE(insn);
-        }
-        i += insn_len(insn);
+    if (iseq->aux.trace_events == turnon_events) {
+        return;
+    }
+    else {
+        unsigned int i;
+        VALUE *iseq_encoded = (VALUE *)iseq->body->iseq_encoded;
+#if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
+        VALUE *code = rb_iseq_original_iseq(iseq);
+        const void * const *table = rb_vm_get_insns_address_table();
+#else
+        const VALUE *code = iseq->body->iseq_encoded;
+#endif
+        ((rb_iseq_t *)iseq)->aux.trace_events = turnon_events;
+        for (i=0; i<iseq->body->iseq_size;) {
+            int insn = (int)code[i];
+            rb_event_flag_t events = rb_iseq_event_flags(iseq, i);
+            /* code represents before transformation */
+            VM_ASSERT(insn < VM_INSTRUCTION_SIZE/2);
+            if (events & turnon_events) {
+                if (!TRACE_INSN_P(insn, iseq_encoded[i])) {
+                    iseq_encoded[i] = INSN_CODE(insn + VM_INSTRUCTION_SIZE/2);
+                }
+            }
+            else if (TRACE_INSN_P(insn, iseq_encoded[i])) {
+                iseq_encoded[i] = INSN_CODE(insn);
+            }
+            i += insn_len(insn);
+        }
+        /* clear for debugging: ISEQ_ORIGINAL_ISEQ_CLEAR(iseq); */
+    }
 }
 static int
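
The rewrite above relies on a table-layout convention visible in its
assertions: the second half of the generated instruction table holds
the trace_ twins of the first half, so switching an instruction site
to tracing is a constant offset. A hedged sketch of that convention
with an invented four-entry table (the real table has
VM_INSTRUCTION_SIZE entries):

/* Invented 4-entry opcode table; trace_ variants occupy the upper half,
 * mirroring VM_ASSERT(insn < VM_INSTRUCTION_SIZE/2) above. */
enum { INSN_FOO, INSN_BAR, TRACE_INSN_FOO, TRACE_INSN_BAR, TABLE_SIZE };

static int to_trace(int insn)   { return insn + TABLE_SIZE / 2; }  /* like INSN_CODE(insn + VM_INSTRUCTION_SIZE/2) */
static int from_trace(int insn) { return insn - TABLE_SIZE / 2; }  /* back to the plain variant */
static int is_trace(int insn)   { return insn >= TABLE_SIZE / 2; } /* like TRACE_INSN_P */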

iseq.h (100 lines changed)

@@ -85,7 +85,72 @@ ISEQ_ORIGINAL_ISEQ_ALLOC(const rb_iseq_t *iseq, long size)
     return (VALUE *)RSTRING_PTR(str);
 }
-#define ISEQ_COMPILE_DATA(iseq) (iseq)->aux.compile_data
+#define ISEQ_TRACE_EVENTS (RUBY_EVENT_LINE  | \
+                           RUBY_EVENT_CLASS | \
+                           RUBY_EVENT_END   | \
+                           RUBY_EVENT_CALL  | \
+                           RUBY_EVENT_RETURN| \
+                           RUBY_EVENT_B_CALL| \
+                           RUBY_EVENT_B_RETURN)
+#define ISEQ_NOT_LOADED_YET   IMEMO_FL_USER1
+#define ISEQ_USE_COMPILE_DATA IMEMO_FL_USER2
+struct iseq_compile_data {
+    /* GC is needed */
+    const VALUE err_info;
+    VALUE mark_ary;
+    const VALUE catch_table_ary;	/* Array */
+    /* GC is not needed */
+    struct iseq_label_data *start_label;
+    struct iseq_label_data *end_label;
+    struct iseq_label_data *redo_label;
+    const rb_iseq_t *current_block;
+    VALUE ensure_node;
+    VALUE for_iseq;
+    struct iseq_compile_data_ensure_node_stack *ensure_node_stack;
+    int loopval_popped;	/* used by NODE_BREAK */
+    int cached_const;
+    struct iseq_compile_data_storage *storage_head;
+    struct iseq_compile_data_storage *storage_current;
+    int last_line;
+    int last_coverable_line;
+    int label_no;
+    int node_level;
+    unsigned int ci_index;
+    unsigned int ci_kw_index;
+    const rb_compile_option_t *option;
+    struct rb_id_table *ivar_cache_table;
+#if SUPPORT_JOKE
+    st_table *labels_table;
+#endif
+};
+static inline struct iseq_compile_data *
+ISEQ_COMPILE_DATA(const rb_iseq_t *iseq)
+{
+    if (iseq->flags & ISEQ_USE_COMPILE_DATA) {
+        return iseq->aux.compile_data;
+    }
+    else {
+        return NULL;
+    }
+}
+static inline void
+ISEQ_COMPILE_DATA_ALLOC(rb_iseq_t *iseq)
+{
+    iseq->flags |= ISEQ_USE_COMPILE_DATA;
+    iseq->aux.compile_data = ZALLOC(struct iseq_compile_data);
+}
+static inline void
+ISEQ_COMPILE_DATA_CLEAR(rb_iseq_t *iseq)
+{
+    iseq->flags &= ~ISEQ_USE_COMPILE_DATA;
+    iseq->aux.compile_data = NULL;
+}
 static inline rb_iseq_t *
 iseq_imemo_alloc(void)
@@ -93,8 +158,6 @@ iseq_imemo_alloc(void)
     return (rb_iseq_t *)rb_imemo_new(imemo_iseq, 0, 0, 0, 0);
 }
-#define ISEQ_NOT_LOADED_YET IMEMO_FL_USER1
 VALUE iseq_ibf_dump(const rb_iseq_t *iseq, VALUE opt);
 void ibf_load_iseq_complete(rb_iseq_t *iseq);
 const rb_iseq_t *iseq_ibf_load(VALUE str);
@@ -213,37 +276,6 @@ struct iseq_compile_data_storage {
 #define SIZEOF_ISEQ_COMPILE_DATA_STORAGE \
     (sizeof(struct iseq_compile_data_storage) - 1)
-struct iseq_compile_data {
-    /* GC is needed */
-    const VALUE err_info;
-    VALUE mark_ary;
-    const VALUE catch_table_ary;	/* Array */
-    /* GC is not needed */
-    struct iseq_label_data *start_label;
-    struct iseq_label_data *end_label;
-    struct iseq_label_data *redo_label;
-    const rb_iseq_t *current_block;
-    VALUE ensure_node;
-    VALUE for_iseq;
-    struct iseq_compile_data_ensure_node_stack *ensure_node_stack;
-    int loopval_popped;	/* used by NODE_BREAK */
-    int cached_const;
-    struct iseq_compile_data_storage *storage_head;
-    struct iseq_compile_data_storage *storage_current;
-    int last_line;
-    int last_coverable_line;
-    int label_no;
-    int node_level;
-    unsigned int ci_index;
-    unsigned int ci_kw_index;
-    const rb_compile_option_t *option;
-    struct rb_id_table *ivar_cache_table;
-#if SUPPORT_JOKE
-    st_table *labels_table;
-#endif
-};
 /* defined? */
 enum defined_type {
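
The new inline accessors exist because `aux' is a union inside
rb_iseq_struct (see the vm_core.h hunk below): `compile_data' and
`trace_events' share storage, so the ISEQ_USE_COMPILE_DATA imemo flag
acts as a discriminator for which member is currently live. A
self-contained sketch of that pattern, with hypothetical names:

#include <stdlib.h>

#define USE_COMPILE_DATA 0x1u  /* discriminator bit, like ISEQ_USE_COMPILE_DATA */

struct compile_data { int placeholder; };

struct fake_iseq {
    unsigned int flags;
    union {
        struct compile_data *compile_data;  /* live while the flag is set */
        unsigned int trace_events;          /* live while the flag is clear */
    } aux;
};

static struct compile_data *
get_compile_data(const struct fake_iseq *iseq)
{
    /* Reading the inactive union member would yield garbage,
     * so every access is gated on the discriminator. */
    return (iseq->flags & USE_COMPILE_DATA) ? iseq->aux.compile_data : NULL;
}

static void
compile_data_alloc(struct fake_iseq *iseq)
{
    iseq->flags |= USE_COMPILE_DATA;
    iseq->aux.compile_data = calloc(1, sizeof(struct compile_data));
}

static void
compile_data_clear(struct fake_iseq *iseq)
{
    /* As in ISEQ_COMPILE_DATA_CLEAR, freeing happens elsewhere;
     * this only flips the union back to the trace_events role. */
    iseq->flags &= ~USE_COMPILE_DATA;
    iseq->aux.compile_data = NULL;
}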

vm_core.h

@@ -426,6 +426,8 @@ struct rb_iseq_struct {
             VALUE obj;
             int index;
         } loader;
+
+        rb_event_flag_t trace_events;
     } aux;
 };

vm_trace.c

@@ -61,23 +61,15 @@ rb_vm_trace_mark_event_hooks(rb_hook_list_t *hooks)
 /* ruby_vm_event_flags management */
-#define RUBY_EVENTS_TRACE_BY_ISEQ (RUBY_EVENT_LINE  | \
-                                   RUBY_EVENT_CLASS | \
-                                   RUBY_EVENT_END   | \
-                                   RUBY_EVENT_CALL  | \
-                                   RUBY_EVENT_RETURN| \
-                                   RUBY_EVENT_B_CALL| \
-                                   RUBY_EVENT_B_RETURN)
 static void
 update_global_event_hook(rb_event_flag_t vm_events)
 {
-    rb_event_flag_t new_iseq_events = vm_events & RUBY_EVENTS_TRACE_BY_ISEQ;
-    rb_event_flag_t cur_iseq_events = ruby_vm_event_flags & RUBY_EVENTS_TRACE_BY_ISEQ;
+    rb_event_flag_t new_iseq_events = vm_events & ISEQ_TRACE_EVENTS;
+    rb_event_flag_t cur_iseq_events = ruby_vm_event_flags & ISEQ_TRACE_EVENTS;
     if (new_iseq_events > cur_iseq_events) {
         /* write all ISeqs iff new events are added */
-        rb_iseq_trace_set_all(vm_events);
+        rb_iseq_trace_set_all(new_iseq_events);
     }
     ruby_vm_event_flags = vm_events;
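
The `new_iseq_events > cur_iseq_events' guard is a plain numeric
comparison used as a cheap "events were added" test, so the global
rewrite runs only when the iseq-traced event mask grows. A minimal
sketch of the shape of this check, with invented names standing in
for ruby_vm_event_flags and rb_iseq_trace_set_all:

typedef unsigned int event_flag_t;

#define TRACE_BY_ISEQ_MASK 0x7fu  /* stand-in for ISEQ_TRACE_EVENTS */

static event_flag_t global_event_flags;  /* stand-in for ruby_vm_event_flags */

static void
rewrite_all_iseqs(event_flag_t events)
{
    (void)events;  /* the real rb_iseq_trace_set_all revisits every iseq */
}

static void
update_hook(event_flag_t vm_events)
{
    event_flag_t new_iseq_events = vm_events & TRACE_BY_ISEQ_MASK;
    event_flag_t cur_iseq_events = global_event_flags & TRACE_BY_ISEQ_MASK;

    /* Rewrite everything only when the mask grows; when events are
     * turned off, any trace_ instructions left behind are assumed to
     * be harmless (just slower) until a later per-iseq rewrite. */
    if (new_iseq_events > cur_iseq_events) {
        rewrite_all_iseqs(new_iseq_events);
    }
    global_event_flags = vm_events;
}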