
Remove tracecoverage instructions

The instructions were used only for branch coverage; branch coverage
now uses the trace framework instead [Feature #14104].

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@65225 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
mame 2018-10-20 10:45:48 +00:00
parent e05e77fd6d
commit 6c9a705032
7 changed files with 49 additions and 28 deletions
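
As the commit message notes, branch coverage is now recorded through trace events rather than a dedicated instruction; the user-facing entry point is unchanged, namely the Coverage module's branch mode. A minimal sketch of exercising that feature, assuming only the stock Coverage API from Ruby 2.5+ (the temp file and the `sign` method are made up for illustration):

require "coverage"
require "tempfile"

# Hypothetical target script with a single if/else branch.
src = Tempfile.new(["target", ".rb"])
src.write(<<~RUBY)
  def sign(n)
    if n >= 0 then :non_negative else :negative end
  end
  sign(3)
RUBY
src.close

Coverage.start(branches: true)  # branch mode, available since Ruby 2.5
load src.path
result = Coverage.result        # { "<path>" => { branches: { ... } } }
pp result[src.path][:branches]

Each branch site maps to a counter; at the VM level this commit records which counter a given PC belongs to in pc2branchindex instead of embedding the counter index in a tracecoverage operand.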

compile.c

@@ -91,6 +91,7 @@ typedef struct iseq_adjust_data {
typedef struct iseq_trace_data {
LINK_ELEMENT link;
rb_event_flag_t event;
+long data;
} TRACE;
struct ensure_range {
@@ -259,7 +260,9 @@ struct iseq_compile_data_ensure_node_stack {
ADD_ELEM((seq), (LINK_ELEMENT *) new_insn_send(iseq, (line), (id), (VALUE)(argc), (block), (VALUE)(flag), (keywords)))
#define ADD_TRACE(seq, event) \
-ADD_ELEM((seq), (LINK_ELEMENT *)new_trace_body(iseq, (event)))
+ADD_ELEM((seq), (LINK_ELEMENT *)new_trace_body(iseq, (event), 0))
+#define ADD_TRACE_WITH_DATA(seq, event, data) \
+ADD_ELEM((seq), (LINK_ELEMENT *)new_trace_body(iseq, (event), (data)))
#define DECL_BRANCH_BASE(branches, first_line, first_column, last_line, last_column, type) \
@@ -291,7 +294,8 @@ struct iseq_compile_data_ensure_node_stack {
rb_ary_push(branches, INT2FIX(last_line)); \
rb_ary_push(branches, INT2FIX(last_column)); \
rb_ary_push(branches, INT2FIX(counter_idx)); \
-ADD_INSN2((seq), (first_line), tracecoverage, INT2FIX(RUBY_EVENT_COVERAGE_BRANCH), INT2FIX(counter_idx)); \
+ADD_TRACE_WITH_DATA(seq, RUBY_EVENT_COVERAGE_BRANCH, counter_idx); \
+ADD_INSN(seq, last_line, nop); \
} \
} while (0)
@@ -478,7 +482,7 @@ static int calc_sp_depth(int depth, INSN *iobj);
static INSN *new_insn_body(rb_iseq_t *iseq, int line_no, enum ruby_vminsn_type insn_id, int argc, ...);
static LABEL *new_label_body(rb_iseq_t *iseq, long line);
static ADJUST *new_adjust_body(rb_iseq_t *iseq, LABEL *label, int line);
-static TRACE *new_trace_body(rb_iseq_t *iseq, rb_event_flag_t event);
+static TRACE *new_trace_body(rb_iseq_t *iseq, rb_event_flag_t event, long data);
static int iseq_compile_each(rb_iseq_t *iseq, LINK_ANCHOR *anchor, const NODE *n, int);
@@ -1081,13 +1085,14 @@ debug_list(ISEQ_ARG_DECLARE LINK_ANCHOR *const anchor)
#endif
static TRACE *
-new_trace_body(rb_iseq_t *iseq, rb_event_flag_t event)
+new_trace_body(rb_iseq_t *iseq, rb_event_flag_t event, long data)
{
TRACE *trace = compile_data_alloc_trace(iseq);
trace->link.type = ISEQ_ELEMENT_TRACE;
trace->link.next = NULL;
trace->event = event;
+trace->data = data;
return trace;
}
@@ -1995,6 +2000,7 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
LINK_ELEMENT *list;
VALUE *generated_iseq;
rb_event_flag_t events = 0;
+long data = 0;
int insn_num, code_index, insns_info_index, sp = 0;
int stack_max = fix_sp_depth(iseq, anchor);
@@ -2011,16 +2017,24 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
INSN *iobj = (INSN *)list;
/* update sp */
sp = calc_sp_depth(sp, iobj);
-code_index += insn_data_length(iobj);
insn_num++;
-if (ISEQ_COVERAGE(iseq) && ISEQ_LINE_COVERAGE(iseq) &&
-(events & RUBY_EVENT_COVERAGE_LINE) &&
-!(rb_get_coverage_mode() & COVERAGE_TARGET_ONESHOT_LINES)) {
-int line = iobj->insn_info.line_no;
-RARRAY_ASET(ISEQ_LINE_COVERAGE(iseq), line - 1, INT2FIX(0));
+if (ISEQ_COVERAGE(iseq)) {
+if (ISEQ_LINE_COVERAGE(iseq) && (events & RUBY_EVENT_COVERAGE_LINE) &&
+!(rb_get_coverage_mode() & COVERAGE_TARGET_ONESHOT_LINES)) {
+int line = iobj->insn_info.line_no;
+RARRAY_ASET(ISEQ_LINE_COVERAGE(iseq), line - 1, INT2FIX(0));
+}
+if (ISEQ_BRANCH_COVERAGE(iseq) && (events & RUBY_EVENT_COVERAGE_BRANCH)) {
+while (RARRAY_LEN(ISEQ_PC2BRANCHINDEX(iseq)) <= code_index) {
+rb_ary_push(ISEQ_PC2BRANCHINDEX(iseq), Qnil);
+}
+RARRAY_ASET(ISEQ_PC2BRANCHINDEX(iseq), code_index, INT2FIX(data));
+}
}
+code_index += insn_data_length(iobj);
iobj->insn_info.events |= events;
events = 0;
+data = 0;
break;
break;
}
case ISEQ_ELEMENT_LABEL:
@@ -2034,6 +2048,7 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
{
TRACE *trace = (TRACE *)list;
events |= trace->event;
+if (trace->event & RUBY_EVENT_COVERAGE_BRANCH) data = trace->data;
break;
}
case ISEQ_ELEMENT_ADJUST:

insns.def

@@ -688,19 +688,6 @@ checktype
ret = (TYPE(val) == (int)type) ? Qtrue : Qfalse;
}
-/* fire a coverage event (currently, this is used for line coverage and branch coverage) */
-DEFINE_INSN
-tracecoverage
-(rb_num_t nf, VALUE data)
-()
-()
-{
-rb_event_flag_t flag = (rb_event_flag_t)nf;
-vm_dtrace(flag, ec);
-EXEC_EVENT_HOOK(ec, flag, GET_SELF(), 0, 0, 0 /* id and klass are resolved at callee */, data);
-}
/**********************************************************/
/* deal with control flow 1: class/module */
/**********************************************************/

iseq.c

@@ -214,6 +214,7 @@ rb_iseq_mark(const rb_iseq_t *iseq)
}
rb_gc_mark(body->variable.coverage);
+rb_gc_mark(body->variable.pc2branchindex);
rb_gc_mark(body->location.label);
rb_gc_mark(body->location.base_label);
rb_gc_mark(body->location.pathobj);
@@ -468,6 +469,8 @@ prepare_iseq_build(rb_iseq_t *iseq,
}
}
ISEQ_COVERAGE_SET(iseq, coverage);
+if (coverage && ISEQ_BRANCH_COVERAGE(iseq))
+ISEQ_PC2BRANCHINDEX_SET(iseq, rb_ary_tmp_new(0));
return Qtrue;
}
@@ -1922,7 +1925,7 @@ rb_iseq_disasm_insn(VALUE ret, const VALUE *code, size_t pos,
{
rb_event_flag_t events = rb_iseq_event_flags(iseq, pos);
if (events) {
-str = rb_str_catf(str, "[%s%s%s%s%s%s%s%s%s]",
+str = rb_str_catf(str, "[%s%s%s%s%s%s%s%s%s%s%s]",
events & RUBY_EVENT_LINE ? "Li" : "",
events & RUBY_EVENT_CLASS ? "Cl" : "",
events & RUBY_EVENT_END ? "En" : "",
@@ -1931,7 +1934,10 @@ rb_iseq_disasm_insn(VALUE ret, const VALUE *code, size_t pos,
events & RUBY_EVENT_C_CALL ? "Cc" : "",
events & RUBY_EVENT_C_RETURN ? "Cr" : "",
events & RUBY_EVENT_B_CALL ? "Bc" : "",
events & RUBY_EVENT_B_RETURN ? "Br" : "");
events & RUBY_EVENT_B_RETURN ? "Br" : "",
events & RUBY_EVENT_COVERAGE_LINE ? "Cli" : "",
events & RUBY_EVENT_COVERAGE_BRANCH ? "Cbr" : ""
);
}
}

iseq.h

@@ -32,6 +32,9 @@ rb_call_info_kw_arg_bytes(int keyword_len)
#define ISEQ_LINE_COVERAGE(iseq) RARRAY_AREF(ISEQ_COVERAGE(iseq), COVERAGE_INDEX_LINES)
#define ISEQ_BRANCH_COVERAGE(iseq) RARRAY_AREF(ISEQ_COVERAGE(iseq), COVERAGE_INDEX_BRANCHES)
+#define ISEQ_PC2BRANCHINDEX(iseq) iseq->body->variable.pc2branchindex
+#define ISEQ_PC2BRANCHINDEX_SET(iseq, h) RB_OBJ_WRITE(iseq, &iseq->body->variable.pc2branchindex, h)
#define ISEQ_FLIP_CNT(iseq) (iseq)->body->variable.flip_count
static inline rb_snum_t
@@ -72,7 +75,8 @@ ISEQ_ORIGINAL_ISEQ_ALLOC(const rb_iseq_t *iseq, long size)
RUBY_EVENT_RETURN| \
RUBY_EVENT_B_CALL| \
RUBY_EVENT_B_RETURN| \
-RUBY_EVENT_COVERAGE_LINE)
+RUBY_EVENT_COVERAGE_LINE| \
+RUBY_EVENT_COVERAGE_BRANCH)
#define ISEQ_NOT_LOADED_YET IMEMO_FL_USER1
#define ISEQ_USE_COMPILE_DATA IMEMO_FL_USER2

thread.c

@@ -5254,11 +5254,13 @@ update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
static void
update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
{
-VALUE coverage = rb_iseq_coverage(GET_EC()->cfp->iseq);
+const rb_control_frame_t *cfp = GET_EC()->cfp;
+VALUE coverage = rb_iseq_coverage(cfp->iseq);
if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
if (branches) {
-long idx = FIX2INT(trace_arg->data), count;
+long pc = cfp->pc - cfp->iseq->body->iseq_encoded - 1;
+long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
VALUE counters = RARRAY_AREF(branches, 1);
VALUE num = RARRAY_AREF(counters, idx);
count = FIX2LONG(num) + 1;

vm_core.h

@@ -449,6 +449,7 @@ struct rb_iseq_constant_body {
struct {
rb_snum_t flip_count;
VALUE coverage;
+VALUE pc2branchindex;
VALUE *original_iseq;
} variable;

vm_insnhelper.c

@@ -3895,6 +3895,12 @@ vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *p
EXEC_EVENT_HOOK(ec, RUBY_EVENT_COVERAGE_LINE, GET_SELF(), 0, 0, 0, Qundef);
reg_cfp->pc--;
}
+if (events & RUBY_EVENT_COVERAGE_BRANCH) {
+reg_cfp->pc++;
+vm_dtrace(RUBY_EVENT_COVERAGE_BRANCH, ec);
+EXEC_EVENT_HOOK(ec, RUBY_EVENT_COVERAGE_BRANCH, GET_SELF(), 0, 0, 0, Qundef);
+reg_cfp->pc--;
+}
if ((event = (events & (RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN))) != 0) {
VM_ASSERT(event == RUBY_EVENT_END ||
event == RUBY_EVENT_RETURN ||