diff --git a/compile.c b/compile.c
index c424092ae8..efa001a7d4 100644
--- a/compile.c
+++ b/compile.c
@@ -260,15 +260,6 @@ struct iseq_compile_data_ensure_node_stack {
 #define ADD_TRACE(seq, event) \
   ADD_ELEM((seq), (LINK_ELEMENT *)new_trace_body(iseq, (event)))
 
-#define ADD_TRACE_LINE_COVERAGE(seq, line) \
-  do { \
-      if (ISEQ_COVERAGE(iseq) && \
-          ISEQ_LINE_COVERAGE(iseq) && \
-          (line) > 0) { \
-          RARRAY_ASET(ISEQ_LINE_COVERAGE(iseq), (line) - 1, INT2FIX(0)); \
-          ADD_INSN2((seq), (line), tracecoverage, INT2FIX(RUBY_EVENT_COVERAGE_LINE), INT2FIX(line)); \
-      } \
-  } while (0)
 
 
 #define DECL_BRANCH_BASE(branches, first_line, first_column, last_line, last_column, type) \
@@ -2021,6 +2012,10 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
             sp = calc_sp_depth(sp, iobj);
             code_index += insn_data_length(iobj);
             insn_num++;
+            if (ISEQ_COVERAGE(iseq) && ISEQ_LINE_COVERAGE(iseq) && (events & RUBY_EVENT_COVERAGE_LINE)) {
+                int line = iobj->insn_info.line_no;
+                RARRAY_ASET(ISEQ_LINE_COVERAGE(iseq), line - 1, INT2FIX(0));
+            }
             iobj->insn_info.events |= events;
             events = 0;
             break;
@@ -5829,9 +5824,12 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *node, in
     }
     else {
         if (node->flags & NODE_FL_NEWLINE) {
+            int event = RUBY_EVENT_LINE;
             ISEQ_COMPILE_DATA(iseq)->last_line = line;
-            ADD_TRACE_LINE_COVERAGE(ret, line);
-            ADD_TRACE(ret, RUBY_EVENT_LINE);
+            if (ISEQ_COVERAGE(iseq) && ISEQ_LINE_COVERAGE(iseq)) {
+                event |= RUBY_EVENT_COVERAGE_LINE;
+            }
+            ADD_TRACE(ret, event);
         }
     }
 
@@ -7462,18 +7460,6 @@ iseq_compile_each0(rb_iseq_t *iseq, LINK_ANCHOR *const ret, const NODE *node, in
         return COMPILE_NG;
     }
 
-    /* remove tracecoverage instruction if there is no relevant instruction */
-    if (IS_TRACE(ret->last) && ((TRACE*) ret->last)->event == RUBY_EVENT_LINE) {
-        LINK_ELEMENT *insn = ret->last->prev;
-        if (IS_INSN(insn) &&
-            IS_INSN_ID(insn, tracecoverage) &&
-            FIX2LONG(OPERAND_AT(insn, 0)) == RUBY_EVENT_COVERAGE_LINE
-            ) {
-            ELEM_REMOVE(insn); /* remove tracecovearge */
-            RARRAY_ASET(ISEQ_LINE_COVERAGE(iseq), line - 1, Qnil);
-        }
-    }
-
     debug_node_end();
     return COMPILE_OK;
 }
diff --git a/iseq.h b/iseq.h
index 2062c33bb4..851eaeabbc 100644
--- a/iseq.h
+++ b/iseq.h
@@ -72,7 +72,8 @@ ISEQ_ORIGINAL_ISEQ_ALLOC(const rb_iseq_t *iseq, long size)
                              RUBY_EVENT_CALL  | \
                              RUBY_EVENT_RETURN| \
                              RUBY_EVENT_B_CALL| \
-                             RUBY_EVENT_B_RETURN)
+                             RUBY_EVENT_B_RETURN| \
+                             RUBY_EVENT_COVERAGE_LINE)
 
 #define ISEQ_NOT_LOADED_YET IMEMO_FL_USER1
 #define ISEQ_USE_COMPILE_DATA IMEMO_FL_USER2
diff --git a/test/coverage/test_coverage.rb b/test/coverage/test_coverage.rb
index 0b150b8509..8cc6253061 100644
--- a/test/coverage/test_coverage.rb
+++ b/test/coverage/test_coverage.rb
@@ -194,7 +194,7 @@ class TestCoverage < Test::Unit::TestCase
 
   def test_line_coverage_for_multiple_lines
     result = {
-      :lines => [1, nil, nil, nil, nil, nil, 1, nil, 1, nil, nil, nil, nil, nil, 1, nil, 1, 1, 1, nil, nil, nil, nil, nil, 1]
+      :lines => [nil, 1, nil, nil, nil, 1, nil, nil, nil, 1, nil, 1, nil, nil, nil, nil, 1, 1, nil, 1, nil, nil, nil, nil, 1]
     }
     assert_coverage(<<~"end;", { lines: true }, result) # Bug #14191
       FOO = [
diff --git a/thread.c b/thread.c
index b093a9a2e4..9757f9c0e9 100644
--- a/thread.c
+++ b/thread.c
@@ -5256,7 +5256,7 @@ update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
     if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
         VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
         if (lines) {
-            long line = FIX2INT(trace_arg->data) - 1;
+            long line = rb_sourceline() - 1;
             long count;
             VALUE num;
             if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
diff --git a/vm_insnhelper.c b/vm_insnhelper.c
index 9dd8abf520..f82b0e075b 100644
--- a/vm_insnhelper.c
+++ b/vm_insnhelper.c
@@ -3883,6 +3883,12 @@ vm_trace(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, const VALUE *p
             EXEC_EVENT_HOOK(ec, RUBY_EVENT_LINE, GET_SELF(), 0, 0, 0, Qundef);
             reg_cfp->pc--;
         }
+        if (events & RUBY_EVENT_COVERAGE_LINE) {
+            reg_cfp->pc++;
+            vm_dtrace(RUBY_EVENT_COVERAGE_LINE, ec);
+            EXEC_EVENT_HOOK(ec, RUBY_EVENT_COVERAGE_LINE, GET_SELF(), 0, 0, 0, Qundef);
+            reg_cfp->pc--;
+        }
         if (event = (events & (RUBY_EVENT_END | RUBY_EVENT_RETURN | RUBY_EVENT_B_RETURN))) {
             VM_ASSERT(event == RUBY_EVENT_END ||
                       event == RUBY_EVENT_RETURN ||
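
Note: the patch above drops the dedicated per-line "tracecoverage" instruction
and instead carries line coverage as a RUBY_EVENT_COVERAGE_LINE flag on the
ordinary trace event, which vm_trace() then dispatches alongside
RUBY_EVENT_LINE. The Ruby-level Coverage API is unaffected. A minimal sketch
of exercising line coverage the way the updated test does (the file name
"target.rb" and this driver script are hypothetical stand-ins, not part of
the patch):

    # coverage_demo.rb -- hypothetical driver, not part of this patch
    require "coverage"

    Coverage.start(lines: true)               # track line execution counts only
    load File.expand_path("target.rb", __dir__)
    result = Coverage.result                  # => { "/abs/path/target.rb" => { lines: [...] } }

    # Each slot in :lines is the hit count for the corresponding source line;
    # nil marks lines that are not coverage targets, matching the nil entries
    # in the expected array of test_line_coverage_for_multiple_lines above.
    result.each do |path, data|
      p path => data[:lines]
    end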