
Make backtrace generation work outward from current frame

This fixes multiple bugs found in the partial backtrace
optimization added in 3b24b7914c.
These bugs occur when passing a start argument to caller
where the start argument lands on an iseq frame without a pc.

Before this commit, the following code results in the same
line being printed twice, both for the #each method.

```ruby
def a; [1].group_by { b } end
def b; puts(caller(2, 1).first, caller(3, 1).first) end
a
```

After this commit and in Ruby 2.7, the lines are different,
with the first line being for #each and the second for #group_by.
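
For reference, the corrected output has roughly this shape
(the path and line number are illustrative, not from a real run):

```
prog.rb:1:in `each'
prog.rb:1:in `group_by'
```

Both entries point at the line defining #a, since a cfunc frame
takes its file and line from the nearest iseq frame with a pc.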

Before this commit, the following code can either segfault or
result in an infinite loop:

```ruby
def foo
  caller_locations(2, 1).inspect # segfault
  caller_locations(2, 1)[0].path # infinite loop
end

1.times.map { 1.times.map { foo } }
```

After this commit, this code works correctly.

This commit completely refactors the backtrace handling.
Instead of processing the backtrace from the outermost
frame working in, process it from the innermost frame
working out.  This is much faster for partial backtraces,
since you only access the control frames you need to in
order to construct the backtrace.
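
As a rough illustration (using a throwaway helper, not code from
this commit), the win comes from stopping the walk as soon as the
requested slice of frames is filled:

```ruby
# Hypothetical demo: a deep stack where only a tiny slice is requested.
def deep(n, &block)
  n.zero? ? yield : deep(n - 1, &block)
end

deep(1000) do
  # The old code walked inward from the outermost of ~1000 frames
  # before it could return this one entry; the new code starts at
  # the current frame and stops immediately.
  p caller_locations(1, 1)
end
```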

To handle cfunc frames in the new design, they start
out with no location information.  We increment a counter
for each cfunc frame added.  When an iseq frame with pc
is accessed, after adding the iseq backtrace location,
we use that location for all of the directly preceding
cfunc backtrace locations.

If the last backtrace line is a cfunc frame, we continue
scanning for iseq frames until the end control frame, and
use the location information from the first one for the
trailing cfunc frames in the backtrace.
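
The effect is observable from Ruby: a cfunc entry such as
Integer#times reports the file and line of the adjacent iseq
frame.  A small demo (the output in the comments is illustrative):

```ruby
def foo
  # `times' is a cfunc frame with no location of its own; it is
  # back-filled with the file/line of the iseq frame for <main>.
  puts caller(1, 3)
end

1.times { foo }
# prog.rb:7:in `block in <main>'
# prog.rb:7:in `times'
# prog.rb:7:in `<main>'
```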

As only rb_ec_partial_backtrace_object uses the new
backtrace implementation, remove all of the function
pointers and inline the functions.  This makes the
process easier to understand.

Restore the Ruby 2.7 implementation of backtrace_each and
use it for all the other functions that called
backtrace_each other than rb_ec_partial_backtrace_object.
All other cases requested the entire backtrace, so there
is no advantage to using the new algorithm for those.
Additionally, there are implicit assumptions in the other
code that the backtrace processing works inward instead
of outward.

Remove the cfunc/iseq union in rb_backtrace_location_t,
and remove the prev_loc member for cfunc.  Both cfunc and
iseq types can now have iseq and pc entries, so the
location information can be accessed the same way for each.
This avoids the need for an extra backtrace location entry
to store an iseq backtrace location if the final entry in
the backtrace is a cfunc. This is also what fixes the
segfault and infinite loop issues in the above bugs.

Here's Ruby pseudocode for the new algorithm, where start
and length are the arguments to caller or caller_locations:

```ruby
end_cf = VM.end_control_frame.next
cf = VM.start_control_frame
size = VM.num_control_frames - 2
bt = []
cfunc_counter = 0

if length.nil? || length > size
  length = size
end

while cf != end_cf && bt.size != length
  if cf.iseq?
    if cf.instruction_pointer?
      if start > 0
        start -= 1
      else
        bt << cf.iseq_backtrace_entry
        cfunc_counter.times do |i|
          bt[-1 - i].loc = cf.loc
        end
        cfunc_counter = 0
      end
    end
  elsif cf.cfunc?
    if start > 0
      start -= 1
    else
      bt << cf.cfunc_backtrace_entry
      cfunc_counter += 1
    end
  end

  cf = cf.prev
end

if cfunc_counter > 0
  while cf != end_cf
    if cf.iseq? && cf.instruction_pointer?
      cfunc_counter.times do |i|
        bt[-1 - i].loc = cf.loc
      end
      break
    end
    cf = cf.prev
  end
end
```

With the following benchmark, which uses a call depth of
around 100 (common in many Ruby applications):

```ruby
require 'benchmark/ips'  # Benchmark.ips is provided by the benchmark-ips gem

class T
  def test(depth, &block)
    if depth == 0
      yield self
    else
      test(depth - 1, &block)
    end
  end
  def array
    Array.new
  end
  def first
    caller_locations(1, 1)
  end
  def full
    caller_locations
  end
end

t = T.new
t.test((ARGV.first || 100).to_i) do
  Benchmark.ips do |x|
    x.report('caller_loc(1, 1)') { t.first }
    x.report('caller_loc') { t.full }
    x.report('Array.new') { t.array }
    x.compare!
  end
end
```

Results before commit:

```
Calculating -------------------------------------
    caller_loc(1, 1)    281.159k (± 0.7%) i/s -      1.426M in   5.073055s
          caller_loc     15.836k (± 2.1%) i/s -     79.450k in   5.019426s
           Array.new      1.852M (± 2.5%) i/s -      9.296M in   5.022511s

Comparison:
           Array.new:  1852297.5 i/s
    caller_loc(1, 1):   281159.1 i/s - 6.59x  (± 0.00) slower
          caller_loc:    15835.9 i/s - 116.97x  (± 0.00) slower
```

Results after commit:

```
Calculating -------------------------------------
    caller_loc(1, 1)    562.286k (± 0.8%) i/s -      2.858M in   5.083249s
          caller_loc     16.402k (± 1.0%) i/s -     83.200k in   5.072963s
           Array.new      1.853M (± 0.1%) i/s -      9.278M in   5.007523s

Comparison:
           Array.new:  1852776.5 i/s
    caller_loc(1, 1):   562285.6 i/s - 3.30x  (± 0.00) slower
          caller_loc:    16402.3 i/s - 112.96x  (± 0.00) slower
```

This shows that the speed of caller_locations(1, 1) has roughly
doubled, and the speed of caller_locations with no arguments
has improved slightly.  So this new algorithm is significantly
faster, much simpler, and fixes bugs in the previous algorithm.

Fixes [Bug #18053]
Jeremy Evans 2021-07-21 16:44:56 -07:00
parent d868c8f08f
commit 1a05dc03f9
Notes: git 2021-08-07 02:15:29 +09:00
2 changed files with 216 additions and 302 deletions

test/ruby/test_backtrace.rb

@@ -170,6 +170,34 @@ class TestBacktrace < Test::Unit::TestCase
     end
   end
 
+  def test_caller_limit_cfunc_iseq_no_pc
+    def self.a; [1].group_by { b } end
+    def self.b
+      [
+        caller_locations(2, 1).first.base_label,
+        caller_locations(3, 1).first.base_label
+      ]
+    end
+    assert_equal({["each", "group_by"]=>[1]}, a)
+  end
+
+  def test_caller_location_inspect_cfunc_iseq_no_pc
+    def self.foo
+      @res = caller_locations(2, 1).inspect
+    end
+    @line = __LINE__ + 1
+    1.times.map { 1.times.map { foo } }
+    assert_equal("[\"#{__FILE__}:#{@line}:in `times'\"]", @res)
+  end
+
+  def test_caller_location_path_cfunc_iseq_no_pc
+    def self.foo
+      @res = caller_locations(2, 1)[0].path
+    end
+    1.times.map { 1.times.map { foo } }
+    assert_equal(__FILE__, @res)
+  end
+
   def test_caller_locations
     cs = caller(0); locs = caller_locations(0).map{|loc|
       loc.to_s

vm_backtrace.c

@@ -119,16 +119,9 @@ typedef struct rb_backtrace_location_struct {
         LOCATION_TYPE_CFUNC,
     } type;
 
-    union {
-        struct {
-            const rb_iseq_t *iseq;
-            const VALUE *pc;
-        } iseq;
-        struct {
-            ID mid;
-            struct rb_backtrace_location_struct *prev_loc;
-        } cfunc;
-    } body;
+    const rb_iseq_t *iseq;
+    const VALUE *pc;
+    ID mid;
 } rb_backtrace_location_t;
 
 struct valued_frame_info {
@@ -148,9 +141,13 @@ location_mark_entry(rb_backtrace_location_t *fi)
 {
     switch (fi->type) {
       case LOCATION_TYPE_ISEQ:
-        rb_gc_mark_movable((VALUE)fi->body.iseq.iseq);
+        rb_gc_mark_movable((VALUE)fi->iseq);
         break;
       case LOCATION_TYPE_CFUNC:
+        if (fi->iseq) {
+            rb_gc_mark_movable((VALUE)fi->iseq);
+        }
+        break;
       default:
         break;
     }
@@ -188,10 +185,10 @@ location_lineno(rb_backtrace_location_t *loc)
 {
     switch (loc->type) {
       case LOCATION_TYPE_ISEQ:
-        return calc_lineno(loc->body.iseq.iseq, loc->body.iseq.pc);
+        return calc_lineno(loc->iseq, loc->pc);
       case LOCATION_TYPE_CFUNC:
-        if (loc->body.cfunc.prev_loc) {
-            return location_lineno(loc->body.cfunc.prev_loc);
+        if (loc->iseq && loc->pc) {
+            return calc_lineno(loc->iseq, loc->pc);
         }
         return 0;
       default:
@@ -219,9 +216,9 @@ location_label(rb_backtrace_location_t *loc)
 {
     switch (loc->type) {
       case LOCATION_TYPE_ISEQ:
-        return loc->body.iseq.iseq->body->location.label;
+        return loc->iseq->body->location.label;
       case LOCATION_TYPE_CFUNC:
-        return rb_id2str(loc->body.cfunc.mid);
+        return rb_id2str(loc->mid);
       default:
         rb_bug("location_label: unreachable");
         UNREACHABLE;
@@ -266,9 +263,9 @@ location_base_label(rb_backtrace_location_t *loc)
 {
     switch (loc->type) {
       case LOCATION_TYPE_ISEQ:
-        return loc->body.iseq.iseq->body->location.base_label;
+        return loc->iseq->body->location.base_label;
       case LOCATION_TYPE_CFUNC:
-        return rb_id2str(loc->body.cfunc.mid);
+        return rb_id2str(loc->mid);
       default:
         rb_bug("location_base_label: unreachable");
         UNREACHABLE;
@@ -291,10 +288,10 @@ location_path(rb_backtrace_location_t *loc)
 {
     switch (loc->type) {
       case LOCATION_TYPE_ISEQ:
-        return rb_iseq_path(loc->body.iseq.iseq);
+        return rb_iseq_path(loc->iseq);
       case LOCATION_TYPE_CFUNC:
-        if (loc->body.cfunc.prev_loc) {
-            return location_path(loc->body.cfunc.prev_loc);
+        if (loc->iseq) {
+            return rb_iseq_path(loc->iseq);
         }
         return Qnil;
       default:
@@ -325,10 +322,10 @@ location_node_id(rb_backtrace_location_t *loc)
 {
     switch (loc->type) {
       case LOCATION_TYPE_ISEQ:
-        return calc_node_id(loc->body.iseq.iseq, loc->body.iseq.pc);
+        return calc_node_id(loc->iseq, loc->pc);
       case LOCATION_TYPE_CFUNC:
-        if (loc->body.cfunc.prev_loc) {
-            return location_node_id(loc->body.cfunc.prev_loc);
+        if (loc->iseq && loc->pc) {
+            return calc_node_id(loc->iseq, loc->pc);
         }
         return -1;
       default:
@@ -355,10 +352,10 @@ location_realpath(rb_backtrace_location_t *loc)
 {
     switch (loc->type) {
       case LOCATION_TYPE_ISEQ:
-        return rb_iseq_realpath(loc->body.iseq.iseq);
+        return rb_iseq_realpath(loc->iseq);
       case LOCATION_TYPE_CFUNC:
-        if (loc->body.cfunc.prev_loc) {
-            return location_realpath(loc->body.cfunc.prev_loc);
+        if (loc->iseq) {
+            return rb_iseq_realpath(loc->iseq);
         }
         return Qnil;
       default:
@@ -404,21 +401,21 @@ location_to_str(rb_backtrace_location_t *loc)
 
     switch (loc->type) {
       case LOCATION_TYPE_ISEQ:
-        file = rb_iseq_path(loc->body.iseq.iseq);
-        name = loc->body.iseq.iseq->body->location.label;
+        file = rb_iseq_path(loc->iseq);
+        name = loc->iseq->body->location.label;
 
-        lineno = calc_lineno(loc->body.iseq.iseq, loc->body.iseq.pc);
+        lineno = calc_lineno(loc->iseq, loc->pc);
         break;
       case LOCATION_TYPE_CFUNC:
-        if (loc->body.cfunc.prev_loc) {
-            file = rb_iseq_path(loc->body.cfunc.prev_loc->body.iseq.iseq);
-            lineno = location_lineno(loc->body.cfunc.prev_loc);
+        if (loc->iseq && loc->pc) {
+            file = rb_iseq_path(loc->iseq);
+            lineno = calc_lineno(loc->iseq, loc->pc);
         }
         else {
            file = GET_VM()->progname;
            lineno = 0;
         }
-        name = rb_id2str(loc->body.cfunc.mid);
+        name = rb_id2str(loc->mid);
         break;
       default:
         rb_bug("location_to_str: unreachable");
@@ -479,9 +476,13 @@ location_update_entry(rb_backtrace_location_t *fi)
 {
     switch (fi->type) {
      case LOCATION_TYPE_ISEQ:
-        fi->body.iseq.iseq = (rb_iseq_t*)rb_gc_location((VALUE)fi->body.iseq.iseq);
+        fi->iseq = (rb_iseq_t*)rb_gc_location((VALUE)fi->iseq);
        break;
      case LOCATION_TYPE_CFUNC:
+        if (fi->iseq) {
+            fi->iseq = (rb_iseq_t*)rb_gc_location((VALUE)fi->iseq);
+        }
+        break;
      default:
        break;
    }
@@ -548,157 +549,6 @@ backtrace_size(const rb_execution_context_t *ec)
     return start_cfp - last_cfp + 1;
 }
 
-static int
-backtrace_each(const rb_execution_context_t *ec,
-               ptrdiff_t from_last,
-               long num_frames,
-               void (*init)(void *arg, size_t size),
-               void (*iter_iseq)(void *arg, const rb_control_frame_t *cfp),
-               void (*iter_cfunc)(void *arg, const rb_control_frame_t *cfp, ID mid),
-               void (*iter_skip)(void *arg, const rb_control_frame_t *cfp),
-               void *arg)
-{
-    const rb_control_frame_t *last_cfp = ec->cfp;
-    const rb_control_frame_t *start_cfp = RUBY_VM_END_CONTROL_FRAME(ec);
-    const rb_control_frame_t *cfp;
-    ptrdiff_t size, real_size, i, j, last, start = 0;
-    int ret = 0;
-
-    // In the case the thread vm_stack or cfp is not initialized, there is no backtrace.
-    if (start_cfp == NULL) {
-        init(arg, 0);
-        return ret;
-    }
-
-    /*                <- start_cfp (end control frame)
-     *  top frame (dummy)
-     *  top frame (dummy)
-     *  top frame     <- start_cfp
-     *  top frame
-     *  ...
-     *  2nd frame     <- lev:0
-     *  current frame <- ec->cfp
-     */
-
-    start_cfp =
-      RUBY_VM_NEXT_CONTROL_FRAME(
-          RUBY_VM_NEXT_CONTROL_FRAME(start_cfp)); /* skip top frames */
-
-    if (start_cfp < last_cfp) {
-        real_size = size = last = 0;
-    }
-    else {
-        /* Ensure we don't look at frames beyond the ones requested */
-        for (; from_last > 0 && start_cfp >= last_cfp; from_last--) {
-            last_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(last_cfp);
-        }
-
-        real_size = size = start_cfp - last_cfp + 1;
-
-        if (from_last > size) {
-            size = last = 0;
-            ret = 1;
-        }
-        else if (num_frames >= 0 && num_frames < size) {
-            if (from_last + num_frames > size) {
-                size -= from_last;
-                last = size;
-            }
-            else {
-                start = size - from_last - num_frames;
-                size = num_frames;
-                last = start + size;
-            }
-        }
-        else {
-            size -= from_last;
-            last = size;
-        }
-    }
-
-    init(arg, size);
-
-    /* If a limited number of frames is requested, scan the VM stack for
-     * ignored frames (iseq without pc).  Then adjust the start for the
-     * backtrace to account for skipped frames.
-     */
-    if (start > 0 && num_frames >= 0 && num_frames < real_size) {
-        ptrdiff_t ignored_frames;
-        bool ignored_frames_before_start = false;
-        for (i=0, j=0, cfp = start_cfp; i<last && j<real_size; i++, j++, cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp)) {
-            if (cfp->iseq && !cfp->pc) {
-                if (j < start)
-                    ignored_frames_before_start = true;
-                else
-                    i--;
-            }
-        }
-        ignored_frames = j - i;
-
-        if (ignored_frames) {
-            if (ignored_frames_before_start) {
-                /* There were ignored frames before start.  So just decrementing
-                 * start for ignored frames could still result in not all desired
-                 * frames being captured.
-                 *
-                 * First, scan to the CFP of the desired start frame.
-                 *
-                 * Then scan backwards to previous frames, skipping the number of
-                 * frames ignored after start and any additional ones before start,
-                 * so the number of desired frames will be correctly captured.
-                 */
-                for (i=0, j=0, cfp = start_cfp; i<last && j<real_size && j < start; i++, j++, cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp)) {
-                    /* nothing */
-                }
-                for (; start > 0 && ignored_frames > 0 && j > 0; j--, ignored_frames--, start--, cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp)) {
-                    if (cfp->iseq && !cfp->pc) {
-                        ignored_frames++;
-                    }
-                }
-            }
-            else {
-                /* No ignored frames before start frame, just decrement start */
-                start -= ignored_frames;
-            }
-        }
-    }
-
-    for (i=0, j=0, cfp = start_cfp; i<last && j<real_size; i++, j++, cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp)) {
-        if (j < start) {
-            if (iter_skip) {
-                iter_skip(arg, cfp);
-            }
-            continue;
-        }
-
-        /* fprintf(stderr, "cfp: %d\n", (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size) - cfp); */
-        if (cfp->iseq) {
-            if (cfp->pc) {
-                iter_iseq(arg, cfp);
-            }
-            else {
-                i--;
-            }
-        }
-        else if (RUBYVM_CFUNC_FRAME_P(cfp)) {
-            const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
-            ID mid = me->def->original_id;
-
-            iter_cfunc(arg, cfp, mid);
-        }
-    }
-
-    return ret;
-}
-
-struct bt_iter_arg {
-    rb_backtrace_t *bt;
-    VALUE btobj;
-    rb_backtrace_location_t *prev_loc;
-    const rb_control_frame_t *prev_cfp;
-    rb_backtrace_location_t *init_loc;
-};
-
 static bool
 is_internal_location(const rb_control_frame_t *cfp)
 {
@@ -709,113 +559,103 @@ is_internal_location(const rb_control_frame_t *cfp)
 }
 
 static void
-bt_init(void *ptr, size_t size)
+bt_update_cfunc_loc(unsigned long cfunc_counter, rb_backtrace_location_t *cfunc_loc, const rb_iseq_t *iseq, const VALUE *pc)
 {
-    struct bt_iter_arg *arg = (struct bt_iter_arg *)ptr;
-    arg->btobj = backtrace_alloc(rb_cBacktrace);
-    GetCoreDataFromValue(arg->btobj, rb_backtrace_t, arg->bt);
-    arg->bt->backtrace = ZALLOC_N(rb_backtrace_location_t, size+1);
-    arg->bt->backtrace_size = 1;
-    arg->prev_cfp = NULL;
-    arg->init_loc = &arg->bt->backtrace[size];
-}
-
-static void
-bt_iter_iseq(void *ptr, const rb_control_frame_t *cfp)
-{
-    const rb_iseq_t *iseq = cfp->iseq;
-    const VALUE *pc = cfp->pc;
-    struct bt_iter_arg *arg = (struct bt_iter_arg *)ptr;
-    rb_backtrace_location_t *loc = &arg->bt->backtrace[arg->bt->backtrace_size++-1];
-    loc->type = LOCATION_TYPE_ISEQ;
-    loc->body.iseq.iseq = iseq;
-    loc->body.iseq.pc = pc;
-    arg->prev_loc = loc;
-}
-
-static void
-bt_iter_iseq_skip_internal(void *ptr, const rb_control_frame_t *cfp)
-{
-    struct bt_iter_arg *arg = (struct bt_iter_arg *)ptr;
-    rb_backtrace_location_t *loc = &arg->bt->backtrace[arg->bt->backtrace_size++-1];
-
-    if (!is_internal_location(cfp)) {
-        loc->type = LOCATION_TYPE_ISEQ;
-        loc->body.iseq.iseq = cfp->iseq;
-        loc->body.iseq.pc = cfp->pc;
-        arg->prev_loc = loc;
-    }
-    else if (arg->prev_cfp) {
-        loc->type = LOCATION_TYPE_ISEQ;
-        loc->body.iseq.iseq = arg->prev_cfp->iseq;
-        loc->body.iseq.pc = arg->prev_cfp->pc;
-        arg->prev_loc = loc;
-    }
-    else {
-        rb_bug("No non-internal backtrace entry before an <internal: backtrace entry");
-    }
-}
-
-static void
-bt_iter_cfunc(void *ptr, const rb_control_frame_t *cfp, ID mid)
-{
-    struct bt_iter_arg *arg = (struct bt_iter_arg *)ptr;
-    rb_backtrace_location_t *loc = &arg->bt->backtrace[arg->bt->backtrace_size++-1];
-    loc->type = LOCATION_TYPE_CFUNC;
-    loc->body.cfunc.mid = mid;
-    if (arg->prev_loc) {
-        loc->body.cfunc.prev_loc = arg->prev_loc;
-    }
-    else if (arg->prev_cfp) {
-        const rb_iseq_t *iseq = arg->prev_cfp->iseq;
-        const VALUE *pc = arg->prev_cfp->pc;
-        arg->init_loc->type = LOCATION_TYPE_ISEQ;
-        arg->init_loc->body.iseq.iseq = iseq;
-        arg->init_loc->body.iseq.pc = pc;
-        loc->body.cfunc.prev_loc = arg->prev_loc = arg->init_loc;
-    }
-    else {
-        loc->body.cfunc.prev_loc = NULL;
-    }
-}
-
-static void
-bt_iter_skip(void *ptr, const rb_control_frame_t *cfp)
-{
-    if (cfp->iseq && cfp->pc) {
-        ((struct bt_iter_arg *)ptr)->prev_cfp = cfp;
-    }
-}
-
-static void
-bt_iter_skip_skip_internal(void *ptr, const rb_control_frame_t *cfp)
-{
-    if (cfp->iseq && cfp->pc) {
-        if (!is_internal_location(cfp)) {
-            ((struct bt_iter_arg *) ptr)->prev_cfp = cfp;
-        }
+    for (; cfunc_counter > 0; cfunc_counter--, cfunc_loc--) {
+        cfunc_loc->iseq = iseq;
+        cfunc_loc->pc = pc;
     }
 }
 
 static VALUE
-rb_ec_partial_backtrace_object(const rb_execution_context_t *ec, long lev, long n, int* level_too_large, bool skip_internal)
+rb_ec_partial_backtrace_object(const rb_execution_context_t *ec, long start_frame, long num_frames, int* start_too_large, bool skip_internal)
 {
-    struct bt_iter_arg arg;
-    int too_large;
-    arg.prev_loc = 0;
-
-    too_large = backtrace_each(ec,
-                               lev,
-                               n,
-                               bt_init,
-                               skip_internal ? bt_iter_iseq_skip_internal : bt_iter_iseq,
-                               bt_iter_cfunc,
-                               skip_internal ? bt_iter_skip_skip_internal : bt_iter_skip,
-                               &arg);
-
-    if (level_too_large) *level_too_large = too_large;
-
-    return arg.btobj;
+    const rb_control_frame_t *cfp = ec->cfp;
+    const rb_control_frame_t *end_cfp = RUBY_VM_END_CONTROL_FRAME(ec);
+    ptrdiff_t size;
+    rb_backtrace_t *bt;
+    VALUE btobj = backtrace_alloc(rb_cBacktrace);
+    rb_backtrace_location_t *loc;
+    unsigned long cfunc_counter = 0;
+    GetCoreDataFromValue(btobj, rb_backtrace_t, bt);
+
+    // In the case the thread vm_stack or cfp is not initialized, there is no backtrace.
+    if (end_cfp == NULL) {
+        num_frames = 0;
+    }
+    else {
+        end_cfp = RUBY_VM_NEXT_CONTROL_FRAME(end_cfp);
+
+        /*
+         *  top frame (dummy) <- RUBY_VM_END_CONTROL_FRAME
+         *  top frame (dummy) <- end_cfp
+         *  top frame         <- main script
+         *  top frame
+         *  ...
+         *  2nd frame         <- lev:0
+         *  current frame     <- ec->cfp
+         */
+
+        size = end_cfp - cfp + 1;
+        if (size < 0) {
+            num_frames = 0;
+        }
+        else if (num_frames < 0 || num_frames > size) {
+            num_frames = size;
+        }
+    }
+
+    bt->backtrace = ZALLOC_N(rb_backtrace_location_t, num_frames);
+    bt->backtrace_size = 0;
+    if (num_frames == 0) {
+        if (start_too_large) *start_too_large = 0;
+        return btobj;
+    }
+
+    for (; cfp != end_cfp && (bt->backtrace_size < num_frames); cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp)) {
+        if (cfp->iseq) {
+            if (cfp->pc) {
+                if (start_frame > 0) {
+                    start_frame--;
+                }
+                else if (!skip_internal || !is_internal_location(cfp)) {
+                    const rb_iseq_t *iseq = cfp->iseq;
+                    const VALUE *pc = cfp->pc;
+                    loc = &bt->backtrace[bt->backtrace_size++];
+                    loc->type = LOCATION_TYPE_ISEQ;
+                    loc->iseq = iseq;
+                    loc->pc = pc;
+                    bt_update_cfunc_loc(cfunc_counter, loc-1, iseq, pc);
+                    cfunc_counter = 0;
+                }
+            }
+        }
+        else if (RUBYVM_CFUNC_FRAME_P(cfp)) {
+            if (start_frame > 0) {
+                start_frame--;
+            }
+            else {
+                loc = &bt->backtrace[bt->backtrace_size++];
+                loc->type = LOCATION_TYPE_CFUNC;
+                loc->iseq = NULL;
+                loc->pc = NULL;
+                loc->mid = rb_vm_frame_method_entry(cfp)->def->original_id;
+                cfunc_counter++;
+            }
+        }
+    }
+
+    if (cfunc_counter > 0) {
+        for (; cfp != end_cfp; cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp)) {
+            if (cfp->iseq && cfp->pc && (!skip_internal || !is_internal_location(cfp))) {
+                bt_update_cfunc_loc(cfunc_counter, loc, cfp->iseq, cfp->pc);
+                break;
+            }
+        }
+    }
+    if (start_too_large) *start_too_large = (start_frame > 0 ? -1 : 0);
+    return btobj;
 }
 
 MJIT_FUNC_EXPORTED VALUE
@@ -830,10 +670,10 @@ backtrace_collect(rb_backtrace_t *bt, VALUE (*func)(rb_backtrace_location_t *, void *), void *arg)
     VALUE btary;
     int i;
 
-    btary = rb_ary_new2(bt->backtrace_size-1);
+    btary = rb_ary_new2(bt->backtrace_size);
 
-    for (i=0; i<bt->backtrace_size-1; i++) {
-        rb_backtrace_location_t *loc = &bt->backtrace[bt->backtrace_size - 2 - i];
+    for (i=0; i<bt->backtrace_size; i++) {
+        rb_backtrace_location_t *loc = &bt->backtrace[i];
         rb_ary_push(btary, func(loc, arg));
     }
@@ -876,13 +716,13 @@ rb_backtrace_use_iseq_first_lineno_for_last_location(VALUE self)
     rb_backtrace_location_t *loc;
 
     GetCoreDataFromValue(self, rb_backtrace_t, bt);
-    VM_ASSERT(bt->backtrace_size > 1);
+    VM_ASSERT(bt->backtrace_size > 0);
 
-    loc = &bt->backtrace[bt->backtrace_size - 2];
+    loc = &bt->backtrace[0];
     VM_ASSERT(loc->type == LOCATION_TYPE_ISEQ);
 
-    loc->body.iseq.pc = NULL; // means location.first_lineno
+    loc->pc = NULL; // means location.first_lineno
 }
 
 static VALUE
@@ -957,6 +797,64 @@ rb_ec_backtrace_location_ary(const rb_execution_context_t *ec, long lev, long n,
 
 /* make old style backtrace directly */
 
+static void
+backtrace_each(const rb_execution_context_t *ec,
+               void (*init)(void *arg, size_t size),
+               void (*iter_iseq)(void *arg, const rb_control_frame_t *cfp),
+               void (*iter_cfunc)(void *arg, const rb_control_frame_t *cfp, ID mid),
+               void *arg)
+{
+    const rb_control_frame_t *last_cfp = ec->cfp;
+    const rb_control_frame_t *start_cfp = RUBY_VM_END_CONTROL_FRAME(ec);
+    const rb_control_frame_t *cfp;
+    ptrdiff_t size, i;
+
+    // In the case the thread vm_stack or cfp is not initialized, there is no backtrace.
+    if (start_cfp == NULL) {
+        init(arg, 0);
+        return;
+    }
+
+    /*                <- start_cfp (end control frame)
+     *  top frame (dummy)
+     *  top frame (dummy)
+     *  top frame     <- start_cfp
+     *  top frame
+     *  ...
+     *  2nd frame     <- lev:0
+     *  current frame <- ec->cfp
+     */
+
+    start_cfp =
+      RUBY_VM_NEXT_CONTROL_FRAME(
+          RUBY_VM_NEXT_CONTROL_FRAME(start_cfp)); /* skip top frames */
+
+    if (start_cfp < last_cfp) {
+        size = 0;
+    }
+    else {
+        size = start_cfp - last_cfp + 1;
+    }
+
+    init(arg, size);
+
+    /* SDR(); */
+    for (i=0, cfp = start_cfp; i<size; i++, cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp)) {
+        /* fprintf(stderr, "cfp: %d\n", (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size) - cfp); */
+        if (cfp->iseq) {
+            if (cfp->pc) {
+                iter_iseq(arg, cfp);
+            }
+        }
+        else if (RUBYVM_CFUNC_FRAME_P(cfp)) {
+            const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
+            ID mid = me->def->original_id;
+
+            iter_cfunc(arg, cfp, mid);
+        }
+    }
+}
+
 struct oldbt_arg {
     VALUE filename;
     int lineno;
@@ -1019,12 +917,9 @@ vm_backtrace_print(FILE *fp)
 
     arg.func = oldbt_print;
     arg.data = (void *)fp;
     backtrace_each(GET_EC(),
-                   BACKTRACE_START,
-                   ALL_BACKTRACE_LINES,
                    oldbt_init,
                    oldbt_iter_iseq,
                    oldbt_iter_cfunc,
-                   NULL,
                    &arg);
 }
@@ -1055,12 +950,9 @@ rb_backtrace_print_as_bugreport(void)
     arg.data = (int *)&i;
 
     backtrace_each(GET_EC(),
-                   BACKTRACE_START,
-                   ALL_BACKTRACE_LINES,
                    oldbt_init,
                    oldbt_iter_iseq,
                    oldbt_iter_cfunc,
-                   NULL,
                    &arg);
 }
@@ -1101,12 +993,9 @@ rb_backtrace_each(VALUE (*iter)(VALUE recv, VALUE str), VALUE output)
 
     arg.func = oldbt_print_to;
     arg.data = &parg;
     backtrace_each(GET_EC(),
-                   BACKTRACE_START,
-                   ALL_BACKTRACE_LINES,
                    oldbt_init,
                    oldbt_iter_iseq,
                    oldbt_iter_cfunc,
-                   NULL,
                    &arg);
 }
@@ -1458,12 +1347,9 @@ collect_caller_bindings(const rb_execution_context_t *ec)
     data.ary = rb_ary_new();
 
     backtrace_each(ec,
-                   BACKTRACE_START,
-                   ALL_BACKTRACE_LINES,
                    collect_caller_bindings_init,
                    collect_caller_bindings_iseq,
                    collect_caller_bindings_cfunc,
-                   NULL,
                    &data);
 
     result = rb_ary_reverse(data.ary);