
Avoid referring to an old value of realloc

OpenBSD RubyCI has failed with SEGV since 4bcd5981e8.
https://rubyci.org/logs/rubyci.s3.amazonaws.com/openbsd-current/ruby-master/log/20200312T223005Z.fail.html.gz

This was because `status->cc_entries` could become stale after a `realloc` call
for inlined iseqs. (A reduced sketch of the hazard follows the change summary below.)
Takashi Kokubun 2020-03-12 22:51:33 -07:00
parent 43e18c68f4
commit 0cd7be99e9
4 changed files with 38 additions and 15 deletions
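
The hazard described above, reduced to a self-contained C sketch (the `unit` and `capture` names are illustrative stand-ins, not the real MJIT structures). Holding a raw pointer into a buffer that a later `realloc` may move leaves that pointer dangling; holding an index instead stays valid, because the pointer is recomputed from the current base address at every use. That is the shape of the change below: `compile_status` now stores `cc_entries_index`, and `captured_cc_entries()` rebuilds the pointer from `compiled_iseq->jit_unit->cc_entries` on each access.

#include <stdio.h>
#include <stdlib.h>

// Illustrative stand-in for a unit that owns a growable cc_entries-like array.
struct unit {
    int *entries;   // may be moved by realloc as it grows
    size_t size;
};

// Append `n` slots and return the index of the first new slot, or -1 on failure.
// An index (unlike a saved pointer) survives later realloc calls.
static long capture(struct unit *u, size_t n)
{
    long index = (long)u->size;
    int *grown = realloc(u->entries, (u->size + n) * sizeof(*grown));
    if (grown == NULL) return -1;
    u->entries = grown;   // any previously saved address may now be stale
    u->size += n;
    return index;
}

int main(void)
{
    struct unit u = { NULL, 0 };

    // Old scheme: remember the address computed right after the first capture...
    long first = capture(&u, 4);
    int *stale = u.entries + first;
    (void)stale;   // dereferencing this after the next capture would be undefined behavior

    // ...because growing the buffer again may move it.
    long second = capture(&u, 1024);

    // New scheme: keep only the index and recompute the pointer on every use.
    int *fresh = u.entries + first;
    fresh[0] = 42;

    printf("first=%ld second=%ld value=%d\n", first, second, fresh[0]);
    free(u.entries);
    return 0;
}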


@@ -51,8 +51,8 @@ struct compile_status {
     bool local_stack_p;
     // Safely-accessible ivar cache entries copied from main thread.
     union iseq_inline_storage_entry *is_entries;
-    // Safely-accessible call cache entries captured to compiled_iseq to be marked on GC
-    const struct rb_callcache **cc_entries;
+    // Index of call cache entries captured to compiled_iseq to be marked on GC
+    int cc_entries_index;
     // A pointer to root (i.e. not inlined) iseq being compiled.
     const struct rb_iseq_constant_body *compiled_iseq;
     // Mutated optimization levels
@@ -82,6 +82,18 @@ call_data_index(CALL_DATA cd, const struct rb_iseq_constant_body *body)
     return cd - body->call_data;
 }
 
+const struct rb_callcache ** mjit_iseq_cc_entries(const struct rb_iseq_constant_body *const body);
+
+// Using this function to refer to cc_entries allocated by `mjit_capture_cc_entries`
+// instead of storing cc_entries in status directly so that we always refer to a new address
+// returned by `realloc` inside it.
+static const struct rb_callcache **
+captured_cc_entries(const struct compile_status *status)
+{
+    VM_ASSERT(status->cc_entries_index != -1);
+    return mjit_iseq_cc_entries(status->compiled_iseq) + status->cc_entries_index;
+}
+
 // Returns true if call cache is still not obsoleted and vm_cc_cme(cc)->def->type is available.
 static bool
 has_valid_method_type(CALL_CACHE cc)
@@ -277,7 +289,7 @@ compile_cancel_handler(FILE *f, const struct rb_iseq_constant_body *body, struct
     fprintf(f, "    return Qundef;\n");
 }
 
-extern const struct rb_callcache **
+extern int
 mjit_capture_cc_entries(const struct rb_iseq_constant_body *compiled_iseq, const struct rb_iseq_constant_body *captured_iseq);
 
 extern bool mjit_copy_cache_from_main_thread(const rb_iseq_t *iseq,
@@ -375,8 +387,8 @@ inlinable_iseq_p(const struct rb_iseq_constant_body *body)
         alloca(sizeof(const struct rb_iseq_constant_body *) * body->iseq_size) : NULL, \
     .is_entries = (body->is_size > 0) ? \
         alloca(sizeof(union iseq_inline_storage_entry) * body->is_size) : NULL, \
-    .cc_entries = (body->ci_size > 0) ? \
-        mjit_capture_cc_entries(status.compiled_iseq, body) : NULL, \
+    .cc_entries_index = (body->ci_size > 0) ? \
+        mjit_capture_cc_entries(status.compiled_iseq, body) : -1, \
     .compiled_iseq = status.compiled_iseq, \
     .compile_info = compile_root_p ? \
         rb_mjit_iseq_compile_info(body) : alloca(sizeof(struct rb_mjit_compile_info)) \
@@ -403,7 +415,7 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status
         if (insn == BIN(opt_send_without_block)) { // `compile_inlined_cancel_handler` supports only `opt_send_without_block`
             CALL_DATA cd = (CALL_DATA)body->iseq_encoded[pos + 1];
             const struct rb_callinfo *ci = cd->ci;
-            const struct rb_callcache *cc = status->cc_entries[call_data_index(cd, body)]; // use copy to avoid race condition
+            const struct rb_callcache *cc = captured_cc_entries(status)[call_data_index(cd, body)]; // use copy to avoid race condition
 
             const rb_iseq_t *child_iseq;
             if (has_valid_method_type(cc) &&
@@ -429,7 +441,7 @@ precompile_inlinable_iseqs(FILE *f, const rb_iseq_t *iseq, struct compile_status
                 .param_size = child_iseq->body->param.size,
                 .local_size = child_iseq->body->local_table_size
             };
-            if ((child_iseq->body->ci_size > 0 && child_status.cc_entries == NULL)
+            if ((child_iseq->body->ci_size > 0 && child_status.cc_entries_index == -1)
                 || (child_status.is_entries != NULL && !mjit_copy_cache_from_main_thread(child_iseq, child_status.is_entries))) {
                 return false;
             }
@@ -462,7 +474,7 @@ mjit_compile(FILE *f, const rb_iseq_t *iseq, const char *funcname)
 
     struct compile_status status = { .compiled_iseq = iseq->body };
     INIT_COMPILE_STATUS(status, iseq->body, true);
-    if ((iseq->body->ci_size > 0 && status.cc_entries == NULL)
+    if ((iseq->body->ci_size > 0 && status.cc_entries_index == -1)
        || (status.is_entries != NULL && !mjit_copy_cache_from_main_thread(iseq, status.is_entries))) {
        return false;
     }


@@ -1151,11 +1151,21 @@ static void mjit_copy_job_handler(void *data);
 // vm_trace.c
 int rb_workqueue_register(unsigned flags, rb_postponed_job_func_t , void *);
 
+// To see cc_entries using index returned by `mjit_capture_cc_entries` in mjit_compile.c
+const struct rb_callcache **
+mjit_iseq_cc_entries(const struct rb_iseq_constant_body *const body)
+{
+    return body->jit_unit->cc_entries;
+}
+
 // Capture cc entries of `captured_iseq` and append them to `compiled_iseq->jit_unit->cc_entries`.
 // This is needed when `captured_iseq` is inlined by `compiled_iseq` and GC needs to mark inlined cc.
 //
+// Index to refer to `compiled_iseq->jit_unit->cc_entries` is returned instead of the address
+// because old addresses may be invalidated by `realloc` later. -1 is returned on failure.
+//
 // This assumes that it's safe to reference cc without acquiring GVL.
-const struct rb_callcache **
+int
 mjit_capture_cc_entries(const struct rb_iseq_constant_body *compiled_iseq, const struct rb_iseq_constant_body *captured_iseq)
 {
     struct rb_mjit_unit *unit = compiled_iseq->jit_unit;
@@ -1164,16 +1174,17 @@ mjit_capture_cc_entries(const struct rb_iseq_constant_body *compiled_iseq, const
 
     // Allocate new cc_entries and append them to unit->cc_entries
     const struct rb_callcache **cc_entries;
+    int cc_entries_index = unit->cc_entries_size;
     if (unit->cc_entries_size == 0) {
         VM_ASSERT(unit->cc_entries == NULL);
         unit->cc_entries = cc_entries = malloc(sizeof(struct rb_callcache *) * new_entries_size);
-        if (cc_entries == NULL) return NULL;
+        if (cc_entries == NULL) return -1;
     }
     else {
         cc_entries = realloc(unit->cc_entries, sizeof(struct rb_callcache *) * new_entries_size);
-        if (cc_entries == NULL) return NULL;
+        if (cc_entries == NULL) return -1;
         unit->cc_entries = cc_entries;
-        cc_entries += unit->cc_entries_size;
+        cc_entries += cc_entries_index;
     }
     unit->cc_entries_size = new_entries_size;
 
@@ -1182,7 +1193,7 @@ mjit_capture_cc_entries(const struct rb_iseq_constant_body *compiled_iseq, const
         cc_entries[i] = captured_iseq->call_data[i].cc;
     }
 
-    return cc_entries;
+    return cc_entries_index;
 }
 
 // Copy inline cache values of `iseq` to `cc_entries` and `is_entries`.


@@ -14,7 +14,7 @@
     MAYBE_UNUSED(<%= ope.fetch(:decl) %>) = (<%= ope.fetch(:type) %>)operands[<%= i %>];
 % end
 % # compiler: Use copied cc to avoid race condition
-    const struct rb_callcache *captured_cc = status->cc_entries[call_data_index(cd, body)];
+    const struct rb_callcache *captured_cc = captured_cc_entries(status)[call_data_index(cd, body)];
 %
     if (!status->compile_info->disable_send_cache && has_valid_method_type(captured_cc)) {
         const rb_iseq_t *iseq;


@@ -57,7 +57,7 @@ switch (insn) {
 % when *send_compatible_opt_insns
 % # To avoid cancel, just emit `opt_send_without_block` instead of `opt_*` insn if call cache is populated.
 %   cd_index = insn.opes.index { |o| o.fetch(:type) == 'CALL_DATA' }
-    if (has_valid_method_type(status->cc_entries[call_data_index((CALL_DATA)operands[<%= cd_index %>], body)])) {
+    if (has_valid_method_type(captured_cc_entries(status)[call_data_index((CALL_DATA)operands[<%= cd_index %>], body)])) {
 <%= render 'mjit_compile_send', locals: { insn: opt_send_without_block } -%>
 <%= render 'mjit_compile_insn', locals: { insn: opt_send_without_block } -%>
       break;