#include "ruby/ruby.h"
#include "internal.h"
#include "vm_sync.h"
#include "builtin.h"

#include "yjit_asm.h"
#include "yjit_utils.h"
#include "yjit_iface.h"
#include "yjit_core.h"
#include "yjit_codegen.h"

// Maximum number of versions per block
#define MAX_VERSIONS 4
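
// Note: when this limit is reached, callers fall back to compiling a single
// generic version (see the MAX_VERSIONS checks in branch_stub_hit() and
// gen_direct_jump() below), trading type specialization for bounded code size.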

/*
Get an operand for the adjusted stack pointer address
*/
x86opnd_t
ctx_sp_opnd(ctx_t* ctx, int32_t offset_bytes)
{
    int32_t offset = (ctx->sp_offset * sizeof(VALUE)) + offset_bytes;
    return mem_opnd(64, REG_SP, offset);
}

/*
Push one new value on the temp stack
Return a pointer to the new stack top
*/
x86opnd_t
ctx_stack_push(ctx_t* ctx, val_type_t type)
{
    // Keep track of the type of the value
    if (ctx->stack_size < MAX_TEMP_TYPES) {
        ctx->temp_mapping[ctx->stack_size] = MAP_STACK;
        ctx->temp_types[ctx->stack_size] = type;
    }

    ctx->stack_size += 1;
    ctx->sp_offset += 1;

    // SP points just above the topmost value
    int32_t offset = (ctx->sp_offset - 1) * sizeof(VALUE);
    return mem_opnd(64, REG_SP, offset);
}
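
// Illustrative use from code generation (a sketch only; the exact instruction
// sequence is an assumption modeled on the putobject-style handlers in
// yjit_codegen.c):
//
//     x86opnd_t stack_top = ctx_stack_push(ctx, TYPE_FIXNUM);
//     mov(cb, REG0, imm_opnd(INT2FIX(42)));
//     mov(cb, stack_top, REG0);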

/*
Push the self value on the stack
*/
x86opnd_t
ctx_stack_push_self(ctx_t* ctx)
{
    // Keep track of the type of the value
    if (ctx->stack_size < MAX_TEMP_TYPES) {
        ctx->temp_mapping[ctx->stack_size] = MAP_SELF;
        ctx->temp_types[ctx->stack_size] = ctx->self_type;
    }

    ctx->stack_size += 1;
    ctx->sp_offset += 1;

    // SP points just above the topmost value
    int32_t offset = (ctx->sp_offset - 1) * sizeof(VALUE);
    return mem_opnd(64, REG_SP, offset);
}

/*
Push a local variable on the stack
*/
x86opnd_t
ctx_stack_push_local(ctx_t* ctx, size_t local_idx)
{
    // Keep track of the type of the value
    if (ctx->stack_size < MAX_TEMP_TYPES && local_idx < MAX_LOCAL_TYPES) {
        ctx->temp_mapping[ctx->stack_size] = (temp_mapping_t){ .kind = TEMP_LOCAL, .idx = local_idx };
    }

    ctx->stack_size += 1;
    ctx->sp_offset += 1;

    // SP points just above the topmost value
    int32_t offset = (ctx->sp_offset - 1) * sizeof(VALUE);
    return mem_opnd(64, REG_SP, offset);
}

/*
Pop N values off the stack
Return a pointer to the stack top before the pop operation
*/
x86opnd_t
ctx_stack_pop(ctx_t* ctx, size_t n)
{
    RUBY_ASSERT(n <= ctx->stack_size);

    // SP points just above the topmost value
    int32_t offset = (ctx->sp_offset - 1) * sizeof(VALUE);
    x86opnd_t top = mem_opnd(64, REG_SP, offset);

    // Clear the types of the popped values
    for (size_t i = 0; i < n; ++i)
    {
        size_t idx = ctx->stack_size - i - 1;
        if (idx < MAX_TEMP_TYPES) {
            ctx->temp_types[idx] = TYPE_UNKNOWN;
            ctx->temp_mapping[idx] = MAP_STACK;
        }
    }

    ctx->stack_size -= n;
    ctx->sp_offset -= n;

    return top;
}
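
// Example (sketch): popping an operand and loading it into a register. The
// returned operand still addresses the old top-of-stack slot, which remains
// valid until the next push overwrites it:
//
//     x86opnd_t arg = ctx_stack_pop(ctx, 1);
//     mov(cb, REG0, arg);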

/**
Get an operand pointing to a slot on the temp stack
*/
x86opnd_t
ctx_stack_opnd(ctx_t* ctx, int32_t idx)
{
    // SP points just above the topmost value
    int32_t offset = (ctx->sp_offset - 1 - idx) * sizeof(VALUE);
    x86opnd_t opnd = mem_opnd(64, REG_SP, offset);

    return opnd;
}
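
// idx counts down from the top of the stack: ctx_stack_opnd(ctx, 0) addresses
// the topmost value, ctx_stack_opnd(ctx, 1) the value beneath it, and so on.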

/**
Get the type of an instruction operand
*/
val_type_t
ctx_get_opnd_type(const ctx_t* ctx, insn_opnd_t opnd)
{
    if (opnd.is_self)
        return ctx->self_type;

    if (ctx->stack_size > MAX_TEMP_TYPES)
        return TYPE_UNKNOWN;

    RUBY_ASSERT(opnd.idx < ctx->stack_size);
    temp_mapping_t mapping = ctx->temp_mapping[ctx->stack_size - 1 - opnd.idx];

    switch (mapping.kind)
    {
      case TEMP_SELF:
        return ctx->self_type;

      case TEMP_STACK:
        return ctx->temp_types[ctx->stack_size - 1 - opnd.idx];

      case TEMP_LOCAL:
        RUBY_ASSERT(mapping.idx < MAX_LOCAL_TYPES);
        return ctx->local_types[mapping.idx];
    }

    rb_bug("unreachable");
}

/**
Set the type of an instruction operand
*/
void ctx_set_opnd_type(ctx_t* ctx, insn_opnd_t opnd, val_type_t type)
{
    if (opnd.is_self) {
        ctx->self_type = type;
        return;
    }

    if (ctx->stack_size > MAX_TEMP_TYPES)
        return;

    RUBY_ASSERT(opnd.idx < ctx->stack_size);
    temp_mapping_t mapping = ctx->temp_mapping[ctx->stack_size - 1 - opnd.idx];

    switch (mapping.kind)
    {
      case TEMP_SELF:
        ctx->self_type = type;
        break;

      case TEMP_STACK:
        ctx->temp_types[ctx->stack_size - 1 - opnd.idx] = type;
        break;

      case TEMP_LOCAL:
        RUBY_ASSERT(mapping.idx < MAX_LOCAL_TYPES);
        ctx->local_types[mapping.idx] = type;
        break;
    }
}
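
// Note that when a stack temp maps to a local (TEMP_LOCAL), setting its type
// here also refines the type of the underlying local variable, just as
// ctx_get_opnd_type() above reads the type through the same mapping.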

/**
Set the type of a local variable
*/
void ctx_set_local_type(ctx_t* ctx, size_t idx, val_type_t type)
{
    if (idx >= MAX_LOCAL_TYPES)
        return;

    ctx->local_types[idx] = type;
}

// Erase local variable type information,
// e.g. because of a call we can't track
void ctx_clear_local_types(ctx_t* ctx)
{
    memset(&ctx->local_types, 0, sizeof(ctx->local_types));
}

/*
Compute a difference between two value types
Returns 0 if the two are the same
Returns > 0 if different but compatible
Returns INT_MAX if incompatible
*/
int type_diff(val_type_t src, val_type_t dst)
{
    RUBY_ASSERT(!src.is_heap || !src.is_imm);
    RUBY_ASSERT(!dst.is_heap || !dst.is_imm);

    // If dst assumes heap but src doesn't
    if (dst.is_heap && !src.is_heap)
        return INT_MAX;

    // If dst assumes imm but src doesn't
    if (dst.is_imm && !src.is_imm)
        return INT_MAX;

    // If dst assumes known type different from src
    if (dst.type != ETYPE_UNKNOWN && dst.type != src.type)
        return INT_MAX;

    if (dst.is_heap != src.is_heap)
        return 1;

    if (dst.is_imm != src.is_imm)
        return 1;

    if (dst.type != src.type)
        return 1;

    return 0;
}
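
// Worked examples (assuming TYPE_FIXNUM is an immediate type with a known
// specific type and TYPE_UNKNOWN carries no assumptions, per yjit_core.h):
//
//     type_diff(TYPE_FIXNUM, TYPE_UNKNOWN) == 1       // widening is cheap
//     type_diff(TYPE_UNKNOWN, TYPE_FIXNUM) == INT_MAX // can't narrow for free
//     type_diff(TYPE_FIXNUM, TYPE_FIXNUM)  == 0       // exact match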

/**
Compute a difference score for two context objects
Returns 0 if the two contexts are the same
Returns > 0 if different but compatible
Returns INT_MAX if incompatible
*/
int ctx_diff(const ctx_t* src, const ctx_t* dst)
{
    // Can only look up the first version in the chain
    if (dst->chain_depth != 0)
        return INT_MAX;

    // Blocks with depth > 0 always produce new versions
    // Sidechains cannot overlap
    if (src->chain_depth != 0)
        return INT_MAX;

    if (dst->stack_size != src->stack_size)
        return INT_MAX;

    if (dst->sp_offset != src->sp_offset)
        return INT_MAX;

    // Difference sum
    int diff = 0;

    // Check the type of self
    int self_diff = type_diff(src->self_type, dst->self_type);

    if (self_diff == INT_MAX)
        return INT_MAX;

    diff += self_diff;

    // For each local type we track
    for (size_t i = 0; i < MAX_LOCAL_TYPES; ++i)
    {
        val_type_t t_src = src->local_types[i];
        val_type_t t_dst = dst->local_types[i];
        int temp_diff = type_diff(t_src, t_dst);

        if (temp_diff == INT_MAX)
            return INT_MAX;

        diff += temp_diff;
    }

    // For each value on the temp stack
    for (size_t i = 0; i < src->stack_size; ++i)
    {
        val_type_t t_src = ctx_get_opnd_type(src, OPND_STACK(i));
        val_type_t t_dst = ctx_get_opnd_type(dst, OPND_STACK(i));
        int temp_diff = type_diff(t_src, t_dst);

        if (temp_diff == INT_MAX)
            return INT_MAX;

        diff += temp_diff;
    }

    return diff;
}
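
// A lower score means a closer match; find_block_version() below uses this
// score to pick the most specialized existing version that is still safe to
// jump to from the source context.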

// Get all blocks for a particular place in an iseq.
rb_yjit_block_array_t
yjit_get_version_array(const rb_iseq_t *iseq, unsigned idx)
{
    struct rb_iseq_constant_body *body = iseq->body;

    if (rb_darray_size(body->yjit_blocks) == 0) {
        return NULL;
    }

    RUBY_ASSERT((unsigned)rb_darray_size(body->yjit_blocks) == body->iseq_size);
    return rb_darray_get(body->yjit_blocks, idx);
}

// Count the number of block versions matching a given blockid
static size_t get_num_versions(blockid_t blockid)
{
    return rb_darray_size(yjit_get_version_array(blockid.iseq, blockid.idx));
}

// Keep track of a block version. Block should be fully constructed.
static void
add_block_version(blockid_t blockid, block_t* block)
{
    // Function entry blocks must have stack size 0
    RUBY_ASSERT(!(block->blockid.idx == 0 && block->ctx.stack_size > 0));
    const rb_iseq_t *iseq = block->blockid.iseq;
    struct rb_iseq_constant_body *body = iseq->body;

    // Ensure yjit_blocks is initialized for this iseq
    if (rb_darray_size(body->yjit_blocks) == 0) {
        // Initialize yjit_blocks to be as wide as body->iseq_encoded
        int32_t casted = (int32_t)body->iseq_size;
        if ((unsigned)casted != body->iseq_size) {
            rb_bug("iseq too large");
        }
        if (!rb_darray_make(&body->yjit_blocks, casted)) {
            rb_bug("allocation failed");
        }

#if RUBY_DEBUG
        // First block compiled for this iseq
        rb_compiled_iseq_count++;
#endif
    }

    RUBY_ASSERT((int32_t)blockid.idx < rb_darray_size(body->yjit_blocks));
    rb_yjit_block_array_t *block_array_ref = rb_darray_ref(body->yjit_blocks, blockid.idx);

    // Add the new block
    if (!rb_darray_append(block_array_ref, block)) {
        rb_bug("allocation failed");
    }

    {
        // By writing the new block to the iseq, the iseq now
        // contains new references to Ruby objects. Run write barriers.
        RB_OBJ_WRITTEN(iseq, Qundef, block->receiver_klass);
        RB_OBJ_WRITTEN(iseq, Qundef, block->callee_cme);

        // Run write barriers for all objects in generated code.
        uint32_t *offset_element;
        rb_darray_foreach(block->gc_object_offsets, offset_idx, offset_element) {
            uint32_t offset_to_value = *offset_element;
            uint8_t *value_address = cb_get_ptr(cb, offset_to_value);

            VALUE object;
            memcpy(&object, value_address, SIZEOF_VALUE);
            RB_OBJ_WRITTEN(iseq, Qundef, object);
        }
    }
}

// Create a new outgoing branch entry for a block
static branch_t*
make_branch_entry(block_t* block, const ctx_t* src_ctx, branchgen_fn gen_fn)
{
    RUBY_ASSERT(block != NULL);

    // Allocate and zero-initialize
    branch_t* branch = calloc(1, sizeof(branch_t));

    branch->block = block;
    branch->src_ctx = *src_ctx;
    branch->gen_fn = gen_fn;
    branch->shape = SHAPE_DEFAULT;

    // Add to the list of outgoing branches for the block
    rb_darray_append(&block->outgoing, branch);

    return branch;
}
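
// Branch entries allocated here are owned by their source block and are freed
// in yjit_free_block() below.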

// Retrieve a basic block version for an (iseq, idx) tuple
block_t* find_block_version(blockid_t blockid, const ctx_t* ctx)
{
    rb_yjit_block_array_t versions = yjit_get_version_array(blockid.iseq, blockid.idx);

    // Best match found
    block_t* best_version = NULL;
    int best_diff = INT_MAX;

    // For each version matching the blockid
    rb_darray_for(versions, idx) {
        block_t *version = rb_darray_get(versions, idx);
        int diff = ctx_diff(ctx, &version->ctx);

        // Note that we always prefer the first matching
        // version because of inline-cache chains
        if (diff < best_diff) {
            best_version = version;
            best_diff = diff;
        }
    }

    return best_version;
}

// Compile a new block version immediately
block_t* gen_block_version(blockid_t blockid, const ctx_t* start_ctx, rb_execution_context_t* ec)
{
    // Copy the context to avoid mutating it
    ctx_t ctx_copy = *start_ctx;
    ctx_t* ctx = &ctx_copy;

    // Allocate a new block version object
    block_t* first_block = calloc(1, sizeof(block_t));
    first_block->blockid = blockid;
    memcpy(&first_block->ctx, ctx, sizeof(ctx_t));

    // Block that is currently being compiled
    block_t* block = first_block;

    // Generate code for the first block
    yjit_gen_block(ctx, block, ec);

    // Keep track of the new block version
    add_block_version(block->blockid, block);

    // For each successor block to compile
    for (;;) {
        // If the previous block compiled doesn't have outgoing branches, stop
        if (rb_darray_size(block->outgoing) == 0) {
            break;
        }

        // Get the last outgoing branch from the previous block
        branch_t* last_branch = rb_darray_back(block->outgoing);

        // If there is no next block to compile, stop
        if (last_branch->dst_addrs[0] || last_branch->dst_addrs[1]) {
            break;
        }

        if (last_branch->targets[0].iseq == NULL) {
            rb_bug("invalid target for last branch");
        }

        // Use the context from the branch
        *ctx = last_branch->target_ctxs[0];

        // Allocate a new block version object
        block = calloc(1, sizeof(block_t));
        block->blockid = last_branch->targets[0];
        memcpy(&block->ctx, ctx, sizeof(ctx_t));

        // Generate code for the current block
        yjit_gen_block(ctx, block, ec);

        // Keep track of the new block version
        add_block_version(block->blockid, block);

        // Patch the last branch address
        last_branch->dst_addrs[0] = cb_get_ptr(cb, block->start_pos);
        rb_darray_append(&block->incoming, last_branch);
        last_branch->blocks[0] = block;

        RUBY_ASSERT(block->start_pos == last_branch->end_pos);
    }

    return first_block;
}

// Generate a block version that is an entry point inserted into an iseq
uint8_t* gen_entry_point(const rb_iseq_t *iseq, uint32_t insn_idx, rb_execution_context_t *ec)
{
    // The entry context makes no assumptions about types
    blockid_t blockid = { iseq, insn_idx };

    // Write the interpreter entry prologue
    uint8_t* code_ptr = yjit_entry_prologue();

    // Try to generate code for the entry block
    block_t* block = gen_block_version(blockid, &DEFAULT_CTX, ec);

    // If we couldn't generate any code
    if (block->end_idx == insn_idx)
    {
        return NULL;
    }

    return code_ptr;
}

// Called by the generated code when a branch stub is executed
// Triggers compilation of branches and code patching
static uint8_t *
branch_stub_hit(branch_t* branch, const uint32_t target_idx, rb_execution_context_t* ec)
{
    uint8_t* dst_addr;
    ctx_t generic_ctx;

    // Stop other ractors since we are going to patch machine code.
    // This is how the GC does it.
    RB_VM_LOCK_ENTER();
    rb_vm_barrier();

    RUBY_ASSERT(branch != NULL);
    RUBY_ASSERT(target_idx < 2);
    blockid_t target = branch->targets[target_idx];
    const ctx_t* target_ctx = &branch->target_ctxs[target_idx];

    // If this branch has already been patched, return the dst address
    // Note: ractors can cause the same stub to be hit multiple times
    if (branch->blocks[target_idx]) {
        dst_addr = branch->dst_addrs[target_idx];
    }
    else
    {
        //fprintf(stderr, "\nstub hit, branch: %p, target idx: %d\n", branch, target_idx);
        //fprintf(stderr, "blockid.iseq=%p, blockid.idx=%d\n", target.iseq, target.idx);
        //fprintf(stderr, "chain_depth=%d\n", target_ctx->chain_depth);

        // :stub-sp-flush:
        // Generated code does stack operations without modifying cfp->sp, while
        // cfp->sp tells the GC which values on the stack to root. Generated code
        // generally takes care of updating cfp->sp when it calls runtime routines
        // that could trigger GC, but for branch stubs that's inconvenient, so
        // we do it here.
        VALUE *const original_interp_sp = ec->cfp->sp;
        ec->cfp->sp += target_ctx->sp_offset;

        // Update the PC in the current CFP, because it
        // may be out of sync in JITted code
        ec->cfp->pc = yjit_iseq_pc_at_idx(target.iseq, target.idx);

        // Try to find an existing compiled version of this block
        block_t* p_block = find_block_version(target, target_ctx);

        // If this block hasn't yet been compiled
        if (!p_block) {
            // Limit the number of block versions
            if (target_ctx->chain_depth == 0) { // guard chains implement limits individually
                if (get_num_versions(target) >= MAX_VERSIONS - 1) {
                    //fprintf(stderr, "version limit hit in branch_stub_hit\n");
                    generic_ctx = DEFAULT_CTX;
                    generic_ctx.stack_size = target_ctx->stack_size;
                    generic_ctx.sp_offset = target_ctx->sp_offset;
                    target_ctx = &generic_ctx;
                }
            }

            // If the new block can be generated right after the branch (at cb->write_pos)
            if (cb->write_pos == branch->end_pos) {
                // This branch should be terminating its block
                RUBY_ASSERT(branch->end_pos == branch->block->end_pos);

                // Change the branch shape to indicate the target block will be placed next
                branch->shape = (uint8_t)target_idx;

                // Rewrite the branch with the new, potentially more compact shape
                cb_set_pos(cb, branch->start_pos);
                branch->gen_fn(cb, branch->dst_addrs[0], branch->dst_addrs[1], branch->shape);
                RUBY_ASSERT(cb->write_pos <= branch->end_pos && "can't enlarge branches");
                branch->end_pos = cb->write_pos;
                branch->block->end_pos = cb->write_pos;
            }

            // Compile the new block version
            p_block = gen_block_version(target, target_ctx, ec);
            RUBY_ASSERT(p_block);
            RUBY_ASSERT(!(branch->shape == (uint8_t)target_idx && p_block->start_pos != branch->end_pos));
        }

        // Add this branch to the list of incoming branches for the target
        rb_darray_append(&p_block->incoming, branch);

        // Update the branch target address
        dst_addr = cb_get_ptr(cb, p_block->start_pos);
        branch->dst_addrs[target_idx] = dst_addr;

        // Rewrite the branch with the new jump target address
        RUBY_ASSERT(branch->dst_addrs[0] != NULL);
        uint32_t cur_pos = cb->write_pos;
        cb_set_pos(cb, branch->start_pos);
        branch->gen_fn(cb, branch->dst_addrs[0], branch->dst_addrs[1], branch->shape);
        RUBY_ASSERT(cb->write_pos == branch->end_pos && "branch can't change size");
        cb_set_pos(cb, cur_pos);

        // Mark this branch target as patched (no longer a stub)
        branch->blocks[target_idx] = p_block;

        // Restore interpreter sp, since the code hitting the stub expects the original.
        ec->cfp->sp = original_interp_sp;
    }

    RB_VM_LOCK_LEAVE();

    // Return a pointer to the compiled block version
    return dst_addr;
}
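
// Lifecycle sketch: get_branch_target() below emits an out-of-line stub whose
// first execution lands in branch_stub_hit() above; the stub hit compiles (or
// locates) the real target, patches the branch, and later executions jump
// straight to generated code without reentering the runtime.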

// Get a version or stub corresponding to a branch target
uint8_t* get_branch_target(
    blockid_t target,
    const ctx_t* ctx,
    branch_t* branch,
    uint32_t target_idx
)
{
    //fprintf(stderr, "get_branch_target, block (%p, %d)\n", target.iseq, target.idx);

    block_t* p_block = find_block_version(target, ctx);

    // If the block already exists
    if (p_block)
    {
        // Add an incoming branch for this version
        rb_darray_append(&p_block->incoming, branch);
        branch->blocks[target_idx] = p_block;

        // Return a pointer to the compiled code
        return cb_get_ptr(cb, p_block->start_pos);
    }

    // Generate an outlined stub that will call branch_stub_hit()
    uint8_t* stub_addr = cb_get_ptr(ocb, ocb->write_pos);

    // Save the yjit registers
    push(ocb, REG_CFP);
    push(ocb, REG_EC);
    push(ocb, REG_SP);
    push(ocb, REG_SP); // pushed twice, which also keeps the C stack 16-byte aligned for the call

    // Call branch_stub_hit(branch, target_idx, ec)
    mov(ocb, C_ARG_REGS[2], REG_EC);
    mov(ocb, C_ARG_REGS[1], imm_opnd(target_idx));
    mov(ocb, C_ARG_REGS[0], const_ptr_opnd(branch));
    call_ptr(ocb, REG0, (void *)&branch_stub_hit);

    // Restore the yjit registers
    pop(ocb, REG_SP);
    pop(ocb, REG_SP);
    pop(ocb, REG_EC);
    pop(ocb, REG_CFP);

    // Jump to the address returned by the
    // branch_stub_hit call
    jmp_rm(ocb, RAX);

    return stub_addr;
}

void gen_branch(
    block_t* block,
    const ctx_t* src_ctx,
    blockid_t target0,
    const ctx_t* ctx0,
    blockid_t target1,
    const ctx_t* ctx1,
    branchgen_fn gen_fn
)
{
    RUBY_ASSERT(target0.iseq != NULL);

    branch_t* branch = make_branch_entry(block, src_ctx, gen_fn);
    branch->targets[0] = target0;
    branch->targets[1] = target1;
    branch->target_ctxs[0] = *ctx0;
    branch->target_ctxs[1] = ctx1 ? *ctx1 : DEFAULT_CTX;

    // Get the branch targets or stubs
    branch->dst_addrs[0] = get_branch_target(target0, ctx0, branch, 0);
    branch->dst_addrs[1] = ctx1 ? get_branch_target(target1, ctx1, branch, 1) : NULL;

    // Call the branch generation function
    branch->start_pos = cb->write_pos;
    gen_fn(cb, branch->dst_addrs[0], branch->dst_addrs[1], SHAPE_DEFAULT);
    branch->end_pos = cb->write_pos;
}

void
gen_jump_branch(codeblock_t* cb, uint8_t* target0, uint8_t* target1, uint8_t shape)
{
    switch (shape)
    {
      case SHAPE_NEXT0:
        break;

      case SHAPE_NEXT1:
        RUBY_ASSERT(false);
        break;

      case SHAPE_DEFAULT:
        jmp_ptr(cb, target0);
        break;
    }
}
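
// Note: SHAPE_NEXT0 emits no instructions because the target block is laid
// out immediately after the branch, so execution simply falls through.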

void gen_direct_jump(
    block_t* block,
    const ctx_t* ctx,
    blockid_t target0
)
{
    RUBY_ASSERT(target0.iseq != NULL);
    ctx_t generic_ctx;

    branch_t* branch = make_branch_entry(block, ctx, gen_jump_branch);
    branch->targets[0] = target0;
    branch->target_ctxs[0] = *ctx;

    block_t* p_block = find_block_version(target0, ctx);

    // If the version already exists
    if (p_block)
    {
        rb_darray_append(&p_block->incoming, branch);

        branch->dst_addrs[0] = cb_get_ptr(cb, p_block->start_pos);
        branch->blocks[0] = p_block;
        branch->shape = SHAPE_DEFAULT;

        // Call the branch generation function
        branch->start_pos = cb->write_pos;
        gen_jump_branch(cb, branch->dst_addrs[0], NULL, SHAPE_DEFAULT);
        branch->end_pos = cb->write_pos;
    }
    else
    {
        // Limit the number of block versions
        if (get_num_versions(target0) >= MAX_VERSIONS - 1)
        {
            //fprintf(stderr, "version limit hit in gen_direct_jump\n");
            generic_ctx = DEFAULT_CTX;
            generic_ctx.stack_size = ctx->stack_size;
            generic_ctx.sp_offset = ctx->sp_offset;
            ctx = &generic_ctx;

            // Record the generic context on the branch so that
            // gen_block_version() compiles the target with it
            branch->target_ctxs[0] = *ctx;
        }

        // The target block will be compiled next, by gen_block_version()
        branch->dst_addrs[0] = NULL;
        branch->shape = SHAPE_NEXT0;
        branch->start_pos = cb->write_pos;
        branch->end_pos = cb->write_pos;
    }
}

// Create a stub to force the code up to this point to be executed
void defer_compilation(
    block_t* block,
    uint32_t insn_idx,
    ctx_t* cur_ctx
)
{
    //fprintf(stderr, "defer compilation at (%p, %d) depth=%d\n", block->blockid.iseq, insn_idx, cur_ctx->chain_depth);

    if (cur_ctx->chain_depth != 0) {
        rb_bug("double defer");
    }

    ctx_t next_ctx = *cur_ctx;

    if (next_ctx.chain_depth >= UINT8_MAX) {
        rb_bug("max block version chain depth reached");
    }

    next_ctx.chain_depth += 1;

    branch_t* branch = make_branch_entry(block, cur_ctx, gen_jump_branch);

    // Get the branch targets or stubs
    branch->target_ctxs[0] = next_ctx;
    branch->targets[0] = (blockid_t){ block->blockid.iseq, insn_idx };
    branch->dst_addrs[0] = get_branch_target(branch->targets[0], &next_ctx, branch, 0);

    // Call the branch generation function
    branch->start_pos = cb->write_pos;
    gen_jump_branch(cb, branch->dst_addrs[0], NULL, SHAPE_DEFAULT);
    branch->end_pos = cb->write_pos;
}
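
// Deferred targets carry chain_depth > 0, which ctx_diff() above rejects, so
// a deferred version is reached only through its own stub and is never shared
// via find_block_version().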

// Remove all references to a block then free it.
void
yjit_free_block(block_t *block)
{
    yjit_unlink_method_lookup_dependency(block);
    yjit_block_assumptions_free(block);

    // For each outgoing branch
    rb_darray_for(block->outgoing, branch_idx) {
        branch_t* out_branch = rb_darray_get(block->outgoing, branch_idx);

        // For each successor block
        for (size_t succ_idx = 0; succ_idx < 2; succ_idx++) {
            block_t* succ = out_branch->blocks[succ_idx];

            if (succ == NULL)
                continue;

            // Remove this block from the successor's incoming list
            rb_darray_for(succ->incoming, incoming_idx) {
                branch_t* pred_branch = rb_darray_get(succ->incoming, incoming_idx);
                if (pred_branch == out_branch) {
                    rb_darray_remove_unordered(succ->incoming, incoming_idx);
                    break;
                }
            }
        }

        // Free the outgoing branch entry
        free(out_branch);
    }

    rb_darray_free(block->incoming);
    rb_darray_free(block->outgoing);
    rb_darray_free(block->gc_object_offsets);

    free(block);
}

// Remove a block version
static void
block_array_remove(rb_yjit_block_array_t block_array, block_t *block)
{
    block_t **element;
    rb_darray_foreach(block_array, idx, element) {
        if (*element == block) {
            rb_darray_remove_unordered(block_array, idx);
            return;
        }
    }

    RUBY_ASSERT(false);
}

// Invalidate one specific block version
void
invalidate_block_version(block_t* block)
{
    ASSERT_vm_locking();
    // TODO: want to assert that all other ractors are stopped here. Can't patch
    // machine code that some other thread is running.

    const rb_iseq_t *iseq = block->blockid.iseq;

    //fprintf(stderr, "invalidating block (%p, %d)\n", block->blockid.iseq, block->blockid.idx);
    //fprintf(stderr, "block=%p\n", block);

    // Remove this block from the version array
    rb_yjit_block_array_t versions = yjit_get_version_array(iseq, block->blockid.idx);
    block_array_remove(versions, block);

    // Get a pointer to the generated code for this block
    uint8_t* code_ptr = cb_get_ptr(cb, block->start_pos);

    // For each incoming branch
    rb_darray_for(block->incoming, incoming_idx)
    {
        branch_t* branch = rb_darray_get(block->incoming, incoming_idx);
        uint32_t target_idx = (branch->dst_addrs[0] == code_ptr) ? 0 : 1;
        RUBY_ASSERT(!branch->blocks[target_idx] || branch->blocks[target_idx] == block);

        // Create a stub for this branch target
        branch->dst_addrs[target_idx] = get_branch_target(
            block->blockid,
            &block->ctx,
            branch,
            target_idx
        );

        // Mark this target as being a stub
        branch->blocks[target_idx] = NULL;

        // Check if the invalidated block immediately follows
        bool target_next = block->start_pos == branch->end_pos;

        if (target_next)
        {
            // The new block will no longer be adjacent
            branch->shape = SHAPE_DEFAULT;
        }

        // Rewrite the branch with the new jump target address
        RUBY_ASSERT(branch->dst_addrs[0] != NULL);
        uint32_t cur_pos = cb->write_pos;
        cb_set_pos(cb, branch->start_pos);
        branch->gen_fn(cb, branch->dst_addrs[0], branch->dst_addrs[1], branch->shape);
        branch->end_pos = cb->write_pos;
        cb_set_pos(cb, cur_pos);

        if (target_next && branch->end_pos > block->end_pos)
        {
            rb_bug("yjit invalidate rewrote branch past block end");
        }
    }

    uint32_t idx = block->blockid.idx;
    // FIXME: the following says "if", but it's unconditional.
    // If the block is an entry point, it needs to be unmapped from its iseq
    VALUE* entry_pc = yjit_iseq_pc_at_idx(iseq, idx);
    int entry_opcode = yjit_opcode_at_pc(iseq, entry_pc);

    // TODO: unmap_addr2insn in yjit_iface.c? Maybe we can write a function to encompass this logic?
    // Should check how it's used in exit and side-exit
    const void * const *handler_table = rb_vm_get_insns_address_table();
    void* handler_addr = (void*)handler_table[entry_opcode];
    iseq->body->iseq_encoded[idx] = (VALUE)handler_addr;

    // TODO:
    // May want to recompile a new entry point (for interpreter entry blocks)
    // This isn't necessary for correctness

    // FIXME:
    // Call continuation addresses on the stack can also be atomically replaced by jumps going to the stub.

    yjit_free_block(block);

    //fprintf(stderr, "invalidation done\n");
}

void
yjit_init_core(void)
{
    // Nothing yet
}