#include "vm_core.h"
#include "vm_callinfo.h"
#include "builtin.h"
#include "insns.inc"
#include "insns_info.inc"
#include "ujit_asm.h"
#include "ujit_utils.h"
#include "ujit_iface.h"
#include "ujit_core.h"
#include "ujit_codegen.h"

// Maximum number of branch instructions we can track
#define MAX_BRANCHES 32768

// Table of block versions indexed by (iseq, index) tuples
st_table * version_tbl;

// Registered branch entries
branch_t branch_entries[MAX_BRANCHES];
uint32_t num_branches = 0;
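
// The block and branch bookkeeping types are defined in ujit_core.h.
// The fields this file relies on are roughly:
//
//   blockid_t : { const rb_iseq_t *iseq; uint32_t idx; }
//   block_t   : blockid, ctx, start_pos, end_pos, end_idx,
//               incoming (array of branch indices), num_incoming
//   branch_t  : start_pos, end_pos, src_ctx, targets[2], target_ctxs[2],
//               dst_addrs[2], gen_fn, shape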

/*
Get an operand for the adjusted stack pointer address
*/
x86opnd_t
ctx_sp_opnd(ctx_t* ctx, int32_t offset_bytes)
{
    int32_t offset = (ctx->stack_size) * 8 + offset_bytes;
    return mem_opnd(64, REG_SP, offset);
}

/*
Make space on the stack for N values
Return a pointer to the new stack top
*/
x86opnd_t
ctx_stack_push(ctx_t* ctx, size_t n)
{
    ctx->stack_size += n;

    // SP points just above the topmost value
    int32_t offset = (ctx->stack_size - 1) * 8;
    return mem_opnd(64, REG_SP, offset);
}

/*
Pop N values off the stack
Return a pointer to the stack top before the pop operation
*/
x86opnd_t
ctx_stack_pop(ctx_t* ctx, size_t n)
{
    // SP points just above the topmost value
    int32_t offset = (ctx->stack_size - 1) * 8;
    x86opnd_t top = mem_opnd(64, REG_SP, offset);

    ctx->stack_size -= n;

    return top;
}

/*
Get an operand pointing to a slot on the temporary stack
idx 0 is the topmost value
*/
x86opnd_t
ctx_stack_opnd(ctx_t* ctx, int32_t idx)
{
    // SP points just above the topmost value
    int32_t offset = (ctx->stack_size - 1 - idx) * 8;
    x86opnd_t opnd = mem_opnd(64, REG_SP, offset);

    return opnd;
}
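
// Worked example of the operand math above, assuming ctx->stack_size == 3
// (three values pushed so far):
//   ctx_stack_opnd(ctx, 0) => mem_opnd(64, REG_SP, 16)  // topmost value
//   ctx_stack_opnd(ctx, 1) => mem_opnd(64, REG_SP, 8)
//   ctx_stack_opnd(ctx, 2) => mem_opnd(64, REG_SP, 0)   // deepest value
//   ctx_sp_opnd(ctx, 0)    => mem_opnd(64, REG_SP, 24)  // one slot above the top
// A subsequent ctx_stack_push(ctx, 1) would return mem_opnd(64, REG_SP, 24).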

// Add an incoming branch for a given block version
static void add_incoming(block_t* p_block, uint32_t branch_idx)
{
    // Grow the list of incoming branches for the target by one element
    uint32_t* new_list = malloc(sizeof(uint32_t) * (p_block->num_incoming + 1));
    memcpy(new_list, p_block->incoming, sizeof(uint32_t) * p_block->num_incoming);
    new_list[p_block->num_incoming] = branch_idx;
    free(p_block->incoming);
    p_block->incoming = new_list;
    p_block->num_incoming += 1;
}

// Retrieve a basic block version for an (iseq, idx) tuple
block_t* find_block_version(blockid_t blockid, const ctx_t* ctx)
{
    // If there exists a version for this block id
    st_data_t st_version;
    if (rb_st_lookup(version_tbl, (st_data_t)&blockid, &st_version)) {
        return (block_t*)st_version;
    }

    //
    // TODO: use the ctx parameter to search existing versions for a match
    //

    return NULL;
}

// Compile a new block version immediately
block_t* gen_block_version(blockid_t blockid, const ctx_t* start_ctx)
{
    // Copy the context to avoid mutating it
    ctx_t ctx_copy = *start_ctx;
    ctx_t* ctx = &ctx_copy;

    // Allocate a new block version object
    block_t* first_block = calloc(1, sizeof(block_t));
    first_block->blockid = blockid;
    memcpy(&first_block->ctx, ctx, sizeof(ctx_t));

    // Block that is currently being compiled
    block_t* block = first_block;

    // Generate code for the first block
    ujit_gen_block(ctx, block);

    // Keep track of the new block version
    st_insert(version_tbl, (st_data_t)&block->blockid, (st_data_t)block);

    // For each successor block to compile
    for (;;) {
        // If no branches were generated, stop
        if (num_branches == 0) {
            break;
        }

        // Get the last branch entry
        uint32_t branch_idx = num_branches - 1;
        branch_t* last_branch = &branch_entries[num_branches - 1];

        // If the last branch already has a destination address,
        // it does not expect a fall-through block: stop
        if (last_branch->dst_addrs[0] || last_branch->dst_addrs[1]) {
            break;
        }

        if (last_branch->targets[0].iseq == NULL) {
            rb_bug("invalid target for last branch");
        }

        // Allocate a new block version object
        block = calloc(1, sizeof(block_t));
        block->blockid = last_branch->targets[0];
        memcpy(&block->ctx, ctx, sizeof(ctx_t));

        // Generate code for the current block
        ujit_gen_block(ctx, block);

        // Keep track of the new block version
        st_insert(version_tbl, (st_data_t)&block->blockid, (st_data_t)block);

        // Patch the last branch address
        last_branch->dst_addrs[0] = cb_get_ptr(cb, block->start_pos);
        add_incoming(block, branch_idx);
        assert (block->start_pos == last_branch->end_pos);
    }

    return first_block;
}
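
// Note on the loop above: gen_direct_jump() registers a SHAPE_NEXT0 branch with
// no destination address when its target has not been compiled yet. The loop
// keeps compiling those fall-through targets one after another until the most
// recent branch already has a destination, so a chain of straight-line blocks
// is emitted contiguously within a single gen_block_version() call.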

// Generate a block version that is an entry point inserted into an iseq
uint8_t* gen_entry_point(const rb_iseq_t *iseq, uint32_t insn_idx)
{
    // The entry context makes no assumptions about types
    blockid_t blockid = { iseq, insn_idx };
    ctx_t ctx = { 0 };

    // Write the interpreter entry prologue
    uint8_t* code_ptr = ujit_entry_prologue();

    // Try to generate code for the entry block
    block_t* block = gen_block_version(blockid, &ctx);

    // If we couldn't generate any code
    if (block->end_idx == insn_idx)
    {
        return NULL;
    }

    return code_ptr;
}

// Called by the generated code when a branch stub is executed
// Triggers compilation of branches and code patching
uint8_t* branch_stub_hit(uint32_t branch_idx, uint32_t target_idx)
{
    assert (branch_idx < num_branches);
    assert (target_idx < 2);
    branch_t *branch = &branch_entries[branch_idx];
    blockid_t target = branch->targets[target_idx];
    ctx_t* target_ctx = &branch->target_ctxs[target_idx];

    //fprintf(stderr, "\nstub hit, branch idx: %d, target idx: %d\n", branch_idx, target_idx);
    //fprintf(stderr, "cb->write_pos=%ld\n", cb->write_pos);
    //fprintf(stderr, "branch->end_pos=%d\n", branch->end_pos);

    // If this branch is at the end of the generated code, the target block
    // compiled below can fall through (be placed next)
    if (cb->write_pos == branch->end_pos)
    {
        //fprintf(stderr, "target idx %d will be placed next\n", target_idx);

        // Switch to the fall-through shape for this target
        // (this relies on SHAPE_NEXT0 and SHAPE_NEXT1 having the values 0 and 1)
        branch->shape = (uint8_t)target_idx;

        // Rewrite the branch with the new, potentially more compact shape
        cb_set_pos(cb, branch->start_pos);
        branch->gen_fn(cb, branch->dst_addrs[0], branch->dst_addrs[1], branch->shape);
        assert (cb->write_pos <= branch->end_pos);
    }

    // Try to find a compiled version of this block
    block_t* p_block = find_block_version(target, target_ctx);

    // If this block hasn't yet been compiled
    if (!p_block)
    {
        p_block = gen_block_version(target, target_ctx);
    }

    // Add this branch to the list of incoming branches for the target
    add_incoming(p_block, branch_idx);

    // Update the branch target address
    uint8_t* dst_addr = cb_get_ptr(cb, p_block->start_pos);
    branch->dst_addrs[target_idx] = dst_addr;

    // Rewrite the branch with the new jump target address
    assert (branch->dst_addrs[0] != NULL);
    assert (branch->dst_addrs[1] != NULL);
    uint32_t cur_pos = cb->write_pos;
    cb_set_pos(cb, branch->start_pos);
    branch->gen_fn(cb, branch->dst_addrs[0], branch->dst_addrs[1], branch->shape);
    assert (cb->write_pos <= branch->end_pos);
    branch->end_pos = cb->write_pos;
    cb_set_pos(cb, cur_pos);

    // Return a pointer to the compiled block version
    return dst_addr;
}
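
// The address returned above is what the outlined stub produced by
// get_branch_target() jumps to (see the jmp_rm(ocb, RAX) at the end of the
// stub), so execution resumes directly in the freshly compiled block version.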

// Get a version or stub corresponding to a branch target
// TODO: need incoming and target contexts
uint8_t* get_branch_target(
    blockid_t target,
    const ctx_t* ctx,
    uint32_t branch_idx,
    uint32_t target_idx
)
{
    block_t* p_block = find_block_version(target, ctx);

    if (p_block)
    {
        // Add an incoming branch for this version
        add_incoming(p_block, branch_idx);

        return cb_get_ptr(cb, p_block->start_pos);
    }

    // Generate an outlined stub that will call
    // branch_stub_hit(uint32_t branch_idx, uint32_t target_idx)
    uint8_t* stub_addr = cb_get_ptr(ocb, ocb->write_pos);

    //fprintf(stderr, "REQUESTING STUB FOR IDX: %d\n", target.idx);

    // Save the ujit registers
    // (REG_SP is pushed twice, presumably to keep the stack 16-byte aligned for the call)
    push(ocb, REG_CFP);
    push(ocb, REG_EC);
    push(ocb, REG_SP);
    push(ocb, REG_SP);

    // Pass branch_idx and target_idx as the first two C arguments (SysV: RDI, RSI)
    mov(ocb, RDI, imm_opnd(branch_idx));
    mov(ocb, RSI, imm_opnd(target_idx));
    call_ptr(ocb, REG0, (void *)&branch_stub_hit);

    // Restore the ujit registers
    pop(ocb, REG_SP);
    pop(ocb, REG_SP);
    pop(ocb, REG_EC);
    pop(ocb, REG_CFP);

    // Jump to the address returned by the branch_stub_hit call
    jmp_rm(ocb, RAX);

    return stub_addr;
}
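
// Register and generate a branch with two possible targets.
// Each destination is either an already-compiled block version or an outlined
// stub that triggers compilation through branch_stub_hit() when first taken.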

void gen_branch(
    const ctx_t* src_ctx,
    blockid_t target0,
    const ctx_t* ctx0,
    blockid_t target1,
    const ctx_t* ctx1,
    branchgen_fn gen_fn
)
{
    assert (target0.iseq != NULL);
    assert (target1.iseq != NULL);
    assert (num_branches < MAX_BRANCHES);
    uint32_t branch_idx = num_branches++;

    // Get the branch targets or stubs
    uint8_t* dst_addr0 = get_branch_target(target0, ctx0, branch_idx, 0);
    uint8_t* dst_addr1 = get_branch_target(target1, ctx1, branch_idx, 1);

    // Call the branch generation function
    uint32_t start_pos = cb->write_pos;
    gen_fn(cb, dst_addr0, dst_addr1, SHAPE_DEFAULT);
    uint32_t end_pos = cb->write_pos;

    // Register this branch entry
    branch_t branch_entry = {
        start_pos,
        end_pos,
        *src_ctx,
        { target0, target1 },
        { *ctx0, *ctx1 },
        { dst_addr0, dst_addr1 },
        gen_fn,
        SHAPE_DEFAULT
    };

    branch_entries[branch_idx] = branch_entry;
}
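
// Branch code generation function for unconditional jumps, used as the gen_fn
// of branches registered by gen_direct_jump(). With SHAPE_NEXT0 the target is
// placed immediately after the branch, so no instruction is emitted.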

void
gen_jump_branch(codeblock_t* cb, uint8_t* target0, uint8_t* target1, uint8_t shape)
{
    switch (shape)
    {
        case SHAPE_NEXT0:
        break;

        case SHAPE_NEXT1:
        assert (false);
        break;

        case SHAPE_DEFAULT:
        jmp_ptr(cb, target0);
        break;
    }
}
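
// Generate an unconditional jump to a block version. If the target has not
// been compiled yet, no code is emitted here: the branch is registered with
// SHAPE_NEXT0 and the target block is compiled right after it by the loop in
// gen_block_version().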

void gen_direct_jump(
    const ctx_t* ctx,
    blockid_t target0
)
{
    assert (target0.iseq != NULL);
    assert (num_branches < MAX_BRANCHES);
    uint32_t branch_idx = num_branches++;

    // Branch target or stub address
    uint8_t* dst_addr0;

    // Shape of the branch
    uint8_t branch_shape;

    // Branch start and end positions
    uint32_t start_pos;
    uint32_t end_pos;

    block_t* p_block = find_block_version(target0, ctx);

    // If the version already exists
    if (p_block)
    {
        add_incoming(p_block, branch_idx);
        dst_addr0 = cb_get_ptr(cb, p_block->start_pos);
        branch_shape = SHAPE_DEFAULT;

        // Call the branch generation function
        start_pos = cb->write_pos;
        gen_jump_branch(cb, dst_addr0, NULL, branch_shape);
        end_pos = cb->write_pos;
    }
    else
    {
        // The target block will follow next
        // It will be compiled in gen_block_version()
        dst_addr0 = NULL;
        branch_shape = SHAPE_NEXT0;
        start_pos = cb->write_pos;
        end_pos = cb->write_pos;
    }

    // Register this branch entry
    branch_t branch_entry = {
        start_pos,
        end_pos,
        *ctx,
        { target0, BLOCKID_NULL },
        { *ctx, *ctx },
        { dst_addr0, NULL },
        gen_jump_branch,
        branch_shape
    };

    branch_entries[branch_idx] = branch_entry;
}

// Invalidate one specific block version
void invalidate(block_t* block)
{
    fprintf(stderr, "invalidating block (%p, %d)\n", (void*)block->blockid.iseq, block->blockid.idx);

    // Remove the version object from the map so we can re-generate stubs
    st_delete(version_tbl, (st_data_t*)&block->blockid, NULL);

    // Get a pointer to the generated code for this block
    uint8_t* code_ptr = cb_get_ptr(cb, block->start_pos);

    // For each incoming branch
    for (uint32_t i = 0; i < block->num_incoming; ++i)
    {
        uint32_t branch_idx = block->incoming[i];
        branch_t* branch = &branch_entries[branch_idx];
        uint32_t target_idx = (branch->dst_addrs[0] == code_ptr)? 0:1;

        // Create a stub for this branch target
        branch->dst_addrs[target_idx] = get_branch_target(
            block->blockid,
            &block->ctx,
            branch_idx,
            target_idx
        );

        // Check if the invalidated block immediately follows this branch
        bool target_next = block->start_pos == branch->end_pos;

        if (target_next)
        {
            // The branch can no longer fall through: reset its shape
            branch->shape = SHAPE_DEFAULT;
        }

        // Rewrite the branch with the new jump target address
        assert (branch->dst_addrs[0] != NULL);
        assert (branch->dst_addrs[1] != NULL);
        uint32_t cur_pos = cb->write_pos;
        cb_set_pos(cb, branch->start_pos);
        branch->gen_fn(cb, branch->dst_addrs[0], branch->dst_addrs[1], branch->shape);
        branch->end_pos = cb->write_pos;
        cb_set_pos(cb, cur_pos);

        if (target_next && branch->end_pos > block->end_pos)
        {
            rb_bug("ujit invalidate rewrote branch past block end");
        }
    }

    // If the block is an entry point, it needs to be unmapped from its iseq
    const rb_iseq_t* iseq = block->blockid.iseq;
    uint32_t idx = block->blockid.idx;
    VALUE* entry_pc = &iseq->body->iseq_encoded[idx];
    int entry_opcode = opcode_at_pc(iseq, entry_pc);

    // TODO: unmap_addr2insn in ujit_iface.c? Maybe we can write a function to encompass this logic?
    // Should check how it's used in exit and side-exit
    const void * const *handler_table = rb_vm_get_insns_address_table();
    void* handler_addr = (void*)handler_table[entry_opcode];
    iseq->body->iseq_encoded[idx] = (VALUE)handler_addr;

    //
    // Optional: may want to recompile a new deoptimized entry point
    //

    // TODO:
    // Call continuation addresses on the stack can also be atomically
    // replaced by jumps going to the stub. For now this isn't an issue.

    // Free the block version object and its incoming branch list
    free(block->incoming);
    free(block);
}
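
// st_table callbacks for version_tbl, which is keyed by pointers to blockid_t.
// Following the st convention, the comparison callback returns 0 when the two
// keys are equal (strcmp-style).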

int blockid_cmp(st_data_t arg0, st_data_t arg1)
{
    const blockid_t *block0 = (const blockid_t*)arg0;
    const blockid_t *block1 = (const blockid_t*)arg1;

    // Return 0 when the block ids are equal, non-zero otherwise
    return !(block0->iseq == block1->iseq && block0->idx == block1->idx);
}

st_index_t blockid_hash(st_data_t arg)
{
    const blockid_t *blockid = (const blockid_t*)arg;
    st_index_t hash0 = st_numhash((st_data_t)blockid->iseq);
    st_index_t hash1 = st_numhash((st_data_t)(uint64_t)blockid->idx);

    // Use XOR to combine the hashes
    return hash0 ^ hash1;
}

static const struct st_hash_type hashtype_blockid = {
    blockid_cmp,
    blockid_hash,
};

void
ujit_init_core(void)
{
    // Initialize the version hash table
    version_tbl = st_init_table(&hashtype_blockid);
}