mirror of https://github.com/ruby/ruby.git synced 2022-11-09 12:17:21 -05:00

Add system to implement codegen preconditions in JIT

This commit is contained in:
Maxime Chevalier-Boisvert 2020-10-15 11:46:37 -04:00 committed by Alan Wu
parent 6f81bd2c76
commit 32a66b99f6
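
In outline, the commit changes the codegen function signature from void to bool and makes the compile loop stop at the first generator that declines to compile its instruction. A minimal sketch of that pattern follows, using simplified stand-in types and a hypothetical compile_sequence loop in place of the real ujit_compile_insn:

#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-ins for the real codeblock_t / ctx_t types. */
typedef struct codeblock codeblock_t;
typedef struct ctx ctx_t;

/* Codegen functions now return a bool: true means "machine code was
   emitted", false means "a precondition failed, leave this instruction
   (and the rest of the sequence) to the interpreter". */
typedef bool (*codegen_fn)(codeblock_t *cb, codeblock_t *ocb, ctx_t *ctx);

/* Hypothetical compile loop mirroring the idea in ujit_compile_insn:
   stop compiling at the first generator that refuses. */
static void
compile_sequence(codegen_fn *gen_fns, size_t n,
                 codeblock_t *cb, codeblock_t *ocb, ctx_t *ctx)
{
    for (size_t i = 0; i < n; ++i) {
        if (!gen_fns[i](cb, ocb, ctx)) {
            break; /* precondition failed; end the compiled region here */
        }
        /* ... otherwise advance to the next instruction ... */
    }
}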


@@ -34,7 +34,7 @@ typedef struct ctx_struct
} ctx_t;
// MicroJIT code generation function signature
typedef void (*codegen_fn)(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx);
typedef bool (*codegen_fn)(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx);
// Map from YARV opcodes to code generation functions
static st_table *gen_fns;
@@ -255,7 +255,10 @@ ujit_compile_insn(rb_iseq_t *iseq, unsigned int insn_idx, unsigned int* next_uji
// Call the code generation function
codegen_fn gen_fn = (codegen_fn)st_gen_fn;
gen_fn(cb, ocb, &ctx);
if (!gen_fn(cb, ocb, &ctx))
{
break;
}
// Move to the next instruction
insn_idx += insn_len(opcode);
@@ -278,7 +281,8 @@ ujit_compile_insn(rb_iseq_t *iseq, unsigned int insn_idx, unsigned int* next_uji
return code_ptr;
}
void gen_dup(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
bool
gen_dup(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
{
x86opnd_t dup_val = ctx_stack_pop(ctx, 1);
x86opnd_t loc0 = ctx_stack_push(ctx, 1);
@@ -286,27 +290,35 @@ void gen_dup(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
mov(cb, RAX, dup_val);
mov(cb, loc0, RAX);
mov(cb, loc1, RAX);
return true;
}
void gen_nop(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
bool
gen_nop(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
{
// Do nothing
return true;
}
void gen_pop(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
bool
gen_pop(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
{
// Decrement SP
ctx_stack_pop(ctx, 1);
return true;
}
void gen_putnil(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
bool
gen_putnil(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
{
// Write constant at SP
x86opnd_t stack_top = ctx_stack_push(ctx, 1);
mov(cb, stack_top, imm_opnd(Qnil));
return true;
}
void gen_putobject(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
bool
gen_putobject(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
{
// Load the argument from the bytecode sequence.
// We need to do this as the argument can change due to GC compaction.
@@ -317,9 +329,12 @@ void gen_putobject(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
// Write argument at SP
x86opnd_t stack_top = ctx_stack_push(ctx, 1);
mov(cb, stack_top, RAX);
return true;
}
void gen_putobject_int2fix(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
bool
gen_putobject_int2fix(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
{
int opcode = ctx_get_opcode(ctx);
int cst_val = (opcode == BIN(putobject_INT2FIX_0_))? 0:1;
@@ -327,9 +342,12 @@ void gen_putobject_int2fix(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
// Write constant at SP
x86opnd_t stack_top = ctx_stack_push(ctx, 1);
mov(cb, stack_top, imm_opnd(INT2FIX(cst_val)));
return true;
}
void gen_putself(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
bool
gen_putself(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
{
// Load self from CFP
mov(cb, RAX, mem_opnd(64, RDI, 24));
@@ -337,9 +355,12 @@ void gen_putself(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
// Write it on the stack
x86opnd_t stack_top = ctx_stack_push(ctx, 1);
mov(cb, stack_top, RAX);
return true;
}
void gen_getlocal_wc0(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
bool
gen_getlocal_wc0(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
{
// Load environment pointer EP from CFP
mov(cb, RDX, member_opnd(RDI, rb_control_frame_t, ep));
@@ -354,9 +375,12 @@ void gen_getlocal_wc0(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
// Write the local at SP
x86opnd_t stack_top = ctx_stack_push(ctx, 1);
mov(cb, stack_top, RCX);
return true;
}
void gen_setlocal_wc0(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
bool
gen_setlocal_wc0(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
{
/*
vm_env_write(const VALUE *ep, int index, VALUE v)
@@ -392,9 +416,12 @@ void gen_setlocal_wc0(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
int32_t local_idx = (int32_t)ctx_get_arg(ctx, 0);
const int32_t offs = -8 * local_idx;
mov(cb, mem_opnd(64, RDX, offs), RCX);
return true;
}
void gen_opt_minus(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
bool
gen_opt_minus(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
{
// Create a side-exit to fall back to the interpreter
// Note: we generate the side-exit before popping operands from the stack
@@ -429,56 +456,87 @@ void gen_opt_minus(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
// Push the output on the stack
x86opnd_t dst = ctx_stack_push(ctx, 1);
mov(cb, dst, RAX);
return true;
}
void gen_opt_send_without_block(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
bool
gen_opt_send_without_block(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
{
// Create a side-exit to fall back to the interpreter
uint8_t* side_exit = ujit_side_exit(ocb, ctx, ctx->pc);
/*
struct rb_call_data * cd = (struct rb_call_data *)ctx_get_arg(ctx, 0);
int32_t argc = (int32_t)vm_ci_argc(cd->ci);
const struct rb_callcache *cc = cd->cc;
// Callee method ID
ID mid = vm_ci_mid(cd->ci);
//fprintf(stderr, "jitting call to \"%s\", argc: %lu\n", rb_id2name(mid), argc);
// TODO: don't jit calls that aren't simple
// have this codegen function return false, make codegen stop?
if (vm_ci_flag(cd->ci) & VM_CALL_ARGS_SIMPLE)
// Don't JIT calls with keyword splat
if (vm_ci_flag(cd->ci) & VM_CALL_KW_SPLAT)
{
//fprintf(stderr, "simple call\n");
return false;
}
// Don't jit calls that aren't simple
if (!(vm_ci_flag(cd->ci) & VM_CALL_ARGS_SIMPLE))
{
return false;
}
// TODO: stop if the inline cache isn't filled
// TODO: stop if this isn't a C call
return false;
/*
// Create a side-exit to fall back to the interpreter
uint8_t* side_exit = ujit_side_exit(ocb, ctx, ctx->pc);
struct rb_calling_info *calling = (struct rb_calling_info*)malloc(sizeof(struct rb_calling_info));
calling->block_handler = VM_BLOCK_HANDLER_NONE;
calling->kw_splat = 0;
calling->argc = argc;
mov(cb, RAX, const_ptr_opnd(cd));
x86opnd_t ptr_to_cc = member_opnd(RAX, struct rb_call_data, cc);
mov(cb, RAX, ptr_to_cc);
*/
/*
x86opnd_t ptr_to_klass = mem_opnd(64, RAX, offsetof(struct rb_callcache, klass));
x86opnd_t ptr_to_cme_ = mem_opnd(64, RAX, offsetof(struct rb_callcache, cme_));
mov(cb, RBX, ptr_to_klass);
x86opnd_t ptr_to_call_ = mem_opnd(64, RAX, offsetof(struct rb_callcache, call_));
mov(cb, R9, ptr_to_klass);
mov(cb, RCX, ptr_to_cme_);
// Points to the receiver operand on the stack
x86opnd_t recv = ctx_stack_opnd(ctx, argc);
mov(cb, RDX, recv);
//print_int(cb, recv);
// Store calling->recv
mov(cb, R8, const_ptr_opnd(calling));
x86opnd_t recv_opnd = mem_opnd(64, R8, offsetof(struct rb_calling_info, recv));
mov(cb, recv_opnd, RDX);
//print_int(cb, recv_opnd);
// Pointer to the klass field of the receiver
x86opnd_t klass_opnd = mem_opnd(64, RDX, offsetof(struct RBasic, klass));
print_int(cb, klass_opnd);
cmp(cb, RBX, klass_opnd);
cmp(cb, R9, klass_opnd);
jne_ptr(cb, side_exit);
//print_int(cb, klass_opnd);
print_str(cb, "cache klass hit");
//#define METHOD_ENTRY_INVALIDATED(me) ((me)->flags & IMEMO_FL_USER5)
@@ -486,14 +544,53 @@ void gen_opt_send_without_block(codeblock_t* cb, codeblock_t* ocb, ctx_t* ctx)
test(cb, flags_opnd, imm_opnd(IMEMO_FL_USER5));
jnz_ptr(cb, side_exit);
print_str(cb, "method entry not invalidated!!!1");
push(cb, RDI);
push(cb, RSI);
x86opnd_t ptr_to_pc = mem_opnd(64, RDI, offsetof(rb_control_frame_t, pc));
mov(cb, ptr_to_pc, const_ptr_opnd(ctx->pc + insn_len(BIN(opt_send_without_block))));
// Write the adjusted SP back into the CFP
if (ctx->stack_diff != 0)
{
x86opnd_t stack_pointer = ctx_sp_opnd(ctx, 1);
lea(cb, RSI, stack_pointer);
mov(cb, mem_opnd(64, RDI, 8), RSI);
}
// val = vm_cc_call(cc)(ec, GET_CFP(), &calling, cd);
mov(cb, RSI, RDI);
mov(cb, RDI, const_ptr_opnd(rb_current_execution_context()));
mov(cb, RDX, R8);
print_int(cb, RDX);
mov(cb, RCX, const_ptr_opnd(cd));
call(cb, ptr_to_call_);
pop(cb, RSI);
pop(cb, RDI);
size_t continue_in_jit = cb_new_label(cb, "continue_in_jit");
cmp(cb, RAX, imm_opnd(Qundef));
jne(cb, continue_in_jit);
//print_str(cb, "method entry not invalidated!!!1");
mov(cb, RDI, const_ptr_opnd(rb_current_execution_context()));
mov(cb, RDI, mem_opnd(64, RDI, offsetof(rb_execution_context_t, cfp)));
// Read the PC from the CFP
mov(cb, RAX, mem_opnd(64, RDI, 0));
// Write the post call bytes
for (size_t i = 0; i < sizeof(ujit_post_call_bytes); ++i)
cb_write_byte(cb, ujit_post_call_bytes[i]);
cb_write_label(cb, continue_in_jit);
cb_link_labels(cb);
return true;
*/
jmp_ptr(cb, side_exit);
}
bool
@@ -505,9 +602,11 @@ rb_ujit_enabled_p(void)
void
rb_ujit_init(void)
{
if (!ujit_scrape_successful || !PLATFORM_SUPPORTED_P) {
if (!ujit_scrape_successful || !PLATFORM_SUPPORTED_P)
{
return;
}
// Initialize the code blocks
size_t mem_size = 128 * 1024 * 1024;
uint8_t* mem_block = alloc_exec_mem(mem_size);