Mirror of https://github.com/ruby/ruby.git (synced 2022-11-09 12:17:21 -05:00)
commit a10cf74e5c (parent 5b68d14c2f)

style: align pointer "*" to the right

9 changed files with 515 additions and 515 deletions
yjit_asm.c: 330 changed lines
@@ -128,7 +128,7 @@ x86opnd_t const_ptr_opnd(const void *ptr)
 }

 // Align the current write position to a multiple of bytes
-static uint8_t* align_ptr(uint8_t* ptr, uint32_t multiple)
+static uint8_t *align_ptr(uint8_t *ptr, uint32_t multiple)
 {
     // Compute the pointer modulo the given alignment boundary
     uint32_t rem = ((uint32_t)(uintptr_t)ptr) % multiple;
@@ -144,16 +144,16 @@ static uint8_t* align_ptr(uint8_t* ptr, uint32_t multiple)
 }

 // Allocate a block of executable memory
-uint8_t* alloc_exec_mem(uint32_t mem_size)
+uint8_t *alloc_exec_mem(uint32_t mem_size)
 {
 #ifndef _WIN32
-    uint8_t* mem_block;
+    uint8_t *mem_block;

     // On Linux
     #if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE)
         // Align the requested address to page size
         uint32_t page_size = (uint32_t)sysconf(_SC_PAGESIZE);
-        uint8_t* req_addr = align_ptr((uint8_t*)&alloc_exec_mem, page_size);
+        uint8_t *req_addr = align_ptr((uint8_t*)&alloc_exec_mem, page_size);

         while (req_addr < (uint8_t*)&alloc_exec_mem + INT32_MAX)
         {
@@ -223,16 +223,16 @@ uint8_t* alloc_exec_mem(uint32_t mem_size)
 code_page_t *freelist = NULL;

 // Allocate a single code page from a pool of free pages
-code_page_t* alloc_code_page()
+code_page_t *alloc_code_page()
 {
     // If the free list is empty
     if (!freelist) {
         // Allocate many pages at once
-        uint8_t* code_chunk = alloc_exec_mem(PAGES_PER_ALLOC * CODE_PAGE_SIZE);
+        uint8_t *code_chunk = alloc_exec_mem(PAGES_PER_ALLOC * CODE_PAGE_SIZE);

         // Do this in reverse order so we allocate our pages in order
         for (int i = PAGES_PER_ALLOC - 1; i >= 0; --i) {
-            code_page_t* code_page = malloc(sizeof(code_page_t));
+            code_page_t *code_page = malloc(sizeof(code_page_t));
             code_page->mem_block = code_chunk + i * CODE_PAGE_SIZE;
             assert ((intptr_t)code_page->mem_block % CODE_PAGE_SIZE == 0);
             code_page->page_size = CODE_PAGE_SIZE;
@@ -241,21 +241,21 @@ code_page_t* alloc_code_page()
         }
     }

-    code_page_t* free_page = freelist;
+    code_page_t *free_page = freelist;
     freelist = freelist->_next;

     return free_page;
 }

 // Put a code page back into the allocation pool
-void free_code_page(code_page_t* code_page)
+void free_code_page(code_page_t *code_page)
 {
     code_page->_next = freelist;
     freelist = code_page;
 }

 // Initialize a code block object
-void cb_init(codeblock_t* cb, uint8_t* mem_block, uint32_t mem_size)
+void cb_init(codeblock_t *cb, uint8_t *mem_block, uint32_t mem_size)
 {
     assert (mem_block);
     cb->mem_block = mem_block;
@@ -266,11 +266,11 @@ void cb_init(codeblock_t* cb, uint8_t* mem_block, uint32_t mem_size)
 }

 // Align the current write position to a multiple of bytes
-void cb_align_pos(codeblock_t* cb, uint32_t multiple)
+void cb_align_pos(codeblock_t *cb, uint32_t multiple)
 {
     // Compute the pointer modulo the given alignment boundary
-    uint8_t* ptr = &cb->mem_block[cb->write_pos];
-    uint8_t* aligned_ptr = align_ptr(ptr, multiple);
+    uint8_t *ptr = &cb->mem_block[cb->write_pos];
+    uint8_t *aligned_ptr = align_ptr(ptr, multiple);

     // Pad the pointer by the necessary amount to align it
     ptrdiff_t pad = aligned_ptr - ptr;
@@ -278,14 +278,14 @@ void cb_align_pos(codeblock_t* cb, uint32_t multiple)
 }

 // Set the current write position
-void cb_set_pos(codeblock_t* cb, uint32_t pos)
+void cb_set_pos(codeblock_t *cb, uint32_t pos)
 {
     assert (pos < cb->mem_size);
     cb->write_pos = pos;
 }

 // Set the current write position from a pointer
-void cb_set_write_ptr(codeblock_t* cb, uint8_t* code_ptr)
+void cb_set_write_ptr(codeblock_t *cb, uint8_t *code_ptr)
 {
     intptr_t pos = code_ptr - cb->mem_block;
     assert (pos < cb->mem_size);
@@ -293,20 +293,20 @@ void cb_set_write_ptr(codeblock_t* cb, uint8_t* code_ptr)
 }

 // Get a direct pointer into the executable memory block
-uint8_t* cb_get_ptr(codeblock_t* cb, uint32_t index)
+uint8_t *cb_get_ptr(codeblock_t *cb, uint32_t index)
 {
     assert (index < cb->mem_size);
     return &cb->mem_block[index];
 }

 // Get a direct pointer to the current write position
-uint8_t* cb_get_write_ptr(codeblock_t* cb)
+uint8_t *cb_get_write_ptr(codeblock_t *cb)
 {
     return cb_get_ptr(cb, cb->write_pos);
 }

 // Write a byte at the current position
-void cb_write_byte(codeblock_t* cb, uint8_t byte)
+void cb_write_byte(codeblock_t *cb, uint8_t byte)
 {
     assert (cb->mem_block);
     assert (cb->write_pos + 1 <= cb->mem_size);
@@ -314,7 +314,7 @@ void cb_write_byte(codeblock_t* cb, uint8_t byte)
 }

 // Write multiple bytes starting from the current position
-void cb_write_bytes(codeblock_t* cb, uint32_t num_bytes, ...)
+void cb_write_bytes(codeblock_t *cb, uint32_t num_bytes, ...)
 {
     va_list va;
     va_start(va, num_bytes);
@@ -329,7 +329,7 @@ void cb_write_bytes(codeblock_t* cb, uint32_t num_bytes, ...)
 }

 // Write a signed integer over a given number of bits at the current position
-void cb_write_int(codeblock_t* cb, uint64_t val, uint32_t num_bits)
+void cb_write_int(codeblock_t *cb, uint64_t val, uint32_t num_bits)
 {
     assert (num_bits > 0);
     assert (num_bits % 8 == 0);
@@ -378,7 +378,7 @@ void cb_write_int(codeblock_t* cb, uint64_t val, uint32_t num_bits)
 }

 // Allocate a new label with a given name
-uint32_t cb_new_label(codeblock_t* cb, const char* name)
+uint32_t cb_new_label(codeblock_t *cb, const char *name)
 {
     //if (hasASM)
     //    writeString(to!string(label) ~ ":");
@@ -396,14 +396,14 @@ uint32_t cb_new_label(codeblock_t* cb, const char* name)
 }

 // Write a label at the current address
-void cb_write_label(codeblock_t* cb, uint32_t label_idx)
+void cb_write_label(codeblock_t *cb, uint32_t label_idx)
 {
     assert (label_idx < MAX_LABELS);
     cb->label_addrs[label_idx] = cb->write_pos;
 }

 // Add a label reference at the current write position
-void cb_label_ref(codeblock_t* cb, uint32_t label_idx)
+void cb_label_ref(codeblock_t *cb, uint32_t label_idx)
 {
     assert (label_idx < MAX_LABELS);
     assert (cb->num_refs < MAX_LABEL_REFS);
@@ -414,7 +414,7 @@ void cb_label_ref(codeblock_t* cb, uint32_t label_idx)
 }

 // Link internal label references
-void cb_link_labels(codeblock_t* cb)
+void cb_link_labels(codeblock_t *cb)
 {
     uint32_t orig_pos = cb->write_pos;

@@ -516,7 +516,7 @@ uint32_t disp_size(x86opnd_t opnd)

 // Write the REX byte
 static void cb_write_rex(
-    codeblock_t* cb,
+    codeblock_t *cb,
     bool w_flag,
     uint8_t reg_no,
     uint8_t idx_reg_no,
@@ -539,7 +539,7 @@ static void cb_write_rex(
 }

 // Write an opcode byte with an embedded register operand
-static void cb_write_opcode(codeblock_t* cb, uint8_t opcode, x86opnd_t reg)
+static void cb_write_opcode(codeblock_t *cb, uint8_t opcode, x86opnd_t reg)
 {
     // Write the reg field into the opcode byte
     uint8_t op_byte = opcode | (reg.as.reg.reg_no & 7);
@@ -548,7 +548,7 @@ static void cb_write_opcode(codeblock_t* cb, uint8_t opcode, x86opnd_t reg)

 // Encode an RM instruction
 void cb_write_rm(
-    codeblock_t* cb,
+    codeblock_t *cb,
     bool szPref,
     bool rexW,
     x86opnd_t r_opnd,
@@ -709,8 +709,8 @@ void cb_write_rm(

 // Encode a mul-like single-operand RM instruction
 void write_rm_unary(
-    codeblock_t* cb,
-    const char* mnem,
+    codeblock_t *cb,
+    const char *mnem,
     uint8_t opMemReg8,
     uint8_t opMemRegPref,
     uint8_t opExt,
@@ -738,8 +738,8 @@ void write_rm_unary(

 // Encode an add-like RM instruction with multiple possible encodings
 void cb_write_rm_multi(
-    codeblock_t* cb,
-    const char* mnem,
+    codeblock_t *cb,
+    const char *mnem,
     uint8_t opMemReg8,
     uint8_t opMemRegPref,
     uint8_t opRegMem8,
@@ -837,8 +837,8 @@ void cb_write_rm_multi(

 // Encode a single-operand shift instruction
 void cb_write_shift(
-    codeblock_t* cb,
-    const char* mnem,
+    codeblock_t *cb,
+    const char *mnem,
     uint8_t opMemOnePref,
     uint8_t opMemClPref,
     uint8_t opMemImmPref,
@@ -887,7 +887,7 @@ void cb_write_shift(

 // Encode a relative jump to a label (direct or conditional)
 // Note: this always encodes a 32-bit offset
-void cb_write_jcc(codeblock_t* cb, const char* mnem, uint8_t op0, uint8_t op1, uint32_t label_idx)
+void cb_write_jcc(codeblock_t *cb, const char *mnem, uint8_t op0, uint8_t op1, uint32_t label_idx)
 {
     //cb.writeASM(mnem, label);

@@ -904,7 +904,7 @@ void cb_write_jcc(codeblock_t* cb, const char* mnem, uint8_t op0, uint8_t op1, uint32_t label_idx)
 }

 // Encode a relative jump to a pointer at a 32-bit offset (direct or conditional)
-void cb_write_jcc_ptr(codeblock_t* cb, const char* mnem, uint8_t op0, uint8_t op1, uint8_t* dst_ptr)
+void cb_write_jcc_ptr(codeblock_t *cb, const char *mnem, uint8_t op0, uint8_t op1, uint8_t *dst_ptr)
 {
     //cb.writeASM(mnem, label);

@@ -914,7 +914,7 @@ void cb_write_jcc_ptr(codeblock_t* cb, const char* mnem, uint8_t op0, uint8_t op1, uint8_t* dst_ptr)
     cb_write_byte(cb, op1);

     // Pointer to the end of this jump instruction
-    uint8_t* end_ptr = &cb->mem_block[cb->write_pos] + 4;
+    uint8_t *end_ptr = &cb->mem_block[cb->write_pos] + 4;

     // Compute the jump offset
     int64_t rel64 = (int64_t)(dst_ptr - end_ptr);
@@ -925,7 +925,7 @@ void cb_write_jcc_ptr(codeblock_t* cb, const char* mnem, uint8_t op0, uint8_t op1, uint8_t* dst_ptr)
 }

 // Encode a conditional move instruction
-void cb_write_cmov(codeblock_t* cb, const char* mnem, uint8_t opcode1, x86opnd_t dst, x86opnd_t src)
+void cb_write_cmov(codeblock_t *cb, const char *mnem, uint8_t opcode1, x86opnd_t dst, x86opnd_t src)
 {
     //cb.writeASM(mnem, dst, src);

@@ -940,7 +940,7 @@ void cb_write_cmov(codeblock_t* cb, const char* mnem, uint8_t opcode1, x86opnd_t dst, x86opnd_t src)
 }

 // add - Integer addition
-void add(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
+void add(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
 {
     cb_write_rm_multi(
         cb,
@@ -959,7 +959,7 @@ void add(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
 }

 /// and - Bitwise AND
-void and(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
+void and(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
 {
     cb_write_rm_multi(
         cb,
@@ -978,7 +978,7 @@ void and(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
 }

 // call - Call to a pointer with a 32-bit displacement offset
-void call_rel32(codeblock_t* cb, int32_t rel32)
+void call_rel32(codeblock_t *cb, int32_t rel32)
 {
     //cb.writeASM("call", rel32);

@@ -990,12 +990,12 @@ void call_rel32(codeblock_t* cb, int32_t rel32)
 }

 // call - Call a pointer, encode with a 32-bit offset if possible
-void call_ptr(codeblock_t* cb, x86opnd_t scratch_reg, uint8_t* dst_ptr)
+void call_ptr(codeblock_t *cb, x86opnd_t scratch_reg, uint8_t *dst_ptr)
 {
     assert (scratch_reg.type == OPND_REG);

     // Pointer to the end of this call instruction
-    uint8_t* end_ptr = &cb->mem_block[cb->write_pos] + 5;
+    uint8_t *end_ptr = &cb->mem_block[cb->write_pos] + 5;

     // Compute the jump offset
     int64_t rel64 = (int64_t)(dst_ptr - end_ptr);
@@ -1013,7 +1013,7 @@ void call_ptr(codeblock_t* cb, x86opnd_t scratch_reg, uint8_t* dst_ptr)
 }

 /// call - Call to label with 32-bit offset
-void call_label(codeblock_t* cb, uint32_t label_idx)
+void call_label(codeblock_t *cb, uint32_t label_idx)
 {
     //cb.writeASM("call", label);

@@ -1028,46 +1028,46 @@ void call_label(codeblock_t* cb, uint32_t label_idx)
 }

 /// call - Indirect call with an R/M operand
-void call(codeblock_t* cb, x86opnd_t opnd)
+void call(codeblock_t *cb, x86opnd_t opnd)
 {
     //cb.writeASM("call", opnd);
     cb_write_rm(cb, false, false, NO_OPND, opnd, 2, 1, 0xFF);
 }

 /// cmovcc - Conditional move
-void cmova(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmova", 0x47, dst, src); }
-void cmovae(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovae", 0x43, dst, src); }
-void cmovb(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovb", 0x42, dst, src); }
-void cmovbe(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovbe", 0x46, dst, src); }
-void cmovc(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovc", 0x42, dst, src); }
-void cmove(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmove", 0x44, dst, src); }
-void cmovg(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovg", 0x4F, dst, src); }
-void cmovge(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovge", 0x4D, dst, src); }
-void cmovl(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovl", 0x4C, dst, src); }
-void cmovle(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovle", 0x4E, dst, src); }
-void cmovna(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovna", 0x46, dst, src); }
-void cmovnae(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnae", 0x42, dst, src); }
-void cmovnb(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnb", 0x43, dst, src); }
-void cmovnbe(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnbe", 0x47, dst, src); }
-void cmovnc(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnc", 0x43, dst, src); }
-void cmovne(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovne", 0x45, dst, src); }
-void cmovng(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovng", 0x4E, dst, src); }
-void cmovnge(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnge", 0x4C, dst, src); }
-void cmovnl(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnl" , 0x4D, dst, src); }
-void cmovnle(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnle", 0x4F, dst, src); }
-void cmovno(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovno", 0x41, dst, src); }
-void cmovnp(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnp", 0x4B, dst, src); }
-void cmovns(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovns", 0x49, dst, src); }
-void cmovnz(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnz", 0x45, dst, src); }
-void cmovo(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovo", 0x40, dst, src); }
-void cmovp(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovp", 0x4A, dst, src); }
-void cmovpe(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovpe", 0x4A, dst, src); }
-void cmovpo(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovpo", 0x4B, dst, src); }
-void cmovs(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovs", 0x48, dst, src); }
-void cmovz(codeblock_t* cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovz", 0x44, dst, src); }
+void cmova(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmova", 0x47, dst, src); }
+void cmovae(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovae", 0x43, dst, src); }
+void cmovb(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovb", 0x42, dst, src); }
+void cmovbe(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovbe", 0x46, dst, src); }
+void cmovc(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovc", 0x42, dst, src); }
+void cmove(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmove", 0x44, dst, src); }
+void cmovg(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovg", 0x4F, dst, src); }
+void cmovge(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovge", 0x4D, dst, src); }
+void cmovl(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovl", 0x4C, dst, src); }
+void cmovle(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovle", 0x4E, dst, src); }
+void cmovna(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovna", 0x46, dst, src); }
+void cmovnae(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnae", 0x42, dst, src); }
+void cmovnb(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnb", 0x43, dst, src); }
+void cmovnbe(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnbe", 0x47, dst, src); }
+void cmovnc(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnc", 0x43, dst, src); }
+void cmovne(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovne", 0x45, dst, src); }
+void cmovng(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovng", 0x4E, dst, src); }
+void cmovnge(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnge", 0x4C, dst, src); }
+void cmovnl(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnl" , 0x4D, dst, src); }
+void cmovnle(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnle", 0x4F, dst, src); }
+void cmovno(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovno", 0x41, dst, src); }
+void cmovnp(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnp", 0x4B, dst, src); }
+void cmovns(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovns", 0x49, dst, src); }
+void cmovnz(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovnz", 0x45, dst, src); }
+void cmovo(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovo", 0x40, dst, src); }
+void cmovp(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovp", 0x4A, dst, src); }
+void cmovpe(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovpe", 0x4A, dst, src); }
+void cmovpo(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovpo", 0x4B, dst, src); }
+void cmovs(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovs", 0x48, dst, src); }
+void cmovz(codeblock_t *cb, x86opnd_t dst, x86opnd_t src) { cb_write_cmov(cb, "cmovz", 0x44, dst, src); }

 /// cmp - Compare and set flags
-void cmp(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
+void cmp(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
 {
     cb_write_rm_multi(
         cb,
@@ -1086,21 +1086,21 @@ void cmp(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
 }

 /// cdq - Convert doubleword to quadword
-void cdq(codeblock_t* cb)
+void cdq(codeblock_t *cb)
 {
     //cb.writeASM("cdq");
     cb_write_byte(cb, 0x99);
 }

 /// cqo - Convert quadword to octaword
-void cqo(codeblock_t* cb)
+void cqo(codeblock_t *cb)
 {
     //cb.writeASM("cqo");
     cb_write_bytes(cb, 2, 0x48, 0x99);
 }

 /// Interrupt 3 - trap to debugger
-void int3(codeblock_t* cb)
+void int3(codeblock_t *cb)
 {
     //cb.writeASM("INT 3");
     cb_write_byte(cb, 0xCC);
@@ -1205,80 +1205,80 @@ void imul(CodeBlock cb, X86Opnd opnd0, X86Opnd opnd1, X86Opnd opnd2)
 */

 /// jcc - relative jumps to a label
-void ja_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "ja" , 0x0F, 0x87, label_idx); }
-void jae_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jae" , 0x0F, 0x83, label_idx); }
-void jb_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jb" , 0x0F, 0x82, label_idx); }
-void jbe_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jbe" , 0x0F, 0x86, label_idx); }
-void jc_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jc" , 0x0F, 0x82, label_idx); }
-void je_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "je" , 0x0F, 0x84, label_idx); }
-void jg_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jg" , 0x0F, 0x8F, label_idx); }
-void jge_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jge" , 0x0F, 0x8D, label_idx); }
-void jl_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jl" , 0x0F, 0x8C, label_idx); }
-void jle_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jle" , 0x0F, 0x8E, label_idx); }
-void jna_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jna" , 0x0F, 0x86, label_idx); }
-void jnae_label(codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jnae", 0x0F, 0x82, label_idx); }
-void jnb_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jnb" , 0x0F, 0x83, label_idx); }
-void jnbe_label(codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jnbe", 0x0F, 0x87, label_idx); }
-void jnc_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jnc" , 0x0F, 0x83, label_idx); }
-void jne_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jne" , 0x0F, 0x85, label_idx); }
-void jng_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jng" , 0x0F, 0x8E, label_idx); }
-void jnge_label(codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jnge", 0x0F, 0x8C, label_idx); }
-void jnl_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jnl" , 0x0F, 0x8D, label_idx); }
-void jnle_label(codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jnle", 0x0F, 0x8F, label_idx); }
-void jno_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jno" , 0x0F, 0x81, label_idx); }
-void jnp_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jnp" , 0x0F, 0x8b, label_idx); }
-void jns_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jns" , 0x0F, 0x89, label_idx); }
-void jnz_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jnz" , 0x0F, 0x85, label_idx); }
-void jo_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jo" , 0x0F, 0x80, label_idx); }
-void jp_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jp" , 0x0F, 0x8A, label_idx); }
-void jpe_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jpe" , 0x0F, 0x8A, label_idx); }
-void jpo_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jpo" , 0x0F, 0x8B, label_idx); }
-void js_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "js" , 0x0F, 0x88, label_idx); }
-void jz_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jz" , 0x0F, 0x84, label_idx); }
-void jmp_label (codeblock_t* cb, uint32_t label_idx) { cb_write_jcc(cb, "jmp" , 0xFF, 0xE9, label_idx); }
+void ja_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "ja" , 0x0F, 0x87, label_idx); }
+void jae_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jae" , 0x0F, 0x83, label_idx); }
+void jb_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jb" , 0x0F, 0x82, label_idx); }
+void jbe_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jbe" , 0x0F, 0x86, label_idx); }
+void jc_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jc" , 0x0F, 0x82, label_idx); }
+void je_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "je" , 0x0F, 0x84, label_idx); }
+void jg_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jg" , 0x0F, 0x8F, label_idx); }
+void jge_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jge" , 0x0F, 0x8D, label_idx); }
+void jl_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jl" , 0x0F, 0x8C, label_idx); }
+void jle_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jle" , 0x0F, 0x8E, label_idx); }
+void jna_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jna" , 0x0F, 0x86, label_idx); }
+void jnae_label(codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnae", 0x0F, 0x82, label_idx); }
+void jnb_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnb" , 0x0F, 0x83, label_idx); }
+void jnbe_label(codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnbe", 0x0F, 0x87, label_idx); }
+void jnc_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnc" , 0x0F, 0x83, label_idx); }
+void jne_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jne" , 0x0F, 0x85, label_idx); }
+void jng_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jng" , 0x0F, 0x8E, label_idx); }
+void jnge_label(codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnge", 0x0F, 0x8C, label_idx); }
+void jnl_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnl" , 0x0F, 0x8D, label_idx); }
+void jnle_label(codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnle", 0x0F, 0x8F, label_idx); }
+void jno_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jno" , 0x0F, 0x81, label_idx); }
+void jnp_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnp" , 0x0F, 0x8b, label_idx); }
+void jns_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jns" , 0x0F, 0x89, label_idx); }
+void jnz_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jnz" , 0x0F, 0x85, label_idx); }
+void jo_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jo" , 0x0F, 0x80, label_idx); }
+void jp_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jp" , 0x0F, 0x8A, label_idx); }
+void jpe_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jpe" , 0x0F, 0x8A, label_idx); }
+void jpo_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jpo" , 0x0F, 0x8B, label_idx); }
+void js_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "js" , 0x0F, 0x88, label_idx); }
+void jz_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jz" , 0x0F, 0x84, label_idx); }
+void jmp_label (codeblock_t *cb, uint32_t label_idx) { cb_write_jcc(cb, "jmp" , 0xFF, 0xE9, label_idx); }

 /// jcc - relative jumps to a pointer (32-bit offset)
-void ja_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "ja" , 0x0F, 0x87, ptr); }
-void jae_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jae" , 0x0F, 0x83, ptr); }
-void jb_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jb" , 0x0F, 0x82, ptr); }
-void jbe_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jbe" , 0x0F, 0x86, ptr); }
-void jc_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jc" , 0x0F, 0x82, ptr); }
-void je_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "je" , 0x0F, 0x84, ptr); }
-void jg_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jg" , 0x0F, 0x8F, ptr); }
-void jge_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jge" , 0x0F, 0x8D, ptr); }
-void jl_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jl" , 0x0F, 0x8C, ptr); }
-void jle_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jle" , 0x0F, 0x8E, ptr); }
-void jna_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jna" , 0x0F, 0x86, ptr); }
-void jnae_ptr(codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jnae", 0x0F, 0x82, ptr); }
-void jnb_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jnb" , 0x0F, 0x83, ptr); }
-void jnbe_ptr(codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jnbe", 0x0F, 0x87, ptr); }
-void jnc_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jnc" , 0x0F, 0x83, ptr); }
-void jne_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jne" , 0x0F, 0x85, ptr); }
-void jng_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jng" , 0x0F, 0x8E, ptr); }
-void jnge_ptr(codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jnge", 0x0F, 0x8C, ptr); }
-void jnl_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jnl" , 0x0F, 0x8D, ptr); }
-void jnle_ptr(codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jnle", 0x0F, 0x8F, ptr); }
-void jno_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jno" , 0x0F, 0x81, ptr); }
-void jnp_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jnp" , 0x0F, 0x8b, ptr); }
-void jns_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jns" , 0x0F, 0x89, ptr); }
-void jnz_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jnz" , 0x0F, 0x85, ptr); }
-void jo_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jo" , 0x0F, 0x80, ptr); }
-void jp_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jp" , 0x0F, 0x8A, ptr); }
-void jpe_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jpe" , 0x0F, 0x8A, ptr); }
-void jpo_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jpo" , 0x0F, 0x8B, ptr); }
-void js_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "js" , 0x0F, 0x88, ptr); }
-void jz_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jz" , 0x0F, 0x84, ptr); }
-void jmp_ptr (codeblock_t* cb, uint8_t* ptr) { cb_write_jcc_ptr(cb, "jmp" , 0xFF, 0xE9, ptr); }
+void ja_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "ja" , 0x0F, 0x87, ptr); }
+void jae_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jae" , 0x0F, 0x83, ptr); }
+void jb_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jb" , 0x0F, 0x82, ptr); }
+void jbe_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jbe" , 0x0F, 0x86, ptr); }
+void jc_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jc" , 0x0F, 0x82, ptr); }
+void je_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "je" , 0x0F, 0x84, ptr); }
+void jg_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jg" , 0x0F, 0x8F, ptr); }
+void jge_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jge" , 0x0F, 0x8D, ptr); }
+void jl_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jl" , 0x0F, 0x8C, ptr); }
+void jle_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jle" , 0x0F, 0x8E, ptr); }
+void jna_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jna" , 0x0F, 0x86, ptr); }
+void jnae_ptr(codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnae", 0x0F, 0x82, ptr); }
+void jnb_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnb" , 0x0F, 0x83, ptr); }
+void jnbe_ptr(codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnbe", 0x0F, 0x87, ptr); }
+void jnc_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnc" , 0x0F, 0x83, ptr); }
+void jne_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jne" , 0x0F, 0x85, ptr); }
+void jng_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jng" , 0x0F, 0x8E, ptr); }
+void jnge_ptr(codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnge", 0x0F, 0x8C, ptr); }
+void jnl_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnl" , 0x0F, 0x8D, ptr); }
+void jnle_ptr(codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnle", 0x0F, 0x8F, ptr); }
+void jno_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jno" , 0x0F, 0x81, ptr); }
+void jnp_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnp" , 0x0F, 0x8b, ptr); }
+void jns_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jns" , 0x0F, 0x89, ptr); }
+void jnz_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jnz" , 0x0F, 0x85, ptr); }
+void jo_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jo" , 0x0F, 0x80, ptr); }
+void jp_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jp" , 0x0F, 0x8A, ptr); }
+void jpe_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jpe" , 0x0F, 0x8A, ptr); }
+void jpo_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jpo" , 0x0F, 0x8B, ptr); }
+void js_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "js" , 0x0F, 0x88, ptr); }
+void jz_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jz" , 0x0F, 0x84, ptr); }
+void jmp_ptr (codeblock_t *cb, uint8_t *ptr) { cb_write_jcc_ptr(cb, "jmp" , 0xFF, 0xE9, ptr); }

 /// jmp - Indirect jump near to an R/M operand
-void jmp_rm(codeblock_t* cb, x86opnd_t opnd)
+void jmp_rm(codeblock_t *cb, x86opnd_t opnd)
 {
     //cb.writeASM("jmp", opnd);
     cb_write_rm(cb, false, false, NO_OPND, opnd, 4, 1, 0xFF);
 }

 // jmp - Jump with relative 32-bit offset
-void jmp32(codeblock_t* cb, int32_t offset)
+void jmp32(codeblock_t *cb, int32_t offset)
 {
     //cb.writeASM("jmp", ((offset > 0)? "+":"-") ~ to!string(offset));
     cb_write_byte(cb, 0xE9);
@@ -1286,7 +1286,7 @@ void jmp32(codeblock_t* cb, int32_t offset)
 }

 /// lea - Load Effective Address
-void lea(codeblock_t* cb, x86opnd_t dst, x86opnd_t src)
+void lea(codeblock_t *cb, x86opnd_t dst, x86opnd_t src)
 {
     //cb.writeASM("lea", dst, src);
     assert (dst.num_bits == 64);
@@ -1294,7 +1294,7 @@ void lea(codeblock_t* cb, x86opnd_t dst, x86opnd_t src)
 }

 /// mov - Data move operation
-void mov(codeblock_t* cb, x86opnd_t dst, x86opnd_t src)
+void mov(codeblock_t *cb, x86opnd_t dst, x86opnd_t src)
 {
     // R/M + Imm
     if (src.type == OPND_IMM)
@@ -1357,7 +1357,7 @@ void mov(codeblock_t* cb, x86opnd_t dst, x86opnd_t src)
 }

 /// movsx - Move with sign extension (signed integers)
-void movsx(codeblock_t* cb, x86opnd_t dst, x86opnd_t src)
+void movsx(codeblock_t *cb, x86opnd_t dst, x86opnd_t src)
 {
     assert (dst.type == OPND_REG);
     assert (src.type == OPND_REG || src.type == OPND_MEM);
@@ -1385,7 +1385,7 @@ void movsx(codeblock_t* cb, x86opnd_t dst, x86opnd_t src)

 /*
 /// movzx - Move with zero extension (unsigned values)
-void movzx(codeblock_t* cb, x86opnd_t dst, x86opnd_t src)
+void movzx(codeblock_t *cb, x86opnd_t dst, x86opnd_t src)
 {
     cb.writeASM("movzx", dst, src);

@@ -1424,7 +1424,7 @@ void movzx(codeblock_t* cb, x86opnd_t dst, x86opnd_t src)
 */

 // neg - Integer negation (multiplication by -1)
-void neg(codeblock_t* cb, x86opnd_t opnd)
+void neg(codeblock_t *cb, x86opnd_t opnd)
 {
     write_rm_unary(
         cb,
@@ -1437,7 +1437,7 @@ void neg(codeblock_t* cb, x86opnd_t opnd)
 }

 // nop - Noop, one or multiple bytes long
-void nop(codeblock_t* cb, uint32_t length)
+void nop(codeblock_t *cb, uint32_t length)
 {
     switch (length)
     {
@@ -1504,7 +1504,7 @@ void nop(codeblock_t* cb, uint32_t length)
 }

 // not - Bitwise NOT
-void not(codeblock_t* cb, x86opnd_t opnd)
+void not(codeblock_t *cb, x86opnd_t opnd)
 {
     write_rm_unary(
         cb,
@@ -1517,7 +1517,7 @@ void not(codeblock_t* cb, x86opnd_t opnd)
 }

 /// or - Bitwise OR
-void or(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
+void or(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
 {
     cb_write_rm_multi(
         cb,
@@ -1536,7 +1536,7 @@ void or(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
 }

 /// pop - Pop a register off the stack
-void pop(codeblock_t* cb, x86opnd_t opnd)
+void pop(codeblock_t *cb, x86opnd_t opnd)
 {
     assert (opnd.num_bits == 64);

@@ -1554,7 +1554,7 @@ void pop(codeblock_t* cb, x86opnd_t opnd)
 }

 /// popfq - Pop the flags register (64-bit)
-void popfq(codeblock_t* cb)
+void popfq(codeblock_t *cb)
 {
     //cb.writeASM("popfq");

@@ -1563,7 +1563,7 @@ void popfq(codeblock_t* cb)
 }

 /// push - Push an operand on the stack
-void push(codeblock_t* cb, x86opnd_t opnd)
+void push(codeblock_t *cb, x86opnd_t opnd)
 {
     assert (opnd.num_bits == 64);

@@ -1581,21 +1581,21 @@ void push(codeblock_t* cb, x86opnd_t opnd)
 }

 /// pushfq - Push the flags register (64-bit)
-void pushfq(codeblock_t* cb)
+void pushfq(codeblock_t *cb)
 {
     //cb.writeASM("pushfq");
     cb_write_byte(cb, 0x9C);
 }

 /// ret - Return from call, popping only the return address
-void ret(codeblock_t* cb)
+void ret(codeblock_t *cb)
 {
     //cb.writeASM("ret");
     cb_write_byte(cb, 0xC3);
 }

 // sal - Shift arithmetic left
-void sal(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
+void sal(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
 {
     cb_write_shift(
         cb,
@@ -1610,7 +1610,7 @@ void sal(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
 }

 /// sar - Shift arithmetic right (signed)
-void sar(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
+void sar(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
 {
     cb_write_shift(
         cb,
@@ -1624,7 +1624,7 @@ void sar(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
     );
 }
 // shl - Shift logical left
-void shl(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
+void shl(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
 {
     cb_write_shift(
         cb,
@@ -1639,7 +1639,7 @@ void shl(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
 }

 /// shr - Shift logical right (unsigned)
-void shr(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
+void shr(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
 {
     cb_write_shift(
         cb,
@@ -1654,7 +1654,7 @@ void shr(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
 }

 /// sub - Integer subtraction
-void sub(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
+void sub(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
 {
     cb_write_rm_multi(
         cb,
@@ -1673,7 +1673,7 @@ void sub(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
 }

 /// test - Logical Compare
-void test(codeblock_t* cb, x86opnd_t rm_opnd, x86opnd_t test_opnd)
+void test(codeblock_t *cb, x86opnd_t rm_opnd, x86opnd_t test_opnd)
 {
     assert (rm_opnd.type == OPND_REG || rm_opnd.type == OPND_MEM);
     assert (test_opnd.type == OPND_REG || test_opnd.type == OPND_IMM);
@@ -1728,13 +1728,13 @@ void test(codeblock_t* cb, x86opnd_t rm_opnd, x86opnd_t test_opnd)
 }

 /// Undefined opcode
-void ud2(codeblock_t* cb)
+void ud2(codeblock_t *cb)
 {
     cb_write_bytes(cb, 2, 0x0F, 0x0B);
 }

 /// xchg - Exchange Register/Memory with Register
-void xchg(codeblock_t* cb, x86opnd_t rm_opnd, x86opnd_t r_opnd)
+void xchg(codeblock_t *cb, x86opnd_t rm_opnd, x86opnd_t r_opnd)
 {
     assert (rm_opnd.num_bits == 64);
     assert (r_opnd.num_bits == 64);
@@ -1757,7 +1757,7 @@ void xchg(codeblock_t* cb, x86opnd_t rm_opnd, x86opnd_t r_opnd)
 }

 /// xor - Exclusive bitwise OR
-void xor(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
+void xor(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1)
 {
     cb_write_rm_multi(
         cb,
@@ -1776,7 +1776,7 @@ void xor(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1)
 }

 // LOCK - lock prefix for atomic shared memory operations
-void cb_write_lock_prefix(codeblock_t* cb)
+void cb_write_lock_prefix(codeblock_t *cb)
 {
     cb_write_byte(cb, 0xF0);
 }
yjit_asm.h: 290 changed lines
|
@ -32,7 +32,7 @@ typedef struct LabelRef
|
|||
typedef struct CodeBlock
|
||||
{
|
||||
// Memory block
|
||||
uint8_t* mem_block;
|
||||
uint8_t *mem_block;
|
||||
|
||||
// Memory block size
|
||||
uint32_t mem_size;
|
||||
|
@ -45,7 +45,7 @@ typedef struct CodeBlock
|
|||
|
||||
// Table of registered label names
|
||||
// Note that these should be constant strings only
|
||||
const char* label_names[MAX_LABELS];
|
||||
const char *label_names[MAX_LABELS];
|
||||
|
||||
// References to labels
|
||||
labelref_t label_refs[MAX_LABEL_REFS];
|
||||
|
@ -141,13 +141,13 @@ typedef struct X86Opnd
|
|||
typedef struct code_page_struct
|
||||
{
|
||||
// Chunk of executable memory
|
||||
uint8_t* mem_block;
|
||||
uint8_t *mem_block;
|
||||
|
||||
// Size of the executable memory chunk
|
||||
uint32_t page_size;
|
||||
|
||||
// Next node in the free list (private)
|
||||
struct code_page_struct* _next;
|
||||
struct code_page_struct *_next;
|
||||
|
||||
} code_page_t;
|
||||
|
||||
|
@ -261,150 +261,150 @@ x86opnd_t const_ptr_opnd(const void *ptr);
|
|||
)
|
||||
|
||||
// Machine code allocation
|
||||
uint8_t* alloc_exec_mem(uint32_t mem_size);
|
||||
code_page_t* alloc_code_page(void);
|
||||
void free_code_page(code_page_t* code_page);
|
||||
uint8_t *alloc_exec_mem(uint32_t mem_size);
|
||||
code_page_t *alloc_code_page(void);
|
||||
void free_code_page(code_page_t *code_page);
|
||||
|
||||
// Code block methods
|
||||
void cb_init(codeblock_t* cb, uint8_t* mem_block, uint32_t mem_size);
|
||||
void cb_align_pos(codeblock_t* cb, uint32_t multiple);
|
||||
void cb_set_pos(codeblock_t* cb, uint32_t pos);
|
||||
void cb_set_write_ptr(codeblock_t* cb, uint8_t* code_ptr);
|
||||
uint8_t* cb_get_ptr(codeblock_t* cb, uint32_t index);
|
||||
uint8_t* cb_get_write_ptr(codeblock_t* cb);
|
||||
void cb_write_byte(codeblock_t* cb, uint8_t byte);
|
||||
void cb_write_bytes(codeblock_t* cb, uint32_t num_bytes, ...);
|
||||
void cb_write_int(codeblock_t* cb, uint64_t val, uint32_t num_bits);
|
||||
uint32_t cb_new_label(codeblock_t* cb, const char* name);
|
||||
void cb_write_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void cb_label_ref(codeblock_t* cb, uint32_t label_idx);
|
||||
void cb_link_labels(codeblock_t* cb);
|
||||
void cb_init(codeblock_t *cb, uint8_t *mem_block, uint32_t mem_size);
|
||||
void cb_align_pos(codeblock_t *cb, uint32_t multiple);
|
||||
void cb_set_pos(codeblock_t *cb, uint32_t pos);
|
||||
void cb_set_write_ptr(codeblock_t *cb, uint8_t *code_ptr);
|
||||
uint8_t *cb_get_ptr(codeblock_t *cb, uint32_t index);
|
||||
uint8_t *cb_get_write_ptr(codeblock_t *cb);
|
||||
void cb_write_byte(codeblock_t *cb, uint8_t byte);
|
||||
void cb_write_bytes(codeblock_t *cb, uint32_t num_bytes, ...);
|
||||
void cb_write_int(codeblock_t *cb, uint64_t val, uint32_t num_bits);
|
||||
uint32_t cb_new_label(codeblock_t *cb, const char *name);
|
||||
void cb_write_label(codeblock_t *cb, uint32_t label_idx);
|
||||
void cb_label_ref(codeblock_t *cb, uint32_t label_idx);
|
||||
void cb_link_labels(codeblock_t *cb);
|
||||
|
||||
// Encode individual instructions into a code block
|
||||
void add(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1);
|
||||
void and(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1);
|
||||
void call_ptr(codeblock_t* cb, x86opnd_t scratch_reg, uint8_t* dst_ptr);
|
||||
void call_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void call(codeblock_t* cb, x86opnd_t opnd);
|
||||
void cmova(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovae(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovb(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovbe(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovc(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmove(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovg(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovge(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovl(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovle(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovna(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovnae(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovnb(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovnbe(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovnc(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovne(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovng(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovnge(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovnl(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovnle(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovno(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovnp(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovns(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovnz(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovo(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovp(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovpe(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovpo(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovs(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmovz(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void cmp(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1);
|
||||
void cdq(codeblock_t* cb);
|
||||
void cqo(codeblock_t* cb);
|
||||
void int3(codeblock_t* cb);
|
||||
void ja_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jae_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jb_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jbe_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jc_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void je_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jg_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jge_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jl_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jle_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jna_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jnae_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jnb_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jnbe_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jnc_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jne_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jng_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jnge_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jnl_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jnle_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jno_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jnp_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jns_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jnz_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jo_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jp_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jpe_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jpo_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void js_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jz_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void ja_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jae_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jb_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jbe_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jc_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void je_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jg_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jge_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jl_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jle_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jna_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jnae_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jnb_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jnbe_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jnc_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jne_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jng_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jnge_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jnl_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jnle_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jno_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jnp_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jns_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jnz_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jo_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jp_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jpe_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jpo_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void js_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jz_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jmp_label(codeblock_t* cb, uint32_t label_idx);
|
||||
void jmp_ptr(codeblock_t* cb, uint8_t* ptr);
|
||||
void jmp_rm(codeblock_t* cb, x86opnd_t opnd);
|
||||
void jmp32(codeblock_t* cb, int32_t offset);
|
||||
void lea(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void mov(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void movsx(codeblock_t* cb, x86opnd_t dst, x86opnd_t src);
|
||||
void neg(codeblock_t* cb, x86opnd_t opnd);
|
||||
void nop(codeblock_t* cb, uint32_t length);
|
||||
void not(codeblock_t* cb, x86opnd_t opnd);
|
||||
void or(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1);
|
||||
void pop(codeblock_t* cb, x86opnd_t reg);
|
||||
void popfq(codeblock_t* cb);
|
||||
void push(codeblock_t* cb, x86opnd_t opnd);
|
||||
void pushfq(codeblock_t* cb);
|
||||
void ret(codeblock_t* cb);
|
||||
void sal(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1);
|
||||
void sar(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1);
|
||||
void shl(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1);
|
||||
void shr(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1);
|
||||
void sub(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1);
|
||||
void test(codeblock_t* cb, x86opnd_t rm_opnd, x86opnd_t test_opnd);
|
||||
void ud2(codeblock_t* cb);
|
||||
void xchg(codeblock_t* cb, x86opnd_t rm_opnd, x86opnd_t r_opnd);
|
||||
void xor(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1);
|
||||
void cb_write_lock_prefix(codeblock_t* cb);
|
||||
void add(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1);
|
||||
void and(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1);
|
||||
void call_ptr(codeblock_t *cb, x86opnd_t scratch_reg, uint8_t *dst_ptr);
|
||||
void call_label(codeblock_t *cb, uint32_t label_idx);
|
||||
void call(codeblock_t *cb, x86opnd_t opnd);
void cmova(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovae(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovb(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovbe(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovc(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmove(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovg(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovge(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovl(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovle(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovna(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovnae(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovnb(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovnbe(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovnc(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovne(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovng(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovnge(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovnl(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovnle(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovno(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovnp(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovns(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovnz(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovo(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovp(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovpe(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovpo(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovs(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmovz(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void cmp(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1);
void cdq(codeblock_t *cb);
void cqo(codeblock_t *cb);
void int3(codeblock_t *cb);
void ja_label(codeblock_t *cb, uint32_t label_idx);
void jae_label(codeblock_t *cb, uint32_t label_idx);
void jb_label(codeblock_t *cb, uint32_t label_idx);
void jbe_label(codeblock_t *cb, uint32_t label_idx);
void jc_label(codeblock_t *cb, uint32_t label_idx);
void je_label(codeblock_t *cb, uint32_t label_idx);
void jg_label(codeblock_t *cb, uint32_t label_idx);
void jge_label(codeblock_t *cb, uint32_t label_idx);
void jl_label(codeblock_t *cb, uint32_t label_idx);
void jle_label(codeblock_t *cb, uint32_t label_idx);
void jna_label(codeblock_t *cb, uint32_t label_idx);
void jnae_label(codeblock_t *cb, uint32_t label_idx);
void jnb_label(codeblock_t *cb, uint32_t label_idx);
void jnbe_label(codeblock_t *cb, uint32_t label_idx);
void jnc_label(codeblock_t *cb, uint32_t label_idx);
void jne_label(codeblock_t *cb, uint32_t label_idx);
void jng_label(codeblock_t *cb, uint32_t label_idx);
void jnge_label(codeblock_t *cb, uint32_t label_idx);
void jnl_label(codeblock_t *cb, uint32_t label_idx);
void jnle_label(codeblock_t *cb, uint32_t label_idx);
void jno_label(codeblock_t *cb, uint32_t label_idx);
void jnp_label(codeblock_t *cb, uint32_t label_idx);
void jns_label(codeblock_t *cb, uint32_t label_idx);
void jnz_label(codeblock_t *cb, uint32_t label_idx);
void jo_label(codeblock_t *cb, uint32_t label_idx);
void jp_label(codeblock_t *cb, uint32_t label_idx);
void jpe_label(codeblock_t *cb, uint32_t label_idx);
void jpo_label(codeblock_t *cb, uint32_t label_idx);
void js_label(codeblock_t *cb, uint32_t label_idx);
void jz_label(codeblock_t *cb, uint32_t label_idx);
void ja_ptr(codeblock_t *cb, uint8_t *ptr);
void jae_ptr(codeblock_t *cb, uint8_t *ptr);
void jb_ptr(codeblock_t *cb, uint8_t *ptr);
void jbe_ptr(codeblock_t *cb, uint8_t *ptr);
void jc_ptr(codeblock_t *cb, uint8_t *ptr);
void je_ptr(codeblock_t *cb, uint8_t *ptr);
void jg_ptr(codeblock_t *cb, uint8_t *ptr);
void jge_ptr(codeblock_t *cb, uint8_t *ptr);
void jl_ptr(codeblock_t *cb, uint8_t *ptr);
void jle_ptr(codeblock_t *cb, uint8_t *ptr);
void jna_ptr(codeblock_t *cb, uint8_t *ptr);
void jnae_ptr(codeblock_t *cb, uint8_t *ptr);
void jnb_ptr(codeblock_t *cb, uint8_t *ptr);
void jnbe_ptr(codeblock_t *cb, uint8_t *ptr);
void jnc_ptr(codeblock_t *cb, uint8_t *ptr);
void jne_ptr(codeblock_t *cb, uint8_t *ptr);
void jng_ptr(codeblock_t *cb, uint8_t *ptr);
void jnge_ptr(codeblock_t *cb, uint8_t *ptr);
void jnl_ptr(codeblock_t *cb, uint8_t *ptr);
void jnle_ptr(codeblock_t *cb, uint8_t *ptr);
void jno_ptr(codeblock_t *cb, uint8_t *ptr);
void jnp_ptr(codeblock_t *cb, uint8_t *ptr);
void jns_ptr(codeblock_t *cb, uint8_t *ptr);
void jnz_ptr(codeblock_t *cb, uint8_t *ptr);
void jo_ptr(codeblock_t *cb, uint8_t *ptr);
void jp_ptr(codeblock_t *cb, uint8_t *ptr);
void jpe_ptr(codeblock_t *cb, uint8_t *ptr);
void jpo_ptr(codeblock_t *cb, uint8_t *ptr);
void js_ptr(codeblock_t *cb, uint8_t *ptr);
void jz_ptr(codeblock_t *cb, uint8_t *ptr);
void jmp_label(codeblock_t *cb, uint32_t label_idx);
void jmp_ptr(codeblock_t *cb, uint8_t *ptr);
void jmp_rm(codeblock_t *cb, x86opnd_t opnd);
void jmp32(codeblock_t *cb, int32_t offset);
void lea(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void mov(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void movsx(codeblock_t *cb, x86opnd_t dst, x86opnd_t src);
void neg(codeblock_t *cb, x86opnd_t opnd);
void nop(codeblock_t *cb, uint32_t length);
void not(codeblock_t *cb, x86opnd_t opnd);
void or(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1);
void pop(codeblock_t *cb, x86opnd_t reg);
void popfq(codeblock_t *cb);
void push(codeblock_t *cb, x86opnd_t opnd);
void pushfq(codeblock_t *cb);
void ret(codeblock_t *cb);
void sal(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1);
void sar(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1);
void shl(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1);
void shr(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1);
void sub(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1);
void test(codeblock_t *cb, x86opnd_t rm_opnd, x86opnd_t test_opnd);
void ud2(codeblock_t *cb);
void xchg(codeblock_t *cb, x86opnd_t rm_opnd, x86opnd_t r_opnd);
void xor(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1);
void cb_write_lock_prefix(codeblock_t *cb);

#endif
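For reference, a minimal sketch of driving this assembler API end to end. This is not part of the commit; it assumes the RAX/RDI register operands, add(), and imm_opnd() declared elsewhere in this header, and it relies on alloc_exec_mem()/cb_init() from yjit_asm.c:

    #include <stdint.h>

    // Emit `return arg + 1` into an executable buffer, then hand back a callable pointer.
    typedef int64_t (*add_one_fn)(int64_t);

    static add_one_fn emit_add_one_sketch(void)
    {
        static codeblock_t cb_obj;
        uint8_t *mem = alloc_exec_mem(4096);   // map an executable page
        cb_init(&cb_obj, mem, 4096);           // wrap it in a codeblock

        mov(&cb_obj, RAX, RDI);                // SysV x86-64: first argument arrives in RDI
        add(&cb_obj, RAX, imm_opnd(1));        // RAX = arg + 1
        ret(&cb_obj);                          // return value in RAX

        return (add_one_fn)mem;                // the buffer is now directly callable
    }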
238 yjit_codegen.c
@@ -30,11 +30,11 @@ static st_table *yjit_method_codegen_table = NULL;
 
 // Code block into which we write machine code
 static codeblock_t block;
-codeblock_t* cb = NULL;
+codeblock_t *cb = NULL;
 
 // Code block into which we write out-of-line machine code
 static codeblock_t outline_block;
-codeblock_t* ocb = NULL;
+codeblock_t *ocb = NULL;
 
 // Code for exiting back to the interpreter from the leave insn
 static void *leave_exit_code;
@@ -61,7 +61,7 @@ uint32_t yjit_codepage_frozen_bytes = 0;
 // Print the current source location for debugging purposes
 RBIMPL_ATTR_MAYBE_UNUSED()
 static void
-jit_print_loc(jitstate_t* jit, const char* msg)
+jit_print_loc(jitstate_t *jit, const char *msg)
 {
     char *ptr;
     long len;
@@ -82,21 +82,21 @@ jit_obj_info_dump(codeblock_t *cb, x86opnd_t opnd) {
 
 // Get the current instruction's opcode
 static int
-jit_get_opcode(jitstate_t* jit)
+jit_get_opcode(jitstate_t *jit)
 {
     return jit->opcode;
 }
 
 // Get the index of the next instruction
 static uint32_t
-jit_next_idx(jitstate_t* jit)
+jit_next_insn_idx(jitstate_t *jit)
 {
     return jit->insn_idx + insn_len(jit_get_opcode(jit));
 }
 
 // Get an instruction argument by index
 static VALUE
-jit_get_arg(jitstate_t* jit, size_t arg_idx)
+jit_get_arg(jitstate_t *jit, size_t arg_idx)
 {
     RUBY_ASSERT(arg_idx + 1 < (size_t)insn_len(jit_get_opcode(jit)));
     return *(jit->pc + arg_idx + 1);
@@ -104,7 +104,7 @@ jit_get_arg(jitstate_t* jit, size_t arg_idx)
 
 // Load a VALUE into a register and keep track of the reference if it is on the GC heap.
 static void
-jit_mov_gc_ptr(jitstate_t* jit, codeblock_t* cb, x86opnd_t reg, VALUE ptr)
+jit_mov_gc_ptr(jitstate_t *jit, codeblock_t *cb, x86opnd_t reg, VALUE ptr)
 {
     RUBY_ASSERT(reg.type == OPND_REG && reg.num_bits == 64);
 
@@ -124,16 +124,16 @@ jit_mov_gc_ptr(jitstate_t* jit, codeblock_t* cb, x86opnd_t reg, VALUE ptr)
 // Check if we are compiling the instruction at the stub PC
 // Meaning we are compiling the instruction that is next to execute
 static bool
-jit_at_current_insn(jitstate_t* jit)
+jit_at_current_insn(jitstate_t *jit)
 {
-    const VALUE* ec_pc = jit->ec->cfp->pc;
+    const VALUE *ec_pc = jit->ec->cfp->pc;
     return (ec_pc == jit->pc);
 }
 
 // Peek at the nth topmost value on the Ruby stack.
 // Returns the topmost value when n == 0.
 static VALUE
-jit_peek_at_stack(jitstate_t* jit, ctx_t* ctx, int n)
+jit_peek_at_stack(jitstate_t *jit, ctx_t *ctx, int n)
 {
     RUBY_ASSERT(jit_at_current_insn(jit));
 
@@ -168,9 +168,9 @@ jit_peek_at_local(jitstate_t *jit, ctx_t *ctx, int n)
 // Save the incremented PC on the CFP
 // This is necessary when calleees can raise or allocate
 static void
-jit_save_pc(jitstate_t* jit, x86opnd_t scratch_reg)
+jit_save_pc(jitstate_t *jit, x86opnd_t scratch_reg)
 {
-    codeblock_t* cb = jit->cb;
+    codeblock_t *cb = jit->cb;
     mov(cb, scratch_reg, const_ptr_opnd(jit->pc + insn_len(jit->opcode)));
     mov(cb, mem_opnd(64, REG_CFP, offsetof(rb_control_frame_t, pc)), scratch_reg);
 }
@@ -180,11 +180,11 @@ jit_save_pc(jitstate_t* jit, x86opnd_t scratch_reg)
 // Note: this will change the current value of REG_SP,
 // which could invalidate memory operands
 static void
-jit_save_sp(jitstate_t* jit, ctx_t* ctx)
+jit_save_sp(jitstate_t *jit, ctx_t *ctx)
 {
     if (ctx->sp_offset != 0) {
         x86opnd_t stack_pointer = ctx_sp_opnd(ctx, 0);
-        codeblock_t* cb = jit->cb;
+        codeblock_t *cb = jit->cb;
         lea(cb, REG_SP, stack_pointer);
         mov(cb, member_opnd(REG_CFP, rb_control_frame_t, sp), REG_SP);
         ctx->sp_offset = 0;
@@ -213,13 +213,13 @@ record_global_inval_patch(const codeblock_t *cb, uint32_t outline_block_target_p
     if (!rb_darray_append(&global_inval_patches, patch_point)) rb_bug("allocation failed");
 }
 
-static bool jit_guard_known_klass(jitstate_t *jit, ctx_t* ctx, VALUE known_klass, insn_opnd_t insn_opnd, VALUE sample_instance, const int max_chain_depth, uint8_t *side_exit);
+static bool jit_guard_known_klass(jitstate_t *jit, ctx_t *ctx, VALUE known_klass, insn_opnd_t insn_opnd, VALUE sample_instance, const int max_chain_depth, uint8_t *side_exit);
 
 #if YJIT_STATS
 
 // Add a comment at the current position in the code block
 static void
-_add_comment(codeblock_t* cb, const char* comment_str)
+_add_comment(codeblock_t *cb, const char *comment_str)
 {
     // We can't add comments to the outlined code block
     if (cb == ocb)
@@ -417,7 +417,7 @@ static uint8_t *
 yjit_side_exit(jitstate_t *jit, ctx_t *ctx)
 {
     if (!jit->side_exit_for_pc) {
-        codeblock_t* ocb = jit->ocb;
+        codeblock_t *ocb = jit->ocb;
         uint32_t pos = yjit_gen_exit(jit->pc, ctx, ocb);
         jit->side_exit_for_pc = cb_get_ptr(ocb, pos);
     }
@@ -431,7 +431,7 @@ yjit_side_exit(jitstate_t *jit, ctx_t *ctx)
 // PC for the method isn't necessarily 0, but we always generated code that
 // assumes the entry point is 0.
 static void
-yjit_pc_guard(codeblock_t* cb, const rb_iseq_t *iseq)
+yjit_pc_guard(codeblock_t *cb, const rb_iseq_t *iseq)
 {
     RUBY_ASSERT(cb != NULL);
 
@@ -520,7 +520,7 @@ Compile an interpreter entry block to be inserted into an iseq
 Returns `NULL` if compilation fails.
 */
 uint8_t *
-yjit_entry_prologue(codeblock_t* cb, const rb_iseq_t *iseq)
+yjit_entry_prologue(codeblock_t *cb, const rb_iseq_t *iseq)
 {
     RUBY_ASSERT(cb != NULL);
 
@@ -566,7 +566,7 @@ yjit_entry_prologue(codeblock_t* cb, const rb_iseq_t *iseq)
 // Generate code to check for interrupts and take a side-exit.
 // Warning: this function clobbers REG0
 static void
-yjit_check_ints(codeblock_t* cb, uint8_t* side_exit)
+yjit_check_ints(codeblock_t *cb, uint8_t *side_exit)
 {
     // Check for interrupts
     // see RUBY_VM_CHECK_INTS(ec) macro
@@ -614,7 +614,7 @@ yjit_gen_block(block_t *block, rb_execution_context_t *ec)
 
     // Copy the block's context to avoid mutating it
    ctx_t ctx_copy = block->ctx;
-    ctx_t* ctx = &ctx_copy;
+    ctx_t *ctx = &ctx_copy;
 
     const rb_iseq_t *iseq = block->blockid.iseq;
     uint32_t insn_idx = block->blockid.idx;
@@ -745,17 +745,17 @@ yjit_gen_block(block_t *block, rb_execution_context_t *ec)
     }
 }
 
-static codegen_status_t gen_opt_send_without_block(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb);
+static codegen_status_t gen_opt_send_without_block(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb);
 
 static codegen_status_t
-gen_nop(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_nop(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Do nothing
     return YJIT_KEEP_COMPILING;
 }
 
 static codegen_status_t
-gen_dup(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_dup(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Get the top value and its type
     x86opnd_t dup_val = ctx_stack_pop(ctx, 0);
@@ -771,7 +771,7 @@ gen_dup(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 
 // duplicate stack top n elements
 static codegen_status_t
-gen_dupn(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_dupn(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     rb_num_t n = (rb_num_t)jit_get_arg(jit, 0);
 
@@ -798,7 +798,7 @@ gen_dupn(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 
 // Swap top 2 stack entries
 static codegen_status_t
-gen_swap(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_swap(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     x86opnd_t opnd0 = ctx_stack_opnd(ctx, 0);
     x86opnd_t opnd1 = ctx_stack_opnd(ctx, 1);
@@ -818,7 +818,7 @@ gen_swap(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 
 // set Nth stack entry to stack top
 static codegen_status_t
-gen_setn(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_setn(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     rb_num_t n = (rb_num_t)jit_get_arg(jit, 0);
 
@@ -836,7 +836,7 @@ gen_setn(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 
 // get nth stack value, then push it
 static codegen_status_t
-gen_topn(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_topn(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     int32_t n = (int32_t)jit_get_arg(jit, 0);
 
@@ -852,7 +852,7 @@ gen_topn(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_pop(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_pop(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Decrement SP
     ctx_stack_pop(ctx, 1);
@@ -861,7 +861,7 @@ gen_pop(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 
 // Pop n values off the stack
 static codegen_status_t
-gen_adjuststack(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_adjuststack(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     rb_num_t n = (rb_num_t)jit_get_arg(jit, 0);
     ctx_stack_pop(ctx, n);
@@ -870,7 +870,7 @@ gen_adjuststack(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 
 // new array initialized from top N values
 static codegen_status_t
-gen_newarray(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_newarray(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     rb_num_t n = (rb_num_t)jit_get_arg(jit, 0);
 
@@ -894,7 +894,7 @@ gen_newarray(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 
 // dup array
 static codegen_status_t
-gen_duparray(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_duparray(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     VALUE ary = jit_get_arg(jit, 0);
 
@@ -915,7 +915,7 @@ VALUE rb_vm_splat_array(VALUE flag, VALUE ary);
 
 // call to_a on the array on the stack
 static codegen_status_t
-gen_splatarray(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_splatarray(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     VALUE flag = (VALUE) jit_get_arg(jit, 0);
 
@@ -939,7 +939,7 @@ gen_splatarray(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 
 // new range initialized from top 2 values
 static codegen_status_t
-gen_newrange(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_newrange(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     rb_num_t flag = (rb_num_t)jit_get_arg(jit, 0);
 
@@ -990,7 +990,7 @@ guard_object_is_array(codeblock_t *cb, x86opnd_t object_opnd, x86opnd_t flags_op
 
 // push enough nils onto the stack to fill out an array
 static codegen_status_t
-gen_expandarray(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_expandarray(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     int flag = (int) jit_get_arg(jit, 1);
 
@@ -1072,7 +1072,7 @@ gen_expandarray(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 
 // new hash initialized from top N values
 static codegen_status_t
-gen_newhash(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_newhash(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     rb_num_t n = (rb_num_t)jit_get_arg(jit, 0);
 
@@ -1093,7 +1093,7 @@ gen_newhash(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_putnil(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_putnil(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Write constant at SP
     x86opnd_t stack_top = ctx_stack_push(ctx, TYPE_NIL);
@@ -1102,7 +1102,7 @@ gen_putnil(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_putobject(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_putobject(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     VALUE arg = jit_get_arg(jit, 0);
 
@@ -1146,7 +1146,7 @@ gen_putobject(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_putstring(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_putstring(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     VALUE put_val = jit_get_arg(jit, 0);
 
@@ -1164,7 +1164,7 @@ gen_putstring(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_putobject_int2fix(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_putobject_int2fix(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     int opcode = jit_get_opcode(jit);
     int cst_val = (opcode == BIN(putobject_INT2FIX_0_))? 0:1;
@@ -1177,7 +1177,7 @@ gen_putobject_int2fix(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_putself(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_putself(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Load self from CFP
     mov(cb, REG0, member_opnd(REG_CFP, rb_control_frame_t, self));
@@ -1190,7 +1190,7 @@ gen_putself(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_putspecialobject(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_putspecialobject(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     enum vm_special_object_type type = (enum vm_special_object_type)jit_get_arg(jit, 0);
 
@@ -1216,7 +1216,7 @@ gen_get_ep(codeblock_t *cb, x86opnd_t reg, uint32_t level)
     while (level--) {
         // Get the previous EP from the current EP
         // See GET_PREV_EP(ep) macro
-        // VALUE* prev_ep = ((VALUE *)((ep)[VM_ENV_DATA_INDEX_SPECVAL] & ~0x03))
+        // VALUE *prev_ep = ((VALUE *)((ep)[VM_ENV_DATA_INDEX_SPECVAL] & ~0x03))
         mov(cb, reg, mem_opnd(64, REG0, SIZEOF_VALUE * VM_ENV_DATA_INDEX_SPECVAL));
        and(cb, reg, imm_opnd(~0x03));
     }
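The masked load above is easier to see in plain C. A hypothetical helper (not in this commit) computing the same walk that the emitted mov/and pair performs:

    static const VALUE *walk_ep_chain_sketch(const VALUE *ep, uint32_t level)
    {
        while (level--) {
            // Same as the GET_PREV_EP(ep) macro quoted above:
            // the low two bits of the SPECVAL slot are tag bits, so mask them off
            ep = (const VALUE *)(ep[VM_ENV_DATA_INDEX_SPECVAL] & ~0x03);
        }
        return ep;
    }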
@@ -1235,7 +1235,7 @@ slot_to_local_idx(const rb_iseq_t *iseq, int32_t slot_idx)
 }
 
 static codegen_status_t
-gen_getlocal_wc0(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_getlocal_wc0(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Compute the offset from BP to the local
     int32_t slot_idx = (int32_t)jit_get_arg(jit, 0);
@@ -1256,7 +1256,7 @@ gen_getlocal_wc0(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_getlocal_generic(ctx_t* ctx, uint32_t local_idx, uint32_t level)
+gen_getlocal_generic(ctx_t *ctx, uint32_t local_idx, uint32_t level)
 {
     gen_get_ep(cb, REG0, level);
 
@@ -1273,7 +1273,7 @@ gen_getlocal_generic(ctx_t* ctx, uint32_t local_idx, uint32_t level)
 }
 
 static codegen_status_t
-gen_getlocal(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_getlocal(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     int32_t idx = (int32_t)jit_get_arg(jit, 0);
     int32_t level = (int32_t)jit_get_arg(jit, 1);
@@ -1281,14 +1281,14 @@ gen_getlocal(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_getlocal_wc1(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_getlocal_wc1(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     int32_t idx = (int32_t)jit_get_arg(jit, 0);
     return gen_getlocal_generic(ctx, idx, 1);
 }
 
 static codegen_status_t
-gen_setlocal_wc0(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_setlocal_wc0(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     /*
     vm_env_write(const VALUE *ep, int index, VALUE v)
@@ -1335,7 +1335,7 @@ gen_setlocal_wc0(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_setlocal_generic(jitstate_t *jit, ctx_t* ctx, uint32_t local_idx, uint32_t level)
+gen_setlocal_generic(jitstate_t *jit, ctx_t *ctx, uint32_t local_idx, uint32_t level)
 {
     // Load environment pointer EP at level
     gen_get_ep(cb, REG0, level);
@@ -1362,7 +1362,7 @@ gen_setlocal_generic(jitstate_t *jit, ctx_t* ctx, uint32_t local_idx, uint32_t l
 }
 
 static codegen_status_t
-gen_setlocal(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_setlocal(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     int32_t idx = (int32_t)jit_get_arg(jit, 0);
     int32_t level = (int32_t)jit_get_arg(jit, 1);
@@ -1370,7 +1370,7 @@ gen_setlocal(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_setlocal_wc1(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_setlocal_wc1(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     int32_t idx = (int32_t)jit_get_arg(jit, 0);
     return gen_setlocal_generic(jit, ctx, idx, 1);
@@ -1686,7 +1686,7 @@ gen_get_ivar(jitstate_t *jit, ctx_t *ctx, const int max_chain_depth, VALUE compt
 }
 
 static codegen_status_t
-gen_getinstancevariable(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_getinstancevariable(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Defer compilation so we can specialize on a runtime `self`
     if (!jit_at_current_insn(jit)) {
@@ -1714,7 +1714,7 @@ gen_getinstancevariable(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 void rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic);
 
 static codegen_status_t
-gen_setinstancevariable(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_setinstancevariable(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     ID id = (ID)jit_get_arg(jit, 0);
     IVC ic = (IVC)jit_get_arg(jit, 1);
@@ -1740,7 +1740,7 @@ gen_setinstancevariable(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 bool rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v);
 
 static codegen_status_t
-gen_defined(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_defined(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     rb_num_t op_type = (rb_num_t)jit_get_arg(jit, 0);
     VALUE obj = (VALUE)jit_get_arg(jit, 1);
@@ -1778,7 +1778,7 @@ gen_defined(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_checktype(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_checktype(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     enum ruby_value_type type_val = (enum ruby_value_type)jit_get_arg(jit, 0);
     // Only three types are emitted by compile.c
@@ -1837,7 +1837,7 @@ gen_checktype(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_concatstrings(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_concatstrings(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     rb_num_t n = (rb_num_t)jit_get_arg(jit, 0);
 
@@ -1859,7 +1859,7 @@ gen_concatstrings(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static void
-guard_two_fixnums(ctx_t* ctx, uint8_t* side_exit)
+guard_two_fixnums(ctx_t *ctx, uint8_t *side_exit)
 {
     // Get the stack operand types
     val_type_t arg1_type = ctx_get_opnd_type(ctx, OPND_STACK(0));
@@ -1907,10 +1907,10 @@ guard_two_fixnums(ctx_t* ctx, uint8_t* side_exit)
 }
 
 // Conditional move operation used by comparison operators
-typedef void (*cmov_fn)(codeblock_t* cb, x86opnd_t opnd0, x86opnd_t opnd1);
+typedef void (*cmov_fn)(codeblock_t *cb, x86opnd_t opnd0, x86opnd_t opnd1);
 
 static codegen_status_t
-gen_fixnum_cmp(jitstate_t* jit, ctx_t* ctx, cmov_fn cmov_op)
+gen_fixnum_cmp(jitstate_t *jit, ctx_t *ctx, cmov_fn cmov_op)
 {
     // Defer compilation so we can specialize base on a runtime receiver
     if (!jit_at_current_insn(jit)) {
@@ -1924,7 +1924,7 @@ gen_fixnum_cmp(jitstate_t* jit, ctx_t* ctx, cmov_fn cmov_op)
     if (FIXNUM_P(comptime_a) && FIXNUM_P(comptime_b)) {
         // Create a size-exit to fall back to the interpreter
         // Note: we generate the side-exit before popping operands from the stack
-        uint8_t* side_exit = yjit_side_exit(jit, ctx);
+        uint8_t *side_exit = yjit_side_exit(jit, ctx);
 
         if (!assume_bop_not_redefined(jit->block, INTEGER_REDEFINED_OP_FLAG, BOP_LT)) {
             return YJIT_CANT_COMPILE;
@@ -1955,25 +1955,25 @@ gen_fixnum_cmp(jitstate_t* jit, ctx_t* ctx, cmov_fn cmov_op)
 }
 
 static codegen_status_t
-gen_opt_lt(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_lt(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     return gen_fixnum_cmp(jit, ctx, cmovl);
 }
 
 static codegen_status_t
-gen_opt_le(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_le(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     return gen_fixnum_cmp(jit, ctx, cmovle);
 }
 
 static codegen_status_t
-gen_opt_ge(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_ge(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     return gen_fixnum_cmp(jit, ctx, cmovge);
 }
 
 static codegen_status_t
-gen_opt_gt(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_gt(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     return gen_fixnum_cmp(jit, ctx, cmovg);
 }
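The four comparison opcodes above share gen_fixnum_cmp() and differ only in which cmov emitter they pass. A hypothetical additional comparison would follow the same one-line shape (the actual equality path is separate and goes through gen_equality_specialized, shown in the next hunk):

    // Illustrative only, not in this commit: select on the "equal" flag via cmove
    static codegen_status_t
    gen_opt_example_eq_cmp(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
    {
        return gen_fixnum_cmp(jit, ctx, cmove);
    }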
@@ -1981,7 +1981,7 @@ gen_opt_gt(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 // Implements specialized equality for either two fixnum or two strings
 // Returns true if code was generated, otherwise false
 bool
-gen_equality_specialized(jitstate_t* jit, ctx_t* ctx, uint8_t *side_exit)
+gen_equality_specialized(jitstate_t *jit, ctx_t *ctx, uint8_t *side_exit)
 {
     VALUE comptime_a = jit_peek_at_stack(jit, ctx, 1);
     VALUE comptime_b = jit_peek_at_stack(jit, ctx, 0);
@@ -2056,7 +2056,7 @@ gen_equality_specialized(jitstate_t* jit, ctx_t* ctx, uint8_t *side_exit)
 }
 
 static codegen_status_t
-gen_opt_eq(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_eq(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Defer compilation so we can specialize base on a runtime receiver
     if (!jit_at_current_insn(jit)) {
@@ -2078,7 +2078,7 @@ gen_opt_eq(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 static codegen_status_t gen_send_general(jitstate_t *jit, ctx_t *ctx, struct rb_call_data *cd, rb_iseq_t *block);
 
 static codegen_status_t
-gen_opt_neq(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_neq(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // opt_neq is passed two rb_call_data as arguments:
     // first for ==, second for !=
@@ -2087,7 +2087,7 @@ gen_opt_neq(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_opt_aref(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_aref(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     struct rb_call_data * cd = (struct rb_call_data *)jit_get_arg(jit, 0);
     int32_t argc = (int32_t)vm_ci_argc(cd->ci);
@@ -2201,7 +2201,7 @@ gen_opt_aref(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_opt_aset(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_aset(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Defer compilation so we can specialize on a runtime `self`
     if (!jit_at_current_insn(jit)) {
@@ -2218,7 +2218,7 @@ gen_opt_aset(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
     x86opnd_t val = ctx_stack_opnd(ctx, 0);
 
     if (CLASS_OF(comptime_recv) == rb_cArray && FIXNUM_P(comptime_key)) {
-        uint8_t* side_exit = yjit_side_exit(jit, ctx);
+        uint8_t *side_exit = yjit_side_exit(jit, ctx);
 
         // Guard receiver is an Array
         mov(cb, REG0, recv);
@@ -2251,7 +2251,7 @@ gen_opt_aset(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
         jit_jump_to_next_insn(jit, ctx);
         return YJIT_END_BLOCK;
     } else if (CLASS_OF(comptime_recv) == rb_cHash) {
-        uint8_t* side_exit = yjit_side_exit(jit, ctx);
+        uint8_t *side_exit = yjit_side_exit(jit, ctx);
 
         // Guard receiver is a Hash
         mov(cb, REG0, recv);
@@ -2280,7 +2280,7 @@ gen_opt_aset(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_opt_and(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_and(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Defer compilation so we can specialize on a runtime `self`
     if (!jit_at_current_insn(jit)) {
@@ -2294,7 +2294,7 @@ gen_opt_and(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
     if (FIXNUM_P(comptime_a) && FIXNUM_P(comptime_b)) {
         // Create a size-exit to fall back to the interpreter
         // Note: we generate the side-exit before popping operands from the stack
-        uint8_t* side_exit = yjit_side_exit(jit, ctx);
+        uint8_t *side_exit = yjit_side_exit(jit, ctx);
 
         if (!assume_bop_not_redefined(jit->block, INTEGER_REDEFINED_OP_FLAG, BOP_AND)) {
             return YJIT_CANT_COMPILE;
@@ -2323,7 +2323,7 @@ gen_opt_and(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_opt_or(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_or(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Defer compilation so we can specialize on a runtime `self`
     if (!jit_at_current_insn(jit)) {
@@ -2337,7 +2337,7 @@ gen_opt_or(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
     if (FIXNUM_P(comptime_a) && FIXNUM_P(comptime_b)) {
         // Create a size-exit to fall back to the interpreter
         // Note: we generate the side-exit before popping operands from the stack
-        uint8_t* side_exit = yjit_side_exit(jit, ctx);
+        uint8_t *side_exit = yjit_side_exit(jit, ctx);
 
         if (!assume_bop_not_redefined(jit->block, INTEGER_REDEFINED_OP_FLAG, BOP_OR)) {
             return YJIT_CANT_COMPILE;
@@ -2366,7 +2366,7 @@ gen_opt_or(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_opt_minus(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_minus(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Defer compilation so we can specialize on a runtime `self`
     if (!jit_at_current_insn(jit)) {
@@ -2380,7 +2380,7 @@ gen_opt_minus(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
     if (FIXNUM_P(comptime_a) && FIXNUM_P(comptime_b)) {
         // Create a size-exit to fall back to the interpreter
         // Note: we generate the side-exit before popping operands from the stack
-        uint8_t* side_exit = yjit_side_exit(jit, ctx);
+        uint8_t *side_exit = yjit_side_exit(jit, ctx);
 
         if (!assume_bop_not_redefined(jit->block, INTEGER_REDEFINED_OP_FLAG, BOP_MINUS)) {
             return YJIT_CANT_COMPILE;
@@ -2411,7 +2411,7 @@ gen_opt_minus(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_opt_plus(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_plus(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Defer compilation so we can specialize on a runtime `self`
     if (!jit_at_current_insn(jit)) {
@@ -2425,7 +2425,7 @@ gen_opt_plus(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
     if (FIXNUM_P(comptime_a) && FIXNUM_P(comptime_b)) {
         // Create a size-exit to fall back to the interpreter
         // Note: we generate the side-exit before popping operands from the stack
-        uint8_t* side_exit = yjit_side_exit(jit, ctx);
+        uint8_t *side_exit = yjit_side_exit(jit, ctx);
 
         if (!assume_bop_not_redefined(jit->block, INTEGER_REDEFINED_OP_FLAG, BOP_PLUS)) {
             return YJIT_CANT_COMPILE;
@@ -2456,14 +2456,14 @@ gen_opt_plus(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_opt_mult(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_mult(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Delegate to send, call the method on the recv
     return gen_opt_send_without_block(jit, ctx, cb);
 }
 
 static codegen_status_t
-gen_opt_div(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_div(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Delegate to send, call the method on the recv
     return gen_opt_send_without_block(jit, ctx, cb);
@@ -2472,13 +2472,13 @@ gen_opt_div(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 VALUE rb_vm_opt_mod(VALUE recv, VALUE obj);
 
 static codegen_status_t
-gen_opt_mod(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_mod(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Save the PC and SP because the callee may allocate bignums
     // Note that this modifies REG_SP, which is why we do it first
     jit_prepare_routine_call(jit, ctx, REG0);
 
-    uint8_t* side_exit = yjit_side_exit(jit, ctx);
+    uint8_t *side_exit = yjit_side_exit(jit, ctx);
 
     // Get the operands from the stack
     x86opnd_t arg1 = ctx_stack_pop(ctx, 1);
@@ -2501,28 +2501,28 @@ gen_opt_mod(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_opt_ltlt(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_ltlt(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Delegate to send, call the method on the recv
     return gen_opt_send_without_block(jit, ctx, cb);
 }
 
 static codegen_status_t
-gen_opt_nil_p(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_nil_p(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Delegate to send, call the method on the recv
     return gen_opt_send_without_block(jit, ctx, cb);
 }
 
 static codegen_status_t
-gen_opt_empty_p(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_empty_p(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Delegate to send, call the method on the recv
     return gen_opt_send_without_block(jit, ctx, cb);
 }
 
 static codegen_status_t
-gen_opt_str_freeze(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_str_freeze(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     if (!assume_bop_not_redefined(jit->block, STRING_REDEFINED_OP_FLAG, BOP_FREEZE)) {
         return YJIT_CANT_COMPILE;
@@ -2539,7 +2539,7 @@ gen_opt_str_freeze(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_opt_str_uminus(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_str_uminus(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     if (!assume_bop_not_redefined(jit->block, STRING_REDEFINED_OP_FLAG, BOP_UMINUS)) {
         return YJIT_CANT_COMPILE;
@@ -2556,31 +2556,31 @@ gen_opt_str_uminus(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_opt_not(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_not(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     return gen_opt_send_without_block(jit, ctx, cb);
 }
 
 static codegen_status_t
-gen_opt_size(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_size(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     return gen_opt_send_without_block(jit, ctx, cb);
 }
 
 static codegen_status_t
-gen_opt_length(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_length(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     return gen_opt_send_without_block(jit, ctx, cb);
 }
 
 static codegen_status_t
-gen_opt_regexpmatch2(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_regexpmatch2(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     return gen_opt_send_without_block(jit, ctx, cb);
 }
 
 static codegen_status_t
-gen_opt_case_dispatch(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_case_dispatch(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Normally this instruction would lookup the key in a hash and jump to an
     // offset based on that.
@@ -2596,7 +2596,7 @@ gen_opt_case_dispatch(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 void
-gen_branchif_branch(codeblock_t* cb, uint8_t* target0, uint8_t* target1, uint8_t shape)
+gen_branchif_branch(codeblock_t *cb, uint8_t *target0, uint8_t *target1, uint8_t shape)
 {
     switch (shape)
     {
@@ -2616,13 +2616,13 @@ gen_branchif_branch(codeblock_t* cb, uint8_t* target0, uint8_t* target1, uint8_t
 }
 
 static codegen_status_t
-gen_branchif(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_branchif(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     int32_t jump_offset = (int32_t)jit_get_arg(jit, 0);
 
     // Check for interrupts, but only on backward branches that may create loops
     if (jump_offset < 0) {
-        uint8_t* side_exit = yjit_side_exit(jit, ctx);
+        uint8_t *side_exit = yjit_side_exit(jit, ctx);
         yjit_check_ints(cb, side_exit);
     }
 
@@ -2653,7 +2653,7 @@ gen_branchif(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 void
-gen_branchunless_branch(codeblock_t* cb, uint8_t* target0, uint8_t* target1, uint8_t shape)
+gen_branchunless_branch(codeblock_t *cb, uint8_t *target0, uint8_t *target1, uint8_t shape)
 {
     switch (shape)
     {
@@ -2673,13 +2673,13 @@ gen_branchunless_branch(codeblock_t* cb, uint8_t* target0, uint8_t* target1, uin
 }
 
 static codegen_status_t
-gen_branchunless(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_branchunless(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     int32_t jump_offset = (int32_t)jit_get_arg(jit, 0);
 
     // Check for interrupts, but only on backward branches that may create loops
     if (jump_offset < 0) {
-        uint8_t* side_exit = yjit_side_exit(jit, ctx);
+        uint8_t *side_exit = yjit_side_exit(jit, ctx);
         yjit_check_ints(cb, side_exit);
     }
 
@@ -2710,7 +2710,7 @@ gen_branchunless(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 void
-gen_branchnil_branch(codeblock_t* cb, uint8_t* target0, uint8_t* target1, uint8_t shape)
+gen_branchnil_branch(codeblock_t *cb, uint8_t *target0, uint8_t *target1, uint8_t shape)
 {
     switch (shape)
     {
@@ -2730,13 +2730,13 @@ gen_branchnil_branch(codeblock_t* cb, uint8_t* target0, uint8_t* target1, uint8_
 }
 
 static codegen_status_t
-gen_branchnil(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_branchnil(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     int32_t jump_offset = (int32_t)jit_get_arg(jit, 0);
 
     // Check for interrupts, but only on backward branches that may create loops
     if (jump_offset < 0) {
-        uint8_t* side_exit = yjit_side_exit(jit, ctx);
+        uint8_t *side_exit = yjit_side_exit(jit, ctx);
         yjit_check_ints(cb, side_exit);
     }
 
@@ -2766,13 +2766,13 @@ gen_branchnil(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_jump(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_jump(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     int32_t jump_offset = (int32_t)jit_get_arg(jit, 0);
 
     // Check for interrupts, but only on backward branches that may create loops
     if (jump_offset < 0) {
-        uint8_t* side_exit = yjit_side_exit(jit, ctx);
+        uint8_t *side_exit = yjit_side_exit(jit, ctx);
         yjit_check_ints(cb, side_exit);
     }
 
@@ -3301,7 +3301,7 @@ gen_send_cfunc(jitstate_t *jit, ctx_t *ctx, const struct rb_callinfo *ci, const
 }
 
 static void
-gen_return_branch(codeblock_t* cb, uint8_t* target0, uint8_t* target1, uint8_t shape)
+gen_return_branch(codeblock_t *cb, uint8_t *target0, uint8_t *target1, uint8_t shape)
 {
     switch (shape)
     {
@@ -3742,14 +3742,14 @@ gen_send_general(jitstate_t *jit, ctx_t *ctx, struct rb_call_data *cd, rb_iseq_t
 }
 
 static codegen_status_t
-gen_opt_send_without_block(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_send_without_block(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     struct rb_call_data *cd = (struct rb_call_data *)jit_get_arg(jit, 0);
     return gen_send_general(jit, ctx, cd, NULL);
 }
 
 static codegen_status_t
-gen_send(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_send(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     struct rb_call_data *cd = (struct rb_call_data *)jit_get_arg(jit, 0);
     rb_iseq_t *block = (rb_iseq_t *)jit_get_arg(jit, 1);
@@ -3757,7 +3757,7 @@ gen_send(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_invokesuper(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_invokesuper(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     struct rb_call_data *cd = (struct rb_call_data *)jit_get_arg(jit, 0);
     rb_iseq_t *block = (rb_iseq_t *)jit_get_arg(jit, 1);
@@ -3893,13 +3893,13 @@ gen_invokesuper(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_leave(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_leave(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Only the return value should be on the stack
     RUBY_ASSERT(ctx->stack_size == 1);
 
     // Create a size-exit to fall back to the interpreter
-    uint8_t* side_exit = yjit_side_exit(jit, ctx);
+    uint8_t *side_exit = yjit_side_exit(jit, ctx);
 
     // Load environment pointer EP from CFP
     mov(cb, REG1, member_opnd(REG_CFP, rb_control_frame_t, ep));
@@ -3931,7 +3931,7 @@ gen_leave(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 RUBY_EXTERN rb_serial_t ruby_vm_global_constant_state;
 
 static codegen_status_t
-gen_getglobal(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_getglobal(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     ID gid = jit_get_arg(jit, 0);
 
@@ -3949,7 +3949,7 @@ gen_getglobal(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_setglobal(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_setglobal(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     ID gid = jit_get_arg(jit, 0);
 
@@ -3969,7 +3969,7 @@ gen_setglobal(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_tostring(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_tostring(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // Save the PC and SP because we might make a Ruby call for
     // Kernel#set_trace_var
@@ -3991,7 +3991,7 @@ gen_tostring(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_toregexp(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_toregexp(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     rb_num_t opt = jit_get_arg(jit, 0);
     rb_num_t cnt = jit_get_arg(jit, 1);
@@ -4031,7 +4031,7 @@ gen_toregexp(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_getspecial(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_getspecial(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // This takes two arguments, key and type
     // key is only used when type == 0
@@ -4102,7 +4102,7 @@ gen_getspecial(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_opt_getinlinecache(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_getinlinecache(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     VALUE jump_offset = jit_get_arg(jit, 0);
     VALUE const_cache_as_value = jit_get_arg(jit, 1);
@@ -4168,7 +4168,7 @@ gen_opt_getinlinecache(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 // interpreter's scheme for avoiding Proc allocations when delegating
 // explict block parameters.
 static codegen_status_t
-gen_getblockparamproxy(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_getblockparamproxy(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     // A mirror of the interpreter code. Checking for the case
     // where it's pushing rb_block_param_proxy.
@@ -4205,7 +4205,7 @@ gen_getblockparamproxy(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 }
 
 static codegen_status_t
-gen_invokebuiltin(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_invokebuiltin(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     const struct rb_builtin_function *bf = (struct rb_builtin_function *)jit_get_arg(jit, 0);
 
@@ -4242,7 +4242,7 @@ gen_invokebuiltin(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
 // invokebuiltin does, but instead of taking arguments from the top of the
 // stack uses the argument locals (and self) from the current method.
 static codegen_status_t
-gen_opt_invokebuiltin_delegate(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb)
+gen_opt_invokebuiltin_delegate(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
 {
     const struct rb_builtin_function *bf = (struct rb_builtin_function *)jit_get_arg(jit, 0);
     int32_t start_index = (int32_t)jit_get_arg(jit, 1);
yjit_codegen.h
@@ -16,11 +16,11 @@ typedef enum codegen_status {
 } codegen_status_t;
 
 // Code generation function signature
-typedef codegen_status_t (*codegen_fn)(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb);
+typedef codegen_status_t (*codegen_fn)(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb);
 
-uint8_t* yjit_entry_prologue(codeblock_t* cb, const rb_iseq_t* iseq);
+uint8_t *yjit_entry_prologue(codeblock_t *cb, const rb_iseq_t *iseq);
 
-void yjit_gen_block(block_t* block, rb_execution_context_t* ec);
+void yjit_gen_block(block_t *block, rb_execution_context_t *ec);
 
 void yjit_init_codegen(void);
 
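A minimal codegen_fn instance under the signature above, modeled on gen_putnil from the yjit_codegen.c hunks earlier; the function name here is illustrative, not from this commit:

    static codegen_status_t
    gen_example_putnil(jitstate_t *jit, ctx_t *ctx, codeblock_t *cb)
    {
        // Record the pushed value's type in the context, then write the
        // constant at the new stack-top slot and let compilation continue
        x86opnd_t stack_top = ctx_stack_push(ctx, TYPE_NIL);
        mov(cb, stack_top, imm_opnd(Qnil));
        return YJIT_KEEP_COMPILING;
    }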
112 yjit_core.c
@@ -14,7 +14,7 @@
 Get an operand for the adjusted stack pointer address
 */
 x86opnd_t
-ctx_sp_opnd(ctx_t* ctx, int32_t offset_bytes)
+ctx_sp_opnd(ctx_t *ctx, int32_t offset_bytes)
 {
     int32_t offset = (ctx->sp_offset * sizeof(VALUE)) + offset_bytes;
     return mem_opnd(64, REG_SP, offset);
@@ -25,7 +25,7 @@ Push one new value on the temp stack with an explicit mapping
 Return a pointer to the new stack top
 */
 x86opnd_t
-ctx_stack_push_mapping(ctx_t* ctx, temp_type_mapping_t mapping)
+ctx_stack_push_mapping(ctx_t *ctx, temp_type_mapping_t mapping)
 {
     // Keep track of the type and mapping of the value
     if (ctx->stack_size < MAX_TEMP_TYPES) {
@@ -51,7 +51,7 @@ Push one new value on the temp stack
 Return a pointer to the new stack top
 */
 x86opnd_t
-ctx_stack_push(ctx_t* ctx, val_type_t type)
+ctx_stack_push(ctx_t *ctx, val_type_t type)
 {
     temp_type_mapping_t mapping = { MAP_STACK, type };
     return ctx_stack_push_mapping(ctx, mapping);
@@ -61,7 +61,7 @@ ctx_stack_push(ctx_t* ctx, val_type_t type)
 Push the self value on the stack
 */
 x86opnd_t
-ctx_stack_push_self(ctx_t* ctx)
+ctx_stack_push_self(ctx_t *ctx)
 {
     temp_type_mapping_t mapping = { MAP_SELF, TYPE_UNKNOWN };
     return ctx_stack_push_mapping(ctx, mapping);
@@ -71,7 +71,7 @@ ctx_stack_push_self(ctx_t* ctx)
 Push a local variable on the stack
 */
 x86opnd_t
-ctx_stack_push_local(ctx_t* ctx, size_t local_idx)
+ctx_stack_push_local(ctx_t *ctx, size_t local_idx)
 {
     if (local_idx >= MAX_LOCAL_TYPES) {
         return ctx_stack_push(ctx, TYPE_UNKNOWN);
@@ -89,7 +89,7 @@ Pop N values off the stack
 Return a pointer to the stack top before the pop operation
 */
 x86opnd_t
-ctx_stack_pop(ctx_t* ctx, size_t n)
+ctx_stack_pop(ctx_t *ctx, size_t n)
 {
     RUBY_ASSERT(n <= ctx->stack_size);
 
@@ -117,7 +117,7 @@ ctx_stack_pop(ctx_t* ctx, size_t n)
 Get an operand pointing to a slot on the temp stack
 */
 x86opnd_t
-ctx_stack_opnd(ctx_t* ctx, int32_t idx)
+ctx_stack_opnd(ctx_t *ctx, int32_t idx)
 {
     // SP points just above the topmost value
     int32_t offset = (ctx->sp_offset - 1 - idx) * sizeof(VALUE);
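A worked instance of the offset arithmetic above, assuming 8-byte VALUEs:

    // With sp_offset == 3 (three pushes not yet flushed to REG_SP):
    //   idx 0 (top)    -> (3 - 1 - 0) * 8 = +16 bytes from REG_SP
    //   idx 1          -> (3 - 1 - 1) * 8 =  +8
    //   idx 2 (bottom) -> (3 - 1 - 2) * 8 =  +0
    // so mem_opnd(64, REG_SP, 16) addresses the topmost temp.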
@ -130,7 +130,7 @@ ctx_stack_opnd(ctx_t* ctx, int32_t idx)
|
|||
Get the type of an instruction operand
|
||||
*/
|
||||
val_type_t
|
||||
ctx_get_opnd_type(const ctx_t* ctx, insn_opnd_t opnd)
|
||||
ctx_get_opnd_type(const ctx_t *ctx, insn_opnd_t opnd)
|
||||
{
|
||||
if (opnd.is_self)
|
||||
return ctx->self_type;
|
||||
|
@ -172,7 +172,7 @@ This value must be compatible and at least as specific as the previously known t
|
|||
If this value originated from self, or an lvar, the learned type will be
|
||||
propagated back to its source.
|
||||
*/
|
||||
void ctx_upgrade_opnd_type(ctx_t* ctx, insn_opnd_t opnd, val_type_t type)
|
||||
void ctx_upgrade_opnd_type(ctx_t *ctx, insn_opnd_t opnd, val_type_t type)
|
||||
{
|
||||
if (opnd.is_self) {
|
||||
UPGRADE_TYPE(ctx->self_type, type);
|
||||
|
@ -211,7 +211,7 @@ This is can be used with ctx_stack_push_mapping or ctx_set_opnd_mapping to copy
|
|||
a stack value's type while maintaining the mapping.
|
||||
*/
|
||||
temp_type_mapping_t
|
||||
ctx_get_opnd_mapping(const ctx_t* ctx, insn_opnd_t opnd)
|
||||
ctx_get_opnd_mapping(const ctx_t *ctx, insn_opnd_t opnd)
|
||||
{
|
||||
temp_type_mapping_t type_mapping;
|
||||
type_mapping.type = ctx_get_opnd_type(ctx, opnd);
|
||||
|
@ -240,7 +240,7 @@ ctx_get_opnd_mapping(const ctx_t* ctx, insn_opnd_t opnd)
|
|||
Overwrite both the type and mapping of a stack operand.
|
||||
*/
|
||||
void
|
||||
ctx_set_opnd_mapping(ctx_t* ctx, insn_opnd_t opnd, temp_type_mapping_t type_mapping)
|
||||
ctx_set_opnd_mapping(ctx_t *ctx, insn_opnd_t opnd, temp_type_mapping_t type_mapping)
|
||||
{
|
||||
// self is always MAP_SELF
|
||||
RUBY_ASSERT(!opnd.is_self);
|
||||
|
@ -261,7 +261,7 @@ ctx_set_opnd_mapping(ctx_t* ctx, insn_opnd_t opnd, temp_type_mapping_t type_mapp
|
|||
/**
|
||||
Set the type of a local variable
|
||||
*/
|
||||
void ctx_set_local_type(ctx_t* ctx, size_t idx, val_type_t type)
|
||||
void ctx_set_local_type(ctx_t *ctx, size_t idx, val_type_t type)
|
||||
{
|
||||
if (idx >= MAX_LOCAL_TYPES)
|
||||
return;
|
||||
|
@ -280,7 +280,7 @@ void ctx_set_local_type(ctx_t* ctx, size_t idx, val_type_t type)
|
|||
|
||||
// Erase local variable type information
|
||||
// eg: because of a call we can't track
|
||||
void ctx_clear_local_types(ctx_t* ctx)
|
||||
void ctx_clear_local_types(ctx_t *ctx)
|
||||
{
|
||||
// When clearing local types we must detach any stack mappings to those
|
||||
// locals. Even if local values may have changed, stack values will not.
|
||||
|
@ -412,7 +412,7 @@ Returns 0 if the two contexts are the same
|
|||
Returns > 0 if different but compatible
|
||||
Returns INT_MAX if incompatible
|
||||
*/
|
||||
int ctx_diff(const ctx_t* src, const ctx_t* dst)
|
||||
int ctx_diff(const ctx_t *src, const ctx_t *dst)
|
||||
{
|
||||
// Can only lookup the first version in the chain
|
||||
if (dst->chain_depth != 0)
|
||||
|
@ -504,7 +504,7 @@ static size_t get_num_versions(blockid_t blockid)
|
|||
|
||||
// Keep track of a block version. Block should be fully constructed.
|
||||
static void
|
||||
add_block_version(blockid_t blockid, block_t* block)
|
||||
add_block_version(blockid_t blockid, block_t *block)
|
||||
{
|
||||
const rb_iseq_t *iseq = block->blockid.iseq;
|
||||
struct rb_iseq_constant_body *body = iseq->body;
|
||||
|
@ -565,12 +565,12 @@ add_block_version(blockid_t blockid, block_t* block)
|
|||
|
||||
// Create a new outgoing branch entry for a block
|
||||
static branch_t*
|
||||
make_branch_entry(block_t* block, const ctx_t* src_ctx, branchgen_fn gen_fn)
|
||||
make_branch_entry(block_t *block, const ctx_t *src_ctx, branchgen_fn gen_fn)
|
||||
{
|
||||
RUBY_ASSERT(block != NULL);
|
||||
|
||||
// Allocate and zero-initialize
|
||||
branch_t* branch = calloc(1, sizeof(branch_t));
|
||||
branch_t *branch = calloc(1, sizeof(branch_t));
|
||||
|
||||
branch->block = block;
|
||||
branch->src_ctx = *src_ctx;
|
||||
|
@ -584,12 +584,12 @@ make_branch_entry(block_t* block, const ctx_t* src_ctx, branchgen_fn gen_fn)
|
|||
}
|
||||
|
||||
// Retrieve a basic block version for an (iseq, idx) tuple
|
||||
block_t* find_block_version(blockid_t blockid, const ctx_t* ctx)
|
||||
block_t *find_block_version(blockid_t blockid, const ctx_t *ctx)
|
||||
{
|
||||
rb_yjit_block_array_t versions = yjit_get_version_array(blockid.iseq, blockid.idx);
|
||||
|
||||
// Best match found
|
||||
block_t* best_version = NULL;
|
||||
block_t *best_version = NULL;
|
||||
int best_diff = INT_MAX;
|
||||
|
||||
// For each version matching the blockid
|
||||
|
@ -619,7 +619,7 @@ block_t* find_block_version(blockid_t blockid, const ctx_t* ctx)
|
|||
|
||||
// Produce a generic context when the block version limit is hit for a blockid
|
||||
// Note that this will mutate the ctx argument
|
||||
void limit_block_versions(blockid_t blockid, ctx_t* ctx)
|
||||
void limit_block_versions(blockid_t blockid, ctx_t *ctx)
|
||||
{
|
||||
// Guard chains implement limits separately, do nothing
|
||||
if (ctx->chain_depth > 0)
|
||||
|
@ -641,15 +641,15 @@ void limit_block_versions(blockid_t blockid, ctx_t* ctx)
|
|||
}
|
||||
|
||||
// Compile a new block version immediately
|
||||
block_t* gen_block_version(blockid_t blockid, const ctx_t* start_ctx, rb_execution_context_t* ec)
|
||||
block_t *gen_block_version(blockid_t blockid, const ctx_t *start_ctx, rb_execution_context_t *ec)
|
||||
{
|
||||
// Allocate a new block version object
|
||||
block_t* block = calloc(1, sizeof(block_t));
|
||||
block_t *block = calloc(1, sizeof(block_t));
|
||||
block->blockid = blockid;
|
||||
memcpy(&block->ctx, start_ctx, sizeof(ctx_t));
|
||||
|
||||
// Store a pointer to the first block (returned by this function)
|
||||
block_t* first_block = block;
|
||||
block_t *first_block = block;
|
||||
|
||||
// Limit the number of specialized versions for this block
|
||||
limit_block_versions(block->blockid, &block->ctx);
|
||||
|
@ -668,7 +668,7 @@ block_t* gen_block_version(blockid_t blockid, const ctx_t* start_ctx, rb_executi
|
|||
}
|
||||
|
||||
// Get the last outgoing branch from the previous block
|
||||
branch_t* last_branch = rb_darray_back(block->outgoing);
|
||||
branch_t *last_branch = rb_darray_back(block->outgoing);
|
||||
|
||||
// If there is no next block to compile, stop
|
||||
if (last_branch->dst_addrs[0] || last_branch->dst_addrs[1]) {
|
||||
|
@ -707,7 +707,7 @@ block_t* gen_block_version(blockid_t blockid, const ctx_t* start_ctx, rb_executi
|
|||
}
|
||||
|
||||
// Generate a block version that is an entry point inserted into an iseq
|
||||
uint8_t* gen_entry_point(const rb_iseq_t *iseq, uint32_t insn_idx, rb_execution_context_t *ec)
|
||||
uint8_t *gen_entry_point(const rb_iseq_t *iseq, uint32_t insn_idx, rb_execution_context_t *ec)
|
||||
{
|
||||
// If we aren't at PC 0, don't generate code
|
||||
// See yjit_pc_guard
|
||||
|
@@ -719,10 +719,10 @@ uint8_t* gen_entry_point(const rb_iseq_t *iseq, uint32_t insn_idx, rb_execution_
     blockid_t blockid = { iseq, insn_idx };
 
     // Write the interpreter entry prologue
-    uint8_t* code_ptr = yjit_entry_prologue(cb, iseq);
+    uint8_t *code_ptr = yjit_entry_prologue(cb, iseq);
 
     // Try to generate code for the entry block
-    block_t* block = gen_block_version(blockid, &DEFAULT_CTX, ec);
+    block_t *block = gen_block_version(blockid, &DEFAULT_CTX, ec);
 
     // If we couldn't generate any code
     if (block->end_idx == insn_idx)
@@ -736,9 +736,9 @@ uint8_t* gen_entry_point(const rb_iseq_t *iseq, uint32_t insn_idx, rb_execution_
 // Called by the generated code when a branch stub is executed
 // Triggers compilation of branches and code patching
 static uint8_t *
-branch_stub_hit(branch_t* branch, const uint32_t target_idx, rb_execution_context_t* ec)
+branch_stub_hit(branch_t *branch, const uint32_t target_idx, rb_execution_context_t *ec)
 {
-    uint8_t* dst_addr;
+    uint8_t *dst_addr;
 
     // Stop other ractors since we are going to patch machine code.
     // This is how the GC does it.
@@ -748,7 +748,7 @@ branch_stub_hit(branch_t* branch, const uint32_t target_idx, rb_execution_contex
     RUBY_ASSERT(branch != NULL);
     RUBY_ASSERT(target_idx < 2);
     blockid_t target = branch->targets[target_idx];
-    const ctx_t* target_ctx = &branch->target_ctxs[target_idx];
+    const ctx_t *target_ctx = &branch->target_ctxs[target_idx];
 
     // If this branch has already been patched, return the dst address
     // Note: ractors can cause the same stub to be hit multiple times
@@ -775,7 +775,7 @@ branch_stub_hit(branch_t* branch, const uint32_t target_idx, rb_execution_contex
     ec->cfp->pc = yjit_iseq_pc_at_idx(target.iseq, target.idx);
 
     // Try to find an existing compiled version of this block
-    block_t* p_block = find_block_version(target, target_ctx);
+    block_t *p_block = find_block_version(target, target_ctx);
 
     // If this block hasn't yet been compiled
     if (!p_block) {
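The rest of branch_stub_hit() falls outside this hunk. A minimal sketch of the resolution step, reusing only names visible above (the real function also regenerates the branch machine code, omitted here):

// Sketch: compile the target on first hit, then aim the branch at it
if (!p_block) {
    p_block = gen_block_version(target, target_ctx, ec);
}
dst_addr = cb_get_ptr(cb, p_block->start_pos);
branch->dst_addrs[target_idx] = dst_addr;
branch->blocks[target_idx] = p_block;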
@@ -832,16 +832,16 @@ branch_stub_hit(branch_t* branch, const uint32_t target_idx, rb_execution_contex
 }
 
 // Get a version or stub corresponding to a branch target
-uint8_t* get_branch_target(
+uint8_t *get_branch_target(
     blockid_t target,
-    const ctx_t* ctx,
-    branch_t* branch,
+    const ctx_t *ctx,
+    branch_t *branch,
     uint32_t target_idx
 )
 {
     //fprintf(stderr, "get_branch_target, block (%p, %d)\n", target.iseq, target.idx);
 
-    block_t* p_block = find_block_version(target, ctx);
+    block_t *p_block = find_block_version(target, ctx);
 
     // If the block already exists
     if (p_block)
@@ -855,7 +855,7 @@ uint8_t* get_branch_target(
     }
 
     // Generate an outlined stub that will call branch_stub_hit()
-    uint8_t* stub_addr = cb_get_ptr(ocb, ocb->write_pos);
+    uint8_t *stub_addr = cb_get_ptr(ocb, ocb->write_pos);
 
     // Call branch_stub_hit(branch_idx, target_idx, ec)
     mov(ocb, C_ARG_REGS[2], REG_EC);
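The stub continues past the REG_EC move shown above. A sketch of the calling sequence it presumably emits, assuming the yjit_asm helpers imm_opnd, const_ptr_opnd, call_ptr, and jmp_rm, plus the REG0 scratch register used elsewhere in yjit:

// Sketch: pass (branch, target_idx, ec) to branch_stub_hit(), then
// jump to whatever address it returns
mov(ocb, C_ARG_REGS[1], imm_opnd(target_idx));
mov(ocb, C_ARG_REGS[0], const_ptr_opnd(branch));
call_ptr(ocb, REG0, (void *)&branch_stub_hit);
jmp_rm(ocb, RAX);

return stub_addr;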
@@ -871,18 +871,18 @@ uint8_t* get_branch_target(
 }
 
 void gen_branch(
-    jitstate_t* jit,
-    const ctx_t* src_ctx,
+    jitstate_t *jit,
+    const ctx_t *src_ctx,
     blockid_t target0,
-    const ctx_t* ctx0,
+    const ctx_t *ctx0,
     blockid_t target1,
-    const ctx_t* ctx1,
+    const ctx_t *ctx1,
     branchgen_fn gen_fn
 )
 {
     RUBY_ASSERT(target0.iseq != NULL);
 
-    branch_t* branch = make_branch_entry(jit->block, src_ctx, gen_fn);
+    branch_t *branch = make_branch_entry(jit->block, src_ctx, gen_fn);
     branch->targets[0] = target0;
     branch->targets[1] = target1;
     branch->target_ctxs[0] = *ctx0;
@@ -899,7 +899,7 @@ void gen_branch(
 }
 
 void
-gen_jump_branch(codeblock_t* cb, uint8_t* target0, uint8_t* target1, uint8_t shape)
+gen_jump_branch(codeblock_t *cb, uint8_t *target0, uint8_t *target1, uint8_t shape)
 {
     switch (shape)
     {
@@ -917,18 +917,18 @@ gen_jump_branch(codeblock_t* cb, uint8_t* target0, uint8_t* target1, uint8_t sha
 }
 
 void gen_direct_jump(
-    jitstate_t* jit,
-    const ctx_t* ctx,
+    jitstate_t *jit,
+    const ctx_t *ctx,
     blockid_t target0
 )
 {
     RUBY_ASSERT(target0.iseq != NULL);
 
-    branch_t* branch = make_branch_entry(jit->block, ctx, gen_jump_branch);
+    branch_t *branch = make_branch_entry(jit->block, ctx, gen_jump_branch);
     branch->targets[0] = target0;
     branch->target_ctxs[0] = *ctx;
 
-    block_t* p_block = find_block_version(target0, ctx);
+    block_t *p_block = find_block_version(target0, ctx);
 
     // If the version already exists
     if (p_block) {
@@ -955,8 +955,8 @@ void gen_direct_jump(
 
 // Create a stub to force the code up to this point to be executed
 void defer_compilation(
-    jitstate_t* jit,
-    ctx_t* cur_ctx
+    jitstate_t *jit,
+    ctx_t *cur_ctx
 )
 {
     //fprintf(stderr, "defer compilation at (%p, %d) depth=%d\n", block->blockid.iseq, insn_idx, cur_ctx->chain_depth);
@@ -973,7 +973,7 @@ void defer_compilation(
 
     next_ctx.chain_depth += 1;
 
-    branch_t* branch = make_branch_entry(jit->block, cur_ctx, gen_jump_branch);
+    branch_t *branch = make_branch_entry(jit->block, cur_ctx, gen_jump_branch);
 
     // Get the branch targets or stubs
     branch->target_ctxs[0] = next_ctx;
@@ -981,7 +981,7 @@ void defer_compilation(
     branch->dst_addrs[0] = get_branch_target(branch->targets[0], &next_ctx, branch, 0);
 
     // Call the branch generation function
-    codeblock_t* cb = jit->cb;
+    codeblock_t *cb = jit->cb;
     branch->start_pos = cb->write_pos;
     gen_jump_branch(cb, branch->dst_addrs[0], NULL, SHAPE_DEFAULT);
     branch->end_pos = cb->write_pos;
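The round trip this sets up is worth spelling out: the jump emitted above initially lands on a stub, and real code only exists after the stub fires. Purely illustrative:

// Sketch of the deferral round trip:
//
//   compiled code --> jmp stub            (emitted by gen_jump_branch above)
//   stub          --> branch_stub_hit()   (compiles the block with next_ctx;
//                                          its nonzero chain_depth skips the
//                                          version limit)
//   patched jmp   --> new block           (later executions bypass the stub)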
@@ -997,7 +997,7 @@ yjit_free_block(block_t *block)
     // Remove this block from the predecessor's targets
    rb_darray_for(block->incoming, incoming_idx) {
         // Branch from the predecessor to us
-        branch_t* pred_branch = rb_darray_get(block->incoming, incoming_idx);
+        branch_t *pred_branch = rb_darray_get(block->incoming, incoming_idx);
 
         // If this is us, nullify the target block
         for (size_t succ_idx = 0; succ_idx < 2; succ_idx++) {
@@ -1009,18 +1009,18 @@ yjit_free_block(block_t *block)
 
     // For each outgoing branch
     rb_darray_for(block->outgoing, branch_idx) {
-        branch_t* out_branch = rb_darray_get(block->outgoing, branch_idx);
+        branch_t *out_branch = rb_darray_get(block->outgoing, branch_idx);
 
         // For each successor block
         for (size_t succ_idx = 0; succ_idx < 2; succ_idx++) {
-            block_t* succ = out_branch->blocks[succ_idx];
+            block_t *succ = out_branch->blocks[succ_idx];
 
             if (succ == NULL)
                 continue;
 
             // Remove this block from the successor's incoming list
             rb_darray_for(succ->incoming, incoming_idx) {
-                branch_t* pred_branch = rb_darray_get(succ->incoming, incoming_idx);
+                branch_t *pred_branch = rb_darray_get(succ->incoming, incoming_idx);
                 if (pred_branch == out_branch) {
                     rb_darray_remove_unordered(succ->incoming, incoming_idx);
                     break;
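rb_darray_remove_unordered is assumed here to be the classic swap-with-last removal. A self-contained sketch with hypothetical names, showing why it is O(1) but does not preserve element order:

// Sketch: remove arr[idx] by overwriting it with the last element
static void remove_unordered(branch_t **arr, int32_t *len, int32_t idx)
{
    arr[idx] = arr[*len - 1];   // move the tail element into the hole
    *len -= 1;                  // shrink by one slot; order is not kept
}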
@@ -1072,11 +1072,11 @@ invalidate_block_version(block_t *block)
     block_array_remove(versions, block);
 
     // Get a pointer to the generated code for this block
-    uint8_t* code_ptr = cb_get_ptr(cb, block->start_pos);
+    uint8_t *code_ptr = cb_get_ptr(cb, block->start_pos);
 
     // For each incoming branch
     rb_darray_for(block->incoming, incoming_idx) {
-        branch_t* branch = rb_darray_get(block->incoming, incoming_idx);
+        branch_t *branch = rb_darray_get(block->incoming, incoming_idx);
         uint32_t target_idx = (branch->dst_addrs[0] == code_ptr)? 0:1;
         RUBY_ASSERT(branch->dst_addrs[target_idx] == code_ptr);
         RUBY_ASSERT(branch->blocks[target_idx] == block);
26 yjit_iface.c
@@ -486,7 +486,7 @@ rb_yjit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec)
     // TODO: I think we need to stop all other ractors here
 
     // Compile a block version starting at the first instruction
-    uint8_t* code_ptr = gen_entry_point(iseq, 0, ec);
+    uint8_t *code_ptr = gen_entry_point(iseq, 0, ec);
 
     if (code_ptr)
     {
@@ -541,7 +541,7 @@ block_address(VALUE self)
 {
     block_t * block;
     TypedData_Get_Struct(self, block_t, &yjit_block_type, block);
-    uint8_t* code_addr = cb_get_ptr(cb, block->start_pos);
+    uint8_t *code_addr = cb_get_ptr(cb, block->start_pos);
     return LONG2NUM((intptr_t)code_addr);
 }
 
@@ -883,7 +883,7 @@ rb_yjit_iseq_mark(const struct rb_iseq_constant_body *body)
 
             // Mark outgoing branch entries
             rb_darray_for(block->outgoing, branch_idx) {
-                branch_t* branch = rb_darray_get(block->outgoing, branch_idx);
+                branch_t *branch = rb_darray_get(block->outgoing, branch_idx);
                 for (int i = 0; i < 2; ++i) {
                     rb_gc_mark_movable((VALUE)branch->targets[i].iseq);
                 }
@@ -925,7 +925,7 @@ rb_yjit_iseq_update_references(const struct rb_iseq_constant_body *body)
 
             // Update outgoing branch entries
             rb_darray_for(block->outgoing, branch_idx) {
-                branch_t* branch = rb_darray_get(block->outgoing, branch_idx);
+                branch_t *branch = rb_darray_get(block->outgoing, branch_idx);
                 for (int i = 0; i < 2; ++i) {
                     branch->targets[i].iseq = (const void *)rb_gc_location((VALUE)branch->targets[i].iseq);
                 }
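These two hunks are mirror images by design: every reference announced with rb_gc_mark_movable() during marking must be rewritten with rb_gc_location() during compaction, or branches would keep pointing at an iseq's old address. The pairing, side by side:

// Marking phase: tell the GC this reference may move
rb_gc_mark_movable((VALUE)branch->targets[i].iseq);

// Compaction phase: ask the GC where the object now lives
branch->targets[i].iseq = (const void *)rb_gc_location((VALUE)branch->targets[i].iseq);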
@@ -986,7 +986,7 @@ static const rb_data_type_t yjit_code_page_type = {
 // Allocate a code page and wrap it into a Ruby object owned by the GC
 VALUE rb_yjit_code_page_alloc(void)
 {
-    code_page_t* code_page = alloc_code_page();
+    code_page_t *code_page = alloc_code_page();
     VALUE cp_obj = TypedData_Wrap_Struct(0, &yjit_code_page_type, code_page);
 
     // Write a pointer to the wrapper object at the beginning of the code page
@@ -1004,21 +1004,21 @@ code_page_t *rb_yjit_code_page_unwrap(VALUE cp_obj)
 }
 
 // Get the code page wrapper object for a code pointer
-VALUE rb_yjit_code_page_from_ptr(uint8_t* code_ptr)
+VALUE rb_yjit_code_page_from_ptr(uint8_t *code_ptr)
 {
-    VALUE* page_start = (VALUE*)((intptr_t)code_ptr & ~(CODE_PAGE_SIZE - 1));
+    VALUE *page_start = (VALUE*)((intptr_t)code_ptr & ~(CODE_PAGE_SIZE - 1));
     VALUE wrapper = *page_start;
     return wrapper;
 }
 
 // Get the inline code block corresponding to a code pointer
-void rb_yjit_get_cb(codeblock_t* cb, uint8_t* code_ptr)
+void rb_yjit_get_cb(codeblock_t *cb, uint8_t *code_ptr)
 {
     VALUE page_wrapper = rb_yjit_code_page_from_ptr(code_ptr);
     code_page_t *code_page = rb_yjit_code_page_unwrap(page_wrapper);
 
     // A pointer to the page wrapper object is written at the start of the code page
-    uint8_t* mem_block = code_page->mem_block + sizeof(VALUE);
+    uint8_t *mem_block = code_page->mem_block + sizeof(VALUE);
     uint32_t mem_size = (code_page->page_size/2) - sizeof(VALUE);
     RUBY_ASSERT(mem_block);
 
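The masking expression in rb_yjit_code_page_from_ptr is the standard align-down trick: with a power-of-two page size, clearing the low bits rounds any interior pointer down to the start of its page. A standalone demo (the page size and address are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define CODE_PAGE_SIZE 4096 // assumed power of two for the demo

int main(void)
{
    uintptr_t code_ptr = 0x7f32a4006c30;
    // ~(CODE_PAGE_SIZE - 1) clears the low 12 bits, rounding down
    // to a 4096-byte boundary
    uintptr_t page_start = code_ptr & ~(uintptr_t)(CODE_PAGE_SIZE - 1);
    printf("%#lx -> %#lx\n", (unsigned long)code_ptr, (unsigned long)page_start);
    return 0;
}

Running it prints 0x7f32a4006c30 -> 0x7f32a4006000.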
@@ -1027,13 +1027,13 @@ void rb_yjit_get_cb(codeblock_t* cb, uint8_t* code_ptr)
 }
 
 // Get the outlined code block corresponding to a code pointer
-void rb_yjit_get_ocb(codeblock_t* cb, uint8_t* code_ptr)
+void rb_yjit_get_ocb(codeblock_t *cb, uint8_t *code_ptr)
 {
     VALUE page_wrapper = rb_yjit_code_page_from_ptr(code_ptr);
     code_page_t *code_page = rb_yjit_code_page_unwrap(page_wrapper);
 
     // A pointer to the page wrapper object is written at the start of the code page
-    uint8_t* mem_block = code_page->mem_block + (code_page->page_size/2);
+    uint8_t *mem_block = code_page->mem_block + (code_page->page_size/2);
     uint32_t mem_size = code_page->page_size/2;
     RUBY_ASSERT(mem_block);
 
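Taken together, rb_yjit_get_cb and rb_yjit_get_ocb imply the following split-page layout, inferred from the offsets above (a sketch, not an authoritative diagram):

// One code page of CODE_PAGE_SIZE bytes:
//
// +---------------+-----------------------------+----------------------+
// | VALUE wrapper | inline code block (cb)      | outlined block (ocb) |
// | sizeof(VALUE) | page_size/2 - sizeof(VALUE) | page_size/2          |
// +---------------+-----------------------------+----------------------+
// ^ page start (recovered by masking any code_ptr)
//                                               ^ mem_block + page_size/2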
@@ -1086,10 +1086,10 @@ outgoing_ids(VALUE self)
     VALUE ids = rb_ary_new();
 
     rb_darray_for(block->outgoing, branch_idx) {
-        branch_t* out_branch = rb_darray_get(block->outgoing, branch_idx);
+        branch_t *out_branch = rb_darray_get(block->outgoing, branch_idx);
 
         for (size_t succ_idx = 0; succ_idx < 2; succ_idx++) {
-            block_t* succ = out_branch->blocks[succ_idx];
+            block_t *succ = out_branch->blocks[succ_idx];
 
             if (succ == NULL)
                 continue;
yjit_iface.h

@@ -139,7 +139,7 @@ void yjit_block_assumptions_free(block_t *block);
 
 VALUE rb_yjit_code_page_alloc(void);
 code_page_t *rb_yjit_code_page_unwrap(VALUE cp_obj);
-void rb_yjit_get_cb(codeblock_t* cb, uint8_t* code_ptr);
-void rb_yjit_get_ocb(codeblock_t* cb, uint8_t* code_ptr);
+void rb_yjit_get_cb(codeblock_t *cb, uint8_t *code_ptr);
+void rb_yjit_get_ocb(codeblock_t *cb, uint8_t *code_ptr);
 
 #endif // #ifndef YJIT_IFACE_H
14 yjit_utils.c
@@ -5,7 +5,7 @@
 #include "yjit_asm.h"
 
 // Save caller-save registers on the stack before a C call
-void push_regs(codeblock_t* cb)
+void push_regs(codeblock_t *cb)
 {
     push(cb, RAX);
     push(cb, RCX);
@@ -20,7 +20,7 @@ void push_regs(codeblock_t* cb)
 }
 
 // Restore caller-save registers from the stack after a C call
-void pop_regs(codeblock_t* cb)
+void pop_regs(codeblock_t *cb)
 {
     popfq(cb);
     pop(cb, R11);
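push_regs and pop_regs must mirror each other exactly: registers come back off the stack in the reverse of the push order, with pushfq/popfq bracketing the flags. A usage sketch around a C call (call_ptr and REG0 as used elsewhere in yjit; the callee is hypothetical):

// Sketch: bracket a C helper call with the save/restore pair
push_regs(cb);                               // push RAX, RCX, ..., R11; pushfq
call_ptr(cb, REG0, (void *)&some_c_helper);  // hypothetical C function
pop_regs(cb);                                // popfq; pop R11, ..., RCX, RAX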
@@ -39,7 +39,7 @@ static void print_int_cfun(int64_t val)
     fprintf(stderr, "%lld\n", (long long int)val);
 }
 
-void print_int(codeblock_t* cb, x86opnd_t opnd)
+void print_int(codeblock_t *cb, x86opnd_t opnd)
 {
     push_regs(cb);
 
@@ -55,12 +55,12 @@ void print_int(codeblock_t* cb, x86opnd_t opnd)
     pop_regs(cb);
 }
 
-static void print_ptr_cfun(void* val)
+static void print_ptr_cfun(void *val)
 {
     fprintf(stderr, "%p\n", val);
 }
 
-void print_ptr(codeblock_t* cb, x86opnd_t opnd)
+void print_ptr(codeblock_t *cb, x86opnd_t opnd)
 {
     assert (opnd.num_bits == 64);
 
@@ -73,13 +73,13 @@ void print_ptr(codeblock_t* cb, x86opnd_t opnd)
     pop_regs(cb);
 }
 
-static void print_str_cfun(const char* str)
+static void print_str_cfun(const char *str)
 {
     fprintf(stderr, "%s\n", str);
 }
 
 // Print a constant string to stderr
-void print_str(codeblock_t* cb, const char* str)
+void print_str(codeblock_t *cb, const char *str)
 {
     //as.comment("printStr(\"" ~ str ~ "\")");
     size_t len = strlen(str);
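print_str needs the string bytes available at runtime, and the usual trick (the rest of this function is outside the hunk) is to embed them directly in the instruction stream and jump over them. A sketch assuming the yjit_asm helpers lea, mem_opnd, jmp32, and cb_write_byte:

// Sketch: take the address of data placed right after a jump, then skip it
lea(cb, C_ARG_REGS[0], mem_opnd(8, RIP, 5)); // data starts past the 5-byte jmp
jmp32(cb, (int32_t)len + 1);                 // hop over the bytes plus the NUL

for (size_t i = 0; i < len; ++i)
    cb_write_byte(cb, (uint8_t)str[i]);      // emit the string inline
cb_write_byte(cb, 0);                        // terminating NUL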
10 yjit_utils.h
@@ -6,10 +6,10 @@
 #include <stdbool.h>
 #include "yjit_asm.h"
 
-void push_regs(codeblock_t* cb);
-void pop_regs(codeblock_t* cb);
-void print_int(codeblock_t* cb, x86opnd_t opnd);
-void print_ptr(codeblock_t* cb, x86opnd_t opnd);
-void print_str(codeblock_t* cb, const char* str);
+void push_regs(codeblock_t *cb);
+void pop_regs(codeblock_t *cb);
+void print_int(codeblock_t *cb, x86opnd_t opnd);
+void print_ptr(codeblock_t *cb, x86opnd_t opnd);
+void print_str(codeblock_t *cb, const char *str);
 
 #endif // #ifndef YJIT_UTILS_H