1
0
Fork 0
mirror of https://github.com/ruby/ruby.git synced 2022-11-09 12:17:21 -05:00

Introduce ctx_{get,set}_opnd_mapping

This commit is contained in:
John Hawthorn 2021-07-14 11:36:33 -07:00 committed by Alan Wu
parent 4ea69e5c0b
commit 6c80150d40
3 changed files with 106 additions and 51 deletions

View file

@@ -553,11 +553,11 @@ static codegen_status_t
gen_dup(jitstate_t* jit, ctx_t* ctx)
{
// Get the top value and its type
val_type_t dup_type = ctx_get_opnd_type(ctx, OPND_STACK(0));
x86opnd_t dup_val = ctx_stack_pop(ctx, 0);
temp_type_mapping_t mapping = ctx_get_opnd_mapping(ctx, OPND_STACK(0));
// Push the same value on top
x86opnd_t loc0 = ctx_stack_push(ctx, dup_type);
x86opnd_t loc0 = ctx_stack_push_mapping(ctx, mapping);
mov(cb, REG0, dup_val);
mov(cb, loc0, REG0);
@@ -575,17 +575,16 @@ gen_dupn(jitstate_t* jit, ctx_t* ctx)
return YJIT_CANT_COMPILE;
}
val_type_t type1 = ctx_get_opnd_type(ctx, OPND_STACK(1));
x86opnd_t opnd1 = ctx_stack_opnd(ctx, 1);
val_type_t type0 = ctx_get_opnd_type(ctx, OPND_STACK(0));
x86opnd_t opnd0 = ctx_stack_opnd(ctx, 0);
temp_type_mapping_t mapping1 = ctx_get_opnd_mapping(ctx, OPND_STACK(1));
temp_type_mapping_t mapping0 = ctx_get_opnd_mapping(ctx, OPND_STACK(0));
x86opnd_t dst1 = ctx_stack_push(ctx, type1);
x86opnd_t dst1 = ctx_stack_push_mapping(ctx, mapping1);
mov(cb, REG0, opnd1);
mov(cb, dst1, REG0);
x86opnd_t dst0 = ctx_stack_push(ctx, type0);
x86opnd_t dst0 = ctx_stack_push_mapping(ctx, mapping0);
mov(cb, REG0, opnd0);
mov(cb, dst0, REG0);
@@ -596,21 +595,19 @@ gen_dupn(jitstate_t* jit, ctx_t* ctx)
static codegen_status_t
gen_swap(jitstate_t* jit, ctx_t* ctx)
{
val_type_t type0 = ctx_get_opnd_type(ctx, OPND_STACK(0));
x86opnd_t opnd0 = ctx_stack_opnd(ctx, 0);
val_type_t type1 = ctx_get_opnd_type(ctx, OPND_STACK(1));
x86opnd_t opnd1 = ctx_stack_opnd(ctx, 1);
temp_type_mapping_t mapping0 = ctx_get_opnd_mapping(ctx, OPND_STACK(0));
temp_type_mapping_t mapping1 = ctx_get_opnd_mapping(ctx, OPND_STACK(1));
mov(cb, REG0, opnd0);
mov(cb, REG1, opnd1);
ctx_set_opnd_type(ctx, OPND_STACK(0), type1);
ctx_set_opnd_type(ctx, OPND_STACK(1), type0);
mov(cb, opnd0, REG1);
mov(cb, opnd1, REG0);
ctx_set_opnd_mapping(ctx, OPND_STACK(0), mapping1);
ctx_set_opnd_mapping(ctx, OPND_STACK(1), mapping0);
return YJIT_KEEP_COMPILING;
}
@@ -620,16 +617,15 @@ gen_setn(jitstate_t* jit, ctx_t* ctx)
{
rb_num_t n = (rb_num_t)jit_get_arg(jit, 0);
// Get the top value and its type
val_type_t top_type = ctx_get_opnd_type(ctx, OPND_STACK(0));
// Set the destination
x86opnd_t top_val = ctx_stack_pop(ctx, 0);
// Set the destination and its type
ctx_set_opnd_type(ctx, OPND_STACK(n), top_type);
x86opnd_t dst_opnd = ctx_stack_opnd(ctx, (int32_t)n);
mov(cb, REG0, top_val);
mov(cb, dst_opnd, REG0);
temp_type_mapping_t mapping = ctx_get_opnd_mapping(ctx, OPND_STACK(0));
ctx_set_opnd_mapping(ctx, OPND_STACK(n), mapping);
return YJIT_KEEP_COMPILING;
}
@@ -640,10 +636,10 @@ gen_topn(jitstate_t* jit, ctx_t* ctx)
int32_t n = (int32_t)jit_get_arg(jit, 0);
// Get top n type / operand
val_type_t top_n_type = ctx_get_opnd_type(ctx, OPND_STACK(n));
x86opnd_t top_n_val = ctx_stack_opnd(ctx, n);
temp_type_mapping_t mapping = ctx_get_opnd_mapping(ctx, OPND_STACK(n));
x86opnd_t loc0 = ctx_stack_push(ctx, top_n_type);
x86opnd_t loc0 = ctx_stack_push_mapping(ctx, mapping);
mov(cb, REG0, top_n_val);
mov(cb, loc0, REG0);

View file

@@ -21,16 +21,18 @@ ctx_sp_opnd(ctx_t* ctx, int32_t offset_bytes)
}
/*
Push one new value on the temp stack
Push one new value on the temp stack with an explicit mapping
Return a pointer to the new stack top
*/
x86opnd_t
ctx_stack_push(ctx_t* ctx, val_type_t type)
ctx_stack_push_mapping(ctx_t* ctx, temp_type_mapping_t mapping)
{
// Keep track of the type of the value
// Keep track of the type and mapping of the value
if (ctx->stack_size < MAX_TEMP_TYPES) {
ctx->temp_mapping[ctx->stack_size] = MAP_STACK;
ctx->temp_types[ctx->stack_size] = type;
ctx->temp_mapping[ctx->stack_size] = mapping.mapping;
ctx->temp_types[ctx->stack_size] = mapping.type;
RUBY_ASSERT(mapping.mapping.kind != TEMP_LOCAL || mapping.mapping.idx < MAX_LOCAL_TYPES);
}
ctx->stack_size += 1;
@@ -41,24 +43,26 @@ ctx_stack_push(ctx_t* ctx, val_type_t type)
return mem_opnd(64, REG_SP, offset);
}
/*
Push one new value on the temp stack
Return a pointer to the new stack top
*/
x86opnd_t
ctx_stack_push(ctx_t* ctx, val_type_t type)
{
    // A plain push is a push whose value is backed only by the stack:
    // delegate to the mapping-aware push with the default MAP_STACK mapping.
    return ctx_stack_push_mapping(ctx, (temp_type_mapping_t){ MAP_STACK, type });
}
/*
Push the self value on the stack
*/
x86opnd_t
ctx_stack_push_self(ctx_t* ctx)
{
// Keep track of the type of the value
if (ctx->stack_size < MAX_TEMP_TYPES) {
ctx->temp_mapping[ctx->stack_size] = MAP_SELF;
ctx->temp_types[ctx->stack_size] = ctx->self_type;
}
ctx->stack_size += 1;
ctx->sp_offset += 1;
// SP points just above the topmost value
int32_t offset = (ctx->sp_offset - 1) * sizeof(VALUE);
return mem_opnd(64, REG_SP, offset);
temp_type_mapping_t mapping = { MAP_SELF, TYPE_UNKNOWN };
return ctx_stack_push_mapping(ctx, mapping);
}
/*
@@ -67,17 +71,15 @@ Push a local variable on the stack
x86opnd_t
ctx_stack_push_local(ctx_t* ctx, size_t local_idx)
{
// Keep track of the type of the value
if (ctx->stack_size < MAX_TEMP_TYPES && local_idx < MAX_LOCAL_TYPES) {
ctx->temp_mapping[ctx->stack_size] = (temp_mapping_t){ .kind = TEMP_LOCAL, .idx = local_idx };
if (local_idx >= MAX_LOCAL_TYPES) {
return ctx_stack_push(ctx, TYPE_UNKNOWN);
}
ctx->stack_size += 1;
ctx->sp_offset += 1;
// SP points just above the topmost value
int32_t offset = (ctx->sp_offset - 1) * sizeof(VALUE);
return mem_opnd(64, REG_SP, offset);
temp_type_mapping_t mapping = {
(temp_mapping_t){ .kind = TEMP_LOCAL, .idx = local_idx },
TYPE_UNKNOWN
};
return ctx_stack_push_mapping(ctx, mapping);
}
/*
@@ -131,7 +133,7 @@ ctx_get_opnd_type(const ctx_t* ctx, insn_opnd_t opnd)
if (opnd.is_self)
return ctx->self_type;
if (ctx->stack_size > MAX_TEMP_TYPES)
if (ctx->stack_size >= MAX_TEMP_TYPES)
return TYPE_UNKNOWN;
RUBY_ASSERT(opnd.idx < ctx->stack_size);
@@ -170,11 +172,13 @@ void ctx_upgrade_opnd_type(ctx_t* ctx, insn_opnd_t opnd, val_type_t type)
return;
}
if (ctx->stack_size > MAX_TEMP_TYPES)
if (ctx->stack_size >= MAX_TEMP_TYPES)
return;
RUBY_ASSERT(opnd.idx < ctx->stack_size);
temp_mapping_t mapping = ctx->temp_mapping[ctx->stack_size - 1 - opnd.idx];
int stack_index = ctx->stack_size - 1 - opnd.idx;
RUBY_ASSERT(stack_index < MAX_TEMP_TYPES);
temp_mapping_t mapping = ctx->temp_mapping[stack_index];
switch (mapping.kind)
{
@@ -183,7 +187,6 @@ void ctx_upgrade_opnd_type(ctx_t* ctx, insn_opnd_t opnd, val_type_t type)
break;
case TEMP_STACK:
int stack_index = ctx->stack_size - 1 - opnd.idx;
UPGRADE_TYPE(ctx->temp_types[stack_index], type);
break;
@@ -194,6 +197,51 @@ void ctx_upgrade_opnd_type(ctx_t* ctx, insn_opnd_t opnd, val_type_t type)
}
}
/*
Get both the type and the mapping of an instruction operand.
The self operand always reports MAP_SELF; stack slots beyond the
tracked window fall back to MAP_STACK with an unknown type.
*/
temp_type_mapping_t
ctx_get_opnd_mapping(const ctx_t* ctx, insn_opnd_t opnd)
{
    temp_type_mapping_t result;
    result.type = ctx_get_opnd_type(ctx, opnd);

    // self is always backed by the self mapping
    if (opnd.is_self) {
        result.mapping = MAP_SELF;
        return result;
    }

    RUBY_ASSERT(opnd.idx < ctx->stack_size);
    int slot = ctx->stack_size - 1 - opnd.idx;

    if (slot >= MAX_TEMP_TYPES) {
        // We can't know the source of this stack operand, so we assume it
        // is a stack-only temporary; its type must already be UNKNOWN here
        RUBY_ASSERT(result.type.type == ETYPE_UNKNOWN);
        result.mapping = MAP_STACK;
        return result;
    }

    result.mapping = ctx->temp_mapping[slot];
    return result;
}
/*
Overwrite the mapping (and type) recorded for a stack operand.
The self operand must not be passed in: its mapping is always MAP_SELF.
Slots outside the tracked window are silently left untouched.
*/
void
ctx_set_opnd_mapping(ctx_t* ctx, insn_opnd_t opnd, temp_type_mapping_t type_mapping)
{
    // self is always MAP_SELF; callers may not remap it
    RUBY_ASSERT(!opnd.is_self);
    RUBY_ASSERT(opnd.idx < ctx->stack_size);

    int slot = ctx->stack_size - 1 - opnd.idx;

    // If outside of the tracked range, there is no metadata to update
    if (slot < MAX_TEMP_TYPES) {
        ctx->temp_mapping[slot] = type_mapping.mapping;
        // The type field is only used when the mapping is MAP_STACK
        ctx->temp_types[slot] = type_mapping.type;
    }
}
/**
Set the type of a local variable
*/

View file

@@ -104,6 +104,13 @@ STATIC_ASSERT(temp_mapping_size, sizeof(temp_mapping_t) == 1);
// Temp value is actually self
#define MAP_SELF ( (temp_mapping_t) { .kind = TEMP_SELF } )
// Represents both the type and mapping of a temp, so the pair can be read
// and written as one unit (see ctx_get_opnd_mapping / ctx_set_opnd_mapping)
typedef struct {
temp_mapping_t mapping;
// NOTE(review): type appears meaningful only for MAP_STACK mappings — confirm
val_type_t type;
} temp_type_mapping_t;
STATIC_ASSERT(temp_type_mapping_size, sizeof(temp_type_mapping_t) == 2);
// Operand to a bytecode instruction
typedef struct yjit_insn_opnd
{
@@ -252,6 +259,7 @@ typedef struct yjit_block_version
// Context object methods
x86opnd_t ctx_sp_opnd(ctx_t* ctx, int32_t offset_bytes);
x86opnd_t ctx_stack_push_mapping(ctx_t* ctx, temp_type_mapping_t mapping);
x86opnd_t ctx_stack_push(ctx_t* ctx, val_type_t type);
x86opnd_t ctx_stack_push_self(ctx_t* ctx);
x86opnd_t ctx_stack_push_local(ctx_t* ctx, size_t local_idx);
@@ -263,6 +271,9 @@ void ctx_set_local_type(ctx_t* ctx, size_t idx, val_type_t type);
void ctx_clear_local_types(ctx_t* ctx);
int ctx_diff(const ctx_t* src, const ctx_t* dst);
temp_type_mapping_t ctx_get_opnd_mapping(const ctx_t* ctx, insn_opnd_t opnd);
void ctx_set_opnd_mapping(ctx_t* ctx, insn_opnd_t opnd, temp_type_mapping_t type_mapping);
block_t* find_block_version(blockid_t blockid, const ctx_t* ctx);
block_t* gen_block_version(blockid_t blockid, const ctx_t* ctx, rb_execution_context_t *ec);
uint8_t* gen_entry_point(const rb_iseq_t *iseq, uint32_t insn_idx, rb_execution_context_t *ec);