4b58d698b1
The interpreter instruction count was enabled based on RUBY_DEBUG as opposed to YJIT_STATS. In builds with YJIT_STATS=1 but RUBY_DEBUG=0, the count was not available. Move YJIT_STATS into yjit.h, where declarations are exposed to code outside of YJIT. Also reduce the changes made to the interpreter for calling into YJIT's instruction counting function.
#ifndef RUBY_INSNHELPER_H
#define RUBY_INSNHELPER_H
/**********************************************************************

  insnhelper.h - helper macros to implement each instruction

  $Author$
  created at: 04/01/01 15:50:34 JST

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

MJIT_SYMBOL_EXPORT_BEGIN

RUBY_EXTERN VALUE ruby_vm_const_missing_count;
RUBY_EXTERN rb_serial_t ruby_vm_global_constant_state;
RUBY_EXTERN rb_serial_t ruby_vm_class_serial;
RUBY_EXTERN rb_serial_t ruby_vm_global_cvar_state;

MJIT_SYMBOL_EXPORT_END

#if VM_COLLECT_USAGE_DETAILS
#define COLLECT_USAGE_INSN(insn) vm_collect_usage_insn(insn)
#define COLLECT_USAGE_OPERAND(insn, n, op) vm_collect_usage_operand((insn), (n), ((VALUE)(op)))

#define COLLECT_USAGE_REGISTER(reg, s) vm_collect_usage_register((reg), (s))
#elif YJIT_STATS
/* for --yjit-stats */
#define COLLECT_USAGE_INSN(insn) rb_yjit_collect_vm_usage_insn(insn)
#define COLLECT_USAGE_OPERAND(insn, n, op) /* none */
#define COLLECT_USAGE_REGISTER(reg, s)     /* none */
#else
#define COLLECT_USAGE_INSN(insn)           /* none */
#define COLLECT_USAGE_OPERAND(insn, n, op) /* none */
#define COLLECT_USAGE_REGISTER(reg, s)     /* none */
#endif

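/* Exactly one of the three branches above is active in a given build:
 * VM_COLLECT_USAGE_DETAILS wires up full insn/operand/register tracing,
 * YJIT_STATS (for --yjit-stats) only counts interpreted instructions,
 * and default builds compile all three hooks away to nothing. */
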
/**********************************************************/
/* deal with stack */
/**********************************************************/

#define PUSH(x) (SET_SV(x), INC_SP(1))
#define TOPN(n) (*(GET_SP()-(n)-1))
#define POPN(n) (DEC_SP(n))
#define POP()   (DEC_SP(1))
#define STACK_ADDR_FROM_TOP(n) (GET_SP()-(n))

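/* Illustrative sketch, not part of the original header: a hypothetical
 * two-operand instruction body could combine these macros as
 *
 *     VALUE recv = TOPN(1);  (second-from-top stack slot)
 *     VALUE obj  = TOPN(0);  (top of stack)
 *     POPN(2);               (drop both operands)
 *     PUSH(result);          (SET_SV writes the slot, INC_SP bumps SP)
 *
 * keeping SP bookkeeping consistent for the canary checks below. */
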
/**********************************************************/
/* deal with registers */
/**********************************************************/

#define VM_REG_CFP (reg_cfp)
#define VM_REG_PC  (VM_REG_CFP->pc)
#define VM_REG_SP  (VM_REG_CFP->sp)
#define VM_REG_EP  (VM_REG_CFP->ep)

#define RESTORE_REGS() do { \
    VM_REG_CFP = ec->cfp; \
} while (0)

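/* RESTORE_REGS() re-reads the cached control frame pointer from the
 * execution context; it is needed after any call that can push or pop
 * frames, since the locally cached reg_cfp would otherwise be stale. */
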
#if VM_COLLECT_USAGE_DETAILS
enum vm_regan_regtype {
    VM_REGAN_PC = 0,
    VM_REGAN_SP = 1,
    VM_REGAN_EP = 2,
    VM_REGAN_CFP = 3,
    VM_REGAN_SELF = 4,
    VM_REGAN_ISEQ = 5
};
enum vm_regan_acttype {
    VM_REGAN_ACT_GET = 0,
    VM_REGAN_ACT_SET = 1
};

#define COLLECT_USAGE_REGISTER_HELPER(a, b, v) \
    (COLLECT_USAGE_REGISTER((VM_REGAN_##a), (VM_REGAN_ACT_##b)), (v))
#else
#define COLLECT_USAGE_REGISTER_HELPER(a, b, v) (v)
#endif

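/* COLLECT_USAGE_REGISTER_HELPER is a comma expression: it first records
 * the register access (when usage collection is enabled) and then
 * yields (v) unchanged, so the GET_*()/SET_*() macros below remain
 * usable in expression position. */
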
/* PC */
#define GET_PC()           (COLLECT_USAGE_REGISTER_HELPER(PC, GET, VM_REG_PC))
#define SET_PC(x)          (VM_REG_PC = (COLLECT_USAGE_REGISTER_HELPER(PC, SET, (x))))
#define GET_CURRENT_INSN() (*GET_PC())
#define GET_OPERAND(n)     (GET_PC()[(n)])
#define ADD_PC(n)          (SET_PC(VM_REG_PC + (n)))
#define JUMP(dst)          (SET_PC(VM_REG_PC + (dst)))

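/* Illustrative sketch, not part of the original header: a conditional
 * branch instruction with a relative offset operand might be written as
 *
 *     OFFSET dst = (OFFSET)GET_OPERAND(1);
 *     if (RTEST(val)) JUMP(dst);
 *
 * since both ADD_PC() and JUMP() move VM_REG_PC relative to its
 * current position. */
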
/* frame pointer, environment pointer */
#define GET_CFP()  (COLLECT_USAGE_REGISTER_HELPER(CFP, GET, VM_REG_CFP))
#define GET_EP()   (COLLECT_USAGE_REGISTER_HELPER(EP, GET, VM_REG_EP))
#define SET_EP(x)  (VM_REG_EP = (COLLECT_USAGE_REGISTER_HELPER(EP, SET, (x))))
#define GET_LEP()  (VM_EP_LEP(GET_EP()))

/* SP */
#define GET_SP()   (COLLECT_USAGE_REGISTER_HELPER(SP, GET, VM_REG_SP))
#define SET_SP(x)  (VM_REG_SP  = (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define INC_SP(x)  (VM_REG_SP += (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define DEC_SP(x)  (VM_REG_SP -= (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define SET_SV(x)  (*GET_SP() = rb_ractor_confirm_belonging(x))
  /* set current stack value as x */

/* instruction sequence C struct */
#define GET_ISEQ() (GET_CFP()->iseq)

/**********************************************************/
/* deal with variables */
/**********************************************************/

#define GET_PREV_EP(ep) ((VALUE *)((ep)[VM_ENV_DATA_INDEX_SPECVAL] & ~0x03))

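/* The specval slot stores the previous EP with its low two bits used
 * as tag bits, so masking with ~0x03 recovers the plain pointer. */
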
/**********************************************************/
/* deal with values */
/**********************************************************/

#define GET_SELF() (COLLECT_USAGE_REGISTER_HELPER(SELF, GET, GET_CFP()->self))

/**********************************************************/
/* deal with control flow 2: method/iterator */
/**********************************************************/

/* set fastpath when cached method is *NOT* protected
 * because inline method cache does not care about receiver.
 */
static inline void
CC_SET_FASTPATH(const struct rb_callcache *cc, vm_call_handler func, bool enabled)
{
    if (LIKELY(enabled)) {
        vm_cc_call_set(cc, func);
    }
}

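/* Illustrative sketch, not part of the original header: call sites in
 * vm_insnhelper.c typically pass a guard expression, e.g.
 *
 *     CC_SET_FASTPATH(cc, vm_call_cfunc_with_frame,
 *                     !rb_splat_or_kwargs_p(ci) && !calling->kw_splat);
 *
 * so a handler is only installed for argument shapes it can service. */
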
#define GET_BLOCK_HANDLER() (GET_LEP()[VM_ENV_DATA_INDEX_SPECVAL])

/**********************************************************/
/* deal with control flow 3: exception */
/**********************************************************/


/**********************************************************/
/* deal with stack canary */
/**********************************************************/

#if VM_CHECK_MODE > 0
#define SETUP_CANARY(cond) \
    VALUE *canary = 0; \
    if (cond) { \
        canary = GET_SP(); \
        SET_SV(vm_stack_canary); \
    } \
    else { \
        SET_SV(Qfalse); /* cleanup */ \
    }
#define CHECK_CANARY(cond, insn) \
    if (cond) { \
        if (*canary == vm_stack_canary) { \
            *canary = Qfalse; /* cleanup */ \
        } \
        else { \
            rb_vm_canary_is_found_dead(insn, *canary); \
        } \
    }
#else
#define SETUP_CANARY(cond)       if (cond) {} else {}
#define CHECK_CANARY(cond, insn) if (cond) {(void)(insn);}
#endif

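/* SETUP_CANARY() plants vm_stack_canary in the next stack slot before
 * an instruction body runs; CHECK_CANARY() then verifies the slot was
 * not clobbered, reporting through rb_vm_canary_is_found_dead() if it
 * was. The two must be used as a matched pair under the same cond. */
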
/**********************************************************/
/* others */
/**********************************************************/

#ifndef MJIT_HEADER
#define CALL_SIMPLE_METHOD() do { \
    rb_snum_t x = leaf ? INSN_ATTR(width) : 0; \
    rb_snum_t y = attr_width_opt_send_without_block(0); \
    rb_snum_t z = x - y; \
    ADD_PC(z); \
    DISPATCH_ORIGINAL_INSN(opt_send_without_block); \
} while (0)
#endif

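/* Interpretive note, not from the original header: x is the current
 * instruction's width for leaf instructions and zero otherwise, and y
 * is the width of opt_send_without_block; adding their difference to
 * the PC puts it exactly where the opt_send_without_block handler
 * expects it before DISPATCH_ORIGINAL_INSN() transfers control. */
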
#define PREV_CLASS_SERIAL()         (ruby_vm_class_serial)
#define NEXT_CLASS_SERIAL()         (++ruby_vm_class_serial)
#define GET_GLOBAL_CONSTANT_STATE() (ruby_vm_global_constant_state)
#define INC_GLOBAL_CONSTANT_STATE() (++ruby_vm_global_constant_state)
#define GET_GLOBAL_CVAR_STATE()     (ruby_vm_global_cvar_state)
#define INC_GLOBAL_CVAR_STATE()     (++ruby_vm_global_cvar_state)

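/* These counters back inline-cache invalidation: a cache remembers the
 * state value current when it was filled and is treated as stale once
 * the global counter moves on; NEXT_CLASS_SERIAL() hands out fresh
 * class serials for the same purpose. */
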
static inline struct vm_throw_data *
THROW_DATA_NEW(VALUE val, const rb_control_frame_t *cf, int st)
{
    struct vm_throw_data *obj = (struct vm_throw_data *)rb_imemo_new(imemo_throw_data, val, (VALUE)cf, 0, 0);
    obj->throw_state = st;
    return obj;
}

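/* Interpretive note, not from the original header: the throw data is
 * an imemo object wrapping the thrown value, the catch frame, and a
 * TAG_* state (see the accessors below). */
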
static inline VALUE
THROW_DATA_VAL(const struct vm_throw_data *obj)
{
    VM_ASSERT(THROW_DATA_P(obj));
    return obj->throw_obj;
}

static inline const rb_control_frame_t *
THROW_DATA_CATCH_FRAME(const struct vm_throw_data *obj)
{
    VM_ASSERT(THROW_DATA_P(obj));
    return obj->catch_frame;
}

static inline int
THROW_DATA_STATE(const struct vm_throw_data *obj)
{
    VM_ASSERT(THROW_DATA_P(obj));
    return obj->throw_state;
}

static inline int
THROW_DATA_CONSUMED_P(const struct vm_throw_data *obj)
{
    VM_ASSERT(THROW_DATA_P(obj));
    return obj->flags & THROW_DATA_CONSUMED;
}

static inline void
THROW_DATA_CATCH_FRAME_SET(struct vm_throw_data *obj, const rb_control_frame_t *cfp)
{
    VM_ASSERT(THROW_DATA_P(obj));
    obj->catch_frame = cfp;
}

static inline void
THROW_DATA_STATE_SET(struct vm_throw_data *obj, int st)
{
    VM_ASSERT(THROW_DATA_P(obj));
    obj->throw_state = st;
}

static inline void
THROW_DATA_CONSUMED_SET(struct vm_throw_data *obj)
{
    if (THROW_DATA_P(obj) &&
        THROW_DATA_STATE(obj) == TAG_BREAK) {
        obj->flags |= THROW_DATA_CONSUMED;
    }
}

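/* Interpretive note, not from the original header: only TAG_BREAK
 * throw data is ever flagged as consumed, which keeps a break from
 * being propagated again once a frame has handled it. */
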
#define IS_ARGS_SPLAT(ci)          (vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT)
#define IS_ARGS_KEYWORD(ci)        (vm_ci_flag(ci) & VM_CALL_KWARG)
#define IS_ARGS_KW_SPLAT(ci)       (vm_ci_flag(ci) & VM_CALL_KW_SPLAT)
#define IS_ARGS_KW_OR_KW_SPLAT(ci) (vm_ci_flag(ci) & (VM_CALL_KWARG | VM_CALL_KW_SPLAT))
#define IS_ARGS_KW_SPLAT_MUT(ci)   (vm_ci_flag(ci) & VM_CALL_KW_SPLAT_MUT)

/* If this returns true, an optimized function returned by `vm_call_iseq_setup_func`
   can be used as a fastpath. */
static inline bool
vm_call_iseq_optimizable_p(const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    return !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
        METHOD_ENTRY_CACHEABLE(vm_cc_cme(cc));
}

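/* Splat and keyword calls need per-call argument reshaping, and method
 * entries not flagged METHOD_ENTRY_CACHEABLE must be re-checked on
 * every call, so any of them disqualifies the setup fastpath. */
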
#endif /* RUBY_INSNHELPER_H */