Mirror of https://github.com/ruby/ruby.git (synced 2022-11-09 12:17:21 -05:00)
d2c41b1bff
Previously, passing a keyword splat to a method always allocated a hash on the caller side, and accepting arbitrary keywords in a method allocated a separate hash on the callee side. Passing explicit keywords to a method that accepted a keyword splat did not allocate a hash on the caller side, but resulted in two hashes allocated on the callee side.

This commit makes passing a single keyword splat to a method not allocate a hash on the caller side. Passing multiple keyword splats or a mix of explicit keywords and a keyword splat still generates a hash on the caller side. On the callee side, if arbitrary keywords are not accepted, no hash is allocated. If arbitrary keywords are accepted, a hash is allocated, but this commit uses a callinfo flag to indicate whether the caller already allocated a hash, and if so, the callee can use the passed hash without duplicating it. So this commit should make it so that at most a single hash is allocated during a method call.

To set the callinfo flag appropriately, method call argument compilation checks whether only a single keyword splat is given. If so, the VM_CALL_KW_SPLAT_MUT callinfo flag is not set, since in that case the keyword splat is passed directly and is not mutable. If more than one splat is used, a new hash needs to be generated on the caller side, and in that case the callinfo flag is set, indicating the keyword splat is mutable by the callee.

In compile_hash, used for both hash and keyword argument compilation, if compiling keyword arguments and only a single keyword splat is used, the argument is passed directly.

On the caller side, in vm_args.c, the callinfo flag needs to be recognized and handled. Because the keyword splat argument may not be a hash, it needs to be converted to a hash first if it is not one. Then, unless the callinfo flag is set, the hash needs to be duplicated. The temporary copy of the callinfo flag, kw_flag, is updated if a hash was duplicated, to prevent the need to duplicate it again. If we are converting to a hash or duplicating a hash, we need to update the argument array, which can include duplicating the positional splat array if one was passed. CALLER_SETUP_ARG and a couple of other places need to be modified to handle similar issues for other types of calls.

This includes fairly comprehensive tests for the different ways keywords are handled internally, checking that you get equal results but that keyword splats on the caller side result in distinct objects for keyword rest parameters.

Included are benchmarks for keyword argument calls. Brief results when compiled without optimization:

    def kw(a: 1) a end
    def kws(**kw) kw end
    h = {a: 1}

    kw(a: 1)        # about the same
    kw(**h)         # 2.37x faster
    kws(a: 1)       # 1.30x faster
    kws(**h)        # 2.19x faster
    kw(a: 1, **h)   # 1.03x slower
    kw(**h, **h)    # about the same
    kws(a: 1, **h)  # 1.16x faster
    kws(**h, **h)   # 1.14x faster
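A minimal illustrative Ruby sketch of the behavior the tests above check (assumed usage, not taken from the commit): even though a single keyword splat no longer allocates a hash on the caller side, the keyword rest parameter the callee receives is still a distinct object from the caller's hash, so contents compare equal while mutations stay separate.

    def kws(**kw) kw end

    h = {a: 1}
    captured = kws(**h)

    captured == h       # => true   (equal contents)
    captured.equal?(h)  # => false  (distinct object)

    captured[:b] = 2
    h                   # => {:a=>1} (caller's hash is unchanged)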
261 lines
8.2 KiB
C
/**********************************************************************

  insnhelper.h - helper macros to implement each instruction

  $Author$
  created at: 04/01/01 15:50:34 JST

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

#ifndef RUBY_INSNHELPER_H
#define RUBY_INSNHELPER_H

RUBY_SYMBOL_EXPORT_BEGIN

RUBY_EXTERN VALUE ruby_vm_const_missing_count;
RUBY_EXTERN rb_serial_t ruby_vm_global_method_state;
RUBY_EXTERN rb_serial_t ruby_vm_global_constant_state;
RUBY_EXTERN rb_serial_t ruby_vm_class_serial;

RUBY_SYMBOL_EXPORT_END

#if VM_COLLECT_USAGE_DETAILS
#define COLLECT_USAGE_INSN(insn)           vm_collect_usage_insn(insn)
#define COLLECT_USAGE_OPERAND(insn, n, op) vm_collect_usage_operand((insn), (n), ((VALUE)(op)))
#define COLLECT_USAGE_REGISTER(reg, s)     vm_collect_usage_register((reg), (s))
#else
#define COLLECT_USAGE_INSN(insn)           /* none */
#define COLLECT_USAGE_OPERAND(insn, n, op) /* none */
#define COLLECT_USAGE_REGISTER(reg, s)     /* none */
#endif

/**********************************************************/
/* deal with stack */
/**********************************************************/
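
/* The VM value stack grows upward: PUSH() stores x at the current SP and then
 * increments SP, TOPN(0) reads the value on top of the stack, and TOPN(n) the
 * value n slots below it. */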
#define PUSH(x) (SET_SV(x), INC_SP(1))
#define TOPN(n) (*(GET_SP()-(n)-1))
#define POPN(n) (DEC_SP(n))
#define POP()   (DEC_SP(1))
#define STACK_ADDR_FROM_TOP(n) (GET_SP()-(n))

/**********************************************************/
/* deal with registers */
/**********************************************************/

#define VM_REG_CFP (reg_cfp)
#define VM_REG_PC  (VM_REG_CFP->pc)
#define VM_REG_SP  (VM_REG_CFP->sp)
#define VM_REG_EP  (VM_REG_CFP->ep)

#define RESTORE_REGS() do { \
    VM_REG_CFP = ec->cfp; \
} while (0)

#if VM_COLLECT_USAGE_DETAILS
enum vm_regan_regtype {
    VM_REGAN_PC = 0,
    VM_REGAN_SP = 1,
    VM_REGAN_EP = 2,
    VM_REGAN_CFP = 3,
    VM_REGAN_SELF = 4,
    VM_REGAN_ISEQ = 5
};
enum vm_regan_acttype {
    VM_REGAN_ACT_GET = 0,
    VM_REGAN_ACT_SET = 1
};

#define COLLECT_USAGE_REGISTER_HELPER(a, b, v) \
    (COLLECT_USAGE_REGISTER((VM_REGAN_##a), (VM_REGAN_ACT_##b)), (v))
#else
#define COLLECT_USAGE_REGISTER_HELPER(a, b, v) (v)
#endif

/* PC */
#define GET_PC()           (COLLECT_USAGE_REGISTER_HELPER(PC, GET, VM_REG_PC))
#define SET_PC(x)          (VM_REG_PC = (COLLECT_USAGE_REGISTER_HELPER(PC, SET, (x))))
#define GET_CURRENT_INSN() (*GET_PC())
#define GET_OPERAND(n)     (GET_PC()[(n)])
#define ADD_PC(n)          (SET_PC(VM_REG_PC + (n)))
#define JUMP(dst)          (SET_PC(VM_REG_PC + (dst)))

/* frame pointer, environment pointer */
#define GET_CFP()  (COLLECT_USAGE_REGISTER_HELPER(CFP, GET, VM_REG_CFP))
#define GET_EP()   (COLLECT_USAGE_REGISTER_HELPER(EP, GET, VM_REG_EP))
#define SET_EP(x)  (VM_REG_EP = (COLLECT_USAGE_REGISTER_HELPER(EP, SET, (x))))
#define GET_LEP()  (VM_EP_LEP(GET_EP()))

/* SP */
#define GET_SP()   (COLLECT_USAGE_REGISTER_HELPER(SP, GET, VM_REG_SP))
#define SET_SP(x)  (VM_REG_SP  = (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define INC_SP(x)  (VM_REG_SP += (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define DEC_SP(x)  (VM_REG_SP -= (COLLECT_USAGE_REGISTER_HELPER(SP, SET, (x))))
#define SET_SV(x)  (*GET_SP() = (x)) /* set current stack value as x */

/* instruction sequence C struct */
#define GET_ISEQ() (GET_CFP()->iseq)

/**********************************************************/
/* deal with variables */
/**********************************************************/
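
/* GET_PREV_EP() strips the low two tag bits from the environment's
 * VM_ENV_DATA_INDEX_SPECVAL slot to recover the previous environment pointer. */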
#define GET_PREV_EP(ep) ((VALUE *)((ep)[VM_ENV_DATA_INDEX_SPECVAL] & ~0x03))

/**********************************************************/
/* deal with values */
/**********************************************************/

#define GET_SELF() (COLLECT_USAGE_REGISTER_HELPER(SELF, GET, GET_CFP()->self))

/**********************************************************/
/* deal with control flow 2: method/iterator */
/**********************************************************/

/* Set the fastpath only when the cached method is *NOT* protected,
 * because the inline method cache does not take the receiver into account.
 */
static inline void
CC_SET_FASTPATH(const struct rb_callcache *cc, vm_call_handler func, bool enabled)
{
    if (LIKELY(enabled)) {
        vm_cc_call_set(cc, func);
    }
}

#define GET_BLOCK_HANDLER() (GET_LEP()[VM_ENV_DATA_INDEX_SPECVAL])

/**********************************************************/
/* deal with control flow 3: exception */
/**********************************************************/

/**********************************************************/
/* deal with stack canary */
/**********************************************************/
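
/* For instructions declared leaf, SETUP_CANARY() writes a sentinel value at the
 * current SP before the instruction body runs, and CHECK_CANARY() verifies
 * afterwards that the slot is untouched, reporting via vm_canary_is_found_dead()
 * if it was clobbered. */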
#if VM_CHECK_MODE > 0
#define SETUP_CANARY() \
    VALUE *canary; \
    if (leaf) { \
        canary = GET_SP(); \
        SET_SV(vm_stack_canary); \
    } \
    else { \
        SET_SV(Qfalse); /* cleanup */ \
    }
#define CHECK_CANARY() \
    if (leaf) { \
        if (*canary == vm_stack_canary) { \
            *canary = Qfalse; /* cleanup */ \
        } \
        else { \
            vm_canary_is_found_dead(INSN_ATTR(bin), *canary); \
        } \
    }
#else
#define SETUP_CANARY() /* void */
#define CHECK_CANARY() /* void */
#endif

/**********************************************************/
/* others */
/**********************************************************/

#ifndef MJIT_HEADER
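/* Adjust the PC by the width difference between the current instruction and
 * opt_send_without_block, then re-dispatch the call through the original
 * opt_send_without_block handler. */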
#define CALL_SIMPLE_METHOD() do { \
    rb_snum_t x = leaf ? INSN_ATTR(width) : 0; \
    rb_snum_t y = attr_width_opt_send_without_block(0); \
    rb_snum_t z = x - y; \
    ADD_PC(z); \
    DISPATCH_ORIGINAL_INSN(opt_send_without_block); \
} while (0)
#endif

#define PREV_CLASS_SERIAL() (ruby_vm_class_serial)
#define NEXT_CLASS_SERIAL() (++ruby_vm_class_serial)
#define GET_GLOBAL_METHOD_STATE() (ruby_vm_global_method_state)
#define INC_GLOBAL_METHOD_STATE() (++ruby_vm_global_method_state)
#define GET_GLOBAL_CONSTANT_STATE() (ruby_vm_global_constant_state)
#define INC_GLOBAL_CONSTANT_STATE() (++ruby_vm_global_constant_state)
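
/* vm_throw_data is the imemo object that carries a thrown value together with
 * its catch frame and throw state while the VM unwinds (e.g. for break); the
 * helpers below construct and access it. */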
static inline struct vm_throw_data *
THROW_DATA_NEW(VALUE val, const rb_control_frame_t *cf, int st)
{
    struct vm_throw_data *obj = (struct vm_throw_data *)rb_imemo_new(imemo_throw_data, val, (VALUE)cf, 0, 0);
    obj->throw_state = st;
    return obj;
}

static inline VALUE
THROW_DATA_VAL(const struct vm_throw_data *obj)
{
    VM_ASSERT(THROW_DATA_P(obj));
    return obj->throw_obj;
}

static inline const rb_control_frame_t *
THROW_DATA_CATCH_FRAME(const struct vm_throw_data *obj)
{
    VM_ASSERT(THROW_DATA_P(obj));
    return obj->catch_frame;
}

static inline int
THROW_DATA_STATE(const struct vm_throw_data *obj)
{
    VM_ASSERT(THROW_DATA_P(obj));
    return obj->throw_state;
}

static inline int
THROW_DATA_CONSUMED_P(const struct vm_throw_data *obj)
{
    VM_ASSERT(THROW_DATA_P(obj));
    return obj->flags & THROW_DATA_CONSUMED;
}

static inline void
THROW_DATA_CATCH_FRAME_SET(struct vm_throw_data *obj, const rb_control_frame_t *cfp)
{
    VM_ASSERT(THROW_DATA_P(obj));
    obj->catch_frame = cfp;
}

static inline void
THROW_DATA_STATE_SET(struct vm_throw_data *obj, int st)
{
    VM_ASSERT(THROW_DATA_P(obj));
    obj->throw_state = st;
}

static inline void
THROW_DATA_CONSUMED_SET(struct vm_throw_data *obj)
{
    if (THROW_DATA_P(obj) &&
        THROW_DATA_STATE(obj) == TAG_BREAK) {
        obj->flags |= THROW_DATA_CONSUMED;
    }
}

#define IS_ARGS_SPLAT(ci)          (vm_ci_flag(ci) & VM_CALL_ARGS_SPLAT)
#define IS_ARGS_KEYWORD(ci)        (vm_ci_flag(ci) & VM_CALL_KWARG)
#define IS_ARGS_KW_SPLAT(ci)       (vm_ci_flag(ci) & VM_CALL_KW_SPLAT)
#define IS_ARGS_KW_OR_KW_SPLAT(ci) (vm_ci_flag(ci) & (VM_CALL_KWARG | VM_CALL_KW_SPLAT))
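/* VM_CALL_KW_SPLAT_MUT is set when the caller already built a fresh hash for
 * the keyword splat (multiple splats, or keywords mixed with a splat), so the
 * callee may mutate the passed hash without duplicating it. */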
#define IS_ARGS_KW_SPLAT_MUT(ci)   (vm_ci_flag(ci) & VM_CALL_KW_SPLAT_MUT)

/* If this returns true, an optimized function returned by `vm_call_iseq_setup_func`
   can be used as a fastpath. */
static bool
vm_call_iseq_optimizable_p(const struct rb_callinfo *ci, const struct rb_callcache *cc)
{
    return !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) &&
        !(METHOD_ENTRY_VISI(vm_cc_cme(cc)) == METHOD_VISI_PROTECTED);
}

#endif /* RUBY_INSNHELPER_H */