mirror of https://github.com/ruby/ruby.git synced 2022-11-09 12:17:21 -05:00

Revert "This commit implements the Object Shapes technique in CRuby."

This reverts commit 68bc9e2e97d12f80df0d113e284864e225f771c2.
Aaron Patterson 2022-09-30 16:01:50 -07:00
parent 0ab0229c11
commit 9a6803c90b
GPG key ID: 953170BCB4FFAFC6
41 changed files with 905 additions and 2323 deletions
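For context on what this revert removes: object shapes track each object's set of instance variables as a node in a global transition tree, where every edge is an ivar that was added and every node records the resulting ivar count (see the deleted struct rb_shape in shape.h further down this page). The following is a minimal standalone sketch of that idea in plain C; the names and the fixed-size edge list are illustrative, not CRuby's actual code.

```c
/* Minimal standalone sketch of a shape transition tree; illustrative only,
 * not CRuby's implementation (toy fixed-size edge list, no locking). */
#include <stdlib.h>
#include <string.h>

typedef struct shape {
    struct shape *parent;      /* transition we came from */
    const char   *edge_name;   /* ivar whose addition created this node */
    unsigned      iv_count;    /* ivars held by objects of this shape */
    struct shape *edges[8];    /* outgoing transitions (toy capacity) */
    size_t        nedges;
} shape_t;

/* Return the child shape reached by adding `name`, creating it on first use. */
static shape_t *
shape_transition(shape_t *self, const char *name)
{
    for (size_t i = 0; i < self->nedges; i++) {
        if (strcmp(self->edges[i]->edge_name, name) == 0)
            return self->edges[i];              /* existing edge: shared shape */
    }
    shape_t *child = calloc(1, sizeof(*child)); /* no overflow check: a toy */
    child->parent = self;
    child->edge_name = name;
    child->iv_count = self->iv_count + 1;
    self->edges[self->nedges++] = child;
    return child;
}

int main(void)
{
    shape_t root = {0};
    shape_t *a = shape_transition(shape_transition(&root, "@x"), "@y");
    shape_t *b = shape_transition(shape_transition(&root, "@x"), "@y");
    return a == b ? 0 : 1;  /* same ivars in same order => same shape */
}
```

Two objects that gain the same ivars in the same order end at the same node, which is why an inline cache can validate with a single shape ID compare. This commit restores the previous scheme: a class serial check plus a per-class ivar index table.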


@ -34,19 +34,3 @@ assert_equal %{ok}, %{
print "ok"
end
}, '[ruby-core:15120]'
assert_equal %{ok}, %{
class Big
attr_reader :foo
def initialize
@foo = "ok"
end
end
obj = Big.new
100.times do |i|
obj.instance_variable_set(:"@ivar_\#{i}", i)
end
Big.new.foo
}

common.mk (322 changes)

File diff suppressed because it is too large.


@ -2058,7 +2058,20 @@ cdhash_set_label_i(VALUE key, VALUE val, VALUE ptr)
static inline VALUE
get_ivar_ic_value(rb_iseq_t *iseq,ID id)
{
return INT2FIX(ISEQ_BODY(iseq)->ivc_size++);
VALUE val;
struct rb_id_table *tbl = ISEQ_COMPILE_DATA(iseq)->ivar_cache_table;
if (tbl) {
if (rb_id_table_lookup(tbl,id,&val)) {
return val;
}
}
else {
tbl = rb_id_table_create(1);
ISEQ_COMPILE_DATA(iseq)->ivar_cache_table = tbl;
}
val = INT2FIX(ISEQ_BODY(iseq)->ivc_size++);
rb_id_table_insert(tbl,id,val);
return val;
}
static inline VALUE
@ -2459,13 +2472,9 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor)
generated_iseq[code_index + 1 + j] = (VALUE)ic;
}
break;
case TS_IVC: /* inline ivar cache */
{
unsigned int ic_index = FIX2UINT(operands[j]);
vm_ic_attr_index_initialize(((IVC)&body->is_entries[ic_index]), INVALID_SHAPE_ID);
}
case TS_ISE: /* inline storage entry: `once` insn */
case TS_ICVARC: /* inline cvar cache */
case TS_IVC: /* inline ivar cache */
{
unsigned int ic_index = FIX2UINT(operands[j]);
IC ic = &ISEQ_IS_ENTRY_START(body, type)[ic_index].ic_cache;
@ -11505,11 +11514,6 @@ ibf_load_code(const struct ibf_load *load, rb_iseq_t *iseq, ibf_offset_t bytecod
ISE ic = ISEQ_IS_ENTRY_START(load_body, operand_type) + op;
code[code_index] = (VALUE)ic;
if (operand_type == TS_IVC) {
vm_ic_attr_index_initialize(((IVC)code[code_index]), INVALID_SHAPE_ID);
}
}
break;
case TS_CALLDATA:
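The restored get_ivar_ic_value above deduplicates inline-cache slots per ivar ID at compile time: look the ID up in a lazily created table, and only hand out a fresh ivc_size index on a miss. A hedged sketch of that lookup-or-allocate pattern, using a toy linear table in place of rb_id_table:

```c
/* Sketch of lazily assigning one stable cache slot per ivar ID, mirroring
 * the restored get_ivar_ic_value(); toy linear table instead of rb_id_table,
 * capacity checks omitted. */
#include <stddef.h>

struct ic_table {
    unsigned long keys[64];    /* ivar IDs seen in this ISeq (toy capacity) */
    unsigned      slots[64];
    size_t        len;
    unsigned      next_slot;   /* plays the role of body->ivc_size */
};

static unsigned
ic_slot_for(struct ic_table *tbl, unsigned long id)
{
    for (size_t i = 0; i < tbl->len; i++) {
        if (tbl->keys[i] == id) return tbl->slots[i];  /* reuse existing slot */
    }
    unsigned slot = tbl->next_slot++;                  /* allocate a fresh one */
    tbl->keys[tbl->len] = id;
    tbl->slots[tbl->len] = slot;
    tbl->len++;
    return slot;
}
```

The shapes version this replaces simply returned ivc_size++ for every call, so each access site in an ISeq got its own cache slot instead of sharing one per ivar ID.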


@ -130,6 +130,7 @@ RB_DEBUG_COUNTER(frame_C2R)
/* instance variable counts
*
* * ivar_get_ic_hit/miss: ivar_get inline cache (ic) hit/miss counts (VM insn)
* * ivar_get_ic_miss_serial: ivar_get ic miss reason by serial (VM insn)
* * ivar_get_ic_miss_unset: ... by unset (VM insn)
* * ivar_get_ic_miss_noobject: ... by "not T_OBJECT" (VM insn)
* * ivar_set_...: same counts with ivar_set (VM insn)
@ -139,17 +140,17 @@ RB_DEBUG_COUNTER(frame_C2R)
*/
RB_DEBUG_COUNTER(ivar_get_ic_hit)
RB_DEBUG_COUNTER(ivar_get_ic_miss)
RB_DEBUG_COUNTER(ivar_get_ic_miss_serial)
RB_DEBUG_COUNTER(ivar_get_ic_miss_unset)
RB_DEBUG_COUNTER(ivar_get_ic_miss_noobject)
RB_DEBUG_COUNTER(ivar_set_ic_hit)
RB_DEBUG_COUNTER(ivar_set_ic_miss)
RB_DEBUG_COUNTER(ivar_set_ic_miss_serial)
RB_DEBUG_COUNTER(ivar_set_ic_miss_unset)
RB_DEBUG_COUNTER(ivar_set_ic_miss_iv_hit)
RB_DEBUG_COUNTER(ivar_set_ic_miss_noobject)
RB_DEBUG_COUNTER(ivar_get_base)
RB_DEBUG_COUNTER(ivar_set_base)
RB_DEBUG_COUNTER(ivar_get_ic_miss_set)
RB_DEBUG_COUNTER(ivar_get_cc_miss_set)
RB_DEBUG_COUNTER(ivar_get_ic_miss_unset)
RB_DEBUG_COUNTER(ivar_get_cc_miss_unset)
/* local variable counts
*
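The counters above follow the usual debug_counter.h pattern: each RB_DEBUG_COUNTER(name) line declares a named slot that hot paths bump, and the totals are dumped at process exit. A simplified single-threaded sketch of that mechanism (illustrative names; the real header generates the list from the declarations):

```c
/* Simplified single-threaded sketch of the debug-counter mechanism; the real
 * debug_counter.h generates the enum and name list from the
 * RB_DEBUG_COUNTER() declarations above. */
#include <stdio.h>

enum dc_id { DC_ivar_get_ic_hit, DC_ivar_get_ic_miss, DC_MAX };
static const char   *dc_names[DC_MAX]  = { "ivar_get_ic_hit", "ivar_get_ic_miss" };
static unsigned long dc_counts[DC_MAX];

#define DC_INC(name) (dc_counts[DC_##name]++)

int main(void)
{
    DC_INC(ivar_get_ic_hit);           /* what RB_DEBUG_COUNTER_INC expands to */
    DC_INC(ivar_get_ic_miss);
    for (int i = 0; i < DC_MAX; i++)   /* the at-exit dump */
        printf("%-20s %lu\n", dc_names[i], dc_counts[i]);
    return 0;
}
```

The swap visible in this hunk: the revert restores the serial-based miss counters (..._miss_serial) that shapes had replaced with transition-based ones (..._miss_set and ..._miss_unset).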


@ -165,9 +165,7 @@ coverage.o: $(top_srcdir)/ccan/check_type/check_type.h
coverage.o: $(top_srcdir)/ccan/container_of/container_of.h
coverage.o: $(top_srcdir)/ccan/list/list.h
coverage.o: $(top_srcdir)/ccan/str/str.h
coverage.o: $(top_srcdir)/constant.h
coverage.o: $(top_srcdir)/gc.h
coverage.o: $(top_srcdir)/id_table.h
coverage.o: $(top_srcdir)/internal.h
coverage.o: $(top_srcdir)/internal/array.h
coverage.o: $(top_srcdir)/internal/compilers.h
@ -178,14 +176,12 @@ coverage.o: $(top_srcdir)/internal/sanitizers.h
coverage.o: $(top_srcdir)/internal/serial.h
coverage.o: $(top_srcdir)/internal/static_assert.h
coverage.o: $(top_srcdir)/internal/thread.h
coverage.o: $(top_srcdir)/internal/variable.h
coverage.o: $(top_srcdir)/internal/vm.h
coverage.o: $(top_srcdir)/internal/warnings.h
coverage.o: $(top_srcdir)/method.h
coverage.o: $(top_srcdir)/node.h
coverage.o: $(top_srcdir)/ruby_assert.h
coverage.o: $(top_srcdir)/ruby_atomic.h
coverage.o: $(top_srcdir)/shape.h
coverage.o: $(top_srcdir)/thread_pthread.h
coverage.o: $(top_srcdir)/vm_core.h
coverage.o: $(top_srcdir)/vm_opts.h


@ -350,7 +350,6 @@ objspace.o: $(top_srcdir)/internal/serial.h
objspace.o: $(top_srcdir)/internal/static_assert.h
objspace.o: $(top_srcdir)/internal/warnings.h
objspace.o: $(top_srcdir)/node.h
objspace.o: $(top_srcdir)/shape.h
objspace.o: $(top_srcdir)/symbol.h
objspace.o: objspace.c
objspace.o: {$(VPATH)}id.h
@ -534,9 +533,7 @@ objspace_dump.o: $(top_srcdir)/ccan/check_type/check_type.h
objspace_dump.o: $(top_srcdir)/ccan/container_of/container_of.h
objspace_dump.o: $(top_srcdir)/ccan/list/list.h
objspace_dump.o: $(top_srcdir)/ccan/str/str.h
objspace_dump.o: $(top_srcdir)/constant.h
objspace_dump.o: $(top_srcdir)/gc.h
objspace_dump.o: $(top_srcdir)/id_table.h
objspace_dump.o: $(top_srcdir)/internal.h
objspace_dump.o: $(top_srcdir)/internal/array.h
objspace_dump.o: $(top_srcdir)/internal/compilers.h
@ -547,14 +544,12 @@ objspace_dump.o: $(top_srcdir)/internal/sanitizers.h
objspace_dump.o: $(top_srcdir)/internal/serial.h
objspace_dump.o: $(top_srcdir)/internal/static_assert.h
objspace_dump.o: $(top_srcdir)/internal/string.h
objspace_dump.o: $(top_srcdir)/internal/variable.h
objspace_dump.o: $(top_srcdir)/internal/vm.h
objspace_dump.o: $(top_srcdir)/internal/warnings.h
objspace_dump.o: $(top_srcdir)/method.h
objspace_dump.o: $(top_srcdir)/node.h
objspace_dump.o: $(top_srcdir)/ruby_assert.h
objspace_dump.o: $(top_srcdir)/ruby_atomic.h
objspace_dump.o: $(top_srcdir)/shape.h
objspace_dump.o: $(top_srcdir)/thread_pthread.h
objspace_dump.o: $(top_srcdir)/vm_core.h
objspace_dump.o: $(top_srcdir)/vm_opts.h

gc.c (50 changes)

@ -2895,7 +2895,8 @@ rb_class_instance_allocate_internal(VALUE klass, VALUE flags, bool wb_protected)
GC_ASSERT((flags & RUBY_T_MASK) == T_OBJECT);
GC_ASSERT(flags & ROBJECT_EMBED);
uint32_t index_tbl_num_entries = RCLASS_EXT(klass)->max_iv_count;
st_table *index_tbl = RCLASS_IV_INDEX_TBL(klass);
uint32_t index_tbl_num_entries = index_tbl == NULL ? 0 : (uint32_t)index_tbl->num_entries;
size_t size;
bool embed = true;
@ -2930,7 +2931,7 @@ rb_class_instance_allocate_internal(VALUE klass, VALUE flags, bool wb_protected)
#endif
}
else {
rb_ensure_iv_list_size(obj, 0, index_tbl_num_entries);
rb_init_iv_list(obj);
}
return obj;
@ -3205,6 +3206,20 @@ rb_free_const_table(struct rb_id_table *tbl)
rb_id_table_free(tbl);
}
static int
free_iv_index_tbl_free_i(st_data_t key, st_data_t value, st_data_t data)
{
xfree((void *)value);
return ST_CONTINUE;
}
static void
iv_index_tbl_free(struct st_table *tbl)
{
st_foreach(tbl, free_iv_index_tbl_free_i, 0);
st_free_table(tbl);
}
// alive: if false, target pointers can be freed already.
// To check it, we need objspace parameter.
static void
@ -3420,16 +3435,6 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
RB_DEBUG_COUNTER_INC(obj_obj_transient);
}
else {
rb_shape_t *shape = rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj));
if (shape) {
VALUE klass = RBASIC_CLASS(obj);
// Increment max_iv_count if applicable, used to determine size pool allocation
uint32_t num_of_ivs = shape->iv_count;
if (RCLASS_EXT(klass)->max_iv_count < num_of_ivs) {
RCLASS_EXT(klass)->max_iv_count = num_of_ivs;
}
}
xfree(RANY(obj)->as.object.as.heap.ivptr);
RB_DEBUG_COUNTER_INC(obj_obj_ptr);
}
@ -3444,6 +3449,9 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
if (RCLASS_CONST_TBL(obj)) {
rb_free_const_table(RCLASS_CONST_TBL(obj));
}
if (RCLASS_IV_INDEX_TBL(obj)) {
iv_index_tbl_free(RCLASS_IV_INDEX_TBL(obj));
}
if (RCLASS_CVC_TBL(obj)) {
rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
rb_id_table_free(RCLASS_CVC_TBL(obj));
@ -4865,6 +4873,10 @@ obj_memsize_of(VALUE obj, int use_all_types)
if (RCLASS_CVC_TBL(obj)) {
size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
}
if (RCLASS_IV_INDEX_TBL(obj)) {
// TODO: more correct value
size += st_memsize(RCLASS_IV_INDEX_TBL(obj));
}
if (RCLASS_EXT(obj)->iv_tbl) {
size += st_memsize(RCLASS_EXT(obj)->iv_tbl);
}
@ -10395,6 +10407,15 @@ update_subclass_entries(rb_objspace_t *objspace, rb_subclass_entry_t *entry)
}
}
static int
update_iv_index_tbl_i(st_data_t key, st_data_t value, st_data_t arg)
{
rb_objspace_t *objspace = (rb_objspace_t *)arg;
struct rb_iv_index_tbl_entry *ent = (struct rb_iv_index_tbl_entry *)value;
UPDATE_IF_MOVED(objspace, ent->class_value);
return ST_CONTINUE;
}
static void
update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
{
@ -10402,6 +10423,11 @@ update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
UPDATE_IF_MOVED(objspace, ext->includer);
UPDATE_IF_MOVED(objspace, ext->refined_class);
update_subclass_entries(objspace, ext->subclasses);
// ext->iv_index_tbl
if (ext->iv_index_tbl) {
st_foreach(ext->iv_index_tbl, update_iv_index_tbl_i, (st_data_t)objspace);
}
}
static void
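The restored update_iv_index_tbl_i and update_class_ext above show the compacting-GC rule this table has to obey: any cache that stores raw VALUEs must be walked after objects move so stale pointers get rewritten, which is what UPDATE_IF_MOVED does for ent->class_value. A standalone sketch of that walk; gc_object_moved_p and gc_new_location are hypothetical stand-ins for the GC's forwarding lookup:

```c
/* Standalone sketch of rewriting cached VALUEs after objects move; the two
 * gc_* helpers are hypothetical stand-ins for the GC's forwarding lookup. */
#include <stddef.h>

typedef unsigned long VALUE;

static int   gc_object_moved_p(VALUE v) { (void)v; return 0; }  /* stub */
static VALUE gc_new_location(VALUE old) { return old; }         /* stub */

struct cache_entry { VALUE class_value; };

static void
update_cached_values(struct cache_entry *entries, size_t n)
{
    for (size_t i = 0; i < n; i++) {
        /* same job as UPDATE_IF_MOVED(objspace, ent->class_value) above */
        if (gc_object_moved_p(entries[i].class_value)) {
            entries[i].class_value = gc_new_location(entries[i].class_value);
        }
    }
}
```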


@ -46,6 +46,7 @@
#define ROBJECT_EMBED ROBJECT_EMBED
#define ROBJECT_NUMIV ROBJECT_NUMIV
#define ROBJECT_IVPTR ROBJECT_IVPTR
#define ROBJECT_IV_INDEX_TBL ROBJECT_IV_INDEX_TBL
/** @endcond */
/**
@ -131,7 +132,7 @@ struct RObject {
*
* This is a shortcut for `RCLASS_IV_INDEX_TBL(rb_obj_class(obj))`.
*/
struct rb_id_table *iv_index_tbl;
struct st_table *iv_index_tbl;
} heap;
#if USE_RVARGC


@ -941,8 +941,21 @@ RB_OBJ_FREEZE_RAW(VALUE obj)
RB_FL_SET_RAW(obj, RUBY_FL_FREEZE);
}
RUBY_SYMBOL_EXPORT_BEGIN
void rb_obj_freeze_inline(VALUE obj);
RUBY_SYMBOL_EXPORT_END
/**
* Prevents further modifications to the given object. ::rb_eFrozenError shall
* be raised if modification is attempted.
*
* @param[out] x Object in question.
*/
static inline void
rb_obj_freeze_inline(VALUE x)
{
if (RB_FL_ABLE(x)) {
RB_OBJ_FREEZE_RAW(x);
if (RBASIC_CLASS(x) && !(RBASIC(x)->flags & RUBY_FL_SINGLETON)) {
rb_freeze_singleton_class(x);
}
}
}
#endif /* RBIMPL_FL_TYPE_H */


@ -77,7 +77,6 @@ rb_call_inits(void)
CALL(vm_stack_canary);
CALL(ast);
CALL(gc_stress);
CALL(shape);
// enable builtin loading
CALL(builtin);


@ -48,6 +48,9 @@
#undef RHASH_TBL
#undef RHASH_EMPTY_P
/* internal/object.h */
#undef ROBJECT_IV_INDEX_TBL
/* internal/struct.h */
#undef RSTRUCT_LEN
#undef RSTRUCT_PTR


@ -14,7 +14,6 @@
#include "ruby/internal/stdbool.h" /* for bool */
#include "ruby/intern.h" /* for rb_alloc_func_t */
#include "ruby/ruby.h" /* for struct RBasic */
#include "shape.h"
#ifdef RCLASS_SUPER
# undef RCLASS_SUPER
@ -28,8 +27,8 @@ struct rb_subclass_entry {
struct rb_iv_index_tbl_entry {
uint32_t index;
shape_id_t source_shape_id;
shape_id_t dest_shape_id;
rb_serial_t class_serial;
VALUE class_value;
};
struct rb_cvar_class_tbl_entry {
@ -39,6 +38,7 @@ struct rb_cvar_class_tbl_entry {
};
struct rb_classext_struct {
struct st_table *iv_index_tbl; // ID -> struct rb_iv_index_tbl_entry
struct st_table *iv_tbl;
#if SIZEOF_SERIAL_T == SIZEOF_VALUE /* otherwise m_tbl is in struct RClass */
struct rb_id_table *m_tbl;
@ -64,10 +64,6 @@ struct rb_classext_struct {
const VALUE refined_class;
rb_alloc_func_t allocator;
const VALUE includer;
uint32_t max_iv_count;
#if !SHAPE_IN_BASIC_FLAGS
shape_id_t shape_id;
#endif
};
struct RClass {
@ -106,6 +102,7 @@ typedef struct rb_classext_struct rb_classext_t;
#define RCLASS_CALLABLE_M_TBL(c) (RCLASS_EXT(c)->callable_m_tbl)
#define RCLASS_CC_TBL(c) (RCLASS_EXT(c)->cc_tbl)
#define RCLASS_CVC_TBL(c) (RCLASS_EXT(c)->cvc_tbl)
#define RCLASS_IV_INDEX_TBL(c) (RCLASS_EXT(c)->iv_index_tbl)
#define RCLASS_ORIGIN(c) (RCLASS_EXT(c)->origin_)
#define RCLASS_REFINED_CLASS(c) (RCLASS_EXT(c)->refined_class)
#if SIZEOF_SERIAL_T == SIZEOF_VALUE


@ -9,6 +9,11 @@
* @brief Internal header for Object.
*/
#include "ruby/ruby.h" /* for VALUE */
#include "internal/class.h" /* for RCLASS_IV_INDEX_TBL */
#ifdef ROBJECT_IV_INDEX_TBL
# undef ROBJECT_IV_INDEX_TBL
#endif
/* object.c */
VALUE rb_class_search_ancestor(VALUE klass, VALUE super);
@ -21,6 +26,7 @@ int rb_bool_expected(VALUE, const char *, int raise);
static inline void RBASIC_CLEAR_CLASS(VALUE obj);
static inline void RBASIC_SET_CLASS_RAW(VALUE obj, VALUE klass);
static inline void RBASIC_SET_CLASS(VALUE obj, VALUE klass);
static inline struct st_table *ROBJECT_IV_INDEX_TBL_inline(VALUE obj);
RUBY_SYMBOL_EXPORT_BEGIN
/* object.c (export) */
@ -58,4 +64,20 @@ RBASIC_SET_CLASS(VALUE obj, VALUE klass)
RBASIC_SET_CLASS_RAW(obj, klass);
RB_OBJ_WRITTEN(obj, oldv, klass);
}
RBIMPL_ATTR_PURE()
static inline struct st_table *
ROBJECT_IV_INDEX_TBL_inline(VALUE obj)
{
if (RB_FL_ANY_RAW(obj, ROBJECT_EMBED)) {
VALUE klass = rb_obj_class(obj);
return RCLASS_IV_INDEX_TBL(klass);
}
else {
const struct RObject *const ptr = ROBJECT(obj);
return ptr->as.heap.iv_index_tbl;
}
}
#define ROBJECT_IV_INDEX_TBL ROBJECT_IV_INDEX_TBL_inline
#endif /* INTERNAL_OBJECT_H */


@ -37,9 +37,6 @@ static inline void ROBJ_TRANSIENT_SET(VALUE obj);
static inline void ROBJ_TRANSIENT_UNSET(VALUE obj);
uint32_t rb_obj_ensure_iv_index_mapping(VALUE obj, ID id);
struct gen_ivtbl;
int rb_gen_ivtbl_get(VALUE obj, ID id, struct gen_ivtbl **ivtbl);
RUBY_SYMBOL_EXPORT_BEGIN
/* variable.c (export) */
void rb_mark_generic_ivar(VALUE);
@ -55,8 +52,6 @@ VALUE rb_gvar_set(ID, VALUE);
VALUE rb_gvar_defined(ID);
void rb_const_warn_if_deprecated(const rb_const_entry_t *, VALUE, ID);
void rb_init_iv_list(VALUE obj);
void rb_ensure_iv_list_size(VALUE obj, uint32_t len, uint32_t newsize);
struct gen_ivtbl * rb_ensure_generic_iv_list_size(VALUE obj, uint32_t newsize);
MJIT_SYMBOL_EXPORT_END
static inline bool

iseq.c (14 changes)

@ -230,8 +230,18 @@ rb_iseq_each_value(const rb_iseq_t *iseq, iseq_value_itr_t * func, void *data)
union iseq_inline_storage_entry *is_entries = body->is_entries;
if (body->is_entries) {
// Skip iterating over ivc caches
is_entries += body->ivc_size;
// IVC entries
for (unsigned int i = 0; i < body->ivc_size; i++, is_entries++) {
IVC ivc = (IVC)is_entries;
if (ivc->entry) {
RUBY_ASSERT(!RB_TYPE_P(ivc->entry->class_value, T_NONE));
VALUE nv = func(data, ivc->entry->class_value);
if (ivc->entry->class_value != nv) {
ivc->entry->class_value = nv;
}
}
}
// ICVARC entries
for (unsigned int i = 0; i < body->icvarc_size; i++, is_entries++) {


@ -73,6 +73,23 @@ module RubyVM::MJIT
src << "#undef GET_SELF\n"
src << "#define GET_SELF() cfp_self\n"
# Generate merged ivar guards first if needed
if !status.compile_info.disable_ivar_cache && status.merge_ivar_guards_p
src << " if (UNLIKELY(!(RB_TYPE_P(GET_SELF(), T_OBJECT) && (rb_serial_t)#{status.ivar_serial} == RCLASS_SERIAL(RBASIC(GET_SELF())->klass) &&"
if USE_RVARGC
src << "#{status.max_ivar_index} < ROBJECT_NUMIV(GET_SELF())" # index < ROBJECT_NUMIV(obj)
else
if status.max_ivar_index >= ROBJECT_EMBED_LEN_MAX
src << "#{status.max_ivar_index} < ROBJECT_NUMIV(GET_SELF())" # index < ROBJECT_NUMIV(obj) && !RB_FL_ANY_RAW(obj, ROBJECT_EMBED)
else
src << "ROBJECT_EMBED_LEN_MAX == ROBJECT_NUMIV(GET_SELF())" # index < ROBJECT_NUMIV(obj) && RB_FL_ANY_RAW(obj, ROBJECT_EMBED)
end
end
src << "))) {\n"
src << " goto ivar_cancel;\n"
src << " }\n"
end
# Simulate `opt_pc` in setup_parameters_complex. Other PCs which may be passed by catch tables
# are not considered since vm_exec doesn't call jit_exec for catch tables.
if iseq.body.param.flags.has_opt
@ -86,13 +103,6 @@ module RubyVM::MJIT
src << " }\n"
end
# Generate merged ivar guards first if needed
if !status.compile_info.disable_ivar_cache && status.merge_ivar_guards_p
src << " if (UNLIKELY(!(RB_TYPE_P(GET_SELF(), T_OBJECT)))) {"
src << " goto ivar_cancel;\n"
src << " }\n"
end
C.fprintf(f, src)
compile_insns(0, 0, status, iseq.body, f)
compile_cancel_handler(f, iseq.body, status)
@ -353,37 +363,52 @@ module RubyVM::MJIT
ic_copy = (status.is_entries + (C.iseq_inline_storage_entry.new(operands[1]) - body.is_entries)).iv_cache
src = +''
if !status.compile_info.disable_ivar_cache && ic_copy.source_shape_id != C.INVALID_SHAPE_ID
if !status.compile_info.disable_ivar_cache && ic_copy.entry
# JIT: optimize away motion of sp and pc. This path does not call rb_warning() and so it's always leaf and not `handles_sp`.
# compile_pc_and_sp(src, insn, stack_size, sp_inc, local_stack_p, next_pos)
# JIT: prepare vm_getivar/vm_setivar arguments and variables
src << "{\n"
src << " VALUE obj = GET_SELF();\n"
src << " const shape_id_t source_shape_id = (rb_serial_t)#{ic_copy.source_shape_id};\n"
# JIT: cache hit path of vm_getivar/vm_setivar, or cancel JIT (recompile it with exivar)
if insn_name == :setinstancevariable
src << " const uint32_t index = #{ic_copy.attr_index - 1};\n"
src << " const shape_id_t dest_shape_id = (rb_serial_t)#{ic_copy.dest_shape_id};\n"
src << " if (source_shape_id == ROBJECT_SHAPE_ID(obj) && \n"
src << " dest_shape_id != ROBJECT_SHAPE_ID(obj)) {\n"
src << " if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {\n"
src << " rb_init_iv_list(obj);\n"
src << " }\n"
src << " ROBJECT_SET_SHAPE_ID(obj, dest_shape_id);\n"
src << " VALUE *ptr = ROBJECT_IVPTR(obj);\n"
src << " RB_OBJ_WRITE(obj, &ptr[index], stack[#{stack_size - 1}]);\n"
src << " }\n"
else
if ic_copy.attr_index == 0 # cache hit, but uninitialized iv
src << " /* Uninitialized instance variable */\n"
src << " if (source_shape_id == ROBJECT_SHAPE_ID(obj)) {\n"
src << " stack[#{stack_size}] = Qnil;\n"
src << " const uint32_t index = #{ic_copy.entry.index};\n"
if status.merge_ivar_guards_p
# JIT: Access ivar without checking these VM_ASSERTed prerequisites as we checked them in the beginning of `mjit_compile_body`
src << " VM_ASSERT(RB_TYPE_P(obj, T_OBJECT));\n"
src << " VM_ASSERT((rb_serial_t)#{ic_copy.entry.class_serial} == RCLASS_SERIAL(RBASIC(obj)->klass));\n"
src << " VM_ASSERT(index < ROBJECT_NUMIV(obj));\n"
if insn_name == :setinstancevariable
if USE_RVARGC
src << " if (LIKELY(!RB_OBJ_FROZEN_RAW(obj) && index < ROBJECT_NUMIV(obj))) {\n"
src << " RB_OBJ_WRITE(obj, &ROBJECT_IVPTR(obj)[index], stack[#{stack_size - 1}]);\n"
else
heap_ivar_p = status.max_ivar_index >= ROBJECT_EMBED_LEN_MAX
src << " if (LIKELY(!RB_OBJ_FROZEN_RAW(obj) && #{heap_ivar_p ? 'true' : 'RB_FL_ANY_RAW(obj, ROBJECT_EMBED)'})) {\n"
src << " RB_OBJ_WRITE(obj, &ROBJECT(obj)->as.#{heap_ivar_p ? 'heap.ivptr[index]' : 'ary[index]'}, stack[#{stack_size - 1}]);\n"
end
src << " }\n"
else
src << " const uint32_t index = #{ic_copy.attr_index - 1};\n"
src << " if (source_shape_id == ROBJECT_SHAPE_ID(obj)) {\n"
src << " stack[#{stack_size}] = ROBJECT_IVPTR(obj)[index];\n"
src << " VALUE val;\n"
if USE_RVARGC
src << " if (LIKELY(index < ROBJECT_NUMIV(obj) && (val = ROBJECT_IVPTR(obj)[index]) != Qundef)) {\n"
else
heap_ivar_p = status.max_ivar_index >= ROBJECT_EMBED_LEN_MAX
src << " if (LIKELY(#{heap_ivar_p ? 'true' : 'RB_FL_ANY_RAW(obj, ROBJECT_EMBED)'} && (val = ROBJECT(obj)->as.#{heap_ivar_p ? 'heap.ivptr[index]' : 'ary[index]'}) != Qundef)) {\n"
end
src << " stack[#{stack_size}] = val;\n"
src << " }\n"
end
else
src << " const rb_serial_t ic_serial = (rb_serial_t)#{ic_copy.entry.class_serial};\n"
# JIT: cache hit path of vm_getivar/vm_setivar, or cancel JIT (recompile it with exivar)
if insn_name == :setinstancevariable
src << " if (LIKELY(RB_TYPE_P(obj, T_OBJECT) && ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass) && index < ROBJECT_NUMIV(obj) && !RB_OBJ_FROZEN_RAW(obj))) {\n"
src << " VALUE *ptr = ROBJECT_IVPTR(obj);\n"
src << " RB_OBJ_WRITE(obj, &ptr[index], stack[#{stack_size - 1}]);\n"
src << " }\n"
else
src << " VALUE val;\n"
src << " if (LIKELY(RB_TYPE_P(obj, T_OBJECT) && ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass) && index < ROBJECT_NUMIV(obj) && (val = ROBJECT_IVPTR(obj)[index]) != Qundef)) {\n"
src << " stack[#{stack_size}] = val;\n"
src << " }\n"
end
end
@ -394,19 +419,20 @@ module RubyVM::MJIT
src << " }\n"
src << "}\n"
return src
elsif insn_name == :getinstancevariable && !status.compile_info.disable_exivar_cache && ic_copy.source_shape_id != C.INVALID_SHAPE_ID
elsif insn_name == :getinstancevariable && !status.compile_info.disable_exivar_cache && ic_copy.entry
# JIT: optimize away motion of sp and pc. This path does not call rb_warning() and so it's always leaf and not `handles_sp`.
# compile_pc_and_sp(src, insn, stack_size, sp_inc, local_stack_p, next_pos)
# JIT: prepare vm_getivar's arguments and variables
src << "{\n"
src << " VALUE obj = GET_SELF();\n"
src << " const shape_id_t source_shape_id = (rb_serial_t)#{ic_copy.source_shape_id};\n"
src << " const uint32_t index = #{ic_copy.attr_index - 1};\n"
src << " const rb_serial_t ic_serial = (rb_serial_t)#{ic_copy.entry.class_serial};\n"
src << " const uint32_t index = #{ic_copy.entry.index};\n"
# JIT: cache hit path of vm_getivar, or cancel JIT (recompile it without any ivar optimization)
src << " struct gen_ivtbl *ivtbl;\n"
src << " if (LIKELY(FL_TEST_RAW(obj, FL_EXIVAR) && source_shape_id == rb_shape_get_shape_id(obj) && rb_ivar_generic_ivtbl_lookup(obj, &ivtbl))) {\n"
src << " stack[#{stack_size}] = ivtbl->ivptr[index];\n"
src << " VALUE val;\n"
src << " if (LIKELY(FL_TEST_RAW(obj, FL_EXIVAR) && ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass) && rb_ivar_generic_ivtbl_lookup(obj, &ivtbl) && index < ivtbl->numiv && (val = ivtbl->ivptr[index]) != Qundef)) {\n"
src << " stack[#{stack_size}] = val;\n"
src << " }\n"
src << " else {\n"
src << " reg_cfp->pc = original_body_iseq + #{pos};\n"
@ -806,16 +832,35 @@ module RubyVM::MJIT
def init_ivar_compile_status(body, status)
C.mjit_capture_is_entries(body, status.is_entries)
num_ivars = 0
pos = 0
status.max_ivar_index = 0
status.ivar_serial = 0
while pos < body.iseq_size
insn = INSNS.fetch(C.rb_vm_insn_decode(body.iseq_encoded[pos]))
if insn.name == :getinstancevariable || insn.name == :setinstancevariable
status.merge_ivar_guards_p = true
return
ic = body.iseq_encoded[pos+2]
ic_copy = (status.is_entries + (C.iseq_inline_storage_entry.new(ic) - body.is_entries)).iv_cache
if ic_copy.entry # Only initialized (ic_serial > 0) IVCs are optimized
num_ivars += 1
if status.max_ivar_index < ic_copy.entry.index
status.max_ivar_index = ic_copy.entry.index
end
if status.ivar_serial == 0
status.ivar_serial = ic_copy.entry.class_serial
elsif status.ivar_serial != ic_copy.entry.class_serial
# Multiple classes have used this ISeq. Give up assuming one serial.
status.merge_ivar_guards_p = false
return
end
end
end
pos += insn.len
end
status.merge_ivar_guards_p = status.ivar_serial > 0 && num_ivars >= 2
end
# Expand simple macro that doesn't require dynamic C code.
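The restored init_ivar_compile_status above decides whether the JIT may merge per-site ivar guards into one check at method entry: scan every getinstancevariable/setinstancevariable cache, track the largest ivar index, and give up unless all initialized caches agree on a single class serial and at least two ivars are involved. A sketch of that decision in plain C with illustrative types:

```c
/* Plain-C sketch of the merged-guard decision; struct fields mirror the
 * ivar_serial / max_ivar_index / merge_ivar_guards_p status fields, types
 * are illustrative. */
#include <stdbool.h>
#include <stddef.h>

struct ivc        { unsigned long class_serial; unsigned index; bool filled; };
struct guard_plan { bool merge; unsigned long serial; unsigned max_index; };

static struct guard_plan
plan_ivar_guards(const struct ivc *caches, size_t n)
{
    struct guard_plan g = { false, 0, 0 };
    size_t num_ivars = 0;
    for (size_t i = 0; i < n; i++) {
        if (!caches[i].filled) continue;      /* only initialized ICs count */
        num_ivars++;
        if (caches[i].index > g.max_index) g.max_index = caches[i].index;
        if (g.serial == 0) {
            g.serial = caches[i].class_serial;
        }
        else if (g.serial != caches[i].class_serial) {
            return g;   /* multiple classes used this ISeq: give up merging */
        }
    }
    g.merge = (g.serial != 0 && num_ivars >= 2);
    return g;
}
```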


@ -39,7 +39,6 @@
#include "ruby/st.h"
#include "ruby/util.h"
#include "builtin.h"
#include "shape.h"
#define BITSPERSHORT (2*CHAR_BIT)
#define SHORTMASK ((1<<BITSPERSHORT)-1)
@ -623,6 +622,10 @@ w_obj_each(st_data_t key, st_data_t val, st_data_t a)
}
return ST_CONTINUE;
}
if (!ivarg->num_ivar) {
rb_raise(rb_eRuntimeError, "instance variable added to %"PRIsVALUE" instance",
CLASS_OF(arg->obj));
}
--ivarg->num_ivar;
w_symbol(ID2SYM(id), arg->arg);
w_object(value, arg->arg, arg->limit);
@ -717,7 +720,6 @@ has_ivars(VALUE obj, VALUE encname, VALUE *ivobj)
static void
w_ivar_each(VALUE obj, st_index_t num, struct dump_call_arg *arg)
{
shape_id_t shape_id = rb_shape_get_shape_id(arg->obj);
struct w_ivar_arg ivarg = {arg, num};
if (!num) return;
rb_ivar_foreach(obj, w_obj_each, (st_data_t)&ivarg);
@ -725,10 +727,6 @@ w_ivar_each(VALUE obj, st_index_t num, struct dump_call_arg *arg)
rb_raise(rb_eRuntimeError, "instance variable removed from %"PRIsVALUE" instance",
CLASS_OF(arg->obj));
}
if (shape_id != rb_shape_get_shape_id(arg->obj)) {
rb_raise(rb_eRuntimeError, "instance variable added to %"PRIsVALUE" instance",
CLASS_OF(arg->obj));
}
}
static void


@ -418,7 +418,6 @@ def lldb_inspect(debugger, target, result, val):
elif flType == RUBY_T_IMEMO:
# I'm not sure how to get IMEMO_MASK out of lldb. It's not in globals()
imemo_type = (flags >> RUBY_FL_USHIFT) & 0x0F # IMEMO_MASK
print("T_IMEMO: ", file=result)
append_command_output(debugger, "p (enum imemo_type) %d" % imemo_type, result)
append_command_output(debugger, "p *(struct MEMO *) %0#x" % val.GetValueAsUnsigned(), result)


@ -5,10 +5,6 @@ module RubyVM::MJIT
C = Object.new
class << C
def SHAPE_BITS
RubyVM::Shape::SHAPE_BITS
end
def ROBJECT_EMBED_LEN_MAX
Primitive.cexpr! 'INT2NUM(RBIMPL_EMBED_LEN_MAX_OF(VALUE))'
end
@ -169,14 +165,6 @@ module RubyVM::MJIT
Primitive.cexpr! %q{ INT2NUM(VM_METHOD_TYPE_ISEQ) }
end
def C.INVALID_SHAPE_ID
Primitive.cexpr! %q{ ULONG2NUM(INVALID_SHAPE_ID) }
end
def C.SHAPE_MASK
Primitive.cexpr! %q{ ULONG2NUM(SHAPE_MASK) }
end
def C.CALL_DATA
@CALL_DATA ||= self.rb_call_data
end
@ -193,10 +181,6 @@ module RubyVM::MJIT
@RB_BUILTIN ||= self.rb_builtin_function
end
def C.attr_index_t
@attr_index_t ||= CType::Immediate.parse("uint32_t")
end
def C.compile_branch
@compile_branch ||= CType::Struct.new(
"compile_branch", Primitive.cexpr!("SIZEOF(struct compile_branch)"),
@ -217,6 +201,7 @@ module RubyVM::MJIT
compiled_id: [CType::Immediate.parse("int"), Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), compiled_id)")],
compile_info: [CType::Pointer.new { self.rb_mjit_compile_info }, Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), compile_info)")],
merge_ivar_guards_p: [self._Bool, Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), merge_ivar_guards_p)")],
ivar_serial: [self.rb_serial_t, Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), ivar_serial)")],
max_ivar_index: [CType::Immediate.parse("size_t"), Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), max_ivar_index)")],
inlined_iseqs: [CType::Pointer.new { CType::Pointer.new { self.rb_iseq_constant_body } }, Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), inlined_iseqs)")],
inline_context: [self.inlined_call_context, Primitive.cexpr!("OFFSETOF((*((struct compile_status *)NULL)), inline_context)")],
@ -255,9 +240,7 @@ module RubyVM::MJIT
def C.iseq_inline_iv_cache_entry
@iseq_inline_iv_cache_entry ||= CType::Struct.new(
"iseq_inline_iv_cache_entry", Primitive.cexpr!("SIZEOF(struct iseq_inline_iv_cache_entry)"),
source_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF((*((struct iseq_inline_iv_cache_entry *)NULL)), source_shape_id)")],
dest_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF((*((struct iseq_inline_iv_cache_entry *)NULL)), dest_shape_id)")],
attr_index: [self.attr_index_t, Primitive.cexpr!("OFFSETOF((*((struct iseq_inline_iv_cache_entry *)NULL)), attr_index)")],
entry: [CType::Pointer.new { self.rb_iv_index_tbl_entry }, Primitive.cexpr!("OFFSETOF((*((struct iseq_inline_iv_cache_entry *)NULL)), entry)")],
)
end
@ -330,11 +313,7 @@ module RubyVM::MJIT
call_: [self.vm_call_handler, Primitive.cexpr!("OFFSETOF((*((struct rb_callcache *)NULL)), call_)")],
aux_: [CType::Union.new(
"", Primitive.cexpr!("SIZEOF(((struct rb_callcache *)NULL)->aux_)"),
attr: CType::Struct.new(
"", Primitive.cexpr!("SIZEOF(((struct rb_callcache *)NULL)->aux_.attr)"),
index: [self.attr_index_t, Primitive.cexpr!("OFFSETOF(((struct rb_callcache *)NULL)->aux_.attr, index)")],
dest_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF(((struct rb_callcache *)NULL)->aux_.attr, dest_shape_id)")],
),
attr_index: CType::Immediate.parse("unsigned int"),
method_missing_reason: self.method_missing_reason,
v: self.VALUE,
), Primitive.cexpr!("OFFSETOF((*((struct rb_callcache *)NULL)), aux_)")],
@ -524,8 +503,8 @@ module RubyVM::MJIT
@rb_iv_index_tbl_entry ||= CType::Struct.new(
"rb_iv_index_tbl_entry", Primitive.cexpr!("SIZEOF(struct rb_iv_index_tbl_entry)"),
index: [CType::Immediate.parse("uint32_t"), Primitive.cexpr!("OFFSETOF((*((struct rb_iv_index_tbl_entry *)NULL)), index)")],
source_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF((*((struct rb_iv_index_tbl_entry *)NULL)), source_shape_id)")],
dest_shape_id: [self.shape_id_t, Primitive.cexpr!("OFFSETOF((*((struct rb_iv_index_tbl_entry *)NULL)), dest_shape_id)")],
class_serial: [self.rb_serial_t, Primitive.cexpr!("OFFSETOF((*((struct rb_iv_index_tbl_entry *)NULL)), class_serial)")],
class_value: [self.VALUE, Primitive.cexpr!("OFFSETOF((*((struct rb_iv_index_tbl_entry *)NULL)), class_value)")],
)
end
@ -598,10 +577,6 @@ module RubyVM::MJIT
@VALUE ||= CType::Immediate.find(Primitive.cexpr!("SIZEOF(VALUE)"), Primitive.cexpr!("SIGNED_TYPE_P(VALUE)"))
end
def C.shape_id_t
@shape_id_t ||= CType::Immediate.find(Primitive.cexpr!("SIZEOF(shape_id_t)"), Primitive.cexpr!("SIGNED_TYPE_P(shape_id_t)"))
end
def C._Bool
CType::Bool.new
end


@ -8,7 +8,6 @@
#include "builtin.h"
#include "mjit.h"
#include "mjit_unit.h"
#include "shape.h"
// Macros to check if a position is already compiled using compile_status.stack_size_for_pos
#define NOT_COMPILED_STACK_SIZE -1
@ -49,6 +48,7 @@ struct compile_status {
// Mutated optimization levels
struct rb_mjit_compile_info *compile_info;
bool merge_ivar_guards_p; // If true, merge guards of ivar accesses
rb_serial_t ivar_serial; // ic_serial of IVC in is_entries (used only when merge_ivar_guards_p)
size_t max_ivar_index; // Max IVC index in is_entries (used only when merge_ivar_guards_p)
// If `inlined_iseqs[pos]` is not NULL, `mjit_compile_body` tries to inline ISeq there.
const struct rb_iseq_constant_body **inlined_iseqs;


@ -39,7 +39,6 @@
#include "ruby/util.h"
#include "ruby/assert.h"
#include "builtin.h"
#include "shape.h"
/*!
* \addtogroup object
@ -272,33 +271,9 @@ rb_obj_copy_ivar(VALUE dest, VALUE obj)
VALUE *src_buf = ROBJECT_IVPTR(obj);
uint32_t dest_len = ROBJECT_NUMIV(dest);
uint32_t src_len = ROBJECT_NUMIV(obj);
uint32_t max_len = dest_len < src_len ? src_len : dest_len;
uint32_t len = dest_len < src_len ? dest_len : src_len;
rb_ensure_iv_list_size(dest, dest_len, max_len);
dest_len = ROBJECT_NUMIV(dest);
uint32_t min_len = dest_len > src_len ? src_len : dest_len;
if (RBASIC(obj)->flags & ROBJECT_EMBED) {
src_buf = ROBJECT(obj)->as.ary;
// embedded -> embedded
if (RBASIC(dest)->flags & ROBJECT_EMBED) {
dest_buf = ROBJECT(dest)->as.ary;
}
// embedded -> extended
else {
dest_buf = ROBJECT(dest)->as.heap.ivptr;
}
}
// extended -> extended
else {
RUBY_ASSERT(!(RBASIC(dest)->flags & ROBJECT_EMBED));
dest_buf = ROBJECT(dest)->as.heap.ivptr;
src_buf = ROBJECT(obj)->as.heap.ivptr;
}
MEMCPY(dest_buf, src_buf, VALUE, min_len);
MEMCPY(dest_buf, src_buf, VALUE, len);
}
static void
@ -308,23 +283,10 @@ init_copy(VALUE dest, VALUE obj)
rb_raise(rb_eTypeError, "[bug] frozen object (%s) allocated", rb_obj_classname(dest));
}
RBASIC(dest)->flags &= ~(T_MASK|FL_EXIVAR);
// Copies the shape id from obj to dest
RBASIC(dest)->flags |= RBASIC(obj)->flags & (T_MASK|FL_EXIVAR);
rb_copy_wb_protected_attribute(dest, obj);
rb_copy_generic_ivar(dest, obj);
rb_gc_copy_finalizer(dest, obj);
rb_shape_t *shape_to_set = rb_shape_get_shape(obj);
// If the object is frozen, the "dup"'d object will *not* be frozen,
// so we need to copy the frozen shape's parent to the new object.
if (rb_shape_frozen_shape_p(shape_to_set)) {
shape_to_set = shape_to_set->parent;
}
// shape ids are different
rb_shape_set_shape(dest, shape_to_set);
if (RB_TYPE_P(obj, T_OBJECT)) {
rb_obj_copy_ivar(dest, obj);
}
@ -430,9 +392,6 @@ mutable_obj_clone(VALUE obj, VALUE kwfreeze)
case Qnil:
rb_funcall(clone, id_init_clone, 1, obj);
RBASIC(clone)->flags |= RBASIC(obj)->flags & FL_FREEZE;
if (RB_OBJ_FROZEN(obj)) {
rb_shape_transition_shape_frozen(clone);
}
break;
case Qtrue:
{
@ -448,7 +407,6 @@ mutable_obj_clone(VALUE obj, VALUE kwfreeze)
argv[1] = freeze_true_hash;
rb_funcallv_kw(clone, id_init_clone, 2, argv, RB_PASS_KEYWORDS);
RBASIC(clone)->flags |= FL_FREEZE;
rb_shape_transition_shape_frozen(clone);
break;
}
case Qfalse:


@ -289,13 +289,11 @@ rb_ractor_id(const rb_ractor_t *r)
#if RACTOR_CHECK_MODE > 0
uint32_t rb_ractor_current_id(void);
// If ractor check mode is enabled, shape bits needs to be smaller
STATIC_ASSERT(shape_bits, SHAPE_BITS == 16);
static inline void
rb_ractor_setup_belonging_to(VALUE obj, uint32_t rid)
{
VALUE flags = RBASIC(obj)->flags & 0xffff0000ffffffff; // 4B
VALUE flags = RBASIC(obj)->flags & 0xffffffff; // 4B
RBASIC(obj)->flags = flags | ((VALUE)rid << 32);
}
@ -312,7 +310,7 @@ rb_ractor_belonging(VALUE obj)
return 0;
}
else {
return RBASIC(obj)->flags >> 32 & 0xFFFF;
return RBASIC(obj)->flags >> 32;
}
}
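The ractor.h hunk above reverts a flags-word layout change: with shapes, the top 16 bits of flags held the shape ID, so in RACTOR_CHECK_MODE builds the ractor ID stored at bit 32 had to be masked to 16 bits; after the revert the ractor ID gets the full upper 32 bits back. A standalone sketch of the two packings (bit widths as in the hunk, names illustrative):

```c
/* Bit-layout sketch of the flags word in RACTOR_CHECK_MODE builds, before
 * and after this revert (widths as in the hunk above; standalone C). */
#include <assert.h>
#include <stdint.h>

/* With shapes: [ shape_id:16 | ractor_id:16 | ordinary flags:32 ] */
static uint64_t
pack_rid_with_shapes(uint64_t flags, uint32_t rid)
{
    flags &= 0xffff0000ffffffffULL;  /* keep shape bits, clear the rid slot */
    return flags | ((uint64_t)(rid & 0xffff) << 32);
}

/* After the revert: [ ractor_id:32 | ordinary flags:32 ] */
static uint64_t
pack_rid_after_revert(uint64_t flags, uint32_t rid)
{
    flags &= 0xffffffffULL;          /* the whole upper half is the rid again */
    return flags | ((uint64_t)rid << 32);
}

int main(void)
{
    assert(((pack_rid_with_shapes(0, 0x1234) >> 32) & 0xffff) == 0x1234);
    assert((pack_rid_after_revert(0, 0xdeadbeef) >> 32) == 0xdeadbeefULL);
    return 0;
}
```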

shape.c (523 changes)

@ -1,523 +0,0 @@
#include "vm_core.h"
#include "vm_sync.h"
#include "shape.h"
#include "internal/class.h"
#include "internal/symbol.h"
#include "internal/variable.h"
#include <stdbool.h>
/*
* Shape getters
*/
static rb_shape_t*
rb_shape_get_root_shape(void) {
return GET_VM()->root_shape;
}
shape_id_t
rb_shape_id(rb_shape_t * shape)
{
return (shape_id_t)(shape - GET_VM()->shape_list);
}
static rb_shape_t*
rb_shape_get_frozen_root_shape(void) {
return GET_VM()->frozen_root_shape;
}
bool
rb_shape_root_shape_p(rb_shape_t* shape) {
return shape == rb_shape_get_root_shape();
}
rb_shape_t*
rb_shape_get_shape_by_id(shape_id_t shape_id)
{
RUBY_ASSERT(shape_id != INVALID_SHAPE_ID);
rb_vm_t *vm = GET_VM();
rb_shape_t *shape = &vm->shape_list[shape_id];
return shape;
}
rb_shape_t*
rb_shape_get_shape_by_id_without_assertion(shape_id_t shape_id)
{
RUBY_ASSERT(shape_id != INVALID_SHAPE_ID);
rb_vm_t *vm = GET_VM();
rb_shape_t *shape = &vm->shape_list[shape_id];
return shape;
}
#if !SHAPE_IN_BASIC_FLAGS
static inline shape_id_t
RCLASS_SHAPE_ID(VALUE obj)
{
return RCLASS_EXT(obj)->shape_id;
}
shape_id_t rb_generic_shape_id(VALUE obj);
#endif
shape_id_t
rb_shape_get_shape_id(VALUE obj)
{
if (RB_SPECIAL_CONST_P(obj)) {
return FROZEN_ROOT_SHAPE_ID;
}
#if SHAPE_IN_BASIC_FLAGS
return RBASIC_SHAPE_ID(obj);
#else
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
return ROBJECT_SHAPE_ID(obj);
break;
case T_CLASS:
case T_MODULE:
return RCLASS_SHAPE_ID(obj);
default:
return rb_generic_shape_id(obj);
}
#endif
}
rb_shape_t*
rb_shape_get_shape(VALUE obj)
{
return rb_shape_get_shape_by_id(rb_shape_get_shape_id(obj));
}
static rb_shape_t *
rb_shape_lookup_id(rb_shape_t* shape, ID id, enum shape_type shape_type) {
while (shape->parent) {
if (shape->edge_name == id) {
// If the shape type is different, we don't
// want this to count as a "found" ID
if (shape_type == (enum shape_type)shape->type) {
return shape;
}
else {
return NULL;
}
}
shape = shape->parent;
}
return NULL;
}
static rb_shape_t*
get_next_shape_internal(rb_shape_t* shape, ID id, VALUE obj, enum shape_type shape_type)
{
rb_shape_t *res = NULL;
RUBY_ASSERT(SHAPE_FROZEN != (enum shape_type)shape->type);
RB_VM_LOCK_ENTER();
{
if (rb_shape_lookup_id(shape, id, shape_type)) {
// If shape already contains the ivar that is being set, we'll return shape
res = shape;
}
else {
if (!shape->edges) {
shape->edges = rb_id_table_create(0);
}
// Lookup the shape in edges - if there's already an edge and a corresponding shape for it,
// we can return that. Otherwise, we'll need to get a new shape
if (!rb_id_table_lookup(shape->edges, id, (VALUE *)&res)) {
// In this case, the shape exists, but the shape is garbage, so we need to recreate it
if (res) {
rb_id_table_delete(shape->edges, id);
res->parent = NULL;
}
rb_shape_t * new_shape = rb_shape_alloc(id, shape);
new_shape->type = (uint8_t)shape_type;
switch(shape_type) {
case SHAPE_IVAR:
new_shape->iv_count = new_shape->parent->iv_count + 1;
// Check if we should update max_iv_count on the object's class
if (BUILTIN_TYPE(obj) == T_OBJECT) {
VALUE klass = rb_obj_class(obj);
if (new_shape->iv_count > RCLASS_EXT(klass)->max_iv_count) {
RCLASS_EXT(klass)->max_iv_count = new_shape->iv_count;
}
}
break;
case SHAPE_IVAR_UNDEF:
case SHAPE_FROZEN:
new_shape->iv_count = new_shape->parent->iv_count;
break;
case SHAPE_ROOT:
rb_bug("Unreachable");
break;
}
rb_id_table_insert(shape->edges, id, (VALUE)new_shape);
res = new_shape;
}
}
}
RB_VM_LOCK_LEAVE();
return res;
}
MJIT_FUNC_EXPORTED int
rb_shape_frozen_shape_p(rb_shape_t* shape)
{
return SHAPE_FROZEN == (enum shape_type)shape->type;
}
void
rb_shape_transition_shape_remove_ivar(VALUE obj, ID id, rb_shape_t *shape)
{
rb_shape_t* next_shape = get_next_shape_internal(shape, id, obj, SHAPE_IVAR_UNDEF);
if (shape == next_shape) {
return;
}
rb_shape_set_shape(obj, next_shape);
}
void
rb_shape_transition_shape_frozen(VALUE obj)
{
rb_shape_t* shape = rb_shape_get_shape(obj);
RUBY_ASSERT(shape);
RUBY_ASSERT(RB_OBJ_FROZEN(obj));
if (rb_shape_frozen_shape_p(shape)) {
return;
}
rb_shape_t* next_shape;
if (shape == rb_shape_get_root_shape()) {
switch(BUILTIN_TYPE(obj)) {
case T_OBJECT:
case T_CLASS:
case T_MODULE:
break;
default:
return;
}
next_shape = rb_shape_get_frozen_root_shape();
}
else {
static ID id_frozen;
if (!id_frozen) {
id_frozen = rb_make_internal_id();
}
next_shape = get_next_shape_internal(shape, (ID)id_frozen, obj, SHAPE_FROZEN);
}
RUBY_ASSERT(next_shape);
rb_shape_set_shape(obj, next_shape);
}
void
rb_shape_transition_shape(VALUE obj, ID id, rb_shape_t *shape)
{
rb_shape_t* next_shape = rb_shape_get_next(shape, obj, id);
if (shape == next_shape) {
return;
}
rb_shape_set_shape(obj, next_shape);
}
rb_shape_t*
rb_shape_get_next(rb_shape_t* shape, VALUE obj, ID id)
{
return get_next_shape_internal(shape, id, obj, SHAPE_IVAR);
}
bool
rb_shape_get_iv_index(rb_shape_t * shape, ID id, attr_index_t *value) {
while (shape->parent) {
if (shape->edge_name == id) {
enum shape_type shape_type;
shape_type = (enum shape_type)shape->type;
switch(shape_type) {
case SHAPE_IVAR:
RUBY_ASSERT(shape->iv_count > 0);
*value = shape->iv_count - 1;
return true;
case SHAPE_IVAR_UNDEF:
case SHAPE_ROOT:
return false;
case SHAPE_FROZEN:
rb_bug("Ivar should not exist on frozen transition\n");
}
}
shape = shape->parent;
}
return false;
}
static rb_shape_t *
shape_alloc(void)
{
rb_vm_t *vm = GET_VM();
shape_id_t shape_id = vm->next_shape_id;
vm->next_shape_id++;
if (shape_id == MAX_SHAPE_ID) {
// TODO: Make an OutOfShapesError ??
rb_bug("Out of shapes\n");
}
return &GET_VM()->shape_list[shape_id];
}
rb_shape_t *
rb_shape_alloc(ID edge_name, rb_shape_t * parent)
{
rb_shape_t * shape = shape_alloc();
shape->edge_name = edge_name;
shape->iv_count = 0;
shape->parent = parent;
return shape;
}
MJIT_FUNC_EXPORTED void
rb_shape_set_shape(VALUE obj, rb_shape_t* shape)
{
rb_shape_set_shape_id(obj, rb_shape_id(shape));
}
VALUE rb_cShape;
/*
* Exposing Shape to Ruby via RubyVM.debug_shape
*/
static const rb_data_type_t shape_data_type = {
"Shape",
{NULL, NULL, NULL,},
0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
};
static VALUE
rb_wrapped_shape_id(VALUE self) {
rb_shape_t * shape;
TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
return INT2NUM(rb_shape_id(shape));
}
static VALUE
rb_shape_type(VALUE self) {
rb_shape_t * shape;
TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
return INT2NUM(shape->type);
}
static VALUE
rb_shape_parent_id(VALUE self)
{
rb_shape_t * shape;
TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
if (shape->parent) {
return INT2NUM(rb_shape_id(shape->parent));
}
else {
return Qnil;
}
}
static VALUE parse_key(ID key) {
if ((key & RUBY_ID_INTERNAL) == RUBY_ID_INTERNAL) {
return LONG2NUM(key);
} else {
return ID2SYM(key);
}
}
static VALUE
rb_shape_t_to_rb_cShape(rb_shape_t *shape) {
union { const rb_shape_t *in; void *out; } deconst;
VALUE res;
deconst.in = shape;
res = TypedData_Wrap_Struct(rb_cShape, &shape_data_type, deconst.out);
return res;
}
static enum rb_id_table_iterator_result rb_edges_to_hash(ID key, VALUE value, void *ref)
{
rb_hash_aset(*(VALUE *)ref, parse_key(key), rb_shape_t_to_rb_cShape((rb_shape_t*)value));
return ID_TABLE_CONTINUE;
}
static VALUE
rb_shape_edges(VALUE self)
{
rb_shape_t* shape;
TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
VALUE hash = rb_hash_new();
if (shape->edges) {
rb_id_table_foreach(shape->edges, rb_edges_to_hash, &hash);
}
return hash;
}
static VALUE
rb_shape_edge_name(VALUE self)
{
rb_shape_t* shape;
TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
if (shape->edge_name) {
return ID2SYM(shape->edge_name);
}
else {
return Qnil;
}
}
static VALUE
rb_shape_iv_count(VALUE self)
{
rb_shape_t* shape;
TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
return INT2NUM(shape->iv_count);
}
static VALUE
rb_shape_export_depth(VALUE self)
{
rb_shape_t* shape;
TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
unsigned int depth = 0;
while (shape->parent) {
depth++;
shape = shape->parent;
}
return INT2NUM(depth);
}
static VALUE
rb_shape_parent(VALUE self)
{
rb_shape_t * shape;
TypedData_Get_Struct(self, rb_shape_t, &shape_data_type, shape);
if (shape->parent) {
return rb_shape_t_to_rb_cShape(shape->parent);
}
else {
return Qnil;
}
}
VALUE rb_shape_debug_shape(VALUE self, VALUE obj) {
return rb_shape_t_to_rb_cShape(rb_shape_get_shape(obj));
}
VALUE rb_shape_debug_root_shape(VALUE self) {
return rb_shape_t_to_rb_cShape(rb_shape_get_root_shape());
}
VALUE rb_shape_debug_frozen_root_shape(VALUE self) {
return rb_shape_t_to_rb_cShape(rb_shape_get_frozen_root_shape());
}
VALUE rb_obj_shape(rb_shape_t* shape);
static enum rb_id_table_iterator_result collect_keys_and_values(ID key, VALUE value, void *ref)
{
rb_hash_aset(*(VALUE *)ref, parse_key(key), rb_obj_shape((rb_shape_t*)value));
return ID_TABLE_CONTINUE;
}
static VALUE edges(struct rb_id_table* edges)
{
VALUE hash = rb_hash_new();
if (edges)
rb_id_table_foreach(edges, collect_keys_and_values, &hash);
return hash;
}
VALUE rb_obj_shape(rb_shape_t* shape) {
VALUE rb_shape = rb_hash_new();
rb_hash_aset(rb_shape, ID2SYM(rb_intern("id")), INT2NUM(rb_shape_id(shape)));
rb_hash_aset(rb_shape, ID2SYM(rb_intern("edges")), edges(shape->edges));
if (shape == rb_shape_get_root_shape()) {
rb_hash_aset(rb_shape, ID2SYM(rb_intern("parent_id")), INT2NUM(ROOT_SHAPE_ID));
}
else {
rb_hash_aset(rb_shape, ID2SYM(rb_intern("parent_id")), INT2NUM(rb_shape_id(shape->parent)));
}
rb_hash_aset(rb_shape, ID2SYM(rb_intern("edge_name")), rb_id2str(shape->edge_name));
return rb_shape;
}
static VALUE shape_transition_tree(VALUE self) {
return rb_obj_shape(rb_shape_get_root_shape());
}
static VALUE shape_count(VALUE self) {
int shape_count = 0;
rb_vm_t *vm = GET_VM();
for(shape_id_t i = 0; i < vm->next_shape_id; i++) {
if(rb_shape_get_shape_by_id_without_assertion(i)) {
shape_count++;
}
}
return INT2NUM(shape_count);
}
static VALUE
shape_max_shape_count(VALUE self)
{
return INT2NUM(GET_VM()->next_shape_id);
}
VALUE
rb_shape_flags_mask(void)
{
return SHAPE_FLAG_MASK;
}
void
Init_shape(void)
{
rb_cShape = rb_define_class_under(rb_cRubyVM, "Shape", rb_cObject);
rb_undef_alloc_func(rb_cShape);
rb_define_method(rb_cShape, "parent_id", rb_shape_parent_id, 0);
rb_define_method(rb_cShape, "parent", rb_shape_parent, 0);
rb_define_method(rb_cShape, "edges", rb_shape_edges, 0);
rb_define_method(rb_cShape, "edge_name", rb_shape_edge_name, 0);
rb_define_method(rb_cShape, "iv_count", rb_shape_iv_count, 0);
rb_define_method(rb_cShape, "depth", rb_shape_export_depth, 0);
rb_define_method(rb_cShape, "id", rb_wrapped_shape_id, 0);
rb_define_method(rb_cShape, "type", rb_shape_type, 0);
rb_define_const(rb_cShape, "SHAPE_ROOT", INT2NUM(SHAPE_ROOT));
rb_define_const(rb_cShape, "SHAPE_IVAR", INT2NUM(SHAPE_IVAR));
rb_define_const(rb_cShape, "SHAPE_IVAR_UNDEF", INT2NUM(SHAPE_IVAR_UNDEF));
rb_define_const(rb_cShape, "SHAPE_FROZEN", INT2NUM(SHAPE_FROZEN));
rb_define_const(rb_cShape, "SHAPE_BITS", INT2NUM(SHAPE_BITS));
rb_define_module_function(rb_cRubyVM, "debug_shape_transition_tree", shape_transition_tree, 0);
rb_define_module_function(rb_cRubyVM, "debug_shape_count", shape_count, 0);
rb_define_singleton_method(rb_cRubyVM, "debug_shape", rb_shape_debug_shape, 1);
rb_define_singleton_method(rb_cRubyVM, "debug_max_shape_count", shape_max_shape_count, 0);
rb_define_singleton_method(rb_cRubyVM, "debug_root_shape", rb_shape_debug_root_shape, 0);
rb_define_singleton_method(rb_cRubyVM, "debug_frozen_root_shape", rb_shape_debug_frozen_root_shape, 0);
}
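The deleted shape.c above is the whole shapes runtime: get_next_shape_internal extends a node's edge table under the VM lock, and rb_shape_get_iv_index recovers an ivar's storage slot by walking parent links until it finds the edge that introduced the ivar, whose iv_count minus one is the slot. A hedged sketch of that lookup, reusing the toy node from the sketch near the top of this page and omitting the SHAPE_IVAR_UNDEF/SHAPE_FROZEN type checks the real code performs:

```c
/* Sketch of rb_shape_get_iv_index: walk up the transition chain; the node
 * whose edge introduced the ivar holds its slot as iv_count - 1. Toy node
 * as in the earlier sketch; the real code also rejects SHAPE_IVAR_UNDEF
 * and SHAPE_FROZEN transitions here. */
#include <stdbool.h>
#include <string.h>

typedef struct shape {
    struct shape *parent;
    const char   *edge_name;
    unsigned      iv_count;
} shape_t;

static bool
shape_iv_index(const shape_t *shape, const char *name, unsigned *out)
{
    while (shape->parent) {                    /* stop at the root shape */
        if (strcmp(shape->edge_name, name) == 0) {
            *out = shape->iv_count - 1;        /* slot in the object's ivptr */
            return true;
        }
        shape = shape->parent;
    }
    return false;                              /* ivar unknown to this shape */
}
```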

shape.h (150 changes)

@ -1,150 +0,0 @@
#ifndef RUBY_SHAPE_H
#define RUBY_SHAPE_H
#if (SIZEOF_UINT64_T == SIZEOF_VALUE)
#define SIZEOF_SHAPE_T 4
#define SHAPE_IN_BASIC_FLAGS 1
typedef uint32_t attr_index_t;
#else
#define SIZEOF_SHAPE_T 2
#define SHAPE_IN_BASIC_FLAGS 0
typedef uint16_t attr_index_t;
#endif
#define MAX_IVARS (attr_index_t)(-1)
#if RUBY_DEBUG || (defined(VM_CHECK_MODE) && VM_CHECK_MODE > 0)
# if SIZEOF_SHAPE_T == 4
typedef uint32_t shape_id_t;
# define SHAPE_BITS 16
# else
typedef uint16_t shape_id_t;
# define SHAPE_BITS 16
# endif
#else
# if SIZEOF_SHAPE_T == 4
typedef uint32_t shape_id_t;
# define SHAPE_BITS 32
# else
typedef uint16_t shape_id_t;
# define SHAPE_BITS 16
# endif
#endif
# define SHAPE_MASK (((uintptr_t)1 << SHAPE_BITS) - 1)
# define SHAPE_FLAG_MASK (((VALUE)-1) >> SHAPE_BITS)
# define SHAPE_FLAG_SHIFT ((SIZEOF_VALUE * 8) - SHAPE_BITS)
# define SHAPE_BITMAP_SIZE 16384
# define MAX_SHAPE_ID (SHAPE_MASK - 1)
# define INVALID_SHAPE_ID SHAPE_MASK
# define ROOT_SHAPE_ID 0x0
# define FROZEN_ROOT_SHAPE_ID 0x1
struct rb_shape {
struct rb_shape * parent; // Pointer to the parent
struct rb_id_table * edges; // id_table from ID (ivar) to next shape
ID edge_name; // ID (ivar) for transition from parent to rb_shape
attr_index_t iv_count;
uint8_t type;
};
typedef struct rb_shape rb_shape_t;
enum shape_type {
SHAPE_ROOT,
SHAPE_IVAR,
SHAPE_FROZEN,
SHAPE_IVAR_UNDEF,
};
static inline shape_id_t
IMEMO_CACHED_SHAPE_ID(VALUE cc)
{
RBIMPL_ASSERT_TYPE((VALUE)cc, RUBY_T_IMEMO);
return (shape_id_t)(SHAPE_MASK & (RBASIC(cc)->flags >> SHAPE_FLAG_SHIFT));
}
static inline void
IMEMO_SET_CACHED_SHAPE_ID(VALUE cc, shape_id_t shape_id)
{
RBIMPL_ASSERT_TYPE((VALUE)cc, RUBY_T_IMEMO);
RBASIC(cc)->flags &= SHAPE_FLAG_MASK;
RBASIC(cc)->flags |= ((VALUE)(shape_id) << SHAPE_FLAG_SHIFT);
}
#if SHAPE_IN_BASIC_FLAGS
static inline shape_id_t
RBASIC_SHAPE_ID(VALUE obj)
{
RUBY_ASSERT(!RB_SPECIAL_CONST_P(obj));
return (shape_id_t)(SHAPE_MASK & ((RBASIC(obj)->flags) >> SHAPE_FLAG_SHIFT));
}
static inline void
RBASIC_SET_SHAPE_ID(VALUE obj, shape_id_t shape_id)
{
// Ractors are occupying the upper 32 bits of flags, but only in debug mode
// Object shapes are occupying top bits
RBASIC(obj)->flags &= SHAPE_FLAG_MASK;
RBASIC(obj)->flags |= ((VALUE)(shape_id) << SHAPE_FLAG_SHIFT);
}
static inline shape_id_t
ROBJECT_SHAPE_ID(VALUE obj)
{
RBIMPL_ASSERT_TYPE(obj, RUBY_T_OBJECT);
return RBASIC_SHAPE_ID(obj);
}
static inline void
ROBJECT_SET_SHAPE_ID(VALUE obj, shape_id_t shape_id)
{
RBIMPL_ASSERT_TYPE(obj, RUBY_T_OBJECT);
RBASIC_SET_SHAPE_ID(obj, shape_id);
}
#else
static inline shape_id_t
ROBJECT_SHAPE_ID(VALUE obj)
{
RBIMPL_ASSERT_TYPE(obj, RUBY_T_OBJECT);
return (shape_id_t)(SHAPE_MASK & (RBASIC(obj)->flags >> SHAPE_FLAG_SHIFT));
}
static inline void
ROBJECT_SET_SHAPE_ID(VALUE obj, shape_id_t shape_id)
{
RBASIC(obj)->flags &= SHAPE_FLAG_MASK;
RBASIC(obj)->flags |= ((VALUE)(shape_id) << SHAPE_FLAG_SHIFT);
}
#endif
bool rb_shape_root_shape_p(rb_shape_t* shape);
rb_shape_t* rb_shape_get_shape_by_id_without_assertion(shape_id_t shape_id);
MJIT_SYMBOL_EXPORT_BEGIN
rb_shape_t* rb_shape_get_shape_by_id(shape_id_t shape_id);
void rb_shape_set_shape(VALUE obj, rb_shape_t* shape);
shape_id_t rb_shape_get_shape_id(VALUE obj);
rb_shape_t* rb_shape_get_shape(VALUE obj);
int rb_shape_frozen_shape_p(rb_shape_t* shape);
void rb_shape_transition_shape_frozen(VALUE obj);
void rb_shape_transition_shape_remove_ivar(VALUE obj, ID id, rb_shape_t *shape);
void rb_shape_transition_shape(VALUE obj, ID id, rb_shape_t *shape);
rb_shape_t* rb_shape_get_next(rb_shape_t* shape, VALUE obj, ID id);
bool rb_shape_get_iv_index(rb_shape_t * shape, ID id, attr_index_t * value);
shape_id_t rb_shape_id(rb_shape_t * shape);
MJIT_SYMBOL_EXPORT_END
rb_shape_t * rb_shape_alloc(ID edge_name, rb_shape_t * parent);
bool rb_shape_set_shape_id(VALUE obj, shape_id_t shape_id);
VALUE rb_obj_debug_shape(VALUE self, VALUE obj);
VALUE rb_shape_flags_mask(void);
#endif
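shape.h above packs the shape ID into the upper SHAPE_BITS of every object's flags word: SHAPE_FLAG_SHIFT is (SIZEOF_VALUE * 8) - SHAPE_BITS, reads mask the shifted value with SHAPE_MASK, and writes clear the field with SHAPE_FLAG_MASK before OR-ing the new ID in. A standalone sketch of the same arithmetic, fixing 16 shape bits in a 64-bit flags word as in the debug configuration:

```c
/* Standalone sketch of the deleted shape_id packing: 16 shape bits at the
 * top of a 64-bit flags word, as in the RUBY_DEBUG configuration. */
#include <assert.h>
#include <stdint.h>

typedef uint64_t VALUE;

#define SHAPE_BITS       16
#define SHAPE_MASK       (((VALUE)1 << SHAPE_BITS) - 1)
#define SHAPE_FLAG_MASK  (((VALUE)-1) >> SHAPE_BITS)
#define SHAPE_FLAG_SHIFT ((int)(sizeof(VALUE) * 8) - SHAPE_BITS)

static uint16_t
get_shape_id(VALUE flags)
{
    return (uint16_t)(SHAPE_MASK & (flags >> SHAPE_FLAG_SHIFT));
}

static VALUE
set_shape_id(VALUE flags, uint16_t shape_id)
{
    flags &= SHAPE_FLAG_MASK;   /* clear the old shape bits first */
    return flags | ((VALUE)shape_id << SHAPE_FLAG_SHIFT);
}

int main(void)
{
    VALUE flags = set_shape_id(0x0000ffffffffffffULL, 0x2a);
    assert(get_shape_id(flags) == 0x2a);
    return 0;
}
```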


@ -7,7 +7,6 @@ module Bug end
module Bug::Marshal
class TestInternalIVar < Test::Unit::TestCase
def test_marshal
pend "We don't support IVs with ID of 0"
v = InternalIVar.new("hello", "world", "bye")
assert_equal("hello", v.normal)
assert_equal("world", v.internal)


@ -831,7 +831,7 @@ class TestMJIT < Test::Unit::TestCase
end
def test_inlined_exivar
assert_eval_with_jit("#{<<~"begin;"}\n#{<<~"end;"}", stdout: "aaa", success_count: 4, recompile_count: 2, min_calls: 2)
assert_eval_with_jit("#{<<~"begin;"}\n#{<<~"end;"}", stdout: "aaa", success_count: 3, recompile_count: 1, min_calls: 2)
begin;
class Foo < Hash
def initialize
@ -850,7 +850,7 @@ class TestMJIT < Test::Unit::TestCase
end
def test_inlined_undefined_ivar
assert_eval_with_jit("#{<<~"begin;"}\n#{<<~"end;"}", stdout: "bbb", success_count: 2, min_calls: 2)
assert_eval_with_jit("#{<<~"begin;"}\n#{<<~"end;"}", stdout: "bbb", success_count: 3, min_calls: 3)
begin;
class Foo
def initialize


@ -993,13 +993,4 @@ class TestObject < Test::Unit::TestCase
end
EOS
end
def test_frozen_inspect
obj = Object.new
obj.instance_variable_set(:@a, "a")
ins = obj.inspect
obj.freeze
assert_equal(ins, obj.inspect)
end
end


@ -1,173 +0,0 @@
# frozen_string_literal: false
require 'test/unit'
# These test the functionality of object shapes
class TestShapes < Test::Unit::TestCase
class Example
def initialize
@a = 1
end
end
class RemoveAndAdd
def add_foo
@foo = 1
end
def remove
remove_instance_variable(:@foo)
end
def add_bar
@bar = 1
end
end
# RubyVM.debug_shape returns new instances of shape objects for
# each call. This helper method allows us to define equality for
# shapes
def assert_shape_equal(shape1, shape2)
assert_equal(shape1.id, shape2.id)
assert_equal(shape1.parent_id, shape2.parent_id)
assert_equal(shape1.depth, shape2.depth)
assert_equal(shape1.type, shape2.type)
end
def refute_shape_equal(shape1, shape2)
refute_equal(shape1.id, shape2.id)
end
def test_iv_index
example = RemoveAndAdd.new
shape = RubyVM.debug_shape(example)
assert_equal 0, shape.iv_count
example.add_foo # makes a transition
new_shape = RubyVM.debug_shape(example)
assert_equal([:@foo], example.instance_variables)
assert_equal(shape.id, new_shape.parent.id)
assert_equal(1, new_shape.iv_count)
example.remove # makes a transition
remove_shape = RubyVM.debug_shape(example)
assert_equal([], example.instance_variables)
assert_equal(new_shape.id, remove_shape.parent.id)
assert_equal(1, remove_shape.iv_count)
example.add_bar # makes a transition
bar_shape = RubyVM.debug_shape(example)
assert_equal([:@bar], example.instance_variables)
assert_equal(remove_shape.id, bar_shape.parent.id)
assert_equal(2, bar_shape.iv_count)
end
def test_new_obj_has_root_shape
assert_shape_equal(RubyVM.debug_root_shape, RubyVM.debug_shape(Object.new))
end
def test_frozen_new_obj_has_frozen_root_shape
assert_shape_equal(
RubyVM.debug_frozen_root_shape,
RubyVM.debug_shape(Object.new.freeze)
)
end
def test_str_has_root_shape
assert_shape_equal(RubyVM.debug_root_shape, RubyVM.debug_shape(""))
end
def test_array_has_root_shape
assert_shape_equal(RubyVM.debug_root_shape, RubyVM.debug_shape([]))
end
def test_hash_has_root_shape
assert_shape_equal(RubyVM.debug_root_shape, RubyVM.debug_shape({}))
end
def test_true_has_frozen_root_shape
assert_shape_equal(RubyVM.debug_frozen_root_shape, RubyVM.debug_shape(true))
end
def test_nil_has_frozen_root_shape
assert_shape_equal(RubyVM.debug_frozen_root_shape, RubyVM.debug_shape(nil))
end
def test_basic_shape_transition
obj = Example.new
refute_equal(RubyVM.debug_root_shape, RubyVM.debug_shape(obj))
assert_shape_equal(RubyVM.debug_root_shape.edges[:@a], RubyVM.debug_shape(obj))
assert_equal(obj.instance_variable_get(:@a), 1)
end
def test_different_objects_make_same_transition
obj = Example.new
obj2 = ""
obj2.instance_variable_set(:@a, 1)
assert_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
end
def test_duplicating_objects
obj = Example.new
obj2 = obj.dup
assert_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
end
def test_freezing_and_duplicating_object
obj = Object.new.freeze
obj2 = obj.dup
refute_predicate(obj2, :frozen?)
# dup'd objects shouldn't be frozen, and the shape should be the
# parent shape of the copied object
assert_equal(RubyVM.debug_shape(obj).parent.id, RubyVM.debug_shape(obj2).id)
end
def test_freezing_and_duplicating_object_with_ivars
obj = Example.new.freeze
obj2 = obj.dup
refute_predicate(obj2, :frozen?)
refute_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
assert_equal(obj2.instance_variable_get(:@a), 1)
end
def test_freezing_and_duplicating_string_with_ivars
str = "str"
str.instance_variable_set(:@a, 1)
str.freeze
str2 = str.dup
refute_predicate(str2, :frozen?)
refute_equal(RubyVM.debug_shape(str).id, RubyVM.debug_shape(str2).id)
assert_equal(str2.instance_variable_get(:@a), 1)
end
def test_freezing_and_cloning_objects
obj = Object.new.freeze
obj2 = obj.clone(freeze: true)
assert_predicate(obj2, :frozen?)
assert_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
end
def test_freezing_and_cloning_object_with_ivars
obj = Example.new.freeze
obj2 = obj.clone(freeze: true)
assert_predicate(obj2, :frozen?)
assert_shape_equal(RubyVM.debug_shape(obj), RubyVM.debug_shape(obj2))
assert_equal(obj2.instance_variable_get(:@a), 1)
end
def test_freezing_and_cloning_string
str = "str".freeze
str2 = str.clone(freeze: true)
assert_predicate(str2, :frozen?)
assert_shape_equal(RubyVM.debug_shape(str), RubyVM.debug_shape(str2))
end
def test_freezing_and_cloning_string_with_ivars
str = "str"
str.instance_variable_set(:@a, 1)
str.freeze
str2 = str.clone(freeze: true)
assert_predicate(str2, :frozen?)
assert_shape_equal(RubyVM.debug_shape(str), RubyVM.debug_shape(str2))
assert_equal(str2.instance_variable_get(:@a), 1)
end
end


@ -341,17 +341,12 @@ generator = BindingGenerator.new(
VM_METHOD_TYPE_CFUNC
VM_METHOD_TYPE_ISEQ
],
ULONG: %w[
INVALID_SHAPE_ID
SHAPE_MASK
],
},
types: %w[
CALL_DATA
IC
IVC
RB_BUILTIN
attr_index_t
compile_branch
compile_status
inlined_call_context
@ -365,10 +360,10 @@ generator = BindingGenerator.new(
rb_callable_method_entry_struct
rb_callcache
rb_callinfo
rb_control_frame_t
rb_cref_t
rb_execution_context_struct
rb_control_frame_t
rb_execution_context_t
rb_execution_context_struct
rb_iseq_constant_body
rb_iseq_location_t
rb_iseq_struct
@ -383,7 +378,6 @@ generator = BindingGenerator.new(
],
dynamic_types: %w[
VALUE
shape_id_t
],
skip_fields: {
'rb_execution_context_struct.machine': %w[regs], # differs between macOS and Linux

File diff suppressed because it is too large.


@ -11,19 +11,11 @@
/* per-object */
struct gen_ivtbl {
#if !SHAPE_IN_BASIC_FLAGS
uint16_t shape_id;
#endif
uint32_t numiv;
VALUE ivptr[FLEX_ARY_LEN];
};
int rb_ivar_generic_ivtbl_lookup(VALUE obj, struct gen_ivtbl **);
#include "shape.h"
#if !SHAPE_IN_BASIC_FLAGS
shape_id_t rb_generic_shape_id(VALUE obj);
#endif
VALUE rb_ivar_generic_lookup_with_index(VALUE obj, ID id, uint32_t index);
#endif /* RUBY_TOPLEVEL_VARIABLE_H */

vm.c (31 changes)

@ -26,7 +26,6 @@
#include "internal/thread.h"
#include "internal/vm.h"
#include "internal/sanitizers.h"
#include "internal/variable.h"
#include "iseq.h"
#include "mjit.h"
#include "yjit.h"
@ -4022,11 +4021,6 @@ Init_BareVM(void)
rb_native_cond_initialize(&vm->ractor.sync.terminate_cond);
}
#ifndef _WIN32
#include <unistd.h>
#include <sys/mman.h>
#endif
void
Init_vm_objects(void)
{
@ -4038,31 +4032,6 @@ Init_vm_objects(void)
vm->mark_object_ary = rb_ary_hidden_new(128);
vm->loading_table = st_init_strtable();
vm->frozen_strings = st_init_table_with_size(&rb_fstring_hash_type, 10000);
#if HAVE_MMAP
vm->shape_list = (rb_shape_t *)mmap(NULL, rb_size_mul_or_raise(SHAPE_BITMAP_SIZE * 32, sizeof(rb_shape_t), rb_eRuntimeError),
PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (vm->shape_list == MAP_FAILED) {
vm->shape_list = 0;
}
#else
vm->shape_list = xcalloc(SHAPE_BITMAP_SIZE * 32, sizeof(rb_shape_t));
#endif
if (!vm->shape_list) {
rb_memerror();
}
// Root shape
vm->root_shape = rb_shape_alloc(0, 0);
RUBY_ASSERT(rb_shape_id(vm->root_shape) == ROOT_SHAPE_ID);
// Frozen root shape
vm->frozen_root_shape = rb_shape_alloc(rb_make_internal_id(), vm->root_shape);
vm->frozen_root_shape->type = (uint8_t)SHAPE_FROZEN;
RUBY_ASSERT(rb_shape_id(vm->frozen_root_shape) == FROZEN_ROOT_SHAPE_ID);
vm->next_shape_id = 2;
}
/* Stub for builtin function when not building YJIT units*/
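The Init_vm_objects hunk above removes the up-front shape table reservation: mmap an array of SHAPE_BITMAP_SIZE * 32 shapes where available, fall back to xcalloc otherwise, raise rb_memerror if both fail, then seed slots 0 and 1 with the root and frozen-root shapes. A sketch of the reserve-with-fallback allocation; sizes and names are illustrative, and the sketch returns NULL where the real code raises:

```c
/* Sketch of the reserve-with-fallback allocation removed above. POSIX mmap;
 * the real code also guards the size multiply via rb_size_mul_or_raise. */
#include <stdlib.h>
#include <sys/mman.h>

static void *
alloc_shape_arena(size_t nelems, size_t elem_size)
{
    size_t bytes = nelems * elem_size;
    void *p = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        p = calloc(nelems, elem_size);  /* the non-mmap build's xcalloc path */
    }
    return p;                           /* NULL means out of memory */
}
```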


@ -10,7 +10,6 @@
#include "debug_counter.h"
#include "internal/class.h"
#include "shape.h"
enum vm_call_flag_bits {
VM_CALL_ARGS_SPLAT_bit, /* m(*args) */
@ -285,32 +284,14 @@ struct rb_callcache {
const vm_call_handler call_;
union {
struct {
const attr_index_t index;
shape_id_t dest_shape_id;
} attr;
const unsigned int attr_index;
const enum method_missing_reason method_missing_reason; /* used by method_missing */
VALUE v;
} aux_;
};
#define VM_CALLCACHE_UNMARKABLE FL_FREEZE
#define VM_CALLCACHE_ON_STACK FL_EXIVAR
extern const struct rb_callcache *rb_vm_empty_cc(void);
extern const struct rb_callcache *rb_vm_empty_cc_for_super(void);
#define vm_cc_empty() rb_vm_empty_cc()
static inline void
vm_cc_attr_index_initialize(const struct rb_callcache *cc, shape_id_t shape_id)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
VM_ASSERT(cc != vm_cc_empty());
IMEMO_SET_CACHED_SHAPE_ID((VALUE)cc, shape_id);
*(attr_index_t *)&cc->aux_.attr.index = 0;
*(shape_id_t *)&cc->aux_.attr.dest_shape_id = shape_id;
}
#define VM_CALLCACHE_UNMARKABLE IMEMO_FL_USER0
#define VM_CALLCACHE_ON_STACK IMEMO_FL_USER1
static inline const struct rb_callcache *
vm_cc_new(VALUE klass,
@ -318,7 +299,6 @@ vm_cc_new(VALUE klass,
vm_call_handler call)
{
const struct rb_callcache *cc = (const struct rb_callcache *)rb_imemo_new(imemo_callcache, (VALUE)cme, (VALUE)call, 0, klass);
vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
RB_DEBUG_COUNTER_INC(cc_new);
return cc;
}
@ -370,71 +350,30 @@ vm_cc_call(const struct rb_callcache *cc)
return cc->call_;
}
static inline attr_index_t
static inline unsigned int
vm_cc_attr_index(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
return cc->aux_.attr.index - 1;
return cc->aux_.attr_index - 1;
}
static inline bool
vm_cc_attr_index_p(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
return cc->aux_.attr.index != 0;
return cc->aux_.attr_index > 0;
}
static inline shape_id_t
vm_cc_attr_index_source_shape_id(const struct rb_callcache *cc)
static inline uint32_t
vm_ic_entry_index(const struct iseq_inline_iv_cache_entry *ic)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
return IMEMO_CACHED_SHAPE_ID((VALUE)cc);
}
static inline shape_id_t
vm_cc_attr_shape_id(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
return vm_cc_attr_index_source_shape_id(cc);
}
static inline shape_id_t
vm_cc_attr_index_dest_shape_id(const struct rb_callcache *cc)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
return cc->aux_.attr.dest_shape_id;
}
static inline attr_index_t
vm_ic_attr_index(const struct iseq_inline_iv_cache_entry *ic)
{
return ic->attr_index - 1;
return ic->entry->index;
}
static inline bool
vm_ic_attr_index_p(const struct iseq_inline_iv_cache_entry *ic)
vm_ic_entry_p(const struct iseq_inline_iv_cache_entry *ic)
{
return ic->attr_index > 0;
}
static inline shape_id_t
vm_ic_attr_shape_id(const struct iseq_inline_iv_cache_entry *ic)
{
return ic->source_shape_id;
}
static inline shape_id_t
vm_ic_attr_index_source_shape_id(const struct iseq_inline_iv_cache_entry *ic)
{
return ic->source_shape_id;
}
static inline shape_id_t
vm_ic_attr_index_dest_shape_id(const struct iseq_inline_iv_cache_entry *ic)
{
return ic->dest_shape_id;
return ic->entry;
}
static inline unsigned int
@ -468,6 +407,10 @@ vm_cc_valid_p(const struct rb_callcache *cc, const rb_callable_method_entry_t *c
}
}
extern const struct rb_callcache *rb_vm_empty_cc(void);
extern const struct rb_callcache *rb_vm_empty_cc_for_super(void);
#define vm_cc_empty() rb_vm_empty_cc()
/* callcache: mutate */
static inline void
@ -479,29 +422,26 @@ vm_cc_call_set(const struct rb_callcache *cc, vm_call_handler call)
}
static inline void
vm_cc_attr_index_set(const struct rb_callcache *cc, attr_index_t index, shape_id_t source_shape_id, shape_id_t dest_shape_id)
vm_cc_attr_index_set(const struct rb_callcache *cc, int index)
{
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
VM_ASSERT(cc != vm_cc_empty());
IMEMO_SET_CACHED_SHAPE_ID((VALUE)cc, source_shape_id);
*(attr_index_t *)&cc->aux_.attr.index = (index + 1);
*(shape_id_t *)&cc->aux_.attr.dest_shape_id = dest_shape_id;
*(int *)&cc->aux_.attr_index = index + 1;
}
static inline void
vm_ic_attr_index_set(const rb_iseq_t *iseq, const struct iseq_inline_iv_cache_entry *ic, attr_index_t index, shape_id_t source_shape_id, shape_id_t dest_shape_id)
vm_ic_entry_set(struct iseq_inline_iv_cache_entry *ic, struct rb_iv_index_tbl_entry *entry, const rb_iseq_t *iseq)
{
*(shape_id_t *)&ic->source_shape_id = source_shape_id;
*(shape_id_t *)&ic->dest_shape_id = dest_shape_id;
*(attr_index_t *)&ic->attr_index = index + 1;
ic->entry = entry;
RB_OBJ_WRITTEN(iseq, Qundef, entry->class_value);
}
static inline void
vm_ic_attr_index_initialize(const struct iseq_inline_iv_cache_entry *ic, shape_id_t shape_id)
vm_cc_attr_index_initialize(const struct rb_callcache *cc)
{
*(shape_id_t *)&ic->source_shape_id = shape_id;
*(shape_id_t *)&ic->dest_shape_id = shape_id;
*(attr_index_t *)&ic->attr_index = 0;
VM_ASSERT(IMEMO_TYPE_P(cc, imemo_callcache));
VM_ASSERT(cc != vm_cc_empty());
*(int *)&cc->aux_.attr_index = 0;
}
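
Both the callcache and inline-cache accessors in this file lean on one encoding trick: the attribute index is stored as index + 1 so that a zero-filled cache reads as unset while index 0 stays representable. A standalone sketch:

#include <assert.h>
#include <stdint.h>

typedef uint32_t attr_index_t;

struct toy_cache { attr_index_t attr_index; };   /* all-zero means empty */

static void toy_set(struct toy_cache *c, attr_index_t i) { c->attr_index = i + 1; }
static int  toy_filled(const struct toy_cache *c)        { return c->attr_index > 0; }
static attr_index_t toy_get(const struct toy_cache *c)   { return c->attr_index - 1; }

int main(void) {
    struct toy_cache c = { 0 };
    assert(!toy_filled(&c));
    toy_set(&c, 0);   /* caching index 0 is still distinguishable from empty */
    assert(toy_filled(&c) && toy_get(&c) == 0);
    return 0;
}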
static inline void

View file

@ -99,7 +99,6 @@ extern int ruby_assert_critical_section_entered;
#include "ruby/st.h"
#include "ruby_atomic.h"
#include "vm_opts.h"
#include "shape.h"
#include "ruby/thread_native.h"
@ -273,9 +272,7 @@ struct iseq_inline_constant_cache {
};
struct iseq_inline_iv_cache_entry {
shape_id_t source_shape_id;
shape_id_t dest_shape_id;
attr_index_t attr_index;
struct rb_iv_index_tbl_entry *entry;
};
struct iseq_inline_cvar_cache_entry {
@ -690,12 +687,6 @@ typedef struct rb_vm_struct {
VALUE mark_object_ary;
const VALUE special_exceptions[ruby_special_error_count];
/* object shapes */
rb_shape_t *shape_list;
rb_shape_t *root_shape;
rb_shape_t *frozen_root_shape;
shape_id_t next_shape_id;
/* load */
VALUE top_self;
VALUE load_path;

View file

@ -47,7 +47,7 @@ rb_vm_call0(rb_execution_context_t *ec, VALUE recv, ID id, int argc, const VALUE
{
struct rb_calling_info calling = {
.ci = &VM_CI_ON_STACK(id, kw_splat ? VM_CALL_KW_SPLAT : 0, argc, NULL),
.cc = &VM_CC_ON_STACK(Qfalse, vm_call_general, {{ 0 }}, cme),
.cc = &VM_CC_ON_STACK(Qfalse, vm_call_general, { 0 }, cme),
.block_handler = vm_passed_block_handler(ec),
.recv = recv,
.argc = argc,
@ -89,7 +89,7 @@ vm_call0_cc(rb_execution_context_t *ec, VALUE recv, ID id, int argc, const VALUE
static VALUE
vm_call0_cme(rb_execution_context_t *ec, struct rb_calling_info *calling, const VALUE *argv, const rb_callable_method_entry_t *cme)
{
calling->cc = &VM_CC_ON_STACK(Qfalse, vm_call_general, {{ 0 }}, cme);
calling->cc = &VM_CC_ON_STACK(Qfalse, vm_call_general, { 0 }, cme);
return vm_call0_body(ec, calling, argv);
}
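
The {{ 0 }} versus { 0 } churn in these VM_CC_ON_STACK calls tracks the callcache's aux_ union: when its first member is a struct (the shapes version), aggregate initialization takes an extra brace level. In isolation:

#include <stdio.h>

/* Shapes version: first union member is a struct, so {{ 0 }}. */
union aux_shapes { struct { unsigned int index; unsigned int dest; } attr; void *v; };
/* Reverted version: first member is a scalar, so { 0 } is enough. */
union aux_plain { unsigned int attr_index; void *v; };

int main(void) {
    union aux_shapes a = {{ 0 }};
    union aux_plain  b = { 0 };
    printf("%u %u\n", a.attr.index, b.attr_index);
    return 0;
}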

View file

@ -50,11 +50,6 @@ MJIT_STATIC VALUE
ruby_vm_special_exception_copy(VALUE exc)
{
VALUE e = rb_obj_alloc(rb_class_real(RBASIC_CLASS(exc)));
rb_shape_t * shape = rb_shape_get_shape(exc);
if (rb_shape_frozen_shape_p(shape)) {
shape = shape->parent;
}
rb_shape_set_shape(e, shape);
rb_obj_copy_ivar(e, exc);
return e;
}
@ -1090,17 +1085,35 @@ vm_get_cvar_base(const rb_cref_t *cref, const rb_control_frame_t *cfp, int top_l
return klass;
}
ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id));
static inline void
fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, attr_index_t index, shape_id_t shape_id)
static bool
iv_index_tbl_lookup(struct st_table *iv_index_tbl, ID id, struct rb_iv_index_tbl_entry **ent)
{
if (is_attr) {
if (vm_cc_markable(cc)) {
vm_cc_attr_index_set(cc, index, shape_id, shape_id);
}
int found;
st_data_t ent_data;
if (iv_index_tbl == NULL) return false;
RB_VM_LOCK_ENTER();
{
found = st_lookup(iv_index_tbl, (st_data_t)id, &ent_data);
}
RB_VM_LOCK_LEAVE();
if (found) *ent = (struct rb_iv_index_tbl_entry *)ent_data;
return found ? true : false;
}
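
iv_index_tbl_lookup above brackets a plain table lookup with the VM lock; here is the same structure with a pthread mutex and a trivial array standing in for st_table:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t toy_vm_lock = PTHREAD_MUTEX_INITIALIZER;
static int toy_tbl[4] = { 10, 20, 30, 40 };

/* Lock, look up, unlock: the RB_VM_LOCK_ENTER/LEAVE bracket in miniature. */
static int toy_lookup(int key, int *out) {
    int found = 0;
    pthread_mutex_lock(&toy_vm_lock);
    if (key >= 0 && key < 4) { *out = toy_tbl[key]; found = 1; }
    pthread_mutex_unlock(&toy_vm_lock);
    return found;
}

int main(void) {
    int v = 0;
    printf("found=%d v=%d\n", toy_lookup(2, &v), v);
    return 0;
}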
ALWAYS_INLINE(static void fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, struct rb_iv_index_tbl_entry *ent));
static inline void
fill_ivar_cache(const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr, struct rb_iv_index_tbl_entry *ent)
{
// fill cache
if (!is_attr) {
vm_ic_entry_set(ic, ent, iseq);
}
else {
vm_ic_attr_index_set(iseq, ic, index, shape_id, shape_id);
vm_cc_attr_index_set(cc, ent->index);
}
}
@ -1110,120 +1123,68 @@ vm_getivar(VALUE obj, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_call
{
#if OPT_IC_FOR_IVAR
VALUE val = Qundef;
shape_id_t shape_id;
VALUE * ivar_list;
if (SPECIAL_CONST_P(obj)) {
return Qnil;
// frozen?
}
else if (LIKELY(is_attr ?
RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_unset, vm_cc_attr_index_p(cc)) :
RB_DEBUG_COUNTER_INC_UNLESS(ivar_get_ic_miss_serial, vm_ic_entry_p(ic) && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass)))) {
uint32_t index = !is_attr ? vm_ic_entry_index(ic): (vm_cc_attr_index(cc));
#if SHAPE_IN_BASIC_FLAGS
shape_id = RBASIC_SHAPE_ID(obj);
#endif
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
ivar_list = ROBJECT_IVPTR(obj);
VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
#if !SHAPE_IN_BASIC_FLAGS
shape_id = ROBJECT_SHAPE_ID(obj);
#endif
break;
case T_CLASS:
case T_MODULE:
{
goto general_path;
}
default:
if (FL_TEST_RAW(obj, FL_EXIVAR)) {
struct gen_ivtbl *ivtbl;
rb_gen_ivtbl_get(obj, id, &ivtbl);
#if !SHAPE_IN_BASIC_FLAGS
shape_id = ivtbl->shape_id;
#endif
ivar_list = ivtbl->ivptr;
} else {
return Qnil;
}
}
shape_id_t cached_id;
if (is_attr) {
cached_id = vm_cc_attr_shape_id(cc);
}
else {
cached_id = vm_ic_attr_shape_id(ic);
}
attr_index_t index;
if (LIKELY(cached_id == shape_id)) {
RB_DEBUG_COUNTER_INC(ivar_get_ic_hit);
if (is_attr && vm_cc_attr_index_p(cc)) {
index = vm_cc_attr_index(cc);
if (LIKELY(BUILTIN_TYPE(obj) == T_OBJECT) &&
LIKELY(index < ROBJECT_NUMIV(obj))) {
val = ROBJECT_IVPTR(obj)[index];
VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
}
else if (!is_attr && vm_ic_attr_index_p(ic)) {
index = vm_ic_attr_index(ic);
else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
val = rb_ivar_generic_lookup_with_index(obj, id, index);
}
goto ret;
}
else {
struct rb_iv_index_tbl_entry *ent;
if (BUILTIN_TYPE(obj) == T_OBJECT) {
struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
fill_ivar_cache(iseq, ic, cc, is_attr, ent);
// get value
if (ent->index < ROBJECT_NUMIV(obj)) {
val = ROBJECT_IVPTR(obj)[ent->index];
VM_ASSERT(rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
}
}
}
else if (FL_TEST_RAW(obj, FL_EXIVAR)) {
struct st_table *iv_index_tbl = RCLASS_IV_INDEX_TBL(rb_obj_class(obj));
if (iv_index_tbl && iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
fill_ivar_cache(iseq, ic, cc, is_attr, ent);
val = rb_ivar_generic_lookup_with_index(obj, id, ent->index);
}
}
else {
// T_CLASS / T_MODULE
goto general_path;
}
ret:
if (LIKELY(val != Qundef)) {
return val;
}
else {
return Qnil;
}
val = ivar_list[index];
VM_ASSERT(BUILTIN_TYPE(obj) == T_OBJECT && rb_ractor_shareable_p(obj) ? rb_ractor_shareable_p(val) : true);
}
else { // cache miss case
#if RUBY_DEBUG
if (is_attr) {
if (cached_id != INVALID_SHAPE_ID) {
RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_set);
} else {
RB_DEBUG_COUNTER_INC(ivar_get_cc_miss_unset);
}
}
else {
if (cached_id != INVALID_SHAPE_ID) {
RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_set);
} else {
RB_DEBUG_COUNTER_INC(ivar_get_ic_miss_unset);
}
}
#endif
attr_index_t index;
rb_shape_t *shape = rb_shape_get_shape_by_id(shape_id);
if (rb_shape_get_iv_index(shape, id, &index)) {
// Fill the inline cache with the index and shape id we just found
fill_ivar_cache(iseq, ic, cc, is_attr, index, shape_id);
// We fetched the ivar list above
val = ivar_list[index];
}
else {
if (is_attr) {
if (vm_cc_markable(cc)) {
vm_cc_attr_index_initialize(cc, shape_id);
}
}
else {
vm_ic_attr_index_initialize(ic, shape_id);
}
val = Qnil;
}
}
RUBY_ASSERT(val != Qundef);
return val;
general_path:
general_path:
#endif /* OPT_IC_FOR_IVAR */
RB_DEBUG_COUNTER_INC(ivar_get_ic_miss);
@ -1235,20 +1196,6 @@ general_path:
}
}
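
Stripped of the cache-miss bookkeeping, the shapes fast path deleted above is one shape-id compare plus one array load. A toy version of just that guard (invented types; no transitions or reallocation):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t shape_id_t;
typedef uint32_t attr_index_t;

struct toy_obj   { shape_id_t shape_id; uintptr_t ivptr[8]; };
struct toy_cache { shape_id_t shape_id; attr_index_t index; };

/* Returns 0 on a cache miss; the real code falls to a slow path instead. */
static int toy_getivar(const struct toy_obj *o, const struct toy_cache *c,
                       uintptr_t *out) {
    if (o->shape_id != c->shape_id) return 0;   /* guard: one compare */
    *out = o->ivptr[c->index];                  /* hit: one load */
    return 1;
}

int main(void) {
    struct toy_obj o = { .shape_id = 7, .ivptr = { [3] = 42 } };
    struct toy_cache c = { .shape_id = 7, .index = 3 };
    uintptr_t v = 0;
    printf("hit=%d val=%lu\n", toy_getivar(&o, &c, &v), (unsigned long)v);
    return 0;
}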
static void
populate_cache(attr_index_t index, shape_id_t shape_id, shape_id_t next_shape_id, ID id, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, bool is_attr)
{
// Cache population code
if (is_attr) {
if (vm_cc_markable(cc)) {
vm_cc_attr_index_set(cc, index, shape_id, next_shape_id);
}
}
else {
vm_ic_attr_index_set(iseq, ic, index, shape_id, next_shape_id);
}
}
ALWAYS_INLINE(static VALUE vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr));
NOINLINE(static VALUE vm_setivar_slowpath_ivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic));
NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache *cc));
@ -1256,72 +1203,35 @@ NOINLINE(static VALUE vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, cons
static VALUE
vm_setivar_slowpath(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
{
rb_check_frozen_internal(obj);
#if OPT_IC_FOR_IVAR
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
{
rb_check_frozen_internal(obj);
if (RB_TYPE_P(obj, T_OBJECT)) {
struct st_table *iv_index_tbl = ROBJECT_IV_INDEX_TBL(obj);
struct rb_iv_index_tbl_entry *ent;
attr_index_t index;
uint32_t num_iv = ROBJECT_NUMIV(obj);
rb_shape_t* shape = rb_shape_get_shape(obj);
shape_id_t current_shape_id = ROBJECT_SHAPE_ID(obj);
shape_id_t next_shape_id = current_shape_id;
rb_shape_t* next_shape = rb_shape_get_next(shape, obj, id);
if (shape != next_shape) {
rb_shape_set_shape(obj, next_shape);
next_shape_id = ROBJECT_SHAPE_ID(obj);
}
if (rb_shape_get_iv_index(next_shape, id, &index)) { // based off the hash stored in the transition tree
if (index >= MAX_IVARS) {
rb_raise(rb_eArgError, "too many instance variables");
}
populate_cache(index, current_shape_id, next_shape_id, id, iseq, ic, cc, is_attr);
}
else {
rb_bug("Didn't find instance variable %s\n", rb_id2name(id));
}
// Ensure the IV buffer is wide enough to store the IV
if (UNLIKELY(index >= num_iv)) {
rb_init_iv_list(obj);
}
VALUE *ptr = ROBJECT_IVPTR(obj);
RB_OBJ_WRITE(obj, &ptr[index], val);
RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_iv_hit);
return val;
if (iv_index_tbl_lookup(iv_index_tbl, id, &ent)) {
if (!is_attr) {
vm_ic_entry_set(ic, ent, iseq);
}
case T_CLASS:
case T_MODULE:
break;
default:
{
shape_id_t shape_id = rb_shape_get_shape_id(obj);
rb_ivar_set(obj, id, val);
shape_id_t next_shape_id = rb_shape_get_shape_id(obj);
rb_shape_t *next_shape = rb_shape_get_shape_by_id(next_shape_id);
attr_index_t index;
if (rb_shape_get_iv_index(next_shape, id, &index)) { // based off the hash stored in the transition tree
if (index >= MAX_IVARS) {
rb_raise(rb_eArgError, "too many instance variables");
}
populate_cache(index, shape_id, next_shape_id, id, iseq, ic, cc, is_attr);
}
else {
rb_bug("didn't find the id\n");
}
return val;
else if (ent->index >= INT_MAX) {
rb_raise(rb_eArgError, "too many instance variables");
}
else {
vm_cc_attr_index_set(cc, (int)(ent->index));
}
uint32_t index = ent->index;
if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
rb_init_iv_list(obj);
}
VALUE *ptr = ROBJECT_IVPTR(obj);
RB_OBJ_WRITE(obj, &ptr[index], val);
RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_iv_hit);
return val;
}
}
#endif
RB_DEBUG_COUNTER_INC(ivar_set_ic_miss);
@ -1340,94 +1250,39 @@ vm_setivar_slowpath_attr(VALUE obj, ID id, VALUE val, const struct rb_callcache
return vm_setivar_slowpath(obj, id, val, NULL, NULL, cc, true);
}
NOINLINE(static VALUE vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index));
static VALUE
vm_setivar_default(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index)
{
#if SHAPE_IN_BASIC_FLAGS
shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
#else
shape_id_t shape_id = rb_generic_shape_id(obj);
#endif
// Cache hit case
if (shape_id == source_shape_id) {
RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
struct gen_ivtbl *ivtbl = 0;
if (dest_shape_id != shape_id) {
ivtbl = rb_ensure_generic_iv_list_size(obj, index + 1);
#if SHAPE_IN_BASIC_FLAGS
RBASIC_SET_SHAPE_ID(obj, dest_shape_id);
#else
ivtbl->shape_id = dest_shape_id;
#endif
}
else {
// Just get the IV table
rb_gen_ivtbl_get(obj, 0, &ivtbl);
}
VALUE *ptr = ivtbl->ivptr;
RB_OBJ_WRITE(obj, &ptr[index], val);
RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
return val;
}
return Qundef;
}
static inline VALUE
vm_setivar(VALUE obj, ID id, VALUE val, shape_id_t source_shape_id, shape_id_t dest_shape_id, attr_index_t index)
vm_setivar(VALUE obj, ID id, VALUE val, const rb_iseq_t *iseq, IVC ic, const struct rb_callcache *cc, int is_attr)
{
#if OPT_IC_FOR_IVAR
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
{
VM_ASSERT(!rb_ractor_shareable_p(obj) || rb_obj_frozen_p(obj));
// If object's shape id is the same as the source
// then do the shape transition and write the ivar
// If object's shape id is the same as the dest
// then write the ivar
shape_id_t shape_id = ROBJECT_SHAPE_ID(obj);
if (LIKELY(RB_TYPE_P(obj, T_OBJECT)) &&
LIKELY(!RB_OBJ_FROZEN_RAW(obj))) {
// Do we have a cache hit *and* is the CC initialized
if (shape_id == source_shape_id) {
RUBY_ASSERT(dest_shape_id != INVALID_SHAPE_ID && shape_id != INVALID_SHAPE_ID);
VM_ASSERT(!rb_ractor_shareable_p(obj));
VM_ASSERT(!rb_ractor_shareable_p(obj));
if (LIKELY(
(!is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_serial, vm_ic_entry_p(ic) && ic->entry->class_serial == RCLASS_SERIAL(RBASIC(obj)->klass))) ||
( is_attr && RB_DEBUG_COUNTER_INC_UNLESS(ivar_set_ic_miss_unset, vm_cc_attr_index_p(cc))))) {
uint32_t index = !is_attr ? vm_ic_entry_index(ic) : vm_cc_attr_index(cc);
if (dest_shape_id != shape_id) {
if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
rb_init_iv_list(obj);
}
ROBJECT_SET_SHAPE_ID(obj, dest_shape_id);
}
RUBY_ASSERT(index < ROBJECT_NUMIV(obj));
VALUE *ptr = ROBJECT_IVPTR(obj);
RB_OBJ_WRITE(obj, &ptr[index], val);
RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
return val;
}
if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
rb_init_iv_list(obj);
}
break;
case T_CLASS:
case T_MODULE:
RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
default:
break;
VALUE *ptr = ROBJECT_IVPTR(obj);
RB_OBJ_WRITE(obj, &ptr[index], val);
RB_DEBUG_COUNTER_INC(ivar_set_ic_hit);
return val; /* inline cache hit */
}
}
else {
RB_DEBUG_COUNTER_INC(ivar_set_ic_miss_noobject);
}
return Qundef;
#endif /* OPT_IC_FOR_IVAR */
if (is_attr) {
return vm_setivar_slowpath_attr(obj, id, val, cc);
}
else {
return vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
}
}
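
The write side being deleted here caches a (source, destination) shape pair: matching the source shape means the cached slot index is valid and the write may also perform the memoized transition. A toy of that, minus buffer growth and frozen checks (all names invented):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t shape_id_t;
typedef uint32_t attr_index_t;

struct toy_obj { shape_id_t shape_id; uintptr_t ivptr[8]; };

/* Hit iff obj sits at source; write the slot, then move to dest. */
static int toy_setivar(struct toy_obj *o, uintptr_t val,
                       shape_id_t source, shape_id_t dest, attr_index_t index) {
    if (o->shape_id != source) return 0;    /* miss: slow path in the real VM */
    o->ivptr[index] = val;
    if (dest != source) o->shape_id = dest; /* memoized shape transition */
    return 1;
}

int main(void) {
    struct toy_obj o = { .shape_id = 1 };
    toy_setivar(&o, 99, 1, 2, 0);   /* first @ivar= moves shape 1 -> 2 */
    printf("shape=%u iv0=%lu\n", o.shape_id, (unsigned long)o.ivptr[0]);
    return 0;
}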
static VALUE
@ -1522,22 +1377,7 @@ vm_getinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, IVC ic)
static inline void
vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic)
{
shape_id_t source_shape_id = vm_ic_attr_index_source_shape_id(ic);
attr_index_t index = vm_ic_attr_index(ic);
shape_id_t dest_shape_id = vm_ic_attr_index_dest_shape_id(ic);
if (UNLIKELY(vm_setivar(obj, id, val, source_shape_id, dest_shape_id, index) == Qundef)) {
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
case T_CLASS:
case T_MODULE:
break;
default:
if (vm_setivar_default(obj, id, val, source_shape_id, dest_shape_id, index) != Qundef) {
return;
}
}
vm_setivar_slowpath_ivar(obj, id, val, iseq, ic);
}
vm_setivar(obj, id, val, iseq, ic, 0, 0);
}
void
@ -1546,6 +1386,28 @@ rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IV
vm_setinstancevariable(iseq, obj, id, val, ic);
}
/* Set the instance variable +val+ on object +obj+ at the +index+.
* This function only works with T_OBJECT objects, so make sure
* +obj+ is of type T_OBJECT before using this function.
*/
VALUE
rb_vm_set_ivar_idx(VALUE obj, uint32_t index, VALUE val)
{
RUBY_ASSERT(RB_TYPE_P(obj, T_OBJECT));
rb_check_frozen_internal(obj);
VM_ASSERT(!rb_ractor_shareable_p(obj));
if (UNLIKELY(index >= ROBJECT_NUMIV(obj))) {
rb_init_iv_list(obj);
}
VALUE *ptr = ROBJECT_IVPTR(obj);
RB_OBJ_WRITE(obj, &ptr[index], val);
return val;
}
static VALUE
vm_throw_continue(const rb_execution_context_t *ec, VALUE err)
{
@ -3238,45 +3100,17 @@ vm_call_ivar(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_call
const struct rb_callcache *cc = calling->cc;
RB_DEBUG_COUNTER_INC(ccf_ivar);
cfp->sp -= 1;
VALUE ivar = vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE);
return ivar;
}
static VALUE
vm_call_attrset_direct(rb_execution_context_t *ec, rb_control_frame_t *cfp, const struct rb_callcache *cc, VALUE obj)
{
RB_DEBUG_COUNTER_INC(ccf_attrset);
VALUE val = *(cfp->sp - 1);
cfp->sp -= 2;
shape_id_t source_shape_id = vm_cc_attr_index_source_shape_id(cc);
attr_index_t index = vm_cc_attr_index(cc);
shape_id_t dest_shape_id = vm_cc_attr_index_dest_shape_id(cc);
ID id = vm_cc_cme(cc)->def->body.attr.id;
rb_check_frozen_internal(obj);
VALUE res = vm_setivar(obj, id, val, source_shape_id, dest_shape_id, index);
if (res == Qundef) {
switch (BUILTIN_TYPE(obj)) {
case T_OBJECT:
case T_CLASS:
case T_MODULE:
break;
default:
{
res = vm_setivar_default(obj, id, val, source_shape_id, dest_shape_id, index);
if (res != Qundef) {
return res;
}
}
}
res = vm_setivar_slowpath_attr(obj, id, val, cc);
}
return res;
return vm_getivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, NULL, NULL, cc, TRUE);
}
static VALUE
vm_call_attrset(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
return vm_call_attrset_direct(ec, cfp, calling->cc, calling->recv);
const struct rb_callcache *cc = calling->cc;
RB_DEBUG_COUNTER_INC(ccf_attrset);
VALUE val = *(cfp->sp - 1);
cfp->sp -= 2;
return vm_setivar(calling->recv, vm_cc_cme(cc)->def->body.attr.id, val, NULL, NULL, cc, 1);
}
bool
@ -3385,7 +3219,7 @@ vm_call_alias(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_cal
{
calling->cc = &VM_CC_ON_STACK(Qundef,
vm_call_general,
{{0}},
{ 0 },
aliased_callable_method_entry(vm_cc_cme(calling->cc)));
return vm_call_method_each_type(ec, cfp, calling);
@ -3555,7 +3389,7 @@ vm_call_method_missing_body(rb_execution_context_t *ec, rb_control_frame_t *reg_
ec->method_missing_reason = reason;
calling->ci = &VM_CI_ON_STACK(idMethodMissing, flag, argc, vm_ci_kwarg(orig_ci));
calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 },
rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL));
return vm_call_method(ec, reg_cfp, calling);
}
@ -3581,7 +3415,7 @@ vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca
cme = refined_method_callable_without_refinement(cme);
}
calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }}, cme);
calling->cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 }, cme);
return vm_call_method_each_type(ec, cfp, calling);
}
@ -3688,7 +3522,7 @@ search_refined_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struc
static VALUE
vm_call_refined(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling)
{
struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, {{ 0 }},
struct rb_callcache *ref_cc = &VM_CC_ON_STACK(Qundef, vm_call_general, { 0 },
search_refined_method(ec, cfp, calling));
if (vm_cc_cme(ref_cc)) {
@ -3868,45 +3702,18 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st
CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
rb_check_arity(calling->argc, 1, 1);
vm_cc_attr_index_initialize(cc);
const unsigned int aset_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT | VM_CALL_KWARG);
if (vm_cc_markable(cc)) {
vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
VM_CALL_METHOD_ATTR(v,
vm_call_attrset_direct(ec, cfp, cc, calling->recv),
CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
} else {
cc = &((struct rb_callcache) {
.flags = T_IMEMO |
(imemo_callcache << FL_USHIFT) |
VM_CALLCACHE_UNMARKABLE |
((VALUE)INVALID_SHAPE_ID << SHAPE_FLAG_SHIFT) |
VM_CALLCACHE_ON_STACK,
.klass = cc->klass,
.cme_ = cc->cme_,
.call_ = cc->call_,
.aux_ = {
.attr = {
.index = 0,
.dest_shape_id = INVALID_SHAPE_ID,
}
},
});
VM_CALL_METHOD_ATTR(v,
vm_call_attrset_direct(ec, cfp, cc, calling->recv),
CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
}
VM_CALL_METHOD_ATTR(v,
vm_call_attrset(ec, cfp, calling),
CC_SET_FASTPATH(cc, vm_call_attrset, !(vm_ci_flag(ci) & aset_mask)));
return v;
case VM_METHOD_TYPE_IVAR:
CALLER_SETUP_ARG(cfp, calling, ci);
CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci);
rb_check_arity(calling->argc, 0, 0);
if (vm_cc_markable(cc)) {
vm_cc_attr_index_initialize(cc, INVALID_SHAPE_ID);
}
vm_cc_attr_index_initialize(cc);
const unsigned int ivar_mask = (VM_CALL_ARGS_SPLAT | VM_CALL_KW_SPLAT);
VM_CALL_METHOD_ATTR(v,
vm_call_ivar(ec, cfp, calling),

View file

@ -40,7 +40,6 @@ fn main() {
.header("internal.h")
.header("internal/re.h")
.header("include/ruby/ruby.h")
.header("shape.h")
.header("vm_core.h")
.header("vm_callinfo.h")
@ -82,12 +81,6 @@ fn main() {
// This function prints info about a value and is useful for debugging
.allowlist_function("rb_obj_info_dump")
// From shape.h
.allowlist_function("rb_shape_get_shape_id")
.allowlist_function("rb_shape_get_shape_by_id")
.allowlist_function("rb_shape_flags_mask")
.allowlist_function("rb_shape_get_iv_index")
// From ruby/internal/intern/object.h
.allowlist_function("rb_obj_is_kind_of")

View file

@ -617,7 +617,7 @@ fn write_rm_multi(cb: &mut CodeBlock, op_mem_reg8: u8, op_mem_reg_pref: u8, op_r
write_rm(cb, sz_pref, rex_w, X86Opnd::None, opnd0, op_ext_imm, &[op_mem_imm_lrg]);
cb.write_int(uimm.value, if opnd_size > 32 { 32 } else { opnd_size.into() });
} else {
panic!("immediate value too large (num_bits={})", num_bits);
panic!("immediate value too large");
}
},
_ => unreachable!()

View file

@ -1938,9 +1938,11 @@ fn gen_set_ivar(
let val_opnd = ctx.stack_pop(1);
let recv_opnd = ctx.stack_pop(1);
// Call rb_vm_set_ivar_id with the receiver, the ivar name, and the value
let ivar_index: u32 = unsafe { rb_obj_ensure_iv_index_mapping(recv, ivar_name) };
// Call rb_vm_set_ivar_idx with the receiver, the index of the ivar, and the value
let val = asm.ccall(
rb_vm_set_ivar_id as *const u8,
rb_vm_set_ivar_idx as *const u8,
vec![
recv_opnd,
Opnd::UImm(ivar_name),
@ -2021,82 +2023,81 @@ fn gen_get_ivar(
return EndBlock;
}
let ivar_index = unsafe {
let shape_id = comptime_receiver.shape_of();
let shape = rb_shape_get_shape_by_id(shape_id);
let mut ivar_index: u32 = 0;
if rb_shape_get_iv_index(shape, ivar_name, &mut ivar_index) {
Some(ivar_index as usize)
} else {
None
}
};
// must be before stack_pop
let recv_type = ctx.get_opnd_type(recv_opnd);
// Upgrade type
if !recv_type.is_heap() {
ctx.upgrade_opnd_type(recv_opnd, Type::UnknownHeap);
}
// FIXME: Mapping the index could fail when there are too many ivar names. If we're
// compiling for a branch stub, that can cause the exception to be thrown from the
// wrong PC.
let ivar_index =
unsafe { rb_obj_ensure_iv_index_mapping(comptime_receiver, ivar_name) }.as_usize();
// Pop receiver if it's on the temp stack
if recv_opnd != SelfOpnd {
ctx.stack_pop(1);
}
// Guard heap object
if !recv_type.is_heap() {
guard_object_is_heap(asm, recv, side_exit);
if USE_RVARGC != 0 {
// Check that the ivar table is big enough
// Check that the slot is inside the ivar table (num_slots > index)
let num_slots = Opnd::mem(32, recv, ROBJECT_OFFSET_NUMIV);
asm.cmp(num_slots, Opnd::UImm(ivar_index as u64));
asm.jbe(counted_exit!(ocb, side_exit, getivar_idx_out_of_range).into());
}
// Compile time self is embedded and the ivar index lands within the object
let embed_test_result = unsafe { FL_TEST_RAW(comptime_receiver, VALUE(ROBJECT_EMBED.as_usize())) != VALUE(0) };
let flags_mask: usize = unsafe { rb_shape_flags_mask() }.as_usize();
let expected_flags_mask: usize = (RUBY_T_MASK as usize) | !flags_mask | (ROBJECT_EMBED as usize);
let expected_flags = comptime_receiver.builtin_flags() & expected_flags_mask;
// Combined guard for all flags: shape, embeddedness, and T_OBJECT
let flags_opnd = Opnd::mem(64, recv, RUBY_OFFSET_RBASIC_FLAGS);
asm.comment("guard shape, embedded, and T_OBJECT");
let flags_opnd = asm.and(flags_opnd, Opnd::UImm(expected_flags_mask as u64));
asm.cmp(flags_opnd, Opnd::UImm(expected_flags as u64));
jit_chain_guard(
JCC_JNE,
jit,
&starting_context,
asm,
ocb,
max_chain_depth,
side_exit,
);
// If there is no IVAR index, then the ivar was undefined
// when we entered the compiler. That means we can just return
// nil for this shape + iv name
if ivar_index.is_none() {
let out_opnd = ctx.stack_push(Type::Nil);
asm.mov(out_opnd, Qnil.into());
} else if embed_test_result {
let test_result = unsafe { FL_TEST_RAW(comptime_receiver, VALUE(ROBJECT_EMBED.as_usize())) != VALUE(0) };
if test_result {
// See ROBJECT_IVPTR() from include/ruby/internal/core/robject.h
// Guard that self is embedded
// TODO: BT and JC is shorter
asm.comment("guard embedded getivar");
let flags_opnd = Opnd::mem(64, recv, RUBY_OFFSET_RBASIC_FLAGS);
asm.test(flags_opnd, Opnd::UImm(ROBJECT_EMBED as u64));
let side_exit = counted_exit!(ocb, side_exit, getivar_megamorphic);
jit_chain_guard(
JCC_JZ,
jit,
&starting_context,
asm,
ocb,
max_chain_depth,
side_exit,
);
// Load the variable
let offs = ROBJECT_OFFSET_AS_ARY + (ivar_index.unwrap() * SIZEOF_VALUE) as i32;
let offs = ROBJECT_OFFSET_AS_ARY + (ivar_index * SIZEOF_VALUE) as i32;
let ivar_opnd = Opnd::mem(64, recv, offs);
// Guard that the variable is not Qundef
asm.cmp(ivar_opnd, Qundef.into());
let out_val = asm.csel_e(Qnil.into(), ivar_opnd);
// Push the ivar on the stack
let out_opnd = ctx.stack_push(Type::Unknown);
asm.mov(out_opnd, ivar_opnd);
asm.mov(out_opnd, out_val);
} else {
// Compile time value is *not* embedded.
// Guard that value is *not* embedded
// See ROBJECT_IVPTR() from include/ruby/internal/core/robject.h
asm.comment("guard extended getivar");
let flags_opnd = Opnd::mem(64, recv, RUBY_OFFSET_RBASIC_FLAGS);
asm.test(flags_opnd, Opnd::UImm(ROBJECT_EMBED as u64));
let megamorphic_side_exit = counted_exit!(ocb, side_exit, getivar_megamorphic);
jit_chain_guard(
JCC_JNZ,
jit,
&starting_context,
asm,
ocb,
max_chain_depth,
megamorphic_side_exit,
);
if USE_RVARGC == 0 {
// Check that the extended table is big enough
// Check that the slot is inside the extended table (num_slots > index)
let num_slots = Opnd::mem(32, recv, ROBJECT_OFFSET_NUMIV);
asm.cmp(num_slots, Opnd::UImm(ivar_index.unwrap() as u64));
asm.cmp(num_slots, Opnd::UImm(ivar_index as u64));
asm.jbe(counted_exit!(ocb, side_exit, getivar_idx_out_of_range).into());
}
@ -2104,10 +2105,15 @@ fn gen_get_ivar(
let tbl_opnd = asm.load(Opnd::mem(64, recv, ROBJECT_OFFSET_AS_HEAP_IVPTR));
// Read the ivar from the extended table
let ivar_opnd = Opnd::mem(64, tbl_opnd, (SIZEOF_VALUE * ivar_index.unwrap()) as i32);
let ivar_opnd = Opnd::mem(64, tbl_opnd, (SIZEOF_VALUE * ivar_index) as i32);
// Check that the ivar is not Qundef
asm.cmp(ivar_opnd, Qundef.into());
let out_val = asm.csel_ne(ivar_opnd, Qnil.into());
// Push the ivar on the stack
let out_opnd = ctx.stack_push(Type::Unknown);
asm.mov(out_opnd, ivar_opnd);
asm.mov(out_opnd, out_val);
}
// Jump to next instruction. This allows guard chains to share the same successor.
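
The embedded and extended branches above mirror ROBJECT_IVPTR(): small objects keep their ivars inline in the object slot, larger ones store a pointer to a heap-allocated table, and the ROBJECT_EMBED flag picks the representation. A simplified toy of that split:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_EMBED 0x1   /* stands in for ROBJECT_EMBED */

struct toy_robj {
    uintptr_t flags;
    union {
        uintptr_t ary[3];                                  /* embedded: inline ivars */
        struct { uint32_t numiv; uintptr_t *ivptr; } heap; /* extended: heap table */
    } as;
};

static uintptr_t *toy_ivptr(struct toy_robj *o) {
    return (o->flags & TOY_EMBED) ? o->as.ary : o->as.heap.ivptr;
}

int main(void) {
    struct toy_robj e = { .flags = TOY_EMBED };
    e.as.ary[0] = 10;
    struct toy_robj x = { .flags = 0 };
    x.as.heap.ivptr = calloc(4, sizeof(uintptr_t));
    if (!x.as.heap.ivptr) return 1;
    x.as.heap.ivptr[0] = 20;
    printf("%lu %lu\n", (unsigned long)toy_ivptr(&e)[0],
                        (unsigned long)toy_ivptr(&x)[0]);
    free(x.as.heap.ivptr);
    return 0;
}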
@ -2130,12 +2136,25 @@ fn gen_getinstancevariable(
let ivar_name = jit_get_arg(jit, 0).as_u64();
let comptime_val = jit_peek_at_self(jit);
let comptime_val_klass = comptime_val.class_of();
// Generate a side exit
let side_exit = get_side_exit(jit, ocb, ctx);
// Guard that the receiver has the same class as the one from compile time.
let self_asm_opnd = Opnd::mem(64, CFP, RUBY_OFFSET_CFP_SELF);
jit_guard_known_klass(
jit,
ctx,
asm,
ocb,
comptime_val_klass,
self_asm_opnd,
SelfOpnd,
comptime_val,
GET_IVAR_MAX_DEPTH,
side_exit,
);
gen_get_ivar(
jit,

View file

@ -120,7 +120,7 @@ extern "C" {
obj: VALUE,
v: VALUE,
) -> bool;
pub fn rb_vm_set_ivar_id(obj: VALUE, idx: u32, val: VALUE) -> VALUE;
pub fn rb_vm_set_ivar_idx(obj: VALUE, idx: u32, val: VALUE) -> VALUE;
pub fn rb_vm_setinstancevariable(iseq: IseqPtr, obj: VALUE, id: ID, val: VALUE, ic: IVC);
pub fn rb_aliased_callable_method_entry(
me: *const rb_callable_method_entry_t,
@ -354,26 +354,18 @@ impl VALUE {
/// Read the flags bits from the RBasic object, then return a Ruby type enum (e.g. RUBY_T_ARRAY)
pub fn builtin_type(self) -> ruby_value_type {
(self.builtin_flags() & (RUBY_T_MASK as usize)) as ruby_value_type
}
pub fn builtin_flags(self) -> usize {
assert!(!self.special_const_p());
let VALUE(cval) = self;
let rbasic_ptr = cval as *const RBasic;
let flags_bits: usize = unsafe { (*rbasic_ptr).flags }.as_usize();
return flags_bits;
(flags_bits & (RUBY_T_MASK as usize)) as ruby_value_type
}
pub fn class_of(self) -> VALUE {
unsafe { CLASS_OF(self) }
}
pub fn shape_of(self) -> u32 {
unsafe { rb_shape_get_shape_id(self) }
}
pub fn as_isize(self) -> isize {
let VALUE(is) = self;
is as isize

View file

@ -269,29 +269,6 @@ extern "C" {
extern "C" {
pub fn rb_reg_new_ary(ary: VALUE, options: ::std::os::raw::c_int) -> VALUE;
}
pub type attr_index_t = u32;
pub type shape_id_t = u32;
#[repr(C)]
pub struct rb_shape {
pub parent: *mut rb_shape,
pub edges: *mut rb_id_table,
pub edge_name: ID,
pub iv_count: attr_index_t,
pub type_: u8,
}
pub type rb_shape_t = rb_shape;
extern "C" {
pub fn rb_shape_get_shape_by_id(shape_id: shape_id_t) -> *mut rb_shape_t;
}
extern "C" {
pub fn rb_shape_get_shape_id(obj: VALUE) -> shape_id_t;
}
extern "C" {
pub fn rb_shape_get_iv_index(shape: *mut rb_shape_t, id: ID, value: *mut attr_index_t) -> bool;
}
extern "C" {
pub fn rb_shape_flags_mask() -> VALUE;
}
pub const idDot2: ruby_method_ids = 128;
pub const idDot3: ruby_method_ids = 129;
pub const idUPlus: ruby_method_ids = 132;
@ -595,11 +572,6 @@ pub const OPTIMIZED_METHOD_TYPE_STRUCT_AREF: method_optimized_type = 3;
pub const OPTIMIZED_METHOD_TYPE_STRUCT_ASET: method_optimized_type = 4;
pub const OPTIMIZED_METHOD_TYPE__MAX: method_optimized_type = 5;
pub type method_optimized_type = u32;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct rb_id_table {
_unused: [u8; 0],
}
extern "C" {
pub fn rb_method_entry_at(obj: VALUE, id: ID) -> *const rb_method_entry_t;
}
@ -628,10 +600,9 @@ pub struct iseq_inline_constant_cache {
pub segments: *const ID,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct iseq_inline_iv_cache_entry {
pub source_shape_id: shape_id_t,
pub dest_shape_id: shape_id_t,
pub attr_index: attr_index_t,
pub entry: *mut rb_iv_index_tbl_entry,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
@ -727,6 +698,12 @@ extern "C" {
) -> *const rb_callable_method_entry_t;
}
#[repr(C)]
pub struct rb_iv_index_tbl_entry {
pub index: u32,
pub class_serial: rb_serial_t,
pub class_value: VALUE,
}
#[repr(C)]
pub struct rb_cvar_class_tbl_entry {
pub index: u32,
pub global_cvar_state: rb_serial_t,
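
The restored rb_iv_index_tbl_entry validates hits by class serial rather than by shape: an entry is trusted only while the recorded class_serial still matches the receiver's class, so any class mutation bumps the serial and invalidates every cached index at once. Sketched standalone (toy types; rb_serial_t stands in as a plain 64-bit counter):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t rb_serial_t;   /* stand-in for CRuby's serial counter */

struct toy_entry { uint32_t index; rb_serial_t class_serial; };
struct toy_class { rb_serial_t serial; };

/* Valid only while the class has not been mutated since caching. */
static int toy_entry_valid(const struct toy_entry *e, const struct toy_class *k) {
    return e->class_serial == k->serial;
}

int main(void) {
    struct toy_class k = { .serial = 41 };
    struct toy_entry e = { .index = 2, .class_serial = 41 };
    printf("valid=%d\n", toy_entry_valid(&e, &k));
    k.serial++;   /* class mutated, e.g. an ivar or method was added */
    printf("after bump: valid=%d\n", toy_entry_valid(&e, &k));
    return 0;
}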