ruby--ruby/tool/ruby_vm/views/_mjit_compile_ivar.erb
Koichi Sasada f6661f5085 sync RClass::ext::iv_index_tbl
iv_index_tbl manages instance variable indexes (ID -> index).
This data structure should be synchronized across ractors,
so this patch introduces some VM locks around it.

This patch also introduces an atomic ivar cache (IVC) used by the
set/getinstancevariable instructions. To make IVC updates atomic,
the iv_index_tbl data structure was changed to manage (ID -> entry),
where an entry holds a class serial and an index. The IVC points to
this entry, so a cache update is a single pointer store and therefore
atomic.
2020-10-17 08:18:04 +09:00
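
A minimal sketch of the data shapes this implies, with field names inferred
from this file's uses of ic_copy->entry->index and ic_copy->entry->class_serial
(the real definitions live in the VM headers and may carry more fields):

    /* value type of iv_index_tbl (ID -> entry); simplified sketch */
    struct rb_iv_index_tbl_entry {
        uint32_t index;           /* ivar slot index within the object */
        rb_serial_t class_serial; /* class serial for which the index is valid */
    };

    /* inline cache at the access site (IVC) */
    struct iseq_inline_iv_cache_entry {
        struct rb_iv_index_tbl_entry *entry; /* swapping this one pointer updates
                                                index and serial together, atomically */
    };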

% # -*- C -*-
% # Copyright (c) 2018 Takashi Kokubun. All rights reserved.
% #
% # This file is a part of the programming language Ruby. Permission is hereby
% # granted, to either redistribute and/or modify this file, provided that the
% # conditions mentioned in the file COPYING are met. Consult the file for
% # details.
%
% # Optimized cases of getinstancevariable/setinstancevariable instructions.
#if OPT_IC_FOR_IVAR
{
% # compiler: Prepare operands which may be used by `insn.call_attribute`
% insn.opes.each_with_index do |ope, i|
    MAYBE_UNUSED(<%= ope.fetch(:decl) %>) = (<%= ope.fetch(:type) %>)operands[<%= i %>];
% end
% # compiler: Use copied IVC to avoid race condition
    IVC ic_copy = &(status->is_entries + ((union iseq_inline_storage_entry *)ic - body->is_entries))->iv_cache;
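% # (the offset of `ic` inside `body->is_entries` is reused to index into
% # `status->is_entries`, which appears to be a per-compilation copy of the
% # inline storage, so the JIT worker reads a stable snapshot of the cache)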
%
    if (!status->compile_info->disable_ivar_cache && ic_copy->entry) { // Only ic_copy is enabled.
% # JIT: optimize away motion of sp and pc. This path does not call rb_warning() and so it's always leaf and not `handles_sp`.
% # <%= render 'mjit_compile_pc_and_sp', locals: { insn: insn } -%>
%
% # JIT: prepare vm_getivar/vm_setivar arguments and variables
        fprintf(f, "{\n");
        fprintf(f, "    VALUE obj = GET_SELF();\n");
        fprintf(f, "    const uint32_t index = %u;\n", (ic_copy->entry->index));
        if (status->merge_ivar_guards_p) {
% # JIT: Access ivar without checking these VM_ASSERTed prerequisites as we checked them in the beginning of `mjit_compile_body`
            fprintf(f, "    VM_ASSERT(RB_TYPE_P(obj, T_OBJECT));\n");
            fprintf(f, "    VM_ASSERT((rb_serial_t)%"PRI_SERIALT_PREFIX"u == RCLASS_SERIAL(RBASIC(obj)->klass));\n", ic_copy->entry->class_serial);
            fprintf(f, "    VM_ASSERT(index < ROBJECT_NUMIV(obj));\n");
% if insn.name == 'setinstancevariable'
            fprintf(f, "    if (LIKELY(!RB_OBJ_FROZEN(obj) && %sRB_FL_ANY_RAW(obj, ROBJECT_EMBED))) {\n", status->max_ivar_index >= ROBJECT_EMBED_LEN_MAX ? "!" : "");
            fprintf(f, "        RB_OBJ_WRITE(obj, &ROBJECT(obj)->as.%s, stack[%d]);\n",
                    status->max_ivar_index >= ROBJECT_EMBED_LEN_MAX ? "heap.ivptr[index]" : "ary[index]", b->stack_size - 1);
            fprintf(f, "    }\n");
% else
            fprintf(f, "    VALUE val;\n");
            fprintf(f, "    if (LIKELY(%sRB_FL_ANY_RAW(obj, ROBJECT_EMBED) && (val = ROBJECT(obj)->as.%s) != Qundef)) {\n",
                    status->max_ivar_index >= ROBJECT_EMBED_LEN_MAX ? "!" : "",
                    status->max_ivar_index >= ROBJECT_EMBED_LEN_MAX ? "heap.ivptr[index]" : "ary[index]");
            fprintf(f, "        stack[%d] = val;\n", b->stack_size);
            fprintf(f, "    }\n");
% end
        }
        else {
            fprintf(f, "    const rb_serial_t ic_serial = (rb_serial_t)%"PRI_SERIALT_PREFIX"u;\n", ic_copy->entry->class_serial);
% # JIT: cache hit path of vm_getivar/vm_setivar, or cancel JIT (recompile it with exivar)
% if insn.name == 'setinstancevariable'
            fprintf(f, "    if (LIKELY(RB_TYPE_P(obj, T_OBJECT) && ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass) && index < ROBJECT_NUMIV(obj) && !RB_OBJ_FROZEN(obj))) {\n");
            fprintf(f, "        VALUE *ptr = ROBJECT_IVPTR(obj);\n");
            fprintf(f, "        RB_OBJ_WRITE(obj, &ptr[index], stack[%d]);\n", b->stack_size - 1);
            fprintf(f, "    }\n");
% else
            fprintf(f, "    VALUE val;\n");
            fprintf(f, "    if (LIKELY(RB_TYPE_P(obj, T_OBJECT) && ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass) && index < ROBJECT_NUMIV(obj) && (val = ROBJECT_IVPTR(obj)[index]) != Qundef)) {\n");
            fprintf(f, "        stack[%d] = val;\n", b->stack_size);
            fprintf(f, "    }\n");
% end
        }
fprintf(f, " else {\n");
fprintf(f, " reg_cfp->pc = original_body_iseq + %d;\n", pos);
fprintf(f, " reg_cfp->sp = vm_base_ptr(reg_cfp) + %d;\n", b->stack_size);
fprintf(f, " goto ivar_cancel;\n");
fprintf(f, " }\n");
% # compiler: Move JIT compiler's internal stack pointer
        b->stack_size += <%= insn.call_attribute('sp_inc') %>;
        fprintf(f, "}\n");
        break;
    }
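% # (exivar: objects that cannot embed ivars in the T_OBJECT layout, e.g. a
% # T_STRING given an ivar, keep them in a generic side table, struct gen_ivtbl;
% # FL_EXIVAR marks such objects)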
% if insn.name == 'getinstancevariable'
    else if (!status->compile_info->disable_exivar_cache && ic_copy->entry) {
% # JIT: optimize away motion of sp and pc. This path does not call rb_warning() and so it's always leaf and not `handles_sp`.
% # <%= render 'mjit_compile_pc_and_sp', locals: { insn: insn } -%>
%
% # JIT: prepare vm_getivar's arguments and variables
        fprintf(f, "{\n");
        fprintf(f, "    VALUE obj = GET_SELF();\n");
        fprintf(f, "    const rb_serial_t ic_serial = (rb_serial_t)%"PRI_SERIALT_PREFIX"u;\n", ic_copy->entry->class_serial);
        fprintf(f, "    const uint32_t index = %u;\n", ic_copy->entry->index);
% # JIT: cache hit path of vm_getivar, or cancel JIT (recompile it without any ivar optimization)
        fprintf(f, "    struct gen_ivtbl *ivtbl;\n");
        fprintf(f, "    VALUE val;\n");
        fprintf(f, "    if (LIKELY(FL_TEST_RAW(obj, FL_EXIVAR) && ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass) && rb_ivar_generic_ivtbl_lookup(obj, &ivtbl) && index < ivtbl->numiv && (val = ivtbl->ivptr[index]) != Qundef)) {\n");
        fprintf(f, "        stack[%d] = val;\n", b->stack_size);
        fprintf(f, "    }\n");
        fprintf(f, "    else {\n");
        fprintf(f, "        reg_cfp->pc = original_body_iseq + %d;\n", pos);
        fprintf(f, "        reg_cfp->sp = vm_base_ptr(reg_cfp) + %d;\n", b->stack_size);
        fprintf(f, "        goto exivar_cancel;\n");
        fprintf(f, "    }\n");
% # compiler: Move JIT compiler's internal stack pointer
        b->stack_size += <%= insn.call_attribute('sp_inc') %>;
        fprintf(f, "}\n");
        break;
    }
% end
}
#endif // OPT_IC_FOR_IVAR
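
For illustration, roughly what this template emits for a getinstancevariable
site on the non-merged-guards path. The index, serial, stack slot, and pc
offset below are placeholder values that the JIT bakes in at compile time;
the actual numbers depend on the method being compiled:

    {
        VALUE obj = GET_SELF();
        const uint32_t index = 1;
        const rb_serial_t ic_serial = (rb_serial_t)2342u;
        VALUE val;
        if (LIKELY(RB_TYPE_P(obj, T_OBJECT) && ic_serial == RCLASS_SERIAL(RBASIC(obj)->klass) && index < ROBJECT_NUMIV(obj) && (val = ROBJECT_IVPTR(obj)[index]) != Qundef)) {
            stack[0] = val;
        }
        else {
            reg_cfp->pc = original_body_iseq + 2;
            reg_cfp->sp = vm_base_ptr(reg_cfp) + 0;
            goto ivar_cancel;
        }
    }

If the guard ever fails, the generated code restores pc/sp and jumps to
ivar_cancel, and the method is recompiled with this optimization disabled
(or with the exivar variant, per the comments above).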