Yet Another Ruby JIT!

Renaming uJIT to YJIT. AKA s/ujit/yjit/g.
Jose Narvaez 2021-03-06 23:46:56 +00:00 committed by Alan Wu
parent 7f7e79d802
commit 4e2eb7695e
36 changed files with 1312 additions and 1320 deletions

4
.gitignore vendored

@ -229,5 +229,5 @@ lcov*.info
/mjit_config.h
/include/ruby-*/*/rb_mjit_min_header-*.h
# UJIT
/ujit_hooks.inc
# YJIT
/yjit_hooks.inc

1644
common.mk

File diff suppressed because it is too large


@ -99,7 +99,7 @@ rb_call_builtin_inits(void)
BUILTIN(timev);
BUILTIN(nilclass);
BUILTIN(marshal);
BUILTIN(ujit);
BUILTIN(yjit);
Init_builtin_prelude();
}
#undef CALL

12
iseq.c

@ -38,7 +38,7 @@
#include "ruby/util.h"
#include "vm_core.h"
#include "vm_callinfo.h"
#include "ujit.h"
#include "yjit.h"
#include "builtin.h"
#include "insns.inc"
@ -110,7 +110,7 @@ rb_iseq_free(const rb_iseq_t *iseq)
if (iseq && iseq->body) {
struct rb_iseq_constant_body *const body = iseq->body;
mjit_free_iseq(iseq); /* Notify MJIT */
rb_ujit_iseq_free(body);
rb_yjit_iseq_free(body);
ruby_xfree((void *)body->iseq_encoded);
ruby_xfree((void *)body->insns_info.body);
if (body->insns_info.positions) ruby_xfree((void *)body->insns_info.positions);
@ -323,7 +323,7 @@ rb_iseq_update_references(rb_iseq_t *iseq)
#if USE_MJIT
mjit_update_references(iseq);
#endif
rb_ujit_iseq_update_references(body);
rb_yjit_iseq_update_references(body);
}
}
@ -404,7 +404,7 @@ rb_iseq_mark(const rb_iseq_t *iseq)
#if USE_MJIT
mjit_mark_cc_entries(body);
#endif
rb_ujit_iseq_mark(body);
rb_yjit_iseq_mark(body);
}
if (FL_TEST_RAW((VALUE)iseq, ISEQ_NOT_LOADED_YET)) {
@ -3184,7 +3184,7 @@ static insn_data_t insn_data[VM_INSTRUCTION_SIZE/2];
#include "ujit_asm.h"
#include "yjit_asm.h"
@ -3490,7 +3490,7 @@ trace_set_i(void *vstart, void *vend, size_t stride, void *data)
}
void
rb_ujit_empty_func_with_ec(rb_control_frame_t *cfp, rb_execution_context_t *ec)
rb_yjit_empty_func_with_ec(rb_control_frame_t *cfp, rb_execution_context_t *ec)
{
// it's put in this file instead of say, compile.c to dodge long C compile time.
// it just needs to be in a different unit from vm.o so the compiler can't see the definition

2
iseq.h

@ -315,7 +315,7 @@ VALUE rb_iseq_defined_string(enum defined_type type);
/* vm.c */
VALUE rb_iseq_local_variables(const rb_iseq_t *iseq);
NOINLINE(void rb_ujit_empty_func_with_ec(rb_control_frame_t *cfp, rb_execution_context_t *ec));
NOINLINE(void rb_yjit_empty_func_with_ec(rb_control_frame_t *cfp, rb_execution_context_t *ec));
RUBY_SYMBOL_EXPORT_END

9
mjit.h

@ -16,7 +16,8 @@
#include "debug_counter.h"
#include "ruby.h"
#include "ujit.h"
#include "vm_core.h"
#include "yjit.h"
// Special address values of a function generated from the
// corresponding iseq by MJIT:
@ -143,15 +144,15 @@ mjit_exec(rb_execution_context_t *ec)
const rb_iseq_t *iseq;
struct rb_iseq_constant_body *body;
if (mjit_call_p || rb_ujit_enabled_p()) {
if (mjit_call_p || rb_yjit_enabled_p()) {
iseq = ec->cfp->iseq;
body = iseq->body;
body->total_calls++;
}
#ifndef MJIT_HEADER
if (rb_ujit_enabled_p() && !mjit_call_p && body->total_calls == rb_ujit_call_threshold()) {
rb_ujit_compile_iseq(iseq, ec);
if (rb_yjit_enabled_p() && !mjit_call_p && body->total_calls == rb_yjit_call_threshold()) {
rb_yjit_compile_iseq(iseq, ec);
return Qundef;
}
#endif
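
Read together, the two mjit_exec() hunks above show how the renamed hooks gate compilation: the per-iseq call counter is bumped whenever either JIT is enabled, and YJIT compiles the iseq exactly when the counter reaches the call threshold. A condensed, hedged restatement of that flow (hypothetical helper name; the real logic stays inline in mjit_exec):

static inline VALUE
yjit_threshold_gate_sketch(rb_execution_context_t *ec)
{
    const rb_iseq_t *iseq = ec->cfp->iseq;
    struct rb_iseq_constant_body *body = iseq->body;

    if (mjit_call_p || rb_yjit_enabled_p())
        body->total_calls++;                  // both JITs share this per-iseq counter

    if (rb_yjit_enabled_p() && !mjit_call_p
            && body->total_calls == rb_yjit_call_threshold()) {
        rb_yjit_compile_iseq(iseq, ec);       // compile once, exactly at the threshold
        return Qundef;                        // Qundef: no MJIT-compiled code to enter
    }

    // ... the MJIT dispatch path continues here in the real function
    return Qundef;
}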


@ -16,7 +16,7 @@
#include "variable.h"
#include "gc.h"
#include "transient_heap.h"
#include "ujit.h"
#include "yjit.h"
VALUE rb_cRactor;
@ -1605,7 +1605,7 @@ ractor_create(rb_execution_context_t *ec, VALUE self, VALUE loc, VALUE name, VAL
r->verbose = cr->verbose;
r->debug = cr->debug;
rb_ujit_before_ractor_spawn();
rb_yjit_before_ractor_spawn();
rb_thread_create_ractor(r, args, block);
RB_GC_GUARD(rv);

28
ruby.c

@ -59,7 +59,7 @@
#include "internal/process.h"
#include "internal/variable.h"
#include "mjit.h"
#include "ujit.h"
#include "yjit.h"
#include "ruby/encoding.h"
#include "ruby/thread.h"
#include "ruby/util.h"
@ -105,7 +105,7 @@ void rb_warning_category_update(unsigned int mask, unsigned int bits);
SEP \
X(jit) \
SEP \
X(ujit)
X(yjit)
/* END OF FEATURES */
#define EACH_DEBUG_FEATURES(X, SEP) \
X(frozen_string_literal) \
@ -189,7 +189,7 @@ struct ruby_cmdline_options {
#if USE_MJIT
struct mjit_options mjit;
#endif
struct rb_ujit_options ujit;
struct rb_yjit_options yjit;
int sflag, xflag;
unsigned int warning: 1;
@ -234,7 +234,7 @@ cmdline_options_init(ruby_cmdline_options_t *opt)
#ifdef MJIT_FORCE_ENABLE /* to use with: ./configure cppflags="-DMJIT_FORCE_ENABLE" */
opt->features.set |= FEATURE_BIT(jit);
#endif
opt->features.set |= FEATURE_BIT(ujit);
opt->features.set |= FEATURE_BIT(yjit);
return opt;
}
@ -333,7 +333,7 @@ usage(const char *name, int help, int highlight, int columns)
M("rubyopt", "", "RUBYOPT environment variable (default: enabled)"),
M("frozen-string-literal", "", "freeze all string literals (default: disabled)"),
M("jit", "", "JIT compiler (default: disabled)"),
M("ujit", "", "in-process JIT compiler (default: enabled)"),
M("yjit", "", "in-process JIT compiler (default: enabled)"),
};
static const struct message warn_categories[] = {
M("deprecated", "", "deprecated features"),
@ -1031,20 +1031,20 @@ set_option_encoding_once(const char *type, VALUE *name, const char *e, long elen
opt_match(s, l, name) && (*(s) ? 1 : (rb_raise(rb_eRuntimeError, "--jit-" name " needs an argument"), 0))
static void
setup_ujit_options(const char *s, struct rb_ujit_options *ujit_opt)
setup_yjit_options(const char *s, struct rb_yjit_options *yjit_opt)
{
if (*s != '-') return;
const size_t l = strlen(++s);
if (opt_match_arg(s, l, "call-threshold")) {
ujit_opt->call_threshold = atoi(s + 1);
yjit_opt->call_threshold = atoi(s + 1);
}
else if (opt_match_noarg(s, l, "stats")) {
ujit_opt->gen_stats = true;
yjit_opt->gen_stats = true;
}
else {
rb_raise(rb_eRuntimeError,
"invalid ujit option `%s' (--help will show valid ujit options)", s);
"invalid yjit option `%s' (--help will show valid yjit options)", s);
}
}
@ -1461,9 +1461,9 @@ proc_options(long argc, char **argv, ruby_cmdline_options_t *opt, int envopt)
rb_warn("MJIT support is disabled.");
#endif
}
else if (strncmp("ujit", s, 4) == 0) {
FEATURE_SET(opt->features, FEATURE_BIT(ujit));
setup_ujit_options(s + 4, &opt->ujit);
else if (strncmp("yjit", s, 4) == 0) {
FEATURE_SET(opt->features, FEATURE_BIT(yjit));
setup_yjit_options(s + 4, &opt->yjit);
}
else if (strcmp("yydebug", s) == 0) {
if (envopt) goto noenvopt_long;
@ -1825,8 +1825,8 @@ process_options(int argc, char **argv, ruby_cmdline_options_t *opt)
*/
rb_warning("-K is specified; it is for 1.8 compatibility and may cause odd behavior");
if (opt->features.set & FEATURE_BIT(ujit))
rb_ujit_init(&opt->ujit);
if (opt->features.set & FEATURE_BIT(yjit))
rb_yjit_init(&opt->yjit);
#if USE_MJIT
if (opt->features.set & FEATURE_BIT(jit)) {
opt->mjit.on = TRUE; /* set mjit.on for ruby_show_version() API and check to call mjit_init() */
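
The renamed option plumbing above works as follows: the yjit feature bit is set by default in cmdline_options_init(), "--yjit" suboptions are parsed by setup_yjit_options() into struct rb_yjit_options, and process_options() hands that struct to rb_yjit_init(). A hedged illustration with made-up values (the flag spellings --yjit-call-threshold=N and --yjit-stats are inferred from the parser above):

// Roughly what reaches rb_yjit_init() after parsing
// "--yjit-call-threshold=10 --yjit-stats" on the command line.
struct rb_yjit_options parsed = {
    .call_threshold = 10,    // --yjit-call-threshold=10
    .gen_stats      = true,  // --yjit-stats
};
rb_yjit_init(&parsed);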


@ -590,7 +590,7 @@ update-known-errors:
$(IFCHANGE) $(srcdir)/defs/known_errors.def -
INSNS = opt_sc.inc optinsn.inc optunifs.inc insns.inc insns_info.inc \
vmtc.inc vm.inc mjit_compile.inc ujit_hooks.inc
vmtc.inc vm.inc mjit_compile.inc yjit_hooks.inc
$(INSNS): $(srcdir)/insns.def vm_opts.h \
$(srcdir)/defs/opt_operand.def $(srcdir)/defs/opt_insn_unif.def \


@ -3,7 +3,7 @@
clear
clang -std=gnu99 -Wall -Werror -Wshorten-64-to-32 ujit_asm.c ujit_asm_tests.c -o asm_test
clang -std=gnu99 -Wall -Werror -Wshorten-64-to-32 yjit_asm.c yjit_asm_tests.c -o asm_test
./asm_test


@ -179,7 +179,7 @@ module RubyVM::MicroJIT
def make_result(success, with_pc)
[success ? 1 : 0,
[
['ujit_with_ec', with_pc],
['yjit_with_ec', with_pc],
]
]
end
@ -197,7 +197,7 @@ module RubyVM::MicroJIT
end
def scrape
with_ec = scrape_instruction(RubyVM::Instructions.find_index { |insn| insn.name == 'ujit_call_example_with_ec' })
with_ec = scrape_instruction(RubyVM::Instructions.find_index { |insn| insn.name == 'yjit_call_example_with_ec' })
make_result(true, with_ec)
rescue => e
print_warning("scrape failed: #{e.message}")
@ -207,7 +207,7 @@ module RubyVM::MicroJIT
end
def print_warning(text)
text = "ujit warning: #{text}"
text = "yjit warning: #{text}"
text = "\x1b[1m#{text}\x1b[0m" if STDOUT.tty?
STDOUT.puts(text)
end


@ -64,6 +64,6 @@ class RubyVM::MicroJIT::ExampleInstructions
end
def self.to_a
[new('ujit_call_example_with_ec')]
[new('yjit_call_example_with_ec')]
end
end


@ -26,16 +26,16 @@
% end
%
% RubyVM::MicroJIT::ExampleInstructions.to_a.each do |insn|
INSN_ENTRY(ujit_call_example_with_ec)
INSN_ENTRY(yjit_call_example_with_ec)
{
START_OF_ORIGINAL_INSN(ujit_call_example_with_ec);
START_OF_ORIGINAL_INSN(yjit_call_example_with_ec);
#if USE_MACHINE_REGS
// assumes USE_MACHINE_REGS, aka reg_pc setup,
// aka #define SET_PC(x) (reg_cfp->pc = reg_pc = (x))
rb_ujit_empty_func_with_ec(GET_CFP(), ec);
rb_yjit_empty_func_with_ec(GET_CFP(), ec);
RESTORE_REGS();
#endif
END_INSN(ujit_call_example_with_ec);
END_INSN(yjit_call_example_with_ec);
}
% end
%


@ -13,7 +13,7 @@
} -%>
% success, byte_arrays = RubyVM::MicroJIT.scrape
static const uint8_t ujit_scrape_successful = <%= success %>;
static const uint8_t yjit_scrape_successful = <%= success %>;
% byte_arrays.each do |(prefix, scrape_result)|
// Disassembly:
% scrape_result.disassembly_lines.each do |line|


@ -11,8 +11,8 @@ when ENV['RUNRUBY_USE_GDB'] == 'true'
debugger = :gdb
when ENV['RUNRUBY_USE_LLDB'] == 'true'
debugger = :lldb
when ENV['RUNRUBY_UJIT_STATS']
use_ujit_stat = true
when ENV['RUNRUBY_YJIT_STATS']
use_yjit_stat = true
end
while arg = ARGV[0]
break ARGV.shift if arg == '--'
@ -166,8 +166,8 @@ if debugger
end
cmd = [runner || ruby]
if use_ujit_stat
cmd << '--ujit-stats'
if use_yjit_stat
cmd << '--yjit-stats'
end
cmd.concat(ARGV)
cmd.unshift(*precommand) unless precommand.empty?

61
ujit.h

@ -1,61 +0,0 @@
//
// This file contains definitions uJIT exposes to the CRuby codebase
//
#ifndef UJIT_H
#define UJIT_H 1
#include "stddef.h"
#include "stdint.h"
#include "stdbool.h"
#include "method.h"
#ifdef _WIN32
#define PLATFORM_SUPPORTED_P 0
#else
#define PLATFORM_SUPPORTED_P 1
#endif
#ifndef UJIT_CHECK_MODE
#define UJIT_CHECK_MODE 0
#endif
// >= 1: print when output code invalidation happens
// >= 2: dump list of instructions when regions compile
#ifndef UJIT_DUMP_MODE
#define UJIT_DUMP_MODE 0
#endif
#ifndef rb_iseq_t
typedef struct rb_iseq_struct rb_iseq_t;
#define rb_iseq_t rb_iseq_t
#endif
struct rb_ujit_options {
bool ujit_enabled;
// Number of method calls after which to start generating code
// Threshold==1 means compile on first execution
unsigned call_threshold;
// Capture and print out stats
bool gen_stats;
};
RUBY_SYMBOL_EXPORT_BEGIN
bool rb_ujit_enabled_p(void);
unsigned rb_ujit_call_threshold(void);
RUBY_SYMBOL_EXPORT_END
void rb_ujit_collect_vm_usage_insn(int insn);
void rb_ujit_method_lookup_change(VALUE cme_or_cc);
void rb_ujit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec);
void rb_ujit_init(struct rb_ujit_options *options);
void rb_ujit_bop_redefined(VALUE klass, const rb_method_entry_t *me, enum ruby_basic_operators bop);
void rb_ujit_constant_state_changed(void);
void rb_ujit_iseq_mark(const struct rb_iseq_constant_body *body);
void rb_ujit_iseq_update_references(const struct rb_iseq_constant_body *body);
void rb_ujit_iseq_free(const struct rb_iseq_constant_body *body);
void rb_ujit_before_ractor_spawn(void);
#endif // #ifndef UJIT_H


@ -13,7 +13,7 @@
#include "version.h"
#include "vm_core.h"
#include "mjit.h"
#include "ujit.h"
#include "yjit.h"
#include <stdio.h>
#ifndef EXIT_SUCCESS
@ -125,7 +125,7 @@ ruby_show_version(void)
PRINT(description);
}
if (rb_ujit_enabled_p()) {
if (rb_yjit_enabled_p()) {
fputs("YJIT is enabled\n", stdout);
}
#ifdef RUBY_LAST_COMMIT_TITLE

12
vm.c

@ -37,7 +37,7 @@
#include "vm_insnhelper.h"
#include "ractor_core.h"
#include "vm_sync.h"
#include "ujit.h"
#include "yjit.h"
#include "builtin.h"
@ -346,7 +346,7 @@ static void vm_collect_usage_register(int reg, int isset);
#endif
#if RUBY_DEBUG
static void vm_ujit_collect_usage_insn(int insn);
static void vm_yjit_collect_usage_insn(int insn);
#endif
static VALUE vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp);
@ -1854,9 +1854,9 @@ rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass)
klass = RBASIC_CLASS(klass);
}
if (vm_redefinition_check_method_type(me->def)) {
if (st_lookup(vm_opt_method_def_table, (st_data_t)me->def, &bop)) {
if (st_lookup(vm_opt_method_table, (st_data_t)me, &bop)) {
int flag = vm_redefinition_check_flag(klass);
rb_ujit_bop_redefined(klass, me, (enum ruby_basic_operators)bop);
rb_yjit_bop_redefined(klass, me, (enum ruby_basic_operators)bop);
ruby_vm_redefined_flag[bop] |= flag;
}
@ -4063,9 +4063,9 @@ MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_register)(int reg, int iss
#if RUBY_DEBUG
static void
vm_ujit_collect_usage_insn(int insn)
vm_yjit_collect_usage_insn(int insn)
{
rb_ujit_collect_vm_usage_insn(insn);
rb_yjit_collect_vm_usage_insn(insn);
}
#endif


@ -313,8 +313,8 @@ pathobj_realpath(VALUE pathobj)
struct rb_mjit_unit;
// List of YJIT block versions
typedef rb_darray(struct ujit_block_version *) rb_ujit_block_array_t;
typedef rb_darray(rb_ujit_block_array_t) rb_ujit_block_array_array_t;
typedef rb_darray(struct yjit_block_version *) rb_yjit_block_array_t;
typedef rb_darray(rb_yjit_block_array_t) rb_yjit_block_array_array_t;
struct rb_iseq_constant_body {
enum iseq_type {
@ -455,7 +455,7 @@ struct rb_iseq_constant_body {
struct rb_mjit_unit *jit_unit;
#endif
rb_ujit_block_array_array_t ujit_blocks; // empty, or has a size equal to iseq_size
rb_yjit_block_array_array_t yjit_blocks; // empty, or has a size equal to iseq_size
};
/* T_IMEMO/iseq */
@ -797,7 +797,7 @@ typedef struct rb_control_frame_struct {
#if VM_DEBUG_BP_CHECK
VALUE *bp_check; /* cfp[7] */
#endif
// Return address for uJIT code
// Return address for YJIT code
void *jit_return;
} rb_control_frame_t;


@ -81,7 +81,7 @@ error !
RSTRING_PTR(rb_iseq_path(reg_cfp->iseq)), \
rb_iseq_line_no(reg_cfp->iseq, reg_pc - reg_cfp->iseq->body->iseq_encoded)); \
} \
if (USE_INSNS_COUNTER && BIN(insn) != BIN(ujit_call_example_with_ec)) vm_insns_counter_count_insn(BIN(insn));
if (USE_INSNS_COUNTER && BIN(insn) != BIN(yjit_call_example_with_ec)) vm_insns_counter_count_insn(BIN(insn));
#define INSN_DISPATCH_SIG(insn)


@ -26,8 +26,8 @@ MJIT_SYMBOL_EXPORT_END
#define COLLECT_USAGE_REGISTER(reg, s) vm_collect_usage_register((reg), (s))
#elif RUBY_DEBUG
/* for --ujit-stats */
#define COLLECT_USAGE_INSN(insn) vm_ujit_collect_usage_insn(insn)
/* for --yjit-stats */
#define COLLECT_USAGE_INSN(insn) vm_yjit_collect_usage_insn(insn)
#define COLLECT_USAGE_OPERAND(insn, n, op) /* none */
#define COLLECT_USAGE_REGISTER(reg, s) /* none */
#else


@ -3,7 +3,7 @@
*/
#include "id_table.h"
#include "ujit.h"
#include "yjit.h"
#define METHOD_DEBUG 0
@ -123,7 +123,7 @@ rb_vm_cc_invalidate(const struct rb_callcache *cc)
VM_ASSERT(cc->klass != 0); // should be enable
*(VALUE *)&cc->klass = 0;
rb_ujit_method_lookup_change((VALUE)cc);
rb_yjit_method_lookup_change((VALUE)cc);
RB_DEBUG_COUNTER_INC(cc_ent_invalidate);
}
@ -135,13 +135,13 @@ vm_cme_invalidate(rb_callable_method_entry_t *cme)
VM_ASSERT(callable_method_entry_p(cme));
METHOD_ENTRY_INVALIDATED_SET(cme);
RB_DEBUG_COUNTER_INC(cc_cme_invalidate);
rb_ujit_method_lookup_change((VALUE)cme);
rb_yjit_method_lookup_change((VALUE)cme);
}
void
rb_clear_constant_cache(void)
{
rb_ujit_constant_state_changed();
rb_yjit_constant_state_changed();
INC_GLOBAL_CONSTANT_STATE();
}


@ -1347,7 +1347,7 @@ $(MJIT_PRECOMPILED_HEADER): $(MJIT_PRECOMPILED_HEADER_NAME)
$(Q) $(MAKE_LINK) $(MJIT_PRECOMPILED_HEADER_NAME:.pch=.pdb) $(arch_hdrdir)/$(MJIT_PRECOMPILED_HEADER_NAME:.pch=.pdb)
INSNS = opt_sc.inc optinsn.inc optunifs.inc insns.inc insns_info.inc \
vmtc.inc vm.inc mjit_compile.inc ujit_hooks.inc
vmtc.inc vm.inc mjit_compile.inc yjit_hooks.inc
!if [exit > insns_rules.mk]
!else if [for %I in ($(INSNS)) do \

61
yjit.h Normal file

@ -0,0 +1,61 @@
//
// This file contains definitions YJIT exposes to the CRuby codebase
//
#ifndef YJIT_H
#define YJIT_H 1
#include "stddef.h"
#include "stdint.h"
#include "stdbool.h"
#include "method.h"
#ifdef _WIN32
#define PLATFORM_SUPPORTED_P 0
#else
#define PLATFORM_SUPPORTED_P 1
#endif
#ifndef YJIT_CHECK_MODE
#define YJIT_CHECK_MODE 0
#endif
// >= 1: print when output code invalidation happens
// >= 2: dump list of instructions when regions compile
#ifndef YJIT_DUMP_MODE
#define YJIT_DUMP_MODE 0
#endif
#ifndef rb_iseq_t
typedef struct rb_iseq_struct rb_iseq_t;
#define rb_iseq_t rb_iseq_t
#endif
struct rb_yjit_options {
bool yjit_enabled;
// Number of method calls after which to start generating code
// Threshold==1 means compile on first execution
unsigned call_threshold;
// Capture and print out stats
bool gen_stats;
};
RUBY_SYMBOL_EXPORT_BEGIN
bool rb_yjit_enabled_p(void);
unsigned rb_yjit_call_threshold(void);
RUBY_SYMBOL_EXPORT_END
void rb_yjit_collect_vm_usage_insn(int insn);
void rb_yjit_method_lookup_change(VALUE cme_or_cc);
void rb_yjit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec);
void rb_yjit_init(struct rb_yjit_options *options);
void rb_yjit_bop_redefined(VALUE klass, const rb_method_entry_t *me, enum ruby_basic_operators bop);
void rb_yjit_constant_state_changed(void);
void rb_yjit_iseq_mark(const struct rb_iseq_constant_body *body);
void rb_yjit_iseq_update_references(const struct rb_iseq_constant_body *body);
void rb_yjit_iseq_free(const struct rb_iseq_constant_body *body);
void rb_yjit_before_ractor_spawn(void);
#endif // #ifndef YJIT_H
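
The GC- and invalidation-facing hooks declared above are wired in by the other hunks in this commit. A hedged sketch of the representative rb_iseq_free() call site, condensed from the iseq.c hunk earlier (sketch only, with a hypothetical name, not the full function):

void
rb_iseq_free_sketch(const rb_iseq_t *iseq)
{
    if (iseq && iseq->body) {
        struct rb_iseq_constant_body *const body = iseq->body;
        mjit_free_iseq(iseq);        /* notify MJIT */
        rb_yjit_iseq_free(body);     /* notify YJIT so it can drop its block versions */
        ruby_xfree((void *)body->iseq_encoded);
    }
}

The remaining hooks follow the same pattern: rb_yjit_iseq_mark() from rb_iseq_mark(), rb_yjit_iseq_update_references() from rb_iseq_update_references(), rb_yjit_before_ractor_spawn() from ractor_create(), and rb_yjit_method_lookup_change() / rb_yjit_constant_state_changed() from the cache-invalidation paths in vm_method.c.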


@ -1,13 +1,13 @@
module UJIT
module YJIT
def self.disasm(iseq)
iseq = RubyVM::InstructionSequence.of(iseq)
blocks = UJIT.blocks_for(iseq)
blocks = YJIT.blocks_for(iseq)
return if blocks.empty?
str = ""
cs = UJIT::Disasm.new
cs = YJIT::Disasm.new
str << iseq.disasm
str << "\n"
@ -36,16 +36,16 @@ module UJIT
str
end if defined?(Disasm)
# Return a hash for statistics generated for the --ujit-stats command line option.
# Return a hash for statistics generated for the --yjit-stats command line option.
# Return nil when option is not passed or unavailable.
def self.runtime_stats
# defined in ujit_iface.c
# defined in yjit_iface.c
Primitive.get_stat_counters
end
# Discard statistics collected for --ujit-stats.
# Discard statistics collected for --yjit-stats.
def self.reset_stats!
# defined in ujit_iface.c
# defined in yjit_iface.c
Primitive.reset_stats_bang
end
@ -58,7 +58,7 @@ module UJIT
return unless counters
$stderr.puts("***uJIT: Printing runtime counters from ujit.rb***")
$stderr.puts("***YJIT: Printing runtime counters from yjit.rb***")
print_counters(counters, prefix: 'oswb_', prompt: 'opt_send_without_block exit reasons: ')
print_counters(counters, prefix: 'leave_', prompt: 'leave exit reasons: ')


@ -11,7 +11,7 @@
#include <sys/mman.h>
#endif
#include "ujit_asm.h"
#include "yjit_asm.h"
// Compute the number of bits needed to encode a signed value
uint32_t sig_imm_size(int64_t imm)


@ -1,5 +1,5 @@
#ifndef UJIT_ASM_H
#define UJIT_ASM_H 1
#ifndef YJIT_ASM_H
#define YJIT_ASM_H 1
#include <stdint.h>
#include <stddef.h>


@ -2,7 +2,7 @@
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "ujit_asm.h"
#include "yjit_asm.h"
// Print the bytes in a code block
void print_bytes(codeblock_t* cb)


@ -8,12 +8,12 @@
#include "internal/compile.h"
#include "internal/class.h"
#include "insns_info.inc"
#include "ujit.h"
#include "ujit_iface.h"
#include "ujit_core.h"
#include "ujit_codegen.h"
#include "ujit_asm.h"
#include "ujit_utils.h"
#include "yjit.h"
#include "yjit_iface.h"
#include "yjit_core.h"
#include "yjit_codegen.h"
#include "yjit_asm.h"
#include "yjit_utils.h"
// Map from YARV opcodes to code generation functions
static st_table *gen_fns;
@ -99,9 +99,9 @@ jit_peek_at_stack(jitstate_t* jit, ctx_t* ctx)
return *(sp - 1);
}
// Save uJIT registers prior to a C call
// Save YJIT registers prior to a C call
static void
ujit_save_regs(codeblock_t* cb)
yjit_save_regs(codeblock_t* cb)
{
push(cb, REG_CFP);
push(cb, REG_EC);
@ -109,9 +109,9 @@ ujit_save_regs(codeblock_t* cb)
push(cb, REG_SP); // Maintain 16-byte RSP alignment
}
// Restore uJIT registers after a C call
// Restore YJIT registers after a C call
static void
ujit_load_regs(codeblock_t* cb)
yjit_load_regs(codeblock_t* cb)
{
pop(cb, REG_SP); // Maintain 16-byte RSP alignment
pop(cb, REG_SP);
@ -123,7 +123,7 @@ ujit_load_regs(codeblock_t* cb)
Generate an inline exit to return to the interpreter
*/
static void
ujit_gen_exit(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb, VALUE* exit_pc)
yjit_gen_exit(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb, VALUE* exit_pc)
{
// Write the adjusted SP back into the CFP
if (ctx->sp_offset != 0)
@ -142,9 +142,9 @@ ujit_gen_exit(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb, VALUE* exit_pc)
// Accumulate stats about interpreter exits
#if RUBY_DEBUG
if (rb_ujit_opts.gen_stats) {
if (rb_yjit_opts.gen_stats) {
mov(cb, RDI, const_ptr_opnd(exit_pc));
call_ptr(cb, RSI, (void *)&rb_ujit_count_side_exit_op);
call_ptr(cb, RSI, (void *)&rb_yjit_count_side_exit_op);
}
#endif
@ -156,7 +156,7 @@ ujit_gen_exit(jitstate_t* jit, ctx_t* ctx, codeblock_t* cb, VALUE* exit_pc)
Generate an out-of-line exit to return to the interpreter
*/
static uint8_t *
ujit_side_exit(jitstate_t* jit, ctx_t* ctx)
yjit_side_exit(jitstate_t* jit, ctx_t* ctx)
{
uint8_t* code_ptr = cb_get_ptr(ocb, ocb->write_pos);
@ -177,7 +177,7 @@ ujit_side_exit(jitstate_t* jit, ctx_t* ctx)
mov(ocb, mem_opnd(64, RAX, 0), RCX);
// Generate the code to exit to the interpreters
ujit_gen_exit(jit, ctx, ocb, exit_pc);
yjit_gen_exit(jit, ctx, ocb, exit_pc);
return code_ptr;
}
@ -185,22 +185,22 @@ ujit_side_exit(jitstate_t* jit, ctx_t* ctx)
#if RUBY_DEBUG
// Increment a profiling counter with counter_name
#define GEN_COUNTER_INC(cb, counter_name) _gen_counter_inc(cb, &(ujit_runtime_counters . counter_name))
#define GEN_COUNTER_INC(cb, counter_name) _gen_counter_inc(cb, &(yjit_runtime_counters . counter_name))
static void
_gen_counter_inc(codeblock_t *cb, int64_t *counter)
{
if (!rb_ujit_opts.gen_stats) return;
if (!rb_yjit_opts.gen_stats) return;
mov(cb, REG0, const_ptr_opnd(counter));
cb_write_lock_prefix(cb); // for ractors.
add(cb, mem_opnd(64, REG0, 0), imm_opnd(1));
}
// Increment a counter then take an existing side exit.
#define COUNTED_EXIT(side_exit, counter_name) _counted_side_exit(side_exit, &(ujit_runtime_counters . counter_name))
#define COUNTED_EXIT(side_exit, counter_name) _counted_side_exit(side_exit, &(yjit_runtime_counters . counter_name))
static uint8_t *
_counted_side_exit(uint8_t *existing_side_exit, int64_t *counter)
{
if (!rb_ujit_opts.gen_stats) return existing_side_exit;
if (!rb_yjit_opts.gen_stats) return existing_side_exit;
uint8_t *start = cb_get_ptr(ocb, ocb->write_pos);
_gen_counter_inc(ocb, counter);
@ -218,7 +218,7 @@ Compile an interpreter entry block to be inserted into an iseq
Returns `NULL` if compilation fails.
*/
uint8_t*
ujit_entry_prologue(void)
yjit_entry_prologue(void)
{
RUBY_ASSERT(cb != NULL);
@ -244,7 +244,7 @@ ujit_entry_prologue(void)
Generate code to check for interrupts and take a side-exit
*/
static void
ujit_check_ints(codeblock_t* cb, uint8_t* side_exit)
yjit_check_ints(codeblock_t* cb, uint8_t* side_exit)
{
// Check for interrupts
// see RUBY_VM_CHECK_INTS(ec) macro
@ -258,7 +258,7 @@ ujit_check_ints(codeblock_t* cb, uint8_t* side_exit)
Compile a sequence of bytecode instructions for a given basic block version
*/
void
ujit_gen_block(ctx_t* ctx, block_t* block, rb_execution_context_t* ec)
yjit_gen_block(ctx_t* ctx, block_t* block, rb_execution_context_t* ec)
{
RUBY_ASSERT(cb != NULL);
RUBY_ASSERT(block != NULL);
@ -302,7 +302,7 @@ ujit_gen_block(ctx_t* ctx, block_t* block, rb_execution_context_t* ec)
if (!rb_st_lookup(gen_fns, opcode, (st_data_t*)&gen_fn)) {
// If we reach an unknown instruction,
// exit to the interpreter and stop compiling
ujit_gen_exit(&jit, ctx, cb, jit.pc);
yjit_gen_exit(&jit, ctx, cb, jit.pc);
break;
}
@ -322,8 +322,8 @@ ujit_gen_block(ctx_t* ctx, block_t* block, rb_execution_context_t* ec)
// If we can't compile this instruction
// exit to the interpreter and stop compiling
if (status == UJIT_CANT_COMPILE) {
ujit_gen_exit(&jit, ctx, cb, jit.pc);
if (status == YJIT_CANT_COMPILE) {
yjit_gen_exit(&jit, ctx, cb, jit.pc);
break;
}
@ -332,7 +332,7 @@ ujit_gen_block(ctx_t* ctx, block_t* block, rb_execution_context_t* ec)
insn_idx += insn_len(opcode);
// If the instruction terminates this block
if (status == UJIT_END_BLOCK) {
if (status == YJIT_END_BLOCK) {
break;
}
}
@ -343,7 +343,7 @@ ujit_gen_block(ctx_t* ctx, block_t* block, rb_execution_context_t* ec)
// Store the index of the last instruction in the block
block->end_idx = insn_idx;
if (UJIT_DUMP_MODE >= 2) {
if (YJIT_DUMP_MODE >= 2) {
// Dump list of compiled instructions
fprintf(stderr, "Compiled the following for iseq=%p:\n", (void *)iseq);
for (uint32_t idx = block->blockid.idx; idx < insn_idx;)
@ -367,14 +367,14 @@ gen_dup(jitstate_t* jit, ctx_t* ctx)
mov(cb, REG0, dup_val);
mov(cb, loc0, REG0);
return UJIT_KEEP_COMPILING;
return YJIT_KEEP_COMPILING;
}
static codegen_status_t
gen_nop(jitstate_t* jit, ctx_t* ctx)
{
// Do nothing
return UJIT_KEEP_COMPILING;
return YJIT_KEEP_COMPILING;
}
static codegen_status_t
@ -382,7 +382,7 @@ gen_pop(jitstate_t* jit, ctx_t* ctx)
{
// Decrement SP
ctx_stack_pop(ctx, 1);
return UJIT_KEEP_COMPILING;
return YJIT_KEEP_COMPILING;
}
static codegen_status_t
@ -391,7 +391,7 @@ gen_putnil(jitstate_t* jit, ctx_t* ctx)
// Write constant at SP
x86opnd_t stack_top = ctx_stack_push(ctx, T_NIL);
mov(cb, stack_top, imm_opnd(Qnil));
return UJIT_KEEP_COMPILING;
return YJIT_KEEP_COMPILING;
}
static codegen_status_t
@ -435,7 +435,7 @@ gen_putobject(jitstate_t* jit, ctx_t* ctx)
mov(cb, stack_top, RAX);
}
return UJIT_KEEP_COMPILING;
return YJIT_KEEP_COMPILING;
}
static codegen_status_t
@ -448,7 +448,7 @@ gen_putobject_int2fix(jitstate_t* jit, ctx_t* ctx)
x86opnd_t stack_top = ctx_stack_push(ctx, T_FIXNUM);
mov(cb, stack_top, imm_opnd(INT2FIX(cst_val)));
return UJIT_KEEP_COMPILING;
return YJIT_KEEP_COMPILING;
}
static codegen_status_t
@ -461,7 +461,7 @@ gen_putself(jitstate_t* jit, ctx_t* ctx)
x86opnd_t stack_top = ctx_stack_push(ctx, T_NONE);
mov(cb, stack_top, RAX);
return UJIT_KEEP_COMPILING;
return YJIT_KEEP_COMPILING;
}
static codegen_status_t
@ -481,7 +481,7 @@ gen_getlocal_wc0(jitstate_t* jit, ctx_t* ctx)
x86opnd_t stack_top = ctx_stack_push(ctx, T_NONE);
mov(cb, stack_top, REG0);
return UJIT_KEEP_COMPILING;
return YJIT_KEEP_COMPILING;
}
static codegen_status_t
@ -508,7 +508,7 @@ gen_getlocal_wc1(jitstate_t* jit, ctx_t* ctx)
x86opnd_t stack_top = ctx_stack_push(ctx, T_NONE);
mov(cb, stack_top, REG0);
return UJIT_KEEP_COMPILING;
return YJIT_KEEP_COMPILING;
}
static codegen_status_t
@ -535,7 +535,7 @@ gen_setlocal_wc0(jitstate_t* jit, ctx_t* ctx)
test(cb, flags_opnd, imm_opnd(VM_ENV_FLAG_WB_REQUIRED));
// Create a side-exit to fall back to the interpreter
uint8_t* side_exit = ujit_side_exit(jit, ctx);
uint8_t* side_exit = yjit_side_exit(jit, ctx);
// if (flags & VM_ENV_FLAG_WB_REQUIRED) != 0
jnz_ptr(cb, side_exit);
@ -549,7 +549,7 @@ gen_setlocal_wc0(jitstate_t* jit, ctx_t* ctx)
const int32_t offs = -8 * local_idx;
mov(cb, mem_opnd(64, REG0, offs), REG1);
return UJIT_KEEP_COMPILING;
return YJIT_KEEP_COMPILING;
}
// Check that `self` is a pointer to an object on the GC heap
@ -575,14 +575,14 @@ gen_getinstancevariable(jitstate_t* jit, ctx_t* ctx)
// Check that the inline cache has been set, slot index is known
if (!ic->entry) {
return UJIT_CANT_COMPILE;
return YJIT_CANT_COMPILE;
}
// Defer compilation so we can peek at the topmost object
if (!jit_at_current_insn(jit))
{
defer_compilation(jit->block, jit->insn_idx, ctx);
return UJIT_END_BLOCK;
return YJIT_END_BLOCK;
}
// Peek at the topmost value on the stack at compilation time
@ -609,13 +609,13 @@ gen_getinstancevariable(jitstate_t* jit, ctx_t* ctx)
// Eventually, we can encode whether an object is T_OBJECT or not
// inside object shapes.
if (rb_get_alloc_func(ic->entry->class_value) != rb_class_allocate_instance) {
return UJIT_CANT_COMPILE;
return YJIT_CANT_COMPILE;
}
uint32_t ivar_index = ic->entry->index;
// Create a side-exit to fall back to the interpreter
uint8_t* side_exit = ujit_side_exit(jit, ctx);
uint8_t* side_exit = yjit_side_exit(jit, ctx);
// Load self from CFP
mov(cb, REG0, member_opnd(REG_CFP, rb_control_frame_t, self));
@ -659,7 +659,7 @@ gen_getinstancevariable(jitstate_t* jit, ctx_t* ctx)
x86opnd_t out_opnd = ctx_stack_push(ctx, T_NONE);
mov(cb, out_opnd, REG0);
return UJIT_KEEP_COMPILING;
return YJIT_KEEP_COMPILING;
}
static codegen_status_t
@ -669,7 +669,7 @@ gen_setinstancevariable(jitstate_t* jit, ctx_t* ctx)
// Check that the inline cache has been set, slot index is known
if (!ic->entry) {
return UJIT_CANT_COMPILE;
return YJIT_CANT_COMPILE;
}
// If the class uses the default allocator, instances should all be T_OBJECT
@ -677,13 +677,13 @@ gen_setinstancevariable(jitstate_t* jit, ctx_t* ctx)
// Eventually, we can encode whether an object is T_OBJECT or not
// inside object shapes.
if (rb_get_alloc_func(ic->entry->class_value) != rb_class_allocate_instance) {
return UJIT_CANT_COMPILE;
return YJIT_CANT_COMPILE;
}
uint32_t ivar_index = ic->entry->index;
// Create a side-exit to fall back to the interpreter
uint8_t* side_exit = ujit_side_exit(jit, ctx);
uint8_t* side_exit = yjit_side_exit(jit, ctx);
// Load self from CFP
mov(cb, REG0, member_opnd(REG_CFP, rb_control_frame_t, self));
@ -727,7 +727,7 @@ gen_setinstancevariable(jitstate_t* jit, ctx_t* ctx)
x86opnd_t ivar_opnd = mem_opnd(64, REG0, sizeof(VALUE) * ivar_index);
mov(cb, ivar_opnd, REG1);
return UJIT_KEEP_COMPILING;
return YJIT_KEEP_COMPILING;
}
// Conditional move operation used by comparison operators
@ -738,7 +738,7 @@ gen_fixnum_cmp(jitstate_t* jit, ctx_t* ctx, cmov_fn cmov_op)
{
// Create a side-exit to fall back to the interpreter
// Note: we generate the side-exit before popping operands from the stack
uint8_t* side_exit = ujit_side_exit(jit, ctx);
uint8_t* side_exit = yjit_side_exit(jit, ctx);
// TODO: make a helper function for guarding on op-not-redefined
// Make sure that minus isn't redefined for integers
@ -777,7 +777,7 @@ gen_fixnum_cmp(jitstate_t* jit, ctx_t* ctx, cmov_fn cmov_op)
x86opnd_t dst = ctx_stack_push(ctx, T_NONE);
mov(cb, dst, REG0);
return UJIT_KEEP_COMPILING;
return YJIT_KEEP_COMPILING;
}
static codegen_status_t
@ -806,7 +806,7 @@ gen_opt_aref(jitstate_t* jit, ctx_t* ctx)
// Only JIT one arg calls like `ary[6]`
if (argc != 1) {
return UJIT_CANT_COMPILE;
return YJIT_CANT_COMPILE;
}
const rb_callable_method_entry_t *cme = vm_cc_cme(cd->cc);
@ -815,11 +815,11 @@ gen_opt_aref(jitstate_t* jit, ctx_t* ctx)
// (including arrays) don't use the inline cache, so if the inline cache
// has an entry, then this must be used by some other type.
if (cme) {
return UJIT_CANT_COMPILE;
return YJIT_CANT_COMPILE;
}
// Create a side-exit to fall back to the interpreter
uint8_t* side_exit = ujit_side_exit(jit, ctx);
uint8_t* side_exit = yjit_side_exit(jit, ctx);
// TODO: make a helper function for guarding on op-not-redefined
// Make sure that aref isn't redefined for arrays.
@ -857,21 +857,21 @@ gen_opt_aref(jitstate_t* jit, ctx_t* ctx)
test(cb, REG1, imm_opnd(RUBY_FIXNUM_FLAG));
jz_ptr(cb, side_exit);
// Save uJIT registers
ujit_save_regs(cb);
// Save YJIT registers
yjit_save_regs(cb);
mov(cb, RDI, recv_opnd);
sar(cb, REG1, imm_opnd(1)); // Convert fixnum to int
mov(cb, RSI, REG1);
call_ptr(cb, REG0, (void *)rb_ary_entry_internal);
// Restore uJIT registers
ujit_load_regs(cb);
// Restore YJIT registers
yjit_load_regs(cb);
x86opnd_t stack_ret = ctx_stack_push(ctx, T_NONE);
mov(cb, stack_ret, RAX);
return UJIT_KEEP_COMPILING;
return YJIT_KEEP_COMPILING;
}
static codegen_status_t
@ -879,7 +879,7 @@ gen_opt_and(jitstate_t* jit, ctx_t* ctx)
{
// Create a side-exit to fall back to the interpreter
// Note: we generate the side-exit before popping operands from the stack
uint8_t* side_exit = ujit_side_exit(jit, ctx);
uint8_t* side_exit = yjit_side_exit(jit, ctx);
// TODO: make a helper function for guarding on op-not-redefined
// Make sure that plus isn't redefined for integers
@ -915,7 +915,7 @@ gen_opt_and(jitstate_t* jit, ctx_t* ctx)
x86opnd_t dst = ctx_stack_push(ctx, T_FIXNUM);
mov(cb, dst, REG0);
return UJIT_KEEP_COMPILING;
return YJIT_KEEP_COMPILING;
}
static codegen_status_t
@ -923,7 +923,7 @@ gen_opt_minus(jitstate_t* jit, ctx_t* ctx)
{
// Create a side-exit to fall back to the interpreter
// Note: we generate the side-exit before popping operands from the stack
uint8_t* side_exit = ujit_side_exit(jit, ctx);
uint8_t* side_exit = yjit_side_exit(jit, ctx);
// TODO: make a helper function for guarding on op-not-redefined
// Make sure that minus isn't redefined for integers
@ -955,7 +955,7 @@ gen_opt_minus(jitstate_t* jit, ctx_t* ctx)
x86opnd_t dst = ctx_stack_push(ctx, T_FIXNUM);
mov(cb, dst, REG0);
return UJIT_KEEP_COMPILING;
return YJIT_KEEP_COMPILING;
}
static codegen_status_t
@ -963,7 +963,7 @@ gen_opt_plus(jitstate_t* jit, ctx_t* ctx)
{
// Create a side-exit to fall back to the interpreter
// Note: we generate the side-exit before popping operands from the stack
uint8_t* side_exit = ujit_side_exit(jit, ctx);
uint8_t* side_exit = yjit_side_exit(jit, ctx);
// TODO: make a helper function for guarding on op-not-redefined
// Make sure that plus isn't redefined for integers
@ -1001,7 +1001,7 @@ gen_opt_plus(jitstate_t* jit, ctx_t* ctx)
x86opnd_t dst = ctx_stack_push(ctx, T_FIXNUM);
mov(cb, dst, REG0);
return UJIT_KEEP_COMPILING;
return YJIT_KEEP_COMPILING;
}
void
@ -1029,8 +1029,8 @@ gen_branchif(jitstate_t* jit, ctx_t* ctx)
{
// FIXME: eventually, put VM_CHECK_INTS() only on backward branch targets
// Check for interrupts
uint8_t* side_exit = ujit_side_exit(jit, ctx);
ujit_check_ints(cb, side_exit);
uint8_t* side_exit = yjit_side_exit(jit, ctx);
yjit_check_ints(cb, side_exit);
// Test if any bit (outside of the Qnil bit) is on
// RUBY_Qfalse /* ...0000 0000 */
@ -1054,7 +1054,7 @@ gen_branchif(jitstate_t* jit, ctx_t* ctx)
gen_branchif_branch
);
return UJIT_END_BLOCK;
return YJIT_END_BLOCK;
}
void
@ -1082,8 +1082,8 @@ gen_branchunless(jitstate_t* jit, ctx_t* ctx)
{
// FIXME: eventually, put VM_CHECK_INTS() only on backward branch targets
// Check for interrupts
uint8_t* side_exit = ujit_side_exit(jit, ctx);
ujit_check_ints(cb, side_exit);
uint8_t* side_exit = yjit_side_exit(jit, ctx);
yjit_check_ints(cb, side_exit);
// Test if any bit (outside of the Qnil bit) is on
// RUBY_Qfalse /* ...0000 0000 */
@ -1107,7 +1107,7 @@ gen_branchunless(jitstate_t* jit, ctx_t* ctx)
gen_branchunless_branch
);
return UJIT_END_BLOCK;
return YJIT_END_BLOCK;
}
static codegen_status_t
@ -1115,8 +1115,8 @@ gen_jump(jitstate_t* jit, ctx_t* ctx)
{
// FIXME: eventually, put VM_CHECK_INTS() only on backward branch targets
// Check for interrupts
uint8_t* side_exit = ujit_side_exit(jit, ctx);
ujit_check_ints(cb, side_exit);
uint8_t* side_exit = yjit_side_exit(jit, ctx);
yjit_check_ints(cb, side_exit);
// Get the branch target instruction offsets
uint32_t jump_idx = jit_next_idx(jit) + (int32_t)jit_get_arg(jit, 0);
@ -1128,7 +1128,7 @@ gen_jump(jitstate_t* jit, ctx_t* ctx)
jump_block
);
return UJIT_END_BLOCK;
return YJIT_END_BLOCK;
}
static void
@ -1136,13 +1136,13 @@ jit_protected_guard(jitstate_t *jit, codeblock_t *cb, const rb_callable_method_e
{
// Callee is protected. Generate ancestry guard.
// See vm_call_method().
ujit_save_regs(cb);
yjit_save_regs(cb);
mov(cb, C_ARG_REGS[0], member_opnd(REG_CFP, rb_control_frame_t, self));
jit_mov_gc_ptr(jit, cb, C_ARG_REGS[1], cme->defined_class);
// Note: PC isn't written to current control frame as rb_is_kind_of() shouldn't raise.
// VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass);
call_ptr(cb, REG0, (void *)&rb_obj_is_kind_of);
ujit_load_regs(cb);
yjit_load_regs(cb);
test(cb, RAX, RAX);
jz_ptr(cb, COUNTED_EXIT(side_exit, oswb_se_protected_check_failed));
}
@ -1156,27 +1156,27 @@ gen_oswb_cfunc(jitstate_t* jit, ctx_t* ctx, struct rb_call_data * cd, const rb_c
if (cfunc->argc < 0 && cfunc->argc != -1)
{
GEN_COUNTER_INC(cb, oswb_cfunc_ruby_array_varg);
return false;
return YJIT_CANT_COMPILE;
}
// If the argument count doesn't match
if (cfunc->argc >= 0 && cfunc->argc != argc)
{
GEN_COUNTER_INC(cb, oswb_cfunc_argc_mismatch);
return false;
return YJIT_CANT_COMPILE;
}
// Don't JIT functions that need C stack arguments for now
if (argc + 1 > NUM_C_ARG_REGS) {
GEN_COUNTER_INC(cb, oswb_cfunc_toomany_args);
return false;
return YJIT_CANT_COMPILE;
}
// Create a side-exit to fall back to the interpreter
uint8_t *side_exit = ujit_side_exit(jit, ctx);
uint8_t *side_exit = yjit_side_exit(jit, ctx);
// Check for interrupts
ujit_check_ints(cb, side_exit);
yjit_check_ints(cb, side_exit);
// Points to the receiver operand on the stack
x86opnd_t recv = ctx_stack_opnd(ctx, argc);
@ -1237,7 +1237,7 @@ gen_oswb_cfunc(jitstate_t* jit, ctx_t* ctx, struct rb_call_data * cd, const rb_c
lea(cb, REG0, ctx_sp_opnd(ctx, sizeof(VALUE) * 3));
// Put compile time cme into REG1. It's assumed to be valid because we are notified when
// any cme we depend on become outdated. See rb_ujit_method_lookup_change().
// any cme we depend on become outdated. See rb_yjit_method_lookup_change().
jit_mov_gc_ptr(jit, cb, REG1, (VALUE)cme);
// Write method entry at sp[-3]
// sp[-3] = me;
@ -1282,9 +1282,9 @@ gen_oswb_cfunc(jitstate_t* jit, ctx_t* ctx, struct rb_call_data * cd, const rb_c
}
// Verify that we are calling the right function
if (UJIT_CHECK_MODE > 0) {
// Save uJIT registers
ujit_save_regs(cb);
if (YJIT_CHECK_MODE > 0) {
// Save YJIT registers
yjit_save_regs(cb);
// Call check_cfunc_dispatch
mov(cb, RDI, recv);
@ -1293,12 +1293,12 @@ gen_oswb_cfunc(jitstate_t* jit, ctx_t* ctx, struct rb_call_data * cd, const rb_c
jit_mov_gc_ptr(jit, cb, RCX, (VALUE)cme);
call_ptr(cb, REG0, (void *)&check_cfunc_dispatch);
// Load uJIT registers
ujit_load_regs(cb);
// Load YJIT registers
yjit_load_regs(cb);
}
// Save uJIT registers
ujit_save_regs(cb);
// Save YJIT registers
yjit_save_regs(cb);
// Copy SP into RAX because REG_SP will get overwritten
lea(cb, RAX, ctx_sp_opnd(ctx, 0));
@ -1331,11 +1331,11 @@ gen_oswb_cfunc(jitstate_t* jit, ctx_t* ctx, struct rb_call_data * cd, const rb_c
// Call the C function
// VALUE ret = (cfunc->func)(recv, argv[0], argv[1]);
// cfunc comes from compile-time cme->def, which we assume to be stable.
// Invalidation logic is in rb_ujit_method_lookup_change()
// Invalidation logic is in rb_yjit_method_lookup_change()
call_ptr(cb, REG0, (void*)cfunc->func);
// Load uJIT registers
ujit_load_regs(cb);
// Load YJIT registers
yjit_load_regs(cb);
// Push the return value on the Ruby stack
x86opnd_t stack_ret = ctx_stack_push(ctx, T_NONE);
@ -1360,7 +1360,7 @@ gen_oswb_cfunc(jitstate_t* jit, ctx_t* ctx, struct rb_call_data * cd, const rb_c
cont_block
);
return UJIT_END_BLOCK;
return YJIT_END_BLOCK;
}
bool rb_simple_iseq_p(const rb_iseq_t *iseq);
@ -1392,27 +1392,27 @@ gen_oswb_iseq(jitstate_t* jit, ctx_t* ctx, struct rb_call_data * cd, const rb_ca
if (num_params != argc) {
GEN_COUNTER_INC(cb, oswb_iseq_argc_mismatch);
return false;
return YJIT_CANT_COMPILE;
}
if (!rb_simple_iseq_p(iseq)) {
// Only handle iseqs that have simple parameters.
// See vm_callee_setup_arg().
GEN_COUNTER_INC(cb, oswb_iseq_not_simple);
return false;
return YJIT_CANT_COMPILE;
}
if (vm_ci_flag(cd->ci) & VM_CALL_TAILCALL) {
// We can't handle tailcalls
GEN_COUNTER_INC(cb, oswb_iseq_tailcall);
return false;
return YJIT_CANT_COMPILE;
}
// Create a side-exit to fall back to the interpreter
uint8_t* side_exit = ujit_side_exit(jit, ctx);
uint8_t* side_exit = yjit_side_exit(jit, ctx);
// Check for interrupts
ujit_check_ints(cb, side_exit);
yjit_check_ints(cb, side_exit);
// Points to the receiver operand on the stack
x86opnd_t recv = ctx_stack_opnd(ctx, argc);
@ -1474,7 +1474,7 @@ gen_oswb_iseq(jitstate_t* jit, ctx_t* ctx, struct rb_call_data * cd, const rb_ca
}
// Put compile time cme into REG1. It's assumed to be valid because we are notified when
// any cme we depend on become outdated. See rb_ujit_method_lookup_change().
// any cme we depend on become outdated. See rb_yjit_method_lookup_change().
jit_mov_gc_ptr(jit, cb, REG1, (VALUE)cme);
// Write method entry at sp[-3]
// sp[-3] = me;
@ -1547,16 +1547,7 @@ gen_oswb_iseq(jitstate_t* jit, ctx_t* ctx, struct rb_call_data * cd, const rb_ca
(blockid_t){ iseq, 0 }
);
// TODO: create stub for call continuation
// TODO: need to pop args in the caller ctx
// TODO: stub so we can return to JITted code
//blockid_t cont_block = { jit->iseq, jit_next_insn_idx(jit) };
return UJIT_END_BLOCK;
return true;
}
static codegen_status_t
@ -1575,19 +1566,19 @@ gen_opt_send_without_block(jitstate_t* jit, ctx_t* ctx)
// Don't JIT calls with keyword splat
if (vm_ci_flag(cd->ci) & VM_CALL_KW_SPLAT) {
GEN_COUNTER_INC(cb, oswb_kw_splat);
return false;
return YJIT_CANT_COMPILE;
}
// Don't JIT calls that aren't simple
if (!(vm_ci_flag(cd->ci) & VM_CALL_ARGS_SIMPLE)) {
GEN_COUNTER_INC(cb, oswb_callsite_not_simple);
return false;
return YJIT_CANT_COMPILE;
}
// Don't JIT if the inline cache is not set
if (!cd->cc || !cd->cc->klass) {
GEN_COUNTER_INC(cb, oswb_ic_empty);
return false;
return YJIT_CANT_COMPILE;
}
const rb_callable_method_entry_t *cme = vm_cc_cme(cd->cc);
@ -1595,7 +1586,7 @@ gen_opt_send_without_block(jitstate_t* jit, ctx_t* ctx)
// Don't JIT if the method entry is out of date
if (METHOD_ENTRY_INVALIDATED(cme)) {
GEN_COUNTER_INC(cb, oswb_invalid_cme);
return false;
return YJIT_CANT_COMPILE;
}
switch (cme->def->type) {
@ -1605,34 +1596,34 @@ gen_opt_send_without_block(jitstate_t* jit, ctx_t* ctx)
return gen_oswb_cfunc(jit, ctx, cd, cme, argc);
case VM_METHOD_TYPE_ATTRSET:
GEN_COUNTER_INC(cb, oswb_ivar_set_method);
return false;
return YJIT_CANT_COMPILE;
case VM_METHOD_TYPE_BMETHOD:
GEN_COUNTER_INC(cb, oswb_bmethod);
return false;
return YJIT_CANT_COMPILE;
case VM_METHOD_TYPE_IVAR:
GEN_COUNTER_INC(cb, oswb_ivar_get_method);
return false;
return YJIT_CANT_COMPILE;
case VM_METHOD_TYPE_ZSUPER:
GEN_COUNTER_INC(cb, oswb_zsuper_method);
return false;
return YJIT_CANT_COMPILE;
case VM_METHOD_TYPE_ALIAS:
GEN_COUNTER_INC(cb, oswb_alias_method);
return false;
return YJIT_CANT_COMPILE;
case VM_METHOD_TYPE_UNDEF:
GEN_COUNTER_INC(cb, oswb_undef_method);
return false;
return YJIT_CANT_COMPILE;
case VM_METHOD_TYPE_NOTIMPLEMENTED:
GEN_COUNTER_INC(cb, oswb_not_implemented_method);
return false;
return YJIT_CANT_COMPILE;
case VM_METHOD_TYPE_OPTIMIZED:
GEN_COUNTER_INC(cb, oswb_optimized_method);
return false;
return YJIT_CANT_COMPILE;
case VM_METHOD_TYPE_MISSING:
GEN_COUNTER_INC(cb, oswb_missing_method);
return false;
return YJIT_CANT_COMPILE;
case VM_METHOD_TYPE_REFINED:
GEN_COUNTER_INC(cb, oswb_refined_method);
return false;
return YJIT_CANT_COMPILE;
// no default case so compiler issues a warning if this is not exhaustive
}
}
@ -1644,7 +1635,7 @@ gen_leave(jitstate_t* jit, ctx_t* ctx)
RUBY_ASSERT(ctx->stack_size == 1);
// Create a side-exit to fall back to the interpreter
uint8_t* side_exit = ujit_side_exit(jit, ctx);
uint8_t* side_exit = yjit_side_exit(jit, ctx);
// Load environment pointer EP from CFP
mov(cb, REG0, member_opnd(REG_CFP, rb_control_frame_t, ep));
@ -1655,7 +1646,7 @@ gen_leave(jitstate_t* jit, ctx_t* ctx)
jnz_ptr(cb, COUNTED_EXIT(side_exit, leave_se_finish_frame));
// Check for interrupts
ujit_check_ints(cb, COUNTED_EXIT(side_exit, leave_se_interrupt));
yjit_check_ints(cb, COUNTED_EXIT(side_exit, leave_se_interrupt));
// Load the return value
mov(cb, REG0, ctx_stack_pop(ctx, 1));
@ -1687,7 +1678,7 @@ gen_leave(jitstate_t* jit, ctx_t* ctx)
cb_link_labels(cb);
cb_write_post_call_bytes(cb);
return UJIT_END_BLOCK;
return YJIT_END_BLOCK;
}
RUBY_EXTERN rb_serial_t ruby_vm_global_constant_state;
@ -1702,24 +1693,24 @@ gen_opt_getinlinecache(jitstate_t *jit, ctx_t *ctx)
struct iseq_inline_constant_cache_entry *ice = ic->entry;
if (!ice) {
// Cache not filled
return UJIT_CANT_COMPILE;
return YJIT_CANT_COMPILE;
}
if (ice->ic_serial != ruby_vm_global_constant_state) {
// Cache miss at compile time.
return UJIT_CANT_COMPILE;
return YJIT_CANT_COMPILE;
}
if (ice->ic_cref) {
// Only compile for caches that don't care about lexical scope.
return UJIT_CANT_COMPILE;
return YJIT_CANT_COMPILE;
}
// Optimize for single ractor mode.
// FIXME: This leaks when st_insert raises NoMemoryError
if (!assume_single_ractor_mode(jit->block)) return UJIT_CANT_COMPILE;
if (!assume_single_ractor_mode(jit->block)) return YJIT_CANT_COMPILE;
// Invalidate output code on any and all constant writes
// FIXME: This leaks when st_insert raises NoMemoryError
if (!assume_stable_global_constant_state(jit->block)) return UJIT_CANT_COMPILE;
if (!assume_stable_global_constant_state(jit->block)) return YJIT_CANT_COMPILE;
x86opnd_t stack_top = ctx_stack_push(ctx, T_NONE);
jit_mov_gc_ptr(jit, cb, REG0, ice->value);
@ -1732,10 +1723,10 @@ gen_opt_getinlinecache(jitstate_t *jit, ctx_t *ctx)
(blockid_t){ .iseq = jit->iseq, .idx = jump_idx }
);
return UJIT_END_BLOCK;
return YJIT_END_BLOCK;
}
void ujit_reg_op(int opcode, codegen_fn gen_fn)
void yjit_reg_op(int opcode, codegen_fn gen_fn)
{
// Check that the op wasn't previously registered
st_data_t st_gen;
@ -1747,7 +1738,7 @@ void ujit_reg_op(int opcode, codegen_fn gen_fn)
}
void
ujit_init_codegen(void)
yjit_init_codegen(void)
{
// Initialize the code blocks
uint32_t mem_size = 128 * 1024 * 1024;
@ -1761,32 +1752,32 @@ ujit_init_codegen(void)
gen_fns = rb_st_init_numtable();
// Map YARV opcodes to the corresponding codegen functions
ujit_reg_op(BIN(dup), gen_dup);
ujit_reg_op(BIN(nop), gen_nop);
ujit_reg_op(BIN(pop), gen_pop);
ujit_reg_op(BIN(putnil), gen_putnil);
ujit_reg_op(BIN(putobject), gen_putobject);
ujit_reg_op(BIN(putobject_INT2FIX_0_), gen_putobject_int2fix);
ujit_reg_op(BIN(putobject_INT2FIX_1_), gen_putobject_int2fix);
ujit_reg_op(BIN(putself), gen_putself);
ujit_reg_op(BIN(getlocal_WC_0), gen_getlocal_wc0);
ujit_reg_op(BIN(getlocal_WC_1), gen_getlocal_wc1);
ujit_reg_op(BIN(setlocal_WC_0), gen_setlocal_wc0);
ujit_reg_op(BIN(getinstancevariable), gen_getinstancevariable);
ujit_reg_op(BIN(setinstancevariable), gen_setinstancevariable);
ujit_reg_op(BIN(opt_lt), gen_opt_lt);
ujit_reg_op(BIN(opt_le), gen_opt_le);
ujit_reg_op(BIN(opt_ge), gen_opt_ge);
ujit_reg_op(BIN(opt_aref), gen_opt_aref);
ujit_reg_op(BIN(opt_and), gen_opt_and);
ujit_reg_op(BIN(opt_minus), gen_opt_minus);
ujit_reg_op(BIN(opt_plus), gen_opt_plus);
yjit_reg_op(BIN(dup), gen_dup);
yjit_reg_op(BIN(nop), gen_nop);
yjit_reg_op(BIN(pop), gen_pop);
yjit_reg_op(BIN(putnil), gen_putnil);
yjit_reg_op(BIN(putobject), gen_putobject);
yjit_reg_op(BIN(putobject_INT2FIX_0_), gen_putobject_int2fix);
yjit_reg_op(BIN(putobject_INT2FIX_1_), gen_putobject_int2fix);
yjit_reg_op(BIN(putself), gen_putself);
yjit_reg_op(BIN(getlocal_WC_0), gen_getlocal_wc0);
yjit_reg_op(BIN(getlocal_WC_1), gen_getlocal_wc1);
yjit_reg_op(BIN(setlocal_WC_0), gen_setlocal_wc0);
yjit_reg_op(BIN(getinstancevariable), gen_getinstancevariable);
yjit_reg_op(BIN(setinstancevariable), gen_setinstancevariable);
yjit_reg_op(BIN(opt_lt), gen_opt_lt);
yjit_reg_op(BIN(opt_le), gen_opt_le);
yjit_reg_op(BIN(opt_ge), gen_opt_ge);
yjit_reg_op(BIN(opt_aref), gen_opt_aref);
yjit_reg_op(BIN(opt_and), gen_opt_and);
yjit_reg_op(BIN(opt_minus), gen_opt_minus);
yjit_reg_op(BIN(opt_plus), gen_opt_plus);
// Map branch instruction opcodes to codegen functions
ujit_reg_op(BIN(opt_getinlinecache), gen_opt_getinlinecache);
ujit_reg_op(BIN(branchif), gen_branchif);
ujit_reg_op(BIN(branchunless), gen_branchunless);
ujit_reg_op(BIN(jump), gen_jump);
ujit_reg_op(BIN(opt_send_without_block), gen_opt_send_without_block);
ujit_reg_op(BIN(leave), gen_leave);
yjit_reg_op(BIN(opt_getinlinecache), gen_opt_getinlinecache);
yjit_reg_op(BIN(branchif), gen_branchif);
yjit_reg_op(BIN(branchunless), gen_branchunless);
yjit_reg_op(BIN(jump), gen_jump);
yjit_reg_op(BIN(opt_send_without_block), gen_opt_send_without_block);
yjit_reg_op(BIN(leave), gen_leave);
}
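
The registration table above relies on a single codegen-function contract: each handler receives the JIT state and the stack context, emits x86 into the global code block cb, and reports one of the renamed codegen_status_t values. A hedged sketch modeled on gen_putnil/gen_nop from this file (gen_example and the opcode are hypothetical):

static codegen_status_t
gen_example(jitstate_t* jit, ctx_t* ctx)
{
    x86opnd_t stack_top = ctx_stack_push(ctx, T_NIL); // grow the virtual stack by one slot
    mov(cb, stack_top, imm_opnd(Qnil));               // write the constant into that slot
    return YJIT_KEEP_COMPILING;                       // keep walking the instruction sequence
}

// Registered against a YARV opcode in yjit_init_codegen(), like the entries above:
//     yjit_reg_op(BIN(some_opcode), gen_example);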


@ -1,8 +1,8 @@
#ifndef UJIT_CODEGEN_H
#define UJIT_CODEGEN_H 1
#ifndef YJIT_CODEGEN_H
#define YJIT_CODEGEN_H 1
#include "stddef.h"
#include "ujit_core.h"
#include "yjit_core.h"
// Code blocks we generate code into
extern codeblock_t *cb;
@ -30,18 +30,18 @@ typedef struct JITState
} jitstate_t;
typedef enum codegen_status {
UJIT_END_BLOCK,
UJIT_KEEP_COMPILING,
UJIT_CANT_COMPILE
YJIT_END_BLOCK,
YJIT_KEEP_COMPILING,
YJIT_CANT_COMPILE
} codegen_status_t;
// Code generation function signature
typedef codegen_status_t (*codegen_fn)(jitstate_t* jit, ctx_t* ctx);
uint8_t* ujit_entry_prologue();
uint8_t* yjit_entry_prologue();
void ujit_gen_block(ctx_t* ctx, block_t* block, rb_execution_context_t* ec);
void yjit_gen_block(ctx_t* ctx, block_t* block, rb_execution_context_t* ec);
void ujit_init_codegen(void);
void yjit_init_codegen(void);
#endif // #ifndef UJIT_CODEGEN_H
#endif // #ifndef YJIT_CODEGEN_H


@ -4,11 +4,11 @@
#include "insns.inc"
#include "insns_info.inc"
#include "vm_sync.h"
#include "ujit_asm.h"
#include "ujit_utils.h"
#include "ujit_iface.h"
#include "ujit_core.h"
#include "ujit_codegen.h"
#include "yjit_asm.h"
#include "yjit_utils.h"
#include "yjit_iface.h"
#include "yjit_core.h"
#include "yjit_codegen.h"
// Maximum number of versions per block
#define MAX_VERSIONS 4
@ -154,17 +154,17 @@ int ctx_diff(const ctx_t* src, const ctx_t* dst)
}
// Get all blocks for a particular place in an iseq.
static rb_ujit_block_array_t
static rb_yjit_block_array_t
get_version_array(const rb_iseq_t *iseq, unsigned idx)
{
struct rb_iseq_constant_body *body = iseq->body;
if (rb_darray_size(body->ujit_blocks) == 0) {
if (rb_darray_size(body->yjit_blocks) == 0) {
return NULL;
}
RUBY_ASSERT((unsigned)rb_darray_size(body->ujit_blocks) == body->iseq_size);
return rb_darray_get(body->ujit_blocks, idx);
RUBY_ASSERT((unsigned)rb_darray_size(body->yjit_blocks) == body->iseq_size);
return rb_darray_get(body->yjit_blocks, idx);
}
// Count the number of block versions matching a given blockid
@ -182,14 +182,14 @@ add_block_version(blockid_t blockid, block_t* block)
const rb_iseq_t *iseq = block->blockid.iseq;
struct rb_iseq_constant_body *body = iseq->body;
// Ensure ujit_blocks is initialized for this iseq
if (rb_darray_size(body->ujit_blocks) == 0) {
// Initialize ujit_blocks to be as wide as body->iseq_encoded
// Ensure yjit_blocks is initialized for this iseq
if (rb_darray_size(body->yjit_blocks) == 0) {
// Initialize yjit_blocks to be as wide as body->iseq_encoded
int32_t casted = (int32_t)body->iseq_size;
if ((unsigned)casted != body->iseq_size) {
rb_bug("iseq too large");
}
if (!rb_darray_make(&body->ujit_blocks, casted)) {
if (!rb_darray_make(&body->yjit_blocks, casted)) {
rb_bug("allocation failed");
}
@ -199,8 +199,8 @@ add_block_version(blockid_t blockid, block_t* block)
#endif
}
RUBY_ASSERT((int32_t)blockid.idx < rb_darray_size(body->ujit_blocks));
rb_ujit_block_array_t *block_array_ref = rb_darray_ref(body->ujit_blocks, blockid.idx);
RUBY_ASSERT((int32_t)blockid.idx < rb_darray_size(body->yjit_blocks));
rb_yjit_block_array_t *block_array_ref = rb_darray_ref(body->yjit_blocks, blockid.idx);
// Add the new block
if (!rb_darray_append(block_array_ref, block)) {
@ -229,7 +229,7 @@ add_block_version(blockid_t blockid, block_t* block)
// Retrieve a basic block version for an (iseq, idx) tuple
block_t* find_block_version(blockid_t blockid, const ctx_t* ctx)
{
rb_ujit_block_array_t versions = get_version_array(blockid.iseq, blockid.idx);
rb_yjit_block_array_t versions = get_version_array(blockid.iseq, blockid.idx);
// Best match found
block_t* best_version = NULL;
@ -252,7 +252,7 @@ block_t* find_block_version(blockid_t blockid, const ctx_t* ctx)
}
void
ujit_branches_update_references(void)
yjit_branches_update_references(void)
{
for (uint32_t i = 0; i < num_branches; i++) {
branch_entries[i].targets[0].iseq = (const void *)rb_gc_location((VALUE)branch_entries[i].targets[0].iseq);
@ -276,7 +276,7 @@ block_t* gen_block_version(blockid_t blockid, const ctx_t* start_ctx, rb_executi
block_t* block = first_block;
// Generate code for the first block
ujit_gen_block(ctx, block, ec);
yjit_gen_block(ctx, block, ec);
// Keep track of the new block version
add_block_version(block->blockid, block);
@ -310,7 +310,7 @@ block_t* gen_block_version(blockid_t blockid, const ctx_t* start_ctx, rb_executi
memcpy(&block->ctx, ctx, sizeof(ctx_t));
// Generate code for the current block
ujit_gen_block(ctx, block, ec);
yjit_gen_block(ctx, block, ec);
// Keep track of the new block version
add_block_version(block->blockid, block);
@ -332,7 +332,7 @@ uint8_t* gen_entry_point(const rb_iseq_t *iseq, uint32_t insn_idx, rb_execution_
blockid_t blockid = { iseq, insn_idx };
// Write the interpreter entry prologue
uint8_t* code_ptr = ujit_entry_prologue();
uint8_t* code_ptr = yjit_entry_prologue();
// Try to generate code for the entry block
block_t* block = gen_block_version(blockid, &DEFAULT_CTX, ec);
@ -446,7 +446,7 @@ uint8_t* get_branch_target(
// branch_stub_hit(uint32_t branch_idx, uint32_t target_idx)
uint8_t* stub_addr = cb_get_ptr(ocb, ocb->write_pos);
// Save the ujit registers
// Save the yjit registers
push(ocb, REG_CFP);
push(ocb, REG_EC);
push(ocb, REG_SP);
@ -458,7 +458,7 @@ uint8_t* get_branch_target(
mov(ocb, C_ARG_REGS[0], imm_opnd(branch_idx));
call_ptr(ocb, REG0, (void *)&branch_stub_hit);
// Restore the ujit registers
// Restore the yjit registers
pop(ocb, REG_SP);
pop(ocb, REG_SP);
pop(ocb, REG_EC);
@ -646,10 +646,10 @@ void defer_compilation(
// Remove all references to a block then free it.
void
ujit_free_block(block_t *block)
yjit_free_block(block_t *block)
{
ujit_unlink_method_lookup_dependency(block);
ujit_block_assumptions_free(block);
yjit_unlink_method_lookup_dependency(block);
yjit_block_assumptions_free(block);
rb_darray_free(block->incoming);
rb_darray_free(block->gc_object_offsets);
@ -659,7 +659,7 @@ ujit_free_block(block_t *block)
// Remove a block version without reordering the version array
static bool
block_array_remove(rb_ujit_block_array_t block_array, block_t *block)
block_array_remove(rb_yjit_block_array_t block_array, block_t *block)
{
bool after_target = false;
block_t **element;
@ -687,7 +687,7 @@ invalidate_block_version(block_t* block)
// fprintf(stderr, "block=%p\n", block);
// Remove this block from the version array
rb_ujit_block_array_t versions = get_version_array(iseq, block->blockid.idx);
rb_yjit_block_array_t versions = get_version_array(iseq, block->blockid.idx);
RB_UNUSED_VAR(bool removed);
removed = block_array_remove(versions, block);
RUBY_ASSERT(removed);
@ -732,7 +732,7 @@ invalidate_block_version(block_t* block)
if (target_next && branch->end_pos > block->end_pos)
{
rb_bug("ujit invalidate rewrote branch past block end");
rb_bug("yjit invalidate rewrote branch past block end");
}
}
@ -742,7 +742,7 @@ invalidate_block_version(block_t* block)
VALUE* entry_pc = iseq_pc_at_idx(iseq, idx);
int entry_opcode = opcode_at_pc(iseq, entry_pc);
// TODO: unmap_addr2insn in ujit_iface.c? Maybe we can write a function to encompass this logic?
// TODO: unmap_addr2insn in yjit_iface.c? Maybe we can write a function to encompass this logic?
// Should check how it's used in exit and side-exit
const void * const *handler_table = rb_vm_get_insns_address_table();
void* handler_addr = (void*)handler_table[entry_opcode];
@ -755,13 +755,13 @@ invalidate_block_version(block_t* block)
// FIXME:
// Call continuation addresses on the stack can also be atomically replaced by jumps going to the stub.
ujit_free_block(block);
yjit_free_block(block);
// fprintf(stderr, "invalidation done\n");
}
void
ujit_init_core(void)
yjit_init_core(void)
{
// Nothing yet
}


@ -1,17 +1,17 @@
#ifndef UJIT_CORE_H
#define UJIT_CORE_H 1
#ifndef YJIT_CORE_H
#define YJIT_CORE_H 1
#include "stddef.h"
#include "ujit_asm.h"
#include "yjit_asm.h"
// Register uJIT receives the CFP and EC into
// Register YJIT receives the CFP and EC into
#define REG_CFP RDI
#define REG_EC RSI
// Register uJIT loads the SP into
// Register YJIT loads the SP into
#define REG_SP RDX
// Scratch registers used by uJIT
// Scratch registers used by YJIT
#define REG0 RAX
#define REG1 RCX
#define REG0_32 EAX
@ -109,7 +109,7 @@ Basic block version
Represents a portion of an iseq compiled with a given context
Note: care must be taken to minimize the size of block_t objects
*/
typedef struct ujit_block_version
typedef struct yjit_block_version
{
// Bytecode sequence (iseq, idx) this is a version of
blockid_t blockid;
@ -148,8 +148,8 @@ int ctx_diff(const ctx_t* src, const ctx_t* dst);
block_t* find_block_version(blockid_t blockid, const ctx_t* ctx);
block_t* gen_block_version(blockid_t blockid, const ctx_t* ctx, rb_execution_context_t *ec);
uint8_t* gen_entry_point(const rb_iseq_t *iseq, uint32_t insn_idx, rb_execution_context_t *ec);
void ujit_free_block(block_t *block);
void ujit_branches_update_references(void);
void yjit_free_block(block_t *block);
void yjit_branches_update_references(void);
void gen_branch(
const ctx_t* src_ctx,
@ -173,6 +173,6 @@ void defer_compilation(
void invalidate_block_version(block_t* block);
void ujit_init_core(void);
void yjit_init_core(void);
#endif // #ifndef UJIT_CORE_H
#endif // #ifndef YJIT_CORE_H

ujit_iface.c → yjit_iface.c

@ -8,27 +8,27 @@
#include "internal/compile.h"
#include "internal/class.h"
#include "insns_info.inc"
#include "ujit.h"
#include "ujit_iface.h"
#include "ujit_codegen.h"
#include "ujit_core.h"
#include "ujit_hooks.inc"
#include "yjit.h"
#include "yjit_iface.h"
#include "yjit_codegen.h"
#include "yjit_core.h"
#include "yjit_hooks.inc"
#include "darray.h"
#if HAVE_LIBCAPSTONE
#include <capstone/capstone.h>
#endif
static VALUE mUjit;
static VALUE cUjitBlock;
static VALUE cUjitDisasm;
static VALUE cUjitDisasmInsn;
static VALUE mYjit;
static VALUE cYjitBlock;
static VALUE cYjitDisasm;
static VALUE cYjitDisasmInsn;
#if RUBY_DEBUG
static int64_t vm_insns_count = 0;
static int64_t exit_op_count[VM_INSTRUCTION_SIZE] = { 0 };
int64_t rb_compiled_iseq_count = 0;
struct rb_ujit_runtime_counters ujit_runtime_counters = { 0 };
struct rb_yjit_runtime_counters yjit_runtime_counters = { 0 };
#endif
// Machine code blocks (executable memory)
@ -38,28 +38,28 @@ extern codeblock_t *ocb;
// Hash table of encoded instructions
extern st_table *rb_encoded_insn_data;
struct rb_ujit_options rb_ujit_opts;
struct rb_yjit_options rb_yjit_opts;
static const rb_data_type_t ujit_block_type = {
"UJIT/Block",
static const rb_data_type_t yjit_block_type = {
"YJIT/Block",
{0, 0, 0, },
0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
// Write the uJIT entry point pre-call bytes
// Write the YJIT entry point pre-call bytes
void
cb_write_pre_call_bytes(codeblock_t* cb)
{
for (size_t i = 0; i < sizeof(ujit_with_ec_pre_call_bytes); ++i)
cb_write_byte(cb, ujit_with_ec_pre_call_bytes[i]);
for (size_t i = 0; i < sizeof(yjit_with_ec_pre_call_bytes); ++i)
cb_write_byte(cb, yjit_with_ec_pre_call_bytes[i]);
}
// Write the uJIT exit post-call bytes
// Write the YJIT exit post-call bytes
void
cb_write_post_call_bytes(codeblock_t* cb)
{
for (size_t i = 0; i < sizeof(ujit_with_ec_post_call_bytes); ++i)
cb_write_byte(cb, ujit_with_ec_post_call_bytes[i]);
for (size_t i = 0; i < sizeof(yjit_with_ec_post_call_bytes); ++i)
cb_write_byte(cb, yjit_with_ec_post_call_bytes[i]);
}
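// For illustration only: yjit_hooks.inc (presumably generated at build time
// from scraped machine code) would provide these byte arrays roughly as
// follows; the array names match the real ones, but the bytes shown are
// hypothetical:
//
//   static const uint8_t yjit_with_ec_pre_call_bytes[]  = { 0x55, 0x48, 0x89, 0xe5 /* ... */ };
//   static const uint8_t yjit_with_ec_post_call_bytes[] = { /* ... */ 0x5d, 0xc3 };
//
// The two helpers above simply replay those bytes into a codeblock_t.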
// Get the PC for a given index in an iseq
@ -84,7 +84,7 @@ map_addr2insn(void *code_ptr, int insn)
st_insert(rb_encoded_insn_data, (st_data_t)code_ptr, encoded_insn_data);
}
else {
rb_bug("ujit: failed to find info for original instruction while dealing with addr2insn");
rb_bug("yjit: failed to find info for original instruction while dealing with addr2insn");
}
}
@ -105,7 +105,7 @@ void
check_cfunc_dispatch(VALUE receiver, struct rb_call_data *cd, void *callee, rb_callable_method_entry_t *compile_time_cme)
{
if (METHOD_ENTRY_INVALIDATED(compile_time_cme)) {
rb_bug("ujit: output code uses invalidated cme %p", (void *)compile_time_cme);
rb_bug("yjit: output code uses invalidated cme %p", (void *)compile_time_cme);
}
bool callee_correct = false;
@ -117,7 +117,7 @@ check_cfunc_dispatch(VALUE receiver, struct rb_call_data *cd, void *callee, rb_c
}
}
if (!callee_correct) {
rb_bug("ujit: output code calls wrong method cd->cc->klass: %p", (void *)cd->cc->klass);
rb_bug("yjit: output code calls wrong method cd->cc->klass: %p", (void *)cd->cc->klass);
}
}
@ -137,12 +137,12 @@ cfunc_needs_frame(const rb_method_cfunc_t *cfunc)
}
// GC root for interacting with the GC
struct ujit_root_struct {
struct yjit_root_struct {
int unused; // empty structs are not legal in C99
};
static void
block_array_shuffle_remove(rb_ujit_block_array_t blocks, block_t *to_remove) {
block_array_shuffle_remove(rb_yjit_block_array_t blocks, block_t *to_remove) {
block_t **elem;
rb_darray_foreach(blocks, i, elem) {
if (*elem == to_remove) {
@ -162,12 +162,12 @@ add_lookup_dependency_i(st_data_t *key, st_data_t *value, st_data_t data, int ex
{
block_t *new_block = (block_t *)data;
rb_ujit_block_array_t blocks = NULL;
rb_yjit_block_array_t blocks = NULL;
if (existing) {
blocks = (rb_ujit_block_array_t)*value;
blocks = (rb_yjit_block_array_t)*value;
}
if (!rb_darray_append(&blocks, new_block)) {
rb_bug("ujit: failed to add method lookup dependency"); // TODO: we could bail out of compiling instead
rb_bug("yjit: failed to add method lookup dependency"); // TODO: we could bail out of compiling instead
}
*value = (st_data_t)blocks;
@ -210,7 +210,7 @@ assume_stable_global_constant_state(block_t *block) {
}
static int
ujit_root_mark_i(st_data_t k, st_data_t v, st_data_t ignore)
yjit_root_mark_i(st_data_t k, st_data_t v, st_data_t ignore)
{
// Lifetime notes: cc and cme get added in pairs into the table. One of
// them should become invalid before dying. When one of them invalidate we
@ -237,7 +237,7 @@ replace_all(st_data_t key, st_data_t value, st_data_t argp, int error)
// GC callback during compaction
static void
ujit_root_update_references(void *ptr)
yjit_root_update_references(void *ptr)
{
if (method_lookup_dependency) {
if (st_foreach_with_replace(method_lookup_dependency, replace_all, method_lookup_dep_table_update_keys, 0)) {
@ -245,26 +245,26 @@ ujit_root_update_references(void *ptr)
}
}
ujit_branches_update_references();
yjit_branches_update_references();
}
// GC callback during mark phase
static void
ujit_root_mark(void *ptr)
yjit_root_mark(void *ptr)
{
if (method_lookup_dependency) {
st_foreach(method_lookup_dependency, ujit_root_mark_i, 0);
st_foreach(method_lookup_dependency, yjit_root_mark_i, 0);
}
}
static void
ujit_root_free(void *ptr)
yjit_root_free(void *ptr)
{
// Do nothing. The root lives as long as the process.
}
static size_t
ujit_root_memsize(const void *ptr)
yjit_root_memsize(const void *ptr)
{
// Count off-gc-heap allocation size of the dependency table
return st_memsize(method_lookup_dependency); // TODO: more accurate accounting
@ -273,15 +273,15 @@ ujit_root_memsize(const void *ptr)
// Custom type for interacting with the GC
// TODO: compaction support
// TODO: make this write barrier protected
static const rb_data_type_t ujit_root_type = {
"ujit_root",
{ujit_root_mark, ujit_root_free, ujit_root_memsize, ujit_root_update_references},
static const rb_data_type_t yjit_root_type = {
"yjit_root",
{yjit_root_mark, yjit_root_free, yjit_root_memsize, yjit_root_update_references},
0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
// Callback when cme or cc become invalid
void
rb_ujit_method_lookup_change(VALUE cme_or_cc)
rb_yjit_method_lookup_change(VALUE cme_or_cc)
{
if (!method_lookup_dependency)
return;
@ -293,7 +293,7 @@ rb_ujit_method_lookup_change(VALUE cme_or_cc)
// Invalidate all regions that depend on the cme or cc
st_data_t key = (st_data_t)cme_or_cc, image;
if (st_delete(method_lookup_dependency, &key, &image)) {
rb_ujit_block_array_t array = (void *)image;
rb_yjit_block_array_t array = (void *)image;
block_t **elem;
rb_darray_foreach(array, i, elem) {
@ -312,7 +312,7 @@ remove_method_lookup_dependency(VALUE cc_or_cme, block_t *block)
{
st_data_t key = (st_data_t)cc_or_cme, image;
if (st_lookup(method_lookup_dependency, key, &image)) {
rb_ujit_block_array_t array = (void *)image;
rb_yjit_block_array_t array = (void *)image;
block_array_shuffle_remove(array, block);
@ -324,14 +324,14 @@ remove_method_lookup_dependency(VALUE cc_or_cme, block_t *block)
}
void
ujit_unlink_method_lookup_dependency(block_t *block)
yjit_unlink_method_lookup_dependency(block_t *block)
{
if (block->dependencies.cc) remove_method_lookup_dependency(block->dependencies.cc, block);
if (block->dependencies.cme) remove_method_lookup_dependency(block->dependencies.cme, block);
}
void
ujit_block_assumptions_free(block_t *block)
yjit_block_assumptions_free(block_t *block)
{
st_data_t as_st_data = (st_data_t)block;
if (blocks_assuming_stable_global_constant_state) {
@ -344,7 +344,7 @@ ujit_block_assumptions_free(block_t *block)
}
void
rb_ujit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec)
rb_yjit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec)
{
#if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
RB_VM_LOCK_ENTER();
@ -365,14 +365,14 @@ rb_ujit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec)
#endif
}
struct ujit_block_itr {
struct yjit_block_itr {
const rb_iseq_t *iseq;
VALUE list;
};
/* Get a list of the UJIT blocks associated with `rb_iseq` */
/* Get a list of the YJIT blocks associated with `rb_iseq` */
static VALUE
ujit_blocks_for(VALUE mod, VALUE rb_iseq)
yjit_blocks_for(VALUE mod, VALUE rb_iseq)
{
if (CLASS_OF(rb_iseq) != rb_cISeq) {
return rb_ary_new();
@ -381,13 +381,13 @@ ujit_blocks_for(VALUE mod, VALUE rb_iseq)
const rb_iseq_t *iseq = rb_iseqw_to_iseq(rb_iseq);
VALUE all_versions = rb_ary_new();
rb_darray_for(iseq->body->ujit_blocks, version_array_idx) {
rb_ujit_block_array_t versions = rb_darray_get(iseq->body->ujit_blocks, version_array_idx);
rb_darray_for(iseq->body->yjit_blocks, version_array_idx) {
rb_yjit_block_array_t versions = rb_darray_get(iseq->body->yjit_blocks, version_array_idx);
rb_darray_for(versions, block_idx) {
block_t *block = rb_darray_get(versions, block_idx);
VALUE rb_block = TypedData_Wrap_Struct(cUjitBlock, &ujit_block_type, block);
VALUE rb_block = TypedData_Wrap_Struct(cYjitBlock, &yjit_block_type, block);
rb_ary_push(all_versions, rb_block);
}
}
@ -395,22 +395,22 @@ ujit_blocks_for(VALUE mod, VALUE rb_iseq)
return all_versions;
}
/* Get the address of the code associated with a UJIT::Block */
/* Get the address of the code associated with a YJIT::Block */
static VALUE
block_address(VALUE self)
{
block_t * block;
TypedData_Get_Struct(self, block_t, &ujit_block_type, block);
TypedData_Get_Struct(self, block_t, &yjit_block_type, block);
uint8_t* code_addr = cb_get_ptr(cb, block->start_pos);
return LONG2NUM((intptr_t)code_addr);
}
/* Get the machine code for UJIT::Block as a binary string */
/* Get the machine code for YJIT::Block as a binary string */
static VALUE
block_code(VALUE self)
{
block_t * block;
TypedData_Get_Struct(self, block_t, &ujit_block_type, block);
TypedData_Get_Struct(self, block_t, &yjit_block_type, block);
return (VALUE)rb_str_new(
(const char*)cb->mem_block + block->start_pos,
@ -419,30 +419,30 @@ block_code(VALUE self)
}
/* Get the start index in the Instruction Sequence that corresponds to this
* UJIT::Block */
* YJIT::Block */
static VALUE
iseq_start_index(VALUE self)
{
block_t * block;
TypedData_Get_Struct(self, block_t, &ujit_block_type, block);
TypedData_Get_Struct(self, block_t, &yjit_block_type, block);
return INT2NUM(block->blockid.idx);
}
/* Get the end index in the Instruction Sequence that corresponds to this
* UJIT::Block */
* YJIT::Block */
static VALUE
iseq_end_index(VALUE self)
{
block_t * block;
TypedData_Get_Struct(self, block_t, &ujit_block_type, block);
TypedData_Get_Struct(self, block_t, &yjit_block_type, block);
return INT2NUM(block->end_idx);
}
/* Called when a basic operation is redefined */
void
rb_ujit_bop_redefined(VALUE klass, const rb_method_entry_t *me, enum ruby_basic_operators bop)
rb_yjit_bop_redefined(VALUE klass, const rb_method_entry_t *me, enum ruby_basic_operators bop)
{
//fprintf(stderr, "bop redefined\n");
}
@ -456,7 +456,7 @@ block_invalidation_iterator(st_data_t key, st_data_t value, st_data_t data) {
/* Called when the constant state changes */
void
rb_ujit_constant_state_changed(void)
rb_yjit_constant_state_changed(void)
{
if (blocks_assuming_stable_global_constant_state) {
st_foreach(blocks_assuming_stable_global_constant_state, block_invalidation_iterator, 0);
@ -464,7 +464,7 @@ rb_ujit_constant_state_changed(void)
}
void
rb_ujit_before_ractor_spawn(void)
rb_yjit_before_ractor_spawn(void)
{
if (blocks_assuming_single_ractor_mode) {
st_foreach(blocks_assuming_single_ractor_mode, block_invalidation_iterator, 0);
@ -472,29 +472,29 @@ rb_ujit_before_ractor_spawn(void)
}
#if HAVE_LIBCAPSTONE
static const rb_data_type_t ujit_disasm_type = {
"UJIT/Disasm",
static const rb_data_type_t yjit_disasm_type = {
"YJIT/Disasm",
{0, (void(*)(void *))cs_close, 0, },
0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
static VALUE
ujit_disasm_init(VALUE klass)
yjit_disasm_init(VALUE klass)
{
csh * handle;
VALUE disasm = TypedData_Make_Struct(klass, csh, &ujit_disasm_type, handle);
VALUE disasm = TypedData_Make_Struct(klass, csh, &yjit_disasm_type, handle);
cs_open(CS_ARCH_X86, CS_MODE_64, handle);
return disasm;
}
static VALUE
ujit_disasm(VALUE self, VALUE code, VALUE from)
yjit_disasm(VALUE self, VALUE code, VALUE from)
{
size_t count;
csh * handle;
cs_insn *insns;
TypedData_Get_Struct(self, csh, &ujit_disasm_type, handle);
TypedData_Get_Struct(self, csh, &yjit_disasm_type, handle);
count = cs_disasm(*handle, (uint8_t*)StringValuePtr(code), RSTRING_LEN(code), NUM2INT(from), 0, &insns);
VALUE insn_list = rb_ary_new_capa(count);
@ -502,7 +502,7 @@ ujit_disasm(VALUE self, VALUE code, VALUE from)
VALUE vals = rb_ary_new_from_args(3, LONG2NUM(insns[i].address),
rb_str_new2(insns[i].mnemonic),
rb_str_new2(insns[i].op_str));
rb_ary_push(insn_list, rb_struct_alloc(cUjitDisasmInsn, vals));
rb_ary_push(insn_list, rb_struct_alloc(cYjitDisasmInsn, vals));
}
cs_free(insns, count);
return insn_list;
@ -512,27 +512,27 @@ ujit_disasm(VALUE self, VALUE code, VALUE from)
static VALUE
at_exit_print_stats(RB_BLOCK_CALL_FUNC_ARGLIST(yieldarg, data))
{
// Defined in ujit.rb
rb_funcall(mUjit, rb_intern("_print_stats"), 0);
// Defined in yjit.rb
rb_funcall(mYjit, rb_intern("_print_stats"), 0);
return Qnil;
}
// Primitive called in ujit.rb. Export all runtime counters as a Ruby hash.
// Primitive called in yjit.rb. Export all runtime counters as a Ruby hash.
static VALUE
get_stat_counters(rb_execution_context_t *ec, VALUE self)
{
#if RUBY_DEBUG
if (!rb_ujit_opts.gen_stats) return Qnil;
if (!rb_yjit_opts.gen_stats) return Qnil;
VALUE hash = rb_hash_new();
RB_VM_LOCK_ENTER();
{
int64_t *counter_reader = (int64_t *)&ujit_runtime_counters;
int64_t *counter_reader_end = &ujit_runtime_counters.last_member;
int64_t *counter_reader = (int64_t *)&yjit_runtime_counters;
int64_t *counter_reader_end = &yjit_runtime_counters.last_member;
// Iterate through comma separated counter name list
char *name_reader = ujit_counter_names;
char *counter_name_end = ujit_counter_names + sizeof(ujit_counter_names);
char *name_reader = yjit_counter_names;
char *counter_name_end = yjit_counter_names + sizeof(yjit_counter_names);
while (name_reader < counter_name_end && counter_reader < counter_reader_end) {
if (*name_reader == ',' || *name_reader == ' ') {
name_reader++;
@ -564,7 +564,7 @@ get_stat_counters(rb_execution_context_t *ec, VALUE self)
#endif // if RUBY_DEBUG
}
// Primitive called in ujit.rb. Zero out all the counters.
// Primitive called in yjit.rb. Zero out all the counters.
static VALUE
reset_stats_bang(rb_execution_context_t *ec, VALUE self)
{
@ -572,24 +572,24 @@ reset_stats_bang(rb_execution_context_t *ec, VALUE self)
vm_insns_count = 0;
rb_compiled_iseq_count = 0;
memset(&exit_op_count, 0, sizeof(exit_op_count));
memset(&ujit_runtime_counters, 0, sizeof(ujit_runtime_counters));
memset(&yjit_runtime_counters, 0, sizeof(yjit_runtime_counters));
#endif // if RUBY_DEBUG
return Qnil;
}
#include "ujit.rbinc"
#include "yjit.rbinc"
#if RUBY_DEBUG
// implementation for --ujit-stats
// implementation for --yjit-stats
void
rb_ujit_collect_vm_usage_insn(int insn)
rb_yjit_collect_vm_usage_insn(int insn)
{
vm_insns_count++;
}
const VALUE *
rb_ujit_count_side_exit_op(const VALUE *exit_pc)
rb_yjit_count_side_exit_op(const VALUE *exit_pc)
{
int insn = rb_vm_insn_addr2opcode((const void *)*exit_pc);
exit_op_count[insn]++;
@ -657,31 +657,31 @@ print_insn_count_buffer(const struct insn_count *buffer, int how_many, int left_
__attribute__((destructor))
static void
print_ujit_stats(void)
print_yjit_stats(void)
{
if (!rb_ujit_opts.gen_stats) return;
if (!rb_yjit_opts.gen_stats) return;
const struct insn_count *sorted_exit_ops = sort_insn_count_array(exit_op_count);
double total_insns_count = vm_insns_count + ujit_runtime_counters.exec_instruction;
double ratio = ujit_runtime_counters.exec_instruction / total_insns_count;
double total_insns_count = vm_insns_count + yjit_runtime_counters.exec_instruction;
double ratio = yjit_runtime_counters.exec_instruction / total_insns_count;
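// Worked example: if vm_insns_count is 900 and exec_instruction is 100, then
// total_insns_count is 1000.0 and ratio_in_yjit is printed as 10.0%.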
fprintf(stderr, "compiled_iseq_count: %10" PRId64 "\n", rb_compiled_iseq_count);
fprintf(stderr, "main_block_code_size: %6.1f MiB\n", ((double)cb->write_pos) / 1048576.0);
fprintf(stderr, "side_block_code_size: %6.1f MiB\n", ((double)ocb->write_pos) / 1048576.0);
fprintf(stderr, "vm_insns_count: %10" PRId64 "\n", vm_insns_count);
fprintf(stderr, "ujit_exec_insns_count: %10" PRId64 "\n", ujit_runtime_counters.exec_instruction);
fprintf(stderr, "ratio_in_ujit: %9.1f%%\n", ratio * 100);
fprintf(stderr, "yjit_exec_insns_count: %10" PRId64 "\n", yjit_runtime_counters.exec_instruction);
fprintf(stderr, "ratio_in_yjit: %9.1f%%\n", ratio * 100);
print_insn_count_buffer(sorted_exit_ops, 10, 4);
//print_runtime_counters();
}
#endif // if RUBY_DEBUG
void
rb_ujit_iseq_mark(const struct rb_iseq_constant_body *body)
rb_yjit_iseq_mark(const struct rb_iseq_constant_body *body)
{
rb_darray_for(body->ujit_blocks, version_array_idx) {
rb_ujit_block_array_t version_array = rb_darray_get(body->ujit_blocks, version_array_idx);
rb_darray_for(body->yjit_blocks, version_array_idx) {
rb_yjit_block_array_t version_array = rb_darray_get(body->yjit_blocks, version_array_idx);
rb_darray_for(version_array, block_idx) {
block_t *block = rb_darray_get(version_array, block_idx);
@ -705,10 +705,10 @@ rb_ujit_iseq_mark(const struct rb_iseq_constant_body *body)
}
void
rb_ujit_iseq_update_references(const struct rb_iseq_constant_body *body)
rb_yjit_iseq_update_references(const struct rb_iseq_constant_body *body)
{
rb_darray_for(body->ujit_blocks, version_array_idx) {
rb_ujit_block_array_t version_array = rb_darray_get(body->ujit_blocks, version_array_idx);
rb_darray_for(body->yjit_blocks, version_array_idx) {
rb_yjit_block_array_t version_array = rb_darray_get(body->yjit_blocks, version_array_idx);
rb_darray_for(version_array, block_idx) {
block_t *block = rb_darray_get(version_array, block_idx);
@ -736,83 +736,83 @@ rb_ujit_iseq_update_references(const struct rb_iseq_constant_body *body)
}
}
// Free the yjit resources associated with an iseq
void
rb_ujit_iseq_free(const struct rb_iseq_constant_body *body)
rb_yjit_iseq_free(const struct rb_iseq_constant_body *body)
{
rb_darray_for(body->ujit_blocks, version_array_idx) {
rb_ujit_block_array_t version_array = rb_darray_get(body->ujit_blocks, version_array_idx);
rb_darray_for(body->yjit_blocks, version_array_idx) {
rb_yjit_block_array_t version_array = rb_darray_get(body->yjit_blocks, version_array_idx);
rb_darray_for(version_array, block_idx) {
block_t *block = rb_darray_get(version_array, block_idx);
ujit_free_block(block);
yjit_free_block(block);
}
rb_darray_free(version_array);
}
rb_darray_free(body->ujit_blocks);
rb_darray_free(body->yjit_blocks);
}
bool rb_ujit_enabled_p(void)
bool rb_yjit_enabled_p(void)
{
return rb_ujit_opts.ujit_enabled;
return rb_yjit_opts.yjit_enabled;
}
unsigned rb_ujit_call_threshold(void)
unsigned rb_yjit_call_threshold(void)
{
return rb_ujit_opts.call_threshold;
return rb_yjit_opts.call_threshold;
}
void
rb_ujit_init(struct rb_ujit_options *options)
rb_yjit_init(struct rb_yjit_options *options)
{
if (!ujit_scrape_successful || !PLATFORM_SUPPORTED_P)
if (!yjit_scrape_successful || !PLATFORM_SUPPORTED_P)
{
return;
}
rb_ujit_opts = *options;
rb_ujit_opts.ujit_enabled = true;
rb_yjit_opts = *options;
rb_yjit_opts.yjit_enabled = true;
// Normalize options
if (rb_ujit_opts.call_threshold < 1) {
rb_ujit_opts.call_threshold = 2;
if (rb_yjit_opts.call_threshold < 1) {
rb_yjit_opts.call_threshold = 2;
}
blocks_assuming_stable_global_constant_state = st_init_numtable();
blocks_assuming_single_ractor_mode = st_init_numtable();
ujit_init_core();
ujit_init_codegen();
yjit_init_core();
yjit_init_codegen();
// UJIT Ruby module
mUjit = rb_define_module("UJIT");
rb_define_module_function(mUjit, "install_entry", ujit_install_entry, 1);
rb_define_module_function(mUjit, "blocks_for", ujit_blocks_for, 1);
// YJIT Ruby module
mYjit = rb_define_module("YJIT");
rb_define_module_function(mYjit, "blocks_for", yjit_blocks_for, 1);
// UJIT::Block (block version, code block)
cUjitBlock = rb_define_class_under(mUjit, "Block", rb_cObject);
rb_define_method(cUjitBlock, "address", block_address, 0);
rb_define_method(cUjitBlock, "code", block_code, 0);
rb_define_method(cUjitBlock, "iseq_start_index", iseq_start_index, 0);
rb_define_method(cUjitBlock, "iseq_end_index", iseq_end_index, 0);
// YJIT::Block (block version, code block)
cYjitBlock = rb_define_class_under(mYjit, "Block", rb_cObject);
rb_define_method(cYjitBlock, "address", block_address, 0);
rb_define_method(cYjitBlock, "code", block_code, 0);
rb_define_method(cYjitBlock, "iseq_start_index", iseq_start_index, 0);
rb_define_method(cYjitBlock, "iseq_end_index", iseq_end_index, 0);
// UJIT disassembler interface
// YJIT disassembler interface
#if HAVE_LIBCAPSTONE
cUjitDisasm = rb_define_class_under(mUjit, "Disasm", rb_cObject);
rb_define_alloc_func(cUjitDisasm, ujit_disasm_init);
rb_define_method(cUjitDisasm, "disasm", ujit_disasm, 2);
cUjitDisasmInsn = rb_struct_define_under(cUjitDisasm, "Insn", "address", "mnemonic", "op_str", NULL);
cYjitDisasm = rb_define_class_under(mYjit, "Disasm", rb_cObject);
rb_define_alloc_func(cYjitDisasm, yjit_disasm_init);
rb_define_method(cYjitDisasm, "disasm", yjit_disasm, 2);
cYjitDisasmInsn = rb_struct_define_under(cYjitDisasm, "Insn", "address", "mnemonic", "op_str", NULL);
#endif
if (RUBY_DEBUG && rb_ujit_opts.gen_stats) {
if (RUBY_DEBUG && rb_yjit_opts.gen_stats) {
// Setup at_exit callback for printing out counters
rb_block_call(rb_mKernel, rb_intern("at_exit"), 0, NULL, at_exit_print_stats, Qfalse);
}
// Initialize the GC hooks
method_lookup_dependency = st_init_numtable();
struct ujit_root_struct *root;
VALUE ujit_root = TypedData_Make_Struct(0, struct ujit_root_struct, &ujit_root_type, root);
rb_gc_register_mark_object(ujit_root);
struct yjit_root_struct *root;
VALUE yjit_root = TypedData_Make_Struct(0, struct yjit_root_struct, &yjit_root_type, root);
rb_gc_register_mark_object(yjit_root);
}

ujit_iface.h → yjit_iface.h

@ -1,10 +1,10 @@
//
// These are definitions uJIT uses to interface with the CRuby codebase,
// but which are only used internally by uJIT.
// These are definitions YJIT uses to interface with the CRuby codebase,
// but which are only used internally by YJIT.
//
#ifndef UJIT_IFACE_H
#define UJIT_IFACE_H 1
#ifndef YJIT_IFACE_H
#define YJIT_IFACE_H 1
#include "stddef.h"
#include "stdint.h"
@ -14,19 +14,19 @@
#include "vm_core.h"
#include "vm_callinfo.h"
#include "builtin.h"
#include "ujit_core.h"
#include "yjit_core.h"
#ifndef rb_callcache
struct rb_callcache;
#define rb_callcache rb_callcache
#endif
#define UJIT_DECLARE_COUNTERS(...) struct rb_ujit_runtime_counters { \
#define YJIT_DECLARE_COUNTERS(...) struct rb_yjit_runtime_counters { \
int64_t __VA_ARGS__; \
}; \
static char ujit_counter_names[] = #__VA_ARGS__;
static char yjit_counter_names[] = #__VA_ARGS__;
UJIT_DECLARE_COUNTERS(
YJIT_DECLARE_COUNTERS(
exec_instruction,
oswb_callsite_not_simple,
@ -62,11 +62,11 @@ UJIT_DECLARE_COUNTERS(
last_member
)
#undef UJIT_DECLARE_COUNTERS
#undef YJIT_DECLARE_COUNTERS
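// For illustration only: expanding the macro above for two hypothetical
// counters, YJIT_DECLARE_COUNTERS(foo, bar), would produce roughly:
//
//   struct rb_yjit_runtime_counters { int64_t foo, bar; };
//   static char yjit_counter_names[] = "foo, bar";
//
// get_stat_counters() in yjit_iface.c walks yjit_counter_names in step with
// the struct's int64_t members to build the stats hash.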
RUBY_EXTERN struct rb_ujit_options rb_ujit_opts;
RUBY_EXTERN struct rb_yjit_options rb_yjit_opts;
RUBY_EXTERN int64_t rb_compiled_iseq_count;
RUBY_EXTERN struct rb_ujit_runtime_counters ujit_runtime_counters;
RUBY_EXTERN struct rb_yjit_runtime_counters yjit_runtime_counters;
void cb_write_pre_call_bytes(codeblock_t* cb);
void cb_write_post_call_bytes(codeblock_t* cb);
@ -83,9 +83,9 @@ RBIMPL_ATTR_NODISCARD() bool assume_single_ractor_mode(block_t *block);
RBIMPL_ATTR_NODISCARD() bool assume_stable_global_constant_state(block_t *block);
// this function *must* return passed exit_pc
const VALUE *rb_ujit_count_side_exit_op(const VALUE *exit_pc);
const VALUE *rb_yjit_count_side_exit_op(const VALUE *exit_pc);
void ujit_unlink_method_lookup_dependency(block_t *block);
void ujit_block_assumptions_free(block_t *block);
void yjit_unlink_method_lookup_dependency(block_t *block);
void yjit_block_assumptions_free(block_t *block);
#endif // #ifndef UJIT_IFACE_H
#endif // #ifndef YJIT_IFACE_H

ujit_utils.c → yjit_utils.c

@ -1,8 +1,8 @@
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include "ujit_utils.h"
#include "ujit_asm.h"
#include "yjit_utils.h"
#include "yjit_asm.h"
// Save caller-save registers on the stack before a C call
void push_regs(codeblock_t* cb)

ujit_utils.h → yjit_utils.h

@ -1,10 +1,10 @@
#ifndef UJIT_UTILS_H
#define UJIT_UTILS_H 1
#ifndef YJIT_UTILS_H
#define YJIT_UTILS_H 1
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include "ujit_asm.h"
#include "yjit_asm.h"
void push_regs(codeblock_t* cb);
void pop_regs(codeblock_t* cb);
@ -12,4 +12,4 @@ void print_int(codeblock_t* cb, x86opnd_t opnd);
void print_ptr(codeblock_t* cb, x86opnd_t opnd);
void print_str(codeblock_t* cb, const char* str);
#endif // #ifndef UJIT_UTILS_H
#endif // #ifndef YJIT_UTILS_H