mirror of
https://github.com/ruby/ruby.git
synced 2022-11-09 12:17:21 -05:00
d46ab95376
These send and its variant instructions are the most frequently called paths in the entire process. Reducing macro expansions to make them dedicated function called vm_sendish() is the main goal of this changeset. It reduces the size of vm_exec_core from 25,552 bytes to 23,728 bytes on my machine. I see no significant slowdown. Fix: [GH-2056] vanilla: ruby 2.6.0dev (2018-12-19 trunk 66449) [x86_64-darwin15] ours: ruby 2.6.0dev (2018-12-19 refactor-send 66449) [x86_64-darwin15] last_commit=insns.def: refactor to avoid CALL_METHOD macro Calculating ------------------------------------- vanilla ours vm2_defined_method 2.645M 2.823M i/s - 6.000M times in 5.109888s 4.783254s vm2_method 8.553M 8.873M i/s - 6.000M times in 1.579892s 1.524026s vm2_method_missing 3.772M 3.858M i/s - 6.000M times in 3.579482s 3.499220s vm2_method_with_block 8.494M 8.944M i/s - 6.000M times in 1.589774s 1.509463s vm2_poly_method 0.571 0.607 i/s - 1.000 times in 3.947570s 3.733528s vm2_poly_method_ov 5.514 5.168 i/s - 1.000 times in 0.408156s 0.436169s vm3_clearmethodcache 2.875 2.837 i/s - 1.000 times in 0.783018s 0.793493s Comparison: vm2_defined_method ours: 2822555.4 i/s vanilla: 2644878.1 i/s - 1.07x slower vm2_method ours: 8872947.8 i/s vanilla: 8553433.1 i/s - 1.04x slower vm2_method_missing ours: 3858192.3 i/s vanilla: 3772296.3 i/s - 1.02x slower vm2_method_with_block ours: 8943825.1 i/s vanilla: 8493955.0 i/s - 1.05x slower vm2_poly_method ours: 0.6 i/s vanilla: 0.6 i/s - 1.06x slower vm2_poly_method_ov vanilla: 5.5 i/s ours: 5.2 i/s - 1.07x slower vm3_clearmethodcache vanilla: 2.9 i/s ours: 2.8 i/s - 1.01x slower git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@66565 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
86 lines
3.5 KiB
Text
% # -*- mode:c; style:ruby; coding: utf-8; indent-tabs-mode: nil -*-
% # Copyright (c) 2018 Takashi Kokubun.  All rights reserved.
% #
% # This file is a part of the programming language Ruby.  Permission is hereby
% # granted, to either redistribute and/or modify this file, provided that the
% # conditions mentioned in the file COPYING are met.  Consult the file for
% # details.
%
% # Emits into the C stream `f` the JIT-compiled body for one VM instruction
% # `insn`. `b` carries the branch being compiled (b->stack_size, b->finish_p),
% # `status` holds per-ISeq compile options (e.g. status->local_stack_p), and
% # `pos`/`next_pos` are the instruction's position in the ISeq body.
        fprintf(f, "{\n");
        {
            MAYBE_UNUSED(int pc_moved_p) = FALSE;
%
% # compiler: Prepare operands which may be used by `insn.call_attribute`
% insn.opes.each_with_index do |ope, i|
            MAYBE_UNUSED(<%= ope.fetch(:decl) %>) = (<%= ope.fetch(:type) %>)operands[<%= i %>];
% end
%
% # JIT: Declare stack_size to be used in some macro of _mjit_compile_insn_body.erb
        if (status->local_stack_p) {
            fprintf(f, "    MAYBE_UNUSED(unsigned int) stack_size = %u;\n", b->stack_size);
        }
%
% # JIT: Declare variables for operands, popped values and return values
% insn.declarations.each do |decl|
        fprintf(f, "    <%= decl %>;\n");
% end
%
% # JIT: Set const expressions for `RubyVM::OperandsUnifications` insn
% insn.preamble.each do |amble|
        fprintf(f, "<%= amble.expr %>\n");
% end
%
% # JIT: Initialize operands
% insn.opes.each_with_index do |ope, i|
        fprintf(f, "    <%= ope.fetch(:name) %> = (<%= ope.fetch(:type) %>)0x%"PRIxVALUE";", operands[<%= i %>]);
% # For ID-like operands, append a human-readable method-name comment.
% case ope.fetch(:type)
% when 'ID'
        comment_id(f, (ID)operands[<%= i %>]);
% when 'CALL_INFO'
        comment_id(f, ((CALL_INFO)operands[<%= i %>])->mid);
% when 'VALUE'
        if (SYMBOL_P((VALUE)operands[<%= i %>])) comment_id(f, SYM2ID((VALUE)operands[<%= i %>]));
% end
        fprintf(f, "\n");
% end
%
% # JIT: Initialize popped values
% insn.pops.reverse_each.with_index.reverse_each do |pop, i|
        fprintf(f, "    <%= pop.fetch(:name) %> = stack[%d];\n", b->stack_size - <%= i + 1 %>);
% end
%
% # JIT: move sp and pc if necessary
<%= render 'mjit_compile_pc_and_sp', locals: { insn: insn } -%>
%
% # JIT: Print insn body in insns.def
<%= render 'mjit_compile_insn_body', locals: { insn: insn } -%>
%
% # JIT: Set return values
% insn.rets.reverse_each.with_index do |ret, i|
%   # TOPN(n) = ...
        fprintf(f, "    stack[%d] = <%= ret.fetch(:name) %>;\n", b->stack_size + (int)<%= insn.call_attribute('sp_inc') %> - <%= i + 1 %>);
% end
%
% # JIT: We should evaluate ISeq modified for TracePoint if it's enabled. Note: This is slow.
% unless insn.always_leaf?
        fprintf(f, "    if (UNLIKELY(ruby_vm_event_enabled_global_flags & ISEQ_TRACE_EVENTS)) {\n");
        fprintf(f, "        reg_cfp->sp = (VALUE *)reg_cfp->bp + %d;\n", b->stack_size + (int)<%= insn.call_attribute('sp_inc') %> + 1);
        if (!pc_moved_p) {
            fprintf(f, "        reg_cfp->pc = original_body_iseq + %d;\n", next_pos);
        }
        fprintf(f, "        goto cancel;\n");
        fprintf(f, "    }\n");
% end
%
% # compiler: Move JIT compiler's internal stack pointer
        b->stack_size += <%= insn.call_attribute('sp_inc') %>;
    }
    fprintf(f, "}\n");
%
% # compiler: If insn has conditional JUMP, the branch which is not targeted by JUMP should be compiled too.
% if insn.expr.expr =~ /if\s+\([^{}]+\)\s+\{[^{}]+JUMP\([^)]+\);[^{}]+\}/
    compile_insns(f, body, b->stack_size, pos + insn_len(insn), status);
% end
%
% # compiler: If insn returns (leave) or does longjmp (throw), the branch should no longer be compiled. TODO: create attr for it?
% if insn.expr.expr =~ /\sTHROW_EXCEPTION\([^)]+\);/ || insn.expr.expr =~ /\bvm_pop_frame\(/
    b->finish_p = TRUE;
% end