mirror of
				https://github.com/ruby/ruby.git
				synced 2022-11-09 12:17:21 -05:00 
			
		
		
		
	This patch contains several ideas:
(1) Disposable inline method cache (IMC) for race-free inline method cache
    * Making call-cache (CC) as a RVALUE (GC target object) and allocate new
      CC on cache miss.
    * This technique allows race-free access from parallel processing
      elements like RCU.
(2) Introduce per-Class method cache (pCMC)
    * Instead of fixed-size global method cache (GMC), pCMC allows flexible
      cache size.
    * Caching CCs reduces CC allocation and allows sharing a CC's fast path
      between call-sites with the same call-info (CI).
(3) Invalidate an inline method cache by invalidating corresponding method
    entries (MEs)
    * Instead of using class serials, we set "invalidated" flag for method
      entry itself to represent cache invalidation.
    * Compared with using class serials, the impact of method modification
      (add/overwrite/delete) is small.
    * Updating class serials invalidates all method caches of the class and
      its sub-classes.
    * The proposed approach invalidates the method cache of only one ME.
See [Feature #16614] for more details.
		
	
			
		
			
				
	
	
		
			73 lines
		
	
	
	
		
			1.7 KiB
		
	
	
	
		
			Ruby
		
	
	
	
	
	
			
		
		
	
	
			73 lines
		
	
	
	
		
			1.7 KiB
		
	
	
	
		
			Ruby
		
	
	
	
	
	
 | 
						|
# Emit the header of the generated C source: the editor mode line, a
# do-not-edit warning, and the `#if 1` guard that lets the whole
# optimization be switched off by editing the generated file.
puts <<HEADER
/* -*- c -*- */
#if 1 /* enable or disable this optimization */

/* DO NOT EDIT THIS FILE DIRECTLY
 *
 * This file is generated by tool/mk_call_iseq_optimized.rb
 */

HEADER

# One specialized handler is generated for every combination of a
# parameter count in P and a local-variable count in L.
P = Range.new(0, 3)
L = Range.new(0, 5)
 | 
						|
 | 
						|
# Name of the specialized call handler generated for a method with
# `param` parameters and `local` local variables.
def fname(param, local)
  format('vm_call_iseq_setup_normal_0start_%sparams_%slocals', param, local)
end
 | 
						|
 | 
						|
# Emit one specialized static function per (param, local) pair.  Each
# body just forwards to vm_call_iseq_setup_normal with the two counts
# baked in as compile-time constants (enabling constant folding in C).
P.each do |param|
  L.each do |local|
    puts <<FUNC
static VALUE
#{fname(param, local)}(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd)
{
    RB_DEBUG_COUNTER_INC(ccf_iseq_fix);
    return vm_call_iseq_setup_normal(ec, cfp, calling, vm_cc_cme(cd->cc), 0, #{param}, #{local});
}

FUNC
  end
end
 | 
						|
 | 
						|
# Emit the two-dimensional dispatch table plus the selector function
# vm_call_iseq_setup_func.  The `#else` branch supplies a fallback
# selector used when the `#if 1` guard above is switched off.
handler_rows = P.map { |param|
  '{' + L.map { |local| fname(param, local) }.join(",\n ") + '}'
}.join(",\n")

puts <<TABLE
/* vm_call_iseq_handlers[param][local] */
static const vm_call_handler vm_call_iseq_handlers[][#{L.size}] = {
#{handler_rows}
};

static inline vm_call_handler
vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size)
{
    if (UNLIKELY(vm_ci_flag(ci) & VM_CALL_TAILCALL)) {
	return &vm_call_iseq_setup_tailcall_0start;
    }
    else if (0) { /* to disable optimize */
        return &vm_call_iseq_setup_normal_0start;
    }
    else {
	if (param_size <= #{P.end} &&
	    local_size <= #{L.end}) {
	    VM_ASSERT(local_size >= 0);
	    return vm_call_iseq_handlers[param_size][local_size];
	}
	return &vm_call_iseq_setup_normal_0start;
    }
}

#else

static inline vm_call_handler
vm_call_iseq_setup_func(const struct rb_callinfo *ci, const int param_size, const int local_size)
{
    if (UNLIKELY(vm_ci_flag(ci) & VM_CALL_TAILCALL)) {
	return &vm_call_iseq_setup_tailcall_0start;
    }
    else {
        return &vm_call_iseq_setup_normal_0start;
    }
}
#endif
TABLE
 |