diff --git a/test/ruby/test_yjit.rb b/test/ruby/test_yjit.rb
index d740870736..57a60db89b 100644
--- a/test/ruby/test_yjit.rb
+++ b/test/ruby/test_yjit.rb
@@ -830,10 +830,10 @@ class TestYJIT < Test::Unit::TestCase
   def test_code_gc
     assert_compiles(code_gc_helpers + <<~'RUBY', exits: :any, result: :ok)
       return :not_paged unless add_pages(100) # prepare freeable pages
-      code_gc # first code GC
+      RubyVM::YJIT.code_gc # first code GC
       return :not_compiled1 unless compiles { nil } # should be JITable again
 
-      code_gc # second code GC
+      RubyVM::YJIT.code_gc # second code GC
       return :not_compiled2 unless compiles { nil } # should be JITable again
 
       code_gc_count = RubyVM::YJIT.runtime_stats[:code_gc_count]
@@ -854,7 +854,7 @@ class TestYJIT < Test::Unit::TestCase
 
       return :not_paged1 unless add_pages(400) # go to a page without initial ocb code
       return :broken_resume1 if fiber.resume != 0 # JIT the fiber
-      code_gc # first code GC, which should not free the fiber page
+      RubyVM::YJIT.code_gc # first code GC, which should not free the fiber page
       return :broken_resume2 if fiber.resume != 0 # The code should be still callable
 
       code_gc_count = RubyVM::YJIT.runtime_stats[:code_gc_count]
@@ -873,19 +873,19 @@ class TestYJIT < Test::Unit::TestCase
 
       return :not_paged1 unless add_pages(400) # go to a page without initial ocb code
       return :broken_resume1 if fiber.resume(true) != 0 # JIT the fiber
-      code_gc # first code GC, which should not free the fiber page
+      RubyVM::YJIT.code_gc # first code GC, which should not free the fiber page
 
       return :not_paged2 unless add_pages(300) # add some stuff to be freed
 
       # Not calling fiber.resume here to test the case that the YJIT payload loses some
       # information at the previous code GC. The payload should still be there, and
       # thus we could know the fiber ISEQ is still on stack on this second code GC.
-      code_gc # second code GC, which should still not free the fiber page
+      RubyVM::YJIT.code_gc # second code GC, which should still not free the fiber page
       return :not_paged3 unless add_pages(200) # attempt to overwrite the fiber page (it shouldn't)
 
       return :broken_resume2 if fiber.resume(true) != 0 # The fiber code should be still fine
       return :broken_resume3 if fiber.resume(false) != nil # terminate the fiber
-      code_gc # third code GC, freeing a page that used to be on stack
+      RubyVM::YJIT.code_gc # third code GC, freeing a page that used to be on stack
 
       return :not_paged4 unless add_pages(100) # check everything still works
 
@@ -933,11 +933,6 @@ class TestYJIT < Test::Unit::TestCase
       num_jits.times { return false unless eval('compiles { nil.to_i }') }
       pages.nil? || pages < RubyVM::YJIT.runtime_stats[:compiled_page_count]
     end
-
-    def code_gc
-      RubyVM::YJIT.simulate_oom! # bump write_pos
-      eval('proc { nil }.call') # trigger code GC
-    end
   RUBY
 end
 
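
# Note: the test changes above drop the local `code_gc` helper (which forced a code GC
# by simulating OOM) and call the new public RubyVM::YJIT.code_gc directly. The snippet
# below is only a rough sketch of the same kind of check outside the assert_compiles
# harness. It assumes ruby is started with --yjit --yjit-stats so that
# RubyVM::YJIT.runtime_stats returns the counters referenced in this patch.
stats_before = RubyVM::YJIT.runtime_stats
RubyVM::YJIT.code_gc # free all JIT code; it is regenerated lazily afterwards
stats_after = RubyVM::YJIT.runtime_stats
if stats_before && stats_after
  puts "code_gc_count: #{stats_before[:code_gc_count]} -> #{stats_after[:code_gc_count]}"
end
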
diff --git a/yjit.c b/yjit.c
index c53444d5a3..d7f369ca2e 100644
--- a/yjit.c
+++ b/yjit.c
@@ -1053,6 +1053,7 @@ VALUE rb_yjit_get_stats(rb_execution_context_t *ec, VALUE self);
 VALUE rb_yjit_reset_stats_bang(rb_execution_context_t *ec, VALUE self);
 VALUE rb_yjit_disasm_iseq(rb_execution_context_t *ec, VALUE self, VALUE iseq);
 VALUE rb_yjit_insns_compiled(rb_execution_context_t *ec, VALUE self, VALUE iseq);
+VALUE rb_yjit_code_gc(rb_execution_context_t *ec, VALUE self);
 VALUE rb_yjit_simulate_oom_bang(rb_execution_context_t *ec, VALUE self);
 VALUE rb_yjit_get_exit_locations(rb_execution_context_t *ec, VALUE self);
 
diff --git a/yjit.rb b/yjit.rb
index 2a0b3dc6c6..ac49a30e90 100644
--- a/yjit.rb
+++ b/yjit.rb
@@ -162,6 +162,11 @@ module RubyVM::YJIT
     end
   end
 
+  # Free and recompile all existing JIT code
+  def self.code_gc
+    Primitive.rb_yjit_code_gc
+  end
+
   def self.simulate_oom!
     Primitive.rb_yjit_simulate_oom_bang
   end
@@ -214,14 +219,14 @@ module RubyVM::YJIT
     $stderr.puts "compilation_failure:  " + ("%10d" % compilation_failure) if compilation_failure != 0
     $stderr.puts "compiled_block_count: " + ("%10d" % stats[:compiled_block_count])
     $stderr.puts "compiled_iseq_count:  " + ("%10d" % stats[:compiled_iseq_count])
-    $stderr.puts "compiled_page_count:  " + ("%10d" % stats[:compiled_page_count])
     $stderr.puts "freed_iseq_count:     " + ("%10d" % stats[:freed_iseq_count])
-    $stderr.puts "freed_page_count:     " + ("%10d" % stats[:freed_page_count])
     $stderr.puts "invalidation_count:   " + ("%10d" % stats[:invalidation_count])
     $stderr.puts "constant_state_bumps: " + ("%10d" % stats[:constant_state_bumps])
     $stderr.puts "inline_code_size:     " + ("%10d" % stats[:inline_code_size])
     $stderr.puts "outlined_code_size:   " + ("%10d" % stats[:outlined_code_size])
     $stderr.puts "freed_code_size:      " + ("%10d" % stats[:freed_code_size])
+    $stderr.puts "live_page_count:      " + ("%10d" % stats[:live_page_count])
+    $stderr.puts "freed_page_count:     " + ("%10d" % stats[:freed_page_count])
     $stderr.puts "code_gc_count:        " + ("%10d" % stats[:code_gc_count])
     $stderr.puts "num_gc_obj_refs:      " + ("%10d" % stats[:num_gc_obj_refs])
 
diff --git a/yjit/src/asm/mod.rs b/yjit/src/asm/mod.rs
index b68520a767..7ac3625fbd 100644
--- a/yjit/src/asm/mod.rs
+++ b/yjit/src/asm/mod.rs
@@ -209,17 +209,25 @@ impl CodeBlock {
         self.page_size
     }
 
-    /// Return the number of code pages that have been allocated by the VirtualMemory.
-    pub fn num_pages(&self) -> usize {
+    /// Return the number of code pages that have been mapped by the VirtualMemory.
+    pub fn num_mapped_pages(&self) -> usize {
         let mapped_region_size = self.mem_block.borrow().mapped_region_size();
         // CodeBlock's page size != VirtualMem's page size on Linux,
         // so mapped_region_size % self.page_size may not be 0
         ((mapped_region_size - 1) / self.page_size) + 1
     }
 
+    /// Return the number of code pages that have been reserved by the VirtualMemory.
+    pub fn num_virtual_pages(&self) -> usize {
+        let virtual_region_size = self.mem_block.borrow().virtual_region_size();
+        // CodeBlock's page size != VirtualMem's page size on Linux,
+        // so virtual_region_size % self.page_size may not be 0
+        ((virtual_region_size - 1) / self.page_size) + 1
+    }
+
     /// Return the number of code pages that have been freed and not used yet.
     pub fn num_freed_pages(&self) -> usize {
-        (0..self.num_pages()).filter(|&page_idx| self.has_freed_page(page_idx)).count()
+        (0..self.num_mapped_pages()).filter(|&page_idx| self.has_freed_page(page_idx)).count()
     }
 
     pub fn has_freed_page(&self, page_idx: usize) -> bool {
@@ -303,7 +311,7 @@ impl CodeBlock {
     pub fn code_size(&self) -> usize {
         let mut size = 0;
         let current_page_idx = self.write_pos / self.page_size;
-        for page_idx in 0..self.num_pages() {
+        for page_idx in 0..self.num_mapped_pages() {
             if page_idx == current_page_idx {
                 // Count only actually used bytes for the current page.
                 size += (self.write_pos % self.page_size).saturating_sub(self.page_start());
@@ -546,7 +554,7 @@ impl CodeBlock {
         }
 
         // Check which pages are still in use
-        let mut pages_in_use = vec![false; self.num_pages()];
+        let mut pages_in_use = vec![false; self.num_mapped_pages()];
         // For each ISEQ, we currently assume that only code pages used by inline code
         // are used by outlined code, so we mark only code pages used by inlined code.
         for_each_on_stack_iseq_payload(|iseq_payload| {
@@ -560,10 +568,14 @@ impl CodeBlock {
         }
 
         // Let VirtuamMem free the pages
-        let freed_pages: Vec<usize> = pages_in_use.iter().enumerate()
+        let mut freed_pages: Vec<usize> = pages_in_use.iter().enumerate()
             .filter(|&(_, &in_use)| !in_use).map(|(page, _)| page).collect();
         self.free_pages(&freed_pages);
 
+        // Append virtual pages in case RubyVM::YJIT.code_gc is manually triggered.
+        let mut virtual_pages: Vec<usize> = (self.num_mapped_pages()..self.num_virtual_pages()).collect();
+        freed_pages.append(&mut virtual_pages);
+
         // Invalidate everything to have more compact code after code GC.
         // This currently patches every ISEQ, which works, but in the future,
         // we could limit that to patch only on-stack ISEQs for optimizing code GC.
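
# Note: num_mapped_pages and num_virtual_pages above both turn a byte size into a
# CodeBlock page count with ((size - 1) / page_size) + 1, a ceiling division, because
# CodeBlock's page size does not have to evenly divide the VirtualMem region size on
# Linux. A small Ruby illustration of that rounding; the sizes below are made up and
# only the formula mirrors the Rust code.
page_size = 16 * 1024
[page_size, page_size + 1, 3 * page_size - 1, 3 * page_size].each do |region_size|
  pages = ((region_size - 1) / page_size) + 1 # rounds up instead of truncating
  puts format("%6d bytes -> %d page(s)", region_size, pages)
end
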
diff --git a/yjit/src/stats.rs b/yjit/src/stats.rs
index e851d4e4d1..e07b475a9f 100644
--- a/yjit/src/stats.rs
+++ b/yjit/src/stats.rs
@@ -381,8 +381,8 @@ fn rb_yjit_gen_stats_dict() -> VALUE {
         // GCed code size
         hash_aset_usize!(hash, "freed_code_size", freed_page_count * cb.page_size());
 
-        // Compiled pages
-        hash_aset_usize!(hash, "compiled_page_count", cb.num_pages() - freed_page_count);
+        // Live pages
+        hash_aset_usize!(hash, "live_page_count", cb.num_mapped_pages() - freed_page_count);
     }
 
     // If we're not generating stats, the hash is done
diff --git a/yjit/src/yjit.rs b/yjit/src/yjit.rs
index 5cd23f066f..4850dca7a8 100644
--- a/yjit/src/yjit.rs
+++ b/yjit/src/yjit.rs
@@ -79,6 +79,18 @@ pub extern "C" fn rb_yjit_iseq_gen_entry_point(iseq: IseqPtr, ec: EcPtr) -> *con
     }
 }
 
+/// Free and recompile all existing JIT code
+#[no_mangle]
+pub extern "C" fn rb_yjit_code_gc(_ec: EcPtr, _ruby_self: VALUE) -> VALUE {
+    if !yjit_enabled_p() {
+        return Qnil;
+    }
+
+    let cb = CodegenGlobals::get_inline_cb();
+    cb.code_gc();
+    Qnil
+}
+
 /// Simulate a situation where we are out of executable memory
 #[no_mangle]
 pub extern "C" fn rb_yjit_simulate_oom_bang(_ec: EcPtr, _ruby_self: VALUE) -> VALUE {
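
# Note: taken together, the patch exposes code GC as RubyVM::YJIT.code_gc, wired through
# Primitive.rb_yjit_code_gc (yjit.rb / yjit.c) to rb_yjit_code_gc in yjit/src/yjit.rs,
# which calls CodeBlock::code_gc, and it reports pages as live_page_count and
# freed_page_count instead of compiled_page_count. A rough usage sketch, assuming
# --yjit --yjit-stats so that these stat keys are populated as in the diff:
stats = RubyVM::YJIT.runtime_stats
if stats
  puts "live pages:  #{stats[:live_page_count]}"
  puts "freed pages: #{stats[:freed_page_count]}"
  RubyVM::YJIT.code_gc # returns nil; does nothing unless YJIT is enabled
  puts "code GC runs: #{RubyVM::YJIT.runtime_stats[:code_gc_count]}"
end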