1
0
Fork 0
mirror of https://github.com/ruby/ruby.git synced 2022-11-09 12:17:21 -05:00

YJIT: Reset dropped_bytes when patching code

We switch to a new page when we detect dropped_bytes flipping from false
to true. Previously, when we patched code for invalidation during code GC,
we started with the flag already set to true, so we failed to apply patches
that straddle pages. We would write out jumps halfway and then stop,
which left the code corrupted.

Reset the flag before patching so we patch across pages properly.
This commit is contained in:
Alan Wu 2022-11-07 16:49:48 -05:00
parent 1466682a23
commit 5d95cd99f4
Notes: git 2022-11-08 21:09:27 +00:00
3 changed files with 19 additions and 6 deletions

View file

@ -463,6 +463,12 @@ impl CodeBlock {
self.dropped_bytes
}
/// To patch code that straddles pages correctly, we need to start with
/// the dropped bytes flag unset so we can detect when to switch to a new page.
pub fn set_dropped_bytes(&mut self, dropped_bytes: bool) {
self.dropped_bytes = dropped_bytes;
}
/// Allocate a new label with a given name
pub fn new_label(&mut self, name: String) -> usize {
assert!(!name.contains(' '), "use underscores in label names, not spaces");

View file

@ -1599,18 +1599,13 @@ fn regenerate_branch(cb: &mut CodeBlock, branch: &mut Branch) {
}
*/
let old_write_pos = cb.get_write_pos();
let mut block = branch.block.borrow_mut();
let branch_terminates_block = branch.end_addr == block.end_addr;
// Rewrite the branch
assert!(branch.dst_addrs[0].is_some());
cb.set_write_ptr(branch.start_addr.unwrap());
// Generate the branch
let mut asm = Assembler::new();
asm.comment("regenerate_branch");
(branch.gen_fn)(
&mut asm,
branch.dst_addrs[0].unwrap(),
@ -1618,6 +1613,11 @@ fn regenerate_branch(cb: &mut CodeBlock, branch: &mut Branch) {
branch.shape,
);
// Rewrite the branch
let old_write_pos = cb.get_write_pos();
let old_dropped_bytes = cb.has_dropped_bytes();
cb.set_write_ptr(branch.start_addr.unwrap());
cb.set_dropped_bytes(false);
asm.compile(cb);
branch.end_addr = Some(cb.get_write_ptr());
@ -1638,6 +1638,7 @@ fn regenerate_branch(cb: &mut CodeBlock, branch: &mut Branch) {
if old_write_pos > cb.get_write_pos() {
// We rewound cb->write_pos to generate the branch, now restore it.
cb.set_pos(old_write_pos);
cb.set_dropped_bytes(old_dropped_bytes);
} else {
// The branch sits at the end of cb and consumed some memory.
// Keep cb.write_pos.
@ -2193,10 +2194,12 @@ pub fn invalidate_block_version(blockref: &BlockRef) {
// Patch in a jump to block.entry_exit.
let cur_pos = cb.get_write_ptr();
let cur_dropped_bytes = cb.has_dropped_bytes();
cb.set_write_ptr(block_start);
let mut asm = Assembler::new();
asm.jmp(block_entry_exit.into());
cb.set_dropped_bytes(false);
asm.compile(&mut cb);
assert!(
@ -2206,6 +2209,7 @@ pub fn invalidate_block_version(blockref: &BlockRef) {
cb.get_write_ptr().into_i64() - block_start.into_i64(),
);
cb.set_write_ptr(cur_pos);
cb.set_dropped_bytes(cur_dropped_bytes);
}
}

View file

@ -558,6 +558,7 @@ pub extern "C" fn rb_yjit_tracing_invalidate_all() {
// Apply patches
let old_pos = cb.get_write_pos();
let old_dropped_bytes = cb.has_dropped_bytes();
let mut patches = CodegenGlobals::take_global_inval_patches();
patches.sort_by_cached_key(|patch| patch.inline_patch_pos.raw_ptr());
let mut last_patch_end = std::ptr::null();
@ -568,10 +569,12 @@ pub extern "C" fn rb_yjit_tracing_invalidate_all() {
asm.jmp(patch.outlined_target_pos.into());
cb.set_write_ptr(patch.inline_patch_pos);
cb.set_dropped_bytes(false);
asm.compile(cb);
last_patch_end = cb.get_write_ptr().raw_ptr();
}
cb.set_pos(old_pos);
cb.set_dropped_bytes(old_dropped_bytes);
// Freeze invalidated part of the codepage. We only want to wait for
// running instances of the code to exit from now on, so we shouldn't