diff --git a/yjit/src/backend/ir.rs b/yjit/src/backend/ir.rs
index 066e9dd4ce..a23b27dda2 100644
--- a/yjit/src/backend/ir.rs
+++ b/yjit/src/backend/ir.rs
@@ -313,8 +313,7 @@ impl From for Opnd {
 
 impl From<VALUE> for Opnd {
     fn from(value: VALUE) -> Self {
-        let VALUE(uimm) = value;
-        Opnd::UImm(uimm as u64)
+        Opnd::Value(value)
     }
 }
 
diff --git a/yjit/src/backend/x86_64/mod.rs b/yjit/src/backend/x86_64/mod.rs
index c3885be811..d1f1698b2f 100644
--- a/yjit/src/backend/x86_64/mod.rs
+++ b/yjit/src/backend/x86_64/mod.rs
@@ -95,20 +95,22 @@ impl Assembler
         let live_ranges: Vec<usize> = std::mem::take(&mut self.live_ranges);
 
         self.forward_pass(|asm, index, op, opnds, target, text, pos_marker, original_opnds| {
-            // Load heap object operands into registers because most
-            // instructions can't directly work with 64-bit constants
+            // Load VALUEs into registers because
+            // - Most instructions can't be encoded with 64-bit immediates.
+            // - We look for Op::Load specifically when emitting to keep GC'ed
+            //   VALUEs alive. This is a sort of canonicalization.
             let opnds = match op {
-                Op::Load | Op::Mov => opnds,
+                Op::Load => opnds,
                 _ => opnds.into_iter().map(|opnd| {
                     if let Opnd::Value(value) = opnd {
-                        if !value.special_const_p() {
-                            asm.load(opnd)
-                        } else {
-                            opnd
+                        // Since mov(mem64, imm32) sign extends, as_i64() makes sure we split
+                        // when the extended value is different.
+                        if !value.special_const_p() || imm_num_bits(value.as_i64()) > 32 {
+                            return asm.load(opnd);
                         }
-                    } else {
-                        opnd
                     }
+
+                    opnd
                 }).collect()
             };
 
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index c89cfc366f..903e899888 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -849,7 +849,9 @@ pub fn gen_single_block(
     let mut block = jit.block.borrow_mut();
 
     // Add the GC offsets to the block
-    gc_offsets.iter().map(|offs| { block.add_gc_obj_offset(*offs) });
+    for offset in gc_offsets {
+        block.add_gc_obj_offset(offset)
+    }
 
     // Mark the end position of the block
     block.set_end_addr(cb.get_write_ptr());
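
A rough sketch (not YJIT code) of why the new split pass checks `imm_num_bits(value.as_i64()) > 32`: x86-64 `mov [mem64], imm32` sign-extends its 32-bit immediate to 64 bits, so a 64-bit VALUE can only be stored as an immediate when that sign extension reproduces the original bits; otherwise it has to be loaded into a register first. The `fits_in_imm32` helper and the sample bit patterns below are hypothetical, for illustration only.

```rust
// Minimal sketch, assuming the mov(mem64, imm32) sign-extension behavior
// described in the diff's comments. `fits_in_imm32` is a hypothetical helper
// mirroring the spirit of the `imm_num_bits(value.as_i64()) <= 32` case.
fn fits_in_imm32(bits: u64) -> bool {
    let truncated = bits as i32;       // what would be encoded as the imm32
    (truncated as i64) as u64 == bits  // sign-extend back and compare to the original
}

fn main() {
    // A small special constant survives the imm32 round trip.
    assert!(fits_in_imm32(0x14));
    // A heap-pointer-like value high in the address space does not,
    // so the backend would need to split it into a register load.
    assert!(!fits_in_imm32(0x0000_7f5a_1234_5678));
    // A value whose upper 33 bits are all ones sign-extends correctly.
    assert!(fits_in_imm32(0xffff_ffff_ffff_fff8));
}
```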