
fixes more clippy warnings (#6543)

* fixes more clippy warnings

* Fix x86 c_callable to have doc_strings
Jimmy Miller 2022-10-13 18:20:04 -04:00 committed by GitHub
parent 93a87f4963
commit 3c0b4ef1a2
Notes: git 2022-10-14 07:20:26 +09:00
Merged-By: maximecb <maximecb@ruby-lang.org>
9 changed files with 52 additions and 87 deletions

View file

@@ -300,7 +300,7 @@ impl CodeBlock {
}
/// Produce hex string output from the bytes in a code block
impl<'a> fmt::LowerHex for CodeBlock {
impl fmt::LowerHex for CodeBlock {
fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
for pos in 0..self.write_pos {
let byte = unsafe { self.mem_block.start_ptr().raw_ptr().add(pos).read() };
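
The hunk above removes a lifetime parameter that the impl never uses, which clippy flags. A stand-in sketch of the same fix, using a hypothetical CodeBlockLike type rather than the real CodeBlock:

```rust
use std::fmt;

struct CodeBlockLike; // stand-in type for this sketch only

// Before: `impl<'a> fmt::LowerHex for CodeBlockLike` declared a lifetime
// that nothing in the impl referenced, so it can simply be dropped.
impl fmt::LowerHex for CodeBlockLike {
    fn fmt(&self, fmtr: &mut fmt::Formatter) -> fmt::Result {
        write!(fmtr, "{:02x}", 0xabu8)
    }
}

fn main() {
    println!("{:x}", CodeBlockLike); // prints "ab"
}
```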
@@ -393,7 +393,7 @@ mod tests
assert_eq!(uimm_num_bits(((u16::MAX as u32) + 1).into()), 32);
assert_eq!(uimm_num_bits(u32::MAX.into()), 32);
assert_eq!(uimm_num_bits(((u32::MAX as u64) + 1)), 64);
assert_eq!(uimm_num_bits((u32::MAX as u64) + 1), 64);
assert_eq!(uimm_num_bits(u64::MAX), 64);
}
}

View file

@@ -71,7 +71,7 @@ impl Assembler
// A special scratch register for intermediate processing.
// This register is caller-saved (so we don't have to save it before using it)
const SCRATCH0: A64Opnd = A64Opnd::Reg(X16_REG);
const SCRATCH1: A64Opnd = A64Opnd::Reg(X17_REG);
const SCRATCH1: A64Opnd = A64Opnd::Reg(X17_REG);
/// Get the list of registers from which we will allocate on this platform
/// These are caller-saved registers
@@ -281,6 +281,9 @@ impl Assembler
};
}
// We are replacing instructions here so we know they are already
// being used. It is okay not to use their output here.
#[allow(unused_must_use)]
match insn {
Insn::Add { left, right, .. } => {
match (left, right) {

View file

@@ -220,7 +220,7 @@ impl From<usize> for Opnd {
impl From<u64> for Opnd {
fn from(value: u64) -> Self {
Opnd::UImm(value.try_into().unwrap())
Opnd::UImm(value)
}
}
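
In the hunk above the fallible conversion is dropped because nothing about turning a u64 into the operand can fail. A minimal stand-in sketch of the resulting pattern (a simplified enum, not the actual YJIT Opnd):

```rust
// Stand-in for the real operand type: the variant holds the full u64,
// so From<u64> needs no try_into().unwrap().
#[derive(Debug)]
enum Opnd {
    UImm(u64),
}

impl From<u64> for Opnd {
    fn from(value: u64) -> Self {
        Opnd::UImm(value)
    }
}

fn main() {
    let opnd: Opnd = u64::MAX.into(); // works for any u64, cannot panic
    println!("{:?}", opnd);
}
```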

View file

@@ -1,47 +1,14 @@
#![cfg(test)]
use crate::asm::{CodeBlock};
use crate::virtualmem::{CodePtr};
use crate::backend::ir::*;
use crate::cruby::*;
use crate::core::*;
use crate::utils::c_callable;
use InsnOpnd::*;
// Test that this function type checks
fn gen_dup(
ctx: &mut Context,
asm: &mut Assembler,
) {
let dup_val = ctx.stack_pop(0);
let (mapping, tmp_type) = ctx.get_opnd_mapping(StackOpnd(0));
let loc0 = ctx.stack_push_mapping((mapping, tmp_type));
asm.mov(loc0, dup_val);
}
fn guard_object_is_heap(
asm: &mut Assembler,
object_opnd: Opnd,
ctx: &mut Context,
side_exit: CodePtr,
) {
asm.comment("guard object is heap");
// Test that the object is not an immediate
asm.test(object_opnd, Opnd::UImm(RUBY_IMMEDIATE_MASK as u64));
asm.jnz(Target::CodePtr(side_exit));
// Test that the object is not false or nil
asm.cmp(object_opnd, Opnd::UImm(Qnil.into()));
asm.jbe(Target::CodePtr(side_exit));
}
#[test]
fn test_add() {
let mut asm = Assembler::new();
let out = asm.add(SP, Opnd::UImm(1));
asm.add(out, Opnd::UImm(2));
let _ = asm.add(out, Opnd::UImm(2));
}
#[test]
@@ -52,21 +19,21 @@ fn test_alloc_regs() {
let out1 = asm.add(EC, Opnd::UImm(1));
// Pad some instructions in to make sure it can handle that.
asm.add(EC, Opnd::UImm(2));
let _ = asm.add(EC, Opnd::UImm(2));
// Get the second output we're going to reuse.
let out2 = asm.add(EC, Opnd::UImm(3));
// Pad another instruction.
asm.add(EC, Opnd::UImm(4));
let _ = asm.add(EC, Opnd::UImm(4));
// Reuse both the previously captured outputs.
asm.add(out1, out2);
let _ = asm.add(out1, out2);
// Now get a third output to make sure that the pool has registers to
// allocate now that the previous ones have been returned.
let out3 = asm.add(EC, Opnd::UImm(5));
asm.add(out3, Opnd::UImm(6));
let _ = asm.add(out3, Opnd::UImm(6));
// Here we're going to allocate the registers.
let result = asm.alloc_regs(Assembler::get_alloc_regs());
@@ -198,7 +165,7 @@ fn test_base_insn_out()
fn test_c_call()
{
c_callable! {
fn dummy_c_fun(v0: usize, v1: usize) {}
fn dummy_c_fun(_v0: usize, _v1: usize) {}
}
let (mut asm, mut cb) = setup_asm();
@@ -305,11 +272,12 @@ fn test_bake_string() {
#[test]
fn test_draining_iterator() {
let mut asm = Assembler::new();
asm.load(Opnd::None);
let _ = asm.load(Opnd::None);
asm.store(Opnd::None, Opnd::None);
asm.add(Opnd::None, Opnd::None);
let _ = asm.add(Opnd::None, Opnd::None);
let mut iter = asm.into_draining_iter();
@@ -327,11 +295,11 @@ fn test_draining_iterator() {
fn test_lookback_iterator() {
let mut asm = Assembler::new();
asm.load(Opnd::None);
let _ = asm.load(Opnd::None);
asm.store(Opnd::None, Opnd::None);
asm.store(Opnd::None, Opnd::None);
let mut iter = asm.into_lookback_iter();
let iter = asm.into_lookback_iter();
while let Some((index, insn)) = iter.next_unmapped() {
if index > 0 {
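
Most of the test changes in this file silence warnings about ignored results in the same way. A minimal self-contained sketch of the two idioms this commit uses, with a hypothetical #[must_use] function standing in for the assembler calls whose results the tests now discard:

```rust
// Hypothetical stand-in for a must_use call like the asm.add(...) above.
#[must_use]
fn add(a: u64, b: u64) -> u64 {
    a + b
}

// Idiom used in the arm64 backend hunk: allow the lint for a whole scope.
#[allow(unused_must_use)]
fn ignore_inside_allowed_scope() {
    add(3, 4); // result intentionally ignored
}

fn main() {
    // Idiom used in the tests above: explicitly discard the value.
    let _ = add(1, 2);
    ignore_inside_allowed_scope();
}
```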

View file

@@ -189,13 +189,6 @@ fn jit_peek_at_block_handler(jit: &JITState, level: u32) -> VALUE {
}
}
// Add a comment at the current position in the code block
fn add_comment(cb: &mut CodeBlock, comment_str: &str) {
if cfg!(feature = "asm_comments") {
cb.add_comment(comment_str);
}
}
/// Increment a profiling counter with counter_name
#[cfg(not(feature = "stats"))]
macro_rules! gen_counter_incr {
@@ -6804,7 +6797,7 @@ mod tests {
#[test]
fn test_gen_check_ints() {
let (_, _ctx, mut asm, mut cb, mut ocb) = setup_codegen();
let (_, _ctx, mut asm, _cb, mut ocb) = setup_codegen();
let side_exit = ocb.unwrap().get_write_ptr();
gen_check_ints(&mut asm, side_exit);
}
@@ -6822,7 +6815,7 @@ mod tests {
#[test]
fn test_gen_pop() {
let (mut jit, _, mut asm, mut cb, mut ocb) = setup_codegen();
let (mut jit, _, mut asm, _cb, mut ocb) = setup_codegen();
let mut context = Context::new_with_stack_size(1);
let status = gen_pop(&mut jit, &mut context, &mut asm, &mut ocb);
@@ -6872,7 +6865,7 @@ mod tests {
#[test]
fn test_gen_swap() {
let (mut jit, mut context, mut asm, mut cb, mut ocb) = setup_codegen();
let (mut jit, mut context, mut asm, _cb, mut ocb) = setup_codegen();
context.stack_push(Type::Fixnum);
context.stack_push(Type::Flonum);
@@ -6940,7 +6933,7 @@ mod tests {
#[test]
fn test_int2fix() {
let (mut jit, mut context, mut asm, mut cb, mut ocb) = setup_codegen();
let (mut jit, mut context, mut asm, _cb, mut ocb) = setup_codegen();
jit.opcode = YARVINSN_putobject_INT2FIX_0_.as_usize();
let status = gen_putobject_int2fix(&mut jit, &mut context, &mut asm, &mut ocb);
@@ -7029,7 +7022,7 @@ mod tests {
#[test]
fn test_gen_leave() {
let (mut jit, mut context, mut asm, mut cb, mut ocb) = setup_codegen();
let (mut jit, mut context, mut asm, _cb, mut ocb) = setup_codegen();
// Push return value
context.stack_push(Type::Fixnum);
gen_leave(&mut jit, &mut context, &mut asm, &mut ocb);

View file

@@ -585,10 +585,8 @@ pub extern "C" fn rb_yjit_iseq_mark(payload: *mut c_void) {
// Mark outgoing branch entries
for branch in &block.outgoing {
let branch = branch.borrow();
for target in &branch.targets {
if let Some(target) = target {
unsafe { rb_gc_mark_movable(target.iseq.into()) };
}
for target in branch.targets.iter().flatten() {
unsafe { rb_gc_mark_movable(target.iseq.into()) };
}
}
@@ -643,10 +641,8 @@ pub extern "C" fn rb_yjit_iseq_update_references(payload: *mut c_void) {
// Update outgoing branch entries
for branch in &block.outgoing {
let mut branch = branch.borrow_mut();
for target in &mut branch.targets {
if let Some(target) = target {
target.iseq = unsafe { rb_gc_location(target.iseq.into()) }.as_iseq();
}
for target in branch.targets.iter_mut().flatten() {
target.iseq = unsafe { rb_gc_location(target.iseq.into()) }.as_iseq();
}
}
@@ -1605,9 +1601,10 @@ fn make_branch_entry(block: &BlockRef, src_ctx: &Context, gen_fn: BranchGenFn) -
return branchref;
}
/// Generated code calls this function with the SysV calling convention.
/// See [get_branch_target].
c_callable! {
/// Generated code calls this function with the SysV calling convention.
/// See [get_branch_target].
fn branch_stub_hit(
branch_ptr: *const c_void,
target_idx: u32,
@@ -2018,14 +2015,12 @@ fn free_block(blockref: &BlockRef) {
let out_branch = out_branchref.borrow();
// For each successor block
for succ in &out_branch.blocks {
if let Some(succ) = succ {
// Remove outgoing branch from the successor's incoming list
let mut succ_block = succ.borrow_mut();
succ_block
.incoming
.retain(|succ_incoming| !Rc::ptr_eq(succ_incoming, out_branchref));
}
for succ in out_branch.blocks.iter().flatten() {
// Remove outgoing branch from the successor's incoming list
let mut succ_block = succ.borrow_mut();
succ_block
.incoming
.retain(|succ_incoming| !Rc::ptr_eq(succ_incoming, out_branchref));
}
}
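
The three hunks above apply the same pattern: iterating a collection of Option values with .flatten() instead of unwrapping each one with a nested if let. A self-contained illustration using plain integers rather than the real branch targets (iter_mut().flatten() works the same way for the mutable case):

```rust
fn main() {
    let targets: Vec<Option<u32>> = vec![Some(1), None, Some(3)];

    // Before: nested `if let` inside the loop body.
    for target in &targets {
        if let Some(target) = target {
            println!("if let: {}", target);
        }
    }

    // After: .iter().flatten() yields only the Some values.
    for target in targets.iter().flatten() {
        println!("flatten: {}", target);
    }
}
```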

View file

@@ -96,6 +96,7 @@ pub type size_t = u64;
pub type RedefinitionFlag = u32;
#[allow(dead_code)]
#[allow(clippy::useless_transmute)]
mod autogened {
use super::*;
// Textually include output from rust-bindgen as suggested by its user guide.

View file

@@ -70,11 +70,8 @@ pub fn disasm_iseq_insn_range(iseq: IseqPtr, start_idx: u32, end_idx: u32) -> St
total_code_size += blockref.borrow().code_size();
}
out.push_str(&format!("NUM BLOCK VERSIONS: {}\n", block_list.len()));
out.push_str(&format!(
"TOTAL INLINE CODE SIZE: {} bytes\n",
total_code_size
));
writeln!(out, "NUM BLOCK VERSIONS: {}", block_list.len()).unwrap();
writeln!(out, "TOTAL INLINE CODE SIZE: {} bytes", total_code_size).unwrap();
// For each block, sorted by increasing start address
for block_idx in 0..block_list.len() {
@@ -95,7 +92,7 @@ pub fn disasm_iseq_insn_range(iseq: IseqPtr, start_idx: u32, end_idx: u32) -> St
end_idx,
code_size
);
out.push_str(&format!("== {:=<60}\n", block_ident));
writeln!(out, "== {:=<60}", block_ident).unwrap();
// Disassemble the instructions
out.push_str(&disasm_addr_range(global_cb, start_addr, code_size));
@@ -109,7 +106,7 @@ pub fn disasm_iseq_insn_range(iseq: IseqPtr, start_idx: u32, end_idx: u32) -> St
// Log the size of the gap between the blocks if nonzero
if gap_size > 0 {
out.push_str(&format!("... {} byte gap ...\n", gap_size));
writeln!(out, "... {} byte gap ...", gap_size).unwrap();
}
}
}
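
The writeln! calls introduced above write into a String, which works because String implements std::fmt::Write; that is also why each call ends in .unwrap() on the fmt::Result. A minimal self-contained sketch with placeholder values:

```rust
use std::fmt::Write; // lets writeln! target a String

fn main() {
    let mut out = String::new();
    let total_code_size = 128; // placeholder value for this sketch

    // Equivalent to out.push_str(&format!(...)) without the temporary String.
    writeln!(out, "TOTAL INLINE CODE SIZE: {} bytes", total_code_size).unwrap();
    writeln!(out, "... {} byte gap ...", 16).unwrap();

    print!("{}", out);
}
```
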
@@ -141,7 +138,7 @@ pub fn disasm_addr_range(cb: &CodeBlock, start_addr: *const u8, code_size: usize
.detail(true)
.build()
.unwrap();
cs.set_skipdata(true);
cs.set_skipdata(true).unwrap();
// Disassemble the instructions
let code_slice = unsafe { std::slice::from_raw_parts(start_addr, code_size) };

View file

@@ -122,12 +122,20 @@ yjit_print_iseq(const rb_iseq_t *iseq)
#[cfg(target_arch = "aarch64")]
macro_rules! c_callable {
(fn $f:ident $args:tt $(-> $ret:ty)? $body:block) => { extern "C" fn $f $args $(-> $ret)? $body };
($(#[$outer:meta])*
fn $f:ident $args:tt $(-> $ret:ty)? $body:block) => {
$(#[$outer])*
extern "C" fn $f $args $(-> $ret)? $body
};
}
#[cfg(target_arch = "x86_64")]
macro_rules! c_callable {
(fn $f:ident $args:tt $(-> $ret:ty)? $body:block) => { extern "sysv64" fn $f $args $(-> $ret)? $body };
($(#[$outer:meta])*
fn $f:ident $args:tt $(-> $ret:ty)? $body:block) => {
$(#[$outer])*
extern "sysv64" fn $f $args $(-> $ret)? $body
};
}
pub(crate) use c_callable;
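
For illustration, a hedged usage sketch of the new macro arms (assuming the c_callable! macro above is in scope, e.g. via use crate::utils::c_callable; example_callback is a made-up function): doc comments and other outer attributes are now forwarded instead of failing to match, which is what the branch_stub_hit doc comments moved inside the macro in core.rs rely on.

```rust
c_callable! {
    /// Outer attributes such as this doc comment are now accepted by the
    /// macro and forwarded onto the generated extern function.
    fn example_callback(value: usize) -> usize {
        value + 1
    }
}
```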