diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt --- a/llvm/lib/Target/RISCV/CMakeLists.txt +++ b/llvm/lib/Target/RISCV/CMakeLists.txt @@ -21,6 +21,7 @@ add_llvm_target(RISCVCodeGen RISCVAsmPrinter.cpp RISCVCodeGenPrepare.cpp + RISCVDeadRegisterDefinitions.cpp RISCVMakeCompressible.cpp RISCVExpandAtomicPseudoInsts.cpp RISCVExpandPseudoInsts.cpp diff --git a/llvm/lib/Target/RISCV/RISCV.h b/llvm/lib/Target/RISCV/RISCV.h --- a/llvm/lib/Target/RISCV/RISCV.h +++ b/llvm/lib/Target/RISCV/RISCV.h @@ -33,6 +33,9 @@ FunctionPass *createRISCVCodeGenPreparePass(); void initializeRISCVCodeGenPreparePass(PassRegistry &); +FunctionPass *createRISCVDeadRegisterDefinitionsPass(); +void initializeRISCVDeadRegisterDefinitionsPass(PassRegistry &); + FunctionPass *createRISCVISelDag(RISCVTargetMachine &TM, CodeGenOpt::Level OptLevel); diff --git a/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp b/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVDeadRegisterDefinitions.cpp @@ -0,0 +1,97 @@ +//===- RISCVDeadRegisterDefinitions.cpp - Replace dead defs w/ zero reg --===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// +// +// This pass rewrites Rd to x0 for instrs whose return values are unused. +// +//===---------------------------------------------------------------------===// + +#include "RISCV.h" +#include "RISCVInstrInfo.h" +#include "RISCVSubtarget.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" + +using namespace llvm; +#define DEBUG_TYPE "riscv-dead-defs" +#define RISCV_DEAD_REG_DEF_NAME "RISC-V Dead register definitions" + +STATISTIC(NumDeadDefsReplaced, "Number of dead definitions replaced"); + +namespace { +class RISCVDeadRegisterDefinitions : public MachineFunctionPass { +public: + static char ID; + + RISCVDeadRegisterDefinitions() : MachineFunctionPass(ID) { + initializeRISCVDeadRegisterDefinitionsPass( + *PassRegistry::getPassRegistry()); + } + bool runOnMachineFunction(MachineFunction &MF) override; + void getAnalysisUsage(AnalysisUsage &AU) const override { + AU.setPreservesCFG(); + MachineFunctionPass::getAnalysisUsage(AU); + } + + StringRef getPassName() const override { return RISCV_DEAD_REG_DEF_NAME; } +}; +} // end anonymous namespace + +char RISCVDeadRegisterDefinitions::ID = 0; +INITIALIZE_PASS(RISCVDeadRegisterDefinitions, DEBUG_TYPE, + RISCV_DEAD_REG_DEF_NAME, false, false) + +FunctionPass *llvm::createRISCVDeadRegisterDefinitionsPass() { + return new RISCVDeadRegisterDefinitions(); +} + +bool RISCVDeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) { + if (skipFunction(MF.getFunction())) + return false; + + const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo(); + const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); + const MachineRegisterInfo *MRI = &MF.getRegInfo(); + LLVM_DEBUG(dbgs() << "***** RISCVDeadRegisterDefinitions *****\n"); + + bool MadeChange = false; + for (MachineBasicBlock &MBB : MF) { + for (MachineInstr &MI : MBB) { + // We only handle non-computational instructions since some NOP encodings + // are reserved for HINT instructions. 
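+      // For example, "addi" with rd == x0 falls in the NOP/HINT encoding
+      // space, so rewriting the dead rd of a computational instruction to
+      // x0 could silently turn it into a hint with different semantics.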
+      const MCInstrDesc &Desc = MI.getDesc();
+      if (!Desc.mayLoad() && !Desc.mayStore() &&
+          !Desc.hasUnmodeledSideEffects())
+        continue;
+      for (int I = 0, E = Desc.getNumDefs(); I != E; ++I) {
+        MachineOperand &MO = MI.getOperand(I);
+        if (!MO.isReg() || !MO.isDef())
+          continue;
+        // We should not have any relevant physreg defs that are replaceable
+        // by zero before register allocation. So we just check for dead vreg
+        // defs.
+        Register Reg = MO.getReg();
+        if (!Reg.isVirtual() || (!MO.isDead() && !MRI->use_nodbg_empty(Reg)))
+          continue;
+        LLVM_DEBUG(dbgs() << "  Dead def operand #" << I << " in:\n  ";
+                   MI.print(dbgs()));
+        const TargetRegisterClass *RC = TII->getRegClass(Desc, I, TRI, MF);
+        if (!(RC && RC->contains(RISCV::X0))) {
+          LLVM_DEBUG(dbgs() << "  Ignoring, register is not a GPR.\n");
+          continue;
+        }
+        MO.setReg(RISCV::X0);
+        MO.setIsDead();
+        LLVM_DEBUG(dbgs() << "    Replacing with zero register. New:\n    ";
+                   MI.print(dbgs()));
+        ++NumDeadDefsReplaced;
+        MadeChange = true;
+      }
+    }
+  }
+
+  return MadeChange;
+}
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -71,12 +71,20 @@
     cl::desc("Enable the copy propagation with RISC-V copy instr"),
     cl::init(true), cl::Hidden);
 
+static cl::opt<bool> EnableRISCVDeadRegisterElimination(
+    "riscv-enable-dead-defs", cl::Hidden,
+    cl::desc("Enable the pass that removes dead"
+             " definitions and replaces stores to"
+             " them with stores to x0"),
+    cl::init(true));
+
 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
   RegisterTargetMachine<RISCVTargetMachine> X(getTheRISCV32Target());
   RegisterTargetMachine<RISCVTargetMachine> Y(getTheRISCV64Target());
   auto *PR = PassRegistry::getPassRegistry();
   initializeGlobalISel(*PR);
   initializeKCFIPass(*PR);
+  initializeRISCVDeadRegisterDefinitionsPass(*PR);
   initializeRISCVMakeCompressibleOptPass(*PR);
   initializeRISCVGatherScatterLoweringPass(*PR);
   initializeRISCVCodeGenPreparePass(*PR);
@@ -386,15 +394,17 @@
 void RISCVPassConfig::addPreRegAlloc() {
   addPass(createRISCVPreRAExpandPseudoPass());
-  if (TM->getOptLevel() != CodeGenOpt::None)
+  if (TM->getOptLevel() != CodeGenOpt::None) {
     addPass(createRISCVMergeBaseOffsetOptPass());
+    if (EnableRISCVDeadRegisterElimination)
+      addPass(createRISCVDeadRegisterDefinitionsPass());
+  }
   addPass(createRISCVInsertVSETVLIPass());
   addPass(createRISCVInsertReadWriteCSRPass());
 }
 
 void RISCVPassConfig::addOptimizedRegAlloc() {
   insertPass(&DetectDeadLanesID, &RISCVInitUndefID);
-
   TargetPassConfig::addOptimizedRegAlloc();
 }
diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
--- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
@@ -107,6 +107,7 @@
 ; RV64-NEXT: RISC-V Optimize W Instructions
 ; CHECK-NEXT: RISC-V Pre-RA pseudo instruction expansion pass
 ; CHECK-NEXT: RISC-V Merge Base Offset
+; CHECK-NEXT: RISC-V Dead register definitions
 ; CHECK-NEXT: RISC-V Insert VSETVLI pass
 ; CHECK-NEXT: RISC-V Insert Read/Write CSR Pass
 ; CHECK-NEXT: Detect Dead Lanes
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll b/llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw-discard.ll
@@ -0,0 +1,474 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O3 -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -O3 -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV64 %s + +define void @amoswap_w_discard(ptr %a, i32 %b) nounwind { +; RV32-LABEL: amoswap_w_discard: +; RV32: # %bb.0: +; RV32-NEXT: amoswap.w.aqrl zero, a1, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: amoswap_w_discard: +; RV64: # %bb.0: +; RV64-NEXT: amoswap.w.aqrl zero, a1, (a0) +; RV64-NEXT: ret + %1 = atomicrmw xchg ptr %a, i32 %b seq_cst + ret void +} + +define void @amoswap_d_discard(ptr %a, i64 %b) nounwind { +; RV32-LABEL: amoswap_d_discard: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32-NEXT: li a3, 5 +; RV32-NEXT: call __atomic_exchange_8@plt +; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: amoswap_d_discard: +; RV64: # %bb.0: +; RV64-NEXT: amoswap.d.aqrl zero, a1, (a0) +; RV64-NEXT: ret + %1 = atomicrmw xchg ptr %a, i64 %b seq_cst + ret void +} + +define void @amoadd_w_discard(ptr %a, i32 %b) nounwind { +; RV32-LABEL: amoadd_w_discard: +; RV32: # %bb.0: +; RV32-NEXT: amoadd.w.aqrl zero, a1, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: amoadd_w_discard: +; RV64: # %bb.0: +; RV64-NEXT: amoadd.w.aqrl zero, a1, (a0) +; RV64-NEXT: ret + %1 = atomicrmw add ptr %a, i32 %b seq_cst + ret void +} + +define void @amoadd_d_discard(ptr %a, i64 %b) nounwind { +; RV32-LABEL: amoadd_d_discard: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32-NEXT: li a3, 5 +; RV32-NEXT: call __atomic_fetch_add_8@plt +; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: amoadd_d_discard: +; RV64: # %bb.0: +; RV64-NEXT: amoadd.d.aqrl zero, a1, (a0) +; RV64-NEXT: ret + %1 = atomicrmw add ptr %a, i64 %b seq_cst + ret void +} + +define void @amoand_w_discard(ptr %a, i32 %b) nounwind { +; RV32-LABEL: amoand_w_discard: +; RV32: # %bb.0: +; RV32-NEXT: amoand.w.aqrl zero, a1, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: amoand_w_discard: +; RV64: # %bb.0: +; RV64-NEXT: amoand.w.aqrl zero, a1, (a0) +; RV64-NEXT: ret + %1 = atomicrmw and ptr %a, i32 %b seq_cst + ret void +} + +define void @amoand_d_discard(ptr %a, i64 %b) nounwind { +; RV32-LABEL: amoand_d_discard: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32-NEXT: li a3, 5 +; RV32-NEXT: call __atomic_fetch_and_8@plt +; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: amoand_d_discard: +; RV64: # %bb.0: +; RV64-NEXT: amoand.d.aqrl zero, a1, (a0) +; RV64-NEXT: ret + %1 = atomicrmw and ptr %a, i64 %b seq_cst + ret void +} + +define void @amoor_w_discard(ptr %a, i32 %b) nounwind { +; RV32-LABEL: amoor_w_discard: +; RV32: # %bb.0: +; RV32-NEXT: amoor.w.aqrl zero, a1, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: amoor_w_discard: +; RV64: # %bb.0: +; RV64-NEXT: amoor.w.aqrl zero, a1, (a0) +; RV64-NEXT: ret + %1 = atomicrmw or ptr %a, i32 %b seq_cst + ret void +} + +define void @amoor_d_discard(ptr %a, i64 %b) nounwind { +; RV32-LABEL: amoor_d_discard: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32-NEXT: li a3, 5 +; RV32-NEXT: call __atomic_fetch_or_8@plt +; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: amoor_d_discard: +; RV64: # %bb.0: +; RV64-NEXT: amoor.d.aqrl zero, a1, (a0) +; RV64-NEXT: ret + %1 = atomicrmw or ptr %a, i64 %b seq_cst + ret void +} + 
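+; RV32 has no 64-bit AMOs, so the *_d variants expand to __atomic_* libcalls
+; there; only the RV64 output exercises rewriting the dead result register to
+; x0 (printed as "zero").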
+define void @amoxor_w_discard(ptr %a, i32 %b) nounwind {
+; RV32-LABEL: amoxor_w_discard:
+; RV32:       # %bb.0:
+; RV32-NEXT:    amoxor.w.aqrl zero, a1, (a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: amoxor_w_discard:
+; RV64:       # %bb.0:
+; RV64-NEXT:    amoxor.w.aqrl zero, a1, (a0)
+; RV64-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i32 %b seq_cst
+  ret void
+}
+
+define void @amoxor_d_discard(ptr %a, i64 %b) nounwind {
+; RV32-LABEL: amoxor_d_discard:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    li a3, 5
+; RV32-NEXT:    call __atomic_fetch_xor_8@plt
+; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: amoxor_d_discard:
+; RV64:       # %bb.0:
+; RV64-NEXT:    amoxor.d.aqrl zero, a1, (a0)
+; RV64-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i64 %b seq_cst
+  ret void
+}
+
+define void @amomax_w_discard(ptr %a, i32 %b) nounwind {
+; RV32-LABEL: amomax_w_discard:
+; RV32:       # %bb.0:
+; RV32-NEXT:    amomax.w.aqrl zero, a1, (a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: amomax_w_discard:
+; RV64:       # %bb.0:
+; RV64-NEXT:    amomax.w.aqrl zero, a1, (a0)
+; RV64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i32 %b seq_cst
+  ret void
+}
+
+define void @amomax_d_discard(ptr %a, i64 %b) nounwind {
+; RV32-LABEL: amomax_d_discard:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -32
+; RV32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT:    mv s0, a0
+; RV32-NEXT:    lw a4, 4(a0)
+; RV32-NEXT:    lw a5, 0(a0)
+; RV32-NEXT:    mv s1, a2
+; RV32-NEXT:    mv s2, a1
+; RV32-NEXT:    j .LBB11_2
+; RV32-NEXT:  .LBB11_1: # %atomicrmw.start
+; RV32-NEXT:    # in Loop: Header=BB11_2 Depth=1
+; RV32-NEXT:    sw a5, 8(sp)
+; RV32-NEXT:    sw a4, 12(sp)
+; RV32-NEXT:    addi a1, sp, 8
+; RV32-NEXT:    li a4, 5
+; RV32-NEXT:    li a5, 5
+; RV32-NEXT:    mv a0, s0
+; RV32-NEXT:    call __atomic_compare_exchange_8@plt
+; RV32-NEXT:    lw a4, 12(sp)
+; RV32-NEXT:    lw a5, 8(sp)
+; RV32-NEXT:    bnez a0, .LBB11_6
+; RV32-NEXT:  .LBB11_2: # %atomicrmw.start
+; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32-NEXT:    beq a4, s1, .LBB11_4
+; RV32-NEXT:  # %bb.3: # %atomicrmw.start
+; RV32-NEXT:    # in Loop: Header=BB11_2 Depth=1
+; RV32-NEXT:    slt a0, s1, a4
+; RV32-NEXT:    mv a2, a5
+; RV32-NEXT:    mv a3, a4
+; RV32-NEXT:    bnez a0, .LBB11_1
+; RV32-NEXT:    j .LBB11_5
+; RV32-NEXT:  .LBB11_4: # in Loop: Header=BB11_2 Depth=1
+; RV32-NEXT:    sltu a0, s2, a5
+; RV32-NEXT:    mv a2, a5
+; RV32-NEXT:    mv a3, a4
+; RV32-NEXT:    bnez a0, .LBB11_1
+; RV32-NEXT:  .LBB11_5: # %atomicrmw.start
+; RV32-NEXT:    # in Loop: Header=BB11_2 Depth=1
+; RV32-NEXT:    mv a2, s2
+; RV32-NEXT:    mv a3, s1
+; RV32-NEXT:    j .LBB11_1
+; RV32-NEXT:  .LBB11_6: # %atomicrmw.end
+; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 32
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: amomax_d_discard:
+; RV64:       # %bb.0:
+; RV64-NEXT:    amomax.d.aqrl zero, a1, (a0)
+; RV64-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i64 %b seq_cst
+  ret void
+}
+
+define void @amomaxu_w_discard(ptr %a, i32 %b) nounwind {
+; RV32-LABEL: amomaxu_w_discard:
+; RV32:       # %bb.0:
+; RV32-NEXT:    amomaxu.w.aqrl zero, a1, (a0)
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: amomaxu_w_discard:
+; RV64:       # %bb.0:
+; RV64-NEXT:    amomaxu.w.aqrl zero, a1, (a0)
+; RV64-NEXT:    ret
+  %1 = atomicrmw umax ptr 
%a, i32 %b seq_cst + ret void +} + +define void @amomaxu_d_discard(ptr %a, i64 %b) nounwind { +; RV32-LABEL: amomaxu_d_discard: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -32 +; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32-NEXT: mv s0, a0 +; RV32-NEXT: lw a4, 4(a0) +; RV32-NEXT: lw a5, 0(a0) +; RV32-NEXT: mv s1, a2 +; RV32-NEXT: mv s2, a1 +; RV32-NEXT: j .LBB13_2 +; RV32-NEXT: .LBB13_1: # %atomicrmw.start +; RV32-NEXT: # in Loop: Header=BB13_2 Depth=1 +; RV32-NEXT: sw a5, 8(sp) +; RV32-NEXT: sw a4, 12(sp) +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: li a4, 5 +; RV32-NEXT: li a5, 5 +; RV32-NEXT: mv a0, s0 +; RV32-NEXT: call __atomic_compare_exchange_8@plt +; RV32-NEXT: lw a4, 12(sp) +; RV32-NEXT: lw a5, 8(sp) +; RV32-NEXT: bnez a0, .LBB13_6 +; RV32-NEXT: .LBB13_2: # %atomicrmw.start +; RV32-NEXT: # =>This Inner Loop Header: Depth=1 +; RV32-NEXT: beq a4, s1, .LBB13_4 +; RV32-NEXT: # %bb.3: # %atomicrmw.start +; RV32-NEXT: # in Loop: Header=BB13_2 Depth=1 +; RV32-NEXT: sltu a0, s1, a4 +; RV32-NEXT: mv a2, a5 +; RV32-NEXT: mv a3, a4 +; RV32-NEXT: bnez a0, .LBB13_1 +; RV32-NEXT: j .LBB13_5 +; RV32-NEXT: .LBB13_4: # in Loop: Header=BB13_2 Depth=1 +; RV32-NEXT: sltu a0, s2, a5 +; RV32-NEXT: mv a2, a5 +; RV32-NEXT: mv a3, a4 +; RV32-NEXT: bnez a0, .LBB13_1 +; RV32-NEXT: .LBB13_5: # %atomicrmw.start +; RV32-NEXT: # in Loop: Header=BB13_2 Depth=1 +; RV32-NEXT: mv a2, s2 +; RV32-NEXT: mv a3, s1 +; RV32-NEXT: j .LBB13_1 +; RV32-NEXT: .LBB13_6: # %atomicrmw.end +; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 32 +; RV32-NEXT: ret +; +; RV64-LABEL: amomaxu_d_discard: +; RV64: # %bb.0: +; RV64-NEXT: amomaxu.d.aqrl zero, a1, (a0) +; RV64-NEXT: ret + %1 = atomicrmw umax ptr %a, i64 %b seq_cst + ret void +} + +define void @amomin_w_discard(ptr %a, i32 %b) nounwind { +; RV32-LABEL: amomin_w_discard: +; RV32: # %bb.0: +; RV32-NEXT: amomin.w.aqrl zero, a1, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: amomin_w_discard: +; RV64: # %bb.0: +; RV64-NEXT: amomin.w.aqrl zero, a1, (a0) +; RV64-NEXT: ret + %1 = atomicrmw min ptr %a, i32 %b seq_cst + ret void +} + +define void @amomin_d_discard(ptr %a, i64 %b) nounwind { +; RV32-LABEL: amomin_d_discard: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -32 +; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32-NEXT: mv s0, a0 +; RV32-NEXT: lw a4, 4(a0) +; RV32-NEXT: lw a5, 0(a0) +; RV32-NEXT: mv s1, a2 +; RV32-NEXT: mv s2, a1 +; RV32-NEXT: j .LBB15_2 +; RV32-NEXT: .LBB15_1: # %atomicrmw.start +; RV32-NEXT: # in Loop: Header=BB15_2 Depth=1 +; RV32-NEXT: sw a5, 8(sp) +; RV32-NEXT: sw a4, 12(sp) +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: li a4, 5 +; RV32-NEXT: li a5, 5 +; RV32-NEXT: mv a0, s0 +; RV32-NEXT: call __atomic_compare_exchange_8@plt +; RV32-NEXT: lw a4, 12(sp) +; RV32-NEXT: lw a5, 8(sp) +; RV32-NEXT: bnez a0, .LBB15_6 +; RV32-NEXT: .LBB15_2: # %atomicrmw.start +; RV32-NEXT: # =>This Inner Loop Header: Depth=1 +; RV32-NEXT: beq a4, s1, .LBB15_4 +; RV32-NEXT: # %bb.3: # %atomicrmw.start +; RV32-NEXT: # in Loop: Header=BB15_2 Depth=1 +; RV32-NEXT: slt a0, s1, a4 +; RV32-NEXT: mv a2, a5 +; 
RV32-NEXT: mv a3, a4 +; RV32-NEXT: beqz a0, .LBB15_1 +; RV32-NEXT: j .LBB15_5 +; RV32-NEXT: .LBB15_4: # in Loop: Header=BB15_2 Depth=1 +; RV32-NEXT: sltu a0, s2, a5 +; RV32-NEXT: mv a2, a5 +; RV32-NEXT: mv a3, a4 +; RV32-NEXT: beqz a0, .LBB15_1 +; RV32-NEXT: .LBB15_5: # %atomicrmw.start +; RV32-NEXT: # in Loop: Header=BB15_2 Depth=1 +; RV32-NEXT: mv a2, s2 +; RV32-NEXT: mv a3, s1 +; RV32-NEXT: j .LBB15_1 +; RV32-NEXT: .LBB15_6: # %atomicrmw.end +; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 32 +; RV32-NEXT: ret +; +; RV64-LABEL: amomin_d_discard: +; RV64: # %bb.0: +; RV64-NEXT: amomin.d.aqrl zero, a1, (a0) +; RV64-NEXT: ret + %1 = atomicrmw min ptr %a, i64 %b seq_cst + ret void +} + +define void @amominu_w_discard(ptr %a, i32 %b) nounwind { +; RV32-LABEL: amominu_w_discard: +; RV32: # %bb.0: +; RV32-NEXT: amominu.w.aqrl zero, a1, (a0) +; RV32-NEXT: ret +; +; RV64-LABEL: amominu_w_discard: +; RV64: # %bb.0: +; RV64-NEXT: amominu.w.aqrl zero, a1, (a0) +; RV64-NEXT: ret + %1 = atomicrmw umin ptr %a, i32 %b seq_cst + ret void +} + +define void @amominu_d_discard(ptr %a, i64 %b) nounwind { +; RV32-LABEL: amominu_d_discard: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -32 +; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32-NEXT: mv s0, a0 +; RV32-NEXT: lw a4, 4(a0) +; RV32-NEXT: lw a5, 0(a0) +; RV32-NEXT: mv s1, a2 +; RV32-NEXT: mv s2, a1 +; RV32-NEXT: j .LBB17_2 +; RV32-NEXT: .LBB17_1: # %atomicrmw.start +; RV32-NEXT: # in Loop: Header=BB17_2 Depth=1 +; RV32-NEXT: sw a5, 8(sp) +; RV32-NEXT: sw a4, 12(sp) +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: li a4, 5 +; RV32-NEXT: li a5, 5 +; RV32-NEXT: mv a0, s0 +; RV32-NEXT: call __atomic_compare_exchange_8@plt +; RV32-NEXT: lw a4, 12(sp) +; RV32-NEXT: lw a5, 8(sp) +; RV32-NEXT: bnez a0, .LBB17_6 +; RV32-NEXT: .LBB17_2: # %atomicrmw.start +; RV32-NEXT: # =>This Inner Loop Header: Depth=1 +; RV32-NEXT: beq a4, s1, .LBB17_4 +; RV32-NEXT: # %bb.3: # %atomicrmw.start +; RV32-NEXT: # in Loop: Header=BB17_2 Depth=1 +; RV32-NEXT: sltu a0, s1, a4 +; RV32-NEXT: mv a2, a5 +; RV32-NEXT: mv a3, a4 +; RV32-NEXT: beqz a0, .LBB17_1 +; RV32-NEXT: j .LBB17_5 +; RV32-NEXT: .LBB17_4: # in Loop: Header=BB17_2 Depth=1 +; RV32-NEXT: sltu a0, s2, a5 +; RV32-NEXT: mv a2, a5 +; RV32-NEXT: mv a3, a4 +; RV32-NEXT: beqz a0, .LBB17_1 +; RV32-NEXT: .LBB17_5: # %atomicrmw.start +; RV32-NEXT: # in Loop: Header=BB17_2 Depth=1 +; RV32-NEXT: mv a2, s2 +; RV32-NEXT: mv a3, s1 +; RV32-NEXT: j .LBB17_1 +; RV32-NEXT: .LBB17_6: # %atomicrmw.end +; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32-NEXT: addi sp, sp, 32 +; RV32-NEXT: ret +; +; RV64-LABEL: amominu_d_discard: +; RV64: # %bb.0: +; RV64-NEXT: amominu.d.aqrl zero, a1, (a0) +; RV64-NEXT: ret + %1 = atomicrmw umin ptr %a, i64 %b seq_cst + ret void +} diff --git a/llvm/test/CodeGen/RISCV/branch.ll b/llvm/test/CodeGen/RISCV/branch.ll --- a/llvm/test/CodeGen/RISCV/branch.ll +++ b/llvm/test/CodeGen/RISCV/branch.ll @@ -35,7 +35,7 @@ ; RV32I-NEXT: lw a3, 0(a1) ; RV32I-NEXT: bgeu a0, a3, .LBB0_14 ; RV32I-NEXT: # %bb.10: # %test11 -; RV32I-NEXT: lw a0, 
0(a1) +; RV32I-NEXT: lw zero, 0(a1) ; RV32I-NEXT: andi a2, a2, 1 ; RV32I-NEXT: bnez a2, .LBB0_14 ; RV32I-NEXT: # %bb.11: # %test12 @@ -45,7 +45,7 @@ ; RV32I-NEXT: lw a0, 0(a1) ; RV32I-NEXT: blez a0, .LBB0_14 ; RV32I-NEXT: # %bb.13: # %test14 -; RV32I-NEXT: lw a0, 0(a1) +; RV32I-NEXT: lw zero, 0(a1) ; RV32I-NEXT: .LBB0_14: # %end ; RV32I-NEXT: ret %val1 = load volatile i32, ptr %b diff --git a/llvm/test/CodeGen/RISCV/double-mem.ll b/llvm/test/CodeGen/RISCV/double-mem.ll --- a/llvm/test/CodeGen/RISCV/double-mem.ll +++ b/llvm/test/CodeGen/RISCV/double-mem.ll @@ -136,10 +136,10 @@ ; RV64IZFINXZDINX: # %bb.0: ; RV64IZFINXZDINX-NEXT: fadd.d a0, a0, a1 ; RV64IZFINXZDINX-NEXT: lui a1, %hi(G) -; RV64IZFINXZDINX-NEXT: ld a2, %lo(G)(a1) +; RV64IZFINXZDINX-NEXT: ld zero, %lo(G)(a1) ; RV64IZFINXZDINX-NEXT: addi a2, a1, %lo(G) ; RV64IZFINXZDINX-NEXT: sd a0, %lo(G)(a1) -; RV64IZFINXZDINX-NEXT: ld a1, 72(a2) +; RV64IZFINXZDINX-NEXT: ld zero, 72(a2) ; RV64IZFINXZDINX-NEXT: sd a0, 72(a2) ; RV64IZFINXZDINX-NEXT: ret ; Use %a and %b in an FP op to ensure floating point registers are used, even diff --git a/llvm/test/CodeGen/RISCV/float-mem.ll b/llvm/test/CodeGen/RISCV/float-mem.ll --- a/llvm/test/CodeGen/RISCV/float-mem.ll +++ b/llvm/test/CodeGen/RISCV/float-mem.ll @@ -75,10 +75,10 @@ ; CHECKIZFINX: # %bb.0: ; CHECKIZFINX-NEXT: fadd.s a0, a0, a1 ; CHECKIZFINX-NEXT: lui a1, %hi(G) -; CHECKIZFINX-NEXT: lw a2, %lo(G)(a1) +; CHECKIZFINX-NEXT: lw zero, %lo(G)(a1) ; CHECKIZFINX-NEXT: addi a2, a1, %lo(G) ; CHECKIZFINX-NEXT: sw a0, %lo(G)(a1) -; CHECKIZFINX-NEXT: lw a1, 36(a2) +; CHECKIZFINX-NEXT: lw zero, 36(a2) ; CHECKIZFINX-NEXT: sw a0, 36(a2) ; CHECKIZFINX-NEXT: ret %1 = fadd float %a, %b diff --git a/llvm/test/CodeGen/RISCV/half-mem.ll b/llvm/test/CodeGen/RISCV/half-mem.ll --- a/llvm/test/CodeGen/RISCV/half-mem.ll +++ b/llvm/test/CodeGen/RISCV/half-mem.ll @@ -128,10 +128,10 @@ ; CHECKIZHINX-NEXT: fadd.h a0, a0, a1 ; CHECKIZHINX-NEXT: lui a1, %hi(G) ; CHECKIZHINX-NEXT: addi a1, a1, %lo(G) -; CHECKIZHINX-NEXT: lh a2, 0(a1) +; CHECKIZHINX-NEXT: lh zero, 0(a1) ; CHECKIZHINX-NEXT: sh a0, 0(a1) ; CHECKIZHINX-NEXT: addi a1, a1, 18 -; CHECKIZHINX-NEXT: lh a2, 0(a1) +; CHECKIZHINX-NEXT: lh zero, 0(a1) ; CHECKIZHINX-NEXT: sh a0, 0(a1) ; CHECKIZHINX-NEXT: ret ; @@ -157,10 +157,10 @@ ; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 ; CHECKIZHINXMIN-NEXT: lui a1, %hi(G) ; CHECKIZHINXMIN-NEXT: addi a1, a1, %lo(G) -; CHECKIZHINXMIN-NEXT: lh a2, 0(a1) +; CHECKIZHINXMIN-NEXT: lh zero, 0(a1) ; CHECKIZHINXMIN-NEXT: sh a0, 0(a1) ; CHECKIZHINXMIN-NEXT: addi a1, a1, 18 -; CHECKIZHINXMIN-NEXT: lh a2, 0(a1) +; CHECKIZHINXMIN-NEXT: lh zero, 0(a1) ; CHECKIZHINXMIN-NEXT: sh a0, 0(a1) ; CHECKIZHINXMIN-NEXT: ret %1 = fadd half %a, %b diff --git a/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll b/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll --- a/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll +++ b/llvm/test/CodeGen/RISCV/local-stack-slot-allocation.ll @@ -15,8 +15,8 @@ ; RV32I-NEXT: lui a0, 24 ; RV32I-NEXT: addi a0, a0, 1704 ; RV32I-NEXT: add a0, sp, a0 -; RV32I-NEXT: lbu a1, 4(a0) -; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: lbu zero, 4(a0) +; RV32I-NEXT: lbu zero, 0(a0) ; RV32I-NEXT: lui a0, 24 ; RV32I-NEXT: addi a0, a0, 1712 ; RV32I-NEXT: add sp, sp, a0 @@ -31,8 +31,8 @@ ; RV64I-NEXT: lui a0, 24 ; RV64I-NEXT: addiw a0, a0, 1704 ; RV64I-NEXT: add a0, sp, a0 -; RV64I-NEXT: lbu a1, 4(a0) -; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: lbu zero, 4(a0) +; RV64I-NEXT: lbu zero, 0(a0) ; RV64I-NEXT: lui a0, 24 ; RV64I-NEXT: addiw a0, 
a0, 1712 ; RV64I-NEXT: add sp, sp, a0 @@ -57,10 +57,10 @@ ; RV32I-NEXT: .cfi_def_cfa_offset 100608 ; RV32I-NEXT: lui a0, 25 ; RV32I-NEXT: add a0, sp, a0 -; RV32I-NEXT: lbu a0, -292(a0) +; RV32I-NEXT: lbu zero, -292(a0) ; RV32I-NEXT: lui a0, 24 ; RV32I-NEXT: add a0, sp, a0 -; RV32I-NEXT: lbu a0, 1704(a0) +; RV32I-NEXT: lbu zero, 1704(a0) ; RV32I-NEXT: lui a0, 25 ; RV32I-NEXT: addi a0, a0, -1792 ; RV32I-NEXT: add sp, sp, a0 @@ -74,10 +74,10 @@ ; RV64I-NEXT: .cfi_def_cfa_offset 100608 ; RV64I-NEXT: lui a0, 25 ; RV64I-NEXT: add a0, sp, a0 -; RV64I-NEXT: lbu a0, -292(a0) +; RV64I-NEXT: lbu zero, -292(a0) ; RV64I-NEXT: lui a0, 24 ; RV64I-NEXT: add a0, sp, a0 -; RV64I-NEXT: lbu a0, 1704(a0) +; RV64I-NEXT: lbu zero, 1704(a0) ; RV64I-NEXT: lui a0, 25 ; RV64I-NEXT: addiw a0, a0, -1792 ; RV64I-NEXT: add sp, sp, a0 diff --git a/llvm/test/CodeGen/RISCV/machinelicm-address-pseudos.ll b/llvm/test/CodeGen/RISCV/machinelicm-address-pseudos.ll --- a/llvm/test/CodeGen/RISCV/machinelicm-address-pseudos.ll +++ b/llvm/test/CodeGen/RISCV/machinelicm-address-pseudos.ll @@ -16,7 +16,7 @@ ; RV32I-NEXT: auipc a2, %pcrel_hi(l) ; RV32I-NEXT: .LBB0_1: # %loop ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: lw a3, %pcrel_lo(.Lpcrel_hi0)(a2) +; RV32I-NEXT: lw zero, %pcrel_lo(.Lpcrel_hi0)(a2) ; RV32I-NEXT: addi a1, a1, 1 ; RV32I-NEXT: blt a1, a0, .LBB0_1 ; RV32I-NEXT: # %bb.2: # %ret @@ -29,7 +29,7 @@ ; RV64I-NEXT: auipc a2, %pcrel_hi(l) ; RV64I-NEXT: .LBB0_1: # %loop ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: lw a3, %pcrel_lo(.Lpcrel_hi0)(a2) +; RV64I-NEXT: lw zero, %pcrel_lo(.Lpcrel_hi0)(a2) ; RV64I-NEXT: addiw a1, a1, 1 ; RV64I-NEXT: blt a1, a0, .LBB0_1 ; RV64I-NEXT: # %bb.2: # %ret @@ -59,7 +59,7 @@ ; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: .LBB1_1: # %loop ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: lw a3, 0(a1) +; RV32I-NEXT: lw zero, 0(a1) ; RV32I-NEXT: addi a2, a2, 1 ; RV32I-NEXT: blt a2, a0, .LBB1_1 ; RV32I-NEXT: # %bb.2: # %ret @@ -73,7 +73,7 @@ ; RV64I-NEXT: li a2, 0 ; RV64I-NEXT: .LBB1_1: # %loop ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: lw a3, 0(a1) +; RV64I-NEXT: lw zero, 0(a1) ; RV64I-NEXT: addiw a2, a2, 1 ; RV64I-NEXT: blt a2, a0, .LBB1_1 ; RV64I-NEXT: # %bb.2: # %ret @@ -104,7 +104,7 @@ ; RV32I-NEXT: add a2, a2, tp ; RV32I-NEXT: .LBB2_1: # %loop ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: lw a3, 0(a2) +; RV32I-NEXT: lw zero, 0(a2) ; RV32I-NEXT: addi a1, a1, 1 ; RV32I-NEXT: blt a1, a0, .LBB2_1 ; RV32I-NEXT: # %bb.2: # %ret @@ -119,7 +119,7 @@ ; RV64I-NEXT: add a2, a2, tp ; RV64I-NEXT: .LBB2_1: # %loop ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: lw a3, 0(a2) +; RV64I-NEXT: lw zero, 0(a2) ; RV64I-NEXT: addiw a1, a1, 1 ; RV64I-NEXT: blt a1, a0, .LBB2_1 ; RV64I-NEXT: # %bb.2: # %ret @@ -157,7 +157,7 @@ ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call __tls_get_addr@plt -; RV32I-NEXT: lw a0, 0(a0) +; RV32I-NEXT: lw zero, 0(a0) ; RV32I-NEXT: addi s2, s2, 1 ; RV32I-NEXT: blt s2, s0, .LBB3_1 ; RV32I-NEXT: # %bb.2: # %ret @@ -184,7 +184,7 @@ ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: call __tls_get_addr@plt -; RV64I-NEXT: lw a0, 0(a0) +; RV64I-NEXT: lw zero, 0(a0) ; RV64I-NEXT: addiw s2, s2, 1 ; RV64I-NEXT: blt s2, s0, .LBB3_1 ; RV64I-NEXT: # %bb.2: # %ret diff --git a/llvm/test/CodeGen/RISCV/mem.ll b/llvm/test/CodeGen/RISCV/mem.ll --- a/llvm/test/CodeGen/RISCV/mem.ll +++ b/llvm/test/CodeGen/RISCV/mem.ll @@ 
-8,7 +8,7 @@ ; RV32I-LABEL: lb: ; RV32I: # %bb.0: ; RV32I-NEXT: lb a1, 1(a0) -; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: lbu zero, 0(a0) ; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: ret %1 = getelementptr i8, ptr %a, i32 1 @@ -23,7 +23,7 @@ ; RV32I-LABEL: lh: ; RV32I: # %bb.0: ; RV32I-NEXT: lh a1, 4(a0) -; RV32I-NEXT: lh a0, 0(a0) +; RV32I-NEXT: lh zero, 0(a0) ; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: ret %1 = getelementptr i16, ptr %a, i32 2 @@ -38,7 +38,7 @@ ; RV32I-LABEL: lw: ; RV32I: # %bb.0: ; RV32I-NEXT: lw a1, 12(a0) -; RV32I-NEXT: lw a0, 0(a0) +; RV32I-NEXT: lw zero, 0(a0) ; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: ret %1 = getelementptr i32, ptr %a, i32 3 @@ -123,7 +123,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lbu a1, 1(a0) ; RV32I-NEXT: lbu a2, 2(a0) -; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: lbu zero, 0(a0) ; RV32I-NEXT: sub a0, a2, a1 ; RV32I-NEXT: ret ; sextload i1 @@ -145,7 +145,7 @@ ; RV32I: # %bb.0: ; RV32I-NEXT: lbu a1, 1(a0) ; RV32I-NEXT: lbu a2, 2(a0) -; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: lbu zero, 0(a0) ; RV32I-NEXT: sub a0, a2, a1 ; RV32I-NEXT: ret ; sextload i1 @@ -172,7 +172,7 @@ ; RV32I-NEXT: lw a1, %lo(G)(a2) ; RV32I-NEXT: addi a3, a2, %lo(G) ; RV32I-NEXT: sw a0, %lo(G)(a2) -; RV32I-NEXT: lw a2, 36(a3) +; RV32I-NEXT: lw zero, 36(a3) ; RV32I-NEXT: sw a0, 36(a3) ; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/mem64.ll b/llvm/test/CodeGen/RISCV/mem64.ll --- a/llvm/test/CodeGen/RISCV/mem64.ll +++ b/llvm/test/CodeGen/RISCV/mem64.ll @@ -8,7 +8,7 @@ ; RV64I-LABEL: lb: ; RV64I: # %bb.0: ; RV64I-NEXT: lb a1, 1(a0) -; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: lbu zero, 0(a0) ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ret %1 = getelementptr i8, ptr %a, i32 1 @@ -23,7 +23,7 @@ ; RV64I-LABEL: lh: ; RV64I: # %bb.0: ; RV64I-NEXT: lh a1, 4(a0) -; RV64I-NEXT: lh a0, 0(a0) +; RV64I-NEXT: lh zero, 0(a0) ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ret %1 = getelementptr i16, ptr %a, i32 2 @@ -38,7 +38,7 @@ ; RV64I-LABEL: lw: ; RV64I: # %bb.0: ; RV64I-NEXT: lw a1, 12(a0) -; RV64I-NEXT: lw a0, 0(a0) +; RV64I-NEXT: lw zero, 0(a0) ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ret %1 = getelementptr i32, ptr %a, i32 3 @@ -141,7 +141,7 @@ ; RV64I-LABEL: ld: ; RV64I: # %bb.0: ; RV64I-NEXT: ld a1, 80(a0) -; RV64I-NEXT: ld a0, 0(a0) +; RV64I-NEXT: ld zero, 0(a0) ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ret %1 = getelementptr i64, ptr %a, i32 10 @@ -168,7 +168,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: lbu a1, 1(a0) ; RV64I-NEXT: lbu a2, 2(a0) -; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: lbu zero, 0(a0) ; RV64I-NEXT: sub a0, a2, a1 ; RV64I-NEXT: ret ; sextload i1 @@ -190,7 +190,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: lbu a1, 1(a0) ; RV64I-NEXT: lbu a2, 2(a0) -; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: lbu zero, 0(a0) ; RV64I-NEXT: sub a0, a2, a1 ; RV64I-NEXT: ret ; sextload i1 @@ -217,7 +217,7 @@ ; RV64I-NEXT: ld a1, %lo(G)(a2) ; RV64I-NEXT: addi a3, a2, %lo(G) ; RV64I-NEXT: sd a0, %lo(G)(a2) -; RV64I-NEXT: ld a2, 72(a3) +; RV64I-NEXT: ld zero, 72(a3) ; RV64I-NEXT: sd a0, 72(a3) ; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/localvar.ll b/llvm/test/CodeGen/RISCV/rvv/localvar.ll --- a/llvm/test/CodeGen/RISCV/rvv/localvar.ll +++ b/llvm/test/CodeGen/RISCV/rvv/localvar.ll @@ -162,7 +162,7 @@ ; RV64IV-NEXT: slli a0, a0, 2 ; RV64IV-NEXT: sub sp, sp, a0 ; RV64IV-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb -; RV64IV-NEXT: lw a0, 12(sp) +; RV64IV-NEXT: lw zero, 12(sp) ; RV64IV-NEXT: csrr a0, vlenb ; 
RV64IV-NEXT: slli a0, a0, 1 ; RV64IV-NEXT: add a0, sp, a0 @@ -170,7 +170,7 @@ ; RV64IV-NEXT: vl2r.v v8, (a0) ; RV64IV-NEXT: addi a0, sp, 16 ; RV64IV-NEXT: vl2r.v v8, (a0) -; RV64IV-NEXT: lw a0, 8(sp) +; RV64IV-NEXT: lw zero, 8(sp) ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 2 ; RV64IV-NEXT: add sp, sp, a0 @@ -271,11 +271,11 @@ ; RV64IV-NEXT: add s2, s1, s2 ; RV64IV-NEXT: addi s2, s2, 224 ; RV64IV-NEXT: call notdead2@plt -; RV64IV-NEXT: lw a0, 124(s1) +; RV64IV-NEXT: lw zero, 124(s1) ; RV64IV-NEXT: vl2r.v v8, (s2) ; RV64IV-NEXT: addi a0, s1, 224 ; RV64IV-NEXT: vl2r.v v8, (a0) -; RV64IV-NEXT: lw a0, 120(s1) +; RV64IV-NEXT: lw zero, 120(s1) ; RV64IV-NEXT: addi sp, s0, -256 ; RV64IV-NEXT: ld ra, 248(sp) # 8-byte Folded Reload ; RV64IV-NEXT: ld s0, 240(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-framelayout.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-framelayout.ll --- a/llvm/test/CodeGen/RISCV/rvv/rvv-framelayout.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rvv-framelayout.ll @@ -27,7 +27,7 @@ ; CHECK-NEXT: vl2re64.v v8, (a2) ; CHECK-NEXT: slli a1, a1, 2 ; CHECK-NEXT: add a0, a0, a1 -; CHECK-NEXT: lw a0, 0(a0) +; CHECK-NEXT: lw zero, 0(a0) ; CHECK-NEXT: addi sp, s0, -32 ; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload @@ -65,7 +65,7 @@ ; CHECK-NEXT: vl1re64.v v8, (a0) ; CHECK-NEXT: addi a0, sp, 112 ; CHECK-NEXT: vl2re64.v v8, (a0) -; CHECK-NEXT: lw a0, 64(sp) +; CHECK-NEXT: lw zero, 64(sp) ; CHECK-NEXT: addi sp, s0, -128 ; CHECK-NEXT: ld ra, 120(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 112(sp) # 8-byte Folded Reload @@ -109,10 +109,10 @@ ; CHECK-NEXT: vl1re64.v v8, (a2) ; CHECK-NEXT: addi a2, s1, 112 ; CHECK-NEXT: vl2re64.v v8, (a2) -; CHECK-NEXT: lw a2, 64(s1) +; CHECK-NEXT: lw zero, 64(s1) ; CHECK-NEXT: slli a1, a1, 2 ; CHECK-NEXT: add a0, a0, a1 -; CHECK-NEXT: lw a0, 0(a0) +; CHECK-NEXT: lw zero, 0(a0) ; CHECK-NEXT: addi sp, s0, -144 ; CHECK-NEXT: ld ra, 136(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 128(sp) # 8-byte Folded Reload