diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h @@ -66,6 +66,11 @@ SmallVectorImpl<uint64_t> &Ops) const override; unsigned getRegisterCostTableIndex(const MachineFunction &MF) const override; + + bool getRegAllocationHints(Register VirtReg, ArrayRef<MCPhysReg> Order, + SmallVectorImpl<MCPhysReg> &Hints, + const MachineFunction &MF, const VirtRegMap *VRM, + const LiveRegMatrix *Matrix) const override; }; } diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp @@ -29,6 +29,12 @@ using namespace llvm; +static cl::opt<bool> + DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden, + cl::init(false), + cl::desc("Disable two address hints for register " + "allocation")); + static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive"); static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive"); static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive"); @@ -376,3 +382,70 @@ RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const { return MF.getSubtarget<RISCVSubtarget>().hasStdExtC() ? 1 : 0; } + +// Add two address hints to improve chances of being able to use a compressed +// instruction. +bool RISCVRegisterInfo::getRegAllocationHints( + Register VirtReg, ArrayRef<MCPhysReg> Order, + SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF, + const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const { + const MachineRegisterInfo *MRI = &MF.getRegInfo(); + + bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints( + VirtReg, Order, Hints, MF, VRM, Matrix); + + if (!VRM || DisableRegAllocHints) + return BaseImplRetVal; + + // Add any two address hints after any copy hints. + SmallSet<Register, 4> TwoAddrHints; + + auto tryAddHint = [&](const MachineOperand &VRRegMO, + const MachineOperand &MO) -> void { + Register Reg = MO.getReg(); + Register PhysReg = + Register::isPhysicalRegister(Reg) ? Reg : Register(VRM->getPhys(Reg)); + if (PhysReg) { + assert(!MO.getSubReg() && !VRRegMO.getSubReg() && "Unexpected subreg!"); + if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg)) + TwoAddrHints.insert(PhysReg); + } + }; + + // For now we support the compressible instructions which can encode all + // registers and have a single register source. + // TODO: Add more compressed instructions.
+ auto isCompressible = [](const MachineInstr &MI) { + switch (MI.getOpcode()) { + default: + return false; + case RISCV::SLLI: + return true; + case RISCV::ADDI: + case RISCV::ADDIW: + return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm()); + } + }; + + for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) { + const MachineInstr &MI = *MO.getParent(); + if (isCompressible(MI)) { + unsigned OpIdx = MI.getOperandNo(&MO); + if (OpIdx == 0 && MI.getOperand(1).isReg()) { + tryAddHint(MO, MI.getOperand(1)); + if (MI.isCommutable() && MI.getOperand(2).isReg()) + tryAddHint(MO, MI.getOperand(2)); + } else if (OpIdx == 1) { + tryAddHint(MO, MI.getOperand(0)); + } else if (MI.isCommutable() && OpIdx == 2) { + tryAddHint(MO, MI.getOperand(0)); + } + } + } + + for (MCPhysReg OrderReg : Order) + if (TwoAddrHints.count(OrderReg)) + Hints.push_back(OrderReg); + + return BaseImplRetVal; +} diff --git a/llvm/test/CodeGen/RISCV/add-before-shl.ll b/llvm/test/CodeGen/RISCV/add-before-shl.ll --- a/llvm/test/CodeGen/RISCV/add-before-shl.ll +++ b/llvm/test/CodeGen/RISCV/add-before-shl.ll @@ -196,7 +196,7 @@ ; ; RV32C-LABEL: add_wide_operand: ; RV32C: # %bb.0: -; RV32C-NEXT: c.lw a2, 4(a1) +; RV32C-NEXT: lw a6, 4(a1) ; RV32C-NEXT: c.lw a3, 12(a1) ; RV32C-NEXT: c.lw a4, 0(a1) ; RV32C-NEXT: c.lw a1, 8(a1) @@ -204,18 +204,18 @@ ; RV32C-NEXT: c.add a3, a5 ; RV32C-NEXT: c.slli a3, 3 ; RV32C-NEXT: srli a5, a1, 29 -; RV32C-NEXT: or a6, a3, a5 -; RV32C-NEXT: srli a5, a4, 29 -; RV32C-NEXT: slli a3, a2, 3 ; RV32C-NEXT: c.or a3, a5 -; RV32C-NEXT: c.srli a2, 29 +; RV32C-NEXT: srli a5, a4, 29 +; RV32C-NEXT: slli a2, a6, 3 +; RV32C-NEXT: c.or a2, a5 +; RV32C-NEXT: srli a5, a6, 29 ; RV32C-NEXT: c.slli a1, 3 -; RV32C-NEXT: c.or a1, a2 -; RV32C-NEXT: slli a2, a4, 3 -; RV32C-NEXT: c.sw a2, 0(a0) +; RV32C-NEXT: c.or a1, a5 +; RV32C-NEXT: c.slli a4, 3 +; RV32C-NEXT: c.sw a4, 0(a0) ; RV32C-NEXT: c.sw a1, 8(a0) -; RV32C-NEXT: c.sw a3, 4(a0) -; RV32C-NEXT: sw a6, 12(a0) +; RV32C-NEXT: c.sw a2, 4(a0) +; RV32C-NEXT: c.sw a3, 12(a0) ; RV32C-NEXT: c.jr ra ; ; RV64C-LABEL: add_wide_operand: diff --git a/llvm/test/CodeGen/RISCV/addcarry.ll b/llvm/test/CodeGen/RISCV/addcarry.ll --- a/llvm/test/CodeGen/RISCV/addcarry.ll +++ b/llvm/test/CodeGen/RISCV/addcarry.ll @@ -32,13 +32,13 @@ ; RISCV32-NEXT: # %bb.3: ; RISCV32-NEXT: sub a5, a5, a0 ; RISCV32-NEXT: .LBB0_4: -; RISCV32-NEXT: slli a1, a5, 30 -; RISCV32-NEXT: srli a3, a4, 2 -; RISCV32-NEXT: or a1, a1, a3 -; RISCV32-NEXT: slli a3, a4, 30 +; RISCV32-NEXT: slli a5, a5, 30 +; RISCV32-NEXT: srli a1, a4, 2 +; RISCV32-NEXT: or a1, a5, a1 +; RISCV32-NEXT: slli a4, a4, 30 ; RISCV32-NEXT: mul a0, a0, a2 ; RISCV32-NEXT: srli a0, a0, 2 -; RISCV32-NEXT: or a0, a3, a0 +; RISCV32-NEXT: or a0, a4, a0 ; RISCV32-NEXT: ret %tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 2); ret i64 %tmp; diff --git a/llvm/test/CodeGen/RISCV/aext-to-sext.ll b/llvm/test/CodeGen/RISCV/aext-to-sext.ll --- a/llvm/test/CodeGen/RISCV/aext-to-sext.ll +++ b/llvm/test/CodeGen/RISCV/aext-to-sext.ll @@ -82,8 +82,8 @@ ; RV64I-NEXT: # %bb.1: # %iffalse ; RV64I-NEXT: li a1, -2 ; RV64I-NEXT: .LBB2_2: # %merge -; RV64I-NEXT: slli a0, a1, 32 -; RV64I-NEXT: srli a0, a0, 32 +; RV64I-NEXT: slli a1, a1, 32 +; RV64I-NEXT: srli a0, a1, 32 ; RV64I-NEXT: ret %a = icmp ne i32 %c, 0 br i1 %a, label %iftrue, label %iffalse diff --git a/llvm/test/CodeGen/RISCV/and.ll b/llvm/test/CodeGen/RISCV/and.ll --- a/llvm/test/CodeGen/RISCV/and.ll +++ b/llvm/test/CodeGen/RISCV/and.ll @@ -187,8 +187,8 @@ define i64 @and64_0x7fffffff00000000(i64 %x) { 
; RV32I-LABEL: and64_0x7fffffff00000000: ; RV32I: # %bb.0: -; RV32I-NEXT: slli a0, a1, 1 -; RV32I-NEXT: srli a1, a0, 1 +; RV32I-NEXT: slli a1, a1, 1 +; RV32I-NEXT: srli a1, a1, 1 ; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll --- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll +++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll @@ -8350,8 +8350,8 @@ ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: lui s2, 16 +; RV32I-NEXT: addi s2, s2, -1 ; RV32I-NEXT: and s3, s0, s2 ; RV32I-NEXT: j .LBB100_2 ; RV32I-NEXT: .LBB100_1: # %atomicrmw.start @@ -8419,8 +8419,8 @@ ; RV64I-NEXT: mv s0, a1 ; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s2, a0, -1 +; RV64I-NEXT: lui s2, 16 +; RV64I-NEXT: addiw s2, s2, -1 ; RV64I-NEXT: and s3, s0, s2 ; RV64I-NEXT: j .LBB100_2 ; RV64I-NEXT: .LBB100_1: # %atomicrmw.start @@ -8492,8 +8492,8 @@ ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: lui s2, 16 +; RV32I-NEXT: addi s2, s2, -1 ; RV32I-NEXT: and s3, s0, s2 ; RV32I-NEXT: j .LBB101_2 ; RV32I-NEXT: .LBB101_1: # %atomicrmw.start @@ -8561,8 +8561,8 @@ ; RV64I-NEXT: mv s0, a1 ; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s2, a0, -1 +; RV64I-NEXT: lui s2, 16 +; RV64I-NEXT: addiw s2, s2, -1 ; RV64I-NEXT: and s3, s0, s2 ; RV64I-NEXT: j .LBB101_2 ; RV64I-NEXT: .LBB101_1: # %atomicrmw.start @@ -8634,8 +8634,8 @@ ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: lui s2, 16 +; RV32I-NEXT: addi s2, s2, -1 ; RV32I-NEXT: and s3, s0, s2 ; RV32I-NEXT: j .LBB102_2 ; RV32I-NEXT: .LBB102_1: # %atomicrmw.start @@ -8703,8 +8703,8 @@ ; RV64I-NEXT: mv s0, a1 ; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s2, a0, -1 +; RV64I-NEXT: lui s2, 16 +; RV64I-NEXT: addiw s2, s2, -1 ; RV64I-NEXT: and s3, s0, s2 ; RV64I-NEXT: j .LBB102_2 ; RV64I-NEXT: .LBB102_1: # %atomicrmw.start @@ -8776,8 +8776,8 @@ ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: lui s2, 16 +; RV32I-NEXT: addi s2, s2, -1 ; RV32I-NEXT: and s3, s0, s2 ; RV32I-NEXT: j .LBB103_2 ; RV32I-NEXT: .LBB103_1: # %atomicrmw.start @@ -8845,8 +8845,8 @@ ; RV64I-NEXT: mv s0, a1 ; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s2, a0, -1 +; RV64I-NEXT: lui s2, 16 +; RV64I-NEXT: addiw s2, s2, -1 ; RV64I-NEXT: and s3, s0, s2 ; RV64I-NEXT: j .LBB103_2 ; RV64I-NEXT: .LBB103_1: # %atomicrmw.start @@ -8918,8 +8918,8 @@ ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: lui s2, 16 +; RV32I-NEXT: addi s2, s2, -1 ; RV32I-NEXT: and s3, s0, s2 ; RV32I-NEXT: j .LBB104_2 ; RV32I-NEXT: .LBB104_1: # %atomicrmw.start @@ -8987,8 +8987,8 @@ ; RV64I-NEXT: mv s0, a1 ; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s2, a0, -1 +; RV64I-NEXT: lui s2, 16 +; RV64I-NEXT: addiw s2, s2, -1 ; RV64I-NEXT: and s3, s0, s2 ; RV64I-NEXT: j .LBB104_2 ; RV64I-NEXT: .LBB104_1: # %atomicrmw.start @@ -9060,8 +9060,8 @@ ; 
RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: lui s2, 16 +; RV32I-NEXT: addi s2, s2, -1 ; RV32I-NEXT: and s3, s0, s2 ; RV32I-NEXT: j .LBB105_2 ; RV32I-NEXT: .LBB105_1: # %atomicrmw.start @@ -9129,8 +9129,8 @@ ; RV64I-NEXT: mv s0, a1 ; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s2, a0, -1 +; RV64I-NEXT: lui s2, 16 +; RV64I-NEXT: addiw s2, s2, -1 ; RV64I-NEXT: and s3, s0, s2 ; RV64I-NEXT: j .LBB105_2 ; RV64I-NEXT: .LBB105_1: # %atomicrmw.start @@ -9202,8 +9202,8 @@ ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: lui s2, 16 +; RV32I-NEXT: addi s2, s2, -1 ; RV32I-NEXT: and s3, s0, s2 ; RV32I-NEXT: j .LBB106_2 ; RV32I-NEXT: .LBB106_1: # %atomicrmw.start @@ -9271,8 +9271,8 @@ ; RV64I-NEXT: mv s0, a1 ; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s2, a0, -1 +; RV64I-NEXT: lui s2, 16 +; RV64I-NEXT: addiw s2, s2, -1 ; RV64I-NEXT: and s3, s0, s2 ; RV64I-NEXT: j .LBB106_2 ; RV64I-NEXT: .LBB106_1: # %atomicrmw.start @@ -9344,8 +9344,8 @@ ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: lui s2, 16 +; RV32I-NEXT: addi s2, s2, -1 ; RV32I-NEXT: and s3, s0, s2 ; RV32I-NEXT: j .LBB107_2 ; RV32I-NEXT: .LBB107_1: # %atomicrmw.start @@ -9413,8 +9413,8 @@ ; RV64I-NEXT: mv s0, a1 ; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s2, a0, -1 +; RV64I-NEXT: lui s2, 16 +; RV64I-NEXT: addiw s2, s2, -1 ; RV64I-NEXT: and s3, s0, s2 ; RV64I-NEXT: j .LBB107_2 ; RV64I-NEXT: .LBB107_1: # %atomicrmw.start @@ -9486,8 +9486,8 @@ ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: lui s2, 16 +; RV32I-NEXT: addi s2, s2, -1 ; RV32I-NEXT: and s3, s0, s2 ; RV32I-NEXT: j .LBB108_2 ; RV32I-NEXT: .LBB108_1: # %atomicrmw.start @@ -9555,8 +9555,8 @@ ; RV64I-NEXT: mv s0, a1 ; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s2, a0, -1 +; RV64I-NEXT: lui s2, 16 +; RV64I-NEXT: addiw s2, s2, -1 ; RV64I-NEXT: and s3, s0, s2 ; RV64I-NEXT: j .LBB108_2 ; RV64I-NEXT: .LBB108_1: # %atomicrmw.start @@ -9628,8 +9628,8 @@ ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: lui s2, 16 +; RV32I-NEXT: addi s2, s2, -1 ; RV32I-NEXT: and s3, s0, s2 ; RV32I-NEXT: j .LBB109_2 ; RV32I-NEXT: .LBB109_1: # %atomicrmw.start @@ -9697,8 +9697,8 @@ ; RV64I-NEXT: mv s0, a1 ; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s2, a0, -1 +; RV64I-NEXT: lui s2, 16 +; RV64I-NEXT: addiw s2, s2, -1 ; RV64I-NEXT: and s3, s0, s2 ; RV64I-NEXT: j .LBB109_2 ; RV64I-NEXT: .LBB109_1: # %atomicrmw.start diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll --- a/llvm/test/CodeGen/RISCV/atomic-signext.ll +++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll @@ -609,8 +609,8 @@ ; RV32I-NEXT: mv a2, s1 ; RV32I-NEXT: j .LBB10_1 ; RV32I-NEXT: .LBB10_4: # %atomicrmw.end -; RV32I-NEXT: slli a0, a3, 24 -; RV32I-NEXT: srai a0, a0, 24 +; RV32I-NEXT: slli a3, a3, 24 +; RV32I-NEXT: srai a0, a3, 24 ; RV32I-NEXT: lw ra, 28(sp) # 
4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload @@ -683,8 +683,8 @@ ; RV64I-NEXT: mv a2, s1 ; RV64I-NEXT: j .LBB10_1 ; RV64I-NEXT: .LBB10_4: # %atomicrmw.end -; RV64I-NEXT: slli a0, a3, 56 -; RV64I-NEXT: srai a0, a0, 56 +; RV64I-NEXT: slli a3, a3, 56 +; RV64I-NEXT: srai a0, a3, 56 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload @@ -761,8 +761,8 @@ ; RV32I-NEXT: mv a2, s1 ; RV32I-NEXT: j .LBB11_1 ; RV32I-NEXT: .LBB11_4: # %atomicrmw.end -; RV32I-NEXT: slli a0, a3, 24 -; RV32I-NEXT: srai a0, a0, 24 +; RV32I-NEXT: slli a3, a3, 24 +; RV32I-NEXT: srai a0, a3, 24 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload @@ -835,8 +835,8 @@ ; RV64I-NEXT: mv a2, s1 ; RV64I-NEXT: j .LBB11_1 ; RV64I-NEXT: .LBB11_4: # %atomicrmw.end -; RV64I-NEXT: slli a0, a3, 56 -; RV64I-NEXT: srai a0, a0, 56 +; RV64I-NEXT: slli a3, a3, 56 +; RV64I-NEXT: srai a0, a3, 56 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload @@ -911,8 +911,8 @@ ; RV32I-NEXT: mv a2, s1 ; RV32I-NEXT: j .LBB12_1 ; RV32I-NEXT: .LBB12_4: # %atomicrmw.end -; RV32I-NEXT: slli a0, a3, 24 -; RV32I-NEXT: srai a0, a0, 24 +; RV32I-NEXT: slli a3, a3, 24 +; RV32I-NEXT: srai a0, a3, 24 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload @@ -978,8 +978,8 @@ ; RV64I-NEXT: mv a2, s1 ; RV64I-NEXT: j .LBB12_1 ; RV64I-NEXT: .LBB12_4: # %atomicrmw.end -; RV64I-NEXT: slli a0, a3, 56 -; RV64I-NEXT: srai a0, a0, 56 +; RV64I-NEXT: slli a3, a3, 56 +; RV64I-NEXT: srai a0, a3, 56 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload @@ -1049,8 +1049,8 @@ ; RV32I-NEXT: mv a2, s1 ; RV32I-NEXT: j .LBB13_1 ; RV32I-NEXT: .LBB13_4: # %atomicrmw.end -; RV32I-NEXT: slli a0, a3, 24 -; RV32I-NEXT: srai a0, a0, 24 +; RV32I-NEXT: slli a3, a3, 24 +; RV32I-NEXT: srai a0, a3, 24 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload @@ -1116,8 +1116,8 @@ ; RV64I-NEXT: mv a2, s1 ; RV64I-NEXT: j .LBB13_1 ; RV64I-NEXT: .LBB13_4: # %atomicrmw.end -; RV64I-NEXT: slli a0, a3, 56 -; RV64I-NEXT: srai a0, a0, 56 +; RV64I-NEXT: slli a3, a3, 56 +; RV64I-NEXT: srai a0, a3, 56 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload @@ -1657,8 +1657,8 @@ ; RV32I-NEXT: mv a2, s1 ; RV32I-NEXT: j .LBB21_1 ; RV32I-NEXT: .LBB21_4: # %atomicrmw.end -; RV32I-NEXT: slli a0, a3, 16 -; RV32I-NEXT: srai a0, a0, 16 +; RV32I-NEXT: slli a3, a3, 16 +; RV32I-NEXT: srai a0, a3, 16 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload @@ -1733,8 +1733,8 @@ ; RV64I-NEXT: mv a2, s1 ; RV64I-NEXT: j .LBB21_1 ; RV64I-NEXT: .LBB21_4: # %atomicrmw.end -; RV64I-NEXT: slli a0, a3, 48 -; RV64I-NEXT: srai a0, a0, 48 +; RV64I-NEXT: slli a3, a3, 48 +; RV64I-NEXT: srai a0, a3, 48 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte 
Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload @@ -1813,8 +1813,8 @@ ; RV32I-NEXT: mv a2, s1 ; RV32I-NEXT: j .LBB22_1 ; RV32I-NEXT: .LBB22_4: # %atomicrmw.end -; RV32I-NEXT: slli a0, a3, 16 -; RV32I-NEXT: srai a0, a0, 16 +; RV32I-NEXT: slli a3, a3, 16 +; RV32I-NEXT: srai a0, a3, 16 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload @@ -1889,8 +1889,8 @@ ; RV64I-NEXT: mv a2, s1 ; RV64I-NEXT: j .LBB22_1 ; RV64I-NEXT: .LBB22_4: # %atomicrmw.end -; RV64I-NEXT: slli a0, a3, 48 -; RV64I-NEXT: srai a0, a0, 48 +; RV64I-NEXT: slli a3, a3, 48 +; RV64I-NEXT: srai a0, a3, 48 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload @@ -1946,8 +1946,8 @@ ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: lui s2, 16 +; RV32I-NEXT: addi s2, s2, -1 ; RV32I-NEXT: and s3, s0, s2 ; RV32I-NEXT: j .LBB23_2 ; RV32I-NEXT: .LBB23_1: # %atomicrmw.start @@ -1970,8 +1970,8 @@ ; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB23_1 ; RV32I-NEXT: .LBB23_4: # %atomicrmw.end -; RV32I-NEXT: slli a0, a1, 16 -; RV32I-NEXT: srai a0, a0, 16 +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: srai a0, a1, 16 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload @@ -2018,8 +2018,8 @@ ; RV64I-NEXT: mv s0, a1 ; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s2, a0, -1 +; RV64I-NEXT: lui s2, 16 +; RV64I-NEXT: addiw s2, s2, -1 ; RV64I-NEXT: and s3, s0, s2 ; RV64I-NEXT: j .LBB23_2 ; RV64I-NEXT: .LBB23_1: # %atomicrmw.start @@ -2042,8 +2042,8 @@ ; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB23_1 ; RV64I-NEXT: .LBB23_4: # %atomicrmw.end -; RV64I-NEXT: slli a0, a1, 48 -; RV64I-NEXT: srai a0, a0, 48 +; RV64I-NEXT: slli a1, a1, 48 +; RV64I-NEXT: srai a0, a1, 48 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload @@ -2094,8 +2094,8 @@ ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 ; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: lui s2, 16 +; RV32I-NEXT: addi s2, s2, -1 ; RV32I-NEXT: and s3, s0, s2 ; RV32I-NEXT: j .LBB24_2 ; RV32I-NEXT: .LBB24_1: # %atomicrmw.start @@ -2118,8 +2118,8 @@ ; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB24_1 ; RV32I-NEXT: .LBB24_4: # %atomicrmw.end -; RV32I-NEXT: slli a0, a1, 16 -; RV32I-NEXT: srai a0, a0, 16 +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: srai a0, a1, 16 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload @@ -2166,8 +2166,8 @@ ; RV64I-NEXT: mv s0, a1 ; RV64I-NEXT: mv s1, a0 ; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s2, a0, -1 +; RV64I-NEXT: lui s2, 16 +; RV64I-NEXT: addiw s2, s2, -1 ; RV64I-NEXT: and s3, s0, s2 ; RV64I-NEXT: j .LBB24_2 ; RV64I-NEXT: .LBB24_1: # %atomicrmw.start @@ -2190,8 +2190,8 @@ ; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB24_1 ; RV64I-NEXT: .LBB24_4: # %atomicrmw.end -; RV64I-NEXT: slli a0, a1, 48 -; RV64I-NEXT: srai a0, a0, 48 +; RV64I-NEXT: slli a1, a1, 48 +; RV64I-NEXT: srai a0, a1, 48 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld 
s0, 32(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/bittest.ll b/llvm/test/CodeGen/RISCV/bittest.ll --- a/llvm/test/CodeGen/RISCV/bittest.ll +++ b/llvm/test/CodeGen/RISCV/bittest.ll @@ -987,8 +987,8 @@ define i64 @bit_55_nz_select_i64(i64 %a, i64 %b, i64 %c) { ; RV32I-LABEL: bit_55_nz_select_i64: ; RV32I: # %bb.0: -; RV32I-NEXT: slli a0, a1, 8 -; RV32I-NEXT: srli a6, a0, 31 +; RV32I-NEXT: slli a1, a1, 8 +; RV32I-NEXT: srli a6, a1, 31 ; RV32I-NEXT: mv a1, a3 ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: bnez a6, .LBB34_2 @@ -1584,8 +1584,8 @@ define void @bit_62_z_branch_i64(i64 %0) { ; RV32-LABEL: bit_62_z_branch_i64: ; RV32: # %bb.0: -; RV32-NEXT: slli a0, a1, 1 -; RV32-NEXT: bltz a0, .LBB55_2 +; RV32-NEXT: slli a1, a1, 1 +; RV32-NEXT: bltz a1, .LBB55_2 ; RV32-NEXT: # %bb.1: ; RV32-NEXT: tail bar@plt ; RV32-NEXT: .LBB55_2: @@ -1614,8 +1614,8 @@ define void @bit_62_nz_branch_i64(i64 %0) { ; RV32-LABEL: bit_62_nz_branch_i64: ; RV32: # %bb.0: -; RV32-NEXT: slli a0, a1, 1 -; RV32-NEXT: bgez a0, .LBB56_2 +; RV32-NEXT: slli a1, a1, 1 +; RV32-NEXT: bgez a1, .LBB56_2 ; RV32-NEXT: # %bb.1: ; RV32-NEXT: tail bar@plt ; RV32-NEXT: .LBB56_2: diff --git a/llvm/test/CodeGen/RISCV/branch-relaxation.ll b/llvm/test/CodeGen/RISCV/branch-relaxation.ll --- a/llvm/test/CodeGen/RISCV/branch-relaxation.ll +++ b/llvm/test/CodeGen/RISCV/branch-relaxation.ll @@ -342,11 +342,9 @@ ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: li ra, 1 ; CHECK-RV64-NEXT: #NO_APP -; CHECK-RV64-NEXT: sd ra, 16(sp) # 8-byte Folded Spill ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: li t0, 5 ; CHECK-RV64-NEXT: #NO_APP -; CHECK-RV64-NEXT: sd t0, 8(sp) # 8-byte Folded Spill ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: li t1, 6 ; CHECK-RV64-NEXT: #NO_APP @@ -422,24 +420,24 @@ ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: li t5, 30 ; CHECK-RV64-NEXT: #NO_APP -; CHECK-RV64-NEXT: sext.w t0, t5 +; CHECK-RV64-NEXT: sd t5, 16(sp) # 8-byte Folded Spill +; CHECK-RV64-NEXT: sext.w t5, t5 ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: li t6, 31 ; CHECK-RV64-NEXT: #NO_APP -; CHECK-RV64-NEXT: sext.w ra, t6 -; CHECK-RV64-NEXT: beq t0, ra, .LBB2_1 +; CHECK-RV64-NEXT: sd t6, 8(sp) # 8-byte Folded Spill +; CHECK-RV64-NEXT: sext.w t6, t6 +; CHECK-RV64-NEXT: beq t5, t6, .LBB2_1 ; CHECK-RV64-NEXT: # %bb.3: -; CHECK-RV64-NEXT: jump .LBB2_2, t0 +; CHECK-RV64-NEXT: jump .LBB2_2, t5 ; CHECK-RV64-NEXT: .LBB2_1: # %branch_1 ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: .zero 1048576 ; CHECK-RV64-NEXT: #NO_APP ; CHECK-RV64-NEXT: .LBB2_2: # %branch_2 -; CHECK-RV64-NEXT: ld ra, 16(sp) # 8-byte Folded Reload ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: # reg use ra ; CHECK-RV64-NEXT: #NO_APP -; CHECK-RV64-NEXT: ld t0, 8(sp) # 8-byte Folded Reload ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: # reg use t0 ; CHECK-RV64-NEXT: #NO_APP @@ -515,9 +513,11 @@ ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: # reg use t4 ; CHECK-RV64-NEXT: #NO_APP +; CHECK-RV64-NEXT: ld t5, 16(sp) # 8-byte Folded Reload ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: # reg use t5 ; CHECK-RV64-NEXT: #NO_APP +; CHECK-RV64-NEXT: ld t6, 8(sp) # 8-byte Folded Reload ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: # reg use t6 ; CHECK-RV64-NEXT: #NO_APP @@ -885,17 +885,9 @@ ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: li ra, 1 ; CHECK-RV64-NEXT: #NO_APP -; CHECK-RV64-NEXT: lui a0, 1 -; CHECK-RV64-NEXT: addiw a0, a0, -8 -; CHECK-RV64-NEXT: add a0, sp, a0 -; CHECK-RV64-NEXT: sd ra, 0(a0) # 8-byte Folded Spill ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: li t0, 5 ; CHECK-RV64-NEXT: 
#NO_APP -; CHECK-RV64-NEXT: lui a0, 1 -; CHECK-RV64-NEXT: addiw a0, a0, -16 -; CHECK-RV64-NEXT: add a0, sp, a0 -; CHECK-RV64-NEXT: sd t0, 0(a0) # 8-byte Folded Spill ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: li t1, 6 ; CHECK-RV64-NEXT: #NO_APP @@ -971,30 +963,32 @@ ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: li t5, 30 ; CHECK-RV64-NEXT: #NO_APP -; CHECK-RV64-NEXT: sext.w t0, t5 +; CHECK-RV64-NEXT: sd t0, 0(sp) +; CHECK-RV64-NEXT: lui t0, 1 +; CHECK-RV64-NEXT: addiw t0, t0, -8 +; CHECK-RV64-NEXT: add t0, sp, t0 +; CHECK-RV64-NEXT: sd t5, 0(t0) # 8-byte Folded Spill +; CHECK-RV64-NEXT: sext.w t5, t5 ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: li t6, 31 ; CHECK-RV64-NEXT: #NO_APP -; CHECK-RV64-NEXT: sext.w ra, t6 -; CHECK-RV64-NEXT: beq t0, ra, .LBB3_1 +; CHECK-RV64-NEXT: lui t0, 1 +; CHECK-RV64-NEXT: addiw t0, t0, -16 +; CHECK-RV64-NEXT: add t0, sp, t0 +; CHECK-RV64-NEXT: sd t6, 0(t0) # 8-byte Folded Spill +; CHECK-RV64-NEXT: ld t0, 0(sp) +; CHECK-RV64-NEXT: sext.w t6, t6 +; CHECK-RV64-NEXT: beq t5, t6, .LBB3_1 ; CHECK-RV64-NEXT: # %bb.3: -; CHECK-RV64-NEXT: jump .LBB3_2, t0 +; CHECK-RV64-NEXT: jump .LBB3_2, t5 ; CHECK-RV64-NEXT: .LBB3_1: # %branch_1 ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: .zero 1048576 ; CHECK-RV64-NEXT: #NO_APP ; CHECK-RV64-NEXT: .LBB3_2: # %branch_2 -; CHECK-RV64-NEXT: lui t0, 1 -; CHECK-RV64-NEXT: addiw t0, t0, -8 -; CHECK-RV64-NEXT: add t0, sp, t0 -; CHECK-RV64-NEXT: ld ra, 0(t0) # 8-byte Folded Reload ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: # reg use ra ; CHECK-RV64-NEXT: #NO_APP -; CHECK-RV64-NEXT: lui t0, 1 -; CHECK-RV64-NEXT: addiw t0, t0, -16 -; CHECK-RV64-NEXT: add t0, sp, t0 -; CHECK-RV64-NEXT: ld t0, 0(t0) # 8-byte Folded Reload ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: # reg use t0 ; CHECK-RV64-NEXT: #NO_APP @@ -1070,9 +1064,17 @@ ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: # reg use t4 ; CHECK-RV64-NEXT: #NO_APP +; CHECK-RV64-NEXT: lui a0, 1 +; CHECK-RV64-NEXT: addiw a0, a0, -8 +; CHECK-RV64-NEXT: add a0, sp, a0 +; CHECK-RV64-NEXT: ld t5, 0(a0) # 8-byte Folded Reload ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: # reg use t5 ; CHECK-RV64-NEXT: #NO_APP +; CHECK-RV64-NEXT: lui a0, 1 +; CHECK-RV64-NEXT: addiw a0, a0, -16 +; CHECK-RV64-NEXT: add a0, sp, a0 +; CHECK-RV64-NEXT: ld t6, 0(a0) # 8-byte Folded Reload ; CHECK-RV64-NEXT: #APP ; CHECK-RV64-NEXT: # reg use t6 ; CHECK-RV64-NEXT: #NO_APP diff --git a/llvm/test/CodeGen/RISCV/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/calling-conv-half.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-half.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-half.ll @@ -19,8 +19,8 @@ ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: slli a0, a1, 16 -; RV32I-NEXT: srli a0, a0, 16 +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: srli a0, a1, 16 ; RV32I-NEXT: call __extendhfsf2@plt ; RV32I-NEXT: call __fixsfsi@plt ; RV32I-NEXT: add a0, s0, a0 @@ -35,8 +35,8 @@ ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: slli a0, a1, 48 -; RV64I-NEXT: srli a0, a0, 48 +; RV64I-NEXT: slli a1, a1, 48 +; RV64I-NEXT: srli a0, a1, 48 ; RV64I-NEXT: call __extendhfsf2@plt ; RV64I-NEXT: call __fixsfdi@plt ; RV64I-NEXT: addw a0, s0, a0 diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll +++ 
b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-common.ll @@ -33,8 +33,8 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: li a0, 1 -; RV64I-NEXT: slli a1, a0, 62 +; RV64I-NEXT: li a1, 1 +; RV64I-NEXT: slli a1, a1, 62 ; RV64I-NEXT: li a0, 1 ; RV64I-NEXT: call callee_double_in_regs@plt ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll --- a/llvm/test/CodeGen/RISCV/double-convert.ll +++ b/llvm/test/CodeGen/RISCV/double-convert.ll @@ -110,10 +110,9 @@ ; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lui a0, 269824 -; RV32I-NEXT: addi a3, a0, -1 +; RV32I-NEXT: lui a3, 269824 +; RV32I-NEXT: addi a3, a3, -1 ; RV32I-NEXT: lui a2, 1047552 -; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call __gtdf2@plt ; RV32I-NEXT: mv s2, a0 ; RV32I-NEXT: lui a3, 794112 @@ -161,9 +160,8 @@ ; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: li a0, -497 -; RV64I-NEXT: slli a1, a0, 53 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: li a1, -497 +; RV64I-NEXT: slli a1, a1, 53 ; RV64I-NEXT: call __gedf2@plt ; RV64I-NEXT: mv s2, a0 ; RV64I-NEXT: mv a0, s0 @@ -306,10 +304,9 @@ ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lui a0, 270080 -; RV32I-NEXT: addi a3, a0, -1 +; RV32I-NEXT: lui a3, 270080 +; RV32I-NEXT: addi a3, a3, -1 ; RV32I-NEXT: lui a2, 1048064 -; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call __gtdf2@plt ; RV32I-NEXT: sgtz a0, a0 ; RV32I-NEXT: neg s2, a0 @@ -596,10 +593,9 @@ ; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lui a0, 278016 -; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: lui s2, 278016 +; RV32I-NEXT: addi s2, s2, -1 ; RV32I-NEXT: li a2, -1 -; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a3, s2 ; RV32I-NEXT: call __gtdf2@plt ; RV32I-NEXT: mv s4, a0 @@ -676,9 +672,8 @@ ; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: li a0, -481 -; RV64I-NEXT: slli a1, a0, 53 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: li a1, -481 +; RV64I-NEXT: slli a1, a1, 53 ; RV64I-NEXT: call __gedf2@plt ; RV64I-NEXT: mv s2, a0 ; RV64I-NEXT: mv a0, s0 @@ -800,10 +795,9 @@ ; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lui a0, 278272 -; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: lui s2, 278272 +; RV32I-NEXT: addi s2, s2, -1 ; RV32I-NEXT: li a2, -1 -; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a3, s2 ; RV32I-NEXT: call __gtdf2@plt ; RV32I-NEXT: sgtz a0, a0 @@ -1357,8 +1351,8 @@ ; RV32I-NEXT: .LBB26_2: # %start ; RV32I-NEXT: blez s2, .LBB26_4 ; RV32I-NEXT: # %bb.3: # %start -; RV32I-NEXT: lui a0, 8 -; RV32I-NEXT: addi s3, a0, -1 +; RV32I-NEXT: lui s3, 8 +; RV32I-NEXT: addi s3, s3, -1 ; RV32I-NEXT: .LBB26_4: # %start ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s0 @@ -1387,9 +1381,8 @@ ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: li a0, -505 -; RV64I-NEXT: slli a1, a0, 53 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: li a1, -505 +; RV64I-NEXT: slli a1, a1, 53 ; RV64I-NEXT: call __gedf2@plt ; RV64I-NEXT: mv s2, a0 ; RV64I-NEXT: mv a0, s0 @@ -1406,8 +1399,8 @@ ; RV64I-NEXT: call __gtdf2@plt ; 
RV64I-NEXT: blez a0, .LBB26_4 ; RV64I-NEXT: # %bb.3: # %start -; RV64I-NEXT: lui a0, 8 -; RV64I-NEXT: addiw s1, a0, -1 +; RV64I-NEXT: lui s1, 8 +; RV64I-NEXT: addiw s1, s1, -1 ; RV64I-NEXT: .LBB26_4: # %start ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 @@ -1492,9 +1485,8 @@ ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s1, a1 ; RV32I-NEXT: mv s2, a0 -; RV32I-NEXT: lui a0, 265984 -; RV32I-NEXT: addi a3, a0, -32 -; RV32I-NEXT: mv a0, s2 +; RV32I-NEXT: lui a3, 265984 +; RV32I-NEXT: addi a3, a3, -32 ; RV32I-NEXT: li a2, 0 ; RV32I-NEXT: call __gtdf2@plt ; RV32I-NEXT: mv s3, a0 @@ -1693,9 +1685,8 @@ ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: li a0, -509 -; RV64I-NEXT: slli a1, a0, 53 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: li a1, -509 +; RV64I-NEXT: slli a1, a1, 53 ; RV64I-NEXT: call __gedf2@plt ; RV64I-NEXT: mv s2, a0 ; RV64I-NEXT: mv a0, s0 @@ -1705,8 +1696,8 @@ ; RV64I-NEXT: # %bb.1: # %start ; RV64I-NEXT: li s1, -128 ; RV64I-NEXT: .LBB30_2: # %start -; RV64I-NEXT: lui a0, 65919 -; RV64I-NEXT: slli a1, a0, 34 +; RV64I-NEXT: lui a1, 65919 +; RV64I-NEXT: slli a1, a1, 34 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __gtdf2@plt ; RV64I-NEXT: blez a0, .LBB30_4 @@ -1845,8 +1836,8 @@ ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __fixunsdfdi@plt ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lui a0, 131967 -; RV64I-NEXT: slli a1, a0, 33 +; RV64I-NEXT: lui a1, 131967 +; RV64I-NEXT: slli a1, a1, 33 ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __gtdf2@plt ; RV64I-NEXT: blez a0, .LBB32_2 @@ -1902,10 +1893,9 @@ ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lui a0, 270080 -; RV32I-NEXT: addi a3, a0, -1 +; RV32I-NEXT: lui a3, 270080 +; RV32I-NEXT: addi a3, a3, -1 ; RV32I-NEXT: lui a2, 1048064 -; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call __gtdf2@plt ; RV32I-NEXT: sgtz a0, a0 ; RV32I-NEXT: neg s2, a0 @@ -1993,10 +1983,9 @@ ; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s0, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lui a0, 269824 -; RV32I-NEXT: addi a3, a0, -1 +; RV32I-NEXT: lui a3, 269824 +; RV32I-NEXT: addi a3, a3, -1 ; RV32I-NEXT: lui a2, 1047552 -; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: call __gtdf2@plt ; RV32I-NEXT: mv s2, a0 ; RV32I-NEXT: lui a3, 794112 @@ -2044,9 +2033,8 @@ ; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: li a0, -497 -; RV64I-NEXT: slli a1, a0, 53 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: li a1, -497 +; RV64I-NEXT: slli a1, a1, 53 ; RV64I-NEXT: call __gedf2@plt ; RV64I-NEXT: mv s2, a0 ; RV64I-NEXT: mv a0, s0 diff --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll --- a/llvm/test/CodeGen/RISCV/float-arith.ll +++ b/llvm/test/CodeGen/RISCV/float-arith.ll @@ -238,8 +238,8 @@ ; RV32I-NEXT: not a0, a0 ; RV32I-NEXT: lui a1, 524288 ; RV32I-NEXT: and a0, a0, a1 -; RV32I-NEXT: slli a1, s0, 1 -; RV32I-NEXT: srli a1, a1, 1 +; RV32I-NEXT: slli s0, s0, 1 +; RV32I-NEXT: srli a1, s0, 1 ; RV32I-NEXT: or a0, a1, a0 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload @@ -256,8 +256,8 @@ ; RV64I-NEXT: not a0, a0 ; RV64I-NEXT: lui a1, 524288 ; RV64I-NEXT: and a0, a0, a1 -; RV64I-NEXT: slli a1, s0, 33 -; RV64I-NEXT: srli a1, a1, 33 +; RV64I-NEXT: slli s0, s0, 33 +; RV64I-NEXT: srli a1, s0, 33 ; RV64I-NEXT: or a0, a1, a0 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte 
Folded Reload ; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll --- a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll +++ b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll @@ -73,8 +73,8 @@ ; RV32F-NEXT: mv s1, a0 ; RV32F-NEXT: call __adddf3@plt ; RV32F-NEXT: mv a2, a0 -; RV32F-NEXT: slli a0, a1, 1 -; RV32F-NEXT: srli a3, a0, 1 +; RV32F-NEXT: slli a1, a1, 1 +; RV32F-NEXT: srli a3, a1, 1 ; RV32F-NEXT: mv a0, s1 ; RV32F-NEXT: mv a1, s0 ; RV32F-NEXT: call __adddf3@plt diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll --- a/llvm/test/CodeGen/RISCV/float-convert.ll +++ b/llvm/test/CodeGen/RISCV/float-convert.ll @@ -65,8 +65,8 @@ ; RV32I-NEXT: # %bb.1: # %start ; RV32I-NEXT: lui s1, 524288 ; RV32I-NEXT: .LBB1_2: # %start -; RV32I-NEXT: lui a0, 323584 -; RV32I-NEXT: addi a1, a0, -1 +; RV32I-NEXT: lui a1, 323584 +; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __gtsf2@plt ; RV32I-NEXT: blez a0, .LBB1_4 @@ -107,8 +107,8 @@ ; RV64I-NEXT: # %bb.1: # %start ; RV64I-NEXT: lui s1, 524288 ; RV64I-NEXT: .LBB1_2: # %start -; RV64I-NEXT: lui a0, 323584 -; RV64I-NEXT: addiw a1, a0, -1 +; RV64I-NEXT: lui a1, 323584 +; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __gtsf2@plt ; RV64I-NEXT: blez a0, .LBB1_4 @@ -239,8 +239,8 @@ ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __fixunssfsi@plt ; RV32I-NEXT: and s1, s1, a0 -; RV32I-NEXT: lui a0, 325632 -; RV32I-NEXT: addi a1, a0, -1 +; RV32I-NEXT: lui a1, 325632 +; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __gtsf2@plt ; RV32I-NEXT: sgtz a0, a0 @@ -266,8 +266,8 @@ ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __fixunssfdi@plt ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lui a0, 325632 -; RV64I-NEXT: addiw a1, a0, -1 +; RV64I-NEXT: lui a1, 325632 +; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __gtsf2@plt ; RV64I-NEXT: blez a0, .LBB4_2 @@ -584,8 +584,8 @@ ; RV32I-NEXT: # %bb.1: # %start ; RV32I-NEXT: lui s3, 524288 ; RV32I-NEXT: .LBB12_2: # %start -; RV32I-NEXT: lui a0, 389120 -; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: lui s2, 389120 +; RV32I-NEXT: addi s2, s2, -1 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __gtsf2@plt @@ -647,8 +647,8 @@ ; RV64I-NEXT: # %bb.1: # %start ; RV64I-NEXT: slli s1, s3, 63 ; RV64I-NEXT: .LBB12_2: # %start -; RV64I-NEXT: lui a0, 389120 -; RV64I-NEXT: addiw a1, a0, -1 +; RV64I-NEXT: lui a1, 389120 +; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __gtsf2@plt ; RV64I-NEXT: blez a0, .LBB12_4 @@ -762,8 +762,8 @@ ; RV32I-NEXT: call __fixunssfdi@plt ; RV32I-NEXT: mv s1, a1 ; RV32I-NEXT: and s3, s2, a0 -; RV32I-NEXT: lui a0, 391168 -; RV32I-NEXT: addi s2, a0, -1 +; RV32I-NEXT: lui s2, 391168 +; RV32I-NEXT: addi s2, s2, -1 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __gtsf2@plt @@ -805,8 +805,8 @@ ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __fixunssfdi@plt ; RV64I-NEXT: and s1, s1, a0 -; RV64I-NEXT: lui a0, 391168 -; RV64I-NEXT: addiw a1, a0, -1 +; RV64I-NEXT: lui a1, 391168 +; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __gtsf2@plt ; RV64I-NEXT: sgtz a0, a0 @@ -1204,8 +1204,8 @@ ; RV32I-NEXT: call __gtsf2@plt ; RV32I-NEXT: blez a0, .LBB24_4 ; RV32I-NEXT: # %bb.3: # %start -; RV32I-NEXT: lui a0, 8 -; RV32I-NEXT: addi s1, a0, -1 +; RV32I-NEXT: lui s1, 8 +; 
RV32I-NEXT: addi s1, s1, -1 ; RV32I-NEXT: .LBB24_4: # %start ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: mv a1, s0 @@ -1246,8 +1246,8 @@ ; RV64I-NEXT: call __gtsf2@plt ; RV64I-NEXT: blez a0, .LBB24_4 ; RV64I-NEXT: # %bb.3: # %start -; RV64I-NEXT: lui a0, 8 -; RV64I-NEXT: addiw s1, a0, -1 +; RV64I-NEXT: lui s1, 8 +; RV64I-NEXT: addiw s1, s1, -1 ; RV64I-NEXT: .LBB24_4: # %start ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 @@ -1710,8 +1710,8 @@ ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __fixunssfsi@plt ; RV32I-NEXT: and s1, s1, a0 -; RV32I-NEXT: lui a0, 325632 -; RV32I-NEXT: addi a1, a0, -1 +; RV32I-NEXT: lui a1, 325632 +; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __gtsf2@plt ; RV32I-NEXT: sgtz a0, a0 @@ -1737,8 +1737,8 @@ ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __fixunssfdi@plt ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lui a0, 325632 -; RV64I-NEXT: addiw a1, a0, -1 +; RV64I-NEXT: lui a1, 325632 +; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __gtsf2@plt ; RV64I-NEXT: blez a0, .LBB31_2 @@ -1794,8 +1794,8 @@ ; RV32I-NEXT: # %bb.1: # %start ; RV32I-NEXT: lui s1, 524288 ; RV32I-NEXT: .LBB32_2: # %start -; RV32I-NEXT: lui a0, 323584 -; RV32I-NEXT: addi a1, a0, -1 +; RV32I-NEXT: lui a1, 323584 +; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __gtsf2@plt ; RV32I-NEXT: blez a0, .LBB32_4 @@ -1836,8 +1836,8 @@ ; RV64I-NEXT: # %bb.1: # %start ; RV64I-NEXT: lui s1, 524288 ; RV64I-NEXT: .LBB32_2: # %start -; RV64I-NEXT: lui a0, 323584 -; RV64I-NEXT: addiw a1, a0, -1 +; RV64I-NEXT: lui a1, 323584 +; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __gtsf2@plt ; RV64I-NEXT: blez a0, .LBB32_4 diff --git a/llvm/test/CodeGen/RISCV/fold-vector-cmp.ll b/llvm/test/CodeGen/RISCV/fold-vector-cmp.ll --- a/llvm/test/CodeGen/RISCV/fold-vector-cmp.ll +++ b/llvm/test/CodeGen/RISCV/fold-vector-cmp.ll @@ -17,9 +17,9 @@ ; CHECK-V-NEXT: vmv.v.x v8, a1 ; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, tu, ma ; CHECK-V-NEXT: vmv.s.x v8, a0 -; CHECK-V-NEXT: addiw a0, a1, 2 +; CHECK-V-NEXT: addiw a1, a1, 2 ; CHECK-V-NEXT: vsetvli zero, zero, e32, mf2, ta, ma -; CHECK-V-NEXT: vmslt.vx v0, v8, a0 +; CHECK-V-NEXT: vmslt.vx v0, v8, a1 ; CHECK-V-NEXT: vmv.v.i v8, 0 ; CHECK-V-NEXT: vmerge.vim v8, v8, 1, v0 ; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma diff --git a/llvm/test/CodeGen/RISCV/forced-atomics.ll b/llvm/test/CodeGen/RISCV/forced-atomics.ll --- a/llvm/test/CodeGen/RISCV/forced-atomics.ll +++ b/llvm/test/CodeGen/RISCV/forced-atomics.ll @@ -2957,8 +2957,8 @@ ; RV64-NO-ATOMIC-NEXT: sd s2, 16(sp) # 8-byte Folded Spill ; RV64-NO-ATOMIC-NEXT: mv s0, a0 ; RV64-NO-ATOMIC-NEXT: ld s2, 0(a0) -; RV64-NO-ATOMIC-NEXT: li a0, 1023 -; RV64-NO-ATOMIC-NEXT: slli s1, a0, 52 +; RV64-NO-ATOMIC-NEXT: li s1, 1023 +; RV64-NO-ATOMIC-NEXT: slli s1, s1, 52 ; RV64-NO-ATOMIC-NEXT: .LBB54_1: # %atomicrmw.start ; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64-NO-ATOMIC-NEXT: mv a0, s2 @@ -2991,8 +2991,8 @@ ; RV64-ATOMIC-NEXT: sd s2, 0(sp) # 8-byte Folded Spill ; RV64-ATOMIC-NEXT: mv s0, a0 ; RV64-ATOMIC-NEXT: ld a0, 0(a0) -; RV64-ATOMIC-NEXT: li a1, 1023 -; RV64-ATOMIC-NEXT: slli s1, a1, 52 +; RV64-ATOMIC-NEXT: li s1, 1023 +; RV64-ATOMIC-NEXT: slli s1, s1, 52 ; RV64-ATOMIC-NEXT: .LBB54_1: # %atomicrmw.start ; RV64-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64-ATOMIC-NEXT: mv s2, a0 @@ -3063,8 +3063,8 @@ ; RV64-NO-ATOMIC-NEXT: sd s2, 16(sp) # 8-byte Folded Spill ; RV64-NO-ATOMIC-NEXT: mv s0, a0 ; RV64-NO-ATOMIC-NEXT: ld s2, 
0(a0) -; RV64-NO-ATOMIC-NEXT: li a0, -1025 -; RV64-NO-ATOMIC-NEXT: slli s1, a0, 52 +; RV64-NO-ATOMIC-NEXT: li s1, -1025 +; RV64-NO-ATOMIC-NEXT: slli s1, s1, 52 ; RV64-NO-ATOMIC-NEXT: .LBB55_1: # %atomicrmw.start ; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64-NO-ATOMIC-NEXT: mv a0, s2 @@ -3097,8 +3097,8 @@ ; RV64-ATOMIC-NEXT: sd s2, 0(sp) # 8-byte Folded Spill ; RV64-ATOMIC-NEXT: mv s0, a0 ; RV64-ATOMIC-NEXT: ld a0, 0(a0) -; RV64-ATOMIC-NEXT: li a1, -1025 -; RV64-ATOMIC-NEXT: slli s1, a1, 52 +; RV64-ATOMIC-NEXT: li s1, -1025 +; RV64-ATOMIC-NEXT: slli s1, s1, 52 ; RV64-ATOMIC-NEXT: .LBB55_1: # %atomicrmw.start ; RV64-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64-ATOMIC-NEXT: mv s2, a0 @@ -3169,8 +3169,8 @@ ; RV64-NO-ATOMIC-NEXT: sd s2, 16(sp) # 8-byte Folded Spill ; RV64-NO-ATOMIC-NEXT: mv s0, a0 ; RV64-NO-ATOMIC-NEXT: ld s2, 0(a0) -; RV64-NO-ATOMIC-NEXT: li a0, 1023 -; RV64-NO-ATOMIC-NEXT: slli s1, a0, 52 +; RV64-NO-ATOMIC-NEXT: li s1, 1023 +; RV64-NO-ATOMIC-NEXT: slli s1, s1, 52 ; RV64-NO-ATOMIC-NEXT: .LBB56_1: # %atomicrmw.start ; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64-NO-ATOMIC-NEXT: mv a0, s2 @@ -3203,8 +3203,8 @@ ; RV64-ATOMIC-NEXT: sd s2, 0(sp) # 8-byte Folded Spill ; RV64-ATOMIC-NEXT: mv s0, a0 ; RV64-ATOMIC-NEXT: ld a0, 0(a0) -; RV64-ATOMIC-NEXT: li a1, 1023 -; RV64-ATOMIC-NEXT: slli s1, a1, 52 +; RV64-ATOMIC-NEXT: li s1, 1023 +; RV64-ATOMIC-NEXT: slli s1, s1, 52 ; RV64-ATOMIC-NEXT: .LBB56_1: # %atomicrmw.start ; RV64-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64-ATOMIC-NEXT: mv s2, a0 @@ -3275,8 +3275,8 @@ ; RV64-NO-ATOMIC-NEXT: sd s2, 16(sp) # 8-byte Folded Spill ; RV64-NO-ATOMIC-NEXT: mv s0, a0 ; RV64-NO-ATOMIC-NEXT: ld s2, 0(a0) -; RV64-NO-ATOMIC-NEXT: li a0, 1023 -; RV64-NO-ATOMIC-NEXT: slli s1, a0, 52 +; RV64-NO-ATOMIC-NEXT: li s1, 1023 +; RV64-NO-ATOMIC-NEXT: slli s1, s1, 52 ; RV64-NO-ATOMIC-NEXT: .LBB57_1: # %atomicrmw.start ; RV64-NO-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64-NO-ATOMIC-NEXT: mv a0, s2 @@ -3309,8 +3309,8 @@ ; RV64-ATOMIC-NEXT: sd s2, 0(sp) # 8-byte Folded Spill ; RV64-ATOMIC-NEXT: mv s0, a0 ; RV64-ATOMIC-NEXT: ld a0, 0(a0) -; RV64-ATOMIC-NEXT: li a1, 1023 -; RV64-ATOMIC-NEXT: slli s1, a1, 52 +; RV64-ATOMIC-NEXT: li s1, 1023 +; RV64-ATOMIC-NEXT: slli s1, s1, 52 ; RV64-ATOMIC-NEXT: .LBB57_1: # %atomicrmw.start ; RV64-ATOMIC-NEXT: # =>This Inner Loop Header: Depth=1 ; RV64-ATOMIC-NEXT: mv s2, a0 diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll --- a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll +++ b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll @@ -831,8 +831,8 @@ ; CHECK-NOV-LABEL: stest_f64i16: ; CHECK-NOV: # %bb.0: # %entry ; CHECK-NOV-NEXT: fcvt.w.d a1, fa1, rtz -; CHECK-NOV-NEXT: lui a0, 8 -; CHECK-NOV-NEXT: addiw a2, a0, -1 +; CHECK-NOV-NEXT: lui a2, 8 +; CHECK-NOV-NEXT: addiw a2, a2, -1 ; CHECK-NOV-NEXT: fcvt.w.d a0, fa0, rtz ; CHECK-NOV-NEXT: bge a1, a2, .LBB9_5 ; CHECK-NOV-NEXT: # %bb.1: # %entry @@ -884,8 +884,8 @@ ; CHECK-NOV-LABEL: utest_f64i16: ; CHECK-NOV: # %bb.0: # %entry ; CHECK-NOV-NEXT: fcvt.wu.d a0, fa0, rtz -; CHECK-NOV-NEXT: lui a1, 16 -; CHECK-NOV-NEXT: addiw a2, a1, -1 +; CHECK-NOV-NEXT: lui a2, 16 +; CHECK-NOV-NEXT: addiw a2, a2, -1 ; CHECK-NOV-NEXT: fcvt.wu.d a1, fa1, rtz ; CHECK-NOV-NEXT: bgeu a0, a2, .LBB10_3 ; CHECK-NOV-NEXT: # %bb.1: # %entry @@ -921,8 +921,8 @@ ; CHECK-NOV-LABEL: ustest_f64i16: ; CHECK-NOV: # %bb.0: # %entry ; CHECK-NOV-NEXT: fcvt.w.d a0, fa0, rtz -; CHECK-NOV-NEXT: lui a1, 16 -; 
CHECK-NOV-NEXT: addiw a2, a1, -1 +; CHECK-NOV-NEXT: lui a2, 16 +; CHECK-NOV-NEXT: addiw a2, a2, -1 ; CHECK-NOV-NEXT: fcvt.w.d a1, fa1, rtz ; CHECK-NOV-NEXT: blt a0, a2, .LBB11_2 ; CHECK-NOV-NEXT: # %bb.1: # %entry @@ -965,8 +965,8 @@ ; CHECK-NOV-LABEL: stest_f32i16: ; CHECK-NOV: # %bb.0: # %entry ; CHECK-NOV-NEXT: fcvt.w.s a1, fa3, rtz -; CHECK-NOV-NEXT: lui a2, 8 -; CHECK-NOV-NEXT: addiw a5, a2, -1 +; CHECK-NOV-NEXT: lui a5, 8 +; CHECK-NOV-NEXT: addiw a5, a5, -1 ; CHECK-NOV-NEXT: fcvt.w.s a2, fa2, rtz ; CHECK-NOV-NEXT: bge a1, a5, .LBB12_10 ; CHECK-NOV-NEXT: # %bb.1: # %entry @@ -1046,8 +1046,8 @@ ; CHECK-NOV-LABEL: utest_f32i16: ; CHECK-NOV: # %bb.0: # %entry ; CHECK-NOV-NEXT: fcvt.wu.s a1, fa0, rtz -; CHECK-NOV-NEXT: lui a2, 16 -; CHECK-NOV-NEXT: addiw a3, a2, -1 +; CHECK-NOV-NEXT: lui a3, 16 +; CHECK-NOV-NEXT: addiw a3, a3, -1 ; CHECK-NOV-NEXT: fcvt.wu.s a2, fa1, rtz ; CHECK-NOV-NEXT: bgeu a1, a3, .LBB13_6 ; CHECK-NOV-NEXT: # %bb.1: # %entry @@ -1101,8 +1101,8 @@ ; CHECK-NOV-LABEL: ustest_f32i16: ; CHECK-NOV: # %bb.0: # %entry ; CHECK-NOV-NEXT: fcvt.w.s a1, fa0, rtz -; CHECK-NOV-NEXT: lui a2, 16 -; CHECK-NOV-NEXT: addiw a4, a2, -1 +; CHECK-NOV-NEXT: lui a4, 16 +; CHECK-NOV-NEXT: addiw a4, a4, -1 ; CHECK-NOV-NEXT: fcvt.w.s a2, fa1, rtz ; CHECK-NOV-NEXT: bge a1, a4, .LBB14_6 ; CHECK-NOV-NEXT: # %bb.1: # %entry @@ -1238,8 +1238,8 @@ ; CHECK-NOV-NEXT: mv a0, s1 ; CHECK-NOV-NEXT: call __extendhfsf2@plt ; CHECK-NOV-NEXT: fcvt.l.s a0, fa0, rtz -; CHECK-NOV-NEXT: lui a1, 8 -; CHECK-NOV-NEXT: addiw a7, a1, -1 +; CHECK-NOV-NEXT: lui a7, 8 +; CHECK-NOV-NEXT: addiw a7, a7, -1 ; CHECK-NOV-NEXT: bge a0, a7, .LBB15_18 ; CHECK-NOV-NEXT: # %bb.1: # %entry ; CHECK-NOV-NEXT: fcvt.l.s a1, fs5, rtz @@ -1828,8 +1828,8 @@ ; CHECK-NOV-NEXT: mv a0, s1 ; CHECK-NOV-NEXT: call __extendhfsf2@plt ; CHECK-NOV-NEXT: fcvt.l.s a0, fa0, rtz -; CHECK-NOV-NEXT: lui a1, 16 -; CHECK-NOV-NEXT: addiw a3, a1, -1 +; CHECK-NOV-NEXT: lui a3, 16 +; CHECK-NOV-NEXT: addiw a3, a3, -1 ; CHECK-NOV-NEXT: bge a0, a3, .LBB17_10 ; CHECK-NOV-NEXT: # %bb.1: # %entry ; CHECK-NOV-NEXT: fcvt.l.s a1, fs5, rtz @@ -4113,8 +4113,8 @@ ; CHECK-NOV-LABEL: stest_f64i16_mm: ; CHECK-NOV: # %bb.0: # %entry ; CHECK-NOV-NEXT: fcvt.w.d a1, fa1, rtz -; CHECK-NOV-NEXT: lui a0, 8 -; CHECK-NOV-NEXT: addiw a2, a0, -1 +; CHECK-NOV-NEXT: lui a2, 8 +; CHECK-NOV-NEXT: addiw a2, a2, -1 ; CHECK-NOV-NEXT: fcvt.w.d a0, fa0, rtz ; CHECK-NOV-NEXT: bge a1, a2, .LBB36_5 ; CHECK-NOV-NEXT: # %bb.1: # %entry @@ -4164,8 +4164,8 @@ ; CHECK-NOV-LABEL: utest_f64i16_mm: ; CHECK-NOV: # %bb.0: # %entry ; CHECK-NOV-NEXT: fcvt.wu.d a0, fa0, rtz -; CHECK-NOV-NEXT: lui a1, 16 -; CHECK-NOV-NEXT: addiw a2, a1, -1 +; CHECK-NOV-NEXT: lui a2, 16 +; CHECK-NOV-NEXT: addiw a2, a2, -1 ; CHECK-NOV-NEXT: fcvt.wu.d a1, fa1, rtz ; CHECK-NOV-NEXT: bgeu a0, a2, .LBB37_3 ; CHECK-NOV-NEXT: # %bb.1: # %entry @@ -4200,8 +4200,8 @@ ; CHECK-NOV-LABEL: ustest_f64i16_mm: ; CHECK-NOV: # %bb.0: # %entry ; CHECK-NOV-NEXT: fcvt.w.d a1, fa1, rtz -; CHECK-NOV-NEXT: lui a0, 16 -; CHECK-NOV-NEXT: addiw a2, a0, -1 +; CHECK-NOV-NEXT: lui a2, 16 +; CHECK-NOV-NEXT: addiw a2, a2, -1 ; CHECK-NOV-NEXT: fcvt.w.d a0, fa0, rtz ; CHECK-NOV-NEXT: blt a1, a2, .LBB38_2 ; CHECK-NOV-NEXT: # %bb.1: # %entry @@ -4242,8 +4242,8 @@ ; CHECK-NOV-LABEL: stest_f32i16_mm: ; CHECK-NOV: # %bb.0: # %entry ; CHECK-NOV-NEXT: fcvt.w.s a1, fa3, rtz -; CHECK-NOV-NEXT: lui a2, 8 -; CHECK-NOV-NEXT: addiw a5, a2, -1 +; CHECK-NOV-NEXT: lui a5, 8 +; CHECK-NOV-NEXT: addiw a5, a5, -1 ; CHECK-NOV-NEXT: fcvt.w.s a2, fa2, rtz ; CHECK-NOV-NEXT: bge a1, a5, 
.LBB39_10 ; CHECK-NOV-NEXT: # %bb.1: # %entry @@ -4321,8 +4321,8 @@ ; CHECK-NOV-LABEL: utest_f32i16_mm: ; CHECK-NOV: # %bb.0: # %entry ; CHECK-NOV-NEXT: fcvt.wu.s a1, fa0, rtz -; CHECK-NOV-NEXT: lui a2, 16 -; CHECK-NOV-NEXT: addiw a3, a2, -1 +; CHECK-NOV-NEXT: lui a3, 16 +; CHECK-NOV-NEXT: addiw a3, a3, -1 ; CHECK-NOV-NEXT: fcvt.wu.s a2, fa1, rtz ; CHECK-NOV-NEXT: bgeu a1, a3, .LBB40_6 ; CHECK-NOV-NEXT: # %bb.1: # %entry @@ -4375,8 +4375,8 @@ ; CHECK-NOV-LABEL: ustest_f32i16_mm: ; CHECK-NOV: # %bb.0: # %entry ; CHECK-NOV-NEXT: fcvt.w.s a1, fa3, rtz -; CHECK-NOV-NEXT: lui a2, 16 -; CHECK-NOV-NEXT: addiw a4, a2, -1 +; CHECK-NOV-NEXT: lui a4, 16 +; CHECK-NOV-NEXT: addiw a4, a4, -1 ; CHECK-NOV-NEXT: fcvt.w.s a2, fa2, rtz ; CHECK-NOV-NEXT: bge a1, a4, .LBB41_6 ; CHECK-NOV-NEXT: # %bb.1: # %entry @@ -4510,8 +4510,8 @@ ; CHECK-NOV-NEXT: mv a0, s1 ; CHECK-NOV-NEXT: call __extendhfsf2@plt ; CHECK-NOV-NEXT: fcvt.l.s a0, fa0, rtz -; CHECK-NOV-NEXT: lui a1, 8 -; CHECK-NOV-NEXT: addiw a7, a1, -1 +; CHECK-NOV-NEXT: lui a7, 8 +; CHECK-NOV-NEXT: addiw a7, a7, -1 ; CHECK-NOV-NEXT: bge a0, a7, .LBB42_18 ; CHECK-NOV-NEXT: # %bb.1: # %entry ; CHECK-NOV-NEXT: fcvt.l.s a1, fs5, rtz @@ -4823,8 +4823,8 @@ ; CHECK-NOV-NEXT: call __extendhfsf2@plt ; CHECK-NOV-NEXT: fmv.s fs0, fa0 ; CHECK-NOV-NEXT: fcvt.lu.s s3, fs6, rtz -; CHECK-NOV-NEXT: fcvt.lu.s a0, fs5, rtz -; CHECK-NOV-NEXT: sext.w s2, a0 +; CHECK-NOV-NEXT: fcvt.lu.s s2, fs5, rtz +; CHECK-NOV-NEXT: sext.w s2, s2 ; CHECK-NOV-NEXT: mv a0, s1 ; CHECK-NOV-NEXT: call __extendhfsf2@plt ; CHECK-NOV-NEXT: fcvt.lu.s a0, fa0, rtz @@ -5095,8 +5095,8 @@ ; CHECK-NOV-NEXT: mv a0, s1 ; CHECK-NOV-NEXT: call __extendhfsf2@plt ; CHECK-NOV-NEXT: fcvt.l.s a0, fa0, rtz -; CHECK-NOV-NEXT: lui a1, 16 -; CHECK-NOV-NEXT: addiw a3, a1, -1 +; CHECK-NOV-NEXT: lui a3, 16 +; CHECK-NOV-NEXT: addiw a3, a3, -1 ; CHECK-NOV-NEXT: bge a0, a3, .LBB44_10 ; CHECK-NOV-NEXT: # %bb.1: # %entry ; CHECK-NOV-NEXT: fcvt.l.s a1, fs5, rtz @@ -5587,8 +5587,8 @@ ; CHECK-NOV-NEXT: snez a1, s1 ; CHECK-NOV-NEXT: addi a1, a1, -1 ; CHECK-NOV-NEXT: and a1, a1, s0 -; CHECK-NOV-NEXT: addi a2, s1, -1 -; CHECK-NOV-NEXT: seqz a2, a2 +; CHECK-NOV-NEXT: addi s1, s1, -1 +; CHECK-NOV-NEXT: seqz a2, s1 ; CHECK-NOV-NEXT: addi a2, a2, -1 ; CHECK-NOV-NEXT: and a1, a2, a1 ; CHECK-NOV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload @@ -5627,8 +5627,8 @@ ; CHECK-V-NEXT: snez a2, s1 ; CHECK-V-NEXT: addi a2, a2, -1 ; CHECK-V-NEXT: and a2, a2, s0 -; CHECK-V-NEXT: addi a3, s1, -1 -; CHECK-V-NEXT: seqz a3, a3 +; CHECK-V-NEXT: addi s1, s1, -1 +; CHECK-V-NEXT: seqz a3, s1 ; CHECK-V-NEXT: addi a3, a3, -1 ; CHECK-V-NEXT: and a2, a3, a2 ; CHECK-V-NEXT: snez a3, a1 @@ -5694,14 +5694,14 @@ ; CHECK-NOV-NEXT: slti a3, a1, 1 ; CHECK-NOV-NEXT: neg a3, a3 ; CHECK-NOV-NEXT: and a3, a3, a0 -; CHECK-NOV-NEXT: addi a0, a1, -1 -; CHECK-NOV-NEXT: seqz a0, a0 -; CHECK-NOV-NEXT: addi a1, a0, -1 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: seqz a1, a1 +; CHECK-NOV-NEXT: addi a1, a1, -1 ; CHECK-NOV-NEXT: slti a0, s1, 1 ; CHECK-NOV-NEXT: neg a0, a0 ; CHECK-NOV-NEXT: and a0, a0, s0 -; CHECK-NOV-NEXT: addi a5, s1, -1 -; CHECK-NOV-NEXT: seqz a5, a5 +; CHECK-NOV-NEXT: addi s1, s1, -1 +; CHECK-NOV-NEXT: seqz a5, s1 ; CHECK-NOV-NEXT: addi a5, a5, -1 ; CHECK-NOV-NEXT: and a0, a5, a0 ; CHECK-NOV-NEXT: beqz a4, .LBB47_6 @@ -6074,8 +6074,8 @@ ; CHECK-NOV-NEXT: snez a1, s1 ; CHECK-NOV-NEXT: addi a1, a1, -1 ; CHECK-NOV-NEXT: and a1, a1, s0 -; CHECK-NOV-NEXT: addi a2, s1, -1 -; CHECK-NOV-NEXT: seqz a2, a2 +; CHECK-NOV-NEXT: addi s1, s1, -1 +; CHECK-NOV-NEXT: seqz a2, 
s1 ; CHECK-NOV-NEXT: addi a2, a2, -1 ; CHECK-NOV-NEXT: and a1, a2, a1 ; CHECK-NOV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload @@ -6114,8 +6114,8 @@ ; CHECK-V-NEXT: snez a2, s1 ; CHECK-V-NEXT: addi a2, a2, -1 ; CHECK-V-NEXT: and a2, a2, s0 -; CHECK-V-NEXT: addi a3, s1, -1 -; CHECK-V-NEXT: seqz a3, a3 +; CHECK-V-NEXT: addi s1, s1, -1 +; CHECK-V-NEXT: seqz a3, s1 ; CHECK-V-NEXT: addi a3, a3, -1 ; CHECK-V-NEXT: and a2, a3, a2 ; CHECK-V-NEXT: snez a3, a1 @@ -6181,14 +6181,14 @@ ; CHECK-NOV-NEXT: slti a3, a1, 1 ; CHECK-NOV-NEXT: neg a3, a3 ; CHECK-NOV-NEXT: and a3, a3, a0 -; CHECK-NOV-NEXT: addi a0, a1, -1 -; CHECK-NOV-NEXT: seqz a0, a0 -; CHECK-NOV-NEXT: addi a1, a0, -1 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: seqz a1, a1 +; CHECK-NOV-NEXT: addi a1, a1, -1 ; CHECK-NOV-NEXT: slti a0, s1, 1 ; CHECK-NOV-NEXT: neg a0, a0 ; CHECK-NOV-NEXT: and a0, a0, s0 -; CHECK-NOV-NEXT: addi a5, s1, -1 -; CHECK-NOV-NEXT: seqz a5, a5 +; CHECK-NOV-NEXT: addi s1, s1, -1 +; CHECK-NOV-NEXT: seqz a5, s1 ; CHECK-NOV-NEXT: addi a5, a5, -1 ; CHECK-NOV-NEXT: and a0, a5, a0 ; CHECK-NOV-NEXT: beqz a4, .LBB50_6 @@ -6557,8 +6557,8 @@ ; CHECK-NOV-NEXT: snez a1, s2 ; CHECK-NOV-NEXT: addi a1, a1, -1 ; CHECK-NOV-NEXT: and a1, a1, s1 -; CHECK-NOV-NEXT: addi a2, s2, -1 -; CHECK-NOV-NEXT: seqz a2, a2 +; CHECK-NOV-NEXT: addi s2, s2, -1 +; CHECK-NOV-NEXT: seqz a2, s2 ; CHECK-NOV-NEXT: addi a2, a2, -1 ; CHECK-NOV-NEXT: and a1, a2, a1 ; CHECK-NOV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload @@ -6599,8 +6599,8 @@ ; CHECK-V-NEXT: snez a1, s2 ; CHECK-V-NEXT: addi a1, a1, -1 ; CHECK-V-NEXT: and a1, a1, s1 -; CHECK-V-NEXT: addi a2, s2, -1 -; CHECK-V-NEXT: seqz a2, a2 +; CHECK-V-NEXT: addi s2, s2, -1 +; CHECK-V-NEXT: seqz a2, s2 ; CHECK-V-NEXT: addi a2, a2, -1 ; CHECK-V-NEXT: and a1, a2, a1 ; CHECK-V-NEXT: sd a1, 8(sp) @@ -6659,14 +6659,14 @@ ; CHECK-NOV-NEXT: slti a3, a1, 1 ; CHECK-NOV-NEXT: neg a3, a3 ; CHECK-NOV-NEXT: and a3, a3, a0 -; CHECK-NOV-NEXT: addi a0, a1, -1 -; CHECK-NOV-NEXT: seqz a0, a0 -; CHECK-NOV-NEXT: addi a1, a0, -1 +; CHECK-NOV-NEXT: addi a1, a1, -1 +; CHECK-NOV-NEXT: seqz a1, a1 +; CHECK-NOV-NEXT: addi a1, a1, -1 ; CHECK-NOV-NEXT: slti a0, s1, 1 ; CHECK-NOV-NEXT: neg a0, a0 ; CHECK-NOV-NEXT: and a0, a0, s0 -; CHECK-NOV-NEXT: addi a5, s1, -1 -; CHECK-NOV-NEXT: seqz a5, a5 +; CHECK-NOV-NEXT: addi s1, s1, -1 +; CHECK-NOV-NEXT: seqz a5, s1 ; CHECK-NOV-NEXT: addi a5, a5, -1 ; CHECK-NOV-NEXT: and a0, a5, a0 ; CHECK-NOV-NEXT: beqz a4, .LBB53_6 @@ -6722,14 +6722,14 @@ ; CHECK-V-NEXT: slti a3, a1, 1 ; CHECK-V-NEXT: neg a3, a3 ; CHECK-V-NEXT: and a3, a3, a0 -; CHECK-V-NEXT: addi a0, a1, -1 -; CHECK-V-NEXT: seqz a0, a0 -; CHECK-V-NEXT: addi a1, a0, -1 +; CHECK-V-NEXT: addi a1, a1, -1 +; CHECK-V-NEXT: seqz a1, a1 +; CHECK-V-NEXT: addi a1, a1, -1 ; CHECK-V-NEXT: slti a0, s1, 1 ; CHECK-V-NEXT: neg a0, a0 ; CHECK-V-NEXT: and a0, a0, s0 -; CHECK-V-NEXT: addi a5, s1, -1 -; CHECK-V-NEXT: seqz a5, a5 +; CHECK-V-NEXT: addi s1, s1, -1 +; CHECK-V-NEXT: seqz a5, s1 ; CHECK-V-NEXT: addi a5, a5, -1 ; CHECK-V-NEXT: and a0, a5, a0 ; CHECK-V-NEXT: beqz a4, .LBB53_6 diff --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll --- a/llvm/test/CodeGen/RISCV/half-arith.ll +++ b/llvm/test/CodeGen/RISCV/half-arith.ll @@ -438,8 +438,8 @@ ; RV32I-NEXT: call __truncsfhf2@plt ; RV32I-NEXT: lui a1, 1048568 ; RV32I-NEXT: and a0, a0, a1 -; RV32I-NEXT: slli a1, s1, 17 -; RV32I-NEXT: srli a1, a1, 17 +; RV32I-NEXT: slli s1, s1, 17 +; RV32I-NEXT: srli a1, s1, 17 ; RV32I-NEXT: or a0, a1, a0 ; RV32I-NEXT: lw ra, 28(sp) # 4-byte 
Folded Reload ; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload @@ -477,8 +477,8 @@ ; RV64I-NEXT: call __truncsfhf2@plt ; RV64I-NEXT: lui a1, 1048568 ; RV64I-NEXT: and a0, a0, a1 -; RV64I-NEXT: slli a1, s1, 49 -; RV64I-NEXT: srli a1, a1, 49 +; RV64I-NEXT: slli s1, s1, 49 +; RV64I-NEXT: srli a1, s1, 49 ; RV64I-NEXT: or a0, a1, a0 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload @@ -908,8 +908,8 @@ ; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s1, a2 ; RV32I-NEXT: mv s0, a1 -; RV32I-NEXT: lui a1, 16 -; RV32I-NEXT: addi s3, a1, -1 +; RV32I-NEXT: lui s3, 16 +; RV32I-NEXT: addi s3, s3, -1 ; RV32I-NEXT: and a0, a0, s3 ; RV32I-NEXT: call __extendhfsf2@plt ; RV32I-NEXT: li a1, 0 @@ -966,8 +966,8 @@ ; RV64I-NEXT: sd s4, 0(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv s1, a2 ; RV64I-NEXT: mv s0, a1 -; RV64I-NEXT: lui a1, 16 -; RV64I-NEXT: addiw s3, a1, -1 +; RV64I-NEXT: lui s3, 16 +; RV64I-NEXT: addiw s3, s3, -1 ; RV64I-NEXT: and a0, a0, s3 ; RV64I-NEXT: call __extendhfsf2@plt ; RV64I-NEXT: li a1, 0 @@ -1040,8 +1040,8 @@ ; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s1, a2 ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s3, a0, -1 +; RV32I-NEXT: lui s3, 16 +; RV32I-NEXT: addi s3, s3, -1 ; RV32I-NEXT: and a0, a1, s3 ; RV32I-NEXT: call __extendhfsf2@plt ; RV32I-NEXT: li a1, 0 @@ -1098,8 +1098,8 @@ ; RV64I-NEXT: sd s4, 0(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv s1, a2 ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s3, a0, -1 +; RV64I-NEXT: lui s3, 16 +; RV64I-NEXT: addiw s3, s3, -1 ; RV64I-NEXT: and a0, a1, s3 ; RV64I-NEXT: call __extendhfsf2@plt ; RV64I-NEXT: li a1, 0 @@ -1743,8 +1743,8 @@ ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s1, a1 -; RV32I-NEXT: lui a1, 16 -; RV32I-NEXT: addi s3, a1, -1 +; RV32I-NEXT: lui s3, 16 +; RV32I-NEXT: addi s3, s3, -1 ; RV32I-NEXT: and a0, a0, s3 ; RV32I-NEXT: call __extendhfsf2@plt ; RV32I-NEXT: li a1, 0 @@ -1804,8 +1804,8 @@ ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv s0, a2 ; RV64I-NEXT: mv s1, a1 -; RV64I-NEXT: lui a1, 16 -; RV64I-NEXT: addiw s3, a1, -1 +; RV64I-NEXT: lui s3, 16 +; RV64I-NEXT: addiw s3, s3, -1 ; RV64I-NEXT: and a0, a0, s3 ; RV64I-NEXT: call __extendhfsf2@plt ; RV64I-NEXT: li a1, 0 @@ -1882,8 +1882,8 @@ ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s1, a1 -; RV32I-NEXT: lui a1, 16 -; RV32I-NEXT: addi s3, a1, -1 +; RV32I-NEXT: lui s3, 16 +; RV32I-NEXT: addi s3, s3, -1 ; RV32I-NEXT: and a0, a0, s3 ; RV32I-NEXT: call __extendhfsf2@plt ; RV32I-NEXT: li a1, 0 @@ -1933,8 +1933,8 @@ ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: mv s0, a2 ; RV64I-NEXT: mv s1, a1 -; RV64I-NEXT: lui a1, 16 -; RV64I-NEXT: addiw s3, a1, -1 +; RV64I-NEXT: lui s3, 16 +; RV64I-NEXT: addiw s3, s3, -1 ; RV64I-NEXT: and a0, a0, s3 ; RV64I-NEXT: call __extendhfsf2@plt ; RV64I-NEXT: li a1, 0 diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll --- a/llvm/test/CodeGen/RISCV/half-convert.ll +++ b/llvm/test/CodeGen/RISCV/half-convert.ll @@ -152,8 +152,8 @@ ; RV32I-NEXT: call __gtsf2@plt ; RV32I-NEXT: blez a0, .LBB1_4 ; RV32I-NEXT: # %bb.3: # %start -; RV32I-NEXT: lui a0, 8 -; RV32I-NEXT: addi s1, a0, -1 +; RV32I-NEXT: lui s1, 8 +; RV32I-NEXT: addi s1, s1, -1 ; RV32I-NEXT: .LBB1_4: # %start ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: mv a1, s0 @@ -195,8 +195,8 @@ ; RV64I-NEXT: call 
__gtsf2@plt ; RV64I-NEXT: blez a0, .LBB1_4 ; RV64I-NEXT: # %bb.3: # %start -; RV64I-NEXT: lui a0, 8 -; RV64I-NEXT: addiw s1, a0, -1 +; RV64I-NEXT: lui s1, 8 +; RV64I-NEXT: addiw s1, s1, -1 ; RV64I-NEXT: .LBB1_4: # %start ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 @@ -317,8 +317,8 @@ ; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: lui a1, 16 -; RV32I-NEXT: addi s0, a1, -1 +; RV32I-NEXT: lui s0, 16 +; RV32I-NEXT: addi s0, s0, -1 ; RV32I-NEXT: and a0, a0, s0 ; RV32I-NEXT: call __extendhfsf2@plt ; RV32I-NEXT: mv s3, a0 @@ -355,8 +355,8 @@ ; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: lui a1, 16 -; RV64I-NEXT: addiw s0, a1, -1 +; RV64I-NEXT: lui s0, 16 +; RV64I-NEXT: addiw s0, s0, -1 ; RV64I-NEXT: and a0, a0, s0 ; RV64I-NEXT: call __extendhfsf2@plt ; RV64I-NEXT: mv s3, a0 @@ -484,8 +484,8 @@ ; RV32I-NEXT: # %bb.1: # %start ; RV32I-NEXT: lui s1, 524288 ; RV32I-NEXT: .LBB5_2: # %start -; RV32I-NEXT: lui a0, 323584 -; RV32I-NEXT: addi a1, a0, -1 +; RV32I-NEXT: lui a1, 323584 +; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __gtsf2@plt ; RV32I-NEXT: blez a0, .LBB5_4 @@ -529,8 +529,8 @@ ; RV64I-NEXT: # %bb.1: # %start ; RV64I-NEXT: lui s1, 524288 ; RV64I-NEXT: .LBB5_2: # %start -; RV64I-NEXT: lui a0, 323584 -; RV64I-NEXT: addiw a1, a0, -1 +; RV64I-NEXT: lui a1, 323584 +; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __gtsf2@plt ; RV64I-NEXT: blez a0, .LBB5_4 @@ -718,9 +718,8 @@ ; RV32I-NEXT: srli a0, a0, 16 ; RV32I-NEXT: call __extendhfsf2@plt ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lui a0, 325632 -; RV32I-NEXT: addi a1, a0, -1 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: lui a1, 325632 +; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: call __gtsf2@plt ; RV32I-NEXT: sgtz a0, a0 ; RV32I-NEXT: neg s1, a0 @@ -757,8 +756,8 @@ ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __fixunssfdi@plt ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lui a0, 325632 -; RV64I-NEXT: addiw a1, a0, -1 +; RV64I-NEXT: lui a1, 325632 +; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __gtsf2@plt ; RV64I-NEXT: blez a0, .LBB8_2 @@ -961,8 +960,8 @@ ; RV32I-NEXT: # %bb.1: # %start ; RV32I-NEXT: lui s2, 524288 ; RV32I-NEXT: .LBB10_2: # %start -; RV32I-NEXT: lui a0, 389120 -; RV32I-NEXT: addi s3, a0, -1 +; RV32I-NEXT: lui s3, 389120 +; RV32I-NEXT: addi s3, s3, -1 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: call __gtsf2@plt @@ -1027,8 +1026,8 @@ ; RV64I-NEXT: # %bb.1: # %start ; RV64I-NEXT: slli s1, s3, 63 ; RV64I-NEXT: .LBB10_2: # %start -; RV64I-NEXT: lui a0, 389120 -; RV64I-NEXT: addiw a1, a0, -1 +; RV64I-NEXT: lui a1, 389120 +; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __gtsf2@plt ; RV64I-NEXT: blez a0, .LBB10_4 @@ -1192,9 +1191,8 @@ ; RV32I-NEXT: srli a0, a0, 16 ; RV32I-NEXT: call __extendhfsf2@plt ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lui a0, 391168 -; RV32I-NEXT: addi s1, a0, -1 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: lui s1, 391168 +; RV32I-NEXT: addi s1, s1, -1 ; RV32I-NEXT: mv a1, s1 ; RV32I-NEXT: call __gtsf2@plt ; RV32I-NEXT: sgtz a0, a0 @@ -1242,9 +1240,8 @@ ; RV64I-NEXT: srli a0, a0, 48 ; RV64I-NEXT: call __extendhfsf2@plt ; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lui a0, 391168 -; RV64I-NEXT: addiw a1, a0, -1 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: lui a1, 391168 +; RV64I-NEXT: addiw a1, 
a1, -1 ; RV64I-NEXT: call __gtsf2@plt ; RV64I-NEXT: sgtz a0, a0 ; RV64I-NEXT: neg s1, a0 @@ -2267,8 +2264,8 @@ ; RV32I-NEXT: call __gtsf2@plt ; RV32I-NEXT: blez a0, .LBB32_4 ; RV32I-NEXT: # %bb.3: # %start -; RV32I-NEXT: lui a0, 8 -; RV32I-NEXT: addi s1, a0, -1 +; RV32I-NEXT: lui s1, 8 +; RV32I-NEXT: addi s1, s1, -1 ; RV32I-NEXT: .LBB32_4: # %start ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: mv a1, s0 @@ -2312,8 +2309,8 @@ ; RV64I-NEXT: call __gtsf2@plt ; RV64I-NEXT: blez a0, .LBB32_4 ; RV64I-NEXT: # %bb.3: # %start -; RV64I-NEXT: lui a0, 8 -; RV64I-NEXT: addiw s1, a0, -1 +; RV64I-NEXT: lui s1, 8 +; RV64I-NEXT: addiw s1, s1, -1 ; RV64I-NEXT: .LBB32_4: # %start ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: mv a1, s0 @@ -2435,8 +2432,8 @@ ; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill ; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill -; RV32I-NEXT: lui a1, 16 -; RV32I-NEXT: addi s3, a1, -1 +; RV32I-NEXT: lui s3, 16 +; RV32I-NEXT: addi s3, s3, -1 ; RV32I-NEXT: and a0, a0, s3 ; RV32I-NEXT: call __extendhfsf2@plt ; RV32I-NEXT: mv s2, a0 @@ -2476,8 +2473,8 @@ ; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill ; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill -; RV64I-NEXT: lui a1, 16 -; RV64I-NEXT: addiw s3, a1, -1 +; RV64I-NEXT: lui s3, 16 +; RV64I-NEXT: addiw s3, s3, -1 ; RV64I-NEXT: and a0, a0, s3 ; RV64I-NEXT: call __extendhfsf2@plt ; RV64I-NEXT: mv s2, a0 @@ -2941,9 +2938,8 @@ ; RV32I-NEXT: srli a0, a0, 16 ; RV32I-NEXT: call __extendhfsf2@plt ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lui a0, 325632 -; RV32I-NEXT: addi a1, a0, -1 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: lui a1, 325632 +; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: call __gtsf2@plt ; RV32I-NEXT: sgtz a0, a0 ; RV32I-NEXT: neg s1, a0 @@ -2980,8 +2976,8 @@ ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __fixunssfdi@plt ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lui a0, 325632 -; RV64I-NEXT: addiw a1, a0, -1 +; RV64I-NEXT: lui a1, 325632 +; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __gtsf2@plt ; RV64I-NEXT: blez a0, .LBB39_2 @@ -3058,8 +3054,8 @@ ; RV32I-NEXT: # %bb.1: # %start ; RV32I-NEXT: lui s1, 524288 ; RV32I-NEXT: .LBB40_2: # %start -; RV32I-NEXT: lui a0, 323584 -; RV32I-NEXT: addi a1, a0, -1 +; RV32I-NEXT: lui a1, 323584 +; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: mv a0, s0 ; RV32I-NEXT: call __gtsf2@plt ; RV32I-NEXT: blez a0, .LBB40_4 @@ -3103,8 +3099,8 @@ ; RV64I-NEXT: # %bb.1: # %start ; RV64I-NEXT: lui s1, 524288 ; RV64I-NEXT: .LBB40_2: # %start -; RV64I-NEXT: lui a0, 323584 -; RV64I-NEXT: addiw a1, a0, -1 +; RV64I-NEXT: lui a1, 323584 +; RV64I-NEXT: addiw a1, a1, -1 ; RV64I-NEXT: mv a0, s0 ; RV64I-NEXT: call __gtsf2@plt ; RV64I-NEXT: blez a0, .LBB40_4 diff --git a/llvm/test/CodeGen/RISCV/imm.ll b/llvm/test/CodeGen/RISCV/imm.ll --- a/llvm/test/CodeGen/RISCV/imm.ll +++ b/llvm/test/CodeGen/RISCV/imm.ll @@ -733,8 +733,8 @@ define i64 @imm_left_shifted_lui_3() nounwind { ; RV32I-LABEL: imm_left_shifted_lui_3: ; RV32I: # %bb.0: -; RV32I-NEXT: lui a0, 1 -; RV32I-NEXT: addi a1, a0, 1 +; RV32I-NEXT: lui a1, 1 +; RV32I-NEXT: addi a1, a1, 1 ; RV32I-NEXT: li a0, 0 ; RV32I-NEXT: ret ; @@ -2316,8 +2316,8 @@ define i64 @li_rori_2() { ; RV32I-LABEL: li_rori_2: ; RV32I: # %bb.0: -; RV32I-NEXT: lui a0, 720896 -; RV32I-NEXT: addi a1, a0, -1 +; RV32I-NEXT: lui a1, 720896 +; RV32I-NEXT: addi a1, a1, -1 ; RV32I-NEXT: li a0, -6 ; RV32I-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/narrow-shl-cst.ll 
b/llvm/test/CodeGen/RISCV/narrow-shl-cst.ll --- a/llvm/test/CodeGen/RISCV/narrow-shl-cst.ll +++ b/llvm/test/CodeGen/RISCV/narrow-shl-cst.ll @@ -39,8 +39,8 @@ define i64 @test3(i64 %x) nounwind { ; RV32-LABEL: test3: ; RV32: # %bb.0: -; RV32-NEXT: andi a0, a0, 241 -; RV32-NEXT: slli a1, a0, 8 +; RV32-NEXT: andi a1, a0, 241 +; RV32-NEXT: slli a1, a1, 8 ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; @@ -57,8 +57,8 @@ define i64 @test4(i64 %x) nounwind { ; RV32-LABEL: test4: ; RV32: # %bb.0: -; RV32-NEXT: ori a0, a0, 241 -; RV32-NEXT: slli a1, a0, 8 +; RV32-NEXT: ori a1, a0, 241 +; RV32-NEXT: slli a1, a1, 8 ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; @@ -75,8 +75,8 @@ define i64 @test5(i64 %x) nounwind { ; RV32-LABEL: test5: ; RV32: # %bb.0: -; RV32-NEXT: ori a0, a0, 31 -; RV32-NEXT: slli a1, a0, 8 +; RV32-NEXT: ori a1, a0, 31 +; RV32-NEXT: slli a1, a1, 8 ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; @@ -93,8 +93,8 @@ define i64 @test6(i64 %x) nounwind { ; RV32-LABEL: test6: ; RV32: # %bb.0: -; RV32-NEXT: xori a0, a0, 241 -; RV32-NEXT: slli a1, a0, 8 +; RV32-NEXT: xori a1, a0, 241 +; RV32-NEXT: slli a1, a1, 8 ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; @@ -111,8 +111,8 @@ define i64 @test7(i64 %x) nounwind { ; RV32-LABEL: test7: ; RV32: # %bb.0: -; RV32-NEXT: xori a0, a0, 31 -; RV32-NEXT: slli a1, a0, 8 +; RV32-NEXT: xori a1, a0, 31 +; RV32-NEXT: slli a1, a1, 8 ; RV32-NEXT: li a0, 0 ; RV32-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll b/llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll --- a/llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll +++ b/llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll @@ -64,16 +64,16 @@ ; CHECK-NEXT: lw a6, 0(a4) ; CHECK-NEXT: addiw a5, a5, 4 ; CHECK-NEXT: sw a5, -4(a4) -; CHECK-NEXT: addiw a5, a6, 4 -; CHECK-NEXT: sw a5, 0(a4) +; CHECK-NEXT: addiw a6, a6, 4 +; CHECK-NEXT: sw a6, 0(a4) ; CHECK-NEXT: addi a3, a3, 2 ; CHECK-NEXT: addi a4, a4, 8 ; CHECK-NEXT: bne a1, a3, .LBB1_4 ; CHECK-NEXT: .LBB1_5: # %for.cond.cleanup.loopexit.unr-lcssa ; CHECK-NEXT: beqz a2, .LBB1_7 ; CHECK-NEXT: # %bb.6: # %for.body.epil -; CHECK-NEXT: slli a1, a3, 2 -; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: slli a3, a3, 2 +; CHECK-NEXT: add a0, a0, a3 ; CHECK-NEXT: lw a1, 0(a0) ; CHECK-NEXT: addiw a1, a1, 4 ; CHECK-NEXT: sw a1, 0(a0) diff --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll --- a/llvm/test/CodeGen/RISCV/rotl-rotr.ll +++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll @@ -1211,8 +1211,8 @@ ; RV32I-NEXT: sll a1, a1, a7 ; RV32I-NEXT: or a1, a1, t0 ; RV32I-NEXT: srl t0, a0, a4 -; RV32I-NEXT: slli a0, a6, 1 -; RV32I-NEXT: sll a6, a0, a7 +; RV32I-NEXT: slli a6, a6, 1 +; RV32I-NEXT: sll a6, a6, a7 ; RV32I-NEXT: addi a0, a5, -32 ; RV32I-NEXT: or a6, a6, t0 ; RV32I-NEXT: bltz a0, .LBB19_6 @@ -1265,8 +1265,8 @@ ; RV32ZBB-NEXT: sll a1, a1, a7 ; RV32ZBB-NEXT: or a1, a1, t0 ; RV32ZBB-NEXT: srl t0, a0, a4 -; RV32ZBB-NEXT: slli a0, a6, 1 -; RV32ZBB-NEXT: sll a6, a0, a7 +; RV32ZBB-NEXT: slli a6, a6, 1 +; RV32ZBB-NEXT: sll a6, a6, a7 ; RV32ZBB-NEXT: addi a0, a5, -32 ; RV32ZBB-NEXT: or a6, a6, t0 ; RV32ZBB-NEXT: bltz a0, .LBB19_6 @@ -1528,8 +1528,8 @@ ; RV32I-NEXT: not a0, a4 ; RV32I-NEXT: sll t0, t0, a0 ; RV32I-NEXT: srl t1, a1, a4 -; RV32I-NEXT: slli a1, a6, 1 -; RV32I-NEXT: sll t2, a1, a0 +; RV32I-NEXT: slli a6, a6, 1 +; RV32I-NEXT: sll t2, a6, a0 ; RV32I-NEXT: mv a6, a2 ; RV32I-NEXT: beqz a5, .LBB23_6 ; RV32I-NEXT: # %bb.5: @@ -1546,8 +1546,8 @@ ; RV32I-NEXT: sll a2, a2, a0 ; RV32I-NEXT: or a2, a2, t0 ; RV32I-NEXT: srl a3, a3, a4 -; RV32I-NEXT: 
slli a4, a6, 1 -; RV32I-NEXT: sll a0, a4, a0 +; RV32I-NEXT: slli a6, a6, 1 +; RV32I-NEXT: sll a0, a6, a0 ; RV32I-NEXT: or a0, a0, a3 ; RV32I-NEXT: add a3, a7, a0 ; RV32I-NEXT: add a0, a1, a2 @@ -1584,8 +1584,8 @@ ; RV32ZBB-NEXT: not a0, a4 ; RV32ZBB-NEXT: sll t0, t0, a0 ; RV32ZBB-NEXT: srl t1, a1, a4 -; RV32ZBB-NEXT: slli a1, a6, 1 -; RV32ZBB-NEXT: sll t2, a1, a0 +; RV32ZBB-NEXT: slli a6, a6, 1 +; RV32ZBB-NEXT: sll t2, a6, a0 ; RV32ZBB-NEXT: mv a6, a2 ; RV32ZBB-NEXT: beqz a5, .LBB23_6 ; RV32ZBB-NEXT: # %bb.5: @@ -1602,8 +1602,8 @@ ; RV32ZBB-NEXT: sll a2, a2, a0 ; RV32ZBB-NEXT: or a2, a2, t0 ; RV32ZBB-NEXT: srl a3, a3, a4 -; RV32ZBB-NEXT: slli a4, a6, 1 -; RV32ZBB-NEXT: sll a0, a4, a0 +; RV32ZBB-NEXT: slli a6, a6, 1 +; RV32ZBB-NEXT: sll a0, a6, a0 ; RV32ZBB-NEXT: or a0, a0, a3 ; RV32ZBB-NEXT: add a3, a7, a0 ; RV32ZBB-NEXT: add a0, a1, a2 diff --git a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll --- a/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbb-zbkb.ll @@ -207,8 +207,8 @@ ; CHECK-NEXT: sll a0, a0, a5 ; CHECK-NEXT: or a0, a0, a4 ; CHECK-NEXT: srl a1, a1, a2 -; CHECK-NEXT: slli a2, a3, 1 -; CHECK-NEXT: sll a2, a2, a5 +; CHECK-NEXT: slli a3, a3, 1 +; CHECK-NEXT: sll a2, a3, a5 ; CHECK-NEXT: or a1, a2, a1 ; CHECK-NEXT: ret %or = tail call i64 @llvm.fshr.i64(i64 %a, i64 %a, i64 %b) @@ -305,8 +305,8 @@ ; CHECK-NEXT: neg a4, a3 ; CHECK-NEXT: and a2, a4, a2 ; CHECK-NEXT: sll a0, a1, a0 -; CHECK-NEXT: addi a1, a3, -1 -; CHECK-NEXT: and a1, a1, a0 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a1, a3, a0 ; CHECK-NEXT: not a0, a2 ; CHECK-NEXT: not a1, a1 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rv32zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbkb.ll --- a/llvm/test/CodeGen/RISCV/rv32zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbkb.ll @@ -149,8 +149,8 @@ ; RV32I-LABEL: packh_i64: ; RV32I: # %bb.0: ; RV32I-NEXT: andi a0, a0, 255 -; RV32I-NEXT: slli a1, a2, 24 -; RV32I-NEXT: srli a1, a1, 16 +; RV32I-NEXT: slli a2, a2, 24 +; RV32I-NEXT: srli a1, a2, 16 ; RV32I-NEXT: or a0, a1, a0 ; RV32I-NEXT: li a1, 0 ; RV32I-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rv32zbs.ll b/llvm/test/CodeGen/RISCV/rv32zbs.ll --- a/llvm/test/CodeGen/RISCV/rv32zbs.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbs.ll @@ -54,8 +54,8 @@ ; RV32I-NEXT: neg a6, a5 ; RV32I-NEXT: and a4, a6, a4 ; RV32I-NEXT: sll a2, a3, a2 -; RV32I-NEXT: addi a3, a5, -1 -; RV32I-NEXT: and a2, a3, a2 +; RV32I-NEXT: addi a5, a5, -1 +; RV32I-NEXT: and a2, a5, a2 ; RV32I-NEXT: not a3, a4 ; RV32I-NEXT: not a2, a2 ; RV32I-NEXT: and a0, a3, a0 @@ -176,8 +176,8 @@ ; RV32I-NEXT: neg a0, a4 ; RV32I-NEXT: and a0, a0, a2 ; RV32I-NEXT: sll a1, a1, a3 -; RV32I-NEXT: addi a2, a4, -1 -; RV32I-NEXT: and a1, a2, a1 +; RV32I-NEXT: addi a4, a4, -1 +; RV32I-NEXT: and a1, a4, a1 ; RV32I-NEXT: ret ; ; RV32ZBS-LABEL: bset_i64_zero: diff --git a/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll b/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll --- a/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll @@ -21,8 +21,8 @@ ; CHECK-NEXT: mv a1, s1 ; CHECK-NEXT: call __addsf3@plt ; CHECK-NEXT: slli a0, a0, 32 -; CHECK-NEXT: slli a1, s2, 32 -; CHECK-NEXT: srli a1, a1, 32 +; CHECK-NEXT: slli s2, s2, 32 +; CHECK-NEXT: srli a1, s2, 32 ; CHECK-NEXT: or a0, a0, a1 ; CHECK-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll --- 
a/llvm/test/CodeGen/RISCV/rv64zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll @@ -252,8 +252,8 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addw a0, a1, a0 ; RV64I-NEXT: slli a0, a0, 32 -; RV64I-NEXT: slli a1, a2, 32 -; RV64I-NEXT: srli a1, a1, 32 +; RV64I-NEXT: slli a2, a2, 32 +; RV64I-NEXT: srli a1, a2, 32 ; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll @@ -648,8 +648,8 @@ ; RV64-NEXT: # %bb.1: ; RV64-NEXT: mv a2, a1 ; RV64-NEXT: .LBB52_2: -; RV64-NEXT: slli a1, a2, 3 -; RV64-NEXT: add a0, a0, a1 +; RV64-NEXT: slli a2, a2, 3 +; RV64-NEXT: add a0, a0, a2 ; RV64-NEXT: fld fa0, 0(a0) ; RV64-NEXT: addi sp, s0, -80 ; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll @@ -830,8 +830,8 @@ ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a2, a1 ; CHECK-NEXT: .LBB72_2: -; CHECK-NEXT: slli a1, a2, 3 -; CHECK-NEXT: add a0, a0, a1 +; CHECK-NEXT: slli a2, a2, 3 +; CHECK-NEXT: add a0, a0, a2 ; CHECK-NEXT: ld a0, 0(a0) ; CHECK-NEXT: addi sp, s0, -80 ; CHECK-NEXT: ld ra, 72(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll @@ -853,9 +853,9 @@ ; CHECK-NEXT: mv a3, a2 ; CHECK-NEXT: bltu a4, a5, .LBB13_5 ; CHECK-NEXT: # %bb.2: -; CHECK-NEXT: slli a3, a4, 32 -; CHECK-NEXT: srli a3, a3, 32 -; CHECK-NEXT: addi a4, a3, 1 +; CHECK-NEXT: slli a4, a4, 32 +; CHECK-NEXT: srli a4, a4, 32 +; CHECK-NEXT: addi a4, a4, 1 ; CHECK-NEXT: andi a5, a4, -32 ; CHECK-NEXT: add a3, a5, a2 ; CHECK-NEXT: slli a6, a2, 2 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-conv.ll @@ -104,8 +104,8 @@ ; LMULMAX1-NEXT: vse64.v v8, (a1) ; LMULMAX1-NEXT: addi a0, a1, 48 ; LMULMAX1-NEXT: vse64.v v11, (a0) -; LMULMAX1-NEXT: addi a0, a1, 16 -; LMULMAX1-NEXT: vse64.v v9, (a0) +; LMULMAX1-NEXT: addi a1, a1, 16 +; LMULMAX1-NEXT: vse64.v v9, (a1) ; LMULMAX1-NEXT: ret %a = load <8 x half>, <8 x half>* %x %d = fpext <8 x half> %a to <8 x double> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll @@ -96,8 +96,8 @@ ; LMULMAX1-NEXT: vfcvt.rtz.x.f.v v8, v8 ; LMULMAX1-NEXT: vfcvt.rtz.x.f.v v9, v9 ; LMULMAX1-NEXT: vse32.v v9, (a1) -; LMULMAX1-NEXT: addi a0, a1, 16 -; LMULMAX1-NEXT: vse32.v v8, (a0) +; LMULMAX1-NEXT: addi a1, a1, 16 +; LMULMAX1-NEXT: vse32.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <8 x float>, <8 x float>* %x %d = fptosi <8 x float> %a to <8 x i32> @@ -123,8 +123,8 @@ ; LMULMAX1-NEXT: vfcvt.rtz.xu.f.v v8, v8 ; LMULMAX1-NEXT: vfcvt.rtz.xu.f.v v9, v9 ; LMULMAX1-NEXT: vse32.v v9, (a1) -; LMULMAX1-NEXT: addi a0, a1, 16 -; LMULMAX1-NEXT: vse32.v v8, (a0) +; LMULMAX1-NEXT: addi a1, a1, 16 +; LMULMAX1-NEXT: vse32.v v8, 
(a1) ; LMULMAX1-NEXT: ret %a = load <8 x float>, <8 x float>* %x %d = fptoui <8 x float> %a to <8 x i32> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll @@ -102,8 +102,8 @@ ; LMULMAX1-NEXT: vfcvt.f.x.v v8, v8 ; LMULMAX1-NEXT: vfcvt.f.x.v v9, v9 ; LMULMAX1-NEXT: vse32.v v9, (a1) -; LMULMAX1-NEXT: addi a0, a1, 16 -; LMULMAX1-NEXT: vse32.v v8, (a0) +; LMULMAX1-NEXT: addi a1, a1, 16 +; LMULMAX1-NEXT: vse32.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %d = sitofp <8 x i32> %a to <8 x float> @@ -129,8 +129,8 @@ ; LMULMAX1-NEXT: vfcvt.f.xu.v v8, v8 ; LMULMAX1-NEXT: vfcvt.f.xu.v v9, v9 ; LMULMAX1-NEXT: vse32.v v9, (a1) -; LMULMAX1-NEXT: addi a0, a1, 16 -; LMULMAX1-NEXT: vse32.v v8, (a0) +; LMULMAX1-NEXT: addi a1, a1, 16 +; LMULMAX1-NEXT: vse32.v v8, (a1) ; LMULMAX1-NEXT: ret %a = load <8 x i32>, <8 x i32>* %x %d = uitofp <8 x i32> %a to <8 x float> @@ -263,8 +263,8 @@ ; LMULMAX1-NEXT: vse64.v v8, (a1) ; LMULMAX1-NEXT: addi a0, a1, 48 ; LMULMAX1-NEXT: vse64.v v11, (a0) -; LMULMAX1-NEXT: addi a0, a1, 16 -; LMULMAX1-NEXT: vse64.v v9, (a0) +; LMULMAX1-NEXT: addi a1, a1, 16 +; LMULMAX1-NEXT: vse64.v v9, (a1) ; LMULMAX1-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %d = sitofp <8 x i16> %a to <8 x double> @@ -307,8 +307,8 @@ ; LMULMAX1-NEXT: vse64.v v8, (a1) ; LMULMAX1-NEXT: addi a0, a1, 48 ; LMULMAX1-NEXT: vse64.v v11, (a0) -; LMULMAX1-NEXT: addi a0, a1, 16 -; LMULMAX1-NEXT: vse64.v v9, (a0) +; LMULMAX1-NEXT: addi a1, a1, 16 +; LMULMAX1-NEXT: vse64.v v9, (a1) ; LMULMAX1-NEXT: ret %a = load <8 x i16>, <8 x i16>* %x %d = uitofp <8 x i16> %a to <8 x double> diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll @@ -108,8 +108,8 @@ ; RV64-NEXT: vsetvli zero, a3, e16, m4, ta, ma ; RV64-NEXT: vle16.v v8, (a0) ; RV64-NEXT: vmv.s.x v12, a1 -; RV64-NEXT: slli a1, a2, 32 -; RV64-NEXT: srli a1, a1, 32 +; RV64-NEXT: slli a2, a2, 32 +; RV64-NEXT: srli a1, a2, 32 ; RV64-NEXT: addi a2, a1, 1 ; RV64-NEXT: vsetvli zero, a2, e16, m4, tu, ma ; RV64-NEXT: vslideup.vx v8, v12, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll @@ -773,8 +773,8 @@ ; LMULMAX1-RV32-NEXT: addi a0, a3, 48 ; LMULMAX1-RV32-NEXT: vse64.v v13, (a0) ; LMULMAX1-RV32-NEXT: vse64.v v14, (a3) -; LMULMAX1-RV32-NEXT: addi a0, a3, 16 -; LMULMAX1-RV32-NEXT: vse64.v v15, (a0) +; LMULMAX1-RV32-NEXT: addi a3, a3, 16 +; LMULMAX1-RV32-NEXT: vse64.v v15, (a3) ; LMULMAX1-RV32-NEXT: ret ; ; LMULMAX8-RV64-LABEL: vadd_vx_v16i64: @@ -847,8 +847,8 @@ ; LMULMAX1-RV64-NEXT: vse64.v v14, (a0) ; LMULMAX1-RV64-NEXT: addi a0, a2, 48 ; LMULMAX1-RV64-NEXT: vse64.v v12, (a0) -; LMULMAX1-RV64-NEXT: addi a0, a2, 16 -; LMULMAX1-RV64-NEXT: vse64.v v13, (a0) +; LMULMAX1-RV64-NEXT: addi a2, a2, 16 +; LMULMAX1-RV64-NEXT: vse64.v v13, (a2) ; LMULMAX1-RV64-NEXT: ret %va = load <16 x i64>, <16 x i64>* %a %head = insertelement <16 x i64> poison, i64 %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll @@ -208,9 +208,9 @@ define void @splat_concat_high(<4 x i16>* %x, <4 x i16>* %y, <8 x i16>* %z) { ; CHECK-LABEL: splat_concat_high: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, a1, 2 +; CHECK-NEXT: addi a1, a1, 2 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma -; CHECK-NEXT: vlse16.v v8, (a0), zero +; CHECK-NEXT: vlse16.v v8, (a1), zero ; CHECK-NEXT: vse16.v v8, (a2) ; CHECK-NEXT: ret %a = load <4 x i16>, <4 x i16>* %x diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -5877,12 +5877,12 @@ ; ; RV64ZVE32F-LABEL: mgather_baseidx_zext_v8i16_v8i64: ; RV64ZVE32F: # %bb.0: -; RV64ZVE32F-NEXT: lui a3, 16 +; RV64ZVE32F-NEXT: lui a5, 16 ; RV64ZVE32F-NEXT: vsetivli zero, 0, e8, mf4, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a6, v0 -; RV64ZVE32F-NEXT: andi a4, a6, 1 -; RV64ZVE32F-NEXT: addiw a5, a3, -1 -; RV64ZVE32F-NEXT: beqz a4, .LBB53_3 +; RV64ZVE32F-NEXT: andi a3, a6, 1 +; RV64ZVE32F-NEXT: addiw a5, a5, -1 +; RV64ZVE32F-NEXT: beqz a3, .LBB53_3 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load ; RV64ZVE32F-NEXT: vsetvli zero, zero, e16, mf2, ta, ma ; RV64ZVE32F-NEXT: vmv.x.s a3, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll @@ -6005,8 +6005,8 @@ ; RV64ZVE32F-NEXT: andi a0, a7, -128 ; RV64ZVE32F-NEXT: beqz a0, .LBB51_9 ; RV64ZVE32F-NEXT: .LBB51_8: # %cond.store13 -; RV64ZVE32F-NEXT: slli a0, a5, 3 -; RV64ZVE32F-NEXT: add a0, a1, a0 +; RV64ZVE32F-NEXT: slli a5, a5, 3 +; RV64ZVE32F-NEXT: add a0, a1, a5 ; RV64ZVE32F-NEXT: sd a3, 0(a0) ; RV64ZVE32F-NEXT: .LBB51_9: # %else14 ; RV64ZVE32F-NEXT: ld s0, 24(sp) # 8-byte Folded Reload @@ -6024,38 +6024,38 @@ ; RV64ZVE32F-NEXT: andi a0, a7, 2 ; RV64ZVE32F-NEXT: beqz a0, .LBB51_2 ; RV64ZVE32F-NEXT: .LBB51_11: # %cond.store1 -; RV64ZVE32F-NEXT: slli a0, s2, 3 -; RV64ZVE32F-NEXT: add a0, a1, a0 +; RV64ZVE32F-NEXT: slli s2, s2, 3 +; RV64ZVE32F-NEXT: add a0, a1, s2 ; RV64ZVE32F-NEXT: sd s1, 0(a0) ; RV64ZVE32F-NEXT: andi a0, a7, 4 ; RV64ZVE32F-NEXT: beqz a0, .LBB51_3 ; RV64ZVE32F-NEXT: .LBB51_12: # %cond.store3 -; RV64ZVE32F-NEXT: slli a0, s0, 3 -; RV64ZVE32F-NEXT: add a0, a1, a0 +; RV64ZVE32F-NEXT: slli s0, s0, 3 +; RV64ZVE32F-NEXT: add a0, a1, s0 ; RV64ZVE32F-NEXT: sd t6, 0(a0) ; RV64ZVE32F-NEXT: andi a0, a7, 8 ; RV64ZVE32F-NEXT: beqz a0, .LBB51_4 ; RV64ZVE32F-NEXT: .LBB51_13: # %cond.store5 -; RV64ZVE32F-NEXT: slli a0, t5, 3 -; RV64ZVE32F-NEXT: add a0, a1, a0 +; RV64ZVE32F-NEXT: slli t5, t5, 3 +; RV64ZVE32F-NEXT: add a0, a1, t5 ; RV64ZVE32F-NEXT: sd t3, 0(a0) ; RV64ZVE32F-NEXT: andi a0, a7, 16 ; RV64ZVE32F-NEXT: beqz a0, .LBB51_5 ; RV64ZVE32F-NEXT: .LBB51_14: # %cond.store7 -; RV64ZVE32F-NEXT: slli a0, t4, 3 -; RV64ZVE32F-NEXT: add a0, a1, a0 +; RV64ZVE32F-NEXT: slli t4, t4, 3 +; RV64ZVE32F-NEXT: add a0, a1, t4 ; RV64ZVE32F-NEXT: sd t1, 0(a0) ; RV64ZVE32F-NEXT: andi a0, a7, 32 ; RV64ZVE32F-NEXT: beqz a0, .LBB51_6 ; RV64ZVE32F-NEXT: .LBB51_15: # %cond.store9 -; RV64ZVE32F-NEXT: slli a0, t2, 3 -; RV64ZVE32F-NEXT: add a0, a1, a0 +; RV64ZVE32F-NEXT: slli t2, t2, 3 +; RV64ZVE32F-NEXT: add a0, a1, t2 ; RV64ZVE32F-NEXT: sd a6, 0(a0) ; 
RV64ZVE32F-NEXT: andi a0, a7, 64 ; RV64ZVE32F-NEXT: beqz a0, .LBB51_7 ; RV64ZVE32F-NEXT: .LBB51_16: # %cond.store11 -; RV64ZVE32F-NEXT: slli a0, t0, 3 -; RV64ZVE32F-NEXT: add a0, a1, a0 +; RV64ZVE32F-NEXT: slli t0, t0, 3 +; RV64ZVE32F-NEXT: add a0, a1, t0 ; RV64ZVE32F-NEXT: sd a4, 0(a0) ; RV64ZVE32F-NEXT: andi a0, a7, -128 ; RV64ZVE32F-NEXT: bnez a0, .LBB51_8 @@ -10701,44 +10701,44 @@ ; RV64ZVE32F-NEXT: andi a1, a3, 2 ; RV64ZVE32F-NEXT: beqz a1, .LBB90_2 ; RV64ZVE32F-NEXT: .LBB90_10: # %cond.store1 -; RV64ZVE32F-NEXT: slli a1, t1, 3 -; RV64ZVE32F-NEXT: add a1, a0, a1 +; RV64ZVE32F-NEXT: slli t1, t1, 3 +; RV64ZVE32F-NEXT: add a1, a0, t1 ; RV64ZVE32F-NEXT: fsd fa1, 0(a1) ; RV64ZVE32F-NEXT: andi a1, a3, 4 ; RV64ZVE32F-NEXT: beqz a1, .LBB90_3 ; RV64ZVE32F-NEXT: .LBB90_11: # %cond.store3 -; RV64ZVE32F-NEXT: slli a1, t0, 3 -; RV64ZVE32F-NEXT: add a1, a0, a1 +; RV64ZVE32F-NEXT: slli t0, t0, 3 +; RV64ZVE32F-NEXT: add a1, a0, t0 ; RV64ZVE32F-NEXT: fsd fa2, 0(a1) ; RV64ZVE32F-NEXT: andi a1, a3, 8 ; RV64ZVE32F-NEXT: beqz a1, .LBB90_4 ; RV64ZVE32F-NEXT: .LBB90_12: # %cond.store5 -; RV64ZVE32F-NEXT: slli a1, a7, 3 -; RV64ZVE32F-NEXT: add a1, a0, a1 +; RV64ZVE32F-NEXT: slli a7, a7, 3 +; RV64ZVE32F-NEXT: add a1, a0, a7 ; RV64ZVE32F-NEXT: fsd fa3, 0(a1) ; RV64ZVE32F-NEXT: andi a1, a3, 16 ; RV64ZVE32F-NEXT: beqz a1, .LBB90_5 ; RV64ZVE32F-NEXT: .LBB90_13: # %cond.store7 -; RV64ZVE32F-NEXT: slli a1, a6, 3 -; RV64ZVE32F-NEXT: add a1, a0, a1 +; RV64ZVE32F-NEXT: slli a6, a6, 3 +; RV64ZVE32F-NEXT: add a1, a0, a6 ; RV64ZVE32F-NEXT: fsd fa4, 0(a1) ; RV64ZVE32F-NEXT: andi a1, a3, 32 ; RV64ZVE32F-NEXT: beqz a1, .LBB90_6 ; RV64ZVE32F-NEXT: .LBB90_14: # %cond.store9 -; RV64ZVE32F-NEXT: slli a1, a5, 3 -; RV64ZVE32F-NEXT: add a1, a0, a1 +; RV64ZVE32F-NEXT: slli a5, a5, 3 +; RV64ZVE32F-NEXT: add a1, a0, a5 ; RV64ZVE32F-NEXT: fsd fa5, 0(a1) ; RV64ZVE32F-NEXT: andi a1, a3, 64 ; RV64ZVE32F-NEXT: beqz a1, .LBB90_7 ; RV64ZVE32F-NEXT: .LBB90_15: # %cond.store11 -; RV64ZVE32F-NEXT: slli a1, a4, 3 -; RV64ZVE32F-NEXT: add a1, a0, a1 +; RV64ZVE32F-NEXT: slli a4, a4, 3 +; RV64ZVE32F-NEXT: add a1, a0, a4 ; RV64ZVE32F-NEXT: fsd fa6, 0(a1) ; RV64ZVE32F-NEXT: andi a1, a3, -128 ; RV64ZVE32F-NEXT: beqz a1, .LBB90_8 ; RV64ZVE32F-NEXT: .LBB90_16: # %cond.store13 -; RV64ZVE32F-NEXT: slli a1, a2, 3 -; RV64ZVE32F-NEXT: add a0, a0, a1 +; RV64ZVE32F-NEXT: slli a2, a2, 3 +; RV64ZVE32F-NEXT: add a0, a0, a2 ; RV64ZVE32F-NEXT: fsd fa7, 0(a0) ; RV64ZVE32F-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll @@ -423,9 +423,9 @@ ; RV32-NEXT: lbu a6, 3(a0) ; RV32-NEXT: slli a3, a3, 8 ; RV32-NEXT: or a3, a3, a4 -; RV32-NEXT: slli a4, a5, 16 -; RV32-NEXT: slli a5, a6, 24 -; RV32-NEXT: or a4, a5, a4 +; RV32-NEXT: slli a5, a5, 16 +; RV32-NEXT: slli a6, a6, 24 +; RV32-NEXT: or a4, a6, a5 ; RV32-NEXT: or a3, a4, a3 ; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV32-NEXT: vmv.v.x v8, a3 @@ -444,9 +444,9 @@ ; RV32-NEXT: lbu a0, 7(a0) ; RV32-NEXT: slli a2, a2, 8 ; RV32-NEXT: or a2, a2, a3 -; RV32-NEXT: slli a3, a4, 16 +; RV32-NEXT: slli a4, a4, 16 ; RV32-NEXT: slli a0, a0, 24 -; RV32-NEXT: or a0, a0, a3 +; RV32-NEXT: or a0, a0, a4 ; RV32-NEXT: or a0, a0, a2 ; RV32-NEXT: vmv.s.x v9, a0 ; RV32-NEXT: vsetvli zero, zero, e32, mf2, tu, ma @@ -471,9 +471,9 @@ ; RV64-NEXT: lb a6, 3(a0) ; 
RV64-NEXT: slli a3, a3, 8 ; RV64-NEXT: or a3, a3, a4 -; RV64-NEXT: slli a4, a5, 16 -; RV64-NEXT: slli a5, a6, 24 -; RV64-NEXT: or a4, a5, a4 +; RV64-NEXT: slli a5, a5, 16 +; RV64-NEXT: slli a6, a6, 24 +; RV64-NEXT: or a4, a6, a5 ; RV64-NEXT: or a3, a4, a3 ; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma ; RV64-NEXT: vmv.v.x v8, a3 @@ -492,9 +492,9 @@ ; RV64-NEXT: lb a0, 7(a0) ; RV64-NEXT: slli a2, a2, 8 ; RV64-NEXT: or a2, a2, a3 -; RV64-NEXT: slli a3, a4, 16 +; RV64-NEXT: slli a4, a4, 16 ; RV64-NEXT: slli a0, a0, 24 -; RV64-NEXT: or a0, a0, a3 +; RV64-NEXT: or a0, a0, a4 ; RV64-NEXT: or a0, a0, a2 ; RV64-NEXT: vmv.s.x v9, a0 ; RV64-NEXT: vsetvli zero, zero, e32, mf2, tu, ma diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll @@ -659,8 +659,8 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma ; CHECK-NEXT: vmv.s.x v9, a0 -; CHECK-NEXT: slli a0, a1, 32 -; CHECK-NEXT: srli a0, a0, 32 +; CHECK-NEXT: slli a1, a1, 32 +; CHECK-NEXT: srli a0, a1, 32 ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, ma ; CHECK-NEXT: vslideup.vx v8, v9, a0 @@ -696,8 +696,8 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a2, zero, e64, m2, ta, ma ; CHECK-NEXT: vmv.s.x v10, a0 -; CHECK-NEXT: slli a0, a1, 32 -; CHECK-NEXT: srli a0, a0, 32 +; CHECK-NEXT: slli a1, a1, 32 +; CHECK-NEXT: srli a0, a1, 32 ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, ma ; CHECK-NEXT: vslideup.vx v8, v10, a0 @@ -733,8 +733,8 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a2, zero, e64, m4, ta, ma ; CHECK-NEXT: vmv.s.x v12, a0 -; CHECK-NEXT: slli a0, a1, 32 -; CHECK-NEXT: srli a0, a0, 32 +; CHECK-NEXT: slli a1, a1, 32 +; CHECK-NEXT: srli a0, a1, 32 ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, ma ; CHECK-NEXT: vslideup.vx v8, v12, a0 @@ -770,8 +770,8 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a2, zero, e64, m8, ta, ma ; CHECK-NEXT: vmv.s.x v16, a0 -; CHECK-NEXT: slli a0, a1, 32 -; CHECK-NEXT: srli a0, a0, 32 +; CHECK-NEXT: slli a1, a1, 32 +; CHECK-NEXT: srli a0, a1, 32 ; CHECK-NEXT: addi a1, a0, 1 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma ; CHECK-NEXT: vslideup.vx v8, v16, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll --- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll @@ -2174,9 +2174,9 @@ ; CHECK-NEXT: beqz a5, .LBB34_7 ; CHECK-NEXT: .LBB34_5: # %for.body.preheader ; CHECK-NEXT: addi a2, a4, -1024 -; CHECK-NEXT: slli a3, a4, 2 -; CHECK-NEXT: add a1, a1, a3 -; CHECK-NEXT: add a0, a0, a3 +; CHECK-NEXT: slli a4, a4, 2 +; CHECK-NEXT: add a1, a1, a4 +; CHECK-NEXT: add a0, a0, a4 ; CHECK-NEXT: .LBB34_6: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: flw ft0, 0(a0) @@ -2277,9 +2277,9 @@ ; CHECK-NEXT: beqz a5, .LBB35_7 ; CHECK-NEXT: .LBB35_5: # %for.body.preheader ; CHECK-NEXT: addi a2, a4, -1024 -; CHECK-NEXT: slli a3, a4, 2 -; CHECK-NEXT: add a1, a1, a3 -; CHECK-NEXT: add a0, a0, a3 +; CHECK-NEXT: slli a4, a4, 2 +; CHECK-NEXT: add a1, a1, a4 +; CHECK-NEXT: add a0, a0, a4 ; CHECK-NEXT: .LBB35_6: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: flw ft0, 0(a0) diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll 
+++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll @@ -446,8 +446,8 @@ ; CHECK-NEXT: srli a2, a3, 2 ; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma ; CHECK-NEXT: vslidedown.vx v0, v24, a2 -; CHECK-NEXT: slli a2, a3, 4 -; CHECK-NEXT: add a1, a1, a2 +; CHECK-NEXT: slli a3, a3, 4 +; CHECK-NEXT: add a1, a1, a3 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll @@ -93,11 +93,11 @@ define half @vpreduce_fadd_nxv64f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_fadd_nxv64f16: ; CHECK: # %bb.0: -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a2, a1, 1 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: srli a1, a2, 1 ; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma -; CHECK-NEXT: vslidedown.vx v24, v0, a2 -; CHECK-NEXT: slli a2, a1, 2 +; CHECK-NEXT: vslidedown.vx v24, v0, a1 +; CHECK-NEXT: slli a2, a2, 2 ; CHECK-NEXT: sub a1, a0, a2 ; CHECK-NEXT: sltu a3, a0, a1 ; CHECK-NEXT: addi a3, a3, -1 @@ -125,11 +125,11 @@ define half @vpreduce_ord_fadd_nxv64f16(half %s, %v, %m, i32 zeroext %evl) { ; CHECK-LABEL: vpreduce_ord_fadd_nxv64f16: ; CHECK: # %bb.0: -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a2, a1, 1 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: srli a1, a2, 1 ; CHECK-NEXT: vsetvli a3, zero, e8, m1, ta, ma -; CHECK-NEXT: vslidedown.vx v24, v0, a2 -; CHECK-NEXT: slli a2, a1, 2 +; CHECK-NEXT: vslidedown.vx v24, v0, a1 +; CHECK-NEXT: slli a2, a2, 2 ; CHECK-NEXT: sub a1, a0, a2 ; CHECK-NEXT: sltu a3, a0, a1 ; CHECK-NEXT: addi a3, a3, -1 diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll @@ -1153,11 +1153,11 @@ define signext i32 @vpreduce_umax_nxv32i32(i32 signext %s, %v, %m, i32 zeroext %evl) { ; RV32-LABEL: vpreduce_umax_nxv32i32: ; RV32: # %bb.0: -; RV32-NEXT: csrr a2, vlenb -; RV32-NEXT: srli a3, a2, 2 +; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: srli a2, a3, 2 ; RV32-NEXT: vsetvli a4, zero, e8, mf2, ta, ma -; RV32-NEXT: vslidedown.vx v24, v0, a3 -; RV32-NEXT: slli a3, a2, 1 +; RV32-NEXT: vslidedown.vx v24, v0, a2 +; RV32-NEXT: slli a3, a3, 1 ; RV32-NEXT: sub a2, a1, a3 ; RV32-NEXT: sltu a4, a1, a2 ; RV32-NEXT: addi a4, a4, -1 diff --git a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll --- a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll +++ b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll @@ -64,12 +64,12 @@ ; RV32I-NEXT: lw a6, 0(a6) ; RV32I-NEXT: beqz a1, .LBB1_9 ; RV32I-NEXT: # %bb.8: # %entry -; RV32I-NEXT: addi a1, a4, 12 +; RV32I-NEXT: addi a3, a4, 12 ; RV32I-NEXT: j .LBB1_10 ; RV32I-NEXT: .LBB1_9: -; RV32I-NEXT: addi a1, a3, 12 +; RV32I-NEXT: addi a3, a3, 12 ; RV32I-NEXT: .LBB1_10: # %entry -; RV32I-NEXT: lw a1, 0(a1) +; RV32I-NEXT: lw a1, 0(a3) ; RV32I-NEXT: sw a1, 12(a0) ; RV32I-NEXT: sw a6, 8(a0) ; RV32I-NEXT: sw a5, 4(a0) @@ -147,12 +147,12 @@ ; RV32I-NEXT: lw a6, 0(a6) ; RV32I-NEXT: bnez a1, .LBB3_9 ; RV32I-NEXT: # %bb.8: # %entry -; RV32I-NEXT: addi a1, a3, 12 +; RV32I-NEXT: addi a2, a3, 12 ; RV32I-NEXT: j .LBB3_10 ; RV32I-NEXT: .LBB3_9: -; RV32I-NEXT: addi a1, a2, 12 +; RV32I-NEXT: addi a2, a2, 12 ; RV32I-NEXT: .LBB3_10: # %entry 
-; RV32I-NEXT: lw a1, 0(a1) +; RV32I-NEXT: lw a1, 0(a2) ; RV32I-NEXT: sw a1, 12(a0) ; RV32I-NEXT: sw a6, 8(a0) ; RV32I-NEXT: sw a5, 4(a0) diff --git a/llvm/test/CodeGen/RISCV/shifts.ll b/llvm/test/CodeGen/RISCV/shifts.ll --- a/llvm/test/CodeGen/RISCV/shifts.ll +++ b/llvm/test/CodeGen/RISCV/shifts.ll @@ -563,8 +563,8 @@ ; RV32I-NEXT: sll a0, a0, a5 ; RV32I-NEXT: or a0, a0, a4 ; RV32I-NEXT: srl a1, a1, a2 -; RV32I-NEXT: slli a2, a3, 1 -; RV32I-NEXT: sll a2, a2, a5 +; RV32I-NEXT: slli a3, a3, 1 +; RV32I-NEXT: sll a2, a3, a5 ; RV32I-NEXT: or a1, a2, a1 ; RV32I-NEXT: ret ; @@ -631,8 +631,8 @@ ; RV32I-NEXT: sll a4, a4, t1 ; RV32I-NEXT: or a3, a4, a3 ; RV32I-NEXT: srl a1, a1, a2 -; RV32I-NEXT: slli a2, a5, 1 -; RV32I-NEXT: sll a2, a2, t1 +; RV32I-NEXT: slli a5, a5, 1 +; RV32I-NEXT: sll a2, a5, t1 ; RV32I-NEXT: or a1, a2, a1 ; RV32I-NEXT: sw a1, 12(a0) ; RV32I-NEXT: sw a3, 8(a0) @@ -665,8 +665,8 @@ ; RV64I-NEXT: sll a0, a0, a5 ; RV64I-NEXT: or a0, a0, a4 ; RV64I-NEXT: srl a1, a1, a2 -; RV64I-NEXT: slli a2, a3, 1 -; RV64I-NEXT: sll a2, a2, a5 +; RV64I-NEXT: slli a3, a3, 1 +; RV64I-NEXT: sll a2, a3, a5 ; RV64I-NEXT: or a1, a2, a1 ; RV64I-NEXT: ret %res = tail call i128 @llvm.fshr.i128(i128 %a, i128 %a, i128 %b) diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll @@ -325,8 +325,8 @@ ; RV32-NEXT: srli a0, a0, 1 ; RV32-NEXT: or a0, a0, a1 ; RV32-NEXT: lw s4, 0(s0) -; RV32-NEXT: slli a1, a2, 30 -; RV32-NEXT: srli a1, a1, 31 +; RV32-NEXT: slli a2, a2, 30 +; RV32-NEXT: srli a1, a2, 31 ; RV32-NEXT: neg a1, a1 ; RV32-NEXT: li a2, 7 ; RV32-NEXT: li a3, 0 @@ -353,21 +353,21 @@ ; RV32-NEXT: seqz a3, a3 ; RV32-NEXT: or a0, a0, a1 ; RV32-NEXT: snez a0, a0 -; RV32-NEXT: addi a1, a3, -1 +; RV32-NEXT: addi a3, a3, -1 ; RV32-NEXT: addi a2, a2, -1 -; RV32-NEXT: neg a3, a0 -; RV32-NEXT: sw a3, 0(s0) -; RV32-NEXT: andi a3, a2, 7 -; RV32-NEXT: sb a3, 12(s0) -; RV32-NEXT: slli a3, a1, 1 -; RV32-NEXT: or a0, a3, a0 +; RV32-NEXT: neg a1, a0 +; RV32-NEXT: sw a1, 0(s0) +; RV32-NEXT: andi a1, a2, 7 +; RV32-NEXT: sb a1, 12(s0) +; RV32-NEXT: slli a1, a3, 1 +; RV32-NEXT: or a0, a1, a0 ; RV32-NEXT: sw a0, 4(s0) -; RV32-NEXT: srli a0, a1, 31 -; RV32-NEXT: andi a1, a1, 1 +; RV32-NEXT: srli a0, a3, 31 +; RV32-NEXT: andi a1, a3, 1 ; RV32-NEXT: slli a1, a1, 1 ; RV32-NEXT: or a0, a0, a1 -; RV32-NEXT: slli a1, a2, 2 -; RV32-NEXT: or a0, a0, a1 +; RV32-NEXT: slli a2, a2, 2 +; RV32-NEXT: or a0, a0, a2 ; RV32-NEXT: sw a0, 8(s0) ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload @@ -400,8 +400,8 @@ ; RV64-NEXT: slli a1, a1, 62 ; RV64-NEXT: or a0, a1, a0 ; RV64-NEXT: srai a0, a0, 31 -; RV64-NEXT: slli a1, a2, 31 -; RV64-NEXT: srai s2, a1, 31 +; RV64-NEXT: slli a2, a2, 31 +; RV64-NEXT: srai s2, a2, 31 ; RV64-NEXT: li a1, 7 ; RV64-NEXT: call __moddi3@plt ; RV64-NEXT: mv s3, a0 @@ -420,10 +420,10 @@ ; RV64-NEXT: srli a0, a0, 1 ; RV64-NEXT: or a0, a0, a2 ; RV64-NEXT: sltu a0, a1, a0 -; RV64-NEXT: addi a1, s1, -2 -; RV64-NEXT: seqz a1, a1 -; RV64-NEXT: addi a2, s3, -1 -; RV64-NEXT: seqz a2, a2 +; RV64-NEXT: addi s1, s1, -2 +; RV64-NEXT: seqz a1, s1 +; RV64-NEXT: addi s3, s3, -1 +; RV64-NEXT: seqz a2, s3 ; RV64-NEXT: neg a0, a0 ; RV64-NEXT: addi a2, a2, -1 ; RV64-NEXT: addi a1, a1, -1 @@ -437,8 +437,8 @@ ; RV64-NEXT: sb a1, 12(s0) ; RV64-NEXT: slli a0, a0, 31 ; RV64-NEXT: srli a0, a0, 31 -; RV64-NEXT: slli a1, a2, 33 -; 
RV64-NEXT: or a0, a0, a1 +; RV64-NEXT: slli a2, a2, 33 +; RV64-NEXT: or a0, a0, a2 ; RV64-NEXT: sd a0, 0(s0) ; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload @@ -475,8 +475,8 @@ ; RV32M-NEXT: srli a0, a0, 1 ; RV32M-NEXT: or a0, a0, a1 ; RV32M-NEXT: lw s4, 0(s0) -; RV32M-NEXT: slli a1, a2, 30 -; RV32M-NEXT: srli a1, a1, 31 +; RV32M-NEXT: slli a2, a2, 30 +; RV32M-NEXT: srli a1, a2, 31 ; RV32M-NEXT: neg a1, a1 ; RV32M-NEXT: li a2, 7 ; RV32M-NEXT: li a3, 0 @@ -503,21 +503,21 @@ ; RV32M-NEXT: seqz a3, a3 ; RV32M-NEXT: or a0, a0, a1 ; RV32M-NEXT: snez a0, a0 -; RV32M-NEXT: addi a1, a3, -1 +; RV32M-NEXT: addi a3, a3, -1 ; RV32M-NEXT: addi a2, a2, -1 -; RV32M-NEXT: neg a3, a0 -; RV32M-NEXT: sw a3, 0(s0) -; RV32M-NEXT: andi a3, a2, 7 -; RV32M-NEXT: sb a3, 12(s0) -; RV32M-NEXT: slli a3, a1, 1 -; RV32M-NEXT: or a0, a3, a0 +; RV32M-NEXT: neg a1, a0 +; RV32M-NEXT: sw a1, 0(s0) +; RV32M-NEXT: andi a1, a2, 7 +; RV32M-NEXT: sb a1, 12(s0) +; RV32M-NEXT: slli a1, a3, 1 +; RV32M-NEXT: or a0, a1, a0 ; RV32M-NEXT: sw a0, 4(s0) -; RV32M-NEXT: srli a0, a1, 31 -; RV32M-NEXT: andi a1, a1, 1 +; RV32M-NEXT: srli a0, a3, 31 +; RV32M-NEXT: andi a1, a3, 1 ; RV32M-NEXT: slli a1, a1, 1 ; RV32M-NEXT: or a0, a0, a1 -; RV32M-NEXT: slli a1, a2, 2 -; RV32M-NEXT: or a0, a0, a1 +; RV32M-NEXT: slli a2, a2, 2 +; RV32M-NEXT: or a0, a0, a2 ; RV32M-NEXT: sw a0, 8(s0) ; RV32M-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32M-NEXT: lw s0, 24(sp) # 4-byte Folded Reload @@ -590,8 +590,8 @@ ; RV64M-NEXT: srli a3, a3, 62 ; RV64M-NEXT: or a1, a3, a1 ; RV64M-NEXT: sw a1, 8(a0) -; RV64M-NEXT: slli a1, a2, 29 -; RV64M-NEXT: srli a1, a1, 61 +; RV64M-NEXT: slli a2, a2, 29 +; RV64M-NEXT: srli a1, a2, 61 ; RV64M-NEXT: sb a1, 12(a0) ; RV64M-NEXT: ret ; @@ -620,8 +620,8 @@ ; RV32MV-NEXT: slli a3, a2, 30 ; RV32MV-NEXT: srli a0, a0, 2 ; RV32MV-NEXT: or s5, a0, a3 -; RV32MV-NEXT: slli a0, a2, 29 -; RV32MV-NEXT: srli a2, a0, 31 +; RV32MV-NEXT: slli a2, a2, 29 +; RV32MV-NEXT: srli a2, a2, 31 ; RV32MV-NEXT: lw a0, 0(s2) ; RV32MV-NEXT: neg s6, a2 ; RV32MV-NEXT: andi a1, a1, 1 @@ -779,10 +779,10 @@ ; RV64MV-NEXT: slli a4, a3, 33 ; RV64MV-NEXT: or a1, a1, a4 ; RV64MV-NEXT: sd a1, 0(a0) -; RV64MV-NEXT: slli a1, a2, 2 -; RV64MV-NEXT: slli a2, a3, 31 -; RV64MV-NEXT: srli a2, a2, 62 -; RV64MV-NEXT: or a1, a2, a1 +; RV64MV-NEXT: slli a2, a2, 2 +; RV64MV-NEXT: slli a3, a3, 31 +; RV64MV-NEXT: srli a1, a3, 62 +; RV64MV-NEXT: or a1, a1, a2 ; RV64MV-NEXT: sw a1, 8(a0) ; RV64MV-NEXT: addi sp, s0, -64 ; RV64MV-NEXT: ld ra, 56(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll --- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll +++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll @@ -68,9 +68,9 @@ ; RV32I-NEXT: lbu a0, 3(a0) ; RV32I-NEXT: slli a1, a1, 8 ; RV32I-NEXT: or a1, a1, a2 -; RV32I-NEXT: slli a2, a3, 16 +; RV32I-NEXT: slli a3, a3, 16 ; RV32I-NEXT: slli a0, a0, 24 -; RV32I-NEXT: or a0, a0, a2 +; RV32I-NEXT: or a0, a0, a3 ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: ret ; @@ -82,9 +82,9 @@ ; RV64I-NEXT: lb a0, 3(a0) ; RV64I-NEXT: slli a1, a1, 8 ; RV64I-NEXT: or a1, a1, a2 -; RV64I-NEXT: slli a2, a3, 16 +; RV64I-NEXT: slli a3, a3, 16 ; RV64I-NEXT: slli a0, a0, 24 -; RV64I-NEXT: or a0, a0, a2 +; RV64I-NEXT: or a0, a0, a3 ; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: ret ; @@ -105,9 +105,9 @@ ; RV32I-NEXT: lbu a4, 3(a0) ; RV32I-NEXT: slli a1, a1, 8 ; RV32I-NEXT: or a1, a1, a2 -; RV32I-NEXT: slli a2, a3, 16 -; RV32I-NEXT: slli a3, a4, 
24 -; RV32I-NEXT: or a2, a3, a2 +; RV32I-NEXT: slli a3, a3, 16 +; RV32I-NEXT: slli a4, a4, 24 +; RV32I-NEXT: or a2, a4, a3 ; RV32I-NEXT: or a2, a2, a1 ; RV32I-NEXT: lbu a1, 5(a0) ; RV32I-NEXT: lbu a3, 4(a0) @@ -115,9 +115,9 @@ ; RV32I-NEXT: lbu a0, 7(a0) ; RV32I-NEXT: slli a1, a1, 8 ; RV32I-NEXT: or a1, a1, a3 -; RV32I-NEXT: slli a3, a4, 16 +; RV32I-NEXT: slli a4, a4, 16 ; RV32I-NEXT: slli a0, a0, 24 -; RV32I-NEXT: or a0, a0, a3 +; RV32I-NEXT: or a0, a0, a4 ; RV32I-NEXT: or a1, a0, a1 ; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: ret @@ -130,9 +130,9 @@ ; RV64I-NEXT: lbu a4, 3(a0) ; RV64I-NEXT: slli a1, a1, 8 ; RV64I-NEXT: or a1, a1, a2 -; RV64I-NEXT: slli a2, a3, 16 -; RV64I-NEXT: slli a3, a4, 24 -; RV64I-NEXT: or a2, a3, a2 +; RV64I-NEXT: slli a3, a3, 16 +; RV64I-NEXT: slli a4, a4, 24 +; RV64I-NEXT: or a2, a4, a3 ; RV64I-NEXT: or a1, a2, a1 ; RV64I-NEXT: lbu a2, 5(a0) ; RV64I-NEXT: lbu a3, 4(a0) @@ -140,9 +140,9 @@ ; RV64I-NEXT: lbu a0, 7(a0) ; RV64I-NEXT: slli a2, a2, 8 ; RV64I-NEXT: or a2, a2, a3 -; RV64I-NEXT: slli a3, a4, 16 +; RV64I-NEXT: slli a4, a4, 16 ; RV64I-NEXT: slli a0, a0, 24 -; RV64I-NEXT: or a0, a0, a3 +; RV64I-NEXT: or a0, a0, a4 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: or a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll @@ -357,18 +357,18 @@ ; RV32-NEXT: addi a0, a0, -1463 ; RV32-NEXT: andi a0, a0, 2047 ; RV32-NEXT: sltiu a0, a0, 293 -; RV32-NEXT: addi a1, s3, -1 +; RV32-NEXT: addi s3, s3, -1 ; RV32-NEXT: addi a0, a0, -1 -; RV32-NEXT: addi a2, s1, -1 -; RV32-NEXT: slli a3, a2, 21 -; RV32-NEXT: srli a3, a3, 31 -; RV32-NEXT: sb a3, 4(s0) -; RV32-NEXT: andi a1, a1, 2047 +; RV32-NEXT: addi s1, s1, -1 +; RV32-NEXT: slli a1, s1, 21 +; RV32-NEXT: srli a1, a1, 31 +; RV32-NEXT: sb a1, 4(s0) +; RV32-NEXT: andi a1, s3, 2047 ; RV32-NEXT: andi a0, a0, 2047 ; RV32-NEXT: slli a0, a0, 11 ; RV32-NEXT: or a0, a1, a0 -; RV32-NEXT: slli a1, a2, 22 -; RV32-NEXT: or a0, a0, a1 +; RV32-NEXT: slli s1, s1, 22 +; RV32-NEXT: or a0, a0, s1 ; RV32-NEXT: sw a0, 0(s0) ; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload ; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload @@ -414,11 +414,11 @@ ; RV64-NEXT: addiw a0, a0, -1638 ; RV64-NEXT: andi a0, a0, 2047 ; RV64-NEXT: sltiu a0, a0, 2 -; RV64-NEXT: addiw a1, s3, -1 +; RV64-NEXT: addiw s3, s3, -1 ; RV64-NEXT: addi a0, a0, -1 -; RV64-NEXT: addiw a2, s2, -1 -; RV64-NEXT: andi a1, a1, 2047 -; RV64-NEXT: andi a2, a2, 2047 +; RV64-NEXT: addiw s2, s2, -1 +; RV64-NEXT: andi a1, s3, 2047 +; RV64-NEXT: andi a2, s2, 2047 ; RV64-NEXT: slli a2, a2, 11 ; RV64-NEXT: or a1, a1, a2 ; RV64-NEXT: slli a0, a0, 22 diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll --- a/llvm/test/CodeGen/RISCV/vararg.ll +++ b/llvm/test/CodeGen/RISCV/vararg.ll @@ -459,8 +459,8 @@ ; LP64-LP64F-LP64D-FPELIM: # %bb.0: ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 1023 -; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a0, 52 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1023 +; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 52 ; LP64-LP64F-LP64D-FPELIM-NEXT: li a2, 2 ; LP64-LP64F-LP64D-FPELIM-NEXT: call va1@plt ; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -473,8 +473,8 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded 
Spill ; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill ; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 16 -; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 1023 -; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a0, 52 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 1023 +; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a1, 52 ; LP64-LP64F-LP64D-WITHFP-NEXT: li a2, 2 ; LP64-LP64F-LP64D-WITHFP-NEXT: call va1@plt ; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -775,8 +775,8 @@ ; LP64-LP64F-LP64D-FPELIM: # %bb.0: ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 1023 -; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a0, 52 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1023 +; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 52 ; LP64-LP64F-LP64D-FPELIM-NEXT: call va2@plt ; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 16 @@ -788,8 +788,8 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill ; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 16 -; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 1023 -; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a0, 52 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 1023 +; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a1, 52 ; LP64-LP64F-LP64D-WITHFP-NEXT: call va2@plt ; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload @@ -1110,8 +1110,8 @@ ; LP64-LP64F-LP64D-FPELIM: # %bb.0: ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -16 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd ra, 8(sp) # 8-byte Folded Spill -; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 1 -; LP64-LP64F-LP64D-FPELIM-NEXT: slli a2, a0, 62 +; LP64-LP64F-LP64D-FPELIM-NEXT: li a2, 1 +; LP64-LP64F-LP64D-FPELIM-NEXT: slli a2, a2, 62 ; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 2 ; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 1111 ; LP64-LP64F-LP64D-FPELIM-NEXT: call va3@plt @@ -1125,8 +1125,8 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 0(sp) # 8-byte Folded Spill ; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 16 -; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 1 -; LP64-LP64F-LP64D-WITHFP-NEXT: slli a2, a0, 62 +; LP64-LP64F-LP64D-WITHFP-NEXT: li a2, 1 +; LP64-LP64F-LP64D-WITHFP-NEXT: slli a2, a2, 62 ; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 2 ; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 1111 ; LP64-LP64F-LP64D-WITHFP-NEXT: call va3@plt @@ -1527,8 +1527,8 @@ ; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, %hi(.LCPI11_2) ; LP64-LP64F-LP64D-FPELIM-NEXT: ld a3, %lo(.LCPI11_2)(a0) ; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, 2384 -; LP64-LP64F-LP64D-FPELIM-NEXT: addiw a0, a0, 761 -; LP64-LP64F-LP64D-FPELIM-NEXT: slli a6, a0, 11 +; LP64-LP64F-LP64D-FPELIM-NEXT: addiw a6, a0, 761 +; LP64-LP64F-LP64D-FPELIM-NEXT: slli a6, a6, 11 ; LP64-LP64F-LP64D-FPELIM-NEXT: li a0, 1 ; LP64-LP64F-LP64D-FPELIM-NEXT: li a1, 11 ; LP64-LP64F-LP64D-FPELIM-NEXT: li a4, 12 @@ -1559,8 +1559,8 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: lui a0, %hi(.LCPI11_2) ; LP64-LP64F-LP64D-WITHFP-NEXT: ld a3, %lo(.LCPI11_2)(a0) ; LP64-LP64F-LP64D-WITHFP-NEXT: lui a0, 2384 -; LP64-LP64F-LP64D-WITHFP-NEXT: addiw a0, a0, 761 -; LP64-LP64F-LP64D-WITHFP-NEXT: slli a6, a0, 11 +; LP64-LP64F-LP64D-WITHFP-NEXT: addiw a6, a0, 761 +; LP64-LP64F-LP64D-WITHFP-NEXT: slli a6, a6, 11 ; LP64-LP64F-LP64D-WITHFP-NEXT: li a0, 1 ; LP64-LP64F-LP64D-WITHFP-NEXT: li a1, 11 ; 
LP64-LP64F-LP64D-WITHFP-NEXT: li a4, 12 diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll --- a/llvm/test/CodeGen/RISCV/xaluo.ll +++ b/llvm/test/CodeGen/RISCV/xaluo.ll @@ -4018,9 +4018,9 @@ ; RV32-LABEL: uaddo.i64.constant_2048: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mv a3, a0 -; RV32-NEXT: addi a0, a0, 2047 -; RV32-NEXT: addi a4, a0, 1 -; RV32-NEXT: sltu a0, a4, a3 +; RV32-NEXT: addi a4, a0, 2047 +; RV32-NEXT: addi a4, a4, 1 +; RV32-NEXT: sltu a0, a4, a0 ; RV32-NEXT: add a5, a1, a0 ; RV32-NEXT: bgeu a4, a3, .LBB67_2 ; RV32-NEXT: # %bb.1: # %entry @@ -4041,9 +4041,9 @@ ; RV32ZBA-LABEL: uaddo.i64.constant_2048: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mv a3, a0 -; RV32ZBA-NEXT: addi a0, a0, 2047 -; RV32ZBA-NEXT: addi a4, a0, 1 -; RV32ZBA-NEXT: sltu a0, a4, a3 +; RV32ZBA-NEXT: addi a4, a0, 2047 +; RV32ZBA-NEXT: addi a4, a4, 1 +; RV32ZBA-NEXT: sltu a0, a4, a0 ; RV32ZBA-NEXT: add a5, a1, a0 ; RV32ZBA-NEXT: bgeu a4, a3, .LBB67_2 ; RV32ZBA-NEXT: # %bb.1: # %entry @@ -4072,9 +4072,9 @@ ; RV32-LABEL: uaddo.i64.constant_2049: ; RV32: # %bb.0: # %entry ; RV32-NEXT: mv a3, a0 -; RV32-NEXT: addi a0, a0, 2047 -; RV32-NEXT: addi a4, a0, 2 -; RV32-NEXT: sltu a0, a4, a3 +; RV32-NEXT: addi a4, a0, 2047 +; RV32-NEXT: addi a4, a4, 2 +; RV32-NEXT: sltu a0, a4, a0 ; RV32-NEXT: add a5, a1, a0 ; RV32-NEXT: bgeu a4, a3, .LBB68_2 ; RV32-NEXT: # %bb.1: # %entry @@ -4095,9 +4095,9 @@ ; RV32ZBA-LABEL: uaddo.i64.constant_2049: ; RV32ZBA: # %bb.0: # %entry ; RV32ZBA-NEXT: mv a3, a0 -; RV32ZBA-NEXT: addi a0, a0, 2047 -; RV32ZBA-NEXT: addi a4, a0, 2 -; RV32ZBA-NEXT: sltu a0, a4, a3 +; RV32ZBA-NEXT: addi a4, a0, 2047 +; RV32ZBA-NEXT: addi a4, a4, 2 +; RV32ZBA-NEXT: sltu a0, a4, a0 ; RV32ZBA-NEXT: add a5, a1, a0 ; RV32ZBA-NEXT: bgeu a4, a3, .LBB68_2 ; RV32ZBA-NEXT: # %bb.1: # %entry