diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt --- a/llvm/lib/Target/RISCV/CMakeLists.txt +++ b/llvm/lib/Target/RISCV/CMakeLists.txt @@ -37,6 +37,7 @@ RISCVRedundantCopyElimination.cpp RISCVRegisterInfo.cpp RISCVSExtWRemoval.cpp + RISCVStripWSuffix.cpp RISCVSubtarget.cpp RISCVTargetMachine.cpp RISCVTargetObjectFile.cpp diff --git a/llvm/lib/Target/RISCV/RISCV.h b/llvm/lib/Target/RISCV/RISCV.h --- a/llvm/lib/Target/RISCV/RISCV.h +++ b/llvm/lib/Target/RISCV/RISCV.h @@ -50,6 +50,9 @@ FunctionPass *createRISCVSExtWRemovalPass(); void initializeRISCVSExtWRemovalPass(PassRegistry &); +FunctionPass *createRISCVStripWSuffixPass(); +void initializeRISCVStripWSuffixPass(PassRegistry &); + FunctionPass *createRISCVMergeBaseOffsetOptPass(); void initializeRISCVMergeBaseOffsetOptPass(PassRegistry &); diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h @@ -249,6 +249,10 @@ // Special immediate for AVL operand of V pseudo instructions to indicate VLMax. static constexpr int64_t VLMaxSentinel = -1LL; + +// Returns true if all uses of OrigMI only depend on the lower word of its +// output, so we can transform OrigMI to the corresponding W-version. +bool hasAllWUsers(const MachineInstr &MI, MachineRegisterInfo &MRI); } // namespace RISCV namespace RISCVVPseudosTable { diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -2603,3 +2603,179 @@ MachineOperand FrmOp2 = MI2.getOperand(MI2FrmOpIdx); return FrmOp1.getImm() == FrmOp2.getImm(); } + +// Checks if all users only demand the lower word of the original instruction's +// result. 
+// TODO: handle multiple interdependent transformations
+bool RISCV::hasAllWUsers(const MachineInstr &OrigMI, MachineRegisterInfo &MRI) {
+
+  SmallPtrSet<const MachineInstr *, 4> Visited;
+  SmallVector<const MachineInstr *, 4> Worklist;
+
+  Worklist.push_back(&OrigMI);
+
+  while (!Worklist.empty()) {
+    const MachineInstr *MI = Worklist.pop_back_val();
+
+    if (!Visited.insert(MI).second)
+      continue;
+
+    // Only handle instructions with one def.
+    if (MI->getNumExplicitDefs() != 1)
+      return false;
+
+    for (auto &UserOp : MRI.use_operands(MI->getOperand(0).getReg())) {
+      const MachineInstr *UserMI = UserOp.getParent();
+      unsigned OpIdx = UserMI->getOperandNo(&UserOp);
+
+      switch (UserMI->getOpcode()) {
+      default:
+        return false;
+
+      case RISCV::ADDIW:
+      case RISCV::ADDW:
+      case RISCV::DIVUW:
+      case RISCV::DIVW:
+      case RISCV::MULW:
+      case RISCV::REMUW:
+      case RISCV::REMW:
+      case RISCV::SLLIW:
+      case RISCV::SLLW:
+      case RISCV::SRAIW:
+      case RISCV::SRAW:
+      case RISCV::SRLIW:
+      case RISCV::SRLW:
+      case RISCV::SUBW:
+      case RISCV::ROLW:
+      case RISCV::RORW:
+      case RISCV::RORIW:
+      case RISCV::CLZW:
+      case RISCV::CTZW:
+      case RISCV::CPOPW:
+      case RISCV::SLLI_UW:
+      case RISCV::FMV_H_X:
+      case RISCV::FMV_W_X:
+      case RISCV::FCVT_H_W:
+      case RISCV::FCVT_H_WU:
+      case RISCV::FCVT_S_W:
+      case RISCV::FCVT_S_WU:
+      case RISCV::FCVT_D_W:
+      case RISCV::FCVT_D_WU:
+      case RISCV::SEXT_B:
+      case RISCV::SEXT_H:
+      case RISCV::ZEXT_H_RV64:
+      case RISCV::PACK:
+      case RISCV::PACKH:
+      case RISCV::PACKW:
+        break;
+
+      // these overwrite higher input bits, otherwise the lower word of output
+      // depends only on the lower word of input. So check their uses read W.
+ case RISCV::SLLI: + if (UserMI->getOperand(2).getImm() >= 32) + break; + Worklist.push_back(UserMI); + break; + case RISCV::ANDI: + if (isUInt<11>(UserMI->getOperand(2).getImm())) + break; + Worklist.push_back(UserMI); + break; + case RISCV::ORI: + if (!isUInt<11>(UserMI->getOperand(2).getImm())) + break; + Worklist.push_back(UserMI); + break; + + case RISCV::SLL: + case RISCV::BSET: + case RISCV::BCLR: + case RISCV::BINV: + // Operand 2 is the shift amount which uses 6 bits. + if (OpIdx == 2) + break; + Worklist.push_back(UserMI); + break; + + case RISCV::SRA: + case RISCV::SRL: + case RISCV::ROL: + case RISCV::ROR: + // Operand 2 is the shift amount which uses 6 bits. + if (OpIdx == 2) + break; + return false; + + case RISCV::ADD_UW: + case RISCV::SH1ADD_UW: + case RISCV::SH2ADD_UW: + case RISCV::SH3ADD_UW: + // Operand 1 is implicitly zero extended. + if (OpIdx == 1) + break; + Worklist.push_back(UserMI); + break; + + case RISCV::BEXTI: + if (UserMI->getOperand(2).getImm() >= 32) + return false; + break; + + case RISCV::SB: + case RISCV::SH: + case RISCV::SW: + // The first argument is the value to store. + if (OpIdx != 0) + return false; + break; + + // For these, lower word of output in these operations, depends only on + // the lower word of input. So, we check all uses only read lower word. + case RISCV::COPY: + case RISCV::PHI: + + case RISCV::ADD: + case RISCV::ADDI: + case RISCV::AND: + case RISCV::MUL: + case RISCV::OR: + case RISCV::SUB: + case RISCV::XOR: + case RISCV::XORI: + + case RISCV::ANDN: + case RISCV::BREV8: + case RISCV::CLMUL: + case RISCV::ORC_B: + case RISCV::ORN: + case RISCV::SH1ADD: + case RISCV::SH2ADD: + case RISCV::SH3ADD: + case RISCV::XNOR: + case RISCV::BSETI: + case RISCV::BCLRI: + case RISCV::BINVI: + Worklist.push_back(UserMI); + break; + + case RISCV::PseudoCCMOVGPR: + // Either operand 4 or operand 5 is returned by this instruction. 
If
+      // only the lower word of the result is used, then only the lower word
+      // of operand 4 and 5 is used.
+      if (OpIdx != 4 && OpIdx != 5)
+        return false;
+      Worklist.push_back(UserMI);
+      break;
+
+    case RISCV::VT_MASKC:
+    case RISCV::VT_MASKCN:
+      if (OpIdx != 1)
+        return false;
+      Worklist.push_back(UserMI);
+      break;
+      }
+    }
+  }
+
+  return true;
+}
diff --git a/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp b/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp
--- a/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp
+++ b/llvm/lib/Target/RISCV/RISCVSExtWRemoval.cpp
@@ -60,182 +60,6 @@
   return new RISCVSExtWRemoval();
 }
 
-// returns true if all uses of OrigMI only depend on the lower word of its
-// output, so we can transform OrigMI to the corresponding W-version.
-// TODO: handle multiple interdependent transformations
-static bool hasAllWUsers(const MachineInstr &OrigMI, MachineRegisterInfo &MRI) {
-
-  SmallPtrSet<const MachineInstr *, 4> Visited;
-  SmallVector<const MachineInstr *, 4> Worklist;
-
-  Worklist.push_back(&OrigMI);
-
-  while (!Worklist.empty()) {
-    const MachineInstr *MI = Worklist.pop_back_val();
-
-    if (!Visited.insert(MI).second)
-      continue;
-
-    // Only handle instructions with one def.
- if (MI->getNumExplicitDefs() != 1) - return false; - - for (auto &UserOp : MRI.use_operands(MI->getOperand(0).getReg())) { - const MachineInstr *UserMI = UserOp.getParent(); - unsigned OpIdx = UserMI->getOperandNo(&UserOp); - - switch (UserMI->getOpcode()) { - default: - return false; - - case RISCV::ADDIW: - case RISCV::ADDW: - case RISCV::DIVUW: - case RISCV::DIVW: - case RISCV::MULW: - case RISCV::REMUW: - case RISCV::REMW: - case RISCV::SLLIW: - case RISCV::SLLW: - case RISCV::SRAIW: - case RISCV::SRAW: - case RISCV::SRLIW: - case RISCV::SRLW: - case RISCV::SUBW: - case RISCV::ROLW: - case RISCV::RORW: - case RISCV::RORIW: - case RISCV::CLZW: - case RISCV::CTZW: - case RISCV::CPOPW: - case RISCV::SLLI_UW: - case RISCV::FMV_H_X: - case RISCV::FMV_W_X: - case RISCV::FCVT_H_W: - case RISCV::FCVT_H_WU: - case RISCV::FCVT_S_W: - case RISCV::FCVT_S_WU: - case RISCV::FCVT_D_W: - case RISCV::FCVT_D_WU: - case RISCV::SEXT_B: - case RISCV::SEXT_H: - case RISCV::ZEXT_H_RV64: - case RISCV::PACK: - case RISCV::PACKH: - case RISCV::PACKW: - break; - - // these overwrite higher input bits, otherwise the lower word of output - // depends only on the lower word of input. So check their uses read W. - case RISCV::SLLI: - if (UserMI->getOperand(2).getImm() >= 32) - break; - Worklist.push_back(UserMI); - break; - case RISCV::ANDI: - if (isUInt<11>(UserMI->getOperand(2).getImm())) - break; - Worklist.push_back(UserMI); - break; - case RISCV::ORI: - if (!isUInt<11>(UserMI->getOperand(2).getImm())) - break; - Worklist.push_back(UserMI); - break; - - case RISCV::SLL: - case RISCV::BSET: - case RISCV::BCLR: - case RISCV::BINV: - // Operand 2 is the shift amount which uses 6 bits. - if (OpIdx == 2) - break; - Worklist.push_back(UserMI); - break; - - case RISCV::SRA: - case RISCV::SRL: - case RISCV::ROL: - case RISCV::ROR: - // Operand 2 is the shift amount which uses 6 bits. 
- if (OpIdx == 2) - break; - return false; - - case RISCV::ADD_UW: - case RISCV::SH1ADD_UW: - case RISCV::SH2ADD_UW: - case RISCV::SH3ADD_UW: - // Operand 1 is implicitly zero extended. - if (OpIdx == 1) - break; - Worklist.push_back(UserMI); - break; - - case RISCV::BEXTI: - if (UserMI->getOperand(2).getImm() >= 32) - return false; - break; - - case RISCV::SB: - case RISCV::SH: - case RISCV::SW: - // The first argument is the value to store. - if (OpIdx != 0) - return false; - break; - - // For these, lower word of output in these operations, depends only on - // the lower word of input. So, we check all uses only read lower word. - case RISCV::COPY: - case RISCV::PHI: - - case RISCV::ADD: - case RISCV::ADDI: - case RISCV::AND: - case RISCV::MUL: - case RISCV::OR: - case RISCV::SUB: - case RISCV::XOR: - case RISCV::XORI: - - case RISCV::ANDN: - case RISCV::BREV8: - case RISCV::CLMUL: - case RISCV::ORC_B: - case RISCV::ORN: - case RISCV::SH1ADD: - case RISCV::SH2ADD: - case RISCV::SH3ADD: - case RISCV::XNOR: - case RISCV::BSETI: - case RISCV::BCLRI: - case RISCV::BINVI: - Worklist.push_back(UserMI); - break; - - case RISCV::PseudoCCMOVGPR: - // Either operand 4 or operand 5 is returned by this instruction. If - // only the lower word of the result is used, then only the lower word - // of operand 4 and 5 is used. - if (OpIdx != 4 && OpIdx != 5) - return false; - Worklist.push_back(UserMI); - break; - - case RISCV::VT_MASKC: - case RISCV::VT_MASKCN: - if (OpIdx != 1) - return false; - Worklist.push_back(UserMI); - break; - } - } - } - - return true; -} - // This function returns true if the machine instruction always outputs a value // where bits 63:32 match bit 31. 
static bool isSignExtendingOpW(MachineInstr &MI, MachineRegisterInfo &MRI) { @@ -438,7 +262,7 @@ case RISCV::LWU: case RISCV::MUL: case RISCV::SUB: - if (hasAllWUsers(*MI, MRI)) { + if (RISCV::hasAllWUsers(*MI, MRI)) { FixableDef.insert(MI); break; } @@ -499,7 +323,7 @@ // If all users only use the lower bits, this sext.w is redundant. // Or if all definitions reaching MI sign-extend their output, // then sext.w is redundant. - if (!hasAllWUsers(*MI, MRI) && !isSignExtendedW(SrcReg, MRI, FixableDefs)) + if (!RISCV::hasAllWUsers(*MI, MRI) && !isSignExtendedW(SrcReg, MRI, FixableDefs)) continue; Register DstReg = MI->getOperand(0).getReg(); diff --git a/llvm/lib/Target/RISCV/RISCVStripWSuffix.cpp b/llvm/lib/Target/RISCV/RISCVStripWSuffix.cpp new file mode 100644 --- /dev/null +++ b/llvm/lib/Target/RISCV/RISCVStripWSuffix.cpp @@ -0,0 +1,87 @@ +//===-------------- RISCVStripWSuffix.cpp - -w Suffix Removal -------------===// +// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// +// +// This pass removes the -w suffix from each addiw and slliw instructions +// whenever all users are dependent only on the lower word of the result of the +// instruction. We do this only for addiw and slliw because the -w forms are +// less compressible. 
+//
+//===---------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVMachineFunctionInfo.h"
+
+using namespace llvm;
+
+static cl::opt<bool> DisableStripWSuffix("riscv-disable-strip-w-suffix",
+                                         cl::desc("Disable strip W suffix"),
+                                         cl::init(false), cl::Hidden);
+
+namespace {
+
+class RISCVStripWSuffix : public MachineFunctionPass {
+public:
+  static char ID;
+
+  RISCVStripWSuffix() : MachineFunctionPass(ID) {
+    initializeRISCVStripWSuffixPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  StringRef getPassName() const override { return "RISCV Strip W Suffix"; }
+};
+
+} // end anonymous namespace
+
+char RISCVStripWSuffix::ID = 0;
+INITIALIZE_PASS(RISCVStripWSuffix, "riscv-strip-w-suffix",
+                "RISCV Strip W Suffix", false, false)
+
+FunctionPass *llvm::createRISCVStripWSuffixPass() {
+  return new RISCVStripWSuffix();
+}
+
+bool RISCVStripWSuffix::runOnMachineFunction(MachineFunction &MF) {
+  if (skipFunction(MF.getFunction()) || DisableStripWSuffix)
+    return false;
+
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
+  const RISCVInstrInfo &TII = *ST.getInstrInfo();
+
+  if (!ST.is64Bit())
+    return false;
+
+  bool MadeChange = false;
+  for (MachineBasicBlock &MBB : MF) {
+    for (auto I = MBB.begin(), IE = MBB.end(); I != IE; ++I) {
+      MachineInstr &MI = *I;
+
+      switch (MI.getOpcode()) {
+      case RISCV::ADDW:
+      case RISCV::SLLIW:
+        if (RISCV::hasAllWUsers(MI, MRI)) {
+          unsigned Opc =
+              MI.getOpcode() == RISCV::ADDW ?
RISCV::ADD : RISCV::SLLI; + MI.setDesc(TII.get(Opc)); + MadeChange = true; + } + break; + } + } + } + + return MadeChange; +} diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp --- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp @@ -77,6 +77,7 @@ initializeRISCVCodeGenPreparePass(*PR); initializeRISCVMergeBaseOffsetOptPass(*PR); initializeRISCVSExtWRemovalPass(*PR); + initializeRISCVStripWSuffixPass(*PR); initializeRISCVPreRAExpandPseudoPass(*PR); initializeRISCVExpandPseudoPass(*PR); initializeRISCVInsertVSETVLIPass(*PR); @@ -333,8 +334,10 @@ if (EnableMachineCombiner) addPass(&MachineCombinerID); - if (TM->getTargetTriple().getArch() == Triple::riscv64) + if (TM->getTargetTriple().getArch() == Triple::riscv64) { addPass(createRISCVSExtWRemovalPass()); + addPass(createRISCVStripWSuffixPass()); + } } void RISCVPassConfig::addPreRegAlloc() { diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll --- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll +++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll @@ -102,6 +102,7 @@ ; CHECK-NEXT: Lazy Machine Block Frequency Analysis ; CHECK-NEXT: Machine InstCombiner ; RV64-NEXT: RISCV sext.w Removal +; RV64-NEXT: RISCV Strip W Suffix ; CHECK-NEXT: RISCV Pre-RA pseudo instruction expansion pass ; CHECK-NEXT: RISCV Merge Base Offset ; CHECK-NEXT: RISCV Insert VSETVLI pass diff --git a/llvm/test/CodeGen/RISCV/add-before-shl.ll b/llvm/test/CodeGen/RISCV/add-before-shl.ll --- a/llvm/test/CodeGen/RISCV/add-before-shl.ll +++ b/llvm/test/CodeGen/RISCV/add-before-shl.ll @@ -79,7 +79,7 @@ ; RV64C: # %bb.0: ; RV64C-NEXT: c.lui a1, 1 ; RV64C-NEXT: c.addiw a1, -1 -; RV64C-NEXT: c.addw a0, a1 +; RV64C-NEXT: c.add a0, a1 ; RV64C-NEXT: c.slli a0, 48 ; RV64C-NEXT: c.srai a0, 48 ; RV64C-NEXT: c.jr ra @@ -119,7 +119,7 @@ ; RV64C: # %bb.0: ; RV64C-NEXT: c.lui a1, 8 ; RV64C-NEXT: c.addiw a1, -1 -; RV64C-NEXT: c.addw a0, a1 
+; RV64C-NEXT: c.add a0, a1 ; RV64C-NEXT: c.slli a0, 48 ; RV64C-NEXT: c.srai a0, 48 ; RV64C-NEXT: c.jr ra diff --git a/llvm/test/CodeGen/RISCV/add-imm.ll b/llvm/test/CodeGen/RISCV/add-imm.ll --- a/llvm/test/CodeGen/RISCV/add-imm.ll +++ b/llvm/test/CodeGen/RISCV/add-imm.ll @@ -232,8 +232,8 @@ ; RV64I-NEXT: lw a3, %lo(gb)(a2) ; RV64I-NEXT: lui a4, 1 ; RV64I-NEXT: addiw a4, a4, -1096 -; RV64I-NEXT: addw a1, a1, a4 -; RV64I-NEXT: addw a3, a3, a4 +; RV64I-NEXT: add a1, a1, a4 +; RV64I-NEXT: add a3, a3, a4 ; RV64I-NEXT: sw a1, %lo(ga)(a0) ; RV64I-NEXT: sw a3, %lo(gb)(a2) ; RV64I-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll --- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll +++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll @@ -99,7 +99,7 @@ ; RV64IA-LABEL: cmpxchg_masked_and_branch1: ; RV64IA: # %bb.0: # %entry ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a4, a0, 3 +; RV64IA-NEXT: slli a4, a0, 3 ; RV64IA-NEXT: li a0, 255 ; RV64IA-NEXT: sllw a0, a0, a4 ; RV64IA-NEXT: andi a1, a1, 255 @@ -172,7 +172,7 @@ ; RV64IA-LABEL: cmpxchg_masked_and_branch2: ; RV64IA: # %bb.0: # %entry ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a4, a0, 3 +; RV64IA-NEXT: slli a4, a0, 3 ; RV64IA-NEXT: li a0, 255 ; RV64IA-NEXT: sllw a0, a0, a4 ; RV64IA-NEXT: andi a1, a1, 255 diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll --- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll +++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll @@ -61,7 +61,7 @@ ; RV64IA-LABEL: cmpxchg_i8_monotonic_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -137,7 +137,7 @@ ; RV64IA-LABEL: cmpxchg_i8_acquire_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: 
slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -213,7 +213,7 @@ ; RV64IA-LABEL: cmpxchg_i8_acquire_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -289,7 +289,7 @@ ; RV64IA-LABEL: cmpxchg_i8_release_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -365,7 +365,7 @@ ; RV64IA-LABEL: cmpxchg_i8_release_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -441,7 +441,7 @@ ; RV64IA-LABEL: cmpxchg_i8_acq_rel_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -517,7 +517,7 @@ ; RV64IA-LABEL: cmpxchg_i8_acq_rel_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -593,7 +593,7 @@ ; RV64IA-LABEL: cmpxchg_i8_seq_cst_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -669,7 +669,7 @@ ; RV64IA-LABEL: cmpxchg_i8_seq_cst_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 
255 @@ -745,7 +745,7 @@ ; RV64IA-LABEL: cmpxchg_i8_seq_cst_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -822,7 +822,7 @@ ; RV64IA-LABEL: cmpxchg_i16_monotonic_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -900,7 +900,7 @@ ; RV64IA-LABEL: cmpxchg_i16_acquire_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -978,7 +978,7 @@ ; RV64IA-LABEL: cmpxchg_i16_acquire_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -1056,7 +1056,7 @@ ; RV64IA-LABEL: cmpxchg_i16_release_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -1134,7 +1134,7 @@ ; RV64IA-LABEL: cmpxchg_i16_release_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -1212,7 +1212,7 @@ ; RV64IA-LABEL: cmpxchg_i16_acq_rel_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -1290,7 +1290,7 @@ ; RV64IA-LABEL: cmpxchg_i16_acq_rel_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, 
a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -1368,7 +1368,7 @@ ; RV64IA-LABEL: cmpxchg_i16_seq_cst_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -1446,7 +1446,7 @@ ; RV64IA-LABEL: cmpxchg_i16_seq_cst_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -1524,7 +1524,7 @@ ; RV64IA-LABEL: cmpxchg_i16_seq_cst_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll --- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll +++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll @@ -52,7 +52,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -116,7 +116,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -180,7 +180,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -244,7 +244,7 @@ ; RV64IA-LABEL: 
atomicrmw_xchg_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -308,7 +308,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -372,7 +372,7 @@ ; RV64IA-LABEL: atomicrmw_add_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -436,7 +436,7 @@ ; RV64IA-LABEL: atomicrmw_add_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -500,7 +500,7 @@ ; RV64IA-LABEL: atomicrmw_add_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -564,7 +564,7 @@ ; RV64IA-LABEL: atomicrmw_add_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -628,7 +628,7 @@ ; RV64IA-LABEL: atomicrmw_add_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -692,7 +692,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; 
RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -756,7 +756,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -820,7 +820,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -884,7 +884,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -948,7 +948,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1006,7 +1006,7 @@ ; RV64IA-LABEL: atomicrmw_and_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 @@ -1058,7 +1058,7 @@ ; RV64IA-LABEL: atomicrmw_and_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 @@ -1110,7 +1110,7 @@ ; RV64IA-LABEL: atomicrmw_and_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 @@ -1162,7 +1162,7 @@ ; RV64IA-LABEL: atomicrmw_and_i8_acq_rel: ; RV64IA: 
# %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 @@ -1214,7 +1214,7 @@ ; RV64IA-LABEL: atomicrmw_and_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 @@ -1273,7 +1273,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1339,7 +1339,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1405,7 +1405,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1471,7 +1471,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1537,7 +1537,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1592,7 +1592,7 @@ ; RV64IA-LABEL: atomicrmw_or_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; 
RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoor.w a1, a1, (a2) @@ -1636,7 +1636,7 @@ ; RV64IA-LABEL: atomicrmw_or_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoor.w.aq a1, a1, (a2) @@ -1680,7 +1680,7 @@ ; RV64IA-LABEL: atomicrmw_or_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoor.w.rl a1, a1, (a2) @@ -1724,7 +1724,7 @@ ; RV64IA-LABEL: atomicrmw_or_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoor.w.aqrl a1, a1, (a2) @@ -1768,7 +1768,7 @@ ; RV64IA-LABEL: atomicrmw_or_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoor.w.aqrl a1, a1, (a2) @@ -1812,7 +1812,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoxor.w a1, a1, (a2) @@ -1856,7 +1856,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoxor.w.aq a1, a1, (a2) @@ -1900,7 +1900,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: 
amoxor.w.rl a1, a1, (a2) @@ -1944,7 +1944,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoxor.w.aqrl a1, a1, (a2) @@ -1988,7 +1988,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoxor.w.aqrl a1, a1, (a2) @@ -2115,7 +2115,7 @@ ; RV64IA-LABEL: atomicrmw_max_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 @@ -2261,7 +2261,7 @@ ; RV64IA-LABEL: atomicrmw_max_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 @@ -2407,7 +2407,7 @@ ; RV64IA-LABEL: atomicrmw_max_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 @@ -2553,7 +2553,7 @@ ; RV64IA-LABEL: atomicrmw_max_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 @@ -2699,7 +2699,7 @@ ; RV64IA-LABEL: atomicrmw_max_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 @@ -2845,7 +2845,7 @@ ; RV64IA-LABEL: atomicrmw_min_i8_monotonic: ; RV64IA: # 
%bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 @@ -2991,7 +2991,7 @@ ; RV64IA-LABEL: atomicrmw_min_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 @@ -3137,7 +3137,7 @@ ; RV64IA-LABEL: atomicrmw_min_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 @@ -3283,7 +3283,7 @@ ; RV64IA-LABEL: atomicrmw_min_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 @@ -3429,7 +3429,7 @@ ; RV64IA-LABEL: atomicrmw_min_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 @@ -3566,7 +3566,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -3698,7 +3698,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -3830,7 +3830,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; 
RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -3962,7 +3962,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4094,7 +4094,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4226,7 +4226,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4358,7 +4358,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4490,7 +4490,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4622,7 +4622,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4754,7 +4754,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4823,7 +4823,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i16_monotonic: 
; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -4889,7 +4889,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -4955,7 +4955,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5021,7 +5021,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5087,7 +5087,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5153,7 +5153,7 @@ ; RV64IA-LABEL: atomicrmw_add_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5219,7 +5219,7 @@ ; RV64IA-LABEL: atomicrmw_add_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5285,7 +5285,7 @@ ; RV64IA-LABEL: atomicrmw_add_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; 
RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5351,7 +5351,7 @@ ; RV64IA-LABEL: atomicrmw_add_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5417,7 +5417,7 @@ ; RV64IA-LABEL: atomicrmw_add_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5483,7 +5483,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5549,7 +5549,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5615,7 +5615,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5681,7 +5681,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5747,7 +5747,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5807,7 +5807,7 @@ ; RV64IA-LABEL: 
atomicrmw_and_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5861,7 +5861,7 @@ ; RV64IA-LABEL: atomicrmw_and_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5915,7 +5915,7 @@ ; RV64IA-LABEL: atomicrmw_and_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5969,7 +5969,7 @@ ; RV64IA-LABEL: atomicrmw_and_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -6023,7 +6023,7 @@ ; RV64IA-LABEL: atomicrmw_and_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -6084,7 +6084,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -6152,7 +6152,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -6220,7 +6220,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; 
RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -6288,7 +6288,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -6356,7 +6356,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -6413,7 +6413,7 @@ ; RV64IA-LABEL: atomicrmw_or_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srli a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -6459,7 +6459,7 @@ ; RV64IA-LABEL: atomicrmw_or_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srli a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -6505,7 +6505,7 @@ ; RV64IA-LABEL: atomicrmw_or_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srli a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -6551,7 +6551,7 @@ ; RV64IA-LABEL: atomicrmw_or_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srli a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -6597,7 +6597,7 @@ ; RV64IA-LABEL: atomicrmw_or_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srli a1, a1, 48 ; RV64IA-NEXT: sllw a1, 
a1, a0 @@ -6643,7 +6643,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srli a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -6689,7 +6689,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srli a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -6735,7 +6735,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srli a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -6781,7 +6781,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srli a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -6827,7 +6827,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srli a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -6957,7 +6957,7 @@ ; RV64IA-LABEL: atomicrmw_max_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 @@ -7107,7 +7107,7 @@ ; RV64IA-LABEL: atomicrmw_max_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 @@ -7257,7 +7257,7 @@ ; RV64IA-LABEL: atomicrmw_max_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: 
andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 @@ -7407,7 +7407,7 @@ ; RV64IA-LABEL: atomicrmw_max_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 @@ -7557,7 +7557,7 @@ ; RV64IA-LABEL: atomicrmw_max_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 @@ -7707,7 +7707,7 @@ ; RV64IA-LABEL: atomicrmw_min_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 @@ -7857,7 +7857,7 @@ ; RV64IA-LABEL: atomicrmw_min_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 @@ -8007,7 +8007,7 @@ ; RV64IA-LABEL: atomicrmw_min_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 @@ -8157,7 +8157,7 @@ ; RV64IA-LABEL: atomicrmw_min_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 @@ -8307,7 +8307,7 @@ ; RV64IA-LABEL: atomicrmw_min_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: lui 
a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 @@ -8455,7 +8455,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -8597,7 +8597,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -8739,7 +8739,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -8881,7 +8881,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -9023,7 +9023,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -9165,7 +9165,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -9307,7 +9307,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -9449,7 +9449,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i16_release: ; RV64IA: # 
%bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -9591,7 +9591,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -9733,7 +9733,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll --- a/llvm/test/CodeGen/RISCV/atomic-signext.ll +++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll @@ -170,7 +170,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -242,7 +242,7 @@ ; RV64IA-LABEL: atomicrmw_add_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -314,7 +314,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -380,7 +380,7 @@ ; RV64IA-LABEL: atomicrmw_and_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 @@ -447,7 +447,7 @@ 
; RV64IA-LABEL: atomicrmw_nand_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -510,7 +510,7 @@ ; RV64IA-LABEL: atomicrmw_or_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoor.w a1, a1, (a2) @@ -562,7 +562,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoxor.w a1, a1, (a2) @@ -695,7 +695,7 @@ ; RV64IA-LABEL: atomicrmw_max_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 @@ -847,7 +847,7 @@ ; RV64IA-LABEL: atomicrmw_min_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 @@ -990,7 +990,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1128,7 +1128,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a3, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1205,7 +1205,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: 
slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -1279,7 +1279,7 @@ ; RV64IA-LABEL: atomicrmw_add_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -1353,7 +1353,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -1421,7 +1421,7 @@ ; RV64IA-LABEL: atomicrmw_and_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -1490,7 +1490,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -1555,7 +1555,7 @@ ; RV64IA-LABEL: atomicrmw_or_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srli a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -1609,7 +1609,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srli a1, a1, 48 ; RV64IA-NEXT: sllw a1, a1, a0 @@ -1745,7 +1745,7 @@ ; RV64IA-LABEL: atomicrmw_max_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: lui a4, 16 ; 
RV64IA-NEXT: addiw a4, a4, -1 @@ -1901,7 +1901,7 @@ ; RV64IA-LABEL: atomicrmw_min_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: andi a3, a0, 24 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 @@ -2055,7 +2055,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -2203,7 +2203,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -3802,7 +3802,7 @@ ; RV64IA-LABEL: cmpxchg_i8_monotonic_monotonic_val0: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -3885,7 +3885,7 @@ ; RV64IA-LABEL: cmpxchg_i8_monotonic_monotonic_val1: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: li a4, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -3971,7 +3971,7 @@ ; RV64IA-LABEL: cmpxchg_i16_monotonic_monotonic_val0: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -4056,7 +4056,7 @@ ; RV64IA-LABEL: cmpxchg_i16_monotonic_monotonic_val1: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slliw a0, a0, 3 +; RV64IA-NEXT: slli a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 diff --git 
a/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll --- a/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll +++ b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll @@ -1512,7 +1512,7 @@ ; RV64ZBB-LABEL: pr55484: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: srli a1, a0, 8 -; RV64ZBB-NEXT: slliw a0, a0, 8 +; RV64ZBB-NEXT: slli a0, a0, 8 ; RV64ZBB-NEXT: or a0, a1, a0 ; RV64ZBB-NEXT: sext.h a0, a0 ; RV64ZBB-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll @@ -53,15 +53,15 @@ ; RV64I-NEXT: andi a0, a0, 255 ; RV64I-NEXT: slli a1, a1, 48 ; RV64I-NEXT: srli a1, a1, 48 -; RV64I-NEXT: addw a0, a0, a1 -; RV64I-NEXT: addw a0, a0, a2 +; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: add a0, a0, a2 ; RV64I-NEXT: xor a1, a4, t1 ; RV64I-NEXT: xor a2, a3, a7 ; RV64I-NEXT: or a1, a2, a1 ; RV64I-NEXT: seqz a1, a1 -; RV64I-NEXT: addw a0, a1, a0 -; RV64I-NEXT: addw a0, a0, a5 -; RV64I-NEXT: addw a0, a0, a6 +; RV64I-NEXT: add a0, a1, a0 +; RV64I-NEXT: add a0, a0, a5 +; RV64I-NEXT: add a0, a0, a6 ; RV64I-NEXT: addw a0, a0, t0 ; RV64I-NEXT: ret %a_ext = zext i8 %a to i32 diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll --- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll @@ -65,7 +65,7 @@ ; RV64NOZBB-NEXT: andi a0, a0, 51 ; RV64NOZBB-NEXT: add a0, a1, a0 ; RV64NOZBB-NEXT: srli a1, a0, 4 -; RV64NOZBB-NEXT: addw a0, a0, a1 +; RV64NOZBB-NEXT: add a0, a0, a1 ; RV64NOZBB-NEXT: andi a0, a0, 15 ; RV64NOZBB-NEXT: ret ; RV64NOZBB-NEXT: .LBB0_2: @@ -140,7 +140,7 @@ ; RV64NOZBB-NEXT: and a0, a0, a1 ; RV64NOZBB-NEXT: add a0, a2, a0 ; RV64NOZBB-NEXT: srli a1, a0, 4 -; RV64NOZBB-NEXT: addw a0, a0, a1 +; RV64NOZBB-NEXT: add a0, a0, a1 ; RV64NOZBB-NEXT: 
andi a1, a0, 15 ; RV64NOZBB-NEXT: slli a0, a0, 52 ; RV64NOZBB-NEXT: srli a0, a0, 60 @@ -445,7 +445,7 @@ ; RV64NOZBB-NEXT: andi a0, a0, 51 ; RV64NOZBB-NEXT: add a0, a1, a0 ; RV64NOZBB-NEXT: srli a1, a0, 4 -; RV64NOZBB-NEXT: addw a0, a0, a1 +; RV64NOZBB-NEXT: add a0, a0, a1 ; RV64NOZBB-NEXT: andi a0, a0, 15 ; RV64NOZBB-NEXT: ret ; @@ -504,7 +504,7 @@ ; RV64NOZBB-NEXT: and a0, a0, a1 ; RV64NOZBB-NEXT: add a0, a2, a0 ; RV64NOZBB-NEXT: srli a1, a0, 4 -; RV64NOZBB-NEXT: addw a0, a0, a1 +; RV64NOZBB-NEXT: add a0, a0, a1 ; RV64NOZBB-NEXT: andi a1, a0, 15 ; RV64NOZBB-NEXT: slli a0, a0, 52 ; RV64NOZBB-NEXT: srli a0, a0, 60 @@ -783,7 +783,7 @@ ; RV64NOZBB-NEXT: andi a0, a0, 51 ; RV64NOZBB-NEXT: add a0, a1, a0 ; RV64NOZBB-NEXT: srli a1, a0, 4 -; RV64NOZBB-NEXT: addw a0, a0, a1 +; RV64NOZBB-NEXT: add a0, a0, a1 ; RV64NOZBB-NEXT: andi a0, a0, 15 ; RV64NOZBB-NEXT: ret ; RV64NOZBB-NEXT: .LBB8_2: @@ -878,7 +878,7 @@ ; RV64NOZBB-NEXT: and a0, a0, a1 ; RV64NOZBB-NEXT: add a0, a2, a0 ; RV64NOZBB-NEXT: srli a1, a0, 4 -; RV64NOZBB-NEXT: addw a0, a0, a1 +; RV64NOZBB-NEXT: add a0, a0, a1 ; RV64NOZBB-NEXT: andi a1, a0, 15 ; RV64NOZBB-NEXT: slli a0, a0, 52 ; RV64NOZBB-NEXT: srli a0, a0, 60 @@ -1063,7 +1063,7 @@ ; RV64M-NEXT: and a0, a0, a1 ; RV64M-NEXT: add a0, a2, a0 ; RV64M-NEXT: srli a1, a0, 4 -; RV64M-NEXT: addw a0, a0, a1 +; RV64M-NEXT: add a0, a0, a1 ; RV64M-NEXT: lui a1, 61681 ; RV64M-NEXT: addiw a1, a1, -241 ; RV64M-NEXT: and a0, a0, a1 @@ -1397,7 +1397,7 @@ ; RV64NOZBB-NEXT: andi a0, a0, 51 ; RV64NOZBB-NEXT: add a0, a1, a0 ; RV64NOZBB-NEXT: srli a1, a0, 4 -; RV64NOZBB-NEXT: addw a0, a0, a1 +; RV64NOZBB-NEXT: add a0, a0, a1 ; RV64NOZBB-NEXT: andi a0, a0, 15 ; RV64NOZBB-NEXT: ret ; @@ -1480,7 +1480,7 @@ ; RV64NOZBB-NEXT: and a0, a0, a1 ; RV64NOZBB-NEXT: add a0, a2, a0 ; RV64NOZBB-NEXT: srli a1, a0, 4 -; RV64NOZBB-NEXT: addw a0, a0, a1 +; RV64NOZBB-NEXT: add a0, a0, a1 ; RV64NOZBB-NEXT: andi a1, a0, 15 ; RV64NOZBB-NEXT: slli a0, a0, 52 ; RV64NOZBB-NEXT: srli a0, a0, 60 @@ -1643,7 
+1643,7 @@ ; RV64M-NEXT: and a0, a0, a1 ; RV64M-NEXT: add a0, a2, a0 ; RV64M-NEXT: srli a1, a0, 4 -; RV64M-NEXT: addw a0, a0, a1 +; RV64M-NEXT: add a0, a0, a1 ; RV64M-NEXT: lui a1, 61681 ; RV64M-NEXT: addiw a1, a1, -241 ; RV64M-NEXT: and a0, a0, a1 @@ -1944,7 +1944,7 @@ ; RV64NOZBB-NEXT: andi a0, a0, 51 ; RV64NOZBB-NEXT: add a0, a1, a0 ; RV64NOZBB-NEXT: srli a1, a0, 4 -; RV64NOZBB-NEXT: addw a0, a0, a1 +; RV64NOZBB-NEXT: add a0, a0, a1 ; RV64NOZBB-NEXT: andi a0, a0, 15 ; RV64NOZBB-NEXT: ret ; @@ -1999,7 +1999,7 @@ ; RV64NOZBB-NEXT: and a0, a0, a1 ; RV64NOZBB-NEXT: add a0, a2, a0 ; RV64NOZBB-NEXT: srli a1, a0, 4 -; RV64NOZBB-NEXT: addw a0, a0, a1 +; RV64NOZBB-NEXT: add a0, a0, a1 ; RV64NOZBB-NEXT: andi a1, a0, 15 ; RV64NOZBB-NEXT: slli a0, a0, 52 ; RV64NOZBB-NEXT: srli a0, a0, 60 @@ -2116,7 +2116,7 @@ ; RV64M-NEXT: and a0, a0, a1 ; RV64M-NEXT: add a0, a2, a0 ; RV64M-NEXT: srli a1, a0, 4 -; RV64M-NEXT: addw a0, a0, a1 +; RV64M-NEXT: add a0, a0, a1 ; RV64M-NEXT: lui a1, 61681 ; RV64M-NEXT: addiw a1, a1, -241 ; RV64M-NEXT: and a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/div-by-constant.ll b/llvm/test/CodeGen/RISCV/div-by-constant.ll --- a/llvm/test/CodeGen/RISCV/div-by-constant.ll +++ b/llvm/test/CodeGen/RISCV/div-by-constant.ll @@ -323,7 +323,7 @@ ; RV64-NEXT: addiw a2, a2, 1171 ; RV64-NEXT: mul a1, a1, a2 ; RV64-NEXT: srli a1, a1, 32 -; RV64-NEXT: addw a0, a1, a0 +; RV64-NEXT: add a0, a1, a0 ; RV64-NEXT: srliw a1, a0, 31 ; RV64-NEXT: sraiw a0, a0, 2 ; RV64-NEXT: add a0, a0, a1 @@ -597,7 +597,7 @@ ; RV64IM-NEXT: li a2, -109 ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: srli a1, a1, 8 -; RV64IM-NEXT: addw a0, a1, a0 +; RV64IM-NEXT: add a0, a1, a0 ; RV64IM-NEXT: slli a0, a0, 56 ; RV64IM-NEXT: srli a1, a0, 63 ; RV64IM-NEXT: srai a0, a0, 58 @@ -610,7 +610,7 @@ ; RV64IMZB-NEXT: li a2, -109 ; RV64IMZB-NEXT: mul a1, a1, a2 ; RV64IMZB-NEXT: srli a1, a1, 8 -; RV64IMZB-NEXT: addw a0, a1, a0 +; RV64IMZB-NEXT: add a0, a1, a0 ; RV64IMZB-NEXT: slli a0, a0, 56 ; RV64IMZB-NEXT: 
srli a1, a0, 63 ; RV64IMZB-NEXT: srai a0, a0, 58 @@ -816,7 +816,7 @@ ; RV64IM-NEXT: addiw a2, a2, -1911 ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: srli a1, a1, 16 -; RV64IM-NEXT: addw a0, a1, a0 +; RV64IM-NEXT: add a0, a1, a0 ; RV64IM-NEXT: slli a0, a0, 48 ; RV64IM-NEXT: srli a1, a0, 63 ; RV64IM-NEXT: srai a0, a0, 51 @@ -830,7 +830,7 @@ ; RV64IMZB-NEXT: addiw a2, a2, -1911 ; RV64IMZB-NEXT: mul a1, a1, a2 ; RV64IMZB-NEXT: srli a1, a1, 16 -; RV64IMZB-NEXT: addw a0, a1, a0 +; RV64IMZB-NEXT: add a0, a1, a0 ; RV64IMZB-NEXT: slli a0, a0, 48 ; RV64IMZB-NEXT: srli a1, a0, 63 ; RV64IMZB-NEXT: srai a0, a0, 51 diff --git a/llvm/test/CodeGen/RISCV/div-pow2.ll b/llvm/test/CodeGen/RISCV/div-pow2.ll --- a/llvm/test/CodeGen/RISCV/div-pow2.ll +++ b/llvm/test/CodeGen/RISCV/div-pow2.ll @@ -15,7 +15,7 @@ ; RV64I-LABEL: sdiv32_pow2_2: ; RV64I: # %bb.0: # %entry ; RV64I-NEXT: srliw a1, a0, 31 -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: sraiw a0, a0, 1 ; RV64I-NEXT: ret entry: @@ -35,7 +35,7 @@ ; RV64I-LABEL: sdiv32_pow2_negative_2: ; RV64I: # %bb.0: # %entry ; RV64I-NEXT: srliw a1, a0, 31 -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: sraiw a0, a0, 1 ; RV64I-NEXT: neg a0, a0 ; RV64I-NEXT: ret @@ -57,7 +57,7 @@ ; RV64I: # %bb.0: # %entry ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: srliw a1, a1, 21 -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: sraiw a0, a0, 11 ; RV64I-NEXT: ret entry: @@ -79,7 +79,7 @@ ; RV64I: # %bb.0: # %entry ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: srliw a1, a1, 21 -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: sraiw a0, a0, 11 ; RV64I-NEXT: neg a0, a0 ; RV64I-NEXT: ret @@ -101,7 +101,7 @@ ; RV64I: # %bb.0: # %entry ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: srliw a1, a1, 20 -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: sraiw a0, a0, 12 ; RV64I-NEXT: ret entry: @@ -123,7 +123,7 @@ ; RV64I: # %bb.0: # %entry ; 
RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: srliw a1, a1, 20 -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: sraiw a0, a0, 12 ; RV64I-NEXT: neg a0, a0 ; RV64I-NEXT: ret @@ -145,7 +145,7 @@ ; RV64I: # %bb.0: # %entry ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: srliw a1, a1, 16 -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: sraiw a0, a0, 16 ; RV64I-NEXT: ret entry: @@ -167,7 +167,7 @@ ; RV64I: # %bb.0: # %entry ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: srliw a1, a1, 16 -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: sraiw a0, a0, 16 ; RV64I-NEXT: neg a0, a0 ; RV64I-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll --- a/llvm/test/CodeGen/RISCV/div.ll +++ b/llvm/test/CodeGen/RISCV/div.ll @@ -667,7 +667,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: srliw a1, a1, 29 -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: sraiw a0, a0, 3 ; RV64I-NEXT: ret ; @@ -675,7 +675,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: sraiw a1, a0, 31 ; RV64IM-NEXT: srliw a1, a1, 29 -; RV64IM-NEXT: addw a0, a0, a1 +; RV64IM-NEXT: add a0, a0, a1 ; RV64IM-NEXT: sraiw a0, a0, 3 ; RV64IM-NEXT: ret %1 = sdiv i32 %a, 8 @@ -703,7 +703,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: srliw a1, a1, 16 -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: sraiw a0, a0, 16 ; RV64I-NEXT: ret ; @@ -711,7 +711,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: sraiw a1, a0, 31 ; RV64IM-NEXT: srliw a1, a1, 16 -; RV64IM-NEXT: addw a0, a0, a1 +; RV64IM-NEXT: add a0, a0, a1 ; RV64IM-NEXT: sraiw a0, a0, 16 ; RV64IM-NEXT: ret %1 = sdiv i32 %a, 65536 @@ -1040,7 +1040,7 @@ ; RV64I-NEXT: srai a1, a1, 56 ; RV64I-NEXT: slli a1, a1, 49 ; RV64I-NEXT: srli a1, a1, 61 -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 59 ; RV64I-NEXT: ret @@ -1051,7 +1051,7 @@ ; 
RV64IM-NEXT: srai a1, a1, 56 ; RV64IM-NEXT: slli a1, a1, 49 ; RV64IM-NEXT: srli a1, a1, 61 -; RV64IM-NEXT: addw a0, a0, a1 +; RV64IM-NEXT: add a0, a0, a1 ; RV64IM-NEXT: slli a0, a0, 56 ; RV64IM-NEXT: srai a0, a0, 59 ; RV64IM-NEXT: ret @@ -1232,7 +1232,7 @@ ; RV64I-NEXT: srai a1, a1, 48 ; RV64I-NEXT: slli a1, a1, 33 ; RV64I-NEXT: srli a1, a1, 61 -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 51 ; RV64I-NEXT: ret @@ -1243,7 +1243,7 @@ ; RV64IM-NEXT: srai a1, a1, 48 ; RV64IM-NEXT: slli a1, a1, 33 ; RV64IM-NEXT: srli a1, a1, 61 -; RV64IM-NEXT: addw a0, a0, a1 +; RV64IM-NEXT: add a0, a0, a1 ; RV64IM-NEXT: slli a0, a0, 48 ; RV64IM-NEXT: srai a0, a0, 51 ; RV64IM-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/fpenv.ll b/llvm/test/CodeGen/RISCV/fpenv.ll --- a/llvm/test/CodeGen/RISCV/fpenv.ll +++ b/llvm/test/CodeGen/RISCV/fpenv.ll @@ -16,7 +16,7 @@ ; RV64IF-LABEL: func_01: ; RV64IF: # %bb.0: ; RV64IF-NEXT: frrm a0 -; RV64IF-NEXT: slliw a0, a0, 2 +; RV64IF-NEXT: slli a0, a0, 2 ; RV64IF-NEXT: lui a1, 66 ; RV64IF-NEXT: addiw a1, a1, 769 ; RV64IF-NEXT: srl a0, a1, a0 diff --git a/llvm/test/CodeGen/RISCV/machine-cse.ll b/llvm/test/CodeGen/RISCV/machine-cse.ll --- a/llvm/test/CodeGen/RISCV/machine-cse.ll +++ b/llvm/test/CodeGen/RISCV/machine-cse.ll @@ -19,7 +19,7 @@ ; ; RV64-LABEL: commute_add_i32: ; RV64: # %bb.0: -; RV64-NEXT: addw a0, a0, a1 +; RV64-NEXT: add a0, a0, a1 ; RV64-NEXT: sw a0, 0(a2) ; RV64-NEXT: beqz a4, .LBB0_2 ; RV64-NEXT: # %bb.1: # %trueblock diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll --- a/llvm/test/CodeGen/RISCV/mul.ll +++ b/llvm/test/CodeGen/RISCV/mul.ll @@ -91,13 +91,13 @@ ; ; RV64I-LABEL: mul_constant: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a1, a0, 2 +; RV64I-NEXT: slli a1, a0, 2 ; RV64I-NEXT: addw a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: mul_constant: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slliw a1, a0, 2 +; RV64IM-NEXT: slli a1, a0, 2 ; RV64IM-NEXT: 
addw a0, a1, a0 ; RV64IM-NEXT: ret %1 = mul i32 %a, 5 @@ -480,13 +480,13 @@ ; ; RV64I-LABEL: muli32_p65: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a1, a0, 6 +; RV64I-NEXT: slli a1, a0, 6 ; RV64I-NEXT: addw a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli32_p65: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slliw a1, a0, 6 +; RV64IM-NEXT: slli a1, a0, 6 ; RV64IM-NEXT: addw a0, a1, a0 ; RV64IM-NEXT: ret %1 = mul i32 %a, 65 @@ -508,13 +508,13 @@ ; ; RV64I-LABEL: muli32_p63: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a1, a0, 6 +; RV64I-NEXT: slli a1, a0, 6 ; RV64I-NEXT: subw a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli32_p63: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slliw a1, a0, 6 +; RV64IM-NEXT: slli a1, a0, 6 ; RV64IM-NEXT: subw a0, a1, a0 ; RV64IM-NEXT: ret %1 = mul i32 %a, 63 @@ -615,13 +615,13 @@ ; ; RV64I-LABEL: muli32_m63: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a1, a0, 6 +; RV64I-NEXT: slli a1, a0, 6 ; RV64I-NEXT: subw a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli32_m63: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slliw a1, a0, 6 +; RV64IM-NEXT: slli a1, a0, 6 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret %1 = mul i32 %a, -63 @@ -645,15 +645,15 @@ ; ; RV64I-LABEL: muli32_m65: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a1, a0, 6 -; RV64I-NEXT: addw a0, a1, a0 +; RV64I-NEXT: slli a1, a0, 6 +; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: negw a0, a0 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli32_m65: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slliw a1, a0, 6 -; RV64IM-NEXT: addw a0, a1, a0 +; RV64IM-NEXT: slli a1, a0, 6 +; RV64IM-NEXT: add a0, a1, a0 ; RV64IM-NEXT: negw a0, a0 ; RV64IM-NEXT: ret %1 = mul i32 %a, -65 @@ -826,15 +826,15 @@ ; ; RV64I-LABEL: muli32_p4352: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a1, a0, 8 -; RV64I-NEXT: slliw a0, a0, 12 +; RV64I-NEXT: slli a1, a0, 8 +; RV64I-NEXT: slli a0, a0, 12 ; RV64I-NEXT: addw a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli32_p4352: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slliw a1, a0, 8 -; RV64IM-NEXT: slliw a0, a0, 12 +; RV64IM-NEXT: slli a1, a0, 
8 +; RV64IM-NEXT: slli a0, a0, 12 ; RV64IM-NEXT: addw a0, a0, a1 ; RV64IM-NEXT: ret %1 = mul i32 %a, 4352 @@ -858,15 +858,15 @@ ; ; RV64I-LABEL: muli32_p3840: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a1, a0, 8 -; RV64I-NEXT: slliw a0, a0, 12 +; RV64I-NEXT: slli a1, a0, 8 +; RV64I-NEXT: slli a0, a0, 12 ; RV64I-NEXT: subw a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli32_p3840: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slliw a1, a0, 8 -; RV64IM-NEXT: slliw a0, a0, 12 +; RV64IM-NEXT: slli a1, a0, 8 +; RV64IM-NEXT: slli a0, a0, 12 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret %1 = mul i32 %a, 3840 @@ -890,15 +890,15 @@ ; ; RV64I-LABEL: muli32_m3840: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a1, a0, 12 -; RV64I-NEXT: slliw a0, a0, 8 +; RV64I-NEXT: slli a1, a0, 12 +; RV64I-NEXT: slli a0, a0, 8 ; RV64I-NEXT: subw a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli32_m3840: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slliw a1, a0, 12 -; RV64IM-NEXT: slliw a0, a0, 8 +; RV64IM-NEXT: slli a1, a0, 12 +; RV64IM-NEXT: slli a0, a0, 8 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret %1 = mul i32 %a, -3840 @@ -1539,14 +1539,14 @@ ; ; RV64I-LABEL: muladd_demand: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 1 +; RV64I-NEXT: slli a0, a0, 1 ; RV64I-NEXT: subw a0, a1, a0 ; RV64I-NEXT: andi a0, a0, 15 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muladd_demand: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slliw a0, a0, 1 +; RV64IM-NEXT: slli a0, a0, 1 ; RV64IM-NEXT: subw a0, a1, a0 ; RV64IM-NEXT: andi a0, a0, 15 ; RV64IM-NEXT: ret @@ -1573,15 +1573,15 @@ ; ; RV64I-LABEL: mulsub_demand: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 1 -; RV64I-NEXT: addw a0, a1, a0 +; RV64I-NEXT: slli a0, a0, 1 +; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: andi a0, a0, 15 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: mulsub_demand: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slliw a0, a0, 1 -; RV64IM-NEXT: addw a0, a1, a0 +; RV64IM-NEXT: slli a0, a0, 1 +; RV64IM-NEXT: add a0, a1, a0 ; RV64IM-NEXT: andi a0, a0, 15 ; RV64IM-NEXT: ret %m = mul i8 %x, 14 @@ 
-1607,14 +1607,14 @@ ; ; RV64I-LABEL: muladd_demand_2: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 1 +; RV64I-NEXT: slli a0, a0, 1 ; RV64I-NEXT: subw a1, a1, a0 ; RV64I-NEXT: ori a0, a1, -16 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muladd_demand_2: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slliw a0, a0, 1 +; RV64IM-NEXT: slli a0, a0, 1 ; RV64IM-NEXT: subw a1, a1, a0 ; RV64IM-NEXT: ori a0, a1, -16 ; RV64IM-NEXT: ret @@ -1641,15 +1641,15 @@ ; ; RV64I-LABEL: mulsub_demand_2: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 1 -; RV64I-NEXT: addw a0, a1, a0 +; RV64I-NEXT: slli a0, a0, 1 +; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: ori a0, a0, -16 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: mulsub_demand_2: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slliw a0, a0, 1 -; RV64IM-NEXT: addw a0, a1, a0 +; RV64IM-NEXT: slli a0, a0, 1 +; RV64IM-NEXT: add a0, a1, a0 ; RV64IM-NEXT: ori a0, a0, -16 ; RV64IM-NEXT: ret %m = mul i8 %x, 14 diff --git a/llvm/test/CodeGen/RISCV/pr58511.ll b/llvm/test/CodeGen/RISCV/pr58511.ll --- a/llvm/test/CodeGen/RISCV/pr58511.ll +++ b/llvm/test/CodeGen/RISCV/pr58511.ll @@ -4,8 +4,8 @@ define i32 @f(i1 %0, i32 %1, ptr %2) { ; CHECK-LABEL: f: ; CHECK: # %bb.0: # %BB -; CHECK-NEXT: slliw a3, a1, 11 -; CHECK-NEXT: slliw a1, a1, 12 +; CHECK-NEXT: slli a3, a1, 11 +; CHECK-NEXT: slli a1, a1, 12 ; CHECK-NEXT: subw a1, a1, a3 ; CHECK-NEXT: slli a0, a0, 63 ; CHECK-NEXT: srai a0, a0, 63 @@ -25,8 +25,8 @@ define i32 @g(i1 %0, i32 %1, ptr %2) { ; CHECK-LABEL: g: ; CHECK: # %bb.0: # %BB -; CHECK-NEXT: slliw a3, a1, 11 -; CHECK-NEXT: slliw a1, a1, 12 +; CHECK-NEXT: slli a3, a1, 11 +; CHECK-NEXT: slli a1, a1, 12 ; CHECK-NEXT: subw a1, a1, a3 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: addi a0, a0, -1 @@ -46,8 +46,8 @@ define i32 @h(i1 %0, i32 %1, ptr %2) { ; CHECK-LABEL: h: ; CHECK: # %bb.0: # %BB -; CHECK-NEXT: slliw a3, a1, 11 -; CHECK-NEXT: slliw a1, a1, 12 +; CHECK-NEXT: slli a3, a1, 11 +; CHECK-NEXT: slli a1, a1, 12 ; CHECK-NEXT: subw a1, a1, a3 ; CHECK-NEXT: andi a0, a0, 1 ; CHECK-NEXT: 
slli a0, a0, 11 @@ -65,8 +65,8 @@ ; CHECK-LABEL: i: ; CHECK: # %bb.0: # %BB ; CHECK-NEXT: andi a0, a0, 1 -; CHECK-NEXT: slliw a3, a1, 11 -; CHECK-NEXT: slliw a1, a1, 12 +; CHECK-NEXT: slli a3, a1, 11 +; CHECK-NEXT: slli a1, a1, 12 ; CHECK-NEXT: subw a1, a1, a3 ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: li a3, 1 diff --git a/llvm/test/CodeGen/RISCV/rem.ll b/llvm/test/CodeGen/RISCV/rem.ll --- a/llvm/test/CodeGen/RISCV/rem.ll +++ b/llvm/test/CodeGen/RISCV/rem.ll @@ -125,7 +125,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: srliw a1, a1, 29 -; RV64I-NEXT: addw a1, a0, a1 +; RV64I-NEXT: add a1, a0, a1 ; RV64I-NEXT: andi a1, a1, -8 ; RV64I-NEXT: subw a0, a0, a1 ; RV64I-NEXT: ret @@ -134,7 +134,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: sraiw a1, a0, 31 ; RV64IM-NEXT: srliw a1, a1, 29 -; RV64IM-NEXT: addw a1, a0, a1 +; RV64IM-NEXT: add a1, a0, a1 ; RV64IM-NEXT: andi a1, a1, -8 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret @@ -167,7 +167,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: srliw a1, a1, 16 -; RV64I-NEXT: addw a1, a0, a1 +; RV64I-NEXT: add a1, a0, a1 ; RV64I-NEXT: lui a2, 1048560 ; RV64I-NEXT: and a1, a1, a2 ; RV64I-NEXT: subw a0, a0, a1 @@ -177,7 +177,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: sraiw a1, a0, 31 ; RV64IM-NEXT: srliw a1, a1, 16 -; RV64IM-NEXT: addw a1, a0, a1 +; RV64IM-NEXT: add a1, a0, a1 ; RV64IM-NEXT: lui a2, 1048560 ; RV64IM-NEXT: and a1, a1, a2 ; RV64IM-NEXT: subw a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll b/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll --- a/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll @@ -11,7 +11,7 @@ ; CHECK-NEXT: mulw a0, a0, a0 ; CHECK-NEXT: addiw a0, a0, 1 ; CHECK-NEXT: mulw a0, a0, a0 -; CHECK-NEXT: addw a0, a0, a2 +; CHECK-NEXT: add a0, a0, a2 ; CHECK-NEXT: addiw a0, a0, 1 ; CHECK-NEXT: sllw a0, a0, a1 ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll 
b/llvm/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll --- a/llvm/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-exhaustive-w-insts.ll @@ -180,14 +180,14 @@ define zeroext i32 @zext_addw_aext_aext(i32 %a, i32 %b) nounwind { ; RV64I-LABEL: zext_addw_aext_aext: ; RV64I: # %bb.0: -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: zext_addw_aext_aext: ; RV64ZBA: # %bb.0: -; RV64ZBA-NEXT: addw a0, a0, a1 +; RV64ZBA-NEXT: add a0, a0, a1 ; RV64ZBA-NEXT: zext.w a0, a0 ; RV64ZBA-NEXT: ret %1 = add i32 %a, %b @@ -197,14 +197,14 @@ define zeroext i32 @zext_addw_aext_sext(i32 %a, i32 signext %b) nounwind { ; RV64I-LABEL: zext_addw_aext_sext: ; RV64I: # %bb.0: -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: zext_addw_aext_sext: ; RV64ZBA: # %bb.0: -; RV64ZBA-NEXT: addw a0, a0, a1 +; RV64ZBA-NEXT: add a0, a0, a1 ; RV64ZBA-NEXT: zext.w a0, a0 ; RV64ZBA-NEXT: ret %1 = add i32 %a, %b @@ -214,14 +214,14 @@ define zeroext i32 @zext_addw_aext_zext(i32 %a, i32 zeroext %b) nounwind { ; RV64I-LABEL: zext_addw_aext_zext: ; RV64I: # %bb.0: -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: zext_addw_aext_zext: ; RV64ZBA: # %bb.0: -; RV64ZBA-NEXT: addw a0, a0, a1 +; RV64ZBA-NEXT: add a0, a0, a1 ; RV64ZBA-NEXT: zext.w a0, a0 ; RV64ZBA-NEXT: ret %1 = add i32 %a, %b @@ -231,14 +231,14 @@ define zeroext i32 @zext_addw_sext_aext(i32 signext %a, i32 %b) nounwind { ; RV64I-LABEL: zext_addw_sext_aext: ; RV64I: # %bb.0: -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: zext_addw_sext_aext: ; RV64ZBA: # %bb.0: -; RV64ZBA-NEXT: addw a0, a0, a1 
+; RV64ZBA-NEXT: add a0, a0, a1 ; RV64ZBA-NEXT: zext.w a0, a0 ; RV64ZBA-NEXT: ret %1 = add i32 %a, %b @@ -248,14 +248,14 @@ define zeroext i32 @zext_addw_sext_sext(i32 signext %a, i32 signext %b) nounwind { ; RV64I-LABEL: zext_addw_sext_sext: ; RV64I: # %bb.0: -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: zext_addw_sext_sext: ; RV64ZBA: # %bb.0: -; RV64ZBA-NEXT: addw a0, a0, a1 +; RV64ZBA-NEXT: add a0, a0, a1 ; RV64ZBA-NEXT: zext.w a0, a0 ; RV64ZBA-NEXT: ret %1 = add i32 %a, %b @@ -265,14 +265,14 @@ define zeroext i32 @zext_addw_sext_zext(i32 signext %a, i32 zeroext %b) nounwind { ; RV64I-LABEL: zext_addw_sext_zext: ; RV64I: # %bb.0: -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: zext_addw_sext_zext: ; RV64ZBA: # %bb.0: -; RV64ZBA-NEXT: addw a0, a0, a1 +; RV64ZBA-NEXT: add a0, a0, a1 ; RV64ZBA-NEXT: zext.w a0, a0 ; RV64ZBA-NEXT: ret %1 = add i32 %a, %b @@ -282,14 +282,14 @@ define zeroext i32 @zext_addw_zext_aext(i32 zeroext %a, i32 %b) nounwind { ; RV64I-LABEL: zext_addw_zext_aext: ; RV64I: # %bb.0: -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: zext_addw_zext_aext: ; RV64ZBA: # %bb.0: -; RV64ZBA-NEXT: addw a0, a0, a1 +; RV64ZBA-NEXT: add a0, a0, a1 ; RV64ZBA-NEXT: zext.w a0, a0 ; RV64ZBA-NEXT: ret %1 = add i32 %a, %b @@ -299,14 +299,14 @@ define zeroext i32 @zext_addw_zext_sext(i32 zeroext %a, i32 signext %b) nounwind { ; RV64I-LABEL: zext_addw_zext_sext: ; RV64I: # %bb.0: -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: zext_addw_zext_sext: ; RV64ZBA: # %bb.0: -; RV64ZBA-NEXT: addw a0, a0, a1 +; RV64ZBA-NEXT: add a0, a0, a1 ; 
RV64ZBA-NEXT: zext.w a0, a0 ; RV64ZBA-NEXT: ret %1 = add i32 %a, %b @@ -316,14 +316,14 @@ define zeroext i32 @zext_addw_zext_zext(i32 zeroext %a, i32 zeroext %b) nounwind { ; RV64I-LABEL: zext_addw_zext_zext: ; RV64I: # %bb.0: -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: ret ; ; RV64ZBA-LABEL: zext_addw_zext_zext: ; RV64ZBA: # %bb.0: -; RV64ZBA-NEXT: addw a0, a0, a1 +; RV64ZBA-NEXT: add a0, a0, a1 ; RV64ZBA-NEXT: zext.w a0, a0 ; RV64ZBA-NEXT: ret %1 = add i32 %a, %b diff --git a/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll b/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll --- a/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll @@ -7,7 +7,7 @@ ; CHECK-NEXT: bge a0, a1, .LBB0_2 ; CHECK-NEXT: # %bb.1: # %for.body.preheader ; CHECK-NEXT: not a2, a0 -; CHECK-NEXT: addw a2, a2, a1 +; CHECK-NEXT: add a2, a2, a1 ; CHECK-NEXT: addiw a3, a0, 1 ; CHECK-NEXT: mulw a3, a2, a3 ; CHECK-NEXT: subw a1, a1, a0 @@ -16,7 +16,7 @@ ; CHECK-NEXT: slli a2, a2, 32 ; CHECK-NEXT: mulhu a1, a2, a1 ; CHECK-NEXT: srli a1, a1, 1 -; CHECK-NEXT: addw a0, a3, a0 +; CHECK-NEXT: add a0, a3, a0 ; CHECK-NEXT: addw a0, a0, a1 ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB0_2: @@ -53,7 +53,7 @@ ; CHECK-NEXT: bge a0, a1, .LBB1_2 ; CHECK-NEXT: # %bb.1: # %for.body.preheader ; CHECK-NEXT: not a2, a0 -; CHECK-NEXT: addw a3, a2, a1 +; CHECK-NEXT: add a3, a2, a1 ; CHECK-NEXT: mulw a2, a3, a2 ; CHECK-NEXT: subw a1, a1, a0 ; CHECK-NEXT: addiw a1, a1, -2 diff --git a/llvm/test/CodeGen/RISCV/rv64zba.ll b/llvm/test/CodeGen/RISCV/rv64zba.ll --- a/llvm/test/CodeGen/RISCV/rv64zba.ll +++ b/llvm/test/CodeGen/RISCV/rv64zba.ll @@ -293,8 +293,8 @@ define i64 @sh2add_extra_sext(i32 %x, i32 %y, i32 %z) { ; RV64I-LABEL: sh2add_extra_sext: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 2 -; RV64I-NEXT: addw a0, a0, a1 +; RV64I-NEXT: slli a0, a0, 2 +; RV64I-NEXT: add a0, 
a0, a1 ; RV64I-NEXT: sllw a1, a2, a0 ; RV64I-NEXT: sraiw a0, a0, 2 ; RV64I-NEXT: mul a0, a1, a0 @@ -1141,8 +1141,8 @@ define signext i32 @addshl32_5_6(i32 signext %a, i32 signext %b) { ; RV64I-LABEL: addshl32_5_6: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 5 -; RV64I-NEXT: slliw a1, a1, 6 +; RV64I-NEXT: slli a0, a0, 5 +; RV64I-NEXT: slli a1, a1, 6 ; RV64I-NEXT: addw a0, a0, a1 ; RV64I-NEXT: ret ; @@ -1179,8 +1179,8 @@ define signext i32 @addshl32_5_7(i32 signext %a, i32 signext %b) { ; RV64I-LABEL: addshl32_5_7: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 5 -; RV64I-NEXT: slliw a1, a1, 7 +; RV64I-NEXT: slli a0, a0, 5 +; RV64I-NEXT: slli a1, a1, 7 ; RV64I-NEXT: addw a0, a0, a1 ; RV64I-NEXT: ret ; @@ -1217,8 +1217,8 @@ define signext i32 @addshl32_5_8(i32 signext %a, i32 signext %b) { ; RV64I-LABEL: addshl32_5_8: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 5 -; RV64I-NEXT: slliw a1, a1, 8 +; RV64I-NEXT: slli a0, a0, 5 +; RV64I-NEXT: slli a1, a1, 8 ; RV64I-NEXT: addw a0, a0, a1 ; RV64I-NEXT: ret ; diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll --- a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll @@ -279,7 +279,7 @@ ; RV64I-LABEL: rori_i32_fshl_nosext: ; RV64I: # %bb.0: ; RV64I-NEXT: srliw a2, a0, 1 -; RV64I-NEXT: slliw a0, a0, 31 +; RV64I-NEXT: slli a0, a0, 31 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: sw a0, 0(a1) ; RV64I-NEXT: ret @@ -314,7 +314,7 @@ define void @rori_i32_fshr_nosext(i32 signext %a, ptr %x) nounwind { ; RV64I-LABEL: rori_i32_fshr_nosext: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a2, a0, 1 +; RV64I-NEXT: slli a2, a0, 1 ; RV64I-NEXT: srliw a0, a0, 31 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: sw a0, 0(a1) @@ -352,7 +352,7 @@ define i64 @roriw_bug(i64 %x) nounwind { ; CHECK-LABEL: roriw_bug: ; CHECK: # %bb.0: -; CHECK-NEXT: slliw a1, a0, 31 +; CHECK-NEXT: slli a1, a0, 31 ; CHECK-NEXT: andi a2, a0, -2 ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: or a0, a1, a0 diff 
--git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll --- a/llvm/test/CodeGen/RISCV/rv64zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll @@ -981,8 +981,8 @@ ; RV64I-NEXT: srliw a4, a0, 24 ; RV64I-NEXT: or a2, a2, a4 ; RV64I-NEXT: and a3, a0, a3 -; RV64I-NEXT: slliw a3, a3, 8 -; RV64I-NEXT: slliw a0, a0, 24 +; RV64I-NEXT: slli a3, a3, 8 +; RV64I-NEXT: slli a0, a0, 24 ; RV64I-NEXT: or a0, a0, a3 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: sw a0, 0(a1) diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll --- a/llvm/test/CodeGen/RISCV/rv64zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll @@ -45,7 +45,7 @@ define signext i32 @pack_i32_3(i16 zeroext %0, i16 zeroext %1, i32 signext %2) { ; RV64I-LABEL: pack_i32_3: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 16 +; RV64I-NEXT: slli a0, a0, 16 ; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: addw a0, a0, a2 ; RV64I-NEXT: ret @@ -227,8 +227,8 @@ define zeroext i16 @packh_i16_2(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2) { ; RV64I-LABEL: packh_i16_2: ; RV64I: # %bb.0: -; RV64I-NEXT: addw a0, a1, a0 -; RV64I-NEXT: slliw a0, a0, 8 +; RV64I-NEXT: add a0, a1, a0 +; RV64I-NEXT: slli a0, a0, 8 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srli a0, a0, 48 @@ -236,7 +236,7 @@ ; ; RV64ZBKB-LABEL: packh_i16_2: ; RV64ZBKB: # %bb.0: -; RV64ZBKB-NEXT: addw a0, a1, a0 +; RV64ZBKB-NEXT: add a0, a1, a0 ; RV64ZBKB-NEXT: packh a0, a2, a0 ; RV64ZBKB-NEXT: ret %4 = add i8 %1, %0 @@ -250,7 +250,7 @@ define i64 @pack_i64_allWUsers(i32 signext %0, i32 signext %1, i32 signext %2) { ; RV64I-LABEL: pack_i64_allWUsers: ; RV64I: # %bb.0: -; RV64I-NEXT: addw a0, a1, a0 +; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: slli a2, a2, 32 ; RV64I-NEXT: srli a2, a2, 32 @@ -259,7 +259,7 @@ ; ; RV64ZBKB-LABEL: pack_i64_allWUsers: ; RV64ZBKB: # %bb.0: -; RV64ZBKB-NEXT: addw a0, a1, a0 +; RV64ZBKB-NEXT: add a0, a1, a0 ; RV64ZBKB-NEXT: pack a0, a2, a0 ; 
RV64ZBKB-NEXT: ret %4 = add i32 %1, %0 @@ -273,14 +273,14 @@ define signext i32 @pack_i32_allWUsers(i16 zeroext %0, i16 zeroext %1, i16 zeroext %2) { ; RV64I-LABEL: pack_i32_allWUsers: ; RV64I: # %bb.0: -; RV64I-NEXT: addw a0, a1, a0 +; RV64I-NEXT: add a0, a1, a0 ; RV64I-NEXT: slliw a0, a0, 16 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: ret ; ; RV64ZBKB-LABEL: pack_i32_allWUsers: ; RV64ZBKB: # %bb.0: -; RV64ZBKB-NEXT: addw a0, a1, a0 +; RV64ZBKB-NEXT: add a0, a1, a0 ; RV64ZBKB-NEXT: packw a0, a2, a0 ; RV64ZBKB-NEXT: ret %4 = add i16 %1, %0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll @@ -858,7 +858,7 @@ ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: lb a3, 0(a1) ; CHECK-NEXT: lb a4, 0(a0) -; CHECK-NEXT: addw a3, a4, a3 +; CHECK-NEXT: add a3, a4, a3 ; CHECK-NEXT: sb a3, 0(a0) ; CHECK-NEXT: addiw a2, a2, 1 ; CHECK-NEXT: addi a0, a0, 1 diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll --- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll @@ -366,7 +366,7 @@ ; CHECK-NEXT: .LBB8_6: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: lw a2, 0(a0) -; CHECK-NEXT: addw a2, a2, a1 +; CHECK-NEXT: add a2, a2, a1 ; CHECK-NEXT: sw a2, 0(a0) ; CHECK-NEXT: addi a3, a3, 1 ; CHECK-NEXT: addi a0, a0, 4 @@ -457,7 +457,7 @@ ; CHECK-NEXT: .LBB9_6: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: lw a2, 0(a0) -; CHECK-NEXT: addw a2, a2, a1 +; CHECK-NEXT: add a2, a2, a1 ; CHECK-NEXT: sw a2, 0(a0) ; CHECK-NEXT: addi a3, a3, 1 ; CHECK-NEXT: addi a0, a0, 4 diff --git a/llvm/test/CodeGen/RISCV/sextw-removal.ll b/llvm/test/CodeGen/RISCV/sextw-removal.ll --- 
a/llvm/test/CodeGen/RISCV/sextw-removal.ll +++ b/llvm/test/CodeGen/RISCV/sextw-removal.ll @@ -196,7 +196,7 @@ ; RV64I-NEXT: and a0, a0, s1 ; RV64I-NEXT: add a0, a2, a0 ; RV64I-NEXT: srli a2, a0, 4 -; RV64I-NEXT: addw a0, a0, a2 +; RV64I-NEXT: add a0, a0, a2 ; RV64I-NEXT: and a0, a0, s2 ; RV64I-NEXT: mulw a0, a0, s3 ; RV64I-NEXT: srliw a0, a0, 24 @@ -792,7 +792,7 @@ ; CHECK-NEXT: sext.w a4, a0 ; CHECK-NEXT: blt a3, a4, .LBB14_5 ; CHECK-NEXT: # %bb.3: # in Loop: Header=BB14_2 Depth=1 -; CHECK-NEXT: addw a0, a2, a0 +; CHECK-NEXT: add a0, a2, a0 ; CHECK-NEXT: addiw a2, a2, 1 ; CHECK-NEXT: blt a2, a1, .LBB14_2 ; CHECK-NEXT: .LBB14_4: @@ -814,7 +814,7 @@ ; NOREMOVAL-NEXT: sext.w a4, a0 ; NOREMOVAL-NEXT: blt a3, a4, .LBB14_5 ; NOREMOVAL-NEXT: # %bb.3: # in Loop: Header=BB14_2 Depth=1 -; NOREMOVAL-NEXT: addw a0, a2, a0 +; NOREMOVAL-NEXT: add a0, a2, a0 ; NOREMOVAL-NEXT: addiw a2, a2, 1 ; NOREMOVAL-NEXT: blt a2, a1, .LBB14_2 ; NOREMOVAL-NEXT: .LBB14_4: @@ -858,7 +858,7 @@ ; CHECK-NEXT: sext.w a4, a0 ; CHECK-NEXT: blt a3, a4, .LBB15_5 ; CHECK-NEXT: # %bb.3: # in Loop: Header=BB15_2 Depth=1 -; CHECK-NEXT: addw a0, a2, a0 +; CHECK-NEXT: add a0, a2, a0 ; CHECK-NEXT: addiw a2, a2, 1 ; CHECK-NEXT: blt a2, a1, .LBB15_2 ; CHECK-NEXT: .LBB15_4: @@ -880,7 +880,7 @@ ; NOREMOVAL-NEXT: sext.w a4, a0 ; NOREMOVAL-NEXT: blt a3, a4, .LBB15_5 ; NOREMOVAL-NEXT: # %bb.3: # in Loop: Header=BB15_2 Depth=1 -; NOREMOVAL-NEXT: addw a0, a2, a0 +; NOREMOVAL-NEXT: add a0, a2, a0 ; NOREMOVAL-NEXT: addiw a2, a2, 1 ; NOREMOVAL-NEXT: blt a2, a1, .LBB15_2 ; NOREMOVAL-NEXT: .LBB15_4: @@ -1043,7 +1043,7 @@ ; CHECK-NEXT: .LBB18_6: # %if.end ; CHECK-NEXT: addiw a2, a2, -1 ; CHECK-NEXT: andi a2, a2, -8 -; CHECK-NEXT: addw a1, a1, a2 +; CHECK-NEXT: add a1, a1, a2 ; CHECK-NEXT: srliw a3, a0, 28 ; CHECK-NEXT: snez a2, a3 ; CHECK-NEXT: bnez a3, .LBB18_8 @@ -1052,7 +1052,7 @@ ; CHECK-NEXT: .LBB18_8: # %if.end ; CHECK-NEXT: addiw a2, a2, -1 ; CHECK-NEXT: andi a2, a2, -4 -; CHECK-NEXT: addw a1, a1, a2 +; CHECK-NEXT: 
add a1, a1, a2 ; CHECK-NEXT: srliw a3, a0, 30 ; CHECK-NEXT: snez a2, a3 ; CHECK-NEXT: bnez a3, .LBB18_10 @@ -1061,7 +1061,7 @@ ; CHECK-NEXT: .LBB18_10: # %if.end ; CHECK-NEXT: addiw a2, a2, -1 ; CHECK-NEXT: andi a2, a2, -2 -; CHECK-NEXT: addw a1, a1, a2 +; CHECK-NEXT: add a1, a1, a2 ; CHECK-NEXT: not a0, a0 ; CHECK-NEXT: srli a0, a0, 31 ; CHECK-NEXT: addw a0, a1, a0 @@ -1078,27 +1078,27 @@ ; NOREMOVAL-NEXT: li a1, 32 ; NOREMOVAL-NEXT: j .LBB18_4 ; NOREMOVAL-NEXT: .LBB18_3: -; NOREMOVAL-NEXT: slliw a0, a0, 16 +; NOREMOVAL-NEXT: slli a0, a0, 16 ; NOREMOVAL-NEXT: li a1, 16 ; NOREMOVAL-NEXT: .LBB18_4: # %if.end ; NOREMOVAL-NEXT: srliw a3, a0, 24 ; NOREMOVAL-NEXT: snez a2, a3 ; NOREMOVAL-NEXT: bnez a3, .LBB18_6 ; NOREMOVAL-NEXT: # %bb.5: -; NOREMOVAL-NEXT: slliw a0, a0, 8 +; NOREMOVAL-NEXT: slli a0, a0, 8 ; NOREMOVAL-NEXT: .LBB18_6: # %if.end ; NOREMOVAL-NEXT: addiw a2, a2, -1 ; NOREMOVAL-NEXT: andi a2, a2, -8 -; NOREMOVAL-NEXT: addw a1, a1, a2 +; NOREMOVAL-NEXT: add a1, a1, a2 ; NOREMOVAL-NEXT: srliw a3, a0, 28 ; NOREMOVAL-NEXT: snez a2, a3 ; NOREMOVAL-NEXT: bnez a3, .LBB18_8 ; NOREMOVAL-NEXT: # %bb.7: -; NOREMOVAL-NEXT: slliw a0, a0, 4 +; NOREMOVAL-NEXT: slli a0, a0, 4 ; NOREMOVAL-NEXT: .LBB18_8: # %if.end ; NOREMOVAL-NEXT: addiw a2, a2, -1 ; NOREMOVAL-NEXT: andi a2, a2, -4 -; NOREMOVAL-NEXT: addw a1, a1, a2 +; NOREMOVAL-NEXT: add a1, a1, a2 ; NOREMOVAL-NEXT: srliw a3, a0, 30 ; NOREMOVAL-NEXT: snez a2, a3 ; NOREMOVAL-NEXT: bnez a3, .LBB18_10 @@ -1108,7 +1108,7 @@ ; NOREMOVAL-NEXT: sext.w a0, a0 ; NOREMOVAL-NEXT: addiw a2, a2, -1 ; NOREMOVAL-NEXT: andi a2, a2, -2 -; NOREMOVAL-NEXT: addw a1, a1, a2 +; NOREMOVAL-NEXT: add a1, a1, a2 ; NOREMOVAL-NEXT: not a0, a0 ; NOREMOVAL-NEXT: srli a0, a0, 31 ; NOREMOVAL-NEXT: addw a0, a1, a0 @@ -1333,13 +1333,13 @@ define signext i32 @sextw_sh2add(i1 zeroext %0, ptr %1, i32 signext %2, i32 signext %3, i32 signext %4) { ; RV64I-LABEL: sextw_sh2add: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a2, a2, 2 -; RV64I-NEXT: addw a2, a2, a3 +; 
RV64I-NEXT: slli a2, a2, 2 +; RV64I-NEXT: add a3, a2, a3 ; RV64I-NEXT: beqz a0, .LBB22_2 ; RV64I-NEXT: # %bb.1: -; RV64I-NEXT: sw a2, 0(a1) +; RV64I-NEXT: sw a3, 0(a1) ; RV64I-NEXT: .LBB22_2: -; RV64I-NEXT: addw a0, a2, a4 +; RV64I-NEXT: addw a0, a3, a4 ; RV64I-NEXT: ret ; ; RV64ZBB-LABEL: sextw_sh2add: diff --git a/llvm/test/CodeGen/RISCV/shadowcallstack.ll b/llvm/test/CodeGen/RISCV/shadowcallstack.ll --- a/llvm/test/CodeGen/RISCV/shadowcallstack.ll +++ b/llvm/test/CodeGen/RISCV/shadowcallstack.ll @@ -121,8 +121,8 @@ ; RV64-NEXT: call bar@plt ; RV64-NEXT: mv s3, a0 ; RV64-NEXT: call bar@plt -; RV64-NEXT: addw s0, s0, s1 -; RV64-NEXT: addw a0, s3, a0 +; RV64-NEXT: add s0, s0, s1 +; RV64-NEXT: add a0, s3, a0 ; RV64-NEXT: addw a0, s0, a0 ; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll b/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll --- a/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll +++ b/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll @@ -112,7 +112,7 @@ ; ; RV64I-LABEL: sll_redundant_mask_zeros: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a1, a1, 1 +; RV64I-NEXT: slli a1, a1, 1 ; RV64I-NEXT: sllw a0, a0, a1 ; RV64I-NEXT: ret %1 = shl i32 %b, 1 @@ -130,7 +130,7 @@ ; ; RV64I-LABEL: srl_redundant_mask_zeros: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a1, a1, 2 +; RV64I-NEXT: slli a1, a1, 2 ; RV64I-NEXT: srlw a0, a0, a1 ; RV64I-NEXT: ret %1 = shl i32 %b, 2 @@ -148,7 +148,7 @@ ; ; RV64I-LABEL: sra_redundant_mask_zeros: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a1, a1, 3 +; RV64I-NEXT: slli a1, a1, 3 ; RV64I-NEXT: sraw a0, a0, a1 ; RV64I-NEXT: ret %1 = shl i32 %b, 3 @@ -182,7 +182,7 @@ ; ; RV64I-LABEL: sll_redundant_mask_zeros_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a1, a1, 2 +; RV64I-NEXT: slli a1, a1, 2 ; RV64I-NEXT: sll a0, a0, a1 ; RV64I-NEXT: ret %1 = shl i64 %b, 2 @@ -216,7 +216,7 @@ ; ; RV64I-LABEL: srl_redundant_mask_zeros_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: 
slliw a1, a1, 3 +; RV64I-NEXT: slli a1, a1, 3 ; RV64I-NEXT: srl a0, a0, a1 ; RV64I-NEXT: ret %1 = shl i64 %b, 3 @@ -247,7 +247,7 @@ ; ; RV64I-LABEL: sra_redundant_mask_zeros_i64: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a1, a1, 4 +; RV64I-NEXT: slli a1, a1, 4 ; RV64I-NEXT: sra a0, a0, a1 ; RV64I-NEXT: ret %1 = shl i64 %b, 4 diff --git a/llvm/test/CodeGen/RISCV/shlimm-addimm.ll b/llvm/test/CodeGen/RISCV/shlimm-addimm.ll --- a/llvm/test/CodeGen/RISCV/shlimm-addimm.ll +++ b/llvm/test/CodeGen/RISCV/shlimm-addimm.ll @@ -18,7 +18,7 @@ ; ; RV64I-LABEL: shl5_add1184_a: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 5 +; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: addiw a0, a0, 1184 ; RV64I-NEXT: ret %tmp0 = shl i32 %x, 5 @@ -35,7 +35,7 @@ ; ; RV64I-LABEL: shl5_add1184_b: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 5 +; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: addiw a0, a0, 1184 ; RV64I-NEXT: ret %tmp0 = shl i32 %x, 5 @@ -76,7 +76,7 @@ ; ; RV64I-LABEL: shl5_add101024_a: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 5 +; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: lui a1, 25 ; RV64I-NEXT: addiw a1, a1, -1376 ; RV64I-NEXT: addw a0, a0, a1 @@ -97,7 +97,7 @@ ; ; RV64I-LABEL: shl5_add101024_b: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 5 +; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: lui a1, 25 ; RV64I-NEXT: addiw a1, a1, -1376 ; RV64I-NEXT: addw a0, a0, a1 @@ -144,7 +144,7 @@ ; ; RV64I-LABEL: shl5_add47968_a: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 5 +; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: lui a1, 12 ; RV64I-NEXT: addiw a1, a1, -1184 ; RV64I-NEXT: addw a0, a0, a1 @@ -165,7 +165,7 @@ ; ; RV64I-LABEL: shl5_add47968_b: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 5 +; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: lui a1, 12 ; RV64I-NEXT: addiw a1, a1, -1184 ; RV64I-NEXT: addw a0, a0, a1 @@ -212,7 +212,7 @@ ; ; RV64I-LABEL: shl5_add47969_a: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 5 +; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: lui a1, 12 ; RV64I-NEXT: addiw a1, a1, -1183 
; RV64I-NEXT: addw a0, a0, a1 @@ -233,7 +233,7 @@ ; ; RV64I-LABEL: shl5_add47969_b: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 5 +; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: lui a1, 12 ; RV64I-NEXT: addiw a1, a1, -1183 ; RV64I-NEXT: addw a0, a0, a1 @@ -280,7 +280,7 @@ ; ; RV64I-LABEL: shl5_sub47968_a: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 5 +; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: lui a1, 1048564 ; RV64I-NEXT: addiw a1, a1, 1184 ; RV64I-NEXT: addw a0, a0, a1 @@ -301,7 +301,7 @@ ; ; RV64I-LABEL: shl5_sub47968_b: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 5 +; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: lui a1, 1048564 ; RV64I-NEXT: addiw a1, a1, 1184 ; RV64I-NEXT: addw a0, a0, a1 @@ -349,7 +349,7 @@ ; ; RV64I-LABEL: shl5_sub47969_a: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 5 +; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: lui a1, 1048564 ; RV64I-NEXT: addiw a1, a1, 1183 ; RV64I-NEXT: addw a0, a0, a1 @@ -370,7 +370,7 @@ ; ; RV64I-LABEL: shl5_sub47969_b: ; RV64I: # %bb.0: -; RV64I-NEXT: slliw a0, a0, 5 +; RV64I-NEXT: slli a0, a0, 5 ; RV64I-NEXT: lui a1, 1048564 ; RV64I-NEXT: addiw a1, a1, 1183 ; RV64I-NEXT: addw a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/srem-lkk.ll b/llvm/test/CodeGen/RISCV/srem-lkk.ll --- a/llvm/test/CodeGen/RISCV/srem-lkk.ll +++ b/llvm/test/CodeGen/RISCV/srem-lkk.ll @@ -46,10 +46,10 @@ ; RV64IM-NEXT: addiw a2, a2, 389 ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: srli a1, a1, 32 -; RV64IM-NEXT: addw a1, a1, a0 +; RV64IM-NEXT: add a1, a1, a0 ; RV64IM-NEXT: srliw a2, a1, 31 ; RV64IM-NEXT: sraiw a1, a1, 6 -; RV64IM-NEXT: addw a1, a1, a2 +; RV64IM-NEXT: add a1, a1, a2 ; RV64IM-NEXT: li a2, 95 ; RV64IM-NEXT: mulw a1, a1, a2 ; RV64IM-NEXT: subw a0, a0, a1 @@ -97,7 +97,7 @@ ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: srli a2, a1, 63 ; RV64IM-NEXT: srai a1, a1, 40 -; RV64IM-NEXT: addw a1, a1, a2 +; RV64IM-NEXT: add a1, a1, a2 ; RV64IM-NEXT: li a2, 1060 ; RV64IM-NEXT: mulw a1, a1, a2 ; RV64IM-NEXT: subw a0, a0, a1 @@ -145,7 +145,7 @@ 
; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: srli a2, a1, 63 ; RV64IM-NEXT: srai a1, a1, 40 -; RV64IM-NEXT: addw a1, a1, a2 +; RV64IM-NEXT: add a1, a1, a2 ; RV64IM-NEXT: li a2, -723 ; RV64IM-NEXT: mulw a1, a1, a2 ; RV64IM-NEXT: subw a0, a0, a1 @@ -196,7 +196,7 @@ ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: srli a2, a1, 63 ; RV64IM-NEXT: srai a1, a1, 40 -; RV64IM-NEXT: addw a1, a1, a2 +; RV64IM-NEXT: add a1, a1, a2 ; RV64IM-NEXT: lui a2, 1048570 ; RV64IM-NEXT: addiw a2, a2, 1595 ; RV64IM-NEXT: mulw a1, a1, a2 @@ -272,10 +272,10 @@ ; RV64IM-NEXT: addiw a2, a2, 389 ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: srli a1, a1, 32 -; RV64IM-NEXT: addw a1, a1, a0 +; RV64IM-NEXT: add a1, a1, a0 ; RV64IM-NEXT: srliw a2, a1, 31 ; RV64IM-NEXT: sraiw a1, a1, 6 -; RV64IM-NEXT: addw a1, a1, a2 +; RV64IM-NEXT: add a1, a1, a2 ; RV64IM-NEXT: li a2, 95 ; RV64IM-NEXT: mulw a2, a1, a2 ; RV64IM-NEXT: subw a0, a0, a2 @@ -311,7 +311,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: srliw a1, a1, 26 -; RV64I-NEXT: addw a1, a0, a1 +; RV64I-NEXT: add a1, a0, a1 ; RV64I-NEXT: andi a1, a1, -64 ; RV64I-NEXT: subw a0, a0, a1 ; RV64I-NEXT: ret @@ -320,7 +320,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: sraiw a1, a0, 31 ; RV64IM-NEXT: srliw a1, a1, 26 -; RV64IM-NEXT: addw a1, a0, a1 +; RV64IM-NEXT: add a1, a0, a1 ; RV64IM-NEXT: andi a1, a1, -64 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret @@ -364,7 +364,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: srliw a1, a1, 1 -; RV64I-NEXT: addw a1, a0, a1 +; RV64I-NEXT: add a1, a0, a1 ; RV64I-NEXT: lui a2, 524288 ; RV64I-NEXT: and a1, a1, a2 ; RV64I-NEXT: addw a0, a0, a1 @@ -374,7 +374,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: sraiw a1, a0, 31 ; RV64IM-NEXT: srliw a1, a1, 1 -; RV64IM-NEXT: addw a1, a0, a1 +; RV64IM-NEXT: add a1, a0, a1 ; RV64IM-NEXT: lui a2, 524288 ; RV64IM-NEXT: and a1, a1, a2 ; RV64IM-NEXT: addw a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll 
b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll @@ -35,7 +35,7 @@ ; RV64-NEXT: call __muldi3@plt ; RV64-NEXT: lui a1, 662 ; RV64-NEXT: addiw a1, a1, -83 -; RV64-NEXT: addw a0, a0, a1 +; RV64-NEXT: add a0, a0, a1 ; RV64-NEXT: slli a0, a0, 35 ; RV64-NEXT: srli a0, a0, 35 ; RV64-NEXT: lui a1, 1324 @@ -67,7 +67,7 @@ ; RV64M-NEXT: mulw a0, a0, a1 ; RV64M-NEXT: lui a1, 662 ; RV64M-NEXT: addiw a1, a1, -83 -; RV64M-NEXT: addw a0, a0, a1 +; RV64M-NEXT: add a0, a0, a1 ; RV64M-NEXT: slli a0, a0, 35 ; RV64M-NEXT: srli a0, a0, 35 ; RV64M-NEXT: lui a1, 1324 @@ -97,7 +97,7 @@ ; RV64MV-NEXT: mulw a0, a0, a1 ; RV64MV-NEXT: lui a1, 662 ; RV64MV-NEXT: addiw a1, a1, -83 -; RV64MV-NEXT: addw a0, a0, a1 +; RV64MV-NEXT: add a0, a0, a1 ; RV64MV-NEXT: slli a0, a0, 35 ; RV64MV-NEXT: srli a0, a0, 35 ; RV64MV-NEXT: lui a1, 1324 @@ -165,7 +165,7 @@ ; RV64M-NEXT: srli a2, a1, 4 ; RV64M-NEXT: slli a1, a1, 56 ; RV64M-NEXT: srli a1, a1, 63 -; RV64M-NEXT: addw a1, a2, a1 +; RV64M-NEXT: add a1, a2, a1 ; RV64M-NEXT: li a2, 6 ; RV64M-NEXT: mulw a1, a1, a2 ; RV64M-NEXT: subw a0, a0, a1 @@ -201,7 +201,7 @@ ; RV64MV-NEXT: srli a2, a1, 4 ; RV64MV-NEXT: slli a1, a1, 56 ; RV64MV-NEXT: srli a1, a1, 63 -; RV64MV-NEXT: addw a1, a2, a1 +; RV64MV-NEXT: add a1, a2, a1 ; RV64MV-NEXT: li a2, 6 ; RV64MV-NEXT: mulw a1, a1, a2 ; RV64MV-NEXT: subw a0, a0, a1 @@ -234,7 +234,7 @@ ; RV64-NEXT: srai a1, a1, 58 ; RV64-NEXT: slli a1, a1, 53 ; RV64-NEXT: srli a1, a1, 62 -; RV64-NEXT: addw a1, a0, a1 +; RV64-NEXT: add a1, a0, a1 ; RV64-NEXT: andi a1, a1, 60 ; RV64-NEXT: subw a0, a0, a1 ; RV64-NEXT: andi a0, a0, 63 @@ -260,7 +260,7 @@ ; RV64M-NEXT: srai a1, a1, 58 ; RV64M-NEXT: slli a1, a1, 53 ; RV64M-NEXT: srli a1, a1, 62 -; RV64M-NEXT: addw a1, a0, a1 +; RV64M-NEXT: add a1, a0, a1 ; RV64M-NEXT: andi a1, a1, 60 ; RV64M-NEXT: subw a0, a0, a1 ; RV64M-NEXT: andi a0, a0, 63 @@ -286,7 +286,7 @@ ; RV64MV-NEXT: 
srai a1, a1, 58 ; RV64MV-NEXT: slli a1, a1, 53 ; RV64MV-NEXT: srli a1, a1, 62 -; RV64MV-NEXT: addw a1, a0, a1 +; RV64MV-NEXT: add a1, a0, a1 ; RV64MV-NEXT: andi a1, a1, 60 ; RV64MV-NEXT: subw a0, a0, a1 ; RV64MV-NEXT: andi a0, a0, 63 @@ -427,7 +427,7 @@ ; RV64-NEXT: neg a0, a0 ; RV64-NEXT: addi a2, a2, -1 ; RV64-NEXT: addi a1, a1, -1 -; RV64-NEXT: slliw a3, a1, 2 +; RV64-NEXT: slli a3, a1, 2 ; RV64-NEXT: slli a4, a2, 31 ; RV64-NEXT: srli a4, a4, 62 ; RV64-NEXT: or a3, a4, a3 @@ -585,7 +585,7 @@ ; RV64M-NEXT: srli a1, a1, 31 ; RV64M-NEXT: or a1, a1, a4 ; RV64M-NEXT: sd a1, 0(a0) -; RV64M-NEXT: slliw a1, a2, 2 +; RV64M-NEXT: slli a1, a2, 2 ; RV64M-NEXT: slli a3, a3, 31 ; RV64M-NEXT: srli a3, a3, 62 ; RV64M-NEXT: or a1, a3, a1 @@ -779,11 +779,11 @@ ; RV64MV-NEXT: slli a4, a3, 33 ; RV64MV-NEXT: or a1, a1, a4 ; RV64MV-NEXT: sd a1, 0(a0) -; RV64MV-NEXT: slliw a1, a2, 2 +; RV64MV-NEXT: slli a2, a2, 2 ; RV64MV-NEXT: slli a3, a3, 31 ; RV64MV-NEXT: srli a3, a3, 62 -; RV64MV-NEXT: or a1, a3, a1 -; RV64MV-NEXT: sw a1, 8(a0) +; RV64MV-NEXT: or a2, a3, a2 +; RV64MV-NEXT: sw a2, 8(a0) ; RV64MV-NEXT: addi sp, s0, -64 ; RV64MV-NEXT: ld ra, 56(sp) # 8-byte Folded Reload ; RV64MV-NEXT: ld s0, 48(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll --- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll +++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll @@ -155,7 +155,7 @@ ; RV64IM-NEXT: add a3, a3, a2 ; RV64IM-NEXT: srli a6, a3, 63 ; RV64IM-NEXT: srli a3, a3, 6 -; RV64IM-NEXT: addw a3, a3, a6 +; RV64IM-NEXT: add a3, a3, a6 ; RV64IM-NEXT: lui a6, %hi(.LCPI0_1) ; RV64IM-NEXT: ld a6, %lo(.LCPI0_1)(a6) ; RV64IM-NEXT: li a7, 95 @@ -165,7 +165,7 @@ ; RV64IM-NEXT: sub a3, a3, a1 ; RV64IM-NEXT: srli a6, a3, 63 ; RV64IM-NEXT: srli a3, a3, 6 -; RV64IM-NEXT: addw a3, a3, a6 +; RV64IM-NEXT: add a3, a3, a6 ; RV64IM-NEXT: lui a6, %hi(.LCPI0_2) ; RV64IM-NEXT: ld a6, %lo(.LCPI0_2)(a6) ; RV64IM-NEXT: li a7, -124 @@ -174,7 +174,7 @@ ; 
RV64IM-NEXT: mulh a3, a5, a6 ; RV64IM-NEXT: srli a6, a3, 63 ; RV64IM-NEXT: srli a3, a3, 5 -; RV64IM-NEXT: addw a3, a3, a6 +; RV64IM-NEXT: add a3, a3, a6 ; RV64IM-NEXT: lui a6, %hi(.LCPI0_3) ; RV64IM-NEXT: ld a6, %lo(.LCPI0_3)(a6) ; RV64IM-NEXT: li a7, 98 @@ -183,7 +183,7 @@ ; RV64IM-NEXT: mulh a3, a4, a6 ; RV64IM-NEXT: srli a6, a3, 63 ; RV64IM-NEXT: srli a3, a3, 7 -; RV64IM-NEXT: addw a3, a3, a6 +; RV64IM-NEXT: add a3, a3, a6 ; RV64IM-NEXT: li a6, -1003 ; RV64IM-NEXT: mulw a3, a3, a6 ; RV64IM-NEXT: subw a4, a4, a3 @@ -336,7 +336,7 @@ ; RV64IM-NEXT: add a6, a6, a2 ; RV64IM-NEXT: srli a7, a6, 63 ; RV64IM-NEXT: srli a6, a6, 6 -; RV64IM-NEXT: addw a6, a6, a7 +; RV64IM-NEXT: add a6, a6, a7 ; RV64IM-NEXT: li a7, 95 ; RV64IM-NEXT: mulw a6, a6, a7 ; RV64IM-NEXT: subw a2, a2, a6 @@ -344,21 +344,21 @@ ; RV64IM-NEXT: add a6, a6, a1 ; RV64IM-NEXT: srli t0, a6, 63 ; RV64IM-NEXT: srli a6, a6, 6 -; RV64IM-NEXT: addw a6, a6, t0 +; RV64IM-NEXT: add a6, a6, t0 ; RV64IM-NEXT: mulw a6, a6, a7 ; RV64IM-NEXT: subw a1, a1, a6 ; RV64IM-NEXT: mulh a6, a5, a3 ; RV64IM-NEXT: add a6, a6, a5 ; RV64IM-NEXT: srli t0, a6, 63 ; RV64IM-NEXT: srli a6, a6, 6 -; RV64IM-NEXT: addw a6, a6, t0 +; RV64IM-NEXT: add a6, a6, t0 ; RV64IM-NEXT: mulw a6, a6, a7 ; RV64IM-NEXT: subw a5, a5, a6 ; RV64IM-NEXT: mulh a3, a4, a3 ; RV64IM-NEXT: add a3, a3, a4 ; RV64IM-NEXT: srli a6, a3, 63 ; RV64IM-NEXT: srli a3, a3, 6 -; RV64IM-NEXT: addw a3, a3, a6 +; RV64IM-NEXT: add a3, a3, a6 ; RV64IM-NEXT: mulw a3, a3, a7 ; RV64IM-NEXT: subw a4, a4, a3 ; RV64IM-NEXT: sh a4, 6(a0) @@ -539,13 +539,13 @@ ; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: call __divdi3@plt -; RV64I-NEXT: addw a0, s8, a0 -; RV64I-NEXT: addw a1, s7, s2 -; RV64I-NEXT: addw a2, s6, s3 -; RV64I-NEXT: addw a3, s5, s4 -; RV64I-NEXT: sh a3, 6(s0) -; RV64I-NEXT: sh a2, 4(s0) -; RV64I-NEXT: sh a1, 2(s0) +; RV64I-NEXT: add a0, s8, a0 +; RV64I-NEXT: add s2, s7, s2 +; RV64I-NEXT: add s3, s6, s3 +; RV64I-NEXT: add s4, s5, s4 +; RV64I-NEXT: sh s4, 6(s0) 
+; RV64I-NEXT: sh s3, 4(s0) +; RV64I-NEXT: sh s2, 2(s0) ; RV64I-NEXT: sh a0, 0(s0) ; RV64I-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 64(sp) # 8-byte Folded Reload @@ -572,7 +572,7 @@ ; RV64IM-NEXT: add a6, a6, a2 ; RV64IM-NEXT: srli a7, a6, 63 ; RV64IM-NEXT: srai a6, a6, 6 -; RV64IM-NEXT: addw a6, a6, a7 +; RV64IM-NEXT: add a6, a6, a7 ; RV64IM-NEXT: li a7, 95 ; RV64IM-NEXT: mulw t0, a6, a7 ; RV64IM-NEXT: subw a2, a2, t0 @@ -580,30 +580,30 @@ ; RV64IM-NEXT: add t0, t0, a1 ; RV64IM-NEXT: srli t1, t0, 63 ; RV64IM-NEXT: srai t0, t0, 6 -; RV64IM-NEXT: addw t0, t0, t1 +; RV64IM-NEXT: add t0, t0, t1 ; RV64IM-NEXT: mulw t1, t0, a7 ; RV64IM-NEXT: subw a1, a1, t1 ; RV64IM-NEXT: mulh t1, a5, a3 ; RV64IM-NEXT: add t1, t1, a5 ; RV64IM-NEXT: srli t2, t1, 63 ; RV64IM-NEXT: srai t1, t1, 6 -; RV64IM-NEXT: addw t1, t1, t2 +; RV64IM-NEXT: add t1, t1, t2 ; RV64IM-NEXT: mulw t2, t1, a7 ; RV64IM-NEXT: subw a5, a5, t2 ; RV64IM-NEXT: mulh a3, a4, a3 ; RV64IM-NEXT: add a3, a3, a4 ; RV64IM-NEXT: srli t2, a3, 63 ; RV64IM-NEXT: srai a3, a3, 6 -; RV64IM-NEXT: addw a3, a3, t2 +; RV64IM-NEXT: add a3, a3, t2 ; RV64IM-NEXT: mulw a7, a3, a7 ; RV64IM-NEXT: subw a4, a4, a7 -; RV64IM-NEXT: addw a3, a4, a3 -; RV64IM-NEXT: addw a4, a5, t1 -; RV64IM-NEXT: addw a1, a1, t0 -; RV64IM-NEXT: addw a2, a2, a6 +; RV64IM-NEXT: add a3, a4, a3 +; RV64IM-NEXT: add a5, a5, t1 +; RV64IM-NEXT: add a1, a1, t0 +; RV64IM-NEXT: add a2, a2, a6 ; RV64IM-NEXT: sh a2, 6(a0) ; RV64IM-NEXT: sh a1, 4(a0) -; RV64IM-NEXT: sh a4, 2(a0) +; RV64IM-NEXT: sh a5, 2(a0) ; RV64IM-NEXT: sh a3, 0(a0) ; RV64IM-NEXT: ret %1 = srem <4 x i16> %x, @@ -701,15 +701,15 @@ ; RV64I-NEXT: lh a3, 16(a1) ; RV64I-NEXT: lh a1, 8(a1) ; RV64I-NEXT: srli a4, a2, 58 -; RV64I-NEXT: addw a4, a2, a4 +; RV64I-NEXT: add a4, a2, a4 ; RV64I-NEXT: andi a4, a4, -64 ; RV64I-NEXT: subw s1, a2, a4 ; RV64I-NEXT: srli a2, a1, 59 -; RV64I-NEXT: addw a2, a1, a2 +; RV64I-NEXT: add a2, a1, a2 ; RV64I-NEXT: andi a2, a2, -32 ; RV64I-NEXT: subw s2, a1, a2 ; 
RV64I-NEXT: srli a1, a3, 61 -; RV64I-NEXT: addw a1, a3, a1 +; RV64I-NEXT: add a1, a3, a1 ; RV64I-NEXT: andi a1, a1, -8 ; RV64I-NEXT: subw s3, a3, a1 ; RV64I-NEXT: li a1, 95 @@ -738,20 +738,20 @@ ; RV64IM-NEXT: add a3, a3, a2 ; RV64IM-NEXT: srli a6, a3, 63 ; RV64IM-NEXT: srli a3, a3, 6 -; RV64IM-NEXT: addw a3, a3, a6 +; RV64IM-NEXT: add a3, a3, a6 ; RV64IM-NEXT: li a6, 95 ; RV64IM-NEXT: mulw a3, a3, a6 ; RV64IM-NEXT: subw a2, a2, a3 ; RV64IM-NEXT: srli a3, a1, 58 -; RV64IM-NEXT: addw a3, a1, a3 +; RV64IM-NEXT: add a3, a1, a3 ; RV64IM-NEXT: andi a3, a3, -64 ; RV64IM-NEXT: subw a1, a1, a3 ; RV64IM-NEXT: srli a3, a5, 59 -; RV64IM-NEXT: addw a3, a5, a3 +; RV64IM-NEXT: add a3, a5, a3 ; RV64IM-NEXT: andi a3, a3, -32 ; RV64IM-NEXT: subw a5, a5, a3 ; RV64IM-NEXT: srli a3, a4, 61 -; RV64IM-NEXT: addw a3, a4, a3 +; RV64IM-NEXT: add a3, a4, a3 ; RV64IM-NEXT: andi a3, a3, -8 ; RV64IM-NEXT: subw a4, a4, a3 ; RV64IM-NEXT: sh a4, 4(a0) @@ -889,7 +889,7 @@ ; RV64IM-NEXT: add a3, a3, a2 ; RV64IM-NEXT: srli a5, a3, 63 ; RV64IM-NEXT: srli a3, a3, 4 -; RV64IM-NEXT: addw a3, a3, a5 +; RV64IM-NEXT: add a3, a3, a5 ; RV64IM-NEXT: lui a5, %hi(.LCPI4_1) ; RV64IM-NEXT: ld a5, %lo(.LCPI4_1)(a5) ; RV64IM-NEXT: li a6, 23 @@ -898,7 +898,7 @@ ; RV64IM-NEXT: mulh a3, a1, a5 ; RV64IM-NEXT: srli a5, a3, 63 ; RV64IM-NEXT: srli a3, a3, 8 -; RV64IM-NEXT: addw a3, a3, a5 +; RV64IM-NEXT: add a3, a3, a5 ; RV64IM-NEXT: lui a5, %hi(.LCPI4_2) ; RV64IM-NEXT: ld a5, %lo(.LCPI4_2)(a5) ; RV64IM-NEXT: li a6, 654 @@ -907,7 +907,7 @@ ; RV64IM-NEXT: mulh a3, a4, a5 ; RV64IM-NEXT: srli a5, a3, 63 ; RV64IM-NEXT: srli a3, a3, 11 -; RV64IM-NEXT: addw a3, a3, a5 +; RV64IM-NEXT: add a3, a3, a5 ; RV64IM-NEXT: lui a5, 1 ; RV64IM-NEXT: addiw a5, a5, 1327 ; RV64IM-NEXT: mulw a3, a3, a5 @@ -1008,7 +1008,7 @@ ; RV64I-NEXT: lh s1, 24(a1) ; RV64I-NEXT: lh a0, 16(a1) ; RV64I-NEXT: srli a1, a2, 49 -; RV64I-NEXT: addw a1, a2, a1 +; RV64I-NEXT: add a1, a2, a1 ; RV64I-NEXT: lui a3, 8 ; RV64I-NEXT: and a1, a1, a3 ; RV64I-NEXT: subw s3, 
a2, a1 @@ -1041,7 +1041,7 @@ ; RV64IM-NEXT: add a3, a3, a2 ; RV64IM-NEXT: srli a5, a3, 63 ; RV64IM-NEXT: srli a3, a3, 4 -; RV64IM-NEXT: addw a3, a3, a5 +; RV64IM-NEXT: add a3, a3, a5 ; RV64IM-NEXT: li a5, 23 ; RV64IM-NEXT: lui a6, %hi(.LCPI5_1) ; RV64IM-NEXT: ld a6, %lo(.LCPI5_1)(a6) @@ -1051,13 +1051,13 @@ ; RV64IM-NEXT: mulh a3, a4, a6 ; RV64IM-NEXT: srli a5, a3, 63 ; RV64IM-NEXT: srli a3, a3, 11 -; RV64IM-NEXT: addw a3, a3, a5 +; RV64IM-NEXT: add a3, a3, a5 ; RV64IM-NEXT: lui a5, 1 ; RV64IM-NEXT: addiw a5, a5, 1327 ; RV64IM-NEXT: mulw a3, a3, a5 ; RV64IM-NEXT: subw a4, a4, a3 ; RV64IM-NEXT: srli a3, a1, 49 -; RV64IM-NEXT: addw a3, a1, a3 +; RV64IM-NEXT: add a3, a1, a3 ; RV64IM-NEXT: lui a5, 8 ; RV64IM-NEXT: and a3, a3, a5 ; RV64IM-NEXT: subw a1, a1, a3 diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll --- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll +++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll @@ -138,11 +138,11 @@ ; RV64I-NEXT: lbu a3, 4(a0) ; RV64I-NEXT: lbu a4, 6(a0) ; RV64I-NEXT: lbu a0, 7(a0) -; RV64I-NEXT: slliw a2, a2, 8 +; RV64I-NEXT: slli a2, a2, 8 ; RV64I-NEXT: or a2, a2, a3 -; RV64I-NEXT: slliw a3, a4, 16 -; RV64I-NEXT: slliw a0, a0, 24 -; RV64I-NEXT: or a0, a0, a3 +; RV64I-NEXT: slli a4, a4, 16 +; RV64I-NEXT: slli a0, a0, 24 +; RV64I-NEXT: or a0, a0, a4 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: or a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll @@ -107,7 +107,7 @@ ; RV64-NEXT: lui a1, 28087 ; RV64-NEXT: addiw a1, a1, -585 ; RV64-NEXT: call __muldi3@plt -; RV64-NEXT: slliw a1, a0, 26 +; RV64-NEXT: slli a1, a0, 26 ; RV64-NEXT: slli a0, a0, 37 ; RV64-NEXT: srli a0, a0, 38 ; RV64-NEXT: or a0, a0, a1 @@ -141,7 +141,7 @@ ; RV64M-NEXT: lui a1, 28087 ; 
RV64M-NEXT: addiw a1, a1, -585 ; RV64M-NEXT: mulw a0, a0, a1 -; RV64M-NEXT: slliw a1, a0, 26 +; RV64M-NEXT: slli a1, a0, 26 ; RV64M-NEXT: slli a0, a0, 37 ; RV64M-NEXT: srli a0, a0, 38 ; RV64M-NEXT: or a0, a0, a1 @@ -173,7 +173,7 @@ ; RV64MV-NEXT: lui a1, 28087 ; RV64MV-NEXT: addiw a1, a1, -585 ; RV64MV-NEXT: mulw a0, a0, a1 -; RV64MV-NEXT: slliw a1, a0, 26 +; RV64MV-NEXT: slli a1, a0, 26 ; RV64MV-NEXT: slli a0, a0, 37 ; RV64MV-NEXT: srli a0, a0, 38 ; RV64MV-NEXT: or a0, a0, a1 @@ -201,8 +201,8 @@ ; ; RV64-LABEL: test_urem_odd_setne: ; RV64: # %bb.0: -; RV64-NEXT: slliw a1, a0, 1 -; RV64-NEXT: addw a0, a1, a0 +; RV64-NEXT: slli a1, a0, 1 +; RV64-NEXT: add a0, a1, a0 ; RV64-NEXT: negw a0, a0 ; RV64-NEXT: andi a0, a0, 15 ; RV64-NEXT: sltiu a0, a0, 4 @@ -221,8 +221,8 @@ ; ; RV64M-LABEL: test_urem_odd_setne: ; RV64M: # %bb.0: -; RV64M-NEXT: slliw a1, a0, 1 -; RV64M-NEXT: addw a0, a1, a0 +; RV64M-NEXT: slli a1, a0, 1 +; RV64M-NEXT: add a0, a1, a0 ; RV64M-NEXT: negw a0, a0 ; RV64M-NEXT: andi a0, a0, 15 ; RV64M-NEXT: sltiu a0, a0, 4 @@ -241,8 +241,8 @@ ; ; RV64MV-LABEL: test_urem_odd_setne: ; RV64MV: # %bb.0: -; RV64MV-NEXT: slliw a1, a0, 1 -; RV64MV-NEXT: addw a0, a1, a0 +; RV64MV-NEXT: slli a1, a0, 1 +; RV64MV-NEXT: add a0, a1, a0 ; RV64MV-NEXT: negw a0, a0 ; RV64MV-NEXT: andi a0, a0, 15 ; RV64MV-NEXT: sltiu a0, a0, 4 @@ -396,7 +396,7 @@ ; RV64-NEXT: andi a0, a0, 2047 ; RV64-NEXT: li a1, 683 ; RV64-NEXT: call __muldi3@plt -; RV64-NEXT: slliw a1, a0, 10 +; RV64-NEXT: slli a1, a0, 10 ; RV64-NEXT: slli a0, a0, 53 ; RV64-NEXT: srli a0, a0, 54 ; RV64-NEXT: or a0, a0, a1 @@ -488,7 +488,7 @@ ; RV64M-NEXT: andi a1, a1, 2047 ; RV64M-NEXT: li a4, 683 ; RV64M-NEXT: mulw a1, a1, a4 -; RV64M-NEXT: slliw a4, a1, 10 +; RV64M-NEXT: slli a4, a1, 10 ; RV64M-NEXT: slli a1, a1, 53 ; RV64M-NEXT: srli a1, a1, 54 ; RV64M-NEXT: or a1, a1, a4 diff --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll --- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll +++ 
b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll @@ -529,13 +529,13 @@ ; RV64I-NEXT: li a1, 95 ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: call __udivdi3@plt -; RV64I-NEXT: addw a0, s8, a0 -; RV64I-NEXT: addw a1, s7, s2 -; RV64I-NEXT: addw a2, s6, s3 -; RV64I-NEXT: addw a3, s5, s4 -; RV64I-NEXT: sh a3, 6(s0) -; RV64I-NEXT: sh a2, 4(s0) -; RV64I-NEXT: sh a1, 2(s0) +; RV64I-NEXT: add a0, s8, a0 +; RV64I-NEXT: add s2, s7, s2 +; RV64I-NEXT: add s3, s6, s3 +; RV64I-NEXT: add s4, s5, s4 +; RV64I-NEXT: sh s4, 6(s0) +; RV64I-NEXT: sh s3, 4(s0) +; RV64I-NEXT: sh s2, 2(s0) ; RV64I-NEXT: sh a0, 0(s0) ; RV64I-NEXT: ld ra, 72(sp) # 8-byte Folded Reload ; RV64I-NEXT: ld s0, 64(sp) # 8-byte Folded Reload @@ -587,13 +587,13 @@ ; RV64IM-NEXT: srli a3, a3, 6 ; RV64IM-NEXT: mulw a7, a3, a7 ; RV64IM-NEXT: subw a4, a4, a7 -; RV64IM-NEXT: addw a3, a4, a3 -; RV64IM-NEXT: addw a4, a5, t1 -; RV64IM-NEXT: addw a1, a1, t0 -; RV64IM-NEXT: addw a2, a2, a6 +; RV64IM-NEXT: add a3, a4, a3 +; RV64IM-NEXT: add a5, a5, t1 +; RV64IM-NEXT: add a1, a1, t0 +; RV64IM-NEXT: add a2, a2, a6 ; RV64IM-NEXT: sh a2, 6(a0) ; RV64IM-NEXT: sh a1, 4(a0) -; RV64IM-NEXT: sh a4, 2(a0) +; RV64IM-NEXT: sh a5, 2(a0) ; RV64IM-NEXT: sh a3, 0(a0) ; RV64IM-NEXT: ret %1 = urem <4 x i16> %x, diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll --- a/llvm/test/CodeGen/RISCV/vararg.ll +++ b/llvm/test/CodeGen/RISCV/vararg.ll @@ -1290,8 +1290,8 @@ ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a3, a0, 8 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 8(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: ld a0, 0(a0) -; LP64-LP64F-LP64D-FPELIM-NEXT: addw a1, a1, s0 -; LP64-LP64F-LP64D-FPELIM-NEXT: addw a1, a1, a2 +; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, a1, s0 +; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, a1, a2 ; LP64-LP64F-LP64D-FPELIM-NEXT: addw a0, a1, a0 ; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 24(sp) # 8-byte Folded Reload ; LP64-LP64F-LP64D-FPELIM-NEXT: ld s0, 16(sp) # 8-byte Folded Reload @@ -1333,8 +1333,8 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: 
addi a3, a0, 8 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, -32(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: ld a0, 0(a0) -; LP64-LP64F-LP64D-WITHFP-NEXT: addw a1, a1, s1 -; LP64-LP64F-LP64D-WITHFP-NEXT: addw a1, a1, a2 +; LP64-LP64F-LP64D-WITHFP-NEXT: add a1, a1, s1 +; LP64-LP64F-LP64D-WITHFP-NEXT: add a1, a1, a2 ; LP64-LP64F-LP64D-WITHFP-NEXT: addw a0, a1, a0 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 40(sp) # 8-byte Folded Reload ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 32(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll --- a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll +++ b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll @@ -52,7 +52,7 @@ ; RV64-NEXT: sgtz a4, a4 ; RV64-NEXT: negw a4, a4 ; RV64-NEXT: and a3, a4, a3 -; RV64-NEXT: slliw a3, a3, 8 +; RV64-NEXT: slli a3, a3, 8 ; RV64-NEXT: negw a4, a5 ; RV64-NEXT: and a0, a4, a0 ; RV64-NEXT: andi a0, a0, 255