diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -58,6 +58,8 @@
   bool selectSExti32(SDValue N, SDValue &Val);
   bool selectZExti32(SDValue N, SDValue &Val);
 
+  bool hasAllWUsers(SDNode *N) const;
+
   bool selectVLOp(SDValue N, SDValue &VL);
 
   bool selectVSplat(SDValue N, SDValue &SplatVal);
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1496,6 +1496,74 @@
   return false;
 }
 
+// Return true if all users of this SDNode* only consume the lower 32-bits.
+// This can be used to form W instructions for add/sub/mul/shl even when the
+// root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
+// SimplifyDemandedBits has made it so some users see a sext_inreg and some
+// don't. The sext_inreg+add/sub/mul/shl will get selected, but still leave
+// the add/sub/mul/shl to become non-W instructions. By checking the users we
+// may be able to use a W instruction and CSE with the other instruction if
+// this has happened. We could try to detect that the CSE opportunity exists
+// before doing this, but that would be more complicated.
+// TODO: Does this need to look through AND/OR/XOR to their users to find more
+// opportunities?
+bool RISCVDAGToDAGISel::hasAllWUsers(SDNode *N) const {
+  assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB ||
+          N->getOpcode() == ISD::MUL || N->getOpcode() == ISD::SHL) &&
+         "Unexpected opcode");
+
+  for (auto UI = N->use_begin(), UE = N->use_end(); UI != UE; ++UI) {
+    SDNode *User = *UI;
+    // Users of this node should have already been instruction selected.
+    if (!User->isMachineOpcode())
+      return false;
+
+    // TODO: Add more opcodes.
+    switch (User->getMachineOpcode()) {
+    default:
+      return false;
+    case RISCV::ADDW:
+    case RISCV::ADDIW:
+    case RISCV::SUBW:
+    case RISCV::MULW:
+    case RISCV::SLLW:
+    case RISCV::SLLIW:
+    case RISCV::SRAW:
+    case RISCV::SRAIW:
+    case RISCV::SRLW:
+    case RISCV::SRLIW:
+    case RISCV::DIVW:
+    case RISCV::DIVUW:
+    case RISCV::REMW:
+    case RISCV::REMUW:
+    case RISCV::ROLW:
+    case RISCV::RORW:
+    case RISCV::RORIW:
+    case RISCV::CLZW:
+    case RISCV::CTZW:
+    case RISCV::CPOPW:
+    case RISCV::SLLIUW:
+      break;
+    case RISCV::ADDUW:
+    case RISCV::SH1ADDUW:
+    case RISCV::SH2ADDUW:
+    case RISCV::SH3ADDUW:
+      // The first operand to add.uw/shXadd.uw is implicitly zero extended.
+      if (UI.getOperandNo() != 0)
+        return false;
+      break;
+    case RISCV::SB:
+    case RISCV::SH:
+    case RISCV::SW:
+      if (UI.getOperandNo() != 0)
+        return false;
+      break;
+    }
+  }
+
+  return true;
+}
+
 // Select VL as a 5 bit immediate or a value that will become a register. This
 // allows us to choose between VSETIVLI and VSETVLI later.
 bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
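To make the CSE scenario behind hasAllWUsers concrete, here is a minimal IR sketch (hypothetical, reduced in the spirit of the add-imm.ll change below; not one of this patch's tests). A single add node has two users: the store, which only consumes the lower 32 bits (the value operand, operand 0, of SW), and the sign-extending return, which appears as a sext_inreg after SimplifyDemandedBits. The sext_inreg+add pair already selected to ADDIW; previously the bare add then selected to a plain ADDI for the store, leaving two instructions. Checking the users lets the bare add also select to ADDIW and CSE with the existing one:

; Hypothetical reduced case; @g and the function name are illustrative.
@g = global i32 0

define signext i32 @add_store_and_return(i32 signext %a) {
  ; One add, two users: the store only needs the low 32 bits of %sum, while
  ; the return needs a sign-extended result (a sext_inreg in the DAG).
  %sum = add i32 %a, 7
  store i32 %sum, i32* @g
  ret i32 %sum
}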
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1239,6 +1239,19 @@
           (SRLI (SLLI GPR:$rs1, 32), (ImmSubFrom32 uimm5:$shamt))>;
 }
 
+def addw : PatFrag<(ops node:$lhs, node:$rhs),
+                   (add node:$lhs, node:$rhs), [{
+  return hasAllWUsers(Node);
+}]>;
+def subw : PatFrag<(ops node:$lhs, node:$rhs),
+                   (sub node:$lhs, node:$rhs), [{
+  return hasAllWUsers(Node);
+}]>;
+def shlw : PatFrag<(ops node:$lhs, node:$rhs),
+                   (shl node:$lhs, node:$rhs), [{
+  return hasAllWUsers(Node);
+}]>;
+
 let Predicates = [IsRV64] in {
 
 /// sext and zext
@@ -1268,6 +1281,13 @@
 def : PatGprGpr<shiftopw<riscv_srlw>, SRLW>;
 def : PatGprGpr<shiftopw<riscv_sraw>, SRAW>;
 
+// Select W instructions without sext_inreg if only the lower 32-bits of the
+// result are used.
+def : Pat<(addw GPR:$rs1, GPR:$rs2), (ADDW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(addw GPR:$rs1, simm12:$imm12), (ADDIW GPR:$rs1, simm12:$imm12)>;
+def : Pat<(subw GPR:$rs1, GPR:$rs2), (SUBW GPR:$rs1, GPR:$rs2)>;
+def : Pat<(shlw GPR:$rs1, uimm5:$shamt), (SLLIW GPR:$rs1, uimm5:$shamt)>;
+
 /// Loads
 
 defm : LdPat<sextloadi8, LB>;
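The shlw pattern is what changes the bit-offset computations in the atomic tests below from slli to slliw: the inner shift's only user is an SLLW, which reads just the lower bits of both source operands (only the low 5 bits of the shift amount), so hasAllWUsers holds for it. A minimal sketch of the same shape (hypothetical function, not taken from this patch's tests; assumes the usual RV64 i32-to-i64 promotion):

; Hypothetical example illustrating the shlw PatFrag.
define signext i32 @shl_feeds_sllw(i32 signext %a, i32 signext %b) {
  ; %amt is consumed only as the shift-amount operand of the sllw selected
  ; for %r, so isel can now pick SLLIW instead of SLLI for it.
  %amt = shl i32 %a, 3
  %r = shl i32 %b, %amt
  ret i32 %r
}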
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoM.td b/llvm/lib/Target/RISCV/RISCVInstrInfoM.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoM.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoM.td
@@ -71,10 +71,19 @@
 def : PatGprGpr<urem, REMU>;
 } // Predicates = [HasStdExtM]
 
+def mulw : PatFrag<(ops node:$lhs, node:$rhs),
+                   (mul node:$lhs, node:$rhs), [{
+  return hasAllWUsers(Node);
+}]>;
+
 let Predicates = [HasStdExtM, IsRV64] in {
 def : Pat<(sext_inreg (mul GPR:$rs1, GPR:$rs2), i32),
           (MULW GPR:$rs1, GPR:$rs2)>;
+
+// Select W instructions without sext_inreg if only the lower 32-bits of the
+// result are used.
+def : Pat<(mulw GPR:$rs1, GPR:$rs2), (MULW GPR:$rs1, GPR:$rs2)>;
+
 def : PatGprGpr<riscv_divw, DIVW>;
 def : PatGprGpr<riscv_divuw, DIVUW>;
 def : PatGprGpr<riscv_remuw, REMUW>;
diff --git a/llvm/test/CodeGen/RISCV/add-imm.ll b/llvm/test/CodeGen/RISCV/add-imm.ll
--- a/llvm/test/CodeGen/RISCV/add-imm.ll
+++ b/llvm/test/CodeGen/RISCV/add-imm.ll
@@ -180,10 +180,9 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lui a1, 1
 ; RV64I-NEXT:    addiw a1, a1, -1096
-; RV64I-NEXT:    add a2, a0, a1
-; RV64I-NEXT:    lui a3, %hi(gv0)
 ; RV64I-NEXT:    addw a0, a0, a1
-; RV64I-NEXT:    sw a2, %lo(gv0)(a3)
+; RV64I-NEXT:    lui a1, %hi(gv0)
+; RV64I-NEXT:    sw a0, %lo(gv0)(a1)
 ; RV64I-NEXT:    ret
   %b = add nsw i32 %a, 3000
   store i32 %b, i32* @gv0, align 4
@@ -234,8 +233,8 @@
 ; RV64I-NEXT:    lw a3, %lo(gb)(a2)
 ; RV64I-NEXT:    lui a4, 1
 ; RV64I-NEXT:    addiw a4, a4, -1096
-; RV64I-NEXT:    add a1, a1, a4
-; RV64I-NEXT:    add a3, a3, a4
+; RV64I-NEXT:    addw a1, a1, a4
+; RV64I-NEXT:    addw a3, a3, a4
 ; RV64I-NEXT:    sw a1, %lo(ga)(a0)
 ; RV64I-NEXT:    sw a3, %lo(gb)(a2)
 ; RV64I-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
--- a/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
+++ b/llvm/test/CodeGen/RISCV/addimm-mulimm.ll
@@ -18,7 +18,7 @@
 ; RV64IM-LABEL: add_mul_trans_accept_1:
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    addi a1, zero, 11
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    addiw a0, a0, 407
 ; RV64IM-NEXT:    ret
   %tmp0 = add i32 %x, 37
@@ -39,7 +39,7 @@
 ; RV64IM-LABEL: add_mul_trans_accept_2:
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    addi a1, zero, 13
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    lui a1, 28
 ; RV64IM-NEXT:    addiw a1, a1, 1701
 ; RV64IM-NEXT:    addw a0, a0, a1
@@ -62,7 +62,7 @@
 ; RV64IM-LABEL: add_mul_trans_reject_1:
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    addi a1, zero, 19
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    lui a1, 9
 ; RV64IM-NEXT:    addiw a1, a1, 585
 ; RV64IM-NEXT:    addw a0, a0, a1
@@ -87,7 +87,7 @@
 ; RV64IM:       # %bb.0:
 ; RV64IM-NEXT:    lui a1, 792
 ; RV64IM-NEXT:    addiw a1, a1, -1709
-; RV64IM-NEXT:    mul a0, a0, a1
+; RV64IM-NEXT:    mulw a0, a0, a1
 ; RV64IM-NEXT:    lui a1, 1014660
 ; RV64IM-NEXT:    addiw a1, a1, -1891
 ; RV64IM-NEXT:    addw a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
@@ -61,7 +61,7 @@
 ; RV64IA-LABEL: cmpxchg_i8_monotonic_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a4, zero, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -137,7 +137,7 @@
 ; RV64IA-LABEL: cmpxchg_i8_acquire_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a4, zero, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -213,7 +213,7 @@
 ; RV64IA-LABEL: cmpxchg_i8_acquire_acquire:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a4, zero, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -289,7 +289,7 @@
 ; RV64IA-LABEL: cmpxchg_i8_release_monotonic:
 ; RV64IA:       # %bb.0:
 ; RV64IA-NEXT:    andi a3, a0, -4
-; RV64IA-NEXT:    slli a0, a0, 3
+; RV64IA-NEXT:    slliw a0, a0, 3
 ; RV64IA-NEXT:    addi a4, zero, 255
 ; RV64IA-NEXT:    sllw a4, a4, a0
 ; RV64IA-NEXT:    andi a1, a1, 255
@@ -365,7 +365,7 @@
 ; RV64IA-LABEL:
cmpxchg_i8_release_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a4, zero, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -441,7 +441,7 @@ ; RV64IA-LABEL: cmpxchg_i8_acq_rel_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a4, zero, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -517,7 +517,7 @@ ; RV64IA-LABEL: cmpxchg_i8_acq_rel_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a4, zero, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -593,7 +593,7 @@ ; RV64IA-LABEL: cmpxchg_i8_seq_cst_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a4, zero, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -669,7 +669,7 @@ ; RV64IA-LABEL: cmpxchg_i8_seq_cst_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a4, zero, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -745,7 +745,7 @@ ; RV64IA-LABEL: cmpxchg_i8_seq_cst_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a4, zero, 255 ; RV64IA-NEXT: sllw a4, a4, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -822,7 +822,7 @@ ; RV64IA-LABEL: cmpxchg_i16_monotonic_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -900,7 +900,7 @@ ; RV64IA-LABEL: cmpxchg_i16_acquire_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -978,7 +978,7 @@ ; RV64IA-LABEL: cmpxchg_i16_acquire_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -1056,7 +1056,7 @@ ; RV64IA-LABEL: cmpxchg_i16_release_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -1134,7 +1134,7 @@ ; RV64IA-LABEL: cmpxchg_i16_release_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -1212,7 +1212,7 @@ ; RV64IA-LABEL: cmpxchg_i16_acq_rel_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -1290,7 +1290,7 @@ ; RV64IA-LABEL: cmpxchg_i16_acq_rel_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -1368,7 +1368,7 @@ ; RV64IA-LABEL: 
cmpxchg_i16_seq_cst_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -1446,7 +1446,7 @@ ; RV64IA-LABEL: cmpxchg_i16_seq_cst_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 @@ -1524,7 +1524,7 @@ ; RV64IA-LABEL: cmpxchg_i16_seq_cst_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a4, 16 ; RV64IA-NEXT: addiw a4, a4, -1 ; RV64IA-NEXT: sllw a5, a4, a0 diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll --- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll +++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll @@ -52,7 +52,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -116,7 +116,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -180,7 +180,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -244,7 +244,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -308,7 +308,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -372,7 +372,7 @@ ; RV64IA-LABEL: atomicrmw_add_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -436,7 +436,7 @@ ; RV64IA-LABEL: atomicrmw_add_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -500,7 +500,7 @@ ; RV64IA-LABEL: atomicrmw_add_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -564,7 +564,7 @@ ; RV64IA-LABEL: atomicrmw_add_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -628,7 +628,7 @@ ; RV64IA-LABEL: atomicrmw_add_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; 
RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -692,7 +692,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -756,7 +756,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -820,7 +820,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -884,7 +884,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -948,7 +948,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1006,7 +1006,7 @@ ; RV64IA-LABEL: atomicrmw_and_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 @@ -1058,7 +1058,7 @@ ; RV64IA-LABEL: atomicrmw_and_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 @@ -1110,7 +1110,7 @@ ; RV64IA-LABEL: atomicrmw_and_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 @@ -1162,7 +1162,7 @@ ; RV64IA-LABEL: atomicrmw_and_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 @@ -1214,7 +1214,7 @@ ; RV64IA-LABEL: atomicrmw_and_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 @@ -1273,7 +1273,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1339,7 +1339,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1405,7 +1405,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, 
a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1471,7 +1471,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1537,7 +1537,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1592,7 +1592,7 @@ ; RV64IA-LABEL: atomicrmw_or_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoor.w a1, a1, (a2) @@ -1636,7 +1636,7 @@ ; RV64IA-LABEL: atomicrmw_or_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoor.w.aq a1, a1, (a2) @@ -1680,7 +1680,7 @@ ; RV64IA-LABEL: atomicrmw_or_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoor.w.rl a1, a1, (a2) @@ -1724,7 +1724,7 @@ ; RV64IA-LABEL: atomicrmw_or_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoor.w.aqrl a1, a1, (a2) @@ -1768,7 +1768,7 @@ ; RV64IA-LABEL: atomicrmw_or_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoor.w.aqrl a1, a1, (a2) @@ -1812,7 +1812,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoxor.w a1, a1, (a2) @@ -1856,7 +1856,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoxor.w.aq a1, a1, (a2) @@ -1900,7 +1900,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoxor.w.rl a1, a1, (a2) @@ -1944,7 +1944,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoxor.w.aqrl a1, a1, (a2) @@ -1988,7 +1988,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoxor.w.aqrl a1, a1, (a2) @@ -3586,7 +3586,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi 
a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -3718,7 +3718,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -3850,7 +3850,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -3982,7 +3982,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4114,7 +4114,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4246,7 +4246,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4378,7 +4378,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i8_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4510,7 +4510,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i8_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4642,7 +4642,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i8_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4774,7 +4774,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i8_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -4843,7 +4843,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -4909,7 +4909,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -4975,7 +4975,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5041,7 +5041,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: 
andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5107,7 +5107,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5173,7 +5173,7 @@ ; RV64IA-LABEL: atomicrmw_add_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5239,7 +5239,7 @@ ; RV64IA-LABEL: atomicrmw_add_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5305,7 +5305,7 @@ ; RV64IA-LABEL: atomicrmw_add_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5371,7 +5371,7 @@ ; RV64IA-LABEL: atomicrmw_add_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5437,7 +5437,7 @@ ; RV64IA-LABEL: atomicrmw_add_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5503,7 +5503,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5569,7 +5569,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5635,7 +5635,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5701,7 +5701,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5767,7 +5767,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5827,7 +5827,7 @@ ; RV64IA-LABEL: atomicrmw_and_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5881,7 +5881,7 @@ ; RV64IA-LABEL: atomicrmw_and_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 
3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5935,7 +5935,7 @@ ; RV64IA-LABEL: atomicrmw_and_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -5989,7 +5989,7 @@ ; RV64IA-LABEL: atomicrmw_and_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -6043,7 +6043,7 @@ ; RV64IA-LABEL: atomicrmw_and_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -6104,7 +6104,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -6172,7 +6172,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -6240,7 +6240,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -6308,7 +6308,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -6376,7 +6376,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -6434,7 +6434,7 @@ ; RV64IA-LABEL: atomicrmw_or_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 @@ -6482,7 +6482,7 @@ ; RV64IA-LABEL: atomicrmw_or_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 @@ -6530,7 +6530,7 @@ ; RV64IA-LABEL: atomicrmw_or_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 @@ -6578,7 +6578,7 @@ ; RV64IA-LABEL: atomicrmw_or_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 @@ -6626,7 +6626,7 @@ ; RV64IA-LABEL: atomicrmw_or_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, 
a1, a3 @@ -6674,7 +6674,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 @@ -6722,7 +6722,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 @@ -6770,7 +6770,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 @@ -6818,7 +6818,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 @@ -6866,7 +6866,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 @@ -8495,7 +8495,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -8637,7 +8637,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -8779,7 +8779,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -8921,7 +8921,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i16_acq_rel: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -9063,7 +9063,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -9205,7 +9205,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -9347,7 +9347,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i16_acquire: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -9489,7 +9489,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i16_release: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -9631,7 +9631,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i16_acq_rel: ; 
RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -9773,7 +9773,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i16_seq_cst: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll --- a/llvm/test/CodeGen/RISCV/atomic-signext.ll +++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll @@ -170,7 +170,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -242,7 +242,7 @@ ; RV64IA-LABEL: atomicrmw_add_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -314,7 +314,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -380,7 +380,7 @@ ; RV64IA-LABEL: atomicrmw_and_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: not a3, a3 @@ -447,7 +447,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -510,7 +510,7 @@ ; RV64IA-LABEL: atomicrmw_or_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoor.w a1, a1, (a2) @@ -562,7 +562,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: andi a1, a1, 255 ; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: amoxor.w a1, a1, (a2) @@ -994,7 +994,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1132,7 +1132,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i8_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: addi a3, zero, 255 ; RV64IA-NEXT: sllw a3, a3, a0 ; RV64IA-NEXT: andi a1, a1, 255 @@ -1209,7 +1209,7 @@ ; RV64IA-LABEL: atomicrmw_xchg_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -1283,7 +1283,7 @@ ; RV64IA-LABEL: atomicrmw_add_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; 
RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -1357,7 +1357,7 @@ ; RV64IA-LABEL: atomicrmw_sub_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -1425,7 +1425,7 @@ ; RV64IA-LABEL: atomicrmw_and_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -1494,7 +1494,7 @@ ; RV64IA-LABEL: atomicrmw_nand_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -1560,7 +1560,7 @@ ; RV64IA-LABEL: atomicrmw_or_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 @@ -1616,7 +1616,7 @@ ; RV64IA-LABEL: atomicrmw_xor_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: and a1, a1, a3 @@ -2063,7 +2063,7 @@ ; RV64IA-LABEL: atomicrmw_umax_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 @@ -2211,7 +2211,7 @@ ; RV64IA-LABEL: atomicrmw_umin_i16_monotonic: ; RV64IA: # %bb.0: ; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 +; RV64IA-NEXT: slliw a0, a0, 3 ; RV64IA-NEXT: lui a3, 16 ; RV64IA-NEXT: addiw a3, a3, -1 ; RV64IA-NEXT: sllw a4, a3, a0 diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll --- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll @@ -54,15 +54,15 @@ ; RV64I-NEXT: lui a0, 16 ; RV64I-NEXT: addiw a0, a0, -1 ; RV64I-NEXT: and a0, a1, a0 -; RV64I-NEXT: add a0, t2, a0 -; RV64I-NEXT: add a0, a0, a2 +; RV64I-NEXT: addw a0, t2, a0 +; RV64I-NEXT: addw a0, a0, a2 ; RV64I-NEXT: xor a1, a4, t1 ; RV64I-NEXT: xor a2, a3, a7 ; RV64I-NEXT: or a1, a2, a1 ; RV64I-NEXT: seqz a1, a1 -; RV64I-NEXT: add a0, a1, a0 -; RV64I-NEXT: add a0, a0, a5 -; RV64I-NEXT: add a0, a0, a6 +; RV64I-NEXT: addw a0, a1, a0 +; RV64I-NEXT: addw a0, a0, a5 +; RV64I-NEXT: addw a0, a0, a6 ; RV64I-NEXT: addw a0, a0, t0 ; RV64I-NEXT: ret %a_ext = zext i8 %a to i32 diff --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll --- a/llvm/test/CodeGen/RISCV/div.ll +++ b/llvm/test/CodeGen/RISCV/div.ll @@ -713,7 +713,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: srliw a1, a1, 29 -; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: addw a0, a0, a1 ; RV64I-NEXT: sraiw a0, a0, 3 ; RV64I-NEXT: ret ; @@ -721,7 +721,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: sraiw a1, a0, 31 ; RV64IM-NEXT: srliw a1, a1, 29 -; RV64IM-NEXT: add a0, a0, a1 +; RV64IM-NEXT: addw a0, a0, a1 ; RV64IM-NEXT: sraiw a0, a0, 3 ; RV64IM-NEXT: ret %1 = sdiv i32 %a, 8 @@ -749,7 +749,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: sraiw a1, a0, 31 ; 
RV64I-NEXT: srliw a1, a1, 16 -; RV64I-NEXT: add a0, a0, a1 +; RV64I-NEXT: addw a0, a0, a1 ; RV64I-NEXT: sraiw a0, a0, 16 ; RV64I-NEXT: ret ; @@ -757,7 +757,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: sraiw a1, a0, 31 ; RV64IM-NEXT: srliw a1, a1, 16 -; RV64IM-NEXT: add a0, a0, a1 +; RV64IM-NEXT: addw a0, a0, a1 ; RV64IM-NEXT: sraiw a0, a0, 16 ; RV64IM-NEXT: ret %1 = sdiv i32 %a, 65536 diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll --- a/llvm/test/CodeGen/RISCV/mul.ll +++ b/llvm/test/CodeGen/RISCV/mul.ll @@ -91,13 +91,13 @@ ; ; RV64I-LABEL: mul_constant: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a1, a0, 2 +; RV64I-NEXT: slliw a1, a0, 2 ; RV64I-NEXT: addw a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: mul_constant: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slli a1, a0, 2 +; RV64IM-NEXT: slliw a1, a0, 2 ; RV64IM-NEXT: addw a0, a1, a0 ; RV64IM-NEXT: ret %1 = mul i32 %a, 5 @@ -485,13 +485,13 @@ ; ; RV64I-LABEL: muli32_p65: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a1, a0, 6 +; RV64I-NEXT: slliw a1, a0, 6 ; RV64I-NEXT: addw a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli32_p65: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slli a1, a0, 6 +; RV64IM-NEXT: slliw a1, a0, 6 ; RV64IM-NEXT: addw a0, a1, a0 ; RV64IM-NEXT: ret %1 = mul i32 %a, 65 @@ -513,13 +513,13 @@ ; ; RV64I-LABEL: muli32_p63: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a1, a0, 6 +; RV64I-NEXT: slliw a1, a0, 6 ; RV64I-NEXT: subw a0, a1, a0 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli32_p63: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slli a1, a0, 6 +; RV64IM-NEXT: slliw a1, a0, 6 ; RV64IM-NEXT: subw a0, a1, a0 ; RV64IM-NEXT: ret %1 = mul i32 %a, 63 @@ -620,13 +620,13 @@ ; ; RV64I-LABEL: muli32_m63: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a1, a0, 6 +; RV64I-NEXT: slliw a1, a0, 6 ; RV64I-NEXT: subw a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli32_m63: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slli a1, a0, 6 +; RV64IM-NEXT: slliw a1, a0, 6 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret %1 = mul i32 %a, -63 @@ -650,15 +650,15 @@ ; ; RV64I-LABEL: muli32_m65: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a1, a0, 6 -; RV64I-NEXT: add a0, a1, a0 +; RV64I-NEXT: slliw a1, a0, 6 +; RV64I-NEXT: addw a0, a1, a0 ; RV64I-NEXT: negw a0, a0 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli32_m65: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slli a1, a0, 6 -; RV64IM-NEXT: add a0, a1, a0 +; RV64IM-NEXT: slliw a1, a0, 6 +; RV64IM-NEXT: addw a0, a1, a0 ; RV64IM-NEXT: negw a0, a0 ; RV64IM-NEXT: ret %1 = mul i32 %a, -65 @@ -841,15 +841,15 @@ ; ; RV64I-LABEL: muli32_p4352: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a1, a0, 8 -; RV64I-NEXT: slli a0, a0, 12 +; RV64I-NEXT: slliw a1, a0, 8 +; RV64I-NEXT: slliw a0, a0, 12 ; RV64I-NEXT: addw a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli32_p4352: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slli a1, a0, 8 -; RV64IM-NEXT: slli a0, a0, 12 +; RV64IM-NEXT: slliw a1, a0, 8 +; RV64IM-NEXT: slliw a0, a0, 12 ; RV64IM-NEXT: addw a0, a0, a1 ; RV64IM-NEXT: ret %1 = mul i32 %a, 4352 @@ -873,15 +873,15 @@ ; ; RV64I-LABEL: muli32_p3840: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a1, a0, 8 -; RV64I-NEXT: slli a0, a0, 12 +; RV64I-NEXT: slliw a1, a0, 8 +; RV64I-NEXT: slliw a0, a0, 12 ; RV64I-NEXT: subw a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli32_p3840: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slli a1, a0, 8 -; RV64IM-NEXT: slli a0, a0, 12 +; RV64IM-NEXT: slliw a1, a0, 8 +; RV64IM-NEXT: slliw a0, a0, 12 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret %1 = mul i32 %a, 3840 @@ -905,15 +905,15 @@ ; ; RV64I-LABEL: muli32_m3840: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a1, a0, 12 -; 
RV64I-NEXT: slli a0, a0, 8 +; RV64I-NEXT: slliw a1, a0, 12 +; RV64I-NEXT: slliw a0, a0, 8 ; RV64I-NEXT: subw a0, a0, a1 ; RV64I-NEXT: ret ; ; RV64IM-LABEL: muli32_m3840: ; RV64IM: # %bb.0: -; RV64IM-NEXT: slli a1, a0, 12 -; RV64IM-NEXT: slli a0, a0, 8 +; RV64IM-NEXT: slliw a1, a0, 12 +; RV64IM-NEXT: slliw a0, a0, 8 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret %1 = mul i32 %a, -3840 diff --git a/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll b/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll --- a/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-demanded-bits.ll @@ -10,10 +10,10 @@ ; CHECK-LABEL: foo: ; CHECK: # %bb.0: ; CHECK-NEXT: mulw a0, a0, a0 -; CHECK-NEXT: addi a0, a0, 1 -; CHECK-NEXT: mul a0, a0, a0 -; CHECK-NEXT: add a0, a0, a2 -; CHECK-NEXT: addi a0, a0, 1 +; CHECK-NEXT: addiw a0, a0, 1 +; CHECK-NEXT: mulw a0, a0, a0 +; CHECK-NEXT: addw a0, a0, a2 +; CHECK-NEXT: addiw a0, a0, 1 ; CHECK-NEXT: sllw a0, a0, a1 ; CHECK-NEXT: ret %b = mul i32 %x, %x diff --git a/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll b/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll --- a/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll @@ -8,15 +8,15 @@ ; CHECK-NEXT: # %bb.1: # %for.body.preheader ; CHECK-NEXT: not a2, a0 ; CHECK-NEXT: add a2, a2, a1 -; CHECK-NEXT: addi a3, a0, 1 -; CHECK-NEXT: mul a3, a2, a3 +; CHECK-NEXT: addiw a3, a0, 1 +; CHECK-NEXT: mulw a3, a2, a3 ; CHECK-NEXT: sub a1, a1, a0 ; CHECK-NEXT: addi a1, a1, -2 ; CHECK-NEXT: slli a1, a1, 32 ; CHECK-NEXT: slli a2, a2, 32 ; CHECK-NEXT: mulhu a1, a2, a1 ; CHECK-NEXT: srli a1, a1, 1 -; CHECK-NEXT: add a0, a3, a0 +; CHECK-NEXT: addw a0, a3, a0 ; CHECK-NEXT: addw a0, a0, a1 ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB0_2: @@ -54,14 +54,14 @@ ; CHECK-NEXT: # %bb.1: # %for.body.preheader ; CHECK-NEXT: not a2, a0 ; CHECK-NEXT: add a3, a2, a1 -; CHECK-NEXT: mul a2, a3, a2 +; CHECK-NEXT: mulw a2, a3, a2 ; CHECK-NEXT: sub a1, a1, a0 ; CHECK-NEXT: addi a1, a1, -2 ; CHECK-NEXT: slli a1, a1, 32 ; CHECK-NEXT: slli a3, a3, 32 ; CHECK-NEXT: mulhu a1, a3, a1 ; CHECK-NEXT: srli a1, a1, 1 -; CHECK-NEXT: sub a0, a2, a0 +; CHECK-NEXT: subw a0, a2, a0 ; CHECK-NEXT: subw a0, a0, a1 ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB1_2: diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll --- a/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll @@ -170,7 +170,7 @@ ; RV64I-LABEL: rol_i32: ; RV64I: # %bb.0: ; RV64I-NEXT: sllw a2, a0, a1 -; RV64I-NEXT: neg a1, a1 +; RV64I-NEXT: negw a1, a1 ; RV64I-NEXT: srlw a0, a0, a1 ; RV64I-NEXT: or a0, a2, a0 ; RV64I-NEXT: ret @@ -198,7 +198,7 @@ ; RV64I-LABEL: rol_i32_nosext: ; RV64I: # %bb.0: ; RV64I-NEXT: sllw a3, a0, a1 -; RV64I-NEXT: neg a1, a1 +; RV64I-NEXT: negw a1, a1 ; RV64I-NEXT: srlw a0, a0, a1 ; RV64I-NEXT: or a0, a3, a0 ; RV64I-NEXT: sw a0, 0(a2) @@ -231,7 +231,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, -2 ; RV64I-NEXT: sllw a2, a1, a0 -; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: negw a0, a0 ; RV64I-NEXT: srlw a0, a1, a0 ; RV64I-NEXT: or a0, a2, a0 ; RV64I-NEXT: ret @@ -292,7 +292,7 @@ ; RV64I-LABEL: ror_i32: ; RV64I: # %bb.0: ; RV64I-NEXT: srlw a2, a0, a1 -; RV64I-NEXT: neg a1, a1 +; RV64I-NEXT: negw a1, a1 ; RV64I-NEXT: sllw a0, a0, a1 ; RV64I-NEXT: or a0, a2, a0 ; RV64I-NEXT: ret @@ -320,7 +320,7 @@ ; RV64I-LABEL: ror_i32_nosext: ; RV64I: # %bb.0: ; RV64I-NEXT: srlw a3, a0, a1 -; RV64I-NEXT: neg a1, a1 +; RV64I-NEXT: negw a1, a1 ; RV64I-NEXT: sllw a0, 
a0, a1 ; RV64I-NEXT: or a0, a3, a0 ; RV64I-NEXT: sw a0, 0(a2) @@ -353,7 +353,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: addi a1, zero, -2 ; RV64I-NEXT: srlw a2, a1, a0 -; RV64I-NEXT: neg a0, a0 +; RV64I-NEXT: negw a0, a0 ; RV64I-NEXT: sllw a0, a1, a0 ; RV64I-NEXT: or a0, a2, a0 ; RV64I-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll --- a/llvm/test/CodeGen/RISCV/rv64zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll @@ -265,7 +265,7 @@ ; ; RV64IB-LABEL: log2_ceil_i32: ; RV64IB: # %bb.0: -; RV64IB-NEXT: addi a0, a0, -1 +; RV64IB-NEXT: addiw a0, a0, -1 ; RV64IB-NEXT: clzw a0, a0 ; RV64IB-NEXT: addi a1, zero, 32 ; RV64IB-NEXT: sub a0, a1, a0 @@ -273,7 +273,7 @@ ; ; RV64IBB-LABEL: log2_ceil_i32: ; RV64IBB: # %bb.0: -; RV64IBB-NEXT: addi a0, a0, -1 +; RV64IBB-NEXT: addiw a0, a0, -1 ; RV64IBB-NEXT: clzw a0, a0 ; RV64IBB-NEXT: addi a1, zero, 32 ; RV64IBB-NEXT: sub a0, a1, a0 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll @@ -537,7 +537,7 @@ ; LMULMAX2-RV64-NEXT: addi a4, a4, 257 ; LMULMAX2-RV64-NEXT: mul a5, a5, a4 ; LMULMAX2-RV64-NEXT: srli a5, a5, 56 -; LMULMAX2-RV64-NEXT: addi a5, a5, -56 +; LMULMAX2-RV64-NEXT: addiw a5, a5, -56 ; LMULMAX2-RV64-NEXT: sb a5, 16(sp) ; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 15 @@ -568,7 +568,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 31(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 14 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -598,7 +598,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 30(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 13 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -628,7 +628,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 29(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 12 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -658,7 +658,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 28(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 11 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -688,7 +688,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 27(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 10 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -718,7 +718,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 26(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 9 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -748,7 +748,7 @@ ; 
LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 25(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 8 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -778,7 +778,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 24(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 7 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -808,7 +808,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 23(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 6 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -838,7 +838,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 22(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 5 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -868,7 +868,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 21(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 4 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -898,7 +898,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 20(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 3 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -928,7 +928,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 19(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 2 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -958,7 +958,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 18(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v25, v25, 1 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v25 @@ -988,7 +988,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 17(sp) ; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX2-RV64-NEXT: addi a1, sp, 16 @@ -1529,7 +1529,7 @@ ; LMULMAX1-RV64-NEXT: addi a4, a4, 257 ; LMULMAX1-RV64-NEXT: mul a5, a5, a4 ; LMULMAX1-RV64-NEXT: srli a5, a5, 56 -; LMULMAX1-RV64-NEXT: addi a5, a5, -56 +; LMULMAX1-RV64-NEXT: addiw a5, a5, -56 ; LMULMAX1-RV64-NEXT: sb a5, 16(sp) ; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 15 @@ -1560,7 +1560,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: 
addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 31(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 14 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -1590,7 +1590,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 30(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 13 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -1620,7 +1620,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 29(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 12 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -1650,7 +1650,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 28(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 11 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -1680,7 +1680,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 27(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 10 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -1710,7 +1710,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 26(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 9 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -1740,7 +1740,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 25(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 8 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -1770,7 +1770,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 24(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 7 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -1800,7 +1800,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 23(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 6 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -1830,7 +1830,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 22(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 5 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -1860,7 +1860,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 21(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 4 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -1890,7 +1890,7 @@ ; 
LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 20(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 3 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -1920,7 +1920,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 19(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 2 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -1950,7 +1950,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 18(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v25, v25, 1 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v25 @@ -1980,7 +1980,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 17(sp) ; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX1-RV64-NEXT: addi a1, sp, 16 @@ -2309,7 +2309,7 @@ ; LMULMAX2-RV64-NEXT: addi a5, a5, 257 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 16(sp) ; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 7 @@ -2340,7 +2340,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 30(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 6 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -2370,7 +2370,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 28(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 5 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -2400,7 +2400,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 26(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 4 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -2430,7 +2430,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 24(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 3 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -2460,7 +2460,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 22(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 2 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -2490,7 +2490,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; 
LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 20(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v25, v25, 1 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v25 @@ -2520,7 +2520,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 18(sp) ; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX2-RV64-NEXT: addi a1, sp, 16 @@ -2841,7 +2841,7 @@ ; LMULMAX1-RV64-NEXT: addi a5, a5, 257 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -48 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX1-RV64-NEXT: sh a1, 16(sp) ; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 7 @@ -2872,7 +2872,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -48 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX1-RV64-NEXT: sh a1, 30(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 6 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -2902,7 +2902,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -48 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX1-RV64-NEXT: sh a1, 28(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 5 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -2932,7 +2932,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -48 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX1-RV64-NEXT: sh a1, 26(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 4 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -2962,7 +2962,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -48 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX1-RV64-NEXT: sh a1, 24(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 3 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -2992,7 +2992,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -48 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX1-RV64-NEXT: sh a1, 22(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 2 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -3022,7 +3022,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -48 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX1-RV64-NEXT: sh a1, 20(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v25, v25, 1 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v25 @@ -3052,7 +3052,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -48 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX1-RV64-NEXT: sh a1, 18(sp) ; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-RV64-NEXT: addi a1, sp, 16 @@ -3258,7 +3258,7 @@ ; LMULMAX2-RV64-NEXT: addi a4, a4, 257 ; LMULMAX2-RV64-NEXT: mul a5, a5, a4 ; LMULMAX2-RV64-NEXT: srli a5, a5, 56 -; LMULMAX2-RV64-NEXT: addi a5, a5, -32 +; LMULMAX2-RV64-NEXT: addiw a5, a5, -32 ; LMULMAX2-RV64-NEXT: sw a5, 16(sp) ; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; 
LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 3 @@ -3290,7 +3290,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -32 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX2-RV64-NEXT: sw a1, 28(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v25, 2 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -3321,7 +3321,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -32 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX2-RV64-NEXT: sw a1, 24(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v25, v25, 1 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v25 @@ -3352,7 +3352,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -32 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX2-RV64-NEXT: sw a1, 20(sp) ; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX2-RV64-NEXT: addi a1, sp, 16 @@ -3550,7 +3550,7 @@ ; LMULMAX1-RV64-NEXT: addi a4, a4, 257 ; LMULMAX1-RV64-NEXT: mul a5, a5, a4 ; LMULMAX1-RV64-NEXT: srli a5, a5, 56 -; LMULMAX1-RV64-NEXT: addi a5, a5, -32 +; LMULMAX1-RV64-NEXT: addiw a5, a5, -32 ; LMULMAX1-RV64-NEXT: sw a5, 16(sp) ; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 3 @@ -3582,7 +3582,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -32 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX1-RV64-NEXT: sw a1, 28(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 2 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -3613,7 +3613,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -32 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX1-RV64-NEXT: sw a1, 24(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v25, v25, 1 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v25 @@ -3644,7 +3644,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a3 ; LMULMAX1-RV64-NEXT: mul a1, a1, a4 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -32 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX1-RV64-NEXT: sw a1, 20(sp) ; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-RV64-NEXT: addi a1, sp, 16 @@ -5138,7 +5138,7 @@ ; LMULMAX2-RV64-NEXT: addi a5, a5, 257 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 32(sp) ; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e8, m2, ta, mu ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 31 @@ -5169,7 +5169,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 63(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 30 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5199,7 +5199,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 62(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 29 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5229,7 +5229,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: 
mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 61(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 28 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5259,7 +5259,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 60(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 27 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5289,7 +5289,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 59(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 26 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5319,7 +5319,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 58(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 25 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5349,7 +5349,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 57(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 24 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5379,7 +5379,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 56(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 23 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5409,7 +5409,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 55(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 22 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5439,7 +5439,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 54(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 21 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5469,7 +5469,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 53(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 20 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5499,7 +5499,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 52(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 19 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5529,7 +5529,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 51(sp) ; 
LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 18 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5559,7 +5559,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 50(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 17 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5589,7 +5589,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 49(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 16 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5619,7 +5619,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 48(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 15 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5649,7 +5649,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 47(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 14 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5679,7 +5679,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 46(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 13 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5709,7 +5709,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 45(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 12 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5739,7 +5739,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 44(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 11 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5769,7 +5769,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 43(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 10 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5799,7 +5799,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 42(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 9 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5829,7 +5829,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 41(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 8 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5859,7 +5859,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, 
a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 40(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 7 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5889,7 +5889,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 39(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 6 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5919,7 +5919,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 38(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 5 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5949,7 +5949,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 37(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 4 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -5979,7 +5979,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 36(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 3 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -6009,7 +6009,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 35(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 2 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -6039,7 +6039,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 34(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v26, 1 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -6069,7 +6069,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -56 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX2-RV64-NEXT: sb a1, 33(sp) ; LMULMAX2-RV64-NEXT: vsetvli zero, a6, e8, m2, ta, mu ; LMULMAX2-RV64-NEXT: addi a1, sp, 32 @@ -7067,7 +7067,7 @@ ; LMULMAX1-RV64-NEXT: addi a5, a5, 257 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 32(sp) ; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e8, m1, ta, mu ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 15 @@ -7098,7 +7098,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 47(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 14 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v27 @@ -7128,7 +7128,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 
46(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 13 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v27 @@ -7158,7 +7158,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 45(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 12 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v27 @@ -7188,7 +7188,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 44(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 11 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v27 @@ -7218,7 +7218,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 43(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 10 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v27 @@ -7248,7 +7248,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 42(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 9 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v27 @@ -7278,7 +7278,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 41(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 8 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v27 @@ -7308,7 +7308,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 40(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 7 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v27 @@ -7338,7 +7338,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 39(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 6 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v27 @@ -7368,7 +7368,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 38(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 5 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v27 @@ -7398,7 +7398,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 37(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 4 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v27 @@ -7428,7 +7428,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 36(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 3 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v27 @@ -7458,7 +7458,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul 
a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 35(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 2 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v27 @@ -7488,7 +7488,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 34(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v26, 1 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -7518,7 +7518,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 33(sp) ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v25 ; LMULMAX1-RV64-NEXT: andi a1, a1, 255 @@ -7547,7 +7547,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 16(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 15 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -7577,7 +7577,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 31(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 14 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -7607,7 +7607,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 30(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 13 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -7637,7 +7637,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 29(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 12 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -7667,7 +7667,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 28(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 11 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -7697,7 +7697,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 27(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 10 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -7727,7 +7727,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 26(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 9 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -7757,7 +7757,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 25(sp) ; LMULMAX1-RV64-NEXT: 
vslidedown.vi v26, v25, 8 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -7787,7 +7787,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 24(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 7 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -7817,7 +7817,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 23(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 6 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -7847,7 +7847,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 22(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 5 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -7877,7 +7877,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 21(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 4 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -7907,7 +7907,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 20(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 3 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -7937,7 +7937,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 19(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 2 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -7967,7 +7967,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 18(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v25, v25, 1 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v25 @@ -7997,7 +7997,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -56 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -56 ; LMULMAX1-RV64-NEXT: sb a1, 17(sp) ; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu ; LMULMAX1-RV64-NEXT: addi a1, sp, 16 @@ -8570,7 +8570,7 @@ ; LMULMAX2-RV64-NEXT: addi a5, a5, 257 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 32(sp) ; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e16, m2, ta, mu ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 15 @@ -8601,7 +8601,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 62(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 14 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -8631,7 +8631,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, 
a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 60(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 13 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -8661,7 +8661,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 58(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 12 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -8691,7 +8691,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 56(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 11 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -8721,7 +8721,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 54(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 10 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -8751,7 +8751,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 52(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 9 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -8781,7 +8781,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 50(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 8 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -8811,7 +8811,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 48(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 7 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -8841,7 +8841,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 46(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 6 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -8871,7 +8871,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 44(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 5 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -8901,7 +8901,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 42(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 4 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -8931,7 +8931,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 40(sp) ; 
LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 3 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -8961,7 +8961,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 38(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 2 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -8991,7 +8991,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 36(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v26, 1 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -9021,7 +9021,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a4 ; LMULMAX2-RV64-NEXT: mul a1, a1, a5 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -48 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX2-RV64-NEXT: sh a1, 34(sp) ; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu ; LMULMAX2-RV64-NEXT: addi a1, sp, 32 @@ -9575,7 +9575,7 @@ ; LMULMAX1-RV64-NEXT: addi a1, a1, 257 ; LMULMAX1-RV64-NEXT: mul a2, a2, a1 ; LMULMAX1-RV64-NEXT: srli a2, a2, 56 -; LMULMAX1-RV64-NEXT: addi a2, a2, -48 +; LMULMAX1-RV64-NEXT: addiw a2, a2, -48 ; LMULMAX1-RV64-NEXT: sh a2, 32(sp) ; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e16, m1, ta, mu ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 7 @@ -9606,7 +9606,7 @@ ; LMULMAX1-RV64-NEXT: and a2, a2, a5 ; LMULMAX1-RV64-NEXT: mul a2, a2, a1 ; LMULMAX1-RV64-NEXT: srli a2, a2, 56 -; LMULMAX1-RV64-NEXT: addi a2, a2, -48 +; LMULMAX1-RV64-NEXT: addiw a2, a2, -48 ; LMULMAX1-RV64-NEXT: sh a2, 46(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 6 ; LMULMAX1-RV64-NEXT: vmv.x.s a2, v27 @@ -9636,7 +9636,7 @@ ; LMULMAX1-RV64-NEXT: and a2, a2, a5 ; LMULMAX1-RV64-NEXT: mul a2, a2, a1 ; LMULMAX1-RV64-NEXT: srli a2, a2, 56 -; LMULMAX1-RV64-NEXT: addi a2, a2, -48 +; LMULMAX1-RV64-NEXT: addiw a2, a2, -48 ; LMULMAX1-RV64-NEXT: sh a2, 44(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 5 ; LMULMAX1-RV64-NEXT: vmv.x.s a2, v27 @@ -9666,7 +9666,7 @@ ; LMULMAX1-RV64-NEXT: and a2, a2, a5 ; LMULMAX1-RV64-NEXT: mul a2, a2, a1 ; LMULMAX1-RV64-NEXT: srli a2, a2, 56 -; LMULMAX1-RV64-NEXT: addi a2, a2, -48 +; LMULMAX1-RV64-NEXT: addiw a2, a2, -48 ; LMULMAX1-RV64-NEXT: sh a2, 42(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 4 ; LMULMAX1-RV64-NEXT: vmv.x.s a2, v27 @@ -9696,7 +9696,7 @@ ; LMULMAX1-RV64-NEXT: and a2, a2, a5 ; LMULMAX1-RV64-NEXT: mul a2, a2, a1 ; LMULMAX1-RV64-NEXT: srli a2, a2, 56 -; LMULMAX1-RV64-NEXT: addi a2, a2, -48 +; LMULMAX1-RV64-NEXT: addiw a2, a2, -48 ; LMULMAX1-RV64-NEXT: sh a2, 40(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 3 ; LMULMAX1-RV64-NEXT: vmv.x.s a2, v27 @@ -9726,7 +9726,7 @@ ; LMULMAX1-RV64-NEXT: and a2, a2, a5 ; LMULMAX1-RV64-NEXT: mul a2, a2, a1 ; LMULMAX1-RV64-NEXT: srli a2, a2, 56 -; LMULMAX1-RV64-NEXT: addi a2, a2, -48 +; LMULMAX1-RV64-NEXT: addiw a2, a2, -48 ; LMULMAX1-RV64-NEXT: sh a2, 38(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 2 ; LMULMAX1-RV64-NEXT: vmv.x.s a2, v27 @@ -9756,7 +9756,7 @@ ; LMULMAX1-RV64-NEXT: and a2, a2, a5 ; LMULMAX1-RV64-NEXT: mul a2, a2, a1 ; LMULMAX1-RV64-NEXT: srli a2, a2, 56 -; LMULMAX1-RV64-NEXT: addi a2, a2, -48 +; LMULMAX1-RV64-NEXT: addiw a2, a2, -48 ; LMULMAX1-RV64-NEXT: sh a2, 36(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v26, 1 ; LMULMAX1-RV64-NEXT: vmv.x.s a2, v26 @@ -9786,7 +9786,7 @@ ; LMULMAX1-RV64-NEXT: and a2, a2, a5 ; 
LMULMAX1-RV64-NEXT: mul a2, a2, a1 ; LMULMAX1-RV64-NEXT: srli a2, a2, 56 -; LMULMAX1-RV64-NEXT: addi a2, a2, -48 +; LMULMAX1-RV64-NEXT: addiw a2, a2, -48 ; LMULMAX1-RV64-NEXT: sh a2, 34(sp) ; LMULMAX1-RV64-NEXT: vmv.x.s a2, v25 ; LMULMAX1-RV64-NEXT: and a2, a2, a7 @@ -9815,7 +9815,7 @@ ; LMULMAX1-RV64-NEXT: and a2, a2, a5 ; LMULMAX1-RV64-NEXT: mul a2, a2, a1 ; LMULMAX1-RV64-NEXT: srli a2, a2, 56 -; LMULMAX1-RV64-NEXT: addi a2, a2, -48 +; LMULMAX1-RV64-NEXT: addiw a2, a2, -48 ; LMULMAX1-RV64-NEXT: sh a2, 16(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 7 ; LMULMAX1-RV64-NEXT: vmv.x.s a2, v26 @@ -9845,7 +9845,7 @@ ; LMULMAX1-RV64-NEXT: and a2, a2, a5 ; LMULMAX1-RV64-NEXT: mul a2, a2, a1 ; LMULMAX1-RV64-NEXT: srli a2, a2, 56 -; LMULMAX1-RV64-NEXT: addi a2, a2, -48 +; LMULMAX1-RV64-NEXT: addiw a2, a2, -48 ; LMULMAX1-RV64-NEXT: sh a2, 30(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 6 ; LMULMAX1-RV64-NEXT: vmv.x.s a2, v26 @@ -9875,7 +9875,7 @@ ; LMULMAX1-RV64-NEXT: and a2, a2, a5 ; LMULMAX1-RV64-NEXT: mul a2, a2, a1 ; LMULMAX1-RV64-NEXT: srli a2, a2, 56 -; LMULMAX1-RV64-NEXT: addi a2, a2, -48 +; LMULMAX1-RV64-NEXT: addiw a2, a2, -48 ; LMULMAX1-RV64-NEXT: sh a2, 28(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 5 ; LMULMAX1-RV64-NEXT: vmv.x.s a2, v26 @@ -9905,7 +9905,7 @@ ; LMULMAX1-RV64-NEXT: and a2, a2, a5 ; LMULMAX1-RV64-NEXT: mul a2, a2, a1 ; LMULMAX1-RV64-NEXT: srli a2, a2, 56 -; LMULMAX1-RV64-NEXT: addi a2, a2, -48 +; LMULMAX1-RV64-NEXT: addiw a2, a2, -48 ; LMULMAX1-RV64-NEXT: sh a2, 26(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 4 ; LMULMAX1-RV64-NEXT: vmv.x.s a2, v26 @@ -9935,7 +9935,7 @@ ; LMULMAX1-RV64-NEXT: and a2, a2, a5 ; LMULMAX1-RV64-NEXT: mul a2, a2, a1 ; LMULMAX1-RV64-NEXT: srli a2, a2, 56 -; LMULMAX1-RV64-NEXT: addi a2, a2, -48 +; LMULMAX1-RV64-NEXT: addiw a2, a2, -48 ; LMULMAX1-RV64-NEXT: sh a2, 24(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 3 ; LMULMAX1-RV64-NEXT: vmv.x.s a2, v26 @@ -9965,7 +9965,7 @@ ; LMULMAX1-RV64-NEXT: and a2, a2, a5 ; LMULMAX1-RV64-NEXT: mul a2, a2, a1 ; LMULMAX1-RV64-NEXT: srli a2, a2, 56 -; LMULMAX1-RV64-NEXT: addi a2, a2, -48 +; LMULMAX1-RV64-NEXT: addiw a2, a2, -48 ; LMULMAX1-RV64-NEXT: sh a2, 22(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 2 ; LMULMAX1-RV64-NEXT: vmv.x.s a2, v26 @@ -9995,7 +9995,7 @@ ; LMULMAX1-RV64-NEXT: and a2, a2, a5 ; LMULMAX1-RV64-NEXT: mul a2, a2, a1 ; LMULMAX1-RV64-NEXT: srli a2, a2, 56 -; LMULMAX1-RV64-NEXT: addi a2, a2, -48 +; LMULMAX1-RV64-NEXT: addiw a2, a2, -48 ; LMULMAX1-RV64-NEXT: sh a2, 20(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v25, v25, 1 ; LMULMAX1-RV64-NEXT: vmv.x.s a2, v25 @@ -10025,7 +10025,7 @@ ; LMULMAX1-RV64-NEXT: and a2, a2, a5 ; LMULMAX1-RV64-NEXT: mul a1, a2, a1 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -48 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -48 ; LMULMAX1-RV64-NEXT: sh a1, 18(sp) ; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; LMULMAX1-RV64-NEXT: addi a1, sp, 16 @@ -10355,7 +10355,7 @@ ; LMULMAX2-RV64-NEXT: addi a4, a4, 257 ; LMULMAX2-RV64-NEXT: mul a5, a5, a4 ; LMULMAX2-RV64-NEXT: srli a5, a5, 56 -; LMULMAX2-RV64-NEXT: addi a5, a5, -32 +; LMULMAX2-RV64-NEXT: addiw a5, a5, -32 ; LMULMAX2-RV64-NEXT: sw a5, 32(sp) ; LMULMAX2-RV64-NEXT: vsetivli zero, 1, e32, m2, ta, mu ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 7 @@ -10387,7 +10387,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -32 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -32 ; 
LMULMAX2-RV64-NEXT: sw a1, 60(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 6 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -10418,7 +10418,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -32 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX2-RV64-NEXT: sw a1, 56(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 5 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -10449,7 +10449,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -32 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX2-RV64-NEXT: sw a1, 52(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 4 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -10480,7 +10480,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -32 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX2-RV64-NEXT: sw a1, 48(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 3 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -10511,7 +10511,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -32 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX2-RV64-NEXT: sw a1, 44(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v28, v26, 2 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v28 @@ -10542,7 +10542,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -32 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX2-RV64-NEXT: sw a1, 40(sp) ; LMULMAX2-RV64-NEXT: vslidedown.vi v26, v26, 1 ; LMULMAX2-RV64-NEXT: vmv.x.s a1, v26 @@ -10573,7 +10573,7 @@ ; LMULMAX2-RV64-NEXT: and a1, a1, a3 ; LMULMAX2-RV64-NEXT: mul a1, a1, a4 ; LMULMAX2-RV64-NEXT: srli a1, a1, 56 -; LMULMAX2-RV64-NEXT: addi a1, a1, -32 +; LMULMAX2-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX2-RV64-NEXT: sw a1, 36(sp) ; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; LMULMAX2-RV64-NEXT: addi a1, sp, 32 @@ -10884,7 +10884,7 @@ ; LMULMAX1-RV64-NEXT: addi a5, a5, 257 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -32 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX1-RV64-NEXT: sw a1, 32(sp) ; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, mu ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 3 @@ -10916,7 +10916,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -32 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX1-RV64-NEXT: sw a1, 44(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v26, 2 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v27 @@ -10947,7 +10947,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -32 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX1-RV64-NEXT: sw a1, 40(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v26, 1 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -10978,7 +10978,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -32 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX1-RV64-NEXT: sw a1, 36(sp) ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v25 ; LMULMAX1-RV64-NEXT: srliw a2, a1, 1 @@ -11008,7 +11008,7 @@ ; 
LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -32 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX1-RV64-NEXT: sw a1, 16(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 3 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -11039,7 +11039,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -32 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX1-RV64-NEXT: sw a1, 28(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 2 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v26 @@ -11070,7 +11070,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -32 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX1-RV64-NEXT: sw a1, 24(sp) ; LMULMAX1-RV64-NEXT: vslidedown.vi v25, v25, 1 ; LMULMAX1-RV64-NEXT: vmv.x.s a1, v25 @@ -11101,7 +11101,7 @@ ; LMULMAX1-RV64-NEXT: and a1, a1, a4 ; LMULMAX1-RV64-NEXT: mul a1, a1, a5 ; LMULMAX1-RV64-NEXT: srli a1, a1, 56 -; LMULMAX1-RV64-NEXT: addi a1, a1, -32 +; LMULMAX1-RV64-NEXT: addiw a1, a1, -32 ; LMULMAX1-RV64-NEXT: sw a1, 20(sp) ; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; LMULMAX1-RV64-NEXT: addi a1, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/shadowcallstack.ll b/llvm/test/CodeGen/RISCV/shadowcallstack.ll --- a/llvm/test/CodeGen/RISCV/shadowcallstack.ll +++ b/llvm/test/CodeGen/RISCV/shadowcallstack.ll @@ -121,8 +121,8 @@ ; RV64-NEXT: call bar@plt ; RV64-NEXT: mv s0, a0 ; RV64-NEXT: call bar@plt -; RV64-NEXT: add a1, s3, s1 -; RV64-NEXT: add a0, s0, a0 +; RV64-NEXT: addw a1, s3, s1 +; RV64-NEXT: addw a0, s0, a0 ; RV64-NEXT: addw a0, a1, a0 ; RV64-NEXT: ld s3, 0(sp) # 8-byte Folded Reload ; RV64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll b/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll --- a/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll +++ b/llvm/test/CodeGen/RISCV/shift-masked-shamt.ll @@ -112,7 +112,7 @@ ; ; RV64I-LABEL: sll_redundant_mask_zeros: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a1, a1, 1 +; RV64I-NEXT: slliw a1, a1, 1 ; RV64I-NEXT: sllw a0, a0, a1 ; RV64I-NEXT: ret %1 = shl i32 %b, 1 @@ -130,7 +130,7 @@ ; ; RV64I-LABEL: srl_redundant_mask_zeros: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a1, a1, 2 +; RV64I-NEXT: slliw a1, a1, 2 ; RV64I-NEXT: srlw a0, a0, a1 ; RV64I-NEXT: ret %1 = shl i32 %b, 2 @@ -148,7 +148,7 @@ ; ; RV64I-LABEL: sra_redundant_mask_zeros: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a1, a1, 3 +; RV64I-NEXT: slliw a1, a1, 3 ; RV64I-NEXT: sraw a0, a0, a1 ; RV64I-NEXT: ret %1 = shl i32 %b, 3 diff --git a/llvm/test/CodeGen/RISCV/srem-lkk.ll b/llvm/test/CodeGen/RISCV/srem-lkk.ll --- a/llvm/test/CodeGen/RISCV/srem-lkk.ll +++ b/llvm/test/CodeGen/RISCV/srem-lkk.ll @@ -54,9 +54,9 @@ ; RV64IM-NEXT: addw a1, a1, a0 ; RV64IM-NEXT: srliw a2, a1, 31 ; RV64IM-NEXT: srli a1, a1, 6 -; RV64IM-NEXT: add a1, a1, a2 +; RV64IM-NEXT: addw a1, a1, a2 ; RV64IM-NEXT: addi a2, zero, 95 -; RV64IM-NEXT: mul a1, a1, a2 +; RV64IM-NEXT: mulw a1, a1, a2 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret %1 = srem i32 %x, 95 @@ -107,9 +107,9 @@ ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: srli a2, a1, 63 ; RV64IM-NEXT: srai a1, a1, 40 -; RV64IM-NEXT: add a1, a1, a2 +; RV64IM-NEXT: addw a1, a1, a2 ; RV64IM-NEXT: addi a2, zero, 1060 -; RV64IM-NEXT: mul a1, a1, a2 +; RV64IM-NEXT: mulw a1, a1, a2 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret %1 = 
srem i32 %x, 1060 @@ -160,9 +160,9 @@ ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: srli a2, a1, 63 ; RV64IM-NEXT: srai a1, a1, 40 -; RV64IM-NEXT: add a1, a1, a2 +; RV64IM-NEXT: addw a1, a1, a2 ; RV64IM-NEXT: addi a2, zero, -723 -; RV64IM-NEXT: mul a1, a1, a2 +; RV64IM-NEXT: mulw a1, a1, a2 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret %1 = srem i32 %x, -723 @@ -216,10 +216,10 @@ ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: srli a2, a1, 63 ; RV64IM-NEXT: srai a1, a1, 40 -; RV64IM-NEXT: add a1, a1, a2 +; RV64IM-NEXT: addw a1, a1, a2 ; RV64IM-NEXT: lui a2, 1048570 ; RV64IM-NEXT: addiw a2, a2, 1595 -; RV64IM-NEXT: mul a1, a1, a2 +; RV64IM-NEXT: mulw a1, a1, a2 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret %1 = srem i32 %x, -22981 @@ -292,14 +292,13 @@ ; RV64IM-NEXT: addiw a2, a2, 389 ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: srli a1, a1, 32 -; RV64IM-NEXT: addw a2, a1, a0 -; RV64IM-NEXT: srliw a2, a2, 31 -; RV64IM-NEXT: add a1, a1, a0 +; RV64IM-NEXT: addw a1, a1, a0 +; RV64IM-NEXT: srliw a2, a1, 31 ; RV64IM-NEXT: sraiw a1, a1, 6 -; RV64IM-NEXT: add a1, a1, a2 +; RV64IM-NEXT: addw a1, a1, a2 ; RV64IM-NEXT: addi a2, zero, 95 -; RV64IM-NEXT: mul a2, a1, a2 -; RV64IM-NEXT: sub a0, a0, a2 +; RV64IM-NEXT: mulw a2, a1, a2 +; RV64IM-NEXT: subw a0, a0, a2 ; RV64IM-NEXT: addw a0, a0, a1 ; RV64IM-NEXT: ret %1 = srem i32 %x, 95 diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll @@ -462,11 +462,11 @@ ; RV64-NEXT: and a3, a3, a4 ; RV64-NEXT: srli a3, a3, 32 ; RV64-NEXT: sb a3, 12(s0) -; RV64-NEXT: slli a1, a1, 2 +; RV64-NEXT: slliw a1, a1, 2 ; RV64-NEXT: srli a3, s4, 31 ; RV64-NEXT: and a2, a2, a3 ; RV64-NEXT: srli a4, a2, 31 -; RV64-NEXT: sub a1, a4, a1 +; RV64-NEXT: subw a1, a4, a1 ; RV64-NEXT: sw a1, 8(s0) ; RV64-NEXT: and a0, a0, a3 ; RV64-NEXT: slli a1, a2, 33 @@ -649,11 +649,11 @@ ; RV64M-NEXT: and a4, a4, a5 ; RV64M-NEXT: srli a4, a4, 32 ; RV64M-NEXT: sb a4, 12(a0) -; RV64M-NEXT: slli a2, a2, 2 +; RV64M-NEXT: slliw a2, a2, 2 ; RV64M-NEXT: srli a4, a6, 31 ; RV64M-NEXT: and a1, a1, a4 ; RV64M-NEXT: srli a5, a1, 31 -; RV64M-NEXT: sub a2, a5, a2 +; RV64M-NEXT: subw a2, a5, a2 ; RV64M-NEXT: sw a2, 8(a0) ; RV64M-NEXT: slli a1, a1, 33 ; RV64M-NEXT: and a2, a3, a4 diff --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll --- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll +++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll @@ -165,10 +165,10 @@ ; RV64IM-NEXT: add a5, a5, a1 ; RV64IM-NEXT: srli a2, a5, 63 ; RV64IM-NEXT: srli a5, a5, 6 -; RV64IM-NEXT: add a2, a5, a2 +; RV64IM-NEXT: addw a2, a5, a2 ; RV64IM-NEXT: addi a5, zero, 95 -; RV64IM-NEXT: mul a2, a2, a5 -; RV64IM-NEXT: sub a1, a1, a2 +; RV64IM-NEXT: mulw a2, a2, a5 +; RV64IM-NEXT: subw a1, a1, a2 ; RV64IM-NEXT: lui a2, 777976 ; RV64IM-NEXT: addiw a2, a2, -1057 ; RV64IM-NEXT: slli a2, a2, 15 @@ -180,10 +180,10 @@ ; RV64IM-NEXT: sub a2, a2, a4 ; RV64IM-NEXT: srli a5, a2, 63 ; RV64IM-NEXT: srli a2, a2, 6 -; RV64IM-NEXT: add a2, a2, a5 +; RV64IM-NEXT: addw a2, a2, a5 ; RV64IM-NEXT: addi a5, zero, -124 -; RV64IM-NEXT: mul a2, a2, a5 -; RV64IM-NEXT: sub a2, a4, a2 +; RV64IM-NEXT: mulw a2, a2, a5 +; RV64IM-NEXT: subw a2, a4, a2 ; RV64IM-NEXT: lui a4, 2675 ; RV64IM-NEXT: addiw a4, a4, -251 ; RV64IM-NEXT: slli a4, a4, 13 @@ -195,10 +195,10 @@ ; RV64IM-NEXT: mulh a4, a3, a4 ; RV64IM-NEXT: srli a5, a4, 63 ; RV64IM-NEXT: srli a4, a4, 5 
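; NOTE: the addw/mulw/subw changes in this hunk illustrate the bottom-up cascade
; that hasAllWUsers enables: the sub feeding the sh store is selected as subw first
; (sh only reads the low 16 bits of its value operand, and SB/SH/SW are accepted
; users when the node is the stored value), the mul then sees an already-selected
; SUBW user and becomes mulw, and the add in turn sees a MULW user and becomes addw.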
-; RV64IM-NEXT: add a4, a4, a5 +; RV64IM-NEXT: addw a4, a4, a5 ; RV64IM-NEXT: addi a5, zero, 98 -; RV64IM-NEXT: mul a4, a4, a5 -; RV64IM-NEXT: sub a3, a3, a4 +; RV64IM-NEXT: mulw a4, a4, a5 +; RV64IM-NEXT: subw a3, a3, a4 ; RV64IM-NEXT: lui a4, 1040212 ; RV64IM-NEXT: addiw a4, a4, 1977 ; RV64IM-NEXT: slli a4, a4, 12 @@ -210,10 +210,10 @@ ; RV64IM-NEXT: mulh a4, a6, a4 ; RV64IM-NEXT: srli a5, a4, 63 ; RV64IM-NEXT: srli a4, a4, 7 -; RV64IM-NEXT: add a4, a4, a5 +; RV64IM-NEXT: addw a4, a4, a5 ; RV64IM-NEXT: addi a5, zero, -1003 -; RV64IM-NEXT: mul a4, a4, a5 -; RV64IM-NEXT: sub a4, a6, a4 +; RV64IM-NEXT: mulw a4, a4, a5 +; RV64IM-NEXT: subw a4, a6, a4 ; RV64IM-NEXT: sh a4, 6(a0) ; RV64IM-NEXT: sh a3, 4(a0) ; RV64IM-NEXT: sh a2, 2(a0) @@ -373,31 +373,31 @@ ; RV64IM-NEXT: add a2, a2, a1 ; RV64IM-NEXT: srli a3, a2, 63 ; RV64IM-NEXT: srli a2, a2, 6 -; RV64IM-NEXT: add a2, a2, a3 +; RV64IM-NEXT: addw a2, a2, a3 ; RV64IM-NEXT: addi a3, zero, 95 -; RV64IM-NEXT: mul a2, a2, a3 -; RV64IM-NEXT: sub t0, a1, a2 +; RV64IM-NEXT: mulw a2, a2, a3 +; RV64IM-NEXT: subw t0, a1, a2 ; RV64IM-NEXT: mulh a2, a4, a5 ; RV64IM-NEXT: add a2, a2, a4 ; RV64IM-NEXT: srli a1, a2, 63 ; RV64IM-NEXT: srli a2, a2, 6 -; RV64IM-NEXT: add a1, a2, a1 -; RV64IM-NEXT: mul a1, a1, a3 -; RV64IM-NEXT: sub a1, a4, a1 +; RV64IM-NEXT: addw a1, a2, a1 +; RV64IM-NEXT: mulw a1, a1, a3 +; RV64IM-NEXT: subw a1, a4, a1 ; RV64IM-NEXT: mulh a2, a7, a5 ; RV64IM-NEXT: add a2, a2, a7 ; RV64IM-NEXT: srli a4, a2, 63 ; RV64IM-NEXT: srli a2, a2, 6 -; RV64IM-NEXT: add a2, a2, a4 -; RV64IM-NEXT: mul a2, a2, a3 -; RV64IM-NEXT: sub a2, a7, a2 +; RV64IM-NEXT: addw a2, a2, a4 +; RV64IM-NEXT: mulw a2, a2, a3 +; RV64IM-NEXT: subw a2, a7, a2 ; RV64IM-NEXT: mulh a4, a6, a5 ; RV64IM-NEXT: add a4, a4, a6 ; RV64IM-NEXT: srli a5, a4, 63 ; RV64IM-NEXT: srli a4, a4, 6 -; RV64IM-NEXT: add a4, a4, a5 -; RV64IM-NEXT: mul a3, a4, a3 -; RV64IM-NEXT: sub a3, a6, a3 +; RV64IM-NEXT: addw a4, a4, a5 +; RV64IM-NEXT: mulw a3, a4, a3 +; RV64IM-NEXT: subw a3, a6, a3 ; RV64IM-NEXT: sh a3, 6(a0) ; RV64IM-NEXT: sh a2, 4(a0) ; RV64IM-NEXT: sh a1, 2(a0) @@ -579,10 +579,10 @@ ; RV64I-NEXT: addi a1, zero, 95 ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __divdi3@plt -; RV64I-NEXT: add a0, s8, a0 -; RV64I-NEXT: add a1, s7, s1 -; RV64I-NEXT: add a2, s6, s4 -; RV64I-NEXT: add a3, s5, s9 +; RV64I-NEXT: addw a0, s8, a0 +; RV64I-NEXT: addw a1, s7, s1 +; RV64I-NEXT: addw a2, s6, s4 +; RV64I-NEXT: addw a3, s5, s9 ; RV64I-NEXT: sh a3, 6(s0) ; RV64I-NEXT: sh a2, 4(s0) ; RV64I-NEXT: sh a1, 2(s0) @@ -619,35 +619,35 @@ ; RV64IM-NEXT: add a2, a2, a1 ; RV64IM-NEXT: srli a3, a2, 63 ; RV64IM-NEXT: srai a2, a2, 6 -; RV64IM-NEXT: add t3, a2, a3 +; RV64IM-NEXT: addw t3, a2, a3 ; RV64IM-NEXT: addi t0, zero, 95 -; RV64IM-NEXT: mul a3, t3, t0 -; RV64IM-NEXT: sub t1, a1, a3 +; RV64IM-NEXT: mulw a3, t3, t0 +; RV64IM-NEXT: subw t1, a1, a3 ; RV64IM-NEXT: mulh a3, a4, a5 ; RV64IM-NEXT: add a3, a3, a4 ; RV64IM-NEXT: srli a1, a3, 63 ; RV64IM-NEXT: srai a3, a3, 6 -; RV64IM-NEXT: add a1, a3, a1 -; RV64IM-NEXT: mul a3, a1, t0 -; RV64IM-NEXT: sub t2, a4, a3 +; RV64IM-NEXT: addw a1, a3, a1 +; RV64IM-NEXT: mulw a3, a1, t0 +; RV64IM-NEXT: subw t2, a4, a3 ; RV64IM-NEXT: mulh a4, a7, a5 ; RV64IM-NEXT: add a4, a4, a7 ; RV64IM-NEXT: srli a3, a4, 63 ; RV64IM-NEXT: srai a4, a4, 6 -; RV64IM-NEXT: add a3, a4, a3 -; RV64IM-NEXT: mul a4, a3, t0 -; RV64IM-NEXT: sub a4, a7, a4 +; RV64IM-NEXT: addw a3, a4, a3 +; RV64IM-NEXT: mulw a4, a3, t0 +; RV64IM-NEXT: subw a4, a7, a4 ; RV64IM-NEXT: mulh a5, a6, a5 ; RV64IM-NEXT: add a5, a5, a6 ; 
RV64IM-NEXT: srli a2, a5, 63 ; RV64IM-NEXT: srai a5, a5, 6 -; RV64IM-NEXT: add a2, a5, a2 -; RV64IM-NEXT: mul a5, a2, t0 -; RV64IM-NEXT: sub a5, a6, a5 -; RV64IM-NEXT: add a2, a5, a2 -; RV64IM-NEXT: add a3, a4, a3 -; RV64IM-NEXT: add a1, t2, a1 -; RV64IM-NEXT: add a4, t1, t3 +; RV64IM-NEXT: addw a2, a5, a2 +; RV64IM-NEXT: mulw a5, a2, t0 +; RV64IM-NEXT: subw a5, a6, a5 +; RV64IM-NEXT: addw a2, a5, a2 +; RV64IM-NEXT: addw a3, a4, a3 +; RV64IM-NEXT: addw a1, t2, a1 +; RV64IM-NEXT: addw a4, t1, t3 ; RV64IM-NEXT: sh a4, 6(a0) ; RV64IM-NEXT: sh a1, 4(a0) ; RV64IM-NEXT: sh a3, 2(a0) @@ -750,15 +750,15 @@ ; RV64I-NEXT: srli a4, a2, 58 ; RV64I-NEXT: add a4, a2, a4 ; RV64I-NEXT: andi a4, a4, -64 -; RV64I-NEXT: sub s2, a2, a4 +; RV64I-NEXT: subw s2, a2, a4 ; RV64I-NEXT: srli a2, a1, 59 ; RV64I-NEXT: add a2, a1, a2 ; RV64I-NEXT: andi a2, a2, -32 -; RV64I-NEXT: sub s3, a1, a2 +; RV64I-NEXT: subw s3, a1, a2 ; RV64I-NEXT: srli a1, a3, 61 ; RV64I-NEXT: add a1, a3, a1 ; RV64I-NEXT: andi a1, a1, -8 -; RV64I-NEXT: sub s1, a3, a1 +; RV64I-NEXT: subw s1, a3, a1 ; RV64I-NEXT: addi a1, zero, 95 ; RV64I-NEXT: call __moddi3@plt ; RV64I-NEXT: sh a0, 6(s0) @@ -791,22 +791,22 @@ ; RV64IM-NEXT: add a5, a5, a1 ; RV64IM-NEXT: srli a2, a5, 63 ; RV64IM-NEXT: srli a5, a5, 6 -; RV64IM-NEXT: add a2, a5, a2 +; RV64IM-NEXT: addw a2, a5, a2 ; RV64IM-NEXT: addi a5, zero, 95 -; RV64IM-NEXT: mul a2, a2, a5 -; RV64IM-NEXT: sub a1, a1, a2 +; RV64IM-NEXT: mulw a2, a2, a5 +; RV64IM-NEXT: subw a1, a1, a2 ; RV64IM-NEXT: srli a2, a4, 58 ; RV64IM-NEXT: add a2, a4, a2 ; RV64IM-NEXT: andi a2, a2, -64 -; RV64IM-NEXT: sub a2, a4, a2 +; RV64IM-NEXT: subw a2, a4, a2 ; RV64IM-NEXT: srli a4, a3, 59 ; RV64IM-NEXT: add a4, a3, a4 ; RV64IM-NEXT: andi a4, a4, -32 -; RV64IM-NEXT: sub a3, a3, a4 +; RV64IM-NEXT: subw a3, a3, a4 ; RV64IM-NEXT: srli a4, a6, 61 ; RV64IM-NEXT: add a4, a6, a4 ; RV64IM-NEXT: andi a4, a4, -8 -; RV64IM-NEXT: sub a4, a6, a4 +; RV64IM-NEXT: subw a4, a6, a4 ; RV64IM-NEXT: sh a4, 4(a0) ; RV64IM-NEXT: sh a3, 2(a0) ; RV64IM-NEXT: sh a2, 0(a0) @@ -948,10 +948,10 @@ ; RV64IM-NEXT: add a4, a4, a1 ; RV64IM-NEXT: srli a5, a4, 63 ; RV64IM-NEXT: srli a4, a4, 4 -; RV64IM-NEXT: add a4, a4, a5 +; RV64IM-NEXT: addw a4, a4, a5 ; RV64IM-NEXT: addi a5, zero, 23 -; RV64IM-NEXT: mul a4, a4, a5 -; RV64IM-NEXT: sub a1, a1, a4 +; RV64IM-NEXT: mulw a4, a4, a5 +; RV64IM-NEXT: subw a1, a1, a4 ; RV64IM-NEXT: lui a4, 6413 ; RV64IM-NEXT: addiw a4, a4, 1265 ; RV64IM-NEXT: slli a4, a4, 13 @@ -963,10 +963,10 @@ ; RV64IM-NEXT: mulh a4, a3, a4 ; RV64IM-NEXT: srli a5, a4, 63 ; RV64IM-NEXT: srli a4, a4, 8 -; RV64IM-NEXT: add a4, a4, a5 +; RV64IM-NEXT: addw a4, a4, a5 ; RV64IM-NEXT: addi a5, zero, 654 -; RV64IM-NEXT: mul a4, a4, a5 -; RV64IM-NEXT: sub a3, a3, a4 +; RV64IM-NEXT: mulw a4, a4, a5 +; RV64IM-NEXT: subw a3, a3, a4 ; RV64IM-NEXT: lui a4, 12375 ; RV64IM-NEXT: addiw a4, a4, -575 ; RV64IM-NEXT: slli a4, a4, 12 @@ -978,11 +978,11 @@ ; RV64IM-NEXT: mulh a4, a2, a4 ; RV64IM-NEXT: srli a5, a4, 63 ; RV64IM-NEXT: srli a4, a4, 11 -; RV64IM-NEXT: add a4, a4, a5 +; RV64IM-NEXT: addw a4, a4, a5 ; RV64IM-NEXT: lui a5, 1 ; RV64IM-NEXT: addiw a5, a5, 1327 -; RV64IM-NEXT: mul a4, a4, a5 -; RV64IM-NEXT: sub a2, a2, a4 +; RV64IM-NEXT: mulw a4, a4, a5 +; RV64IM-NEXT: subw a2, a2, a4 ; RV64IM-NEXT: sh zero, 0(a0) ; RV64IM-NEXT: sh a2, 6(a0) ; RV64IM-NEXT: sh a3, 2(a0) @@ -1082,7 +1082,7 @@ ; RV64I-NEXT: add a1, a2, a1 ; RV64I-NEXT: lui a3, 8 ; RV64I-NEXT: and a1, a1, a3 -; RV64I-NEXT: sub s3, a2, a1 +; RV64I-NEXT: subw s3, a2, a1 ; RV64I-NEXT: addi a1, zero, 23 ; RV64I-NEXT: 
call __moddi3@plt ; RV64I-NEXT: mv s1, a0 @@ -1119,10 +1119,10 @@ ; RV64IM-NEXT: add a4, a4, a1 ; RV64IM-NEXT: srli a5, a4, 63 ; RV64IM-NEXT: srli a4, a4, 4 -; RV64IM-NEXT: add a4, a4, a5 +; RV64IM-NEXT: addw a4, a4, a5 ; RV64IM-NEXT: addi a5, zero, 23 -; RV64IM-NEXT: mul a4, a4, a5 -; RV64IM-NEXT: sub a1, a1, a4 +; RV64IM-NEXT: mulw a4, a4, a5 +; RV64IM-NEXT: subw a1, a1, a4 ; RV64IM-NEXT: lui a4, 12375 ; RV64IM-NEXT: addiw a4, a4, -575 ; RV64IM-NEXT: slli a4, a4, 12 @@ -1134,16 +1134,16 @@ ; RV64IM-NEXT: mulh a4, a3, a4 ; RV64IM-NEXT: srli a5, a4, 63 ; RV64IM-NEXT: srli a4, a4, 11 -; RV64IM-NEXT: add a4, a4, a5 +; RV64IM-NEXT: addw a4, a4, a5 ; RV64IM-NEXT: lui a5, 1 ; RV64IM-NEXT: addiw a5, a5, 1327 -; RV64IM-NEXT: mul a4, a4, a5 -; RV64IM-NEXT: sub a3, a3, a4 +; RV64IM-NEXT: mulw a4, a4, a5 +; RV64IM-NEXT: subw a3, a3, a4 ; RV64IM-NEXT: srli a4, a2, 49 ; RV64IM-NEXT: add a4, a2, a4 ; RV64IM-NEXT: lui a5, 8 ; RV64IM-NEXT: and a4, a4, a5 -; RV64IM-NEXT: sub a2, a2, a4 +; RV64IM-NEXT: subw a2, a2, a4 ; RV64IM-NEXT: sh zero, 0(a0) ; RV64IM-NEXT: sh a2, 2(a0) ; RV64IM-NEXT: sh a3, 6(a0) diff --git a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll --- a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll +++ b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll @@ -25,7 +25,7 @@ ; ; RV64I-LABEL: func32: ; RV64I: # %bb.0: -; RV64I-NEXT: mul a1, a1, a2 +; RV64I-NEXT: mulw a1, a1, a2 ; RV64I-NEXT: addw a1, a0, a1 ; RV64I-NEXT: sext.w a2, a0 ; RV64I-NEXT: addi a0, zero, -1 diff --git a/llvm/test/CodeGen/RISCV/urem-lkk.ll b/llvm/test/CodeGen/RISCV/urem-lkk.ll --- a/llvm/test/CodeGen/RISCV/urem-lkk.ll +++ b/llvm/test/CodeGen/RISCV/urem-lkk.ll @@ -53,12 +53,12 @@ ; RV64IM-NEXT: addiw a2, a2, 777 ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: srli a1, a1, 32 -; RV64IM-NEXT: sub a2, a0, a1 +; RV64IM-NEXT: subw a2, a0, a1 ; RV64IM-NEXT: srliw a2, a2, 1 ; RV64IM-NEXT: add a1, a2, a1 ; RV64IM-NEXT: srli a1, a1, 6 ; RV64IM-NEXT: addi a2, zero, 95 -; RV64IM-NEXT: mul a1, a1, a2 +; RV64IM-NEXT: mulw a1, a1, a2 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret %1 = urem i32 %x, 95 @@ -110,7 +110,7 @@ ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: srli a1, a1, 42 ; RV64IM-NEXT: addi a2, zero, 1060 -; RV64IM-NEXT: mul a1, a1, a2 +; RV64IM-NEXT: mulw a1, a1, a2 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret %1 = urem i32 %x, 1060 @@ -185,13 +185,13 @@ ; RV64IM-NEXT: addiw a2, a2, 777 ; RV64IM-NEXT: mul a1, a1, a2 ; RV64IM-NEXT: srli a1, a1, 32 -; RV64IM-NEXT: sub a2, a0, a1 +; RV64IM-NEXT: subw a2, a0, a1 ; RV64IM-NEXT: srliw a2, a2, 1 ; RV64IM-NEXT: add a1, a2, a1 ; RV64IM-NEXT: srli a1, a1, 6 ; RV64IM-NEXT: addi a2, zero, 95 -; RV64IM-NEXT: mul a2, a1, a2 -; RV64IM-NEXT: sub a0, a0, a2 +; RV64IM-NEXT: mulw a2, a1, a2 +; RV64IM-NEXT: subw a0, a0, a2 ; RV64IM-NEXT: addw a0, a0, a1 ; RV64IM-NEXT: ret %1 = urem i32 %x, 95 diff --git a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll --- a/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll +++ b/llvm/test/CodeGen/RISCV/urem-vector-lkk.ll @@ -162,8 +162,8 @@ ; RV64IM-NEXT: add a2, a2, a5 ; RV64IM-NEXT: srli a2, a2, 6 ; RV64IM-NEXT: addi a5, zero, 95 -; RV64IM-NEXT: mul a2, a2, a5 -; RV64IM-NEXT: sub a1, a1, a2 +; RV64IM-NEXT: mulw a2, a2, a5 +; RV64IM-NEXT: subw a1, a1, a2 ; RV64IM-NEXT: srli a2, a4, 2 ; RV64IM-NEXT: lui a5, 264 ; RV64IM-NEXT: addiw a5, a5, 1057 @@ -176,8 +176,8 @@ ; RV64IM-NEXT: mulhu a2, a2, a5 ; RV64IM-NEXT: srli a2, a2, 3 ; RV64IM-NEXT: addi a5, zero, 124 -; RV64IM-NEXT: mul a2, a2, a5 -; 
RV64IM-NEXT: sub a2, a4, a2 +; RV64IM-NEXT: mulw a2, a2, a5 +; RV64IM-NEXT: subw a2, a4, a2 ; RV64IM-NEXT: srli a4, a3, 1 ; RV64IM-NEXT: lui a5, 2675 ; RV64IM-NEXT: addiw a5, a5, -251 @@ -190,8 +190,8 @@ ; RV64IM-NEXT: mulhu a4, a4, a5 ; RV64IM-NEXT: srli a4, a4, 4 ; RV64IM-NEXT: addi a5, zero, 98 -; RV64IM-NEXT: mul a4, a4, a5 -; RV64IM-NEXT: sub a3, a3, a4 +; RV64IM-NEXT: mulw a4, a4, a5 +; RV64IM-NEXT: subw a3, a3, a4 ; RV64IM-NEXT: lui a4, 8364 ; RV64IM-NEXT: addiw a4, a4, -1977 ; RV64IM-NEXT: slli a4, a4, 12 @@ -203,8 +203,8 @@ ; RV64IM-NEXT: mulhu a4, a6, a4 ; RV64IM-NEXT: srli a4, a4, 7 ; RV64IM-NEXT: addi a5, zero, 1003 -; RV64IM-NEXT: mul a4, a4, a5 -; RV64IM-NEXT: sub a4, a6, a4 +; RV64IM-NEXT: mulw a4, a4, a5 +; RV64IM-NEXT: subw a4, a6, a4 ; RV64IM-NEXT: sh a4, 6(a0) ; RV64IM-NEXT: sh a3, 4(a0) ; RV64IM-NEXT: sh a2, 2(a0) @@ -366,29 +366,29 @@ ; RV64IM-NEXT: add a2, a3, a2 ; RV64IM-NEXT: srli a2, a2, 6 ; RV64IM-NEXT: addi a3, zero, 95 -; RV64IM-NEXT: mul a2, a2, a3 -; RV64IM-NEXT: sub t0, a1, a2 +; RV64IM-NEXT: mulw a2, a2, a3 +; RV64IM-NEXT: subw t0, a1, a2 ; RV64IM-NEXT: mulhu a2, a4, a5 ; RV64IM-NEXT: sub a1, a4, a2 ; RV64IM-NEXT: srli a1, a1, 1 ; RV64IM-NEXT: add a1, a1, a2 ; RV64IM-NEXT: srli a1, a1, 6 -; RV64IM-NEXT: mul a1, a1, a3 -; RV64IM-NEXT: sub a1, a4, a1 +; RV64IM-NEXT: mulw a1, a1, a3 +; RV64IM-NEXT: subw a1, a4, a1 ; RV64IM-NEXT: mulhu a2, a7, a5 ; RV64IM-NEXT: sub a4, a7, a2 ; RV64IM-NEXT: srli a4, a4, 1 ; RV64IM-NEXT: add a2, a4, a2 ; RV64IM-NEXT: srli a2, a2, 6 -; RV64IM-NEXT: mul a2, a2, a3 -; RV64IM-NEXT: sub a2, a7, a2 +; RV64IM-NEXT: mulw a2, a2, a3 +; RV64IM-NEXT: subw a2, a7, a2 ; RV64IM-NEXT: mulhu a4, a6, a5 ; RV64IM-NEXT: sub a5, a6, a4 ; RV64IM-NEXT: srli a5, a5, 1 ; RV64IM-NEXT: add a4, a5, a4 ; RV64IM-NEXT: srli a4, a4, 6 -; RV64IM-NEXT: mul a3, a4, a3 -; RV64IM-NEXT: sub a3, a6, a3 +; RV64IM-NEXT: mulw a3, a4, a3 +; RV64IM-NEXT: subw a3, a6, a3 ; RV64IM-NEXT: sh a3, 6(a0) ; RV64IM-NEXT: sh a2, 4(a0) ; RV64IM-NEXT: sh a1, 2(a0) @@ -570,10 +570,10 @@ ; RV64I-NEXT: addi a1, zero, 95 ; RV64I-NEXT: mv a0, s2 ; RV64I-NEXT: call __udivdi3@plt -; RV64I-NEXT: add a0, s8, a0 -; RV64I-NEXT: add a1, s7, s1 -; RV64I-NEXT: add a2, s6, s4 -; RV64I-NEXT: add a3, s5, s9 +; RV64I-NEXT: addw a0, s8, a0 +; RV64I-NEXT: addw a1, s7, s1 +; RV64I-NEXT: addw a2, s6, s4 +; RV64I-NEXT: addw a3, s5, s9 ; RV64I-NEXT: sh a3, 6(s0) ; RV64I-NEXT: sh a2, 4(s0) ; RV64I-NEXT: sh a1, 2(s0) @@ -612,33 +612,33 @@ ; RV64IM-NEXT: add a2, a3, a2 ; RV64IM-NEXT: srli t3, a2, 6 ; RV64IM-NEXT: addi t0, zero, 95 -; RV64IM-NEXT: mul a3, t3, t0 -; RV64IM-NEXT: sub t1, a1, a3 +; RV64IM-NEXT: mulw a3, t3, t0 +; RV64IM-NEXT: subw t1, a1, a3 ; RV64IM-NEXT: mulhu a3, a4, a5 ; RV64IM-NEXT: sub a1, a4, a3 ; RV64IM-NEXT: srli a1, a1, 1 ; RV64IM-NEXT: add a1, a1, a3 ; RV64IM-NEXT: srli a1, a1, 6 -; RV64IM-NEXT: mul a3, a1, t0 -; RV64IM-NEXT: sub t2, a4, a3 +; RV64IM-NEXT: mulw a3, a1, t0 +; RV64IM-NEXT: subw t2, a4, a3 ; RV64IM-NEXT: mulhu a4, a7, a5 ; RV64IM-NEXT: sub a3, a7, a4 ; RV64IM-NEXT: srli a3, a3, 1 ; RV64IM-NEXT: add a3, a3, a4 ; RV64IM-NEXT: srli a3, a3, 6 -; RV64IM-NEXT: mul a4, a3, t0 -; RV64IM-NEXT: sub a4, a7, a4 +; RV64IM-NEXT: mulw a4, a3, t0 +; RV64IM-NEXT: subw a4, a7, a4 ; RV64IM-NEXT: mulhu a5, a6, a5 ; RV64IM-NEXT: sub a2, a6, a5 ; RV64IM-NEXT: srli a2, a2, 1 ; RV64IM-NEXT: add a2, a2, a5 ; RV64IM-NEXT: srli a2, a2, 6 -; RV64IM-NEXT: mul a5, a2, t0 -; RV64IM-NEXT: sub a5, a6, a5 -; RV64IM-NEXT: add a2, a5, a2 -; RV64IM-NEXT: add a3, a4, a3 -; RV64IM-NEXT: add a1, t2, a1 -; 
RV64IM-NEXT: add a4, t1, t3 +; RV64IM-NEXT: mulw a5, a2, t0 +; RV64IM-NEXT: subw a5, a6, a5 +; RV64IM-NEXT: addw a2, a5, a2 +; RV64IM-NEXT: addw a3, a4, a3 +; RV64IM-NEXT: addw a1, t2, a1 +; RV64IM-NEXT: addw a4, t1, t3 ; RV64IM-NEXT: sh a4, 6(a0) ; RV64IM-NEXT: sh a1, 4(a0) ; RV64IM-NEXT: sh a3, 2(a0) @@ -759,8 +759,8 @@ ; RV64IM-NEXT: add a2, a2, a5 ; RV64IM-NEXT: srli a2, a2, 6 ; RV64IM-NEXT: addi a5, zero, 95 -; RV64IM-NEXT: mul a2, a2, a5 -; RV64IM-NEXT: sub a1, a1, a2 +; RV64IM-NEXT: mulw a2, a2, a5 +; RV64IM-NEXT: subw a1, a1, a2 ; RV64IM-NEXT: andi a2, a4, 63 ; RV64IM-NEXT: andi a3, a3, 31 ; RV64IM-NEXT: andi a4, a6, 7 @@ -900,8 +900,8 @@ ; RV64IM-NEXT: add a4, a5, a4 ; RV64IM-NEXT: srli a4, a4, 4 ; RV64IM-NEXT: addi a5, zero, 23 -; RV64IM-NEXT: mul a4, a4, a5 -; RV64IM-NEXT: sub a1, a1, a4 +; RV64IM-NEXT: mulw a4, a4, a5 +; RV64IM-NEXT: subw a1, a1, a4 ; RV64IM-NEXT: srli a4, a3, 1 ; RV64IM-NEXT: lui a5, 6413 ; RV64IM-NEXT: addiw a5, a5, 1265 @@ -914,8 +914,8 @@ ; RV64IM-NEXT: mulhu a4, a4, a5 ; RV64IM-NEXT: srli a4, a4, 7 ; RV64IM-NEXT: addi a5, zero, 654 -; RV64IM-NEXT: mul a4, a4, a5 -; RV64IM-NEXT: sub a3, a3, a4 +; RV64IM-NEXT: mulw a4, a4, a5 +; RV64IM-NEXT: subw a3, a3, a4 ; RV64IM-NEXT: lui a4, 1044567 ; RV64IM-NEXT: addiw a4, a4, -575 ; RV64IM-NEXT: slli a4, a4, 12 @@ -928,8 +928,8 @@ ; RV64IM-NEXT: srli a4, a4, 12 ; RV64IM-NEXT: lui a5, 1 ; RV64IM-NEXT: addiw a5, a5, 1327 -; RV64IM-NEXT: mul a4, a4, a5 -; RV64IM-NEXT: sub a2, a2, a4 +; RV64IM-NEXT: mulw a4, a4, a5 +; RV64IM-NEXT: subw a2, a2, a4 ; RV64IM-NEXT: sh zero, 0(a0) ; RV64IM-NEXT: sh a2, 6(a0) ; RV64IM-NEXT: sh a3, 2(a0) diff --git a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll --- a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll +++ b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll @@ -25,7 +25,7 @@ ; ; RV64I-LABEL: func32: ; RV64I: # %bb.0: -; RV64I-NEXT: mul a1, a1, a2 +; RV64I-NEXT: mulw a1, a1, a2 ; RV64I-NEXT: subw a1, a0, a1 ; RV64I-NEXT: sext.w a2, a0 ; RV64I-NEXT: mv a0, zero diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll --- a/llvm/test/CodeGen/RISCV/vararg.ll +++ b/llvm/test/CodeGen/RISCV/vararg.ll @@ -1314,8 +1314,8 @@ ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a3, a0, 8 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 8(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: ld a0, 0(a0) -; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, a1, s0 -; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, a1, a2 +; LP64-LP64F-LP64D-FPELIM-NEXT: addw a1, a1, s0 +; LP64-LP64F-LP64D-FPELIM-NEXT: addw a1, a1, a2 ; LP64-LP64F-LP64D-FPELIM-NEXT: addw a0, a1, a0 ; LP64-LP64F-LP64D-FPELIM-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 24(sp) # 8-byte Folded Reload @@ -1357,8 +1357,8 @@ ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a3, a0, 8 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, -32(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: ld a0, 0(a0) -; LP64-LP64F-LP64D-WITHFP-NEXT: add a1, a1, s1 -; LP64-LP64F-LP64D-WITHFP-NEXT: add a1, a1, a2 +; LP64-LP64F-LP64D-WITHFP-NEXT: addw a1, a1, s1 +; LP64-LP64F-LP64D-WITHFP-NEXT: addw a1, a1, a2 ; LP64-LP64F-LP64D-WITHFP-NEXT: addw a0, a1, a0 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s1, 24(sp) # 8-byte Folded Reload ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 32(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll --- a/llvm/test/CodeGen/RISCV/xaluo.ll +++ b/llvm/test/CodeGen/RISCV/xaluo.ll @@ -358,12 +358,10 @@ ; ; RV64-LABEL: uaddo.i32: ; RV64: # %bb.0: # %entry -; RV64-NEXT: addw a3, a0, a1 -; RV64-NEXT: sext.w a4, 
a0 -; RV64-NEXT: sltu a3, a3, a4 -; RV64-NEXT: add a0, a0, a1 -; RV64-NEXT: sw a0, 0(a2) -; RV64-NEXT: mv a0, a3 +; RV64-NEXT: addw a1, a0, a1 +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: sltu a0, a1, a0 +; RV64-NEXT: sw a1, 0(a2) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i32: @@ -375,12 +373,10 @@ ; ; RV64ZBA-LABEL: uaddo.i32: ; RV64ZBA: # %bb.0: # %entry -; RV64ZBA-NEXT: addw a3, a0, a1 -; RV64ZBA-NEXT: sext.w a4, a0 -; RV64ZBA-NEXT: sltu a3, a3, a4 -; RV64ZBA-NEXT: add a0, a0, a1 -; RV64ZBA-NEXT: sw a0, 0(a2) -; RV64ZBA-NEXT: mv a0, a3 +; RV64ZBA-NEXT: addw a1, a0, a1 +; RV64ZBA-NEXT: sext.w a0, a0 +; RV64ZBA-NEXT: sltu a0, a1, a0 +; RV64ZBA-NEXT: sw a1, 0(a2) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2) @@ -402,10 +398,8 @@ ; RV64: # %bb.0: # %entry ; RV64-NEXT: sext.w a2, a0 ; RV64-NEXT: addiw a3, a0, -2 -; RV64-NEXT: sltu a2, a3, a2 -; RV64-NEXT: addi a0, a0, -2 -; RV64-NEXT: sw a0, 0(a1) -; RV64-NEXT: mv a0, a2 +; RV64-NEXT: sltu a0, a3, a2 +; RV64-NEXT: sw a3, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: uaddo.i32.constant: @@ -419,10 +413,8 @@ ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: sext.w a2, a0 ; RV64ZBA-NEXT: addiw a3, a0, -2 -; RV64ZBA-NEXT: sltu a2, a3, a2 -; RV64ZBA-NEXT: addi a0, a0, -2 -; RV64ZBA-NEXT: sw a0, 0(a1) -; RV64ZBA-NEXT: mv a0, a2 +; RV64ZBA-NEXT: sltu a0, a3, a2 +; RV64ZBA-NEXT: sw a3, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 -2) @@ -638,12 +630,10 @@ ; ; RV64-LABEL: usubo.i32: ; RV64: # %bb.0: # %entry -; RV64-NEXT: subw a3, a0, a1 -; RV64-NEXT: sext.w a4, a0 -; RV64-NEXT: sltu a3, a4, a3 -; RV64-NEXT: sub a0, a0, a1 -; RV64-NEXT: sw a0, 0(a2) -; RV64-NEXT: mv a0, a3 +; RV64-NEXT: subw a1, a0, a1 +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: sltu a0, a0, a1 +; RV64-NEXT: sw a1, 0(a2) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.i32: @@ -655,12 +645,10 @@ ; ; RV64ZBA-LABEL: usubo.i32: ; RV64ZBA: # %bb.0: # %entry -; RV64ZBA-NEXT: subw a3, a0, a1 -; RV64ZBA-NEXT: sext.w a4, a0 -; RV64ZBA-NEXT: sltu a3, a4, a3 -; RV64ZBA-NEXT: sub a0, a0, a1 -; RV64ZBA-NEXT: sw a0, 0(a2) -; RV64ZBA-NEXT: mv a0, a3 +; RV64ZBA-NEXT: subw a1, a0, a1 +; RV64ZBA-NEXT: sext.w a0, a0 +; RV64ZBA-NEXT: sltu a0, a0, a1 +; RV64ZBA-NEXT: sw a1, 0(a2) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2) @@ -681,11 +669,9 @@ ; RV64-LABEL: usubo.i32.constant.rhs: ; RV64: # %bb.0: # %entry ; RV64-NEXT: addiw a2, a0, 2 -; RV64-NEXT: sext.w a3, a0 -; RV64-NEXT: sltu a2, a3, a2 -; RV64-NEXT: addi a0, a0, 2 -; RV64-NEXT: sw a0, 0(a1) -; RV64-NEXT: mv a0, a2 +; RV64-NEXT: sext.w a0, a0 +; RV64-NEXT: sltu a0, a0, a2 +; RV64-NEXT: sw a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.i32.constant.rhs: @@ -698,11 +684,9 @@ ; RV64ZBA-LABEL: usubo.i32.constant.rhs: ; RV64ZBA: # %bb.0: # %entry ; RV64ZBA-NEXT: addiw a2, a0, 2 -; RV64ZBA-NEXT: sext.w a3, a0 -; RV64ZBA-NEXT: sltu a2, a3, a2 -; RV64ZBA-NEXT: addi a0, a0, 2 -; RV64ZBA-NEXT: sw a0, 0(a1) -; RV64ZBA-NEXT: mv a0, a2 +; RV64ZBA-NEXT: sext.w a0, a0 +; RV64ZBA-NEXT: sltu a0, a0, a2 +; RV64ZBA-NEXT: sw a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 -2) @@ -724,13 +708,11 @@ ; ; RV64-LABEL: usubo.i32.constant.lhs: ; RV64: # %bb.0: # %entry -; RV64-NEXT: addi a3, zero, -2 -; RV64-NEXT: subw a2, a3, a0 -; RV64-NEXT: addi a2, a2, 1 -; RV64-NEXT: seqz a2, a2 -; RV64-NEXT: sub a0, a3, a0 -; RV64-NEXT: sw a0, 0(a1) -; RV64-NEXT: mv a0, a2 +; RV64-NEXT: addi a2, zero, -2 
+; RV64-NEXT: subw a2, a2, a0 +; RV64-NEXT: addi a0, a2, 1 +; RV64-NEXT: seqz a0, a0 +; RV64-NEXT: sw a2, 0(a1) ; RV64-NEXT: ret ; ; RV32ZBA-LABEL: usubo.i32.constant.lhs: @@ -744,13 +726,11 @@ ; ; RV64ZBA-LABEL: usubo.i32.constant.lhs: ; RV64ZBA: # %bb.0: # %entry -; RV64ZBA-NEXT: addi a3, zero, -2 -; RV64ZBA-NEXT: subw a2, a3, a0 -; RV64ZBA-NEXT: addi a2, a2, 1 -; RV64ZBA-NEXT: seqz a2, a2 -; RV64ZBA-NEXT: sub a0, a3, a0 -; RV64ZBA-NEXT: sw a0, 0(a1) -; RV64ZBA-NEXT: mv a0, a2 +; RV64ZBA-NEXT: addi a2, zero, -2 +; RV64ZBA-NEXT: subw a2, a2, a0 +; RV64ZBA-NEXT: addi a0, a2, 1 +; RV64ZBA-NEXT: seqz a0, a0 +; RV64ZBA-NEXT: sw a2, 0(a1) ; RV64ZBA-NEXT: ret entry: %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 -2, i32 %v1)