diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h @@ -75,7 +75,8 @@ return selectSHXADD_UWOp(N, ShAmt, Val); } - bool hasAllNBitUsers(SDNode *Node, unsigned Bits) const; + bool hasAllNBitUsers(SDNode *Node, unsigned Bits, + const unsigned Depth = 0) const; bool hasAllHUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 16); } bool hasAllWUsers(SDNode *Node) const { return hasAllNBitUsers(Node, 32); } diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -2283,17 +2283,19 @@ // may be able to use a W instruction and CSE with the other instruction if // this has happened. We could try to detect that the CSE opportunity exists // before doing this, but that would be more complicated. -// TODO: Does this need to look through AND/OR/XOR to their users to find more -// opportunities. -bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const { +bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits, + const unsigned Depth) const { assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB || Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL || Node->getOpcode() == ISD::SRL || Node->getOpcode() == ISD::AND || Node->getOpcode() == ISD::OR || Node->getOpcode() == ISD::XOR || Node->getOpcode() == ISD::SIGN_EXTEND_INREG || - isa<ConstantSDNode>(Node)) && + isa<ConstantSDNode>(Node) || Depth != 0) && "Unexpected opcode"); + if (Depth >= SelectionDAG::MaxRecursionDepth) + return false; + for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) { SDNode *User = *UI; // Users of this node should have already been instruction selected @@ -2353,15 +2355,25 @@ return false; break; case RISCV::ANDI: - if (Bits < (64 - countLeadingZeros(User->getConstantOperandVal(1)))) - return false; - break; + if (Bits >= (64 - countLeadingZeros(User->getConstantOperandVal(1)))) + break; + goto RecCheck; case RISCV::ORI: { uint64_t Imm = cast<ConstantSDNode>(User->getOperand(1))->getSExtValue(); - if (Bits < (64 - countLeadingOnes(Imm))) + if (Bits >= (64 - countLeadingOnes(Imm))) + break; + [[fallthrough]]; + } + case RISCV::AND: + case RISCV::OR: + case RISCV::XOR: + case RISCV::ANDN: + case RISCV::ORN: + case RISCV::XNOR: + RecCheck: + if (!hasAllNBitUsers(User, Bits, Depth + 1)) return false; break; - } case RISCV::SEXT_B: case RISCV::PACKH: if (Bits < 8) diff --git a/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll --- a/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll +++ b/llvm/test/CodeGen/RISCV/bswap-bitreverse.ll @@ -1512,7 +1512,7 @@ ; RV64ZBB-LABEL: pr55484: ; RV64ZBB: # %bb.0: ; RV64ZBB-NEXT: srli a1, a0, 8 -; RV64ZBB-NEXT: slli a0, a0, 8 +; RV64ZBB-NEXT: slliw a0, a0, 8 ; RV64ZBB-NEXT: or a0, a1, a0 ; RV64ZBB-NEXT: sext.h a0, a0 ; RV64ZBB-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll --- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll @@ -239,7 +239,7 @@ ; RV64M-NEXT: sext.w a1, a0 ; RV64M-NEXT: beqz a1, .LBB2_2 ; RV64M-NEXT: # %bb.1: # %cond.false -; RV64M-NEXT: neg a1, a0 +; RV64M-NEXT: negw a1, a0 ; RV64M-NEXT: and a0, a0, a1 ; RV64M-NEXT: lui a1, 30667 ; RV64M-NEXT: addiw a1, a1, 1329 @@ -577,7 +577,7 @@ ; ; RV64M-LABEL: 
test_cttz_i32_zero_undef: ; RV64M: # %bb.0: -; RV64M-NEXT: neg a1, a0 +; RV64M-NEXT: negw a1, a0 ; RV64M-NEXT: and a0, a0, a1 ; RV64M-NEXT: lui a1, 30667 ; RV64M-NEXT: addiw a1, a1, 1329 @@ -1063,7 +1063,7 @@ ; RV64M-NEXT: and a0, a0, a1 ; RV64M-NEXT: add a0, a2, a0 ; RV64M-NEXT: srli a1, a0, 4 -; RV64M-NEXT: add a0, a0, a1 +; RV64M-NEXT: addw a0, a0, a1 ; RV64M-NEXT: lui a1, 61681 ; RV64M-NEXT: addiw a1, a1, -241 ; RV64M-NEXT: and a0, a0, a1 @@ -1643,7 +1643,7 @@ ; RV64M-NEXT: and a0, a0, a1 ; RV64M-NEXT: add a0, a2, a0 ; RV64M-NEXT: srli a1, a0, 4 -; RV64M-NEXT: add a0, a0, a1 +; RV64M-NEXT: addw a0, a0, a1 ; RV64M-NEXT: lui a1, 61681 ; RV64M-NEXT: addiw a1, a1, -241 ; RV64M-NEXT: and a0, a0, a1 @@ -2116,7 +2116,7 @@ ; RV64M-NEXT: and a0, a0, a1 ; RV64M-NEXT: add a0, a2, a0 ; RV64M-NEXT: srli a1, a0, 4 -; RV64M-NEXT: add a0, a0, a1 +; RV64M-NEXT: addw a0, a0, a1 ; RV64M-NEXT: lui a1, 61681 ; RV64M-NEXT: addiw a1, a1, -241 ; RV64M-NEXT: and a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll --- a/llvm/test/CodeGen/RISCV/double-convert.ll +++ b/llvm/test/CodeGen/RISCV/double-convert.ll @@ -288,7 +288,7 @@ ; RV64IFD-NEXT: fcvt.wu.d a0, fa0, rtz ; RV64IFD-NEXT: feq.d a1, fa0, fa0 ; RV64IFD-NEXT: seqz a1, a1 -; RV64IFD-NEXT: addi a1, a1, -1 +; RV64IFD-NEXT: addiw a1, a1, -1 ; RV64IFD-NEXT: and a0, a0, a1 ; RV64IFD-NEXT: slli a0, a0, 32 ; RV64IFD-NEXT: srli a0, a0, 32 @@ -1406,7 +1406,7 @@ ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unorddf2@plt ; RV64I-NEXT: snez a0, a0 -; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: addiw a0, a0, -1 ; RV64I-NEXT: and a0, a0, s1 ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 @@ -1708,7 +1708,7 @@ ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unorddf2@plt ; RV64I-NEXT: snez a0, a0 -; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: addiw a0, a0, -1 ; RV64I-NEXT: and a0, a0, s1 ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 @@ -1877,7 +1877,7 @@ ; RV64IFD-NEXT: fcvt.wu.d a0, fa0, rtz ; RV64IFD-NEXT: feq.d a1, fa0, fa0 ; RV64IFD-NEXT: seqz a1, a1 -; RV64IFD-NEXT: addi a1, a1, -1 +; RV64IFD-NEXT: addiw a1, a1, -1 ; RV64IFD-NEXT: and a0, a0, a1 ; RV64IFD-NEXT: slli a0, a0, 32 ; RV64IFD-NEXT: srli a0, a0, 32 @@ -2059,7 +2059,7 @@ ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unorddf2@plt ; RV64I-NEXT: snez a0, a0 -; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: addiw a0, a0, -1 ; RV64I-NEXT: and a0, a0, s1 ; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll --- a/llvm/test/CodeGen/RISCV/float-convert.ll +++ b/llvm/test/CodeGen/RISCV/float-convert.ll @@ -219,7 +219,7 @@ ; RV64IF-NEXT: fcvt.wu.s a0, fa0, rtz ; RV64IF-NEXT: feq.s a1, fa0, fa0 ; RV64IF-NEXT: seqz a1, a1 -; RV64IF-NEXT: addi a1, a1, -1 +; RV64IF-NEXT: addiw a1, a1, -1 ; RV64IF-NEXT: and a0, a0, a1 ; RV64IF-NEXT: slli a0, a0, 32 ; RV64IF-NEXT: srli a0, a0, 32 @@ -1253,7 +1253,7 @@ ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unordsf2@plt ; RV64I-NEXT: snez a0, a0 -; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: addiw a0, a0, -1 ; RV64I-NEXT: and a0, a0, s1 ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 @@ -1533,7 +1533,7 @@ ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unordsf2@plt ; RV64I-NEXT: snez a0, a0 -; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: addiw a0, a0, -1 ; RV64I-NEXT: and a0, a0, s1 ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 @@ -1690,7 +1690,7 @@ ; RV64IF-NEXT: fcvt.wu.s 
a0, fa0, rtz ; RV64IF-NEXT: feq.s a1, fa0, fa0 ; RV64IF-NEXT: seqz a1, a1 -; RV64IF-NEXT: addi a1, a1, -1 +; RV64IF-NEXT: addiw a1, a1, -1 ; RV64IF-NEXT: and a0, a0, a1 ; RV64IF-NEXT: slli a0, a0, 32 ; RV64IF-NEXT: srli a0, a0, 32 @@ -1848,7 +1848,7 @@ ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unordsf2@plt ; RV64I-NEXT: snez a0, a0 -; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: addiw a0, a0, -1 ; RV64I-NEXT: and a0, a0, s1 ; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll --- a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll +++ b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll @@ -295,13 +295,13 @@ ; CHECK-NOV-NEXT: sgtz a6, a3 ; CHECK-NOV-NEXT: sgtz a7, a2 ; CHECK-NOV-NEXT: sgtz t0, a1 -; CHECK-NOV-NEXT: neg t0, t0 +; CHECK-NOV-NEXT: negw t0, t0 ; CHECK-NOV-NEXT: and a1, t0, a1 -; CHECK-NOV-NEXT: neg a7, a7 +; CHECK-NOV-NEXT: negw a7, a7 ; CHECK-NOV-NEXT: and a2, a7, a2 -; CHECK-NOV-NEXT: neg a6, a6 +; CHECK-NOV-NEXT: negw a6, a6 ; CHECK-NOV-NEXT: and a3, a6, a3 -; CHECK-NOV-NEXT: neg a4, a4 +; CHECK-NOV-NEXT: negw a4, a4 ; CHECK-NOV-NEXT: and a4, a4, a5 ; CHECK-NOV-NEXT: sw a4, 12(a0) ; CHECK-NOV-NEXT: sw a3, 8(a0) @@ -719,13 +719,13 @@ ; CHECK-NOV-NEXT: sgtz a4, a1 ; CHECK-NOV-NEXT: sgtz a5, s2 ; CHECK-NOV-NEXT: sgtz a6, a0 -; CHECK-NOV-NEXT: neg a6, a6 +; CHECK-NOV-NEXT: negw a6, a6 ; CHECK-NOV-NEXT: and a0, a6, a0 -; CHECK-NOV-NEXT: neg a5, a5 +; CHECK-NOV-NEXT: negw a5, a5 ; CHECK-NOV-NEXT: and a5, a5, s2 -; CHECK-NOV-NEXT: neg a4, a4 +; CHECK-NOV-NEXT: negw a4, a4 ; CHECK-NOV-NEXT: and a1, a4, a1 -; CHECK-NOV-NEXT: neg a2, a2 +; CHECK-NOV-NEXT: negw a2, a2 ; CHECK-NOV-NEXT: and a2, a2, a3 ; CHECK-NOV-NEXT: sw a2, 12(s0) ; CHECK-NOV-NEXT: sw a1, 8(s0) @@ -1120,13 +1120,13 @@ ; CHECK-NOV-NEXT: sgtz a6, a3 ; CHECK-NOV-NEXT: sgtz a7, a2 ; CHECK-NOV-NEXT: sgtz t0, a1 -; CHECK-NOV-NEXT: neg t0, t0 +; CHECK-NOV-NEXT: negw t0, t0 ; CHECK-NOV-NEXT: and a1, t0, a1 -; CHECK-NOV-NEXT: neg a7, a7 +; CHECK-NOV-NEXT: negw a7, a7 ; CHECK-NOV-NEXT: and a2, a7, a2 -; CHECK-NOV-NEXT: neg a6, a6 +; CHECK-NOV-NEXT: negw a6, a6 ; CHECK-NOV-NEXT: and a3, a6, a3 -; CHECK-NOV-NEXT: neg a4, a4 +; CHECK-NOV-NEXT: negw a4, a4 ; CHECK-NOV-NEXT: and a4, a4, a5 ; CHECK-NOV-NEXT: sh a4, 6(a0) ; CHECK-NOV-NEXT: sh a3, 4(a0) @@ -1862,21 +1862,21 @@ ; CHECK-NOV-NEXT: sgtz t4, a1 ; CHECK-NOV-NEXT: sgtz t5, s2 ; CHECK-NOV-NEXT: sgtz t6, a0 -; CHECK-NOV-NEXT: neg t6, t6 +; CHECK-NOV-NEXT: negw t6, t6 ; CHECK-NOV-NEXT: and a0, t6, a0 -; CHECK-NOV-NEXT: neg t5, t5 +; CHECK-NOV-NEXT: negw t5, t5 ; CHECK-NOV-NEXT: and t5, t5, s2 -; CHECK-NOV-NEXT: neg t4, t4 +; CHECK-NOV-NEXT: negw t4, t4 ; CHECK-NOV-NEXT: and a1, t4, a1 -; CHECK-NOV-NEXT: neg t3, t3 +; CHECK-NOV-NEXT: negw t3, t3 ; CHECK-NOV-NEXT: and a2, t3, a2 -; CHECK-NOV-NEXT: neg t2, t2 +; CHECK-NOV-NEXT: negw t2, t2 ; CHECK-NOV-NEXT: and a4, t2, a4 -; CHECK-NOV-NEXT: neg t1, t1 +; CHECK-NOV-NEXT: negw t1, t1 ; CHECK-NOV-NEXT: and a5, t1, a5 -; CHECK-NOV-NEXT: neg t0, t0 +; CHECK-NOV-NEXT: negw t0, t0 ; CHECK-NOV-NEXT: and a6, t0, a6 -; CHECK-NOV-NEXT: neg a3, a3 +; CHECK-NOV-NEXT: negw a3, a3 ; CHECK-NOV-NEXT: and a3, a3, a7 ; CHECK-NOV-NEXT: sh a3, 14(s0) ; CHECK-NOV-NEXT: sh a6, 12(s0) @@ -3587,16 +3587,16 @@ ; CHECK-NOV-NEXT: mv a5, a4 ; CHECK-NOV-NEXT: .LBB32_5: # %entry ; CHECK-NOV-NEXT: sgtz a4, a5 -; CHECK-NOV-NEXT: neg a4, a4 +; CHECK-NOV-NEXT: negw a4, a4 ; CHECK-NOV-NEXT: and a4, a4, a5 ; CHECK-NOV-NEXT: sgtz a5, a3 -; 
CHECK-NOV-NEXT: neg a5, a5 +; CHECK-NOV-NEXT: negw a5, a5 ; CHECK-NOV-NEXT: and a3, a5, a3 ; CHECK-NOV-NEXT: sgtz a5, a2 -; CHECK-NOV-NEXT: neg a5, a5 +; CHECK-NOV-NEXT: negw a5, a5 ; CHECK-NOV-NEXT: and a2, a5, a2 ; CHECK-NOV-NEXT: sgtz a5, a1 -; CHECK-NOV-NEXT: neg a5, a5 +; CHECK-NOV-NEXT: negw a5, a5 ; CHECK-NOV-NEXT: and a1, a5, a1 ; CHECK-NOV-NEXT: sw a1, 12(a0) ; CHECK-NOV-NEXT: sw a2, 8(a0) @@ -4006,16 +4006,16 @@ ; CHECK-NOV-NEXT: mv a3, a2 ; CHECK-NOV-NEXT: .LBB35_5: # %entry ; CHECK-NOV-NEXT: sgtz a2, a3 -; CHECK-NOV-NEXT: neg a2, a2 +; CHECK-NOV-NEXT: negw a2, a2 ; CHECK-NOV-NEXT: and a2, a2, a3 ; CHECK-NOV-NEXT: sgtz a3, a1 -; CHECK-NOV-NEXT: neg a3, a3 +; CHECK-NOV-NEXT: negw a3, a3 ; CHECK-NOV-NEXT: and a1, a3, a1 ; CHECK-NOV-NEXT: sgtz a3, s2 -; CHECK-NOV-NEXT: neg a3, a3 +; CHECK-NOV-NEXT: negw a3, a3 ; CHECK-NOV-NEXT: and a3, a3, s2 ; CHECK-NOV-NEXT: sgtz a4, a0 -; CHECK-NOV-NEXT: neg a4, a4 +; CHECK-NOV-NEXT: negw a4, a4 ; CHECK-NOV-NEXT: and a0, a4, a0 ; CHECK-NOV-NEXT: sw a0, 12(s0) ; CHECK-NOV-NEXT: sw a3, 8(s0) @@ -4397,16 +4397,16 @@ ; CHECK-NOV-NEXT: mv a5, a4 ; CHECK-NOV-NEXT: .LBB41_5: # %entry ; CHECK-NOV-NEXT: sgtz a4, a5 -; CHECK-NOV-NEXT: neg a4, a4 +; CHECK-NOV-NEXT: negw a4, a4 ; CHECK-NOV-NEXT: and a4, a4, a5 ; CHECK-NOV-NEXT: sgtz a5, a3 -; CHECK-NOV-NEXT: neg a5, a5 +; CHECK-NOV-NEXT: negw a5, a5 ; CHECK-NOV-NEXT: and a3, a5, a3 ; CHECK-NOV-NEXT: sgtz a5, a2 -; CHECK-NOV-NEXT: neg a5, a5 +; CHECK-NOV-NEXT: negw a5, a5 ; CHECK-NOV-NEXT: and a2, a5, a2 ; CHECK-NOV-NEXT: sgtz a5, a1 -; CHECK-NOV-NEXT: neg a5, a5 +; CHECK-NOV-NEXT: negw a5, a5 ; CHECK-NOV-NEXT: and a1, a5, a1 ; CHECK-NOV-NEXT: sh a1, 6(a0) ; CHECK-NOV-NEXT: sh a2, 4(a0) @@ -5128,28 +5128,28 @@ ; CHECK-NOV-NEXT: mv a7, a3 ; CHECK-NOV-NEXT: .LBB44_9: # %entry ; CHECK-NOV-NEXT: sgtz a3, a7 -; CHECK-NOV-NEXT: neg a3, a3 +; CHECK-NOV-NEXT: negw a3, a3 ; CHECK-NOV-NEXT: and a3, a3, a7 ; CHECK-NOV-NEXT: sgtz a7, a6 -; CHECK-NOV-NEXT: neg a7, a7 +; CHECK-NOV-NEXT: negw a7, a7 ; CHECK-NOV-NEXT: and a6, a7, a6 ; CHECK-NOV-NEXT: sgtz a7, a5 -; CHECK-NOV-NEXT: neg a7, a7 +; CHECK-NOV-NEXT: negw a7, a7 ; CHECK-NOV-NEXT: and a5, a7, a5 ; CHECK-NOV-NEXT: sgtz a7, a4 -; CHECK-NOV-NEXT: neg a7, a7 +; CHECK-NOV-NEXT: negw a7, a7 ; CHECK-NOV-NEXT: and a4, a7, a4 ; CHECK-NOV-NEXT: sgtz a7, a2 -; CHECK-NOV-NEXT: neg a7, a7 +; CHECK-NOV-NEXT: negw a7, a7 ; CHECK-NOV-NEXT: and a2, a7, a2 ; CHECK-NOV-NEXT: sgtz a7, a1 -; CHECK-NOV-NEXT: neg a7, a7 +; CHECK-NOV-NEXT: negw a7, a7 ; CHECK-NOV-NEXT: and a1, a7, a1 ; CHECK-NOV-NEXT: sgtz a7, s2 -; CHECK-NOV-NEXT: neg a7, a7 +; CHECK-NOV-NEXT: negw a7, a7 ; CHECK-NOV-NEXT: and a7, a7, s2 ; CHECK-NOV-NEXT: sgtz t0, a0 -; CHECK-NOV-NEXT: neg t0, t0 +; CHECK-NOV-NEXT: negw t0, t0 ; CHECK-NOV-NEXT: and a0, t0, a0 ; CHECK-NOV-NEXT: sh a0, 14(s0) ; CHECK-NOV-NEXT: sh a7, 12(s0) diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll --- a/llvm/test/CodeGen/RISCV/half-convert.ll +++ b/llvm/test/CodeGen/RISCV/half-convert.ll @@ -831,7 +831,7 @@ ; RV64IZFH-NEXT: fcvt.wu.h a0, fa0, rtz ; RV64IZFH-NEXT: feq.h a1, fa0, fa0 ; RV64IZFH-NEXT: seqz a1, a1 -; RV64IZFH-NEXT: addi a1, a1, -1 +; RV64IZFH-NEXT: addiw a1, a1, -1 ; RV64IZFH-NEXT: and a0, a0, a1 ; RV64IZFH-NEXT: slli a0, a0, 32 ; RV64IZFH-NEXT: srli a0, a0, 32 @@ -851,7 +851,7 @@ ; RV64IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz ; RV64IDZFH-NEXT: feq.h a1, fa0, fa0 ; RV64IDZFH-NEXT: seqz a1, a1 -; RV64IDZFH-NEXT: addi a1, a1, -1 +; RV64IDZFH-NEXT: addiw a1, a1, -1 ; RV64IDZFH-NEXT: and a0, a0, a1 ; 
RV64IDZFH-NEXT: slli a0, a0, 32 ; RV64IDZFH-NEXT: srli a0, a0, 32 @@ -943,7 +943,7 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.wu.s a0, ft0, rtz ; CHECK64-IZFHMIN-NEXT: feq.s a1, ft0, ft0 ; CHECK64-IZFHMIN-NEXT: seqz a1, a1 -; CHECK64-IZFHMIN-NEXT: addi a1, a1, -1 +; CHECK64-IZFHMIN-NEXT: addiw a1, a1, -1 ; CHECK64-IZFHMIN-NEXT: and a0, a0, a1 ; CHECK64-IZFHMIN-NEXT: slli a0, a0, 32 ; CHECK64-IZFHMIN-NEXT: srli a0, a0, 32 @@ -2926,7 +2926,7 @@ ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unordsf2@plt ; RV64I-NEXT: snez a0, a0 -; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: addiw a0, a0, -1 ; RV64I-NEXT: and a0, a0, s1 ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srai a0, a0, 48 @@ -3386,7 +3386,7 @@ ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unordsf2@plt ; RV64I-NEXT: snez a0, a0 -; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: addiw a0, a0, -1 ; RV64I-NEXT: and a0, a0, s1 ; RV64I-NEXT: slli a0, a0, 56 ; RV64I-NEXT: srai a0, a0, 56 @@ -3655,7 +3655,7 @@ ; RV64IZFH-NEXT: fcvt.wu.h a0, fa0, rtz ; RV64IZFH-NEXT: feq.h a1, fa0, fa0 ; RV64IZFH-NEXT: seqz a1, a1 -; RV64IZFH-NEXT: addi a1, a1, -1 +; RV64IZFH-NEXT: addiw a1, a1, -1 ; RV64IZFH-NEXT: and a0, a0, a1 ; RV64IZFH-NEXT: slli a0, a0, 32 ; RV64IZFH-NEXT: srli a0, a0, 32 @@ -3675,7 +3675,7 @@ ; RV64IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz ; RV64IDZFH-NEXT: feq.h a1, fa0, fa0 ; RV64IDZFH-NEXT: seqz a1, a1 -; RV64IDZFH-NEXT: addi a1, a1, -1 +; RV64IDZFH-NEXT: addiw a1, a1, -1 ; RV64IDZFH-NEXT: and a0, a0, a1 ; RV64IDZFH-NEXT: slli a0, a0, 32 ; RV64IDZFH-NEXT: srli a0, a0, 32 @@ -3769,7 +3769,7 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.wu.s a0, ft0, rtz ; CHECK64-IZFHMIN-NEXT: feq.s a1, ft0, ft0 ; CHECK64-IZFHMIN-NEXT: seqz a1, a1 -; CHECK64-IZFHMIN-NEXT: addi a1, a1, -1 +; CHECK64-IZFHMIN-NEXT: addiw a1, a1, -1 ; CHECK64-IZFHMIN-NEXT: and a0, a0, a1 ; CHECK64-IZFHMIN-NEXT: slli a0, a0, 32 ; CHECK64-IZFHMIN-NEXT: srli a0, a0, 32 @@ -3887,7 +3887,7 @@ ; RV64I-NEXT: mv a1, s0 ; RV64I-NEXT: call __unordsf2@plt ; RV64I-NEXT: snez a0, a0 -; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: addiw a0, a0, -1 ; RV64I-NEXT: and a0, a0, s1 ; RV64I-NEXT: sext.w a0, a0 ; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rem.ll b/llvm/test/CodeGen/RISCV/rem.ll --- a/llvm/test/CodeGen/RISCV/rem.ll +++ b/llvm/test/CodeGen/RISCV/rem.ll @@ -125,7 +125,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: srliw a1, a1, 29 -; RV64I-NEXT: add a1, a0, a1 +; RV64I-NEXT: addw a1, a0, a1 ; RV64I-NEXT: andi a1, a1, -8 ; RV64I-NEXT: subw a0, a0, a1 ; RV64I-NEXT: ret @@ -134,7 +134,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: sraiw a1, a0, 31 ; RV64IM-NEXT: srliw a1, a1, 29 -; RV64IM-NEXT: add a1, a0, a1 +; RV64IM-NEXT: addw a1, a0, a1 ; RV64IM-NEXT: andi a1, a1, -8 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret @@ -167,7 +167,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: srliw a1, a1, 16 -; RV64I-NEXT: add a1, a0, a1 +; RV64I-NEXT: addw a1, a0, a1 ; RV64I-NEXT: lui a2, 1048560 ; RV64I-NEXT: and a1, a1, a2 ; RV64I-NEXT: subw a0, a0, a1 @@ -177,7 +177,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: sraiw a1, a0, 31 ; RV64IM-NEXT: srliw a1, a1, 16 -; RV64IM-NEXT: add a1, a0, a1 +; RV64IM-NEXT: addw a1, a0, a1 ; RV64IM-NEXT: lui a2, 1048560 ; RV64IM-NEXT: and a1, a1, a2 ; RV64IM-NEXT: subw a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/rotl-rotr.ll b/llvm/test/CodeGen/RISCV/rotl-rotr.ll --- a/llvm/test/CodeGen/RISCV/rotl-rotr.ll +++ b/llvm/test/CodeGen/RISCV/rotl-rotr.ll @@ -718,7 +718,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: ori a2, a1, 128 ; RV64I-NEXT: 
sll a2, a0, a2 -; RV64I-NEXT: neg a1, a1 +; RV64I-NEXT: negw a1, a1 ; RV64I-NEXT: ori a1, a1, 64 ; RV64I-NEXT: srl a0, a0, a1 ; RV64I-NEXT: or a0, a2, a0 @@ -963,7 +963,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: ori a2, a1, 128 ; RV64I-NEXT: srl a2, a0, a2 -; RV64I-NEXT: neg a1, a1 +; RV64I-NEXT: negw a1, a1 ; RV64I-NEXT: ori a1, a1, 64 ; RV64I-NEXT: sll a0, a0, a1 ; RV64I-NEXT: or a0, a2, a0 diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll --- a/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbkb.ll @@ -279,7 +279,7 @@ ; RV64I-LABEL: rori_i32_fshl_nosext: ; RV64I: # %bb.0: ; RV64I-NEXT: srliw a2, a0, 1 -; RV64I-NEXT: slli a0, a0, 31 +; RV64I-NEXT: slliw a0, a0, 31 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: sw a0, 0(a1) ; RV64I-NEXT: ret @@ -314,7 +314,7 @@ define void @rori_i32_fshr_nosext(i32 signext %a, i32* %x) nounwind { ; RV64I-LABEL: rori_i32_fshr_nosext: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a2, a0, 1 +; RV64I-NEXT: slliw a2, a0, 1 ; RV64I-NEXT: srliw a0, a0, 31 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: sw a0, 0(a1) @@ -352,7 +352,7 @@ define i64 @roriw_bug(i64 %x) nounwind { ; CHECK-LABEL: roriw_bug: ; CHECK: # %bb.0: -; CHECK-NEXT: slli a1, a0, 31 +; CHECK-NEXT: slliw a1, a0, 31 ; CHECK-NEXT: andi a2, a0, -2 ; CHECK-NEXT: srli a0, a0, 1 ; CHECK-NEXT: or a0, a1, a0 diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll --- a/llvm/test/CodeGen/RISCV/rv64zbb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll @@ -981,8 +981,8 @@ ; RV64I-NEXT: srliw a4, a0, 24 ; RV64I-NEXT: or a2, a2, a4 ; RV64I-NEXT: and a3, a0, a3 -; RV64I-NEXT: slli a3, a3, 8 -; RV64I-NEXT: slli a0, a0, 24 +; RV64I-NEXT: slliw a3, a3, 8 +; RV64I-NEXT: slliw a0, a0, 24 ; RV64I-NEXT: or a0, a0, a3 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: sw a0, 0(a1) diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll --- a/llvm/test/CodeGen/RISCV/rv64zbkb.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll @@ -45,7 +45,7 @@ define signext i32 @pack_i32_3(i16 zeroext %0, i16 zeroext %1, i32 signext %2) { ; RV64I-LABEL: pack_i32_3: ; RV64I: # %bb.0: -; RV64I-NEXT: slli a0, a0, 16 +; RV64I-NEXT: slliw a0, a0, 16 ; RV64I-NEXT: or a0, a0, a1 ; RV64I-NEXT: addw a0, a0, a2 ; RV64I-NEXT: ret @@ -227,8 +227,8 @@ define zeroext i16 @packh_i16_2(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2) { ; RV64I-LABEL: packh_i16_2: ; RV64I: # %bb.0: -; RV64I-NEXT: add a0, a1, a0 -; RV64I-NEXT: slli a0, a0, 8 +; RV64I-NEXT: addw a0, a1, a0 +; RV64I-NEXT: slliw a0, a0, 8 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: slli a0, a0, 48 ; RV64I-NEXT: srli a0, a0, 48 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll @@ -431,7 +431,7 @@ ; RV64-NEXT: fcvt.l.d a0, ft3, rtz ; RV64-NEXT: feq.d a2, ft2, ft2 ; RV64-NEXT: seqz a2, a2 -; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: addiw a2, a2, -1 ; RV64-NEXT: and a0, a2, a0 ; RV64-NEXT: sb a0, 8(sp) ; RV64-NEXT: vsetivli zero, 1, e64, m4, ta, ma @@ -442,7 +442,7 @@ ; RV64-NEXT: fcvt.l.d a0, ft3, rtz ; RV64-NEXT: feq.d a2, ft2, ft2 ; RV64-NEXT: seqz a2, a2 -; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: addiw a2, a2, -1 ; RV64-NEXT: and a0, a2, a0 ; RV64-NEXT: sb a0, 15(sp) ; RV64-NEXT: vslidedown.vi v12, v8, 6 @@ -452,7 +452,7 @@ ; RV64-NEXT: fcvt.l.d a0, ft3, rtz ; RV64-NEXT: feq.d a2, ft2, ft2 ; RV64-NEXT: seqz a2, a2 -; 
RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: addiw a2, a2, -1 ; RV64-NEXT: and a0, a2, a0 ; RV64-NEXT: sb a0, 14(sp) ; RV64-NEXT: vslidedown.vi v12, v8, 5 @@ -462,7 +462,7 @@ ; RV64-NEXT: fcvt.l.d a0, ft3, rtz ; RV64-NEXT: feq.d a2, ft2, ft2 ; RV64-NEXT: seqz a2, a2 -; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: addiw a2, a2, -1 ; RV64-NEXT: and a0, a2, a0 ; RV64-NEXT: sb a0, 13(sp) ; RV64-NEXT: vslidedown.vi v12, v8, 4 @@ -472,7 +472,7 @@ ; RV64-NEXT: fcvt.l.d a0, ft3, rtz ; RV64-NEXT: feq.d a2, ft2, ft2 ; RV64-NEXT: seqz a2, a2 -; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: addiw a2, a2, -1 ; RV64-NEXT: and a0, a2, a0 ; RV64-NEXT: sb a0, 12(sp) ; RV64-NEXT: vslidedown.vi v12, v8, 3 @@ -482,7 +482,7 @@ ; RV64-NEXT: fcvt.l.d a0, ft3, rtz ; RV64-NEXT: feq.d a2, ft2, ft2 ; RV64-NEXT: seqz a2, a2 -; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: addiw a2, a2, -1 ; RV64-NEXT: and a0, a2, a0 ; RV64-NEXT: sb a0, 11(sp) ; RV64-NEXT: vslidedown.vi v12, v8, 2 @@ -492,7 +492,7 @@ ; RV64-NEXT: fcvt.l.d a0, ft3, rtz ; RV64-NEXT: feq.d a2, ft2, ft2 ; RV64-NEXT: seqz a2, a2 -; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: addiw a2, a2, -1 ; RV64-NEXT: and a0, a2, a0 ; RV64-NEXT: sb a0, 10(sp) ; RV64-NEXT: vslidedown.vi v8, v8, 1 @@ -502,7 +502,7 @@ ; RV64-NEXT: fcvt.l.d a0, ft0, rtz ; RV64-NEXT: feq.d a2, ft2, ft2 ; RV64-NEXT: seqz a2, a2 -; RV64-NEXT: addi a2, a2, -1 +; RV64-NEXT: addiw a2, a2, -1 ; RV64-NEXT: and a0, a2, a0 ; RV64-NEXT: sb a0, 9(sp) ; RV64-NEXT: addi a0, sp, 8 diff --git a/llvm/test/CodeGen/RISCV/select-binop-identity.ll b/llvm/test/CodeGen/RISCV/select-binop-identity.ll --- a/llvm/test/CodeGen/RISCV/select-binop-identity.ll +++ b/llvm/test/CodeGen/RISCV/select-binop-identity.ll @@ -202,7 +202,7 @@ ; ; RV64I-LABEL: add_select_all_zeros_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: addiw a0, a0, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: addw a0, a2, a0 ; RV64I-NEXT: ret @@ -264,7 +264,7 @@ ; ; RV64I-LABEL: sub_select_all_zeros_i32: ; RV64I: # %bb.0: -; RV64I-NEXT: addi a0, a0, -1 +; RV64I-NEXT: addiw a0, a0, -1 ; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: subw a0, a2, a0 ; RV64I-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/sextw-removal.ll b/llvm/test/CodeGen/RISCV/sextw-removal.ll --- a/llvm/test/CodeGen/RISCV/sextw-removal.ll +++ b/llvm/test/CodeGen/RISCV/sextw-removal.ll @@ -196,7 +196,7 @@ ; RV64I-NEXT: and a0, a0, s1 ; RV64I-NEXT: add a0, a2, a0 ; RV64I-NEXT: srli a2, a0, 4 -; RV64I-NEXT: add a0, a0, a2 +; RV64I-NEXT: addw a0, a0, a2 ; RV64I-NEXT: and a0, a0, s2 ; RV64I-NEXT: mulw a0, a0, s3 ; RV64I-NEXT: srliw a0, a0, 24 diff --git a/llvm/test/CodeGen/RISCV/srem-lkk.ll b/llvm/test/CodeGen/RISCV/srem-lkk.ll --- a/llvm/test/CodeGen/RISCV/srem-lkk.ll +++ b/llvm/test/CodeGen/RISCV/srem-lkk.ll @@ -311,7 +311,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: srliw a1, a1, 26 -; RV64I-NEXT: add a1, a0, a1 +; RV64I-NEXT: addw a1, a0, a1 ; RV64I-NEXT: andi a1, a1, -64 ; RV64I-NEXT: subw a0, a0, a1 ; RV64I-NEXT: ret @@ -320,7 +320,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: sraiw a1, a0, 31 ; RV64IM-NEXT: srliw a1, a1, 26 -; RV64IM-NEXT: add a1, a0, a1 +; RV64IM-NEXT: addw a1, a0, a1 ; RV64IM-NEXT: andi a1, a1, -64 ; RV64IM-NEXT: subw a0, a0, a1 ; RV64IM-NEXT: ret @@ -364,7 +364,7 @@ ; RV64I: # %bb.0: ; RV64I-NEXT: sraiw a1, a0, 31 ; RV64I-NEXT: srliw a1, a1, 1 -; RV64I-NEXT: add a1, a0, a1 +; RV64I-NEXT: addw a1, a0, a1 ; RV64I-NEXT: lui a2, 524288 ; RV64I-NEXT: and a1, a1, a2 ; RV64I-NEXT: addw a0, a0, a1 @@ -374,7 +374,7 @@ ; RV64IM: # %bb.0: ; RV64IM-NEXT: sraiw a1, 
a0, 31 ; RV64IM-NEXT: srliw a1, a1, 1 -; RV64IM-NEXT: add a1, a0, a1 +; RV64IM-NEXT: addw a1, a0, a1 ; RV64IM-NEXT: lui a2, 524288 ; RV64IM-NEXT: and a1, a1, a2 ; RV64IM-NEXT: addw a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll @@ -427,7 +427,7 @@ ; RV64-NEXT: neg a0, a0 ; RV64-NEXT: addi a2, a2, -1 ; RV64-NEXT: addi a1, a1, -1 -; RV64-NEXT: slli a3, a1, 2 +; RV64-NEXT: slliw a3, a1, 2 ; RV64-NEXT: slli a4, a2, 31 ; RV64-NEXT: srli a4, a4, 62 ; RV64-NEXT: or a3, a4, a3 @@ -585,7 +585,7 @@ ; RV64M-NEXT: srli a1, a1, 31 ; RV64M-NEXT: or a1, a1, a4 ; RV64M-NEXT: sd a1, 0(a0) -; RV64M-NEXT: slli a1, a2, 2 +; RV64M-NEXT: slliw a1, a2, 2 ; RV64M-NEXT: slli a3, a3, 31 ; RV64M-NEXT: srli a3, a3, 62 ; RV64M-NEXT: or a1, a3, a1 @@ -779,11 +779,11 @@ ; RV64MV-NEXT: slli a4, a3, 33 ; RV64MV-NEXT: or a1, a1, a4 ; RV64MV-NEXT: sd a1, 0(a0) -; RV64MV-NEXT: slli a2, a2, 2 +; RV64MV-NEXT: slliw a1, a2, 2 ; RV64MV-NEXT: slli a3, a3, 31 ; RV64MV-NEXT: srli a3, a3, 62 -; RV64MV-NEXT: or a2, a3, a2 -; RV64MV-NEXT: sw a2, 8(a0) +; RV64MV-NEXT: or a1, a3, a1 +; RV64MV-NEXT: sw a1, 8(a0) ; RV64MV-NEXT: addi sp, s0, -64 ; RV64MV-NEXT: ld ra, 56(sp) # 8-byte Folded Reload ; RV64MV-NEXT: ld s0, 48(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll --- a/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll +++ b/llvm/test/CodeGen/RISCV/srem-vector-lkk.ll @@ -701,15 +701,15 @@ ; RV64I-NEXT: lh a3, 16(a1) ; RV64I-NEXT: lh a1, 8(a1) ; RV64I-NEXT: srli a4, a2, 58 -; RV64I-NEXT: add a4, a2, a4 +; RV64I-NEXT: addw a4, a2, a4 ; RV64I-NEXT: andi a4, a4, -64 ; RV64I-NEXT: subw s1, a2, a4 ; RV64I-NEXT: srli a2, a1, 59 -; RV64I-NEXT: add a2, a1, a2 +; RV64I-NEXT: addw a2, a1, a2 ; RV64I-NEXT: andi a2, a2, -32 ; RV64I-NEXT: subw s2, a1, a2 ; RV64I-NEXT: srli a1, a3, 61 -; RV64I-NEXT: add a1, a3, a1 +; RV64I-NEXT: addw a1, a3, a1 ; RV64I-NEXT: andi a1, a1, -8 ; RV64I-NEXT: subw s3, a3, a1 ; RV64I-NEXT: li a1, 95 @@ -743,15 +743,15 @@ ; RV64IM-NEXT: mulw a3, a3, a6 ; RV64IM-NEXT: subw a2, a2, a3 ; RV64IM-NEXT: srli a3, a1, 58 -; RV64IM-NEXT: add a3, a1, a3 +; RV64IM-NEXT: addw a3, a1, a3 ; RV64IM-NEXT: andi a3, a3, -64 ; RV64IM-NEXT: subw a1, a1, a3 ; RV64IM-NEXT: srli a3, a5, 59 -; RV64IM-NEXT: add a3, a5, a3 +; RV64IM-NEXT: addw a3, a5, a3 ; RV64IM-NEXT: andi a3, a3, -32 ; RV64IM-NEXT: subw a5, a5, a3 ; RV64IM-NEXT: srli a3, a4, 61 -; RV64IM-NEXT: add a3, a4, a3 +; RV64IM-NEXT: addw a3, a4, a3 ; RV64IM-NEXT: andi a3, a3, -8 ; RV64IM-NEXT: subw a4, a4, a3 ; RV64IM-NEXT: sh a4, 4(a0) @@ -1008,7 +1008,7 @@ ; RV64I-NEXT: lh s1, 24(a1) ; RV64I-NEXT: lh a0, 16(a1) ; RV64I-NEXT: srli a1, a2, 49 -; RV64I-NEXT: add a1, a2, a1 +; RV64I-NEXT: addw a1, a2, a1 ; RV64I-NEXT: lui a3, 8 ; RV64I-NEXT: and a1, a1, a3 ; RV64I-NEXT: subw s3, a2, a1 @@ -1057,7 +1057,7 @@ ; RV64IM-NEXT: mulw a3, a3, a5 ; RV64IM-NEXT: subw a4, a4, a3 ; RV64IM-NEXT: srli a3, a1, 49 -; RV64IM-NEXT: add a3, a1, a3 +; RV64IM-NEXT: addw a3, a1, a3 ; RV64IM-NEXT: lui a5, 8 ; RV64IM-NEXT: and a3, a3, a5 ; RV64IM-NEXT: subw a1, a1, a3 diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll --- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll +++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll @@ -138,11 +138,11 @@ ; RV64I-NEXT: lbu a3, 4(a0) ; RV64I-NEXT: 
lbu a4, 6(a0) ; RV64I-NEXT: lbu a0, 7(a0) -; RV64I-NEXT: slli a2, a2, 8 +; RV64I-NEXT: slliw a2, a2, 8 ; RV64I-NEXT: or a2, a2, a3 -; RV64I-NEXT: slli a4, a4, 16 -; RV64I-NEXT: slli a0, a0, 24 -; RV64I-NEXT: or a0, a0, a4 +; RV64I-NEXT: slliw a3, a4, 16 +; RV64I-NEXT: slliw a0, a0, 24 +; RV64I-NEXT: or a0, a0, a3 ; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: or a0, a0, a1 diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll --- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll +++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll @@ -107,7 +107,7 @@ ; RV64-NEXT: lui a1, 28087 ; RV64-NEXT: addiw a1, a1, -585 ; RV64-NEXT: call __muldi3@plt -; RV64-NEXT: slli a1, a0, 26 +; RV64-NEXT: slliw a1, a0, 26 ; RV64-NEXT: slli a0, a0, 37 ; RV64-NEXT: srli a0, a0, 38 ; RV64-NEXT: or a0, a0, a1 @@ -140,8 +140,8 @@ ; RV64M: # %bb.0: ; RV64M-NEXT: lui a1, 28087 ; RV64M-NEXT: addiw a1, a1, -585 -; RV64M-NEXT: mul a0, a0, a1 -; RV64M-NEXT: slli a1, a0, 26 +; RV64M-NEXT: mulw a0, a0, a1 +; RV64M-NEXT: slliw a1, a0, 26 ; RV64M-NEXT: slli a0, a0, 37 ; RV64M-NEXT: srli a0, a0, 38 ; RV64M-NEXT: or a0, a0, a1 @@ -172,8 +172,8 @@ ; RV64MV: # %bb.0: ; RV64MV-NEXT: lui a1, 28087 ; RV64MV-NEXT: addiw a1, a1, -585 -; RV64MV-NEXT: mul a0, a0, a1 -; RV64MV-NEXT: slli a1, a0, 26 +; RV64MV-NEXT: mulw a0, a0, a1 +; RV64MV-NEXT: slliw a1, a0, 26 ; RV64MV-NEXT: slli a0, a0, 37 ; RV64MV-NEXT: srli a0, a0, 38 ; RV64MV-NEXT: or a0, a0, a1 @@ -396,7 +396,7 @@ ; RV64-NEXT: andi a0, a0, 2047 ; RV64-NEXT: li a1, 683 ; RV64-NEXT: call __muldi3@plt -; RV64-NEXT: slli a1, a0, 10 +; RV64-NEXT: slliw a1, a0, 10 ; RV64-NEXT: slli a0, a0, 53 ; RV64-NEXT: srli a0, a0, 54 ; RV64-NEXT: or a0, a0, a1 @@ -487,8 +487,8 @@ ; RV64M-NEXT: srli a3, a1, 11 ; RV64M-NEXT: andi a1, a1, 2047 ; RV64M-NEXT: li a4, 683 -; RV64M-NEXT: mul a1, a1, a4 -; RV64M-NEXT: slli a4, a1, 10 +; RV64M-NEXT: mulw a1, a1, a4 +; RV64M-NEXT: slliw a4, a1, 10 ; RV64M-NEXT: slli a1, a1, 53 ; RV64M-NEXT: srli a1, a1, 54 ; RV64M-NEXT: or a1, a1, a4 diff --git a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll --- a/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll +++ b/llvm/test/CodeGen/RISCV/vec3-setcc-crash.ll @@ -50,14 +50,14 @@ ; RV64-NEXT: sgtz a6, a6 ; RV64-NEXT: sgtz a5, a5 ; RV64-NEXT: sgtz a4, a4 -; RV64-NEXT: neg a4, a4 +; RV64-NEXT: negw a4, a4 ; RV64-NEXT: and a3, a4, a3 -; RV64-NEXT: slli a3, a3, 8 -; RV64-NEXT: neg a4, a5 +; RV64-NEXT: slliw a3, a3, 8 +; RV64-NEXT: negw a4, a5 ; RV64-NEXT: and a0, a4, a0 ; RV64-NEXT: andi a0, a0, 255 ; RV64-NEXT: or a0, a0, a3 -; RV64-NEXT: neg a3, a6 +; RV64-NEXT: negw a3, a6 ; RV64-NEXT: and a2, a3, a2 ; RV64-NEXT: sb a2, 2(a1) ; RV64-NEXT: sh a0, 0(a1)