diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3794,6 +3794,30 @@
     auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2));
     if (!ISD::isIntEqualitySetCC(CCVal))
       break;
+
+    // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
+    //      (select_cc X, Y, lt, trueV, falseV)
+    // Sometimes the setcc is introduced after select_cc has been formed.
+    if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
+        LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
+      // If we're looking for eq 0 instead of ne 0, we need to invert the
+      // condition.
+      bool Invert = CCVal == ISD::SETEQ;
+      CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
+      if (Invert)
+        CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
+
+      RHS = LHS.getOperand(1);
+      LHS = LHS.getOperand(0);
+      normaliseSetCC(LHS, RHS, CCVal);
+
+      SDLoc DL(N);
+      SDValue TargetCC = DAG.getConstant(CCVal, DL, Subtarget.getXLenVT());
+      return DAG.getNode(
+          RISCVISD::SELECT_CC, DL, N->getValueType(0),
+          {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
+    }
+
     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
     //      (select_cc X, Y, eq/ne, trueV, falseV)
     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
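To make the new combine concrete, here is a minimal hand-written IR case (illustrative only, not one of the patch's test updates; the function and value names are mine) where the setcc only appears after the SELECT_CC has been formed, as in the uadd.sat expansion exercised below:

  define i32 @uadd_sat_example(i32 %x, i32 %y) {
    %sum = add i32 %x, %y
    ; Unsigned-add overflow: the wrapped sum is smaller than either input.
    %ov = icmp ult i32 %sum, %x
    %sat = select i1 %ov, i32 -1, i32 %sum
    ret i32 %sat
  }

Before this patch the icmp materialized as a standalone sltu whose result was then tested with bnez; with the fold, the compare operands feed the branch directly as a single bltu, which is exactly the delta in the test updates that follow.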
diff --git a/llvm/test/CodeGen/RISCV/uadd_sat.ll b/llvm/test/CodeGen/RISCV/uadd_sat.ll
--- a/llvm/test/CodeGen/RISCV/uadd_sat.ll
+++ b/llvm/test/CodeGen/RISCV/uadd_sat.ll
@@ -13,10 +13,10 @@
 define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
 ; RV32I-LABEL: func:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    add a1, a0, a1
-; RV32I-NEXT:    sltu a2, a1, a0
 ; RV32I-NEXT:    addi a0, zero, -1
-; RV32I-NEXT:    bnez a2, .LBB0_2
+; RV32I-NEXT:    bltu a1, a2, .LBB0_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:  .LBB0_2:
@@ -85,10 +85,10 @@
 ;
 ; RV64I-LABEL: func2:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    mv a2, a0
 ; RV64I-NEXT:    add a1, a0, a1
-; RV64I-NEXT:    sltu a2, a1, a0
 ; RV64I-NEXT:    addi a0, zero, -1
-; RV64I-NEXT:    bnez a2, .LBB1_2
+; RV64I-NEXT:    bltu a1, a2, .LBB1_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:  .LBB1_2:
diff --git a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
--- a/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/uadd_sat_plus.ll
@@ -13,11 +13,11 @@
 define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ; RV32I-LABEL: func32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    mul a1, a1, a2
-; RV32I-NEXT:    add a1, a0, a1
-; RV32I-NEXT:    sltu a2, a1, a0
+; RV32I-NEXT:    mv a3, a0
+; RV32I-NEXT:    mul a0, a1, a2
+; RV32I-NEXT:    add a1, a3, a0
 ; RV32I-NEXT:    addi a0, zero, -1
-; RV32I-NEXT:    bnez a2, .LBB0_2
+; RV32I-NEXT:    bltu a1, a3, .LBB0_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:  .LBB0_2:
@@ -88,12 +88,12 @@
 ;
 ; RV64I-LABEL: func64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    add a1, a0, a2
-; RV64I-NEXT:    sltu a2, a1, a0
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    add a2, a0, a2
 ; RV64I-NEXT:    addi a0, zero, -1
-; RV64I-NEXT:    bnez a2, .LBB1_2
+; RV64I-NEXT:    bltu a2, a1, .LBB1_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:  .LBB1_2:
 ; RV64I-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/usub_sat.ll b/llvm/test/CodeGen/RISCV/usub_sat.ll
--- a/llvm/test/CodeGen/RISCV/usub_sat.ll
+++ b/llvm/test/CodeGen/RISCV/usub_sat.ll
@@ -13,10 +13,10 @@
 define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
 ; RV32I-LABEL: func:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    mv a2, a0
 ; RV32I-NEXT:    sub a1, a0, a1
-; RV32I-NEXT:    sltu a2, a0, a1
 ; RV32I-NEXT:    mv a0, zero
-; RV32I-NEXT:    bnez a2, .LBB0_2
+; RV32I-NEXT:    bltu a2, a1, .LBB0_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:  .LBB0_2:
@@ -29,9 +29,8 @@
 ; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a2, a0, 32
 ; RV64I-NEXT:    sub a0, a2, a1
-; RV64I-NEXT:    sltu a2, a2, a0
 ; RV64I-NEXT:    mv a1, zero
-; RV64I-NEXT:    bnez a2, .LBB0_2
+; RV64I-NEXT:    bltu a2, a0, .LBB0_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a1, a0
 ; RV64I-NEXT:  .LBB0_2:
@@ -82,10 +81,10 @@
 ;
 ; RV64I-LABEL: func2:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    mv a2, a0
 ; RV64I-NEXT:    sub a1, a0, a1
-; RV64I-NEXT:    sltu a2, a0, a1
 ; RV64I-NEXT:    mv a0, zero
-; RV64I-NEXT:    bnez a2, .LBB1_2
+; RV64I-NEXT:    bltu a2, a1, .LBB1_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:  .LBB1_2:
@@ -125,30 +124,28 @@
 define zeroext i16 @func16(i16 zeroext %x, i16 zeroext %y) nounwind {
 ; RV32I-LABEL: func16:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    sub a1, a0, a1
-; RV32I-NEXT:    sltu a2, a0, a1
-; RV32I-NEXT:    mv a0, zero
-; RV32I-NEXT:    bnez a2, .LBB2_2
+; RV32I-NEXT:    sub a2, a0, a1
+; RV32I-NEXT:    mv a1, zero
+; RV32I-NEXT:    bltu a0, a2, .LBB2_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:    mv a1, a2
 ; RV32I-NEXT:  .LBB2_2:
-; RV32I-NEXT:    lui a1, 16
-; RV32I-NEXT:    addi a1, a1, -1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    lui a0, 16
+; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:    and a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sub a1, a0, a1
-; RV64I-NEXT:    sltu a2, a0, a1
-; RV64I-NEXT:    mv a0, zero
-; RV64I-NEXT:    bnez a2, .LBB2_2
+; RV64I-NEXT:    sub a2, a0, a1
+; RV64I-NEXT:    mv a1, zero
+; RV64I-NEXT:    bltu a0, a2, .LBB2_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    mv a1, a2
 ; RV64I-NEXT:  .LBB2_2:
-; RV64I-NEXT:    lui a1, 16
-; RV64I-NEXT:    addiw a1, a1, -1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    lui a0, 16
+; RV64I-NEXT:    addiw a0, a0, -1
+; RV64I-NEXT:    and a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func16:
@@ -172,25 +169,23 @@
 ; RV32I-LABEL: func8:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sub a1, a0, a1
-; RV32I-NEXT:    sltu a2, a0, a1
-; RV32I-NEXT:    mv a0, zero
-; RV32I-NEXT:    bnez a2, .LBB3_2
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    bltu a0, a1, .LBB3_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:    mv a2, a1
 ; RV32I-NEXT:  .LBB3_2:
-; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    andi a0, a2, 255
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func8:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sub a1, a0, a1
-; RV64I-NEXT:    sltu a2, a0, a1
-; RV64I-NEXT:    mv a0, zero
-; RV64I-NEXT:    bnez a2, .LBB3_2
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    bltu a0, a1, .LBB3_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    mv a2, a1
 ; RV64I-NEXT:  .LBB3_2:
-; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    andi a0, a2, 255
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func8:
@@ -214,25 +209,23 @@
 ; RV32I-LABEL: func3:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sub a1, a0, a1
-; RV32I-NEXT:    sltu a2, a0, a1
-; RV32I-NEXT:    mv a0, zero
-; RV32I-NEXT:    bnez a2, .LBB4_2
+; RV32I-NEXT:    mv a2, zero
+; RV32I-NEXT:    bltu a0, a1, .LBB4_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:    mv a2, a1
 ; RV32I-NEXT:  .LBB4_2:
-; RV32I-NEXT:    andi a0, a0, 15
+; RV32I-NEXT:    andi a0, a2, 15
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func3:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sub a1, a0, a1
-; RV64I-NEXT:    sltu a2, a0, a1
-; RV64I-NEXT:    mv a0, zero
-; RV64I-NEXT:    bnez a2, .LBB4_2
+; RV64I-NEXT:    mv a2, zero
+; RV64I-NEXT:    bltu a0, a1, .LBB4_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    mv a2, a1
 ; RV64I-NEXT:  .LBB4_2:
-; RV64I-NEXT:    andi a0, a0, 15
+; RV64I-NEXT:    andi a0, a2, 15
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func3:
diff --git a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll
--- a/llvm/test/CodeGen/RISCV/usub_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/usub_sat_plus.ll
@@ -13,11 +13,11 @@
 define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ; RV32I-LABEL: func32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    mul a1, a1, a2
-; RV32I-NEXT:    sub a1, a0, a1
-; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    mv a3, a0
+; RV32I-NEXT:    mul a0, a1, a2
+; RV32I-NEXT:    sub a1, a3, a0
 ; RV32I-NEXT:    mv a0, zero
-; RV32I-NEXT:    bnez a2, .LBB0_2
+; RV32I-NEXT:    bltu a3, a1, .LBB0_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:  .LBB0_2:
@@ -26,14 +26,13 @@
 ; RV64I-LABEL: func32:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slli a0, a0, 32
+; RV64I-NEXT:    srli a3, a0, 32
+; RV64I-NEXT:    mul a0, a1, a2
+; RV64I-NEXT:    slli a0, a0, 32
 ; RV64I-NEXT:    srli a0, a0, 32
-; RV64I-NEXT:    mul a1, a1, a2
-; RV64I-NEXT:    slli a1, a1, 32
-; RV64I-NEXT:    srli a1, a1, 32
-; RV64I-NEXT:    sub a1, a0, a1
-; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    sub a1, a3, a0
 ; RV64I-NEXT:    mv a0, zero
-; RV64I-NEXT:    bnez a2, .LBB0_2
+; RV64I-NEXT:    bltu a3, a1, .LBB0_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:  .LBB0_2:
@@ -86,12 +85,12 @@
 ;
 ; RV64I-LABEL: func64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sub a1, a0, a2
-; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    sub a2, a0, a2
 ; RV64I-NEXT:    mv a0, zero
-; RV64I-NEXT:    bnez a2, .LBB1_2
+; RV64I-NEXT:    bltu a1, a2, .LBB1_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    mv a0, a1
+; RV64I-NEXT:    mv a0, a2
 ; RV64I-NEXT:  .LBB1_2:
 ; RV64I-NEXT:    ret
 ;
@@ -132,13 +131,12 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    lui a3, 16
 ; RV32I-NEXT:    addi a3, a3, -1
+; RV32I-NEXT:    and a4, a0, a3
+; RV32I-NEXT:    mul a0, a1, a2
 ; RV32I-NEXT:    and a0, a0, a3
-; RV32I-NEXT:    mul a1, a1, a2
-; RV32I-NEXT:    and a1, a1, a3
-; RV32I-NEXT:    sub a1, a0, a1
-; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    sub a1, a4, a0
 ; RV32I-NEXT:    mv a0, zero
-; RV32I-NEXT:    bnez a2, .LBB2_2
+; RV32I-NEXT:    bltu a4, a1, .LBB2_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:  .LBB2_2:
@@ -148,13 +146,12 @@
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    lui a3, 16
 ; RV64I-NEXT:    addiw a3, a3, -1
+; RV64I-NEXT:    and a4, a0, a3
+; RV64I-NEXT:    mul a0, a1, a2
 ; RV64I-NEXT:    and a0, a0, a3
-; RV64I-NEXT:    mul a1, a1, a2
-; RV64I-NEXT:    and a1, a1, a3
-; RV64I-NEXT:    sub a1, a0, a1
-; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    sub a1, a4, a0
 ; RV64I-NEXT:    mv a0, zero
-; RV64I-NEXT:    bnez a2, .LBB2_2
+; RV64I-NEXT:    bltu a4, a1, .LBB2_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:  .LBB2_2:
@@ -185,13 +182,12 @@
 define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
 ; RV32I-LABEL: func8:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a3, a0, 255
+; RV32I-NEXT:    mul a0, a1, a2
 ; RV32I-NEXT:    andi a0, a0, 255
-; RV32I-NEXT:    mul a1, a1, a2
-; RV32I-NEXT:    andi a1, a1, 255
-; RV32I-NEXT:    sub a1, a0, a1
-; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    sub a1, a3, a0
 ; RV32I-NEXT:    mv a0, zero
-; RV32I-NEXT:    bnez a2, .LBB3_2
+; RV32I-NEXT:    bltu a3, a1, .LBB3_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:  .LBB3_2:
@@ -199,13 +195,12 @@
 ;
 ; RV64I-LABEL: func8:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a3, a0, 255
+; RV64I-NEXT:    mul a0, a1, a2
 ; RV64I-NEXT:    andi a0, a0, 255
-; RV64I-NEXT:    mul a1, a1, a2
-; RV64I-NEXT:    andi a1, a1, 255
-; RV64I-NEXT:    sub a1, a0, a1
-; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    sub a1, a3, a0
 ; RV64I-NEXT:    mv a0, zero
-; RV64I-NEXT:    bnez a2, .LBB3_2
+; RV64I-NEXT:    bltu a3, a1, .LBB3_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:  .LBB3_2:
@@ -236,13 +231,12 @@
 define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
 ; RV32I-LABEL: func4:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    andi a3, a0, 15
+; RV32I-NEXT:    mul a0, a1, a2
 ; RV32I-NEXT:    andi a0, a0, 15
-; RV32I-NEXT:    mul a1, a1, a2
-; RV32I-NEXT:    andi a1, a1, 15
-; RV32I-NEXT:    sub a1, a0, a1
-; RV32I-NEXT:    sltu a2, a0, a1
+; RV32I-NEXT:    sub a1, a3, a0
 ; RV32I-NEXT:    mv a0, zero
-; RV32I-NEXT:    bnez a2, .LBB4_2
+; RV32I-NEXT:    bltu a3, a1, .LBB4_2
 ; RV32I-NEXT:  # %bb.1:
 ; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:  .LBB4_2:
@@ -250,13 +244,12 @@
 ;
 ; RV64I-LABEL: func4:
 ; RV64I:       # %bb.0:
+; RV64I-NEXT:    andi a3, a0, 15
+; RV64I-NEXT:    mul a0, a1, a2
 ; RV64I-NEXT:    andi a0, a0, 15
-; RV64I-NEXT:    mul a1, a1, a2
-; RV64I-NEXT:    andi a1, a1, 15
-; RV64I-NEXT:    sub a1, a0, a1
-; RV64I-NEXT:    sltu a2, a0, a1
+; RV64I-NEXT:    sub a1, a3, a0
 ; RV64I-NEXT:    mv a0, zero
-; RV64I-NEXT:    bnez a2, .LBB4_2
+; RV64I-NEXT:    bltu a3, a1, .LBB4_2
 ; RV64I-NEXT:  # %bb.1:
 ; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:  .LBB4_2:
diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -813,8 +813,7 @@
 ; RV32-LABEL: uaddo.select.i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    add a2, a0, a1
-; RV32-NEXT:    sltu a2, a2, a0
-; RV32-NEXT:    bnez a2, .LBB26_2
+; RV32-NEXT:    bltu a2, a0, .LBB26_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a1
 ; RV32-NEXT:  .LBB26_2: # %entry
@@ -891,8 +890,7 @@
 ; RV64-LABEL: uaddo.select.i64:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    add a2, a0, a1
-; RV64-NEXT:    sltu a2, a2, a0
-; RV64-NEXT:    bnez a2, .LBB28_2
+; RV64-NEXT:    bltu a2, a0, .LBB28_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
 ; RV64-NEXT:  .LBB28_2: # %entry
@@ -1052,8 +1050,7 @@
 ; RV32-LABEL: usubo.select.i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    sub a2, a0, a1
-; RV32-NEXT:    sltu a2, a0, a2
-; RV32-NEXT:    bnez a2, .LBB34_2
+; RV32-NEXT:    bltu a0, a2, .LBB34_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a1
 ; RV32-NEXT:  .LBB34_2: # %entry
@@ -1131,8 +1128,7 @@
 ; RV64-LABEL: usubo.select.i64:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    sub a2, a0, a1
-; RV64-NEXT:    sltu a2, a0, a2
-; RV64-NEXT:    bnez a2, .LBB36_2
+; RV64-NEXT:    bltu a0, a2, .LBB36_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
 ; RV64-NEXT:  .LBB36_2: # %entry
@@ -1180,9 +1176,7 @@
 ; RV32-NEXT:    mulh a2, a0, a1
 ; RV32-NEXT:    mul a3, a0, a1
 ; RV32-NEXT:    srai a3, a3, 31
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    snez a2, a2
-; RV32-NEXT:    bnez a2, .LBB38_2
+; RV32-NEXT:    bne a2, a3, .LBB38_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a1
 ; RV32-NEXT:  .LBB38_2: # %entry
@@ -1275,9 +1269,7 @@
 ; RV64-NEXT:    mulh a2, a0, a1
 ; RV64-NEXT:    mul a3, a0, a1
 ; RV64-NEXT:    srai a3, a3, 63
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    snez a2, a2
-; RV64-NEXT:    bnez a2, .LBB40_2
+; RV64-NEXT:    bne a2, a3, .LBB40_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
 ; RV64-NEXT:  .LBB40_2: # %entry
@@ -1324,7 +1316,6 @@
 ; RV32-LABEL: umulo.select.i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    mulhu a2, a0, a1
-; RV32-NEXT:    snez a2, a2
 ; RV32-NEXT:    bnez a2, .LBB42_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a1
@@ -1404,7 +1395,6 @@
 ; RV64-LABEL: umulo.select.i64:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    mulhu a2, a0, a1
-; RV64-NEXT:    snez a2, a2
 ; RV64-NEXT:    bnez a2, .LBB44_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
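A companion sketch for the overflow-select changes in xaluo.ll above (again hand-written and only illustrative; the real tests use the llvm.usub.with.overflow intrinsic):

  define i32 @usubo_select_example(i32 %x, i32 %y) {
    %diff = sub i32 %x, %y
    ; Unsigned-subtract overflow: the difference exceeds %x iff a borrow occurred.
    %ov = icmp ugt i32 %diff, %x
    %sel = select i1 %ov, i32 %x, i32 %y
    ret i32 %sel
  }

Here the former sltu a2, a0, a2 / bnez a2 pair collapses to a single bltu a0, a2. When the select instead tests eq 0 rather than ne 0, the Invert path in the combine flips the condition with ISD::getSetCCInverse before normaliseSetCC canonicalises the operands, so both polarities reach a direct branch.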