diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3789,23 +3789,32 @@
     break;
   case RISCVISD::SELECT_CC: {
     // Transform
-    // (select_cc (xor X, 1), 0, setne, trueV, falseV) ->
-    // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
-    // This can occur when legalizing some floating point comparisons.
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);
    auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2));
+    if (!ISD::isIntEqualitySetCC(CCVal))
+      break;
+    // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
+    // (select_cc X, Y, eq/ne, trueV, falseV)
+    if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
+      return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
+                         {LHS.getOperand(0), LHS.getOperand(1),
+                          N->getOperand(2), N->getOperand(3),
+                          N->getOperand(4)});
+    // (select_cc X, 1, setne, trueV, falseV) ->
+    // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
+    // This can occur when legalizing some floating point comparisons.
    APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
-    if (ISD::isIntEqualitySetCC(CCVal) && isNullConstant(RHS) &&
-        LHS.getOpcode() == ISD::XOR && isOneConstant(LHS.getOperand(1)) &&
-        DAG.MaskedValueIsZero(LHS.getOperand(0), Mask)) {
+    if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
      SDLoc DL(N);
      CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
      SDValue TargetCC = DAG.getConstant(CCVal, DL, Subtarget.getXLenVT());
-      return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
-                         {LHS.getOperand(0), RHS, TargetCC, N->getOperand(3),
-                          N->getOperand(4)});
+      RHS = DAG.getConstant(0, DL, LHS.getValueType());
+      return DAG.getNode(
+          RISCVISD::SELECT_CC, DL, N->getValueType(0),
+          {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
    }

+    break;
  }
  case ISD::SETCC: {
diff --git a/llvm/test/CodeGen/RISCV/sadd_sat.ll b/llvm/test/CodeGen/RISCV/sadd_sat.ll
--- a/llvm/test/CodeGen/RISCV/sadd_sat.ll
+++ b/llvm/test/CodeGen/RISCV/sadd_sat.ll
@@ -22,8 +22,7 @@
 ; RV32I-NEXT:  .LBB0_2:
 ; RV32I-NEXT:    slt a2, a3, a2
 ; RV32I-NEXT:    slti a1, a1, 0
-; RV32I-NEXT:    xor a1, a1, a2
-; RV32I-NEXT:    bnez a1, .LBB0_4
+; RV32I-NEXT:    bne a1, a2, .LBB0_4
 ; RV32I-NEXT:  # %bb.3:
 ; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:  .LBB0_4:
@@ -57,8 +56,7 @@
 ; RV32IZbb-NEXT:  .LBB0_2:
 ; RV32IZbb-NEXT:    slt a2, a3, a2
 ; RV32IZbb-NEXT:    slti a1, a1, 0
-; RV32IZbb-NEXT:    xor a1, a1, a2
-; RV32IZbb-NEXT:    bnez a1, .LBB0_4
+; RV32IZbb-NEXT:    bne a1, a2, .LBB0_4
 ; RV32IZbb-NEXT:  # %bb.3:
 ; RV32IZbb-NEXT:    mv a0, a3
 ; RV32IZbb-NEXT:  .LBB0_4:
@@ -114,8 +112,7 @@
 ; RV64I-NEXT:  .LBB1_2:
 ; RV64I-NEXT:    slt a2, a3, a2
 ; RV64I-NEXT:    slti a1, a1, 0
-; RV64I-NEXT:    xor a1, a1, a2
-; RV64I-NEXT:    bnez a1, .LBB1_4
+; RV64I-NEXT:    bne a1, a2, .LBB1_4
 ; RV64I-NEXT:  # %bb.3:
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:  .LBB1_4:
@@ -157,8 +154,7 @@
 ; RV64IZbb-NEXT:  .LBB1_2:
 ; RV64IZbb-NEXT:    slt a2, a3, a2
 ; RV64IZbb-NEXT:    slti a1, a1, 0
-; RV64IZbb-NEXT:    xor a1, a1, a2
-; RV64IZbb-NEXT:    bnez a1, .LBB1_4
+; RV64IZbb-NEXT:    bne a1, a2, .LBB1_4
 ; RV64IZbb-NEXT:  # %bb.3:
 ; RV64IZbb-NEXT:    mv a0, a3
 ; RV64IZbb-NEXT:  .LBB1_4:
diff --git a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
--- a/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/sadd_sat_plus.ll
@@ -13,22 +13,20 @@
 define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ; RV32I-LABEL: func32:
 ; RV32I:       # %bb.0:
+; RV32I-NEXT:    mv a3, a0
 ; RV32I-NEXT:    mul a2, a1, a2
 ; RV32I-NEXT:    add a1, a0, a2
-; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    slti a2, a2, 0
-; RV32I-NEXT:    xor a2, a2, a0
 ; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    bltz a1, .LBB0_3
+; RV32I-NEXT:    bgez a1, .LBB0_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    beqz a2, .LBB0_4
-; RV32I-NEXT:  .LBB0_2:
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB0_3:
 ; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    bnez a2, .LBB0_2
-; RV32I-NEXT:  .LBB0_4:
+; RV32I-NEXT:  .LBB0_2:
+; RV32I-NEXT:    slt a3, a1, a3
+; RV32I-NEXT:    slti a2, a2, 0
+; RV32I-NEXT:    bne a2, a3, .LBB0_4
+; RV32I-NEXT:  # %bb.3:
 ; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB0_4:
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func32:
@@ -52,22 +50,20 @@
 ;
 ; RV32IZbb-LABEL: func32:
 ; RV32IZbb:       # %bb.0:
+; RV32IZbb-NEXT:    mv a3, a0
 ; RV32IZbb-NEXT:    mul a2, a1, a2
 ; RV32IZbb-NEXT:    add a1, a0, a2
-; RV32IZbb-NEXT:    slt a0, a1, a0
-; RV32IZbb-NEXT:    slti a2, a2, 0
-; RV32IZbb-NEXT:    xor a2, a2, a0
 ; RV32IZbb-NEXT:    lui a0, 524288
-; RV32IZbb-NEXT:    bltz a1, .LBB0_3
+; RV32IZbb-NEXT:    bgez a1, .LBB0_2
 ; RV32IZbb-NEXT:  # %bb.1:
-; RV32IZbb-NEXT:    beqz a2, .LBB0_4
-; RV32IZbb-NEXT:  .LBB0_2:
-; RV32IZbb-NEXT:    ret
-; RV32IZbb-NEXT:  .LBB0_3:
 ; RV32IZbb-NEXT:    addi a0, a0, -1
-; RV32IZbb-NEXT:    bnez a2, .LBB0_2
-; RV32IZbb-NEXT:  .LBB0_4:
+; RV32IZbb-NEXT:  .LBB0_2:
+; RV32IZbb-NEXT:    slt a3, a1, a3
+; RV32IZbb-NEXT:    slti a2, a2, 0
+; RV32IZbb-NEXT:    bne a2, a3, .LBB0_4
+; RV32IZbb-NEXT:  # %bb.3:
 ; RV32IZbb-NEXT:    mv a0, a1
+; RV32IZbb-NEXT:  .LBB0_4:
 ; RV32IZbb-NEXT:    ret
 ;
 ; RV64IZbb-LABEL: func32:
@@ -123,8 +119,7 @@
 ; RV64I-NEXT:  .LBB1_2:
 ; RV64I-NEXT:    slt a1, a3, a1
 ; RV64I-NEXT:    slti a2, a2, 0
-; RV64I-NEXT:    xor a1, a2, a1
-; RV64I-NEXT:    bnez a1, .LBB1_4
+; RV64I-NEXT:    bne a2, a1, .LBB1_4
 ; RV64I-NEXT:  # %bb.3:
 ; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:  .LBB1_4:
@@ -166,8 +161,7 @@
 ; RV64IZbb-NEXT:  .LBB1_2:
 ; RV64IZbb-NEXT:    slt a1, a3, a1
 ; RV64IZbb-NEXT:    slti a2, a2, 0
-; RV64IZbb-NEXT:    xor a1, a2, a1
-; RV64IZbb-NEXT:    bnez a1, .LBB1_4
+; RV64IZbb-NEXT:    bne a2, a1, .LBB1_4
 ; RV64IZbb-NEXT:  # %bb.3:
 ; RV64IZbb-NEXT:    mv a0, a3
 ; RV64IZbb-NEXT:  .LBB1_4:
diff --git a/llvm/test/CodeGen/RISCV/ssub_sat.ll b/llvm/test/CodeGen/RISCV/ssub_sat.ll
--- a/llvm/test/CodeGen/RISCV/ssub_sat.ll
+++ b/llvm/test/CodeGen/RISCV/ssub_sat.ll
@@ -13,21 +13,19 @@
 define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
 ; RV32I-LABEL: func:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    sgtz a2, a1
-; RV32I-NEXT:    sub a1, a0, a1
-; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    xor a2, a2, a0
+; RV32I-NEXT:    mv a2, a0
+; RV32I-NEXT:    sub a3, a0, a1
 ; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    bltz a1, .LBB0_3
+; RV32I-NEXT:    bgez a3, .LBB0_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    beqz a2, .LBB0_4
-; RV32I-NEXT:  .LBB0_2:
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB0_3:
 ; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    bnez a2, .LBB0_2
+; RV32I-NEXT:  .LBB0_2:
+; RV32I-NEXT:    sgtz a1, a1
+; RV32I-NEXT:    slt a2, a3, a2
+; RV32I-NEXT:    bne a1, a2, .LBB0_4
+; RV32I-NEXT:  # %bb.3:
+; RV32I-NEXT:    mv a0, a3
 ; RV32I-NEXT:  .LBB0_4:
-; RV32I-NEXT:    mv a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func:
@@ -49,21 +47,19 @@
 ;
 ; RV32IZbb-LABEL: func:
 ; RV32IZbb:       # %bb.0:
-; RV32IZbb-NEXT:    sgtz a2, a1
-; RV32IZbb-NEXT:    sub a1, a0, a1
-; RV32IZbb-NEXT:    slt a0, a1, a0
-; RV32IZbb-NEXT:    xor a2, a2, a0
+; RV32IZbb-NEXT:    mv a2, a0
+; RV32IZbb-NEXT:    sub a3, a0, a1
 ; RV32IZbb-NEXT:    lui a0, 524288
-; RV32IZbb-NEXT:    bltz a1, .LBB0_3
+; RV32IZbb-NEXT:    bgez a3, .LBB0_2
 ; RV32IZbb-NEXT:  # %bb.1:
-; RV32IZbb-NEXT:    beqz a2, .LBB0_4
-; RV32IZbb-NEXT:  .LBB0_2:
-; RV32IZbb-NEXT:    ret
-; RV32IZbb-NEXT:  .LBB0_3:
 ; RV32IZbb-NEXT:    addi a0, a0, -1
-; RV32IZbb-NEXT:    bnez a2, .LBB0_2
+; RV32IZbb-NEXT:  .LBB0_2:
+; RV32IZbb-NEXT:    sgtz a1, a1
+; RV32IZbb-NEXT:    slt a2, a3, a2
+; RV32IZbb-NEXT:    bne a1, a2, .LBB0_4
+; RV32IZbb-NEXT:  # %bb.3:
+; RV32IZbb-NEXT:    mv a0, a3
 ; RV32IZbb-NEXT:  .LBB0_4:
-; RV32IZbb-NEXT:    mv a0, a1
 ; RV32IZbb-NEXT:    ret
 ;
 ; RV64IZbb-LABEL: func:
@@ -104,22 +100,20 @@
 ;
 ; RV64I-LABEL: func2:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sgtz a2, a1
-; RV64I-NEXT:    sub a1, a0, a1
-; RV64I-NEXT:    slt a0, a1, a0
-; RV64I-NEXT:    xor a2, a2, a0
+; RV64I-NEXT:    mv a2, a0
+; RV64I-NEXT:    sub a3, a0, a1
 ; RV64I-NEXT:    addi a0, zero, -1
 ; RV64I-NEXT:    slli a0, a0, 63
-; RV64I-NEXT:    bltz a1, .LBB1_3
+; RV64I-NEXT:    bgez a3, .LBB1_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    beqz a2, .LBB1_4
-; RV64I-NEXT:  .LBB1_2:
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB1_3:
 ; RV64I-NEXT:    addi a0, a0, -1
-; RV64I-NEXT:    bnez a2, .LBB1_2
+; RV64I-NEXT:  .LBB1_2:
+; RV64I-NEXT:    sgtz a1, a1
+; RV64I-NEXT:    slt a2, a3, a2
+; RV64I-NEXT:    bne a1, a2, .LBB1_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:  .LBB1_4:
-; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func2:
@@ -147,22 +141,20 @@
 ;
 ; RV64IZbb-LABEL: func2:
 ; RV64IZbb:       # %bb.0:
-; RV64IZbb-NEXT:    sgtz a2, a1
-; RV64IZbb-NEXT:    sub a1, a0, a1
-; RV64IZbb-NEXT:    slt a0, a1, a0
-; RV64IZbb-NEXT:    xor a2, a2, a0
+; RV64IZbb-NEXT:    mv a2, a0
+; RV64IZbb-NEXT:    sub a3, a0, a1
 ; RV64IZbb-NEXT:    addi a0, zero, -1
 ; RV64IZbb-NEXT:    slli a0, a0, 63
-; RV64IZbb-NEXT:    bltz a1, .LBB1_3
+; RV64IZbb-NEXT:    bgez a3, .LBB1_2
 ; RV64IZbb-NEXT:  # %bb.1:
-; RV64IZbb-NEXT:    beqz a2, .LBB1_4
-; RV64IZbb-NEXT:  .LBB1_2:
-; RV64IZbb-NEXT:    ret
-; RV64IZbb-NEXT:  .LBB1_3:
 ; RV64IZbb-NEXT:    addi a0, a0, -1
-; RV64IZbb-NEXT:    bnez a2, .LBB1_2
+; RV64IZbb-NEXT:  .LBB1_2:
+; RV64IZbb-NEXT:    sgtz a1, a1
+; RV64IZbb-NEXT:    slt a2, a3, a2
+; RV64IZbb-NEXT:    bne a1, a2, .LBB1_4
+; RV64IZbb-NEXT:  # %bb.3:
+; RV64IZbb-NEXT:    mv a0, a3
 ; RV64IZbb-NEXT:  .LBB1_4:
-; RV64IZbb-NEXT:    mv a0, a1
 ; RV64IZbb-NEXT:    ret
   %tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %y);
   ret i64 %tmp;
diff --git a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
--- a/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
+++ b/llvm/test/CodeGen/RISCV/ssub_sat_plus.ll
@@ -13,22 +13,20 @@
 define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
 ; RV32I-LABEL: func32:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    mul a1, a1, a2
-; RV32I-NEXT:    sgtz a2, a1
-; RV32I-NEXT:    sub a1, a0, a1
-; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    xor a2, a2, a0
+; RV32I-NEXT:    mv a3, a0
+; RV32I-NEXT:    mul a2, a1, a2
+; RV32I-NEXT:    sub a1, a0, a2
 ; RV32I-NEXT:    lui a0, 524288
-; RV32I-NEXT:    bltz a1, .LBB0_3
+; RV32I-NEXT:    bgez a1, .LBB0_2
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    beqz a2, .LBB0_4
-; RV32I-NEXT:  .LBB0_2:
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB0_3:
 ; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    bnez a2, .LBB0_2
-; RV32I-NEXT:  .LBB0_4:
+; RV32I-NEXT:  .LBB0_2:
+; RV32I-NEXT:    sgtz a2, a2
+; RV32I-NEXT:    slt a3, a1, a3
+; RV32I-NEXT:    bne a2, a3, .LBB0_4
+; RV32I-NEXT:  # %bb.3:
 ; RV32I-NEXT:    mv a0, a1
+; RV32I-NEXT:  .LBB0_4:
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: func32:
@@ -52,22 +50,20 @@
 ;
 ; RV32IZbb-LABEL: func32:
 ; RV32IZbb:       # %bb.0:
-; RV32IZbb-NEXT:    mul a1, a1, a2
-; RV32IZbb-NEXT:    sgtz a2, a1
-; RV32IZbb-NEXT:    sub a1, a0, a1
-; RV32IZbb-NEXT:    slt a0, a1, a0
-; RV32IZbb-NEXT:    xor a2, a2, a0
+; RV32IZbb-NEXT:    mv a3, a0
+; RV32IZbb-NEXT:    mul a2, a1, a2
+; RV32IZbb-NEXT:    sub a1, a0, a2
 ; RV32IZbb-NEXT:    lui a0, 524288
-; RV32IZbb-NEXT:    bltz a1, .LBB0_3
+; RV32IZbb-NEXT:    bgez a1, .LBB0_2
 ; RV32IZbb-NEXT:  # %bb.1:
-; RV32IZbb-NEXT:    beqz a2, .LBB0_4
-; RV32IZbb-NEXT:  .LBB0_2:
-; RV32IZbb-NEXT:    ret
-; RV32IZbb-NEXT:  .LBB0_3:
 ; RV32IZbb-NEXT:    addi a0, a0, -1
-; RV32IZbb-NEXT:    bnez a2, .LBB0_2
-; RV32IZbb-NEXT:  .LBB0_4:
+; RV32IZbb-NEXT:  .LBB0_2:
+; RV32IZbb-NEXT:    sgtz a2, a2
+; RV32IZbb-NEXT:    slt a3, a1, a3
+; RV32IZbb-NEXT:    bne a2, a3, .LBB0_4
+; RV32IZbb-NEXT:  # %bb.3:
 ; RV32IZbb-NEXT:    mv a0, a1
+; RV32IZbb-NEXT:  .LBB0_4:
 ; RV32IZbb-NEXT:    ret
 ;
 ; RV64IZbb-LABEL: func32:
@@ -111,22 +107,20 @@
 ;
 ; RV64I-LABEL: func64:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sgtz a3, a2
-; RV64I-NEXT:    sub a1, a0, a2
-; RV64I-NEXT:    slt a0, a1, a0
-; RV64I-NEXT:    xor a2, a3, a0
+; RV64I-NEXT:    mv a1, a0
+; RV64I-NEXT:    sub a3, a0, a2
 ; RV64I-NEXT:    addi a0, zero, -1
 ; RV64I-NEXT:    slli a0, a0, 63
-; RV64I-NEXT:    bltz a1, .LBB1_3
+; RV64I-NEXT:    bgez a3, .LBB1_2
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    beqz a2, .LBB1_4
-; RV64I-NEXT:  .LBB1_2:
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB1_3:
 ; RV64I-NEXT:    addi a0, a0, -1
-; RV64I-NEXT:    bnez a2, .LBB1_2
+; RV64I-NEXT:  .LBB1_2:
+; RV64I-NEXT:    sgtz a2, a2
+; RV64I-NEXT:    slt a1, a3, a1
+; RV64I-NEXT:    bne a2, a1, .LBB1_4
+; RV64I-NEXT:  # %bb.3:
+; RV64I-NEXT:    mv a0, a3
 ; RV64I-NEXT:  .LBB1_4:
-; RV64I-NEXT:    mv a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32IZbb-LABEL: func64:
@@ -154,22 +148,20 @@
 ;
 ; RV64IZbb-LABEL: func64:
 ; RV64IZbb:       # %bb.0:
-; RV64IZbb-NEXT:    sgtz a3, a2
-; RV64IZbb-NEXT:    sub a1, a0, a2
-; RV64IZbb-NEXT:    slt a0, a1, a0
-; RV64IZbb-NEXT:    xor a2, a3, a0
+; RV64IZbb-NEXT:    mv a1, a0
+; RV64IZbb-NEXT:    sub a3, a0, a2
 ; RV64IZbb-NEXT:    addi a0, zero, -1
 ; RV64IZbb-NEXT:    slli a0, a0, 63
-; RV64IZbb-NEXT:    bltz a1, .LBB1_3
+; RV64IZbb-NEXT:    bgez a3, .LBB1_2
 ; RV64IZbb-NEXT:  # %bb.1:
-; RV64IZbb-NEXT:    beqz a2, .LBB1_4
-; RV64IZbb-NEXT:  .LBB1_2:
-; RV64IZbb-NEXT:    ret
-; RV64IZbb-NEXT:  .LBB1_3:
 ; RV64IZbb-NEXT:    addi a0, a0, -1
-; RV64IZbb-NEXT:    bnez a2, .LBB1_2
+; RV64IZbb-NEXT:  .LBB1_2:
+; RV64IZbb-NEXT:    sgtz a2, a2
+; RV64IZbb-NEXT:    slt a1, a3, a1
+; RV64IZbb-NEXT:    bne a2, a1, .LBB1_4
+; RV64IZbb-NEXT:  # %bb.3:
+; RV64IZbb-NEXT:    mv a0, a3
 ; RV64IZbb-NEXT:  .LBB1_4:
-; RV64IZbb-NEXT:    mv a0, a1
 ; RV64IZbb-NEXT:    ret
   %a = mul i64 %y, %z
   %tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %z)
diff --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -694,8 +694,7 @@
 ; RV32-NEXT:    add a2, a0, a1
 ; RV32-NEXT:    slt a2, a2, a0
 ; RV32-NEXT:    slti a3, a1, 0
-; RV32-NEXT:    xor a2, a3, a2
-; RV32-NEXT:    bnez a2, .LBB22_2
+; RV32-NEXT:    bne a3, a2, .LBB22_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a1
 ; RV32-NEXT:  .LBB22_2: # %entry
@@ -768,8 +767,7 @@
 ; RV64-NEXT:    add a2, a0, a1
 ; RV64-NEXT:    slt a2, a2, a0
 ; RV64-NEXT:    slti a3, a1, 0
-; RV64-NEXT:    xor a2, a3, a2
-; RV64-NEXT:    bnez a2, .LBB24_2
+; RV64-NEXT:    bne a3, a2, .LBB24_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
 ; RV64-NEXT:  .LBB24_2: # %entry
@@ -939,8 +937,7 @@
 ; RV32-NEXT:    sgtz a2, a1
 ; RV32-NEXT:    sub a3, a0, a1
 ; RV32-NEXT:    slt a3, a3, a0
-; RV32-NEXT:    xor a2, a2, a3
-; RV32-NEXT:    bnez a2, .LBB30_2
+; RV32-NEXT:    bne a2, a3, .LBB30_2
 ; RV32-NEXT:  # %bb.1: # %entry
 ; RV32-NEXT:    mv a0, a1
 ; RV32-NEXT:  .LBB30_2: # %entry
@@ -1011,8 +1008,7 @@
 ; RV64-NEXT:    sgtz a2, a1
 ; RV64-NEXT:    sub a3, a0, a1
 ; RV64-NEXT:    slt a3, a3, a0
-; RV64-NEXT:    xor a2, a2, a3
-; RV64-NEXT:    bnez a2, .LBB32_2
+; RV64-NEXT:    bne a2, a3, .LBB32_2
 ; RV64-NEXT:  # %bb.1: # %entry
 ; RV64-NEXT:    mv a0, a1
 ; RV64-NEXT:  .LBB32_2: # %entry
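
The two SELECT_CC folds in the RISCVISelLowering.cpp hunk come down to plain integer identities, which is why the test diffs above replace each xor+bnez (or xor+beqz) pair with a single bne/beq. A minimal standalone check of both identities, assuming ordinary unsigned integers (plain C++ written for illustration; it is not part of the patch and uses no LLVM API):

// Sanity check for the two folds, over small exhaustive ranges.
#include <cassert>
#include <cstdint>

int main() {
  // Fold 1: ((X ^ Y) != 0) is exactly (X != Y), and likewise for ==,
  // so the XOR feeding an integer equality compare against zero can be
  // dropped and the branch can compare X and Y directly.
  for (uint32_t X = 0; X < 16; ++X)
    for (uint32_t Y = 0; Y < 16; ++Y)
      assert(((X ^ Y) != 0) == (X != Y));

  // Fold 2: when X is provably 0 or 1, (X != 1) is exactly (X == 0),
  // so the RHS constant 1 can be rewritten to 0 under an inverted
  // condition code.
  for (uint32_t X = 0; X <= 1; ++X)
    assert((X != 1) == (X == 0));

  return 0;
}

The first identity is what lets the combine feed LHS.getOperand(0) and LHS.getOperand(1) straight into the new SELECT_CC; the second relies on DAG.MaskedValueIsZero proving every bit of LHS above bit 0 is zero, after which getSetCCInverse flips the condition and the RHS becomes the constant 0.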