diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h --- a/llvm/include/llvm/CodeGen/ISDOpcodes.h +++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h @@ -1332,6 +1332,12 @@ return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE; } +/// Return true if this is a setcc instruction that performs an equality +/// comparison when used with integer operands. +inline bool isIntEqualitySetCC(CondCode Code) { + return Code == SETEQ || Code == SETNE; +} + /// Return true if the specified condition returns true if the two operands to /// the condition are equal. Note that if one of the two operands is a NaN, /// this value is meaningless. diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -447,6 +447,7 @@ // We can use any register for comparisons setHasMultipleConditionRegisters(); + setTargetDAGCombine(ISD::SETCC); if (Subtarget.hasStdExtZbp()) { setTargetDAGCombine(ISD::OR); } @@ -1961,7 +1962,7 @@ SDValue LHS = N->getOperand(0); SDValue RHS = N->getOperand(1); auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2)); APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1); - if ((CCVal == ISD::SETNE || CCVal == ISD::SETEQ) && isNullConstant(RHS) && + if (ISD::isIntEqualitySetCC(CCVal) && isNullConstant(RHS) && LHS.getOpcode() == ISD::XOR && isOneConstant(LHS.getOperand(1)) && DAG.MaskedValueIsZero(LHS.getOperand(0), Mask)) { SDLoc DL(N); @@ -1973,6 +1974,22 @@ } break; } + case ISD::SETCC: { + // (setcc X, 1, setne) -> (setcc X, 0, seteq) if we can prove X is 0/1. + // Comparing with 0 may allow us to fold into bnez/beqz.
+ SDValue LHS = N->getOperand(0); + SDValue RHS = N->getOperand(1); + auto CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); + APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1); + if (isOneConstant(RHS) && ISD::isIntEqualitySetCC(CC) && + DAG.MaskedValueIsZero(LHS, Mask)) { + SDLoc DL(N); + SDValue Zero = DAG.getConstant(0, DL, LHS.getValueType()); + CC = ISD::getSetCCInverse(CC, LHS.getValueType()); + return DAG.getSetCC(DL, N->getValueType(0), LHS, Zero, CC); + } + break; + } } return SDValue(); diff --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll --- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll @@ -411,8 +411,7 @@ ; RV32IFD-NEXT: flt.d a0, ft1, ft0 ; RV32IFD-NEXT: flt.d a1, ft0, ft1 ; RV32IFD-NEXT: or a0, a1, a0 -; RV32IFD-NEXT: addi a1, zero, 1 -; RV32IFD-NEXT: bne a0, a1, .LBB9_2 +; RV32IFD-NEXT: beqz a0, .LBB9_2 ; RV32IFD-NEXT: # %bb.1: # %if.else ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi sp, sp, 16 @@ -429,8 +428,7 @@ ; RV64IFD-NEXT: flt.d a0, ft1, ft0 ; RV64IFD-NEXT: flt.d a1, ft0, ft1 ; RV64IFD-NEXT: or a0, a1, a0 -; RV64IFD-NEXT: addi a1, zero, 1 -; RV64IFD-NEXT: bne a0, a1, .LBB9_2 +; RV64IFD-NEXT: beqz a0, .LBB9_2 ; RV64IFD-NEXT: # %bb.1: # %if.else ; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IFD-NEXT: addi sp, sp, 16 @@ -676,8 +674,7 @@ ; RV32IFD-NEXT: feq.d a0, ft1, ft1 ; RV32IFD-NEXT: feq.d a1, ft0, ft0 ; RV32IFD-NEXT: and a0, a1, a0 -; RV32IFD-NEXT: addi a1, zero, 1 -; RV32IFD-NEXT: bne a0, a1, .LBB15_2 +; RV32IFD-NEXT: beqz a0, .LBB15_2 ; RV32IFD-NEXT: # %bb.1: # %if.else ; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IFD-NEXT: addi sp, sp, 16 @@ -694,8 +691,7 @@ ; RV64IFD-NEXT: feq.d a0, ft1, ft1 ; RV64IFD-NEXT: feq.d a1, ft0, ft0 ; RV64IFD-NEXT: and a0, a1, a0 -; RV64IFD-NEXT: addi a1, zero, 1 -; RV64IFD-NEXT: bne a0, a1, .LBB15_2 +; RV64IFD-NEXT: beqz a0, .LBB15_2 ; RV64IFD-NEXT: # %bb.1: # 
%if.else ; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IFD-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll --- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll @@ -376,8 +376,7 @@ ; RV32IF-NEXT: flt.s a0, ft1, ft0 ; RV32IF-NEXT: flt.s a1, ft0, ft1 ; RV32IF-NEXT: or a0, a1, a0 -; RV32IF-NEXT: addi a1, zero, 1 -; RV32IF-NEXT: bne a0, a1, .LBB9_2 +; RV32IF-NEXT: beqz a0, .LBB9_2 ; RV32IF-NEXT: # %bb.1: # %if.else ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 @@ -394,8 +393,7 @@ ; RV64IF-NEXT: flt.s a0, ft1, ft0 ; RV64IF-NEXT: flt.s a1, ft0, ft1 ; RV64IF-NEXT: or a0, a1, a0 -; RV64IF-NEXT: addi a1, zero, 1 -; RV64IF-NEXT: bne a0, a1, .LBB9_2 +; RV64IF-NEXT: beqz a0, .LBB9_2 ; RV64IF-NEXT: # %bb.1: # %if.else ; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IF-NEXT: addi sp, sp, 16 @@ -617,8 +615,7 @@ ; RV32IF-NEXT: feq.s a0, ft1, ft1 ; RV32IF-NEXT: feq.s a1, ft0, ft0 ; RV32IF-NEXT: and a0, a1, a0 -; RV32IF-NEXT: addi a1, zero, 1 -; RV32IF-NEXT: bne a0, a1, .LBB15_2 +; RV32IF-NEXT: beqz a0, .LBB15_2 ; RV32IF-NEXT: # %bb.1: # %if.else ; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IF-NEXT: addi sp, sp, 16 @@ -635,8 +632,7 @@ ; RV64IF-NEXT: feq.s a0, ft1, ft1 ; RV64IF-NEXT: feq.s a1, ft0, ft0 ; RV64IF-NEXT: and a0, a1, a0 -; RV64IF-NEXT: addi a1, zero, 1 -; RV64IF-NEXT: bne a0, a1, .LBB15_2 +; RV64IF-NEXT: beqz a0, .LBB15_2 ; RV64IF-NEXT: # %bb.1: # %if.else ; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IF-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll --- a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll @@ -342,8 +342,7 @@ ; RV32IZFH-NEXT: flt.h a0, fa0, fa1 ; RV32IZFH-NEXT: flt.h a1, fa1, fa0 ; RV32IZFH-NEXT: or a0, a1, a0 -; RV32IZFH-NEXT: addi a1, zero, 1 -; RV32IZFH-NEXT: 
bne a0, a1, .LBB9_2 +; RV32IZFH-NEXT: beqz a0, .LBB9_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else ; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: addi sp, sp, 16 @@ -358,8 +357,7 @@ ; RV64IZFH-NEXT: flt.h a0, fa0, fa1 ; RV64IZFH-NEXT: flt.h a1, fa1, fa0 ; RV64IZFH-NEXT: or a0, a1, a0 -; RV64IZFH-NEXT: addi a1, zero, 1 -; RV64IZFH-NEXT: bne a0, a1, .LBB9_2 +; RV64IZFH-NEXT: beqz a0, .LBB9_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else ; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFH-NEXT: addi sp, sp, 16 @@ -559,8 +557,7 @@ ; RV32IZFH-NEXT: feq.h a0, fa1, fa1 ; RV32IZFH-NEXT: feq.h a1, fa0, fa0 ; RV32IZFH-NEXT: and a0, a1, a0 -; RV32IZFH-NEXT: addi a1, zero, 1 -; RV32IZFH-NEXT: bne a0, a1, .LBB15_2 +; RV32IZFH-NEXT: beqz a0, .LBB15_2 ; RV32IZFH-NEXT: # %bb.1: # %if.else ; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload ; RV32IZFH-NEXT: addi sp, sp, 16 @@ -575,8 +572,7 @@ ; RV64IZFH-NEXT: feq.h a0, fa1, fa1 ; RV64IZFH-NEXT: feq.h a1, fa0, fa0 ; RV64IZFH-NEXT: and a0, a1, a0 -; RV64IZFH-NEXT: addi a1, zero, 1 -; RV64IZFH-NEXT: bne a0, a1, .LBB15_2 +; RV64IZFH-NEXT: beqz a0, .LBB15_2 ; RV64IZFH-NEXT: # %bb.1: # %if.else ; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFH-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/select-and.ll b/llvm/test/CodeGen/RISCV/select-and.ll --- a/llvm/test/CodeGen/RISCV/select-and.ll +++ b/llvm/test/CodeGen/RISCV/select-and.ll @@ -65,8 +65,7 @@ ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: and a0, a0, a1 -; RV32I-NEXT: addi a1, zero, 1 -; RV32I-NEXT: bne a0, a1, .LBB1_2 +; RV32I-NEXT: beqz a0, .LBB1_2 ; RV32I-NEXT: # %bb.1: # %if.then ; RV32I-NEXT: call both@plt ; RV32I-NEXT: j .LBB1_3 @@ -82,8 +81,7 @@ ; RV32IBT-NEXT: addi sp, sp, -16 ; RV32IBT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IBT-NEXT: and a0, a0, a1 -; RV32IBT-NEXT: addi a1, zero, 1 -; RV32IBT-NEXT: bne a0, a1, .LBB1_2 +; RV32IBT-NEXT: beqz a0, .LBB1_2 ; RV32IBT-NEXT: # 
%bb.1: # %if.then ; RV32IBT-NEXT: call both@plt ; RV32IBT-NEXT: j .LBB1_3 @@ -99,8 +97,7 @@ ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: and a0, a0, a1 -; RV64I-NEXT: addi a1, zero, 1 -; RV64I-NEXT: bne a0, a1, .LBB1_2 +; RV64I-NEXT: beqz a0, .LBB1_2 ; RV64I-NEXT: # %bb.1: # %if.then ; RV64I-NEXT: call both@plt ; RV64I-NEXT: j .LBB1_3 @@ -116,8 +113,7 @@ ; RV64IBT-NEXT: addi sp, sp, -16 ; RV64IBT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IBT-NEXT: and a0, a0, a1 -; RV64IBT-NEXT: addi a1, zero, 1 -; RV64IBT-NEXT: bne a0, a1, .LBB1_2 +; RV64IBT-NEXT: beqz a0, .LBB1_2 ; RV64IBT-NEXT: # %bb.1: # %if.then ; RV64IBT-NEXT: call both@plt ; RV64IBT-NEXT: j .LBB1_3 diff --git a/llvm/test/CodeGen/RISCV/select-or.ll b/llvm/test/CodeGen/RISCV/select-or.ll --- a/llvm/test/CodeGen/RISCV/select-or.ll +++ b/llvm/test/CodeGen/RISCV/select-or.ll @@ -65,8 +65,7 @@ ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32I-NEXT: or a0, a0, a1 -; RV32I-NEXT: addi a1, zero, 1 -; RV32I-NEXT: bne a0, a1, .LBB1_2 +; RV32I-NEXT: beqz a0, .LBB1_2 ; RV32I-NEXT: # %bb.1: # %if.then ; RV32I-NEXT: call either@plt ; RV32I-NEXT: j .LBB1_3 @@ -82,8 +81,7 @@ ; RV32IBT-NEXT: addi sp, sp, -16 ; RV32IBT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill ; RV32IBT-NEXT: or a0, a0, a1 -; RV32IBT-NEXT: addi a1, zero, 1 -; RV32IBT-NEXT: bne a0, a1, .LBB1_2 +; RV32IBT-NEXT: beqz a0, .LBB1_2 ; RV32IBT-NEXT: # %bb.1: # %if.then ; RV32IBT-NEXT: call either@plt ; RV32IBT-NEXT: j .LBB1_3 @@ -99,8 +97,7 @@ ; RV64I-NEXT: addi sp, sp, -16 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64I-NEXT: or a0, a0, a1 -; RV64I-NEXT: addi a1, zero, 1 -; RV64I-NEXT: bne a0, a1, .LBB1_2 +; RV64I-NEXT: beqz a0, .LBB1_2 ; RV64I-NEXT: # %bb.1: # %if.then ; RV64I-NEXT: call either@plt ; RV64I-NEXT: j .LBB1_3 @@ -116,8 +113,7 @@ ; RV64IBT-NEXT: addi sp, sp, -16 ; RV64IBT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IBT-NEXT: or a0, a0, a1 -; 
RV64IBT-NEXT: addi a1, zero, 1 -; RV64IBT-NEXT: bne a0, a1, .LBB1_2 +; RV64IBT-NEXT: beqz a0, .LBB1_2 ; RV64IBT-NEXT: # %bb.1: # %if.then ; RV64IBT-NEXT: call either@plt ; RV64IBT-NEXT: j .LBB1_3