diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -49889,6 +49889,11 @@
   return SDValue();
 }
 
+static SDValue combineAddOrSubToADCOrSBB(bool IsSub, const SDLoc &DL, EVT VT,
+                                         SDValue X, SDValue Y,
+                                         SelectionDAG &DAG,
+                                         bool ZeroSecondOpOnly = false);
+
 static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
@@ -50036,6 +50041,22 @@
   if (SDValue R = foldMaskedMerge(N, DAG))
     return R;
 
+  // Delegate to combineAddOrSubToADCOrSBB if we have:
+  //
+  //    (or (zero_extend (setcc)) imm)
+  //
+  // where imm has its LSB cleared, in which case the OR is equivalent to an
+  // ADD.
+  if (N0.getOpcode() == ISD::ZERO_EXTEND && N0.hasOneUse() &&
+      N0.getOperand(0).getOpcode() == X86ISD::SETCC) {
+    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
+      if ((N1C->getZExtValue() & 1) == 0) {
+        if (SDValue R = combineAddOrSubToADCOrSBB(/*IsSub=*/false, dl, VT, N1, N0, DAG))
+          return R;
+      }
+    }
+  }
+
   return SDValue();
 }
 
@@ -52511,6 +52532,23 @@
   if (SDValue SetCC = foldXor1SetCC(N, DAG))
     return SetCC;
 
+  // Delegate to combineAddOrSubToADCOrSBB if we have:
+  //
+  //    (xor (zero_extend (setcc)) imm)
+  //
+  // where imm has its LSB set, in which case the XOR is equivalent to SUB with
+  // the operands swapped.
+  SDLoc DL(N);
+  if (N0.getOpcode() == ISD::ZERO_EXTEND && N0.hasOneUse() &&
+      N0.getOperand(0).getOpcode() == X86ISD::SETCC) {
+    if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1)) {
+      if (N1C->getZExtValue() & 1) {
+        if (SDValue R = combineAddOrSubToADCOrSBB(/*IsSub=*/true, DL, VT, N1, N0, DAG))
+          return R;
+      }
+    }
+  }
+
   if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
     return RV;
 
@@ -54452,7 +54490,7 @@
 static SDValue combineAddOrSubToADCOrSBB(bool IsSub, const SDLoc &DL, EVT VT,
                                          SDValue X, SDValue Y,
                                          SelectionDAG &DAG,
-                                         bool ZeroSecondOpOnly = false) {
+                                         bool ZeroSecondOpOnly) {
   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
     return SDValue();
 
diff --git a/llvm/test/CodeGen/X86/select_const.ll b/llvm/test/CodeGen/X86/select_const.ll
--- a/llvm/test/CodeGen/X86/select_const.ll
+++ b/llvm/test/CodeGen/X86/select_const.ll
@@ -520,3 +520,39 @@
   %f = select i1 undef, float 4.0, float %x
   ret float %f
 }
+
+define i32 @select_eq0_3_2(i32 %X) {
+; CHECK-LABEL: select_eq0_3_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    cmpl $1, %edi
+; CHECK-NEXT:    adcl $2, %eax
+; CHECK-NEXT:    retq
+  %cmp = icmp eq i32 %X, 0
+  %sel = select i1 %cmp, i32 3, i32 2
+  ret i32 %sel
+}
+
+define i32 @select_carry_2_3(i32 %X) {
+; CHECK-LABEL: select_carry_2_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    cmpl $4, %edi
+; CHECK-NEXT:    adcl $2, %eax
+; CHECK-NEXT:    retq
+  %cmp = icmp ugt i32 %X, 3
+  %sel = select i1 %cmp, i32 2, i32 3
+  ret i32 %sel
+}
+
+define i32 @select_carry_7_6(i32 %X) {
+; CHECK-LABEL: select_carry_7_6:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    cmpl $9, %edi
+; CHECK-NEXT:    adcl $6, %eax
+; CHECK-NEXT:    retq
+  %cmp = icmp ult i32 %X, 9
+  %sel = select i1 %cmp, i32 7, i32 6
+  ret i32 %sel
+}