Index: lib/CodeGen/SelectionDAG/SelectionDAG.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2506,6 +2506,7 @@
     LLVM_FALLTHROUGH;
   }
   case ISD::ADD:
+  case ISD::ADDC:
   case ISD::ADDE: {
     // Output known-0 bits are known if clear or set in both the low clear bits
     // common to both LHS & RHS. For example, 8+(X<<3) is known to have the
@@ -2526,7 +2527,7 @@
     KnownZeroLow = std::min(KnownZeroLow,
                             KnownZero2.countTrailingOnes());
 
-    if (Opcode == ISD::ADD) {
+    if (Opcode == ISD::ADD || Opcode == ISD::ADDC) {
       KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroLow);
       if (KnownZeroHigh > 1)
         KnownZero |= APInt::getHighBitsSet(BitWidth, KnownZeroHigh - 1);
@@ -2924,6 +2925,7 @@
     }
     break;
   case ISD::ADD:
+  case ISD::ADDC:
     // Add can have at most one carry bit. Thus we know that the output
     // is, at worst, one more bit than the inputs.
     Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
Index: test/CodeGen/X86/known-bits.ll
===================================================================
--- test/CodeGen/X86/known-bits.ll
+++ test/CodeGen/X86/known-bits.ll
@@ -103,3 +103,67 @@
   %E156 = extractelement <4 x i1> %Cmp117, i32 2
   br label %CF
 }
+
+define i32 @knownbits_mask_add_lshr(i32 %a0, i32 %a1) nounwind {
+; X32-LABEL: knownbits_mask_add_lshr:
+; X32:       # BB#0:
+; X32-NEXT:    xorl %eax, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: knownbits_mask_add_lshr:
+; X64:       # BB#0:
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    retq
+  %1 = and i32 %a0, 32767
+  %2 = and i32 %a1, 32766
+  %3 = add i32 %1, %2
+  %4 = lshr i32 %3, 17
+  ret i32 %4
+}
+
+define i128 @knownbits_mask_addc_shl(i64 %a0, i64 %a1, i64 %a2) nounwind {
+; X32-LABEL: knownbits_mask_addc_shl:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %edi
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    movl $-1024, %esi # imm = 0xFC00
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT:    andl %esi, %edi
+; X32-NEXT:    andl {{[0-9]+}}(%esp), %esi
+; X32-NEXT:    addl %edi, %esi
+; X32-NEXT:    adcl {{[0-9]+}}(%esp), %edx
+; X32-NEXT:    adcl $0, %ecx
+; X32-NEXT:    shldl $22, %edx, %ecx
+; X32-NEXT:    shldl $22, %esi, %edx
+; X32-NEXT:    movl %edx, 8(%eax)
+; X32-NEXT:    movl %ecx, 12(%eax)
+; X32-NEXT:    movl $0, 4(%eax)
+; X32-NEXT:    movl $0, (%eax)
+; X32-NEXT:    popl %esi
+; X32-NEXT:    popl %edi
+; X32-NEXT:    retl $4
+;
+; X64-LABEL: knownbits_mask_addc_shl:
+; X64:       # BB#0:
+; X64-NEXT:    andq $-1024, %rdi # imm = 0xFC00
+; X64-NEXT:    andq $-1024, %rsi # imm = 0xFC00
+; X64-NEXT:    addq %rdi, %rsi
+; X64-NEXT:    sbbq %rax, %rax
+; X64-NEXT:    subl %eax, %edx
+; X64-NEXT:    shldq $54, %rsi, %rdx
+; X64-NEXT:    xorl %eax, %eax
+; X64-NEXT:    retq
+  %1 = and i64 %a0, -1024
+  %2 = zext i64 %1 to i128
+  %3 = and i64 %a1, -1024
+  %4 = zext i64 %3 to i128
+  %5 = add i128 %2, %4
+  %6 = zext i64 %a2 to i128
+  %7 = shl i128 %6, 64
+  %8 = add i128 %5, %7
+  %9 = shl i128 %8, 54
+  ret i128 %9
+}
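
Note on the computeKnownBits hunks: the rule being extended from ISD::ADD to
ISD::ADDC is that an add cannot set a bit below the lowest possibly-set bit of
either operand, because no carry can be generated there, so the sum keeps
min(trailing known-zero bits of LHS, trailing known-zero bits of RHS) known-zero
low bits. The following is a standalone C++ sketch of that bound, not the
patch's code; countTrailingOnes here is a simplified stand-in for
APInt::countTrailingOnes, and it checks the comment's 8+(X<<3) example
exhaustively.

#include <algorithm>
#include <cstdint>

// Count the known-zero low bits, i.e. the trailing ones of a KnownZero mask.
static unsigned countTrailingOnes(uint64_t Mask) {
  unsigned N = 0;
  while (N < 64 && ((Mask >> N) & 1))
    ++N;
  return N;
}

int main() {
  uint64_t KnownZeroLHS = 0x7; // 8: low 3 bits known zero
  uint64_t KnownZeroRHS = 0x7; // X << 3: low 3 bits known zero
  unsigned KnownZeroLow = std::min(countTrailingOnes(KnownZeroLHS),
                                   countTrailingOnes(KnownZeroRHS));
  // Exhaustively confirm the bound for the example 8 + (X << 3).
  for (uint64_t X = 0; X < 4096; ++X)
    if ((8 + (X << 3)) & ((1ULL << KnownZeroLow) - 1))
      return 1; // never reached: the low 3 bits are always clear
  return 0;
}

This is why the new @knownbits_mask_add_lshr test folds to xor: both addends
are masked to 15 bits, so the 32-bit sum has bit 17 and above known zero, and
the lshr by 17 is provably zero.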
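Note on the ComputeNumSignBits hunk: adding two values can widen the magnitude
by at most one bit, so the result keeps at least
min(signbits(LHS), signbits(RHS)) - 1 redundant copies of the sign bit, and the
same bound applies to ADDC since the carry is covered by that one extra bit. A
standalone sketch of the bound under that simplified model (numSignBits is a
hypothetical helper for illustration, not LLVM's API):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Count how many high bits of V are copies of the sign bit (always >= 1).
static unsigned numSignBits(int32_t V) {
  uint32_t U = static_cast<uint32_t>(V);
  uint32_t Sign = U >> 31;
  unsigned N = 1;
  while (N < 32 && ((U >> (31 - N)) & 1) == Sign)
    ++N;
  return N;
}

int main() {
  int32_t A = 100, B = -50;                // both have many redundant sign bits
  unsigned Bound = std::min(numSignBits(A), numSignBits(B)) - 1;
  printf("bound=%u actual=%u\n", Bound, numSignBits(A + B));
  return numSignBits(A + B) >= Bound ? 0 : 1; // the bound always holds
}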