Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -40446,6 +40446,19 @@
   return Op.getValue(1);
 }
 
+static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG) {
+  // If we don't use the flag result, simplify back to a simple ADD/SUB.
+  if (N->hasAnyUseOfValue(1))
+    return SDValue();
+
+  SDLoc DL(N);
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+  SDValue Res = DAG.getNode(X86ISD::ADD == N->getOpcode() ? ISD::ADD : ISD::SUB,
+                            DL, LHS.getSimpleValueType(), LHS, RHS);
+  return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
+}
+
 static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
   if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
     MVT VT = N->getSimpleValueType(0);
@@ -41615,6 +41628,8 @@
   case X86ISD::CMP:         return combineCMP(N, DAG);
   case ISD::ADD:            return combineAdd(N, DAG, Subtarget);
   case ISD::SUB:            return combineSub(N, DAG, Subtarget);
+  case X86ISD::ADD:
+  case X86ISD::SUB:         return combineX86AddSub(N, DAG);
   case X86ISD::SBB:         return combineSBB(N, DAG);
   case X86ISD::ADC:         return combineADC(N, DAG, DCI);
   case ISD::MUL:            return combineMul(N, DAG, DCI, Subtarget);
Index: test/CodeGen/X86/adx-intrinsics-upgrade.ll
===================================================================
--- test/CodeGen/X86/adx-intrinsics-upgrade.ll
+++ test/CodeGen/X86/adx-intrinsics-upgrade.ll
@@ -108,9 +108,7 @@
 define void @allzeros() {
 ; CHECK-LABEL: allzeros:
 ; CHECK:       ## %bb.0: ## %entry
-; CHECK-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; CHECK-NEXT:    addq $0, %rax ## encoding: [0x48,0x83,0xc0,0x00]
-; CHECK-NEXT:    movq %rax, 0 ## encoding: [0x48,0x89,0x04,0x25,0x00,0x00,0x00,0x00]
+; CHECK-NEXT:    movq $0, 0 ## encoding: [0x48,0xc7,0x04,0x25,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
 entry:
   %0 = tail call i8 @llvm.x86.addcarryx.u64(i8 0, i64 0, i64 0, i8* null)
Index: test/CodeGen/X86/adx-intrinsics.ll
===================================================================
--- test/CodeGen/X86/adx-intrinsics.ll
+++ test/CodeGen/X86/adx-intrinsics.ll
@@ -134,9 +134,7 @@
 define void @allzeros() {
 ; CHECK-LABEL: allzeros:
 ; CHECK:       ## %bb.0: ## %entry
-; CHECK-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
-; CHECK-NEXT:    addq $0, %rax ## encoding: [0x48,0x83,0xc0,0x00]
-; CHECK-NEXT:    movq %rax, 0 ## encoding: [0x48,0x89,0x04,0x25,0x00,0x00,0x00,0x00]
+; CHECK-NEXT:    movq $0, 0 ## encoding: [0x48,0xc7,0x04,0x25,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]
 ; CHECK-NEXT:    retq ## encoding: [0xc3]
 entry:
   %0 = call { i8, i64 } @llvm.x86.addcarry.64(i8 0, i64 0, i64 0)
Index: test/CodeGen/X86/combine-adx.ll
===================================================================
--- test/CodeGen/X86/combine-adx.ll
+++ test/CodeGen/X86/combine-adx.ll
@@ -6,13 +6,11 @@
 ; X86-LABEL: test_addcarry_32_x_0_false:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    addl $0, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_addcarry_32_x_0_false:
 ; X64:       # %bb.0:
-; X64-NEXT:    # kill: def $edi killed $edi def $rdi
-; X64-NEXT:    leal (%rdi), %eax
+; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
   %1 = tail call { i8, i32 } @llvm.x86.addcarry.32(i8 0, i32 %a0, i32 0)
   %2 = extractvalue { i8, i32 } %1, 1
@@ -22,14 +20,12 @@
 define i32 @test_addcarry_32_0_x_false(i32 %a0) {
 ; X86-LABEL: test_addcarry_32_0_x_false:
 ; X86:       # %bb.0:
-; X86-NEXT:    xorl %eax, %eax
-; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_addcarry_32_0_x_false:
 ; X64:       # %bb.0:
-; X64-NEXT:    # kill: def $edi killed $edi def $rdi
-; X64-NEXT:    leal (%rdi), %eax
+; X64-NEXT:    movl %edi, %eax
 ; X64-NEXT:    retq
   %1 = tail call { i8, i32 } @llvm.x86.addcarry.32(i8 0, i32 0, i32 %a0)
   %2 = extractvalue { i8, i32 } %1, 1
@@ -40,13 +36,11 @@
 ; X86-LABEL: test_subborrow_32_x_0_false:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    subl $0, %eax
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_subborrow_32_x_0_false:
 ; X64:       # %bb.0:
 ; X64-NEXT:    movl %edi, %eax
-; X64-NEXT:    subl $0, %eax
 ; X64-NEXT:    retq
   %1 = tail call { i8, i32 } @llvm.x86.subborrow.32(i8 0, i32 %a0, i32 0)
   %2 = extractvalue { i8, i32 } %1, 1