Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -3803,7 +3803,7 @@
 
 SDValue DAGCombiner::visitSMULO(SDNode *N) {
   // (smulo x, 2) -> (saddo x, x)
-  if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)))
+  if (ConstantSDNode *C2 = isConstOrConstSplat(N->getOperand(1)))
     if (C2->getAPIntValue() == 2)
       return DAG.getNode(ISD::SADDO, SDLoc(N), N->getVTList(),
                          N->getOperand(0), N->getOperand(0));
@@ -3813,7 +3813,7 @@
 
 SDValue DAGCombiner::visitUMULO(SDNode *N) {
   // (umulo x, 2) -> (uaddo x, x)
-  if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)))
+  if (ConstantSDNode *C2 = isConstOrConstSplat(N->getOperand(1)))
     if (C2->getAPIntValue() == 2)
       return DAG.getNode(ISD::UADDO, SDLoc(N), N->getVTList(),
                          N->getOperand(0), N->getOperand(0));
Index: test/CodeGen/X86/combine-mulo.ll
===================================================================
--- test/CodeGen/X86/combine-mulo.ll
+++ test/CodeGen/X86/combine-mulo.ll
@@ -34,35 +34,31 @@
 ; SSE-LABEL: combine_vec_smul_two:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [2,2,2,2]
-; SSE-NEXT:    pmuldq %xmm3, %xmm0
-; SSE-NEXT:    pmuldq %xmm2, %xmm3
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5],xmm0[6,7]
+; SSE-NEXT:    pxor %xmm0, %xmm0
+; SSE-NEXT:    pxor %xmm3, %xmm3
+; SSE-NEXT:    pcmpgtd %xmm2, %xmm3
+; SSE-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE-NEXT:    pxor %xmm4, %xmm3
 ; SSE-NEXT:    paddd %xmm2, %xmm2
-; SSE-NEXT:    movdqa %xmm2, %xmm0
-; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    pcmpgtd %xmm2, %xmm0
+; SSE-NEXT:    pxor %xmm4, %xmm0
 ; SSE-NEXT:    pcmpeqd %xmm3, %xmm0
-; SSE-NEXT:    pcmpeqd %xmm3, %xmm3
-; SSE-NEXT:    pxor %xmm3, %xmm0
+; SSE-NEXT:    pxor %xmm4, %xmm0
 ; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm2
 ; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_smul_two:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [2,2,2,2]
-; AVX-NEXT:    vpmuldq %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vpmuldq %xmm3, %xmm0, %xmm3
-; AVX-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX-NEXT:    vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
+; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtd %xmm0, %xmm2, %xmm3
+; AVX-NEXT:    vpcmpeqd %xmm4, %xmm4, %xmm4
+; AVX-NEXT:    vpxor %xmm4, %xmm3, %xmm3
 ; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vpsrad $31, %xmm0, %xmm3
-; AVX-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX-NEXT:    vpxor %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpgtd %xmm0, %xmm2, %xmm2
+; AVX-NEXT:    vpxor %xmm4, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpeqd %xmm2, %xmm3, %xmm2
+; AVX-NEXT:    vpxor %xmm4, %xmm2, %xmm2
 ; AVX-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = call {<4 x i32>, <4 x i1>} @llvm.smul.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> <i32 2, i32 2, i32 2, i32 2>)
@@ -98,35 +94,23 @@
 ; SSE-LABEL: combine_vec_umul_two:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [2,2,2,2]
-; SSE-NEXT:    pmuludq %xmm3, %xmm0
-; SSE-NEXT:    pmuludq %xmm2, %xmm3
-; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5],xmm0[6,7]
-; SSE-NEXT:    pxor %xmm4, %xmm4
-; SSE-NEXT:    pcmpeqd %xmm3, %xmm4
-; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
-; SSE-NEXT:    pxor %xmm4, %xmm0
-; SSE-NEXT:    paddd %xmm2, %xmm2
+; SSE-NEXT:    paddd %xmm0, %xmm2
+; SSE-NEXT:    pmaxud %xmm2, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm2, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm3
+; SSE-NEXT:    pxor %xmm3, %xmm0
 ; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm2
 ; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_umul_two:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [2,2,2,2]
-; AVX-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vpmuludq %xmm3, %xmm0, %xmm3
-; AVX-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX-NEXT:    vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
-; AVX-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm2
+; AVX-NEXT:    vpmaxud %xmm0, %xmm2, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm2, %xmm0
 ; AVX-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX-NEXT:    vpxor %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpxor %xmm3, %xmm0, %xmm0
+; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX-NEXT:    retq
   %1 = call {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> <i32 2, i32 2, i32 2, i32 2>)
   %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
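Note: the reason this one-line change is enough to fire the fold for vectors is that isConstOrConstSplat, unlike dyn_cast<ConstantSDNode>, also looks through a constant BUILD_VECTOR and hands back its splat element. Below is a conceptual sketch of that behaviour, simplified from the real helper (the actual ISD::isConstOrConstSplat also takes AllowUndefs/AllowTruncation parameters, omitted here; isConstOrConstSplatSketch is a made-up name for illustration):

  // Sketch only: models what ISD::isConstOrConstSplat matches, assuming the
  // usual SelectionDAG headers (llvm/CodeGen/SelectionDAGNodes.h) are in scope.
  ConstantSDNode *isConstOrConstSplatSketch(SDValue N) {
    // Scalar operand that is itself a constant: the pre-patch dyn_cast case.
    if (auto *C = dyn_cast<ConstantSDNode>(N))
      return C;
    // Vector operand: a BUILD_VECTOR whose lanes are all the same constant,
    // e.g. <i32 2, i32 2, i32 2, i32 2>; getSplatValue() returns that lane.
    if (auto *BV = dyn_cast<BuildVectorSDNode>(N))
      if (SDValue Splat = BV->getSplatValue())
        return dyn_cast<ConstantSDNode>(Splat);
    return nullptr;
  }

With that, the existing C2->getAPIntValue() == 2 check applies to the splat element, so (smulo x, splat(2)) and (umulo x, splat(2)) rewrite to SADDO/UADDO of x with itself; the updated CHECK lines above reflect this, replacing the pmuldq/pmuludq widening sequences with a paddd plus a cheaper overflow test.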