Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2934,6 +2934,22 @@
   if (KnownNegatives.none())
     return Sra;
 
+  // Handle the special case where any of the divisor elements is -1.
+  // The rounding correction will result in the respective elements being
+  // undef. No shifting is needed for -1, so we can bypass the undefs by
+  // adding a select. The scalar and splat cases were already handled
+  // earlier, so we only need to handle non-splat vector divisors.
+  // FIXME: Use a SELECT_CC once we improve SELECT_CC constant-folding.
+  if (VT.isVector())
+    Sra = DAG.getSelect(
+        DL, VT,
+        DAG.getSetCC(
+            DL, VT, N1,
+            DAG.getSplatBuildVector(
+                VT, DL, DAG.getAllOnesConstant(DL, VT.getScalarType())),
+            ISD::SETEQ),
+        N0, Sra);
+
   AddToWorklist(Sra.getNode());
   SDValue Sub =
       DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Sra);
Index: test/CodeGen/X86/combine-sdiv.ll
===================================================================
--- test/CodeGen/X86/combine-sdiv.ll
+++ test/CodeGen/X86/combine-sdiv.ll
@@ -1790,3 +1790,115 @@
   %1 = sdiv <4 x i32> %x,
   ret <4 x i32> %1
 }
+
+; PR37119
+define <16 x i8> @non_splat_minus_one_divisor_0(<16 x i8> %A) {
+; SSE-LABEL: non_splat_minus_one_divisor_0:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    pxor %xmm2, %xmm2
+; SSE-NEXT:    psubb %xmm0, %xmm2
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
+; SSE-NEXT:    pblendvb %xmm0, %xmm1, %xmm2
+; SSE-NEXT:    movdqa %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: non_splat_minus_one_divisor_0:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
+; AVX1-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: non_splat_minus_one_divisor_0:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
+; AVX2-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: non_splat_minus_one_divisor_0:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; AVX512F-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
+; AVX512F-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512BW-LABEL: non_splat_minus_one_divisor_0:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX512BW-NEXT:    movw $443, %ax # imm = 0x1BB
+; AVX512BW-NEXT:    kmovd %eax, %k1
+; AVX512BW-NEXT:    vpsubb %xmm0, %xmm1, %xmm0 {%k1}
+; AVX512BW-NEXT:    retq
+;
+; XOP-LABEL: non_splat_minus_one_divisor_0:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; XOP-NEXT:    vpsubb %xmm0, %xmm1, %xmm1
+; XOP-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
+; XOP-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
+; XOP-NEXT:    retq
+  %div = sdiv <16 x i8> %A, <i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 -1, i8 1, i8 -1, i8 -1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+  ret <16 x i8> %div
+}
+
+define <4 x i32> @non_splat_minus_one_divisor_1(<4 x i32> %A) {
+; SSE-LABEL: non_splat_minus_one_divisor_1:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    psrld $31, %xmm2
+; SSE-NEXT:    pxor %xmm3, %xmm3
+; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT:    paddd %xmm0, %xmm2
+; SSE-NEXT:    movdqa %xmm2, %xmm1
+; SSE-NEXT:    psrad $1, %xmm1
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; SSE-NEXT:    psubd %xmm1, %xmm3
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3,4,5],xmm3[6,7]
+; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: non_splat_minus_one_divisor_1:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpsrld $31, %xmm0, %xmm1
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX1-NEXT:    vpsrad $1, %xmm1, %xmm3
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; AVX1-NEXT:    vpsubd %xmm0, %xmm2, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
+; AVX1-NEXT:    retq
+;
+; AVX2ORLATER-LABEL: non_splat_minus_one_divisor_1:
+; AVX2ORLATER:       # %bb.0:
+; AVX2ORLATER-NEXT:    vpsrad $31, %xmm0, %xmm1
+; AVX2ORLATER-NEXT:    vpsrlvd {{.*}}(%rip), %xmm1, %xmm1
+; AVX2ORLATER-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; AVX2ORLATER-NEXT:    vpsravd {{.*}}(%rip), %xmm1, %xmm1
+; AVX2ORLATER-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX2ORLATER-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2ORLATER-NEXT:    vpsubd %xmm0, %xmm1, %xmm1
+; AVX2ORLATER-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3]
+; AVX2ORLATER-NEXT:    retq
+;
+; XOP-LABEL: non_splat_minus_one_divisor_1:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpsrad $31, %xmm0, %xmm1
+; XOP-NEXT:    vpshld {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; XOP-NEXT:    vpshad {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; XOP-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; XOP-NEXT:    vpsubd %xmm0, %xmm1, %xmm1
+; XOP-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
+; XOP-NEXT:    retq
+  %div = sdiv <4 x i32> %A, <i32 -1, i32 1, i32 2, i32 -2>
+  ret <4 x i32> %div
+}
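
Not part of the patch: below is a hand-written LLVM IR sketch (not compiler output) of the expansion this combine produces for the <4 x i32> test above, whose divisor <i32 -1, i32 1, i32 2, i32 -2> is reconstructed from the CHECK lines; the function name is illustrative. The shift amounts that would be undef in the |divisor| == 1 lanes are pinned to 0 here so the sketch stays well-defined, and the setcc of the divisor against all-ones appears already constant-folded into a vector-select mask.

; Equivalent of: %div = sdiv <4 x i32> %A, <i32 -1, i32 1, i32 2, i32 -2>
define <4 x i32> @sdiv_minus_one_sketch(<4 x i32> %A) {
  ; Rounding bias (A < 0 ? 1 : 0), needed only in the |d| == 2 lanes.
  %sign = lshr <4 x i32> %A, <i32 31, i32 31, i32 31, i32 31>
  %bias = and <4 x i32> %sign, <i32 0, i32 0, i32 1, i32 1>
  %sum = add <4 x i32> %A, %bias
  ; Arithmetic shift by log2(|d|); 0 in the |d| == 1 lanes.
  %sra = ashr <4 x i32> %sum, <i32 0, i32 0, i32 1, i32 1>
  ; The select added by this patch: lanes whose divisor is -1 bypass the
  ; (undef) shift result and take %A directly.
  %sel = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %A, <4 x i32> %sra
  ; Finally negate the lanes whose divisor is negative (lanes 0 and 3).
  %neg = sub <4 x i32> zeroinitializer, %sel
  %res = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x i32> %neg, <4 x i32> %sel
  ret <4 x i32> %res
}

The two vector selects correspond to the vpblendd/vpblendw pairs in the CHECK lines, and the masked AVX512BW vpsubb is the same pair folded into a single masked negate.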