Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -23931,6 +23931,30 @@
     }
   }
 
+  // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
+  // can replace with ISD::MULHS, creating scale factor from (NumEltBits - Amt).
+  // TODO: Special case handling for shift by 0/1, really we can afford either
+  // of these cases in pre-SSE41/XOP/AVX512 but not both.
+  if (Opc == ISD::SRA && ConstantAmt &&
+      (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
+      ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
+        !Subtarget.hasAVX512()) ||
+       DAG.isKnownNeverZero(Amt))) {
+    SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
+    SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
+    if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
+      SDValue Amt0 =
+          DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
+      SDValue Amt1 =
+          DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
+      SDValue Sra1 =
+          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
+      SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
+      Res = DAG.getSelect(dl, VT, Amt0, R, Res);
+      return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
+    }
+  }
+
   // v4i32 Non Uniform Shifts.
   // If the shift amount is constant we can shift each lane using the SSE2
   // immediate shifts, else we need to zero-extend each lane to the lower i64
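The new lowering relies on the identity that, for an i16 lane and 2 <= Amt <= 15, an arithmetic shift right by Amt equals the high half of the widening signed multiply by 2^(16 - Amt), which is exactly what PMULHW computes per lane. Amt == 0 would need a scale of 2^16, which does not fit in i16, and Amt == 1 would need 2^15, which wraps to -32768 and negates the result; those are the two amounts the Amt0/Amt1 selects above special-case. A minimal scalar sketch of the identity (illustrative only, not part of the patch):

#include <cassert>
#include <cstdint>

// One PMULHW lane: high 16 bits of the widening signed 16x16->32 multiply.
static int16_t MulHS16(int16_t X, int16_t Scale) {
  return static_cast<int16_t>((int32_t(X) * int32_t(Scale)) >> 16);
}

int main() {
  for (int Amt = 2; Amt <= 15; ++Amt) {
    // Scale = 2^(16 - Amt); (X * Scale) >> 16 == X >> Amt for all i16 X.
    int16_t Scale = static_cast<int16_t>(1u << (16 - Amt));
    for (int32_t X : {-32768, -12345, -1, 0, 1, 12345, 32767})
      assert(MulHS16(static_cast<int16_t>(X), Scale) ==
             static_cast<int16_t>(X >> Amt));
  }
  // Amt == 1 would need Scale == 2^15, which is -32768 in i16 and negates
  // instead of shifting; hence the PSRAW $1 fallback in the code above.
  assert(MulHS16(100, INT16_MIN) == -50);
  return 0;
}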
Index: llvm/trunk/test/CodeGen/X86/combine-sdiv.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/combine-sdiv.ll
+++ llvm/trunk/test/CodeGen/X86/combine-sdiv.ll
@@ -560,14 +560,9 @@
 ; SSE41-NEXT: pmulhuw {{.*}}(%rip), %xmm1
 ; SSE41-NEXT: paddw %xmm0, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $4, %xmm2
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1,2],xmm2[3],xmm1[4],xmm2[5,6],xmm1[7]
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psraw $2, %xmm3
-; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5],xmm3[6],xmm2[7]
-; SSE41-NEXT: movdqa %xmm3, %xmm1
-; SSE41-NEXT: psraw $1, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2],xmm3[3],xmm1[4,5],xmm3[6],xmm1[7]
+; SSE41-NEXT: psraw $1, %xmm2
+; SSE41-NEXT: pmulhw {{.*}}(%rip), %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4,5,6],xmm2[7]
 ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
 ; SSE41-NEXT: retq
@@ -577,12 +572,9 @@
 ; AVX1-NEXT: vpsraw $15, %xmm0, %xmm1
 ; AVX1-NEXT: vpmulhuw {{.*}}(%rip), %xmm1, %xmm1
 ; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vpsraw $4, %xmm1, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4],xmm2[5,6],xmm1[7]
-; AVX1-NEXT: vpsraw $2, %xmm1, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
 ; AVX1-NEXT: vpsraw $1, %xmm1, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
+; AVX1-NEXT: vpmulhw {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4,5,6],xmm2[7]
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
 ; AVX1-NEXT: retq
 ;
@@ -591,12 +583,10 @@
 ; AVX2-NEXT: vpsraw $15, %xmm0, %xmm1
 ; AVX2-NEXT: vpmulhuw {{.*}}(%rip), %xmm1, %xmm1
 ; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm1
-; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
-; AVX2-NEXT: vpsravd {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpsraw $1, %xmm1, %xmm2
+; AVX2-NEXT: vpmulhw {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4,5,6],xmm2[7]
 ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
-; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
 ; AVX512F-LABEL: combine_vec_sdiv_by_pow2b_v8i16:
@@ -695,32 +685,22 @@
 ; SSE41: # %bb.0:
 ; SSE41-NEXT: movdqa %xmm0, %xmm2
 ; SSE41-NEXT: psraw $15, %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [1,4,2,16,8,32,64,2]
-; SSE41-NEXT: pmulhuw %xmm3, %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [1,4,2,16,8,32,64,2]
+; SSE41-NEXT: pmulhuw %xmm4, %xmm2
 ; SSE41-NEXT: paddw %xmm0, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm4
-; SSE41-NEXT: psraw $4, %xmm4
-; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1,2],xmm4[3],xmm2[4],xmm4[5,6],xmm2[7]
-; SSE41-NEXT: movdqa %xmm4, %xmm5
-; SSE41-NEXT: psraw $2, %xmm5
-; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
-; SSE41-NEXT: movdqa %xmm5, %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm5 = <u,16384,32768,4096,8192,2048,1024,32768>
+; SSE41-NEXT: movdqa %xmm2, %xmm3
+; SSE41-NEXT: pmulhw %xmm5, %xmm3
 ; SSE41-NEXT: psraw $1, %xmm2
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm5[0,1],xmm2[2],xmm5[3],xmm2[4,5],xmm5[6],xmm2[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3,4,5,6],xmm2[7]
 ; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3,4,5,6,7]
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: psraw $15, %xmm0
-; SSE41-NEXT: pmulhuw %xmm3, %xmm0
-; SSE41-NEXT: paddw %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: psraw $4, %xmm3
-; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm0[0,1,2],xmm3[3],xmm0[4],xmm3[5,6],xmm0[7]
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: psraw $2, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0],xmm0[1],xmm3[2,3],xmm0[4],xmm3[5],xmm0[6],xmm3[7]
-; SSE41-NEXT: movdqa %xmm0, %xmm3
+; SSE41-NEXT: movdqa %xmm1, %xmm3
+; SSE41-NEXT: psraw $15, %xmm3
+; SSE41-NEXT: pmulhuw %xmm4, %xmm3
+; SSE41-NEXT: paddw %xmm1, %xmm3
+; SSE41-NEXT: pmulhw %xmm3, %xmm5
 ; SSE41-NEXT: psraw $1, %xmm3
-; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm0[0,1],xmm3[2],xmm0[3],xmm3[4,5],xmm0[6],xmm3[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm5[0,1],xmm3[2],xmm5[3,4,5,6],xmm3[7]
 ; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm1[0],xmm3[1,2,3,4,5,6,7]
 ; SSE41-NEXT: movdqa %xmm2, %xmm0
 ; SSE41-NEXT: movdqa %xmm3, %xmm1
@@ -733,21 +713,18 @@
 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,4,2,16,8,32,64,2]
 ; AVX1-NEXT: vpmulhuw %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT: vpaddw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsraw $4, %xmm1, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm2[3],xmm1[4],xmm2[5,6],xmm1[7]
-; AVX1-NEXT: vpsraw $2, %xmm1, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3],xmm2[4],xmm1[5],xmm2[6],xmm1[7]
-; AVX1-NEXT: vpsraw $1, %xmm1, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3],xmm2[4,5],xmm1[6],xmm2[7]
-; AVX1-NEXT: vpsraw $15, %xmm0, %xmm2
-; AVX1-NEXT: vpmulhuw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm2
-; AVX1-NEXT: vpsraw $4, %xmm2, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[3],xmm2[4],xmm3[5,6],xmm2[7]
-; AVX1-NEXT: vpsraw $2, %xmm2, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5],xmm3[6],xmm2[7]
-; AVX1-NEXT: vpsraw $1, %xmm2, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3],xmm3[4,5],xmm2[6],xmm3[7]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <u,16384,32768,4096,8192,2048,1024,32768>
+; AVX1-NEXT: vpmulhw %xmm2, %xmm1, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm1[0],xmm4[1,2,3,4,5,6,7]
+; AVX1-NEXT: vpsraw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm4[0,1],xmm1[2],xmm4[3,4,5,6],xmm1[7]
+; AVX1-NEXT: vpsraw $15, %xmm0, %xmm4
+; AVX1-NEXT: vpmulhuw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm0, %xmm3
+; AVX1-NEXT: vpmulhw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3,4,5,6,7]
+; AVX1-NEXT: vpsraw $1, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4,5,6],xmm3[7]
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [0,65535,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
 ; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
@@ -757,20 +734,12 @@
 ;
 ; AVX2-LABEL: combine_vec_sdiv_by_pow2b_v16i16:
 ; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,1,4,3,5,6,1,0,2,1,4,3,5,6,1]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
-; AVX2-NEXT: vpsraw $15, %ymm0, %ymm4
-; AVX2-NEXT: vpmulhuw {{.*}}(%rip), %ymm4, %ymm4
-; AVX2-NEXT: vpaddw %ymm4, %ymm0, %ymm4
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm1[4],ymm4[4],ymm1[5],ymm4[5],ymm1[6],ymm4[6],ymm1[7],ymm4[7],ymm1[12],ymm4[12],ymm1[13],ymm4[13],ymm1[14],ymm4[14],ymm1[15],ymm4[15]
-; AVX2-NEXT: vpsravd %ymm3, %ymm5, %ymm3
-; AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm4[0],ymm1[1],ymm4[1],ymm1[2],ymm4[2],ymm1[3],ymm4[3],ymm1[8],ymm4[8],ymm1[9],ymm4[9],ymm1[10],ymm4[10],ymm1[11],ymm4[11]
-; AVX2-NEXT: vpsravd %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
-; AVX2-NEXT: vpackusdw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsraw $15, %ymm0, %ymm1
+; AVX2-NEXT: vpmulhuw {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vpsraw $1, %ymm1, %ymm2
+; AVX2-NEXT: vpmulhw {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6],ymm2[7],ymm1[8,9],ymm2[10],ymm1[11,12,13,14],ymm2[15]
 ; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
 ; AVX2-NEXT: retq
 ;
@@ -932,60 +901,40 @@
 ; SSE41-NEXT: movdqa %xmm1, %xmm4
 ; SSE41-NEXT: movdqa %xmm0, %xmm1
 ; SSE41-NEXT: psraw $15, %xmm0
-; SSE41-NEXT: movdqa {{.*#+}} xmm5 = [1,4,2,16,8,32,64,2]
-; SSE41-NEXT: pmulhuw %xmm5, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [1,4,2,16,8,32,64,2]
+; SSE41-NEXT: pmulhuw %xmm7, %xmm0
 ; SSE41-NEXT: paddw %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm6
-; SSE41-NEXT: psraw $4, %xmm6
-; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm0[0,1,2],xmm6[3],xmm0[4],xmm6[5,6],xmm0[7]
-; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: psraw $2, %xmm7
-; SSE41-NEXT: pblendw {{.*#+}} xmm7 = xmm6[0],xmm7[1],xmm6[2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
-; SSE41-NEXT: movdqa %xmm7, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm6 = <u,16384,32768,4096,8192,2048,1024,32768>
+; SSE41-NEXT: movdqa %xmm0, %xmm5
+; SSE41-NEXT: pmulhw %xmm6, %xmm5
 ; SSE41-NEXT: psraw $1, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm7[0,1],xmm0[2],xmm7[3],xmm0[4,5],xmm7[6],xmm0[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm5[0,1],xmm0[2],xmm5[3,4,5,6],xmm0[7]
 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
 ; SSE41-NEXT: movdqa %xmm4, %xmm1
 ; SSE41-NEXT: psraw $15, %xmm1
-; SSE41-NEXT: pmulhuw %xmm5, %xmm1
+; SSE41-NEXT: pmulhuw %xmm7, %xmm1
 ; SSE41-NEXT: paddw %xmm4, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm6
-; SSE41-NEXT: psraw $4, %xmm6
-; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm1[0,1,2],xmm6[3],xmm1[4],xmm6[5,6],xmm1[7]
-; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: psraw $2, %xmm7
-; SSE41-NEXT: pblendw {{.*#+}} xmm7 = xmm6[0],xmm7[1],xmm6[2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
-; SSE41-NEXT: movdqa %xmm7, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm5
+; SSE41-NEXT: pmulhw %xmm6, %xmm5
 ; SSE41-NEXT: psraw $1, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm7[0,1],xmm1[2],xmm7[3],xmm1[4,5],xmm7[6],xmm1[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm5[0,1],xmm1[2],xmm5[3,4,5,6],xmm1[7]
 ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm4[0],xmm1[1,2,3,4,5,6,7]
 ; SSE41-NEXT: movdqa %xmm2, %xmm4
 ; SSE41-NEXT: psraw $15, %xmm4
-; SSE41-NEXT: pmulhuw %xmm5, %xmm4
+; SSE41-NEXT: pmulhuw %xmm7, %xmm4
 ; SSE41-NEXT: paddw %xmm2, %xmm4
-; SSE41-NEXT: movdqa %xmm4, %xmm6
-; SSE41-NEXT: psraw $4, %xmm6
-; SSE41-NEXT: pblendw {{.*#+}} xmm6 = xmm4[0,1,2],xmm6[3],xmm4[4],xmm6[5,6],xmm4[7]
-; SSE41-NEXT: movdqa %xmm6, %xmm7
-; SSE41-NEXT: psraw $2, %xmm7
-; SSE41-NEXT: pblendw {{.*#+}} xmm7 = xmm6[0],xmm7[1],xmm6[2,3],xmm7[4],xmm6[5],xmm7[6],xmm6[7]
-; SSE41-NEXT: movdqa %xmm7, %xmm4
+; SSE41-NEXT: movdqa %xmm4, %xmm5
+; SSE41-NEXT: pmulhw %xmm6, %xmm5
 ; SSE41-NEXT: psraw $1, %xmm4
-; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm7[0,1],xmm4[2],xmm7[3],xmm4[4,5],xmm7[6],xmm4[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm5[0,1],xmm4[2],xmm5[3,4,5,6],xmm4[7]
 ; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0],xmm4[1,2,3,4,5,6,7]
-; SSE41-NEXT: movdqa %xmm3, %xmm2
-; SSE41-NEXT: psraw $15, %xmm2
-; SSE41-NEXT: pmulhuw %xmm5, %xmm2
-; SSE41-NEXT: paddw %xmm3, %xmm2
-; SSE41-NEXT: movdqa %xmm2, %xmm5
-; SSE41-NEXT: psraw $4, %xmm5
-; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0,1,2],xmm5[3],xmm2[4],xmm5[5,6],xmm2[7]
-; SSE41-NEXT: movdqa %xmm5, %xmm2
-; SSE41-NEXT: psraw $2, %xmm2
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm5[0],xmm2[1],xmm5[2,3],xmm2[4],xmm5[5],xmm2[6],xmm5[7]
-; SSE41-NEXT: movdqa %xmm2, %xmm5
+; SSE41-NEXT: movdqa %xmm3, %xmm5
+; SSE41-NEXT: psraw $15, %xmm5
+; SSE41-NEXT: pmulhuw %xmm7, %xmm5
+; SSE41-NEXT: paddw %xmm3, %xmm5
+; SSE41-NEXT: pmulhw %xmm5, %xmm6
 ; SSE41-NEXT: psraw $1, %xmm5
-; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm2[0,1],xmm5[2],xmm2[3],xmm5[4,5],xmm2[6],xmm5[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2],xmm6[3,4,5,6],xmm5[7]
 ; SSE41-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0],xmm5[1,2,3,4,5,6,7]
 ; SSE41-NEXT: movdqa %xmm4, %xmm2
 ; SSE41-NEXT: movdqa %xmm5, %xmm3
@@ -993,85 +942,68 @@
 ;
 ; AVX1-LABEL: combine_vec_sdiv_by_pow2b_v32i16:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpsraw $15, %xmm3, %xmm4
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,4,2,16,8,32,64,2]
-; AVX1-NEXT: vpmulhuw %xmm2, %xmm4, %xmm4
-; AVX1-NEXT: vpaddw %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpsraw $4, %xmm3, %xmm4
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[3],xmm3[4],xmm4[5,6],xmm3[7]
-; AVX1-NEXT: vpsraw $2, %xmm3, %xmm4
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2,3],xmm4[4],xmm3[5],xmm4[6],xmm3[7]
-; AVX1-NEXT: vpsraw $1, %xmm3, %xmm4
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
-; AVX1-NEXT: vpsraw $15, %xmm0, %xmm4
-; AVX1-NEXT: vpmulhuw %xmm2, %xmm4, %xmm4
-; AVX1-NEXT: vpaddw %xmm4, %xmm0, %xmm4
-; AVX1-NEXT: vpsraw $4, %xmm4, %xmm5
-; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2],xmm5[3],xmm4[4],xmm5[5,6],xmm4[7]
-; AVX1-NEXT: vpsraw $2, %xmm4, %xmm5
-; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm5[1],xmm4[2,3],xmm5[4],xmm4[5],xmm5[6],xmm4[7]
-; AVX1-NEXT: vpsraw $1, %xmm4, %xmm5
-; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3],xmm5[4,5],xmm4[6],xmm5[7]
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT: vmovaps {{.*#+}} ymm4 = [0,65535,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
-; AVX1-NEXT: vandps %ymm4, %ymm3, %ymm3
-; AVX1-NEXT: vandnps %ymm0, %ymm4, %ymm0
-; AVX1-NEXT: vorps %ymm0, %ymm3, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpsraw $15, %xmm3, %xmm5
-; AVX1-NEXT: vpmulhuw %xmm2, %xmm5, %xmm5
-; AVX1-NEXT: vpaddw %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsraw $4, %xmm3, %xmm5
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2],xmm5[3],xmm3[4],xmm5[5,6],xmm3[7]
-; AVX1-NEXT: vpsraw $2, %xmm3, %xmm5
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm5[1],xmm3[2,3],xmm5[4],xmm3[5],xmm5[6],xmm3[7]
-; AVX1-NEXT: vpsraw $1, %xmm3, %xmm5
-; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm5[2],xmm3[3],xmm5[4,5],xmm3[6],xmm5[7]
-; AVX1-NEXT: vpsraw $15, %xmm1, %xmm5
-; AVX1-NEXT: vpmulhuw %xmm2, %xmm5, %xmm2
-; AVX1-NEXT: vpaddw %xmm2, %xmm1, %xmm2
-; AVX1-NEXT: vpsraw $4, %xmm2, %xmm5
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm5[3],xmm2[4],xmm5[5,6],xmm2[7]
-; AVX1-NEXT: vpsraw $2, %xmm2, %xmm5
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm5[1],xmm2[2,3],xmm5[4],xmm2[5],xmm5[6],xmm2[7]
-; AVX1-NEXT: vpsraw $1, %xmm2, %xmm5
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm5[2],xmm2[3],xmm5[4,5],xmm2[6],xmm5[7]
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT: vandps %ymm4, %ymm2, %ymm2
-; AVX1-NEXT: vandnps %ymm1, %ymm4, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsraw $15, %xmm2, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1,4,2,16,8,32,64,2]
+; AVX1-NEXT: vpmulhuw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = <u,16384,32768,4096,8192,2048,1024,32768>
+; AVX1-NEXT: vpmulhw %xmm3, %xmm2, %xmm5
+; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm2[0],xmm5[1,2,3,4,5,6,7]
+; AVX1-NEXT: vpsraw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0,1],xmm2[2],xmm5[3,4,5,6],xmm2[7]
+; AVX1-NEXT: vpsraw $15, %xmm0, %xmm5
+; AVX1-NEXT: vpmulhuw %xmm4, %xmm5, %xmm5
+; AVX1-NEXT: vpaddw %xmm5, %xmm0, %xmm5
+; AVX1-NEXT: vpmulhw %xmm3, %xmm5, %xmm6
+; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm5[0],xmm6[1,2,3,4,5,6,7]
+; AVX1-NEXT: vpsraw $1, %xmm5, %xmm5
+; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm6[0,1],xmm5[2],xmm6[3,4,5,6],xmm5[7]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2
+; AVX1-NEXT: vmovaps {{.*#+}} ymm5 = [0,65535,65535,65535,65535,65535,65535,65535,0,65535,65535,65535,65535,65535,65535,65535]
+; AVX1-NEXT: vandps %ymm5, %ymm2, %ymm2
+; AVX1-NEXT: vandnps %ymm0, %ymm5, %ymm0
+; AVX1-NEXT: vorps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpsraw $15, %xmm2, %xmm6
+; AVX1-NEXT: vpmulhuw %xmm4, %xmm6, %xmm6
+; AVX1-NEXT: vpaddw %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpmulhw %xmm3, %xmm2, %xmm6
+; AVX1-NEXT: vpblendw {{.*#+}} xmm6 = xmm2[0],xmm6[1,2,3,4,5,6,7]
+; AVX1-NEXT: vpsraw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm6[0,1],xmm2[2],xmm6[3,4,5,6],xmm2[7]
+; AVX1-NEXT: vpsraw $15, %xmm1, %xmm6
+; AVX1-NEXT: vpmulhuw %xmm4, %xmm6, %xmm4
+; AVX1-NEXT: vpaddw %xmm4, %xmm1, %xmm4
+; AVX1-NEXT: vpmulhw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1,2,3,4,5,6,7]
+; AVX1-NEXT: vpsraw $1, %xmm4, %xmm4
+; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3,4,5,6],xmm4[7]
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT: vandps %ymm5, %ymm2, %ymm2
+; AVX1-NEXT: vandnps %ymm1, %ymm5, %ymm1
 ; AVX1-NEXT: vorps %ymm1, %ymm2, %ymm1
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: combine_vec_sdiv_by_pow2b_v32i16:
 ; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,2,1,4,3,5,6,1,0,2,1,4,3,5,6,1]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
-; AVX2-NEXT: vpsraw $15, %ymm0, %ymm5
-; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [1,4,2,16,8,32,64,2,1,4,2,16,8,32,64,2]
-; AVX2-NEXT: # ymm6 = mem[0,1,0,1]
-; AVX2-NEXT: vpmulhuw %ymm6, %ymm5, %ymm5
-; AVX2-NEXT: vpaddw %ymm5, %ymm0, %ymm5
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm7 = ymm2[4],ymm5[4],ymm2[5],ymm5[5],ymm2[6],ymm5[6],ymm2[7],ymm5[7],ymm2[12],ymm5[12],ymm2[13],ymm5[13],ymm2[14],ymm5[14],ymm2[15],ymm5[15]
-; AVX2-NEXT: vpsravd %ymm4, %ymm7, %ymm7
-; AVX2-NEXT: vpsrld $16, %ymm7, %ymm7
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm5 = ymm2[0],ymm5[0],ymm2[1],ymm5[1],ymm2[2],ymm5[2],ymm2[3],ymm5[3],ymm2[8],ymm5[8],ymm2[9],ymm5[9],ymm2[10],ymm5[10],ymm2[11],ymm5[11]
-; AVX2-NEXT: vpsravd %ymm3, %ymm5, %ymm5
-; AVX2-NEXT: vpsrld $16, %ymm5, %ymm5
-; AVX2-NEXT: vpackusdw %ymm7, %ymm5, %ymm5
-; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm5[1,2,3,4,5,6,7],ymm0[8],ymm5[9,10,11,12,13,14,15]
-; AVX2-NEXT: vpsraw $15, %ymm1, %ymm5
-; AVX2-NEXT: vpmulhuw %ymm6, %ymm5, %ymm5
-; AVX2-NEXT: vpaddw %ymm5, %ymm1, %ymm5
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm2[4],ymm5[4],ymm2[5],ymm5[5],ymm2[6],ymm5[6],ymm2[7],ymm5[7],ymm2[12],ymm5[12],ymm2[13],ymm5[13],ymm2[14],ymm5[14],ymm2[15],ymm5[15]
-; AVX2-NEXT: vpsravd %ymm4, %ymm6, %ymm4
-; AVX2-NEXT: vpsrld $16, %ymm4, %ymm4
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm5[0],ymm2[1],ymm5[1],ymm2[2],ymm5[2],ymm2[3],ymm5[3],ymm2[8],ymm5[8],ymm2[9],ymm5[9],ymm2[10],ymm5[10],ymm2[11],ymm5[11]
-; AVX2-NEXT: vpsravd %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpsrld $16, %ymm2, %ymm2
-; AVX2-NEXT: vpackusdw %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpsraw $15, %ymm0, %ymm2
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [1,4,2,16,8,32,64,2,1,4,2,16,8,32,64,2]
+; AVX2-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX2-NEXT: vpmulhuw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm2
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [0,16384,32768,4096,8192,2048,1024,32768,0,16384,32768,4096,8192,2048,1024,32768]
+; AVX2-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX2-NEXT: vpmulhw %ymm4, %ymm2, %ymm5
+; AVX2-NEXT: vpsraw $1, %ymm2, %ymm2
+; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm5[0,1],ymm2[2],ymm5[3,4,5,6],ymm2[7],ymm5[8,9],ymm2[10],ymm5[11,12,13,14],ymm2[15]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3,4,5,6,7],ymm0[8],ymm2[9,10,11,12,13,14,15]
+; AVX2-NEXT: vpsraw $15, %ymm1, %ymm2
+; AVX2-NEXT: vpmulhuw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpaddw %ymm2, %ymm1, %ymm2
+; AVX2-NEXT: vpmulhw %ymm4, %ymm2, %ymm3
+; AVX2-NEXT: vpsraw $1, %ymm2, %ymm2
+; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1],ymm2[2],ymm3[3,4,5,6],ymm2[7],ymm3[8,9],ymm2[10],ymm3[11,12,13,14],ymm2[15]
 ; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4,5,6,7],ymm1[8],ymm2[9,10,11,12,13,14,15]
 ; AVX2-NEXT: retq
 ;
@@ -2959,18 +2891,12 @@
 ; SSE41-NEXT: pmullw %xmm0, %xmm1
 ; SSE41-NEXT: pmulhw {{.*}}(%rip), %xmm0
 ; SSE41-NEXT: paddw %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psraw $8, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3,4,5,6],xmm1[7]
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $4, %xmm2
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2],xmm1[3,4,5],xmm2[6],xmm1[7]
-; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: psraw $2, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3,4,5],xmm1[6],xmm2[7]
-; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = <256,16384,4096,u,u,32768,512,256>
+; SSE41-NEXT: pmulhw %xmm0, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6,7]
+; SSE41-NEXT: movdqa %xmm0, %xmm2
 ; SSE41-NEXT: psraw $1, %xmm2
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1,2,3,4],xmm2[5,6],xmm1[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1,2,3,4],xmm2[5],xmm1[6,7]
 ; SSE41-NEXT: psrlw $15, %xmm0
 ; SSE41-NEXT: paddw %xmm2, %xmm0
 ; SSE41-NEXT: retq
@@ -2980,14 +2906,10 @@
 ; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm1
 ; AVX1-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3,4,5,6],xmm1[7]
-; AVX1-NEXT: vpsraw $4, %xmm1, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4,5],xmm2[6],xmm1[7]
-; AVX1-NEXT: vpsraw $2, %xmm1, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3,4,5],xmm2[6],xmm1[7]
-; AVX1-NEXT: vpsraw $1, %xmm1, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6],xmm1[7]
+; AVX1-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6,7]
+; AVX1-NEXT: vpsraw $1, %xmm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5],xmm1[6,7]
 ; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0
 ; AVX1-NEXT: vpaddw %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT: retq
@@ -2997,13 +2919,12 @@
 ; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm1
 ; AVX2-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm1
-; AVX2-NEXT: vpsravd {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6,7]
+; AVX2-NEXT: vpsraw $1, %xmm0, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5],xmm1[6,7]
 ; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0
 ; AVX2-NEXT: vpaddw %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
 ; AVX512F-LABEL: combine_vec_sdiv_nonuniform5:
@@ -3083,21 +3004,12 @@
 ; SSE41-NEXT: pmullw %xmm0, %xmm1
 ; SSE41-NEXT: pmulhw {{.*}}(%rip), %xmm0
 ; SSE41-NEXT: paddw %xmm1, %xmm0
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psraw $8, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3,4,5],xmm1[6,7]
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $4, %xmm2
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm1[1,2,3,4],xmm2[5],xmm1[6],xmm2[7]
-; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psraw $2, %xmm3
-; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1,2,3,4],xmm3[5],xmm2[6,7]
-; SSE41-NEXT: movdqa %xmm3, %xmm1
-; SSE41-NEXT: psraw $1, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm3[0,1,2,3,4],xmm1[5],xmm3[6],xmm1[7]
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <4,256,256,u,u,512,256,8>
+; SSE41-NEXT: pmulhw %xmm0, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2],xmm0[3,4],xmm2[5,6,7]
 ; SSE41-NEXT: psrlw $15, %xmm0
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2],xmm2[3,4],xmm0[5,6,7]
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2],xmm1[3,4],xmm0[5,6,7]
 ; SSE41-NEXT: paddw %xmm2, %xmm1
 ; SSE41-NEXT: movdqa %xmm1, %xmm0
 ; SSE41-NEXT: retq
@@ -3107,14 +3019,8 @@
 ; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm1
 ; AVX1-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpsraw $8, %xmm0, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3,4,5],xmm1[6,7]
-; AVX1-NEXT: vpsraw $4, %xmm1, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3,4],xmm2[5],xmm1[6],xmm2[7]
-; AVX1-NEXT: vpsraw $2, %xmm1, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3,4],xmm2[5],xmm1[6,7]
-; AVX1-NEXT: vpsraw $1, %xmm1, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5],xmm1[6],xmm2[7]
+; AVX1-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6,7]
 ; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0
 ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3,4],xmm0[5,6,7]
@@ -3126,15 +3032,12 @@
 ; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm1
 ; AVX2-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm1
-; AVX2-NEXT: vpsravd {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[3,4],xmm1[5,6,7]
 ; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0
 ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3,4],xmm0[5,6,7]
 ; AVX2-NEXT: vpaddw %xmm0, %xmm1, %xmm0
-; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
 ; AVX512F-LABEL: combine_vec_sdiv_nonuniform6:
Index: llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll
+++ llvm/trunk/test/CodeGen/X86/vector-shift-ashr-128.ll
@@ -1109,35 +1109,20 @@
 ;
 ; SSE41-LABEL: constant_shift_v8i16:
 ; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: psraw $4, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psraw $2, %xmm2
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; SSE41-NEXT: movdqa %xmm2, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = <u,32768,16384,8192,4096,2048,1024,512>
+; SSE41-NEXT: pmulhw %xmm0, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
 ; SSE41-NEXT: psraw $1, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5,6,7]
 ; SSE41-NEXT: retq
 ;
-; AVX1-LABEL: constant_shift_v8i16:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpsraw $4, %xmm0, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
-; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: constant_shift_v8i16:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVX2-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; AVX-LABEL: constant_shift_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
+; AVX-NEXT: vpsraw $1, %xmm0, %xmm0
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3,4,5,6,7]
+; AVX-NEXT: retq
 ;
 ; XOP-LABEL: constant_shift_v8i16:
 ; XOP: # %bb.0:
Index: llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll
+++ llvm/trunk/test/CodeGen/X86/vector-shift-ashr-256.ll
@@ -1180,36 +1180,23 @@
 define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
 ; AVX1-LABEL: constant_shift_v16i16:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1
-; AVX1-NEXT: vpsraw $4, %xmm1, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-NEXT: vpsraw $2, %xmm1, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; AVX1-NEXT: vpsraw $1, %xmm1, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
-; AVX1-NEXT: vpsraw $4, %xmm0, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-NEXT: vpsraw $2, %xmm0, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
 ; AVX1-NEXT: vpsraw $1, %xmm0, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3,4,5,6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: constant_shift_v16i16:
 ; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
-; AVX2-NEXT: vpsravd %ymm3, %ymm4, %ymm3
-; AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
-; AVX2-NEXT: vpsravd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vpmulhw {{.*}}(%rip), %ymm0, %ymm1
+; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3,4,5,6,7,8],ymm0[9],ymm2[10,11,12,13,14,15]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX2-NEXT: retq
 ;
 ; XOPAVX1-LABEL: constant_shift_v16i16:
@@ -1257,36 +1244,23 @@
 ;
 ; X32-AVX1-LABEL: constant_shift_v16i16:
 ; X32-AVX1: # %bb.0:
-; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X32-AVX1-NEXT: vpsraw $8, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpsraw $4, %xmm1, %xmm2
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; X32-AVX1-NEXT: vpsraw $2, %xmm1, %xmm2
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; X32-AVX1-NEXT: vpsraw $1, %xmm1, %xmm2
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
-; X32-AVX1-NEXT: vpsraw $4, %xmm0, %xmm2
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; X32-AVX1-NEXT: vpsraw $2, %xmm0, %xmm2
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; X32-AVX1-NEXT: vpmulhw {{\.LCPI.*}}, %xmm0, %xmm1
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
 ; X32-AVX1-NEXT: vpsraw $1, %xmm0, %xmm2
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3,4,5,6,7]
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; X32-AVX1-NEXT: vpmulhw {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; X32-AVX1-NEXT: retl
 ;
 ; X32-AVX2-LABEL: constant_shift_v16i16:
 ; X32-AVX2: # %bb.0:
-; X32-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; X32-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
-; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
-; X32-AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
-; X32-AVX2-NEXT: vpsravd %ymm3, %ymm4, %ymm3
-; X32-AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
-; X32-AVX2-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; X32-AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
-; X32-AVX2-NEXT: vpsravd %ymm2, %ymm0, %ymm0
-; X32-AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
-; X32-AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpmulhw {{\.LCPI.*}}, %ymm0, %ymm1
+; X32-AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0],ymm1[1,2,3,4,5,6,7],ymm0[8],ymm1[9,10,11,12,13,14,15]
+; X32-AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm1[4,5,6,7]
+; X32-AVX2-NEXT: vpsraw $1, %ymm0, %ymm0
+; X32-AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3,4,5,6,7,8],ymm0[9],ymm2[10,11,12,13,14,15]
+; X32-AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; X32-AVX2-NEXT: retl
   %shift = ashr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
   ret <16 x i16> %shift
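Zooming out, every combine-sdiv.ll check above is the tail of the standard signed-divide-by-power-of-two expansion: PSRAW $15 builds the sign mask, PMULHUW by the divisor vector turns it into the bias (d - 1 on negative lanes, 0 elsewhere), PADDW adds it, and the final arithmetic shift, previously a PSRAW/PBLENDW chain, is what now becomes a single PMULHW. A per-lane scalar model of the sequence (helper names are illustrative, not from the tree):

#include <cassert>
#include <cstdint>

// One PMULHUW lane: high 16 bits of the widening unsigned 16x16->32 multiply.
static uint16_t MulHU16(uint16_t X, uint16_t Y) {
  return static_cast<uint16_t>((uint32_t(X) * uint32_t(Y)) >> 16);
}

// Scalar model of one lane for d == 1 << Log2d:
//   psraw $15 -> Sign (0 or 0xFFFF), pmulhuw -> Bias (d - 1 if X negative),
//   paddw     -> X + Bias, final psraw/pmulhw -> arithmetic shift by Log2d.
static int16_t SDivByPow2(int16_t X, int Log2d) {
  uint16_t Sign = static_cast<uint16_t>(X >> 15); // 0 or 0xFFFF
  uint16_t d = static_cast<uint16_t>(1u << Log2d);
  uint16_t Bias = MulHU16(Sign, d);               // (0xFFFF * d) >> 16 == d - 1
  int16_t Biased = static_cast<int16_t>(X + Bias);
  return static_cast<int16_t>(Biased >> Log2d);   // now rounds toward zero
}

int main() {
  for (int Log2d : {1, 2, 3, 4, 5, 6}) {
    for (int32_t X : {-32768, -12345, -7, -1, 0, 1, 7, 12345, 32767})
      assert(SDivByPow2(static_cast<int16_t>(X), Log2d) ==
             static_cast<int16_t>(X / (1 << Log2d)));
  }
  return 0;
}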