Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -21638,6 +21638,24 @@
   SDValue Hi = DAG.getIntPtrConstant(VT.getVectorNumElements() / 2, dl);
 
   if (VT == MVT::v32i8) {
+    if (Subtarget.hasBWI()) {
+      SDValue ExA = getExtendInVec(ExSSE41, dl, MVT::v32i16, A, DAG);
+      SDValue ExB = getExtendInVec(ExSSE41, dl, MVT::v32i16, B, DAG);
+      SDValue Mul = DAG.getNode(ISD::MUL, dl, MVT::v32i16, ExA, ExB);
+      Mul = DAG.getNode(ISD::SRL, dl, MVT::v32i16, Mul,
+                        DAG.getConstant(8, dl, MVT::v32i16));
+      // The ymm variant of PACKUS treats the 128-bit lanes separately, so
+      // before using PACKUS we need to permute the inputs to the correct
+      // lo/hi xmm lane.
+      const int Mask[] = { 0,  1,  2,  3,  4,  5,  6,  7,
+                          16, 17, 18, 19, 20, 21, 22, 23,
+                           8,  9, 10, 11, 12, 13, 14, 15,
+                          24, 25, 26, 27, 28, 29, 30, 31};
+      Mul = DAG.getVectorShuffle(MVT::v32i16, dl, Mul, Mul, Mask);
+      Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i16, Mul, Lo);
+      Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i16, Mul, Hi);
+      return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
+    }
     SDValue ALo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, A, Lo);
     SDValue BLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, B, Lo);
     SDValue AHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, A, Hi);
Index: test/CodeGen/X86/vector-idiv-sdiv-256.ll
===================================================================
--- test/CodeGen/X86/vector-idiv-sdiv-256.ll
+++ test/CodeGen/X86/vector-idiv-sdiv-256.ll
@@ -202,32 +202,51 @@
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
-; AVX2-LABEL: test_div7_32i8:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147]
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-NEXT: vpmovsxbw %xmm3, %ymm3
-; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2
-; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX2-NEXT: vpmovsxbw %xmm0, %ymm3
-; AVX2-NEXT: vpmullw %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX2-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpaddb %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: vpsrlw $2, %ymm0, %ymm1
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
-; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpsubb %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm0
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpaddb %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: retq
+; AVX2NOBW-LABEL: test_div7_32i8:
+; AVX2NOBW: # BB#0:
+; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm1 = [147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147]
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2NOBW-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX2NOBW-NEXT: vpmovsxbw %xmm3, %ymm3
+; AVX2NOBW-NEXT: vpmullw %ymm2, %ymm3, %ymm2
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2NOBW-NEXT: vpmovsxbw %xmm0, %ymm3
+; AVX2NOBW-NEXT: vpmullw %ymm1, %ymm3, %ymm1
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
+; AVX2NOBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; AVX2NOBW-NEXT: vpsrlw $2, %ymm0, %ymm1
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX2NOBW-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpsubb %ymm2, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpsrlw $7, %ymm0, %ymm0
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2NOBW-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; AVX2NOBW-NEXT: retq
+;
+; AVX512BW-LABEL: test_div7_32i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm1
+; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,4,5,2,3,6,7]
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512BW-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT: vpsrlw $2, %ymm0, %ymm1
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX512BW-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpsubb %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpsrlw $7, %ymm0, %ymm0
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT: vpaddb %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT: retq
   %res = sdiv <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
   ret <32 x i8> %res
 }
@@ -544,20 +563,12 @@
 ;
 ; AVX512BW-LABEL: test_rem7_32i8:
 ; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147]
-; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512BW-NEXT: vpmovsxbw %xmm2, %ymm2
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX512BW-NEXT: vpmovsxbw %xmm3, %ymm3
-; AVX512BW-NEXT: vpmullw %ymm2, %ymm3, %ymm2
-; AVX512BW-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX512BW-NEXT: vpmovsxbw %xmm1, %ymm1
-; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm3
-; AVX512BW-NEXT: vpmullw %ymm1, %ymm3, %ymm1
-; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
-; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
-; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX512BW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovsxbw %ymm0, %zmm1
+; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,4,5,2,3,6,7]
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512BW-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
 ; AVX512BW-NEXT: vpaddb %ymm0, %ymm1, %ymm1
 ; AVX512BW-NEXT: vpsrlw $2, %ymm1, %ymm2
 ; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
Index: test/CodeGen/X86/vector-idiv-udiv-256.ll
===================================================================
--- test/CodeGen/X86/vector-idiv-udiv-256.ll
+++ test/CodeGen/X86/vector-idiv-udiv-256.ll
@@ -208,29 +208,45 @@
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
-; AVX2-LABEL: test_div7_32i8:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
-; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2
-; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vpmullw %ymm1, %ymm3, %ymm1
-; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
-; AVX2-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX2-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
-; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpsrlw $2, %ymm0, %ymm0
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: retq
+; AVX2NOBW-LABEL: test_div7_32i8:
+; AVX2NOBW: # BB#0:
+; AVX2NOBW-NEXT: vmovdqa {{.*#+}} ymm1 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX2NOBW-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX2NOBW-NEXT: vpmullw %ymm2, %ymm3, %ymm2
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2NOBW-NEXT: vpmullw %ymm1, %ymm3, %ymm1
+; AVX2NOBW-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
+; AVX2NOBW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX2NOBW-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2NOBW-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX2NOBW-NEXT: vpsrlw $2, %ymm0, %ymm0
+; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2NOBW-NEXT: retq
+;
+; AVX512BW-LABEL: test_div7_32i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,4,5,2,3,6,7]
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512BW-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT: vpsrlw $2, %ymm0, %ymm0
+; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT: retq
   %res = udiv <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
   ret <32 x i8> %res
 }
@@ -550,20 +566,12 @@
 ;
 ; AVX512BW-LABEL: test_rem7_32i8:
 ; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
-; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
-; AVX512BW-NEXT: vpmullw %ymm2, %ymm3, %ymm2
-; AVX512BW-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX512BW-NEXT: vpmullw %ymm1, %ymm3, %ymm1
-; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
-; AVX512BW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
-; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
-; AVX512BW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
+; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
+; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,4,5,2,3,6,7]
+; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm2
+; AVX512BW-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
 ; AVX512BW-NEXT: vpsubb %ymm1, %ymm0, %ymm2
 ; AVX512BW-NEXT: vpsrlw $1, %ymm2, %ymm2
 ; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
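
To make the reasoning behind the v32i16 shuffle in the X86ISelLowering.cpp hunk concrete: the ymm variant of VPACKUSWB packs each 128-bit lane of its two inputs independently, so packing the plain lo/hi halves of the v32i16 multiply-high result would emit bytes 0-7, 16-23, 8-15, 24-31 instead of 0-31 in order. The standalone C++ sketch below is illustrative only, not LLVM code; Packuswb256 and the other names are hypothetical. It emulates the per-lane pack and shows that applying the patch's interleaving mask first yields the bytes in the right order.

#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdio>

using V16x16 = std::array<uint16_t, 16>; // one ymm register viewed as 16 words
using V32x8 = std::array<uint8_t, 32>;   // one ymm register viewed as 32 bytes

// Emulates the 256-bit VPACKUSWB lane structure: each 128-bit lane of the
// result takes 8 words from A, then 8 words from B. (Saturation is reduced
// to an upper clamp; the values in this demo already fit in a byte.)
static V32x8 Packuswb256(const V16x16 &A, const V16x16 &B) {
  V32x8 R{};
  for (int Lane = 0; Lane != 2; ++Lane)
    for (int I = 0; I != 8; ++I) {
      R[Lane * 16 + I] = (uint8_t)std::min<uint16_t>(A[Lane * 8 + I], 255);
      R[Lane * 16 + 8 + I] = (uint8_t)std::min<uint16_t>(B[Lane * 8 + I], 255);
    }
  return R;
}

int main() {
  // Stand-in for the 32 word-sized multiply-high results; element I holds
  // the value I so the final byte order is easy to read off.
  std::array<uint16_t, 32> Mul;
  for (int I = 0; I != 32; ++I)
    Mul[I] = (uint16_t)I;

  // The interleaving mask from the patch: swap the two middle 128-bit chunks.
  const int Mask[] = { 0,  1,  2,  3,  4,  5,  6,  7,
                      16, 17, 18, 19, 20, 21, 22, 23,
                       8,  9, 10, 11, 12, 13, 14, 15,
                      24, 25, 26, 27, 28, 29, 30, 31};
  std::array<uint16_t, 32> Shuf;
  for (int I = 0; I != 32; ++I)
    Shuf[I] = Mul[Mask[I]];

  // EXTRACT_SUBVECTOR of the lo and hi v16i16 halves, then the per-lane pack.
  V16x16 Lo, Hi;
  for (int I = 0; I != 16; ++I) {
    Lo[I] = Shuf[I];
    Hi[I] = Shuf[16 + I];
  }
  V32x8 R = Packuswb256(Lo, Hi);

  for (int I = 0; I != 32; ++I)
    printf("%d ", (int)R[I]); // prints 0 1 2 ... 31, the correct order
  printf("\n");
  return 0;
}

Skipping the Shuf step and packing Mul's halves directly reproduces the swapped middle chunks (0-7, 16-23, 8-15, 24-31), which is exactly what the comment in the hunk warns about.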