Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -23020,16 +23020,13 @@
     Hi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
     Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Lo, 8, DAG);
     Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Hi, 8, DAG);
-    // The ymm variant of PACKUS treats the 128-bit lanes separately, so
-    // before using PACKUS we need to permute the inputs to the correct lo/hi
-    // xmm lane.
-    const int LoMask[] = {0, 1, 2, 3, 4, 5, 6, 7,
-                          16, 17, 18, 19, 20, 21, 22, 23};
-    const int HiMask[] = {8, 9, 10, 11, 12, 13, 14, 15,
-                          24, 25, 26, 27, 28, 29, 30, 31};
-    return DAG.getNode(X86ISD::PACKUS, dl, VT,
-                       DAG.getVectorShuffle(ExVT, dl, Lo, Hi, LoMask),
-                       DAG.getVectorShuffle(ExVT, dl, Lo, Hi, HiMask));
+
+    SDValue Res = DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
+    // The ymm variant of PACKUS treats the 128-bit lanes separately, so we
+    // need to permute the final result into place.
+    Res = DAG.getBitcast(MVT::v4i64, Res);
+    Res = DAG.getVectorShuffle(MVT::v4i64, dl, Res, Res, { 0, 2, 1, 3 });
+    return DAG.getBitcast(VT, Res);
   }

   assert(VT == MVT::v16i8 && "Unexpected VT");
Index: test/CodeGen/X86/vector-idiv-sdiv-256.ll
===================================================================
--- test/CodeGen/X86/vector-idiv-sdiv-256.ll
+++ test/CodeGen/X86/vector-idiv-sdiv-256.ll
@@ -208,9 +208,8 @@
 ; AVX2NOBW-NEXT: vpmovsxbw %xmm0, %ymm3
 ; AVX2NOBW-NEXT: vpmullw %ymm2, %ymm3, %ymm2
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX2NOBW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm2[2,3],ymm1[2,3]
-; AVX2NOBW-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX2NOBW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpackuswb %ymm1, %ymm2, %ymm1
+; AVX2NOBW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
 ; AVX2NOBW-NEXT: vpaddb %ymm0, %ymm1, %ymm0
 ; AVX2NOBW-NEXT: vpsrlw $2, %ymm0, %ymm1
 ; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
@@ -512,9 +511,8 @@
 ; AVX2NOBW-NEXT: vpmovsxbw %xmm0, %ymm3
 ; AVX2NOBW-NEXT: vpmullw %ymm2, %ymm3, %ymm2
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX2NOBW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm2[2,3],ymm1[2,3]
-; AVX2NOBW-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX2NOBW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpackuswb %ymm1, %ymm2, %ymm1
+; AVX2NOBW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
 ; AVX2NOBW-NEXT: vpaddb %ymm0, %ymm1, %ymm1
 ; AVX2NOBW-NEXT: vpsrlw $2, %ymm1, %ymm2
 ; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
Index: test/CodeGen/X86/vector-idiv-sdiv-512.ll
===================================================================
--- test/CodeGen/X86/vector-idiv-sdiv-512.ll
+++ test/CodeGen/X86/vector-idiv-sdiv-512.ll
@@ -135,9 +135,8 @@
 ; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm4
 ; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm4
 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
-; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm4[2,3],ymm2[2,3]
-; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
-; AVX512F-NEXT: vpackuswb %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT: vpackuswb %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
 ; AVX512F-NEXT: vpaddb %ymm0, %ymm2, %ymm0
 ; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm2
 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
@@ -156,9 +155,8 @@
 ; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm7
 ; AVX512F-NEXT: vpmullw %ymm3, %ymm7, %ymm3
 ; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
-; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm3[2,3],ymm2[2,3]
-; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX512F-NEXT: vpackuswb %ymm7, %ymm2, %ymm2
+; AVX512F-NEXT: vpackuswb %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
 ; AVX512F-NEXT: vpaddb %ymm1, %ymm2, %ymm1
 ; AVX512F-NEXT: vpsrlw $7, %ymm1, %ymm2
 ; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2
@@ -370,9 +368,8 @@
 ; AVX512F-NEXT: vpmovsxbw %xmm0, %ymm4
 ; AVX512F-NEXT: vpmullw %ymm2, %ymm4, %ymm4
 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
-; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm4[2,3],ymm3[2,3]
-; AVX512F-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3
-; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
+; AVX512F-NEXT: vpackuswb %ymm3, %ymm4, %ymm3
+; AVX512F-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,1,3]
 ; AVX512F-NEXT: vpaddb %ymm0, %ymm3, %ymm3
 ; AVX512F-NEXT: vpsrlw $7, %ymm3, %ymm5
 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
@@ -403,9 +400,8 @@
 ; AVX512F-NEXT: vpmovsxbw %xmm1, %ymm8
 ; AVX512F-NEXT: vpmullw %ymm2, %ymm8, %ymm2
 ; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm8 = ymm2[2,3],ymm7[2,3]
-; AVX512F-NEXT: vinserti128 $1, %xmm7, %ymm2, %ymm2
-; AVX512F-NEXT: vpackuswb %ymm8, %ymm2, %ymm2
+; AVX512F-NEXT: vpackuswb %ymm7, %ymm2, %ymm2
+; AVX512F-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
 ; AVX512F-NEXT: vpaddb %ymm1, %ymm2, %ymm2
 ; AVX512F-NEXT: vpsrlw $7, %ymm2, %ymm7
 ; AVX512F-NEXT: vpand %ymm4, %ymm7, %ymm4
Index: test/CodeGen/X86/vector-idiv-udiv-256.ll
===================================================================
--- test/CodeGen/X86/vector-idiv-udiv-256.ll
+++ test/CodeGen/X86/vector-idiv-udiv-256.ll
@@ -214,9 +214,8 @@
 ; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX2NOBW-NEXT: vpmullw %ymm2, %ymm3, %ymm2
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX2NOBW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm2[2,3],ymm1[2,3]
-; AVX2NOBW-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX2NOBW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpackuswb %ymm1, %ymm2, %ymm1
+; AVX2NOBW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
 ; AVX2NOBW-NEXT: vpsubb %ymm1, %ymm0, %ymm0
 ; AVX2NOBW-NEXT: vpsrlw $1, %ymm0, %ymm0
 ; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
@@ -518,9 +517,8 @@
 ; AVX2NOBW-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX2NOBW-NEXT: vpmullw %ymm2, %ymm3, %ymm2
 ; AVX2NOBW-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX2NOBW-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm2[2,3],ymm1[2,3]
-; AVX2NOBW-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
-; AVX2NOBW-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX2NOBW-NEXT: vpackuswb %ymm1, %ymm2, %ymm1
+; AVX2NOBW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
 ; AVX2NOBW-NEXT: vpsubb %ymm1, %ymm0, %ymm2
 ; AVX2NOBW-NEXT: vpsrlw $1, %ymm2, %ymm2
 ; AVX2NOBW-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
Index: test/CodeGen/X86/vector-idiv-udiv-512.ll
===================================================================
--- test/CodeGen/X86/vector-idiv-udiv-512.ll
+++ test/CodeGen/X86/vector-idiv-udiv-512.ll
@@ -146,9 +146,8 @@
 ; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm4
 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
-; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm4[2,3],ymm2[2,3]
-; AVX512F-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
-; AVX512F-NEXT: vpackuswb %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT: vpackuswb %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
 ; AVX512F-NEXT: vpsubb %ymm2, %ymm0, %ymm0
 ; AVX512F-NEXT: vpsrlw $1, %ymm0, %ymm0
 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
@@ -164,9 +163,8 @@
 ; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
 ; AVX512F-NEXT: vpmullw %ymm3, %ymm6, %ymm3
 ; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
-; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm3[2,3],ymm5[2,3]
-; AVX512F-NEXT: vinserti128 $1, %xmm5, %ymm3, %ymm3
-; AVX512F-NEXT: vpackuswb %ymm6, %ymm3, %ymm3
+; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
+; AVX512F-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,1,3]
 ; AVX512F-NEXT: vpsubb %ymm3, %ymm1, %ymm1
 ; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
 ; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
@@ -384,17 +382,16 @@
 ; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm4 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX512F-NEXT: vpmullw %ymm2, %ymm4, %ymm4
 ; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
-; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm4[2,3],ymm3[2,3]
-; AVX512F-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3
-; AVX512F-NEXT: vpackuswb %ymm5, %ymm3, %ymm3
+; AVX512F-NEXT: vpackuswb %ymm3, %ymm4, %ymm3
+; AVX512F-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,1,3]
 ; AVX512F-NEXT: vpsubb %ymm3, %ymm0, %ymm4
-; AVX512F-NEXT: vpsrlw $1, %ymm4, %ymm5
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
-; AVX512F-NEXT: vpand %ymm4, %ymm5, %ymm5
-; AVX512F-NEXT: vpaddb %ymm3, %ymm5, %ymm3
+; AVX512F-NEXT: vpsrlw $1, %ymm4, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT: vpaddb %ymm3, %ymm4, %ymm3
 ; AVX512F-NEXT: vpsrlw $2, %ymm3, %ymm3
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
-; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm6
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm6
 ; AVX512F-NEXT: vpmovsxbw %xmm6, %ymm7
 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
 ; AVX512F-NEXT: vpmullw %ymm3, %ymm7, %ymm7
@@ -414,15 +411,14 @@
 ; AVX512F-NEXT: vpmovzxbw {{.*#+}} ymm7 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
 ; AVX512F-NEXT: vpmullw %ymm2, %ymm7, %ymm2
 ; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX512F-NEXT: vperm2i128 {{.*#+}} ymm7 = ymm2[2,3],ymm6[2,3]
-; AVX512F-NEXT: vinserti128 $1, %xmm6, %ymm2, %ymm2
-; AVX512F-NEXT: vpackuswb %ymm7, %ymm2, %ymm2
+; AVX512F-NEXT: vpackuswb %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
 ; AVX512F-NEXT: vpsubb %ymm2, %ymm1, %ymm6
 ; AVX512F-NEXT: vpsrlw $1, %ymm6, %ymm6
-; AVX512F-NEXT: vpand %ymm4, %ymm6, %ymm4
-; AVX512F-NEXT: vpaddb %ymm2, %ymm4, %ymm2
+; AVX512F-NEXT: vpand %ymm5, %ymm6, %ymm5
+; AVX512F-NEXT: vpaddb %ymm2, %ymm5, %ymm2
 ; AVX512F-NEXT: vpsrlw $2, %ymm2, %ymm2
-; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2
 ; AVX512F-NEXT: vpmovsxbw %xmm2, %ymm4
 ; AVX512F-NEXT: vpmullw %ymm3, %ymm4, %ymm4
 ; AVX512F-NEXT: vpmovsxwd %ymm4, %zmm4
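For illustration, here is a standalone C++ sketch of why the single vpermq is enough: an in-lane vpackuswb of the two shifted products leaves the result's 64-bit quarters in the order lo0, hi0, lo1, hi1, and permuting the quarters with [0,2,1,3] (immediate 0xd8) restores lo0, lo1, hi0, hi1. The sketch only simulates the two instructions on plain arrays; helper names such as packuswb256 and permq_0213 are invented for the example and are not part of the patch.

// pack_permq_demo.cpp -- illustrative simulation, not part of the patch.
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <cstring>

using V16x16 = std::array<uint16_t, 16>; // one ymm holding 16 x i16
using V32x8  = std::array<uint8_t, 32>;  // one ymm holding 32 x i8

static uint8_t satu8(uint16_t v) {
  return static_cast<uint8_t>(std::min<uint16_t>(v, 255));
}

// vpackuswb ymm: each 128-bit lane packs 8 words from the first source and
// 8 words from the second source, independently of the other lane.
static V32x8 packuswb256(const V16x16 &a, const V16x16 &b) {
  V32x8 r{};
  for (int lane = 0; lane < 2; ++lane) {
    for (int i = 0; i < 8; ++i) {
      r[lane * 16 + i]     = satu8(a[lane * 8 + i]);
      r[lane * 16 + 8 + i] = satu8(b[lane * 8 + i]);
    }
  }
  return r;
}

// vpermq $0xd8, i.e. quarter order [0,2,1,3].
static V32x8 permq_0213(const V32x8 &v) {
  V32x8 r{};
  const int Order[4] = {0, 2, 1, 3};
  for (int q = 0; q < 4; ++q)
    std::memcpy(&r[q * 8], &v[Order[q] * 8], 8);
  return r;
}

int main() {
  // Lo/Hi stand for the already-shifted products: every element fits in 8 bits.
  V16x16 lo, hi;
  for (int i = 0; i < 16; ++i) { lo[i] = i; hi[i] = 100 + i; }

  // Reference result: truncated Lo elements followed by truncated Hi elements.
  V32x8 expect{};
  for (int i = 0; i < 16; ++i) { expect[i] = satu8(lo[i]); expect[16 + i] = satu8(hi[i]); }

  // New lowering: one in-lane pack, then fix the quarter order with vpermq.
  V32x8 got = permq_0213(packuswb256(lo, hi));
  assert(got == expect);
  return 0;
}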