Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -9405,6 +9405,113 @@
   return SDValue();
 }
 
+static bool maskContainsSequenceForVPMOV(ArrayRef<int> Mask, bool SwappedOps,
+                                         int Delta) {
+  int Size = (int)Mask.size();
+  int N = Size / Delta;
+  int I = 0;
+
+  if (SwappedOps) {
+    // Look for e.g.: <8, 10, 12, 14, 0, 0, 0, 0>
+
+    // These should refer to the second vector's elements.
+    while (I < N) {
+      if (Mask[I] != Size + I * Delta)
+        return false;
+      ++I;
+    }
+    // The rest should not refer to the second vector's elements.
+    while (I < Size) {
+      if (Mask[I] >= Size && Mask[I] < Size * 2)
+        return false;
+      ++I;
+    }
+  } else {
+    // Look for e.g.: <0, 2, 4, 6, 8, 8, 8, 8>
+
+    // These should refer to the first vector's elements.
+    while (I < N) {
+      if (Mask[I] != I * Delta)
+        return false;
+      ++I;
+    }
+    // The rest should not refer to the first vector's elements.
+    while (I < Size) {
+      if (Mask[I] >= 0 && Mask[I] < Size)
+        return false;
+      ++I;
+    }
+  }
+
+  return true;
+}
+
+// Try to lower trunc+vector_shuffle to a single vpmov instruction such as
+// vpmovqw or vpmovdb.
+//
+// An example is the following:
+//
+//   t0: ch = EntryToken
+//     t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
+//       t25: v4i32 = truncate t2
+//     t41: v8i16 = bitcast t25
+//     t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
+//                  Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
+//   t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
+//   t18: v2i64 = bitcast t51
+//
+// Without avx512vl, this is lowered to:
+//
+//   vpmovqd %zmm0, %ymm0
+//   vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+//
+// But when avx512vl is available, one can just use a single vpmovqw
+// instruction.
+static SDValue lowerVectorShuffleWithVPMOV(const SDLoc &DL, ArrayRef<int> Mask,
+                                           MVT VT, SDValue V1, SDValue V2,
+                                           SelectionDAG &DAG,
+                                           const X86Subtarget &Subtarget) {
+  if (!Subtarget.hasVLX())
+    return SDValue();
+
+  if (VT != MVT::v16i8 && VT != MVT::v8i16)
+    return SDValue();
+
+  bool SwappedOps = false;
+
+  if (!ISD::isBuildVectorAllZeros(V2.getNode())) {
+    if (!ISD::isBuildVectorAllZeros(V1.getNode()))
+      return SDValue();
+
+    std::swap(V1, V2);
+    SwappedOps = true;
+  }
+
+  // Look for:
+  //
+  //   bitcast (truncate <8 x i32> %vec to <8 x i16>) to <16 x i8>
+  //   bitcast (truncate <4 x i64> %vec to <4 x i32>) to <8 x i16>
+  //
+  // and similar ones.
+  if (V1.getOpcode() != ISD::BITCAST)
+    return SDValue();
+  if (V1.getOperand(0).getOpcode() != ISD::TRUNCATE)
+    return SDValue();
+
+  // Look for a mask of the form <0, 2, 4, 6, #, #, #, #>
+  // or <0, 4, 8, 12, #, #, #, #>.
+  size_t N = VT.getVectorNumElements();
+
+  if (Mask.size() != N)
+    return SDValue();
+
+  // The first half/quarter of the mask should refer to every second/fourth
+  // element of the truncated and bitcast vector.
+  if (!maskContainsSequenceForVPMOV(Mask, SwappedOps, 2) &&
+      !maskContainsSequenceForVPMOV(Mask, SwappedOps, 4))
+    return SDValue();
+
+  return DAG.getNode(X86ISD::VTRUNC, DL, VT, V1.getOperand(0).getOperand(0));
+}
+
 // X86 has dedicated pack instructions that can handle specific truncation
 // operations: PACKSS and PACKUS.
 static bool matchVectorShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1,
@@ -14940,6 +15047,10 @@
   if (canonicalizeShuffleMaskWithCommute(Mask))
     return DAG.getCommutedVectorShuffle(*SVOp);
 
+  if (SDValue V = lowerVectorShuffleWithVPMOV(DL, Mask, VT, V1, V2, DAG,
+                                              Subtarget))
+    return V;
+
   // For each vector width, delegate to a specialized lowering routine.
   if (VT.is128BitVector())
     return lower128BitVectorShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget,
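For reference, the DAG in the comment above corresponds to IR of roughly the following shape. This is a sketch, not part of the patch; the function name is illustrative, and the expected output (a single vpmovqw %ymm0, %xmm0 under -mattr=+avx512vl) is taken from the trunc_v4i64_to_v4i16_return_v2i64 test updated below:

define <2 x i64> @trunc_v4i64_to_v4i16_sketch(<4 x i64> %vec) nounwind {
  ; Truncate each 64-bit lane to 16 bits ...
  %truncated = trunc <4 x i64> %vec to <4 x i16>
  ; ... then pad with zeros: mask lanes 4-7 select from the zero operand.
  %shuffled = shufflevector <4 x i16> %truncated, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %result = bitcast <8 x i16> %shuffled to <2 x i64>
  ret <2 x i64> %result
}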
Index: test/CodeGen/X86/shuffle-vs-trunc-256.ll
===================================================================
--- test/CodeGen/X86/shuffle-vs-trunc-256.ll
+++ test/CodeGen/X86/shuffle-vs-trunc-256.ll
@@ -511,8 +511,7 @@
 ;
 ; AVX512VL-LABEL: trunc_v8i32_to_v8i8_return_v2i64:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX512VL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpmovdb %ymm0, %xmm0
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
@@ -526,15 +525,13 @@
 ;
 ; AVX512BWVL-LABEL: trunc_v8i32_to_v8i8_return_v2i64:
 ; AVX512BWVL:       # %bb.0:
-; AVX512BWVL-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX512BWVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BWVL-NEXT:    vpmovdb %ymm0, %xmm0
 ; AVX512BWVL-NEXT:    vzeroupper
 ; AVX512BWVL-NEXT:    retq
 ;
 ; AVX512VBMIVL-LABEL: trunc_v8i32_to_v8i8_return_v2i64:
 ; AVX512VBMIVL:       # %bb.0:
-; AVX512VBMIVL-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX512VBMIVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VBMIVL-NEXT:    vpmovdb %ymm0, %xmm0
 ; AVX512VBMIVL-NEXT:    vzeroupper
 ; AVX512VBMIVL-NEXT:    retq
   %truncated.vec = trunc <8 x i32> %vec to <8 x i8>
@@ -573,8 +570,7 @@
 ;
 ; AVX512VL-LABEL: trunc_v8i32_to_v8i8_with_zext_return_v16i8:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX512VL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpmovdb %ymm0, %xmm0
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
@@ -588,15 +584,13 @@
 ;
 ; AVX512BWVL-LABEL: trunc_v8i32_to_v8i8_with_zext_return_v16i8:
 ; AVX512BWVL:       # %bb.0:
-; AVX512BWVL-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX512BWVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BWVL-NEXT:    vpmovdb %ymm0, %xmm0
 ; AVX512BWVL-NEXT:    vzeroupper
 ; AVX512BWVL-NEXT:    retq
 ;
 ; AVX512VBMIVL-LABEL: trunc_v8i32_to_v8i8_with_zext_return_v16i8:
 ; AVX512VBMIVL:       # %bb.0:
-; AVX512VBMIVL-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX512VBMIVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VBMIVL-NEXT:    vpmovdb %ymm0, %xmm0
 ; AVX512VBMIVL-NEXT:    vzeroupper
 ; AVX512VBMIVL-NEXT:    retq
   %truncated = trunc <8 x i32> %vec to <8 x i8>
@@ -636,8 +630,7 @@
 ;
 ; AVX512VL-LABEL: trunc_v8i32_to_v8i8_via_v8i16_return_v16i8:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX512VL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpmovdb %ymm0, %xmm0
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
@@ -651,15 +644,13 @@
 ;
 ; AVX512BWVL-LABEL: trunc_v8i32_to_v8i8_via_v8i16_return_v16i8:
 ; AVX512BWVL:       # %bb.0:
-; AVX512BWVL-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX512BWVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BWVL-NEXT:    vpmovdb %ymm0, %xmm0
 ; AVX512BWVL-NEXT:    vzeroupper
 ; AVX512BWVL-NEXT:    retq
 ;
 ; AVX512VBMIVL-LABEL: trunc_v8i32_to_v8i8_via_v8i16_return_v16i8:
 ; AVX512VBMIVL:       # %bb.0:
-; AVX512VBMIVL-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX512VBMIVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VBMIVL-NEXT:    vpmovdb %ymm0, %xmm0
 ; AVX512VBMIVL-NEXT:    vzeroupper
 ; AVX512VBMIVL-NEXT:    retq
   %truncated = trunc <8 x i32> %vec to <8 x i16>
@@ -698,8 +689,7 @@
 ;
 ; AVX512VL-LABEL: trunc_v8i32_to_v8i8_return_v16i8:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX512VL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpmovdb %ymm0, %xmm0
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
@@ -713,15 +703,13 @@
 ;
 ; AVX512BWVL-LABEL: trunc_v8i32_to_v8i8_return_v16i8:
 ; AVX512BWVL:       # %bb.0:
-; AVX512BWVL-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX512BWVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BWVL-NEXT:    vpmovdb %ymm0, %xmm0
 ; AVX512BWVL-NEXT:    vzeroupper
 ; AVX512BWVL-NEXT:    retq
 ;
 ; AVX512VBMIVL-LABEL: trunc_v8i32_to_v8i8_return_v16i8:
 ; AVX512VBMIVL:       # %bb.0:
-; AVX512VBMIVL-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX512VBMIVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VBMIVL-NEXT:    vpmovdb %ymm0, %xmm0
 ; AVX512VBMIVL-NEXT:    vzeroupper
 ; AVX512VBMIVL-NEXT:    retq
   %truncated = trunc <8 x i32> %vec to <8 x i8>
@@ -766,8 +754,7 @@
 ;
 ; AVX512VL-LABEL: trunc_v4i64_to_v4i16_return_v2i64:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpmovqd %ymm0, %xmm0
-; AVX512VL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpmovqw %ymm0, %xmm0
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
@@ -781,15 +768,13 @@
 ;
 ; AVX512BWVL-LABEL: trunc_v4i64_to_v4i16_return_v2i64:
 ; AVX512BWVL:       # %bb.0:
-; AVX512BWVL-NEXT:    vpmovqd %ymm0, %xmm0
-; AVX512BWVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BWVL-NEXT:    vpmovqw %ymm0, %xmm0
 ; AVX512BWVL-NEXT:    vzeroupper
 ; AVX512BWVL-NEXT:    retq
 ;
 ; AVX512VBMIVL-LABEL: trunc_v4i64_to_v4i16_return_v2i64:
 ; AVX512VBMIVL:       # %bb.0:
-; AVX512VBMIVL-NEXT:    vpmovqd %ymm0, %xmm0
-; AVX512VBMIVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VBMIVL-NEXT:    vpmovqw %ymm0, %xmm0
 ; AVX512VBMIVL-NEXT:    vzeroupper
 ; AVX512VBMIVL-NEXT:    retq
   %truncated = trunc <4 x i64> %vec to <4 x i16>
@@ -833,8 +818,7 @@
 ;
 ; AVX512VL-LABEL: trunc_v4i64_to_v4i16_with_zext_return_v8i16:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpmovqd %ymm0, %xmm0
-; AVX512VL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpmovqw %ymm0, %xmm0
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
@@ -848,15 +832,13 @@
 ;
 ; AVX512BWVL-LABEL: trunc_v4i64_to_v4i16_with_zext_return_v8i16:
 ; AVX512BWVL:       # %bb.0:
-; AVX512BWVL-NEXT:    vpmovqd %ymm0, %xmm0
-; AVX512BWVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BWVL-NEXT:    vpmovqw %ymm0, %xmm0
 ; AVX512BWVL-NEXT:    vzeroupper
 ; AVX512BWVL-NEXT:    retq
 ;
 ; AVX512VBMIVL-LABEL: trunc_v4i64_to_v4i16_with_zext_return_v8i16:
 ; AVX512VBMIVL:       # %bb.0:
-; AVX512VBMIVL-NEXT:    vpmovqd %ymm0, %xmm0
-; AVX512VBMIVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VBMIVL-NEXT:    vpmovqw %ymm0, %xmm0
 ; AVX512VBMIVL-NEXT:    vzeroupper
 ; AVX512VBMIVL-NEXT:    retq
   %truncated = trunc <4 x i64> %vec to <4 x i16>
@@ -901,8 +883,7 @@
 ;
 ; AVX512VL-LABEL: trunc_v4i64_to_v4i16_via_v4i32_return_v8i16:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpmovqd %ymm0, %xmm0
-; AVX512VL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpmovqw %ymm0, %xmm0
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
@@ -916,15 +897,13 @@
 ;
 ; AVX512BWVL-LABEL: trunc_v4i64_to_v4i16_via_v4i32_return_v8i16:
 ; AVX512BWVL:       # %bb.0:
-; AVX512BWVL-NEXT:    vpmovqd %ymm0, %xmm0
-; AVX512BWVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BWVL-NEXT:    vpmovqw %ymm0, %xmm0
 ; AVX512BWVL-NEXT:    vzeroupper
 ; AVX512BWVL-NEXT:    retq
 ;
 ; AVX512VBMIVL-LABEL: trunc_v4i64_to_v4i16_via_v4i32_return_v8i16:
 ; AVX512VBMIVL:       # %bb.0:
-; AVX512VBMIVL-NEXT:    vpmovqd %ymm0, %xmm0
-; AVX512VBMIVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VBMIVL-NEXT:    vpmovqw %ymm0, %xmm0
 ; AVX512VBMIVL-NEXT:    vzeroupper
 ; AVX512VBMIVL-NEXT:    retq
   %truncated = trunc <4 x i64> %vec to <4 x i32>
@@ -968,8 +947,7 @@
 ;
 ; AVX512VL-LABEL: trunc_v4i64_to_v4i16_return_v8i16:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpmovqd %ymm0, %xmm0
-; AVX512VL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT:    vpmovqw %ymm0, %xmm0
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
@@ -983,15 +961,13 @@
 ;
 ; AVX512BWVL-LABEL: trunc_v4i64_to_v4i16_return_v8i16:
 ; AVX512BWVL:       # %bb.0:
-; AVX512BWVL-NEXT:    vpmovqd %ymm0, %xmm0
-; AVX512BWVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BWVL-NEXT:    vpmovqw %ymm0, %xmm0
 ; AVX512BWVL-NEXT:    vzeroupper
 ; AVX512BWVL-NEXT:    retq
 ;
 ; AVX512VBMIVL-LABEL: trunc_v4i64_to_v4i16_return_v8i16:
 ; AVX512VBMIVL:       # %bb.0:
-; AVX512VBMIVL-NEXT:    vpmovqd %ymm0, %xmm0
-; AVX512VBMIVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VBMIVL-NEXT:    vpmovqw %ymm0, %xmm0
 ; AVX512VBMIVL-NEXT:    vzeroupper
 ; AVX512VBMIVL-NEXT:    retq
   %truncated = trunc <4 x i64> %vec to <4 x i16>
@@ -1034,8 +1010,7 @@
 ;
 ; AVX512VL-LABEL: trunc_v4i64_to_v4i8_return_v16i8:
 ; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vpmovqd %ymm0, %xmm0
-; AVX512VL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[u],zero
+; AVX512VL-NEXT:    vpmovqb %ymm0, %xmm0
 ; AVX512VL-NEXT:    vzeroupper
 ; AVX512VL-NEXT:    retq
 ;
@@ -1049,15 +1024,13 @@
 ;
 ; AVX512BWVL-LABEL: trunc_v4i64_to_v4i8_return_v16i8:
 ; AVX512BWVL:       # %bb.0:
-; AVX512BWVL-NEXT:    vpmovqd %ymm0, %xmm0
-; AVX512BWVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[u],zero
+; AVX512BWVL-NEXT:    vpmovqb %ymm0, %xmm0
 ; AVX512BWVL-NEXT:    vzeroupper
 ; AVX512BWVL-NEXT:    retq
 ;
 ; AVX512VBMIVL-LABEL: trunc_v4i64_to_v4i8_return_v16i8:
 ; AVX512VBMIVL:       # %bb.0:
-; AVX512VBMIVL-NEXT:    vpmovqd %ymm0, %xmm0
-; AVX512VBMIVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[u],zero
+; AVX512VBMIVL-NEXT:    vpmovqb %ymm0, %xmm0
 ; AVX512VBMIVL-NEXT:    vzeroupper
 ; AVX512VBMIVL-NEXT:    retq
   %truncated = trunc <4 x i64> %vec to <4 x i8>
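The trunc_v4i64_to_v4i8_return_v16i8 hunks just above exercise the stride-4 path of maskContainsSequenceForVPMOV (Delta == 4): the truncated bytes sit four positions apart in the bitcast vector, so the matched mask begins <0, 4, 8, 12, ...>. A sketch of that IR shape, modeled on the test but with a fully defined zero tail (the function name is illustrative; the test itself leaves some mask lanes undef, which the matcher also accepts):

define <16 x i8> @trunc_v4i64_to_v4i8_sketch(<4 x i64> %vec) nounwind {
  %truncated = trunc <4 x i64> %vec to <4 x i8>
  ; Lanes 0-3 take the truncated bytes; every remaining lane selects from
  ; the second, all-zero operand (indices 4-7).
  %result = shufflevector <4 x i8> %truncated, <4 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
  ret <16 x i8> %result
}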
Index: test/CodeGen/X86/shuffle-vs-trunc-512.ll
===================================================================
--- test/CodeGen/X86/shuffle-vs-trunc-512.ll
+++ test/CodeGen/X86/shuffle-vs-trunc-512.ll
@@ -943,12 +943,44 @@
 }
 
 define <16 x i8> @trunc_v8i64_to_v8i8_return_v16i8(<8 x i64> %vec) nounwind {
-; AVX512-LABEL: trunc_v8i64_to_v8i8_return_v16i8:
-; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpmovqw %zmm0, %xmm0
-; AVX512-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512-NEXT:    vzeroupper
-; AVX512-NEXT:    retq
+; AVX512F-LABEL: trunc_v8i64_to_v8i8_return_v16i8:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vpmovqw %zmm0, %xmm0
+; AVX512F-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    vzeroupper
+; AVX512F-NEXT:    retq
+;
+; AVX512VL-LABEL: trunc_v8i64_to_v8i8_return_v16i8:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vpmovqb %zmm0, %xmm0
+; AVX512VL-NEXT:    vzeroupper
+; AVX512VL-NEXT:    retq
+;
+; AVX512BW-LABEL: trunc_v8i64_to_v8i8_return_v16i8:
+; AVX512BW:       # %bb.0:
+; AVX512BW-NEXT:    vpmovqw %zmm0, %xmm0
+; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-NEXT:    vzeroupper
+; AVX512BW-NEXT:    retq
+;
+; AVX512BWVL-LABEL: trunc_v8i64_to_v8i8_return_v16i8:
+; AVX512BWVL:       # %bb.0:
+; AVX512BWVL-NEXT:    vpmovqb %zmm0, %xmm0
+; AVX512BWVL-NEXT:    vzeroupper
+; AVX512BWVL-NEXT:    retq
+;
+; AVX512VBMI-LABEL: trunc_v8i64_to_v8i8_return_v16i8:
+; AVX512VBMI:       # %bb.0:
+; AVX512VBMI-NEXT:    vpmovqw %zmm0, %xmm0
+; AVX512VBMI-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VBMI-NEXT:    vzeroupper
+; AVX512VBMI-NEXT:    retq
+;
+; AVX512VBMIVL-LABEL: trunc_v8i64_to_v8i8_return_v16i8:
+; AVX512VBMIVL:       # %bb.0:
+; AVX512VBMIVL-NEXT:    vpmovqb %zmm0, %xmm0
+; AVX512VBMIVL-NEXT:    vzeroupper
+; AVX512VBMIVL-NEXT:    retq
   %truncated = trunc <8 x i64> %vec to <8 x i8>
   %result = shufflevector <8 x i8> %truncated, <8 x i8> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   ret <16 x i8> %result
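To reproduce the updated 512-bit checks by hand, the test can be run through llc in the usual way; a sketch following the RUN-line convention these test files already use (the file's actual RUN lines and check-prefix sets are authoritative):

; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512vl | FileCheck %s --check-prefix=AVX512VL

With VLX available, trunc_v8i64_to_v8i8_return_v16i8 collapses to a single vpmovqb %zmm0, %xmm0 (plus vzeroupper), replacing the previous vpmovqw+vpshufb pair.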