Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -399,6 +399,7 @@
     SDValue reduceBuildVecExtToExtBuildVec(SDNode *N);
     SDValue reduceBuildVecConvertToConvertBuildVec(SDNode *N);
     SDValue reduceBuildVecToShuffle(SDNode *N);
+    SDValue reduceBuildVecToTrunc(SDNode *N);
     SDValue createBuildVecShuffle(const SDLoc &DL, SDNode *N,
                                   ArrayRef<int> VectorMask, SDValue VecIn1,
                                   SDValue VecIn2, unsigned LeftIdx);
@@ -14224,6 +14225,74 @@
   return Shuffles[0];
 }
 
+// Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT
+// operations that can be matched to a truncate.
+SDValue DAGCombiner::reduceBuildVecToTrunc(SDNode *N) {
+  // TODO: Add support for big-endian.
+  if (DAG.getDataLayout().isBigEndian())
+    return SDValue();
+  if (N->getNumOperands() < 2)
+    return SDValue();
+  SDLoc DL(N);
+  EVT VT = N->getValueType(0);
+  unsigned NumElems = N->getNumOperands();
+
+  if (!isTypeLegal(VT))
+    return SDValue();
+
+  // If the input is something other than an EXTRACT_VECTOR_ELT with a constant
+  // index, bail out.
+  // TODO: Allow undef elements in some cases?
+  if (any_of(make_range(N->op_begin(), N->op_end()), [VT](SDValue Op) {
+        return Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+               !isa<ConstantSDNode>(Op.getOperand(1)) ||
+               Op.getValueType() != VT.getVectorElementType();
+      }))
+    return SDValue();
+
+  // Helper for obtaining an EXTRACT_VECTOR_ELT's constant index operand.
+  auto GetExtractIdx = [](SDValue Extract) {
+    return cast<ConstantSDNode>(Extract.getOperand(1))->getSExtValue();
+  };
+
+  // The first BUILD_VECTOR operand must be an extract from index zero
+  // (assuming no undef and little-endian).
+  if (GetExtractIdx(N->getOperand(0)) != 0)
+    return SDValue();
+
+  // Compute the stride from the second operand's extract index.
+  int Stride = GetExtractIdx(N->getOperand(1));
+  SDValue ExtractedFromVec = N->getOperand(1).getOperand(0);
+
+  // Proceed only if the stride and the types can be matched to a truncate.
+  if ((Stride == 1 || !isPowerOf2_32(Stride)) ||
+      (ExtractedFromVec.getValueType().getVectorNumElements() !=
+       Stride * NumElems) ||
+      (VT.getScalarSizeInBits() * Stride > 64))
+    return SDValue();
+
+  // Check that the remaining operands are consistent with the computed stride.
+  for (unsigned i = 1; i != NumElems; ++i) {
+    SDValue Op = N->getOperand(i);
+
+    if ((Op.getOperand(0) != ExtractedFromVec) ||
+        (GetExtractIdx(Op) != Stride * i))
+      return SDValue();
+  }
+
+  // All checks were ok, construct the truncate.
+  EVT NewVT = VT.getVectorVT(
+      *DAG.getContext(), MVT::getIntegerVT(VT.getScalarSizeInBits() * Stride),
+      NumElems);
+  EVT TruncVT =
+      VT.isFloatingPoint() ? VT.changeVectorElementTypeToInteger() : VT;
+  SDValue Res = DAG.getBitcast(NewVT, ExtractedFromVec);
+  Res = DAG.getNode(ISD::TRUNCATE, SDLoc(N), TruncVT, Res);
+  if (VT.isFloatingPoint())
+    Res = DAG.getBitcast(VT, Res);
+  return Res;
+}
+
 SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
   EVT VT = N->getValueType(0);
 
@@ -14266,6 +14335,9 @@
   if (SDValue V = reduceBuildVecConvertToConvertBuildVec(N))
     return V;
 
+  if (SDValue V = reduceBuildVecToTrunc(N))
+    return V;
+
   if (SDValue V = reduceBuildVecToShuffle(N))
     return V;
 
Index: test/CodeGen/ARM/vext.ll
===================================================================
--- test/CodeGen/ARM/vext.ll
+++ test/CodeGen/ARM/vext.ll
@@ -199,10 +199,10 @@
 define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 ; CHECK-LABEL: test_undef:
 ; CHECK: @ BB#0:
-; CHECK-NEXT: vldr d16, [r1]
-; CHECK-NEXT: vldr d17, [r0, #8]
+; CHECK-NEXT: vldr d16, [r1]
+; CHECK-NEXT: vldr d17, [r0, #8]
 ; CHECK-NEXT: vzip.16 d17, d16
-; CHECK-NEXT: vmov r0, r1, d17
+; CHECK-NEXT: vmov r0, r1, d17
 ; CHECK-NEXT: mov pc, lr
 %tmp1 = load <8 x i16>, <8 x i16>* %A
 %tmp2 = load <8 x i16>, <8 x i16>* %B
@@ -242,9 +242,8 @@
 ; CHECK-LABEL: test_largespan:
 ; CHECK: @ BB#0:
 ; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vorr d18, d16, d16
-; CHECK-NEXT: vuzp.16 d18, d17
-; CHECK-NEXT: vmov r0, r1, d18
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vmov r0, r1, d16
 ; CHECK-NEXT: mov pc, lr
 %tmp1 = load <8 x i16>, <8 x i16>* %B
 %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
Index: test/CodeGen/ARM/vpadd.ll
===================================================================
--- test/CodeGen/ARM/vpadd.ll
+++ test/CodeGen/ARM/vpadd.ll
@@ -218,7 +218,11 @@
 ; CHECK-LABEL: addCombineToVPADD_i8:
 ; CHECK: @ BB#0:
 ; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vpadd.i8 d16, d16, d17
+; CHECK-NEXT: vorr d18, d16, d16
+; CHECK-NEXT: vorr d19, d17, d17
+; CHECK-NEXT: vmovn.i16 d16, q8
+; CHECK-NEXT: vuzp.8 d18, d19
+; CHECK-NEXT: vadd.i8 d16, d19, d16
 ; CHECK-NEXT: vstr d16, [r1]
 ; CHECK-NEXT: mov pc, lr
 %tmp = load <16 x i8>, <16 x i8>* %cbcr
@@ -235,7 +239,11 @@
 ; CHECK-LABEL: addCombineToVPADD_i16:
 ; CHECK: @ BB#0:
 ; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vpadd.i16 d16, d16, d17
+; CHECK-NEXT: vorr d18, d16, d16
+; CHECK-NEXT: vorr d19, d17, d17
+; CHECK-NEXT: vmovn.i32 d16, q8
+; CHECK-NEXT: vuzp.16 d18, d19
+; CHECK-NEXT: vadd.i16 d16, d19, d16
 ; CHECK-NEXT: vstr d16, [r1]
 ; CHECK-NEXT: mov pc, lr
 %tmp = load <8 x i16>, <8 x i16>* %cbcr
@@ -251,7 +259,11 @@
 ; CHECK-LABEL: addCombineToVPADD_i32:
 ; CHECK: @ BB#0:
 ; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vpadd.i32 d16, d16, d17
+; CHECK-NEXT: vorr d18, d16, d16
+; CHECK-NEXT: vorr d19, d17, d17
+; CHECK-NEXT: vmovn.i64 d16, q8
+; CHECK-NEXT: vtrn.32 d18, d19
+; CHECK-NEXT: vadd.i32 d16, d19, d16
 ; CHECK-NEXT: vstr d16, [r1]
 ; CHECK-NEXT: mov pc, lr
 %tmp = load <4 x i32>, <4 x i32>* %cbcr
@@ -267,7 +279,13 @@
 ; CHECK-LABEL: addCombineToVPADDLq_s8:
 ; CHECK: @ BB#0:
 ; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vpaddl.s8 q8, q8
+; CHECK-NEXT: vmov.i16 q9, #0x8
+; CHECK-NEXT: vneg.s16 q9, q9
+; CHECK-NEXT: vshl.i16 q10, q8, #8
+; CHECK-NEXT: vorr d22, d16, d16
+; CHECK-NEXT: vuzp.8 d22, d17
+; CHECK-NEXT: vshl.s16 q9, q10, q9
+; CHECK-NEXT: vaddw.s8 q8, q9, d17
 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
 ; CHECK-NEXT: mov pc, lr
 %tmp = load <16 x i8>, <16 x i8>* %cbcr
@@ -311,7 +329,11 @@
 ; CHECK-LABEL: addCombineToVPADDLq_u8:
 ; CHECK: @ BB#0:
 ; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vpaddl.u8 q8, q8
+; CHECK-NEXT: vorr d18, d16, d16
+; CHECK-NEXT: vorr d19, d17, d17
+; CHECK-NEXT: vbic.i16 q8, #0xff00
+; CHECK-NEXT: vuzp.8 d18, d19
+; CHECK-NEXT: vaddw.u8 q8, q8, d19
 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
 ; CHECK-NEXT: mov pc, lr
 %tmp = load <16 x i8>, <16 x i8>* %cbcr
@@ -332,8 +354,10 @@
 ; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
 ; CHECK-NEXT: vmovl.u8 q9, d17
 ; CHECK-NEXT: vmovl.u8 q8, d16
+; CHECK-NEXT: vmovn.i32 d21, q9
+; CHECK-NEXT: vmovn.i32 d20, q8
 ; CHECK-NEXT: vuzp.16 q8, q9
-; CHECK-NEXT: vadd.i16 q8, q8, q9
+; CHECK-NEXT: vadd.i16 q8, q10, q9
 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
 ; CHECK-NEXT: mov pc, lr
 %tmp = load <16 x i8>, <16 x i8>* %cbcr
@@ -390,7 +414,13 @@
 ; CHECK-LABEL: addCombineToVPADDLq_s16:
 ; CHECK: @ BB#0:
 ; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vpaddl.s16 q8, q8
+; CHECK-NEXT: vmov.i32 q9, #0x10
+; CHECK-NEXT: vneg.s32 q9, q9
+; CHECK-NEXT: vshl.i32 q10, q8, #16
+; CHECK-NEXT: vorr d22, d16, d16
+; CHECK-NEXT: vuzp.16 d22, d17
+; CHECK-NEXT: vshl.s32 q9, q10, q9
+; CHECK-NEXT: vaddw.s16 q8, q9, d17
 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
 ; CHECK-NEXT: mov pc, lr
 %tmp = load <8 x i16>, <8 x i16>* %cbcr
@@ -408,7 +438,12 @@
 ; CHECK-LABEL: addCombineToVPADDLq_u16:
 ; CHECK: @ BB#0:
 ; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vpaddl.u16 q8, q8
+; CHECK-NEXT: vmov.i32 q9, #0xffff
+; CHECK-NEXT: vorr d20, d17, d17
+; CHECK-NEXT: vorr d21, d16, d16
+; CHECK-NEXT: vand q8, q8, q9
+; CHECK-NEXT: vuzp.16 d21, d20
+; CHECK-NEXT: vaddw.u16 q8, q8, d20
 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
 ; CHECK-NEXT: mov pc, lr
 %tmp = load <8 x i16>, <8 x i16>* %cbcr
@@ -425,10 +460,25 @@
 define void @addCombineToVPADDLq_s32(<4 x i32> *%cbcr, <2 x i64> *%X) nounwind ssp {
 ; CHECK-LABEL: addCombineToVPADDLq_s32:
 ; CHECK: @ BB#0:
-; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vpaddl.s32 q8, q8
+; CHECK-NEXT: adr r2, .LCPI27_0
+; CHECK-NEXT: vld1.64 {d18, d19}, [r0]
+; CHECK-NEXT: vmov.i32 q10, #0x0
+; CHECK-NEXT: vld1.64 {d16, d17}, [r2:128]
+; CHECK-NEXT: vorr d22, d18, d18
+; CHECK-NEXT: vsub.i64 q8, q10, q8
+; CHECK-NEXT: vshl.i64 q10, q9, #32
+; CHECK-NEXT: vtrn.32 d22, d19
+; CHECK-NEXT: vshl.s64 q8, q10, q8
+; CHECK-NEXT: vaddw.s32 q8, q8, d19
 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
 ; CHECK-NEXT: mov pc, lr
+; CHECK-NEXT: .p2align 4
+; CHECK-NEXT: @ BB#1:
+; CHECK-NEXT: .LCPI27_0:
+; CHECK-NEXT: .long 32 @ 0x20
+; CHECK-NEXT: .long 0 @ 0x0
+; CHECK-NEXT: .long 32 @ 0x20
+; CHECK-NEXT: .long 0 @ 0x0
 %tmp = load <4 x i32>, <4 x i32>* %cbcr
 %tmp1 = shufflevector <4 x i32> %tmp, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
 %tmp3 = shufflevector <4 x i32> %tmp, <4 x i32> undef, <2 x i32> <i32 1, i32 3>
@@ -444,7 +494,12 @@
 ; CHECK-LABEL: addCombineToVPADDLq_u32:
 ; CHECK: @ BB#0:
 ; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
-; CHECK-NEXT: vpaddl.u32 q8, q8
+; CHECK-NEXT: vmov.i64 q9, #0xffffffff
+; CHECK-NEXT: vorr d20, d17, d17
+; CHECK-NEXT: vorr d21, d16, d16
+; CHECK-NEXT: vand q8, q8, q9
+; CHECK-NEXT: vtrn.32 d21, d20
+; CHECK-NEXT: vaddw.u32 q8, q8, d20
 ; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
 ; CHECK-NEXT: mov pc, lr
 %tmp = load <4 x i32>, <4 x i32>* %cbcr
@@ -462,7 +517,8 @@
 ; CHECK-LABEL: fromExtendingExtractVectorElt_i8:
 ; CHECK: @ BB#0:
 ; CHECK-NEXT: vmov d16, r0, r1
-; CHECK-NEXT: vpaddl.s8 d16, d16
+; CHECK-NEXT: vext.8 d17, d16, d16, #1
+; CHECK-NEXT: vadd.i16 d16, d17, d16
 ; CHECK-NEXT: vmov r0, r1, d16
 ; CHECK-NEXT: mov pc, lr
 %tmp1 = shufflevector <8 x i8> %in, <8 x i8> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -476,7 +532,11 @@
 ; CHECK-LABEL: fromExtendingExtractVectorElt_i16:
 ; CHECK: @ BB#0:
 ; CHECK-NEXT: vmov d16, r0, r1
-; CHECK-NEXT: vpaddl.s16 d16, d16
+; CHECK-NEXT: vmov.u16 r1, d16[1]
+; CHECK-NEXT: vmov.u16 r0, d16[3]
+; CHECK-NEXT: vmov.32 d17[0], r1
+; CHECK-NEXT: vmov.32 d17[1], r0
+; CHECK-NEXT: vadd.i32 d16, d17, d16
 ; CHECK-NEXT: vmov r0, r1, d16
 ; CHECK-NEXT: mov pc, lr
 %tmp1 = shufflevector <4 x i16> %in, <4 x i16> undef, <2 x i32> <i32 0, i32 2>
@@ -488,7 +548,19 @@
 ; And <2 x i8> to <2 x i32>
 define <2 x i8> @fromExtendingExtractVectorElt_2i8(<8 x i8> %in) {
 ; CHECK-LABEL: fromExtendingExtractVectorElt_2i8:
-; CHECK: vadd.i32
+; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov.u8 r2, d16[1]
+; CHECK-NEXT: vmov.u8 r3, d16[0]
+; CHECK-NEXT: vmov.u8 r0, d16[3]
+; CHECK-NEXT: vmov.u8 r1, d16[2]
+; CHECK-NEXT: vmov.32 d16[0], r3
+; CHECK-NEXT: vmov.32 d17[0], r2
+; CHECK-NEXT: vmov.32 d16[1], r1
+; CHECK-NEXT: vmov.32 d17[1], r0
+; CHECK-NEXT: vadd.i32 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
 %tmp1 = shufflevector <8 x i8> %in, <8 x i8> undef, <2 x i32> <i32 0, i32 2>
 %tmp2 = shufflevector <8 x i8> %in, <8 x i8> undef, <2 x i32> <i32 1, i32 3>
 %x = add <2 x i8> %tmp2, %tmp1
@@ -497,7 +569,20 @@
 define <2 x i16> @fromExtendingExtractVectorElt_2i16(<8 x i16> %in) {
 ; CHECK-LABEL: fromExtendingExtractVectorElt_2i16:
-; CHECK: vadd.i32
+; CHECK: @ BB#0:
+; CHECK-NEXT: vmov d17, r2, r3
+; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov.u16 r2, d16[1]
+; CHECK-NEXT: vmov.u16 r3, d16[0]
+; CHECK-NEXT: vmov.u16 r0, d16[3]
+; CHECK-NEXT: vmov.u16 r1, d16[2]
+; CHECK-NEXT: vmov.32 d16[0], r3
+; CHECK-NEXT: vmov.32 d17[0], r2
+; CHECK-NEXT: vmov.32 d16[1], r1
+; CHECK-NEXT: vmov.32 d17[1], r0
+; CHECK-NEXT: vadd.i32 d16, d17, d16
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: mov pc, lr
 %tmp1 = shufflevector <8 x i16> %in, <8 x i16> undef, <2 x i32> <i32 0, i32 2>
 %tmp2 = shufflevector <8 x i16> %in, <8 x i16> undef, <2 x i32> <i32 1, i32 3>
 %x = add <2 x i16> %tmp2, %tmp1
Index: test/CodeGen/ARM/vuzp.ll
===================================================================
--- test/CodeGen/ARM/vuzp.ll
+++ test/CodeGen/ARM/vuzp.ll
@@ -553,10 +553,12 @@
 ; CHECK: @ BB#0:
 ; CHECK-NEXT: vmov d17, r2, r3
 ; CHECK-NEXT: vmov d16, r0, r1
-; CHECK-NEXT: vorr d18, d17, d17
-; CHECK-NEXT: vuzp.8 d16, d18
+; CHECK-NEXT: vorr d18, d16, d16
+; CHECK-NEXT: vorr d19, d17, d17
+; CHECK-NEXT: vmovn.i16 d16, q8
+; CHECK-NEXT: vuzp.8 d18, d19
 ; CHECK-NEXT: vmov r0, r1, d16
-; CHECK-NEXT: vmov r2, r3, d18
+; CHECK-NEXT: vmov r2, r3, d19
 ; CHECK-NEXT: mov pc, lr
 %vuzp.i = shufflevector <16 x i8> %t, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
Index: test/CodeGen/X86/shuffle-vs-trunc-256.ll
===================================================================
--- test/CodeGen/X86/shuffle-vs-trunc-256.ll
+++ test/CodeGen/X86/shuffle-vs-trunc-256.ll
@@ -24,24 +24,16 @@
 ;
 ; AVX512F-LABEL: shuffle_v32i8_to_v16i8:
 ; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
-; AVX512F-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX512F-NEXT: vpmovsxwd (%rdi), %zmm0
+; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
 ; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
 ; AVX512VL-LABEL: shuffle_v32i8_to_v16i8:
 ; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = 
<0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> -; AVX512VL-NEXT: vpshufb %xmm2, %xmm1, %xmm1 -; AVX512VL-NEXT: vpshufb %xmm2, %xmm0, %xmm0 -; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX512VL-NEXT: vpmovsxwd (%rdi), %zmm0 +; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0 ; AVX512VL-NEXT: vmovdqa %xmm0, (%rsi) ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq @@ -49,11 +41,7 @@ ; AVX512BW-LABEL: shuffle_v32i8_to_v16i8: ; AVX512BW: # BB#0: ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> -; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1 -; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0 -; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0 ; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq @@ -61,12 +49,7 @@ ; AVX512BWVL-LABEL: shuffle_v32i8_to_v16i8: ; AVX512BWVL: # BB#0: ; AVX512BWVL-NEXT: vmovdqu (%rdi), %ymm0 -; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BWVL-NEXT: vmovdqu {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> -; AVX512BWVL-NEXT: vpshufb %xmm2, %xmm1, %xmm1 -; AVX512BWVL-NEXT: vpshufb %xmm2, %xmm0, %xmm0 -; AVX512BWVL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX512BWVL-NEXT: vmovdqu %xmm0, (%rsi) +; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rsi) ; AVX512BWVL-NEXT: vzeroupper ; AVX512BWVL-NEXT: retq %vec = load <32 x i8>, <32 x i8>* %L @@ -129,11 +112,8 @@ ; AVX-LABEL: shuffle_v16i16_to_v8i16: ; AVX: # BB#0: ; AVX-NEXT: vmovdqa (%rdi), %ymm0 -; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] -; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1 -; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0 -; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] +; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] ; AVX-NEXT: vmovdqa %xmm0, (%rsi) ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq @@ -141,11 +121,7 @@ ; AVX512F-LABEL: shuffle_v16i16_to_v8i16: ; AVX512F: # BB#0: ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] -; AVX512F-NEXT: vpshufb %xmm2, %xmm1, %xmm1 -; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0 -; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 ; AVX512F-NEXT: vmovdqa %xmm0, (%rsi) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq @@ -153,42 +129,22 @@ ; AVX512VL-LABEL: shuffle_v16i16_to_v8i16: ; AVX512VL: # BB#0: ; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7] -; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7] -; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; AVX512VL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7] -; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX512VL-NEXT: vmovdqa %xmm0, (%rsi) +; AVX512VL-NEXT: vpmovdw %ymm0, (%rsi) ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq ; ; AVX512BW-LABEL: shuffle_v16i16_to_v8i16: ; AVX512BW: # BB#0: ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BW-NEXT: vmovdqa {{.*#+}} 
xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] -; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1 -; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0 -; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0 ; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; ; AVX512BWVL-LABEL: shuffle_v16i16_to_v8i16: ; AVX512BWVL: # BB#0: -; AVX512BWVL-NEXT: vmovdqu (%rdi), %ymm0 -; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BWVL-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7] -; AVX512BWVL-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7] -; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; AVX512BWVL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; AVX512BWVL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7] -; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; AVX512BWVL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX512BWVL-NEXT: vmovdqu %xmm0, (%rsi) +; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512BWVL-NEXT: vpmovdw %ymm0, (%rsi) ; AVX512BWVL-NEXT: vzeroupper ; AVX512BWVL-NEXT: retq %vec = load <16 x i16>, <16 x i16>* %L @@ -246,46 +202,39 @@ define void @shuffle_v8i32_to_v4i32(<8 x i32>* %L, <4 x i32>* %S) nounwind { ; AVX-LABEL: shuffle_v8i32_to_v4i32: ; AVX: # BB#0: -; AVX-NEXT: vmovaps (%rdi), %ymm0 -; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; AVX-NEXT: vmovaps %xmm0, (%rsi) +; AVX-NEXT: vpshufd {{.*#+}} ymm0 = mem[0,2,2,3,4,6,6,7] +; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] +; AVX-NEXT: vmovdqa %xmm0, (%rsi) ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq ; ; AVX512F-LABEL: shuffle_v8i32_to_v4i32: ; AVX512F: # BB#0: -; AVX512F-NEXT: vmovaps (%rdi), %ymm0 -; AVX512F-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512F-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; AVX512F-NEXT: vmovaps %xmm0, (%rsi) +; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512F-NEXT: vpmovqd %zmm0, %ymm0 +; AVX512F-NEXT: vmovdqa %xmm0, (%rsi) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: shuffle_v8i32_to_v4i32: ; AVX512VL: # BB#0: ; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512VL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; AVX512VL-NEXT: vmovaps %xmm0, (%rsi) +; AVX512VL-NEXT: vpmovqd %ymm0, (%rsi) ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq ; ; AVX512BW-LABEL: shuffle_v8i32_to_v4i32: ; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovaps (%rdi), %ymm0 -; AVX512BW-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512BW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; AVX512BW-NEXT: vmovaps %xmm0, (%rsi) +; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0 +; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; ; AVX512BWVL-LABEL: shuffle_v8i32_to_v4i32: ; AVX512BWVL: # BB#0: ; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BWVL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; AVX512BWVL-NEXT: vmovaps %xmm0, (%rsi) +; AVX512BWVL-NEXT: vpmovqd %ymm0, (%rsi) ; AVX512BWVL-NEXT: vzeroupper ; AVX512BWVL-NEXT: retq %vec = load <8 x i32>, <8 x i32>* %L @@ -343,11 +292,9 @@ ; AVX-LABEL: shuffle_v32i8_to_v8i8: ; AVX: # BB#0: ; AVX-NEXT: vmovdqa (%rdi), %ymm0 -; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u> -; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1 -; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0 -; AVX-NEXT: 
vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] +; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] +; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] ; AVX-NEXT: vmovq %xmm0, (%rsi) ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq @@ -355,11 +302,8 @@ ; AVX512F-LABEL: shuffle_v32i8_to_v8i8: ; AVX512F: # BB#0: ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u> -; AVX512F-NEXT: vpshufb %xmm2, %xmm1, %xmm1 -; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0 -; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] ; AVX512F-NEXT: vmovq %xmm0, (%rsi) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq @@ -367,39 +311,23 @@ ; AVX512VL-LABEL: shuffle_v32i8_to_v8i8: ; AVX512VL: # BB#0: ; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u> -; AVX512VL-NEXT: vpshufb %xmm2, %xmm1, %xmm1 -; AVX512VL-NEXT: vpshufb %xmm2, %xmm0, %xmm0 -; AVX512VL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; AVX512VL-NEXT: vmovq %xmm0, (%rsi) +; AVX512VL-NEXT: vpmovdb %ymm0, (%rsi) ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq ; ; AVX512BW-LABEL: shuffle_v32i8_to_v8i8: ; AVX512BW: # BB#0: ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u> -; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1 -; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0 -; AVX512BW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u] ; AVX512BW-NEXT: vmovq %xmm0, (%rsi) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; ; AVX512BWVL-LABEL: shuffle_v32i8_to_v8i8: ; AVX512BWVL: # BB#0: -; AVX512BWVL-NEXT: vmovdqu (%rdi), %ymm0 -; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BWVL-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7] -; AVX512BWVL-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7] -; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; AVX512BWVL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; AVX512BWVL-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7] -; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; AVX512BWVL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] -; AVX512BWVL-NEXT: vpmovwb %xmm0, (%rsi) +; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512BWVL-NEXT: vpmovdb %ymm0, (%rsi) ; AVX512BWVL-NEXT: vzeroupper ; AVX512BWVL-NEXT: retq %vec = load <32 x i8>, <32 x i8>* %L @@ -460,13 +388,9 @@ define void @shuffle_v16i16_to_v4i16(<16 x i16>* %L, <4 x i16>* %S) nounwind { ; AVX-LABEL: shuffle_v16i16_to_v4i16: ; AVX: # BB#0: -; AVX-NEXT: vmovdqa (%rdi), %ymm0 -; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; AVX-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7] -; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX-NEXT: vpshufd {{.*#+}} ymm0 = 
mem[0,2,2,3,4,6,6,7] +; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] +; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] ; AVX-NEXT: vmovq %xmm0, (%rsi) ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq @@ -474,12 +398,8 @@ ; AVX512F-LABEL: shuffle_v16i16_to_v4i16: ; AVX512F: # BB#0: ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; AVX512F-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7] -; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX512F-NEXT: vpmovqd %zmm0, %ymm0 +; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] ; AVX512F-NEXT: vmovq %xmm0, (%rsi) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq @@ -487,31 +407,23 @@ ; AVX512VL-LABEL: shuffle_v16i16_to_v4i16: ; AVX512VL: # BB#0: ; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512VL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; AVX512VL-NEXT: vpmovdw %xmm0, (%rsi) +; AVX512VL-NEXT: vpmovqw %ymm0, (%rsi) ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq ; ; AVX512BW-LABEL: shuffle_v16i16_to_v4i16: ; AVX512BW: # BB#0: ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; AVX512BW-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7] -; AVX512BW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; AVX512BW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7] -; AVX512BW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0 +; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] ; AVX512BW-NEXT: vmovq %xmm0, (%rsi) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; ; AVX512BWVL-LABEL: shuffle_v16i16_to_v4i16: ; AVX512BWVL: # BB#0: -; AVX512BWVL-NEXT: vmovdqu (%rdi), %ymm0 -; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BWVL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; AVX512BWVL-NEXT: vpmovdw %xmm0, (%rsi) +; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512BWVL-NEXT: vpmovqw %ymm0, (%rsi) ; AVX512BWVL-NEXT: vzeroupper ; AVX512BWVL-NEXT: retq %vec = load <16 x i16>, <16 x i16>* %L @@ -571,12 +483,9 @@ define void @shuffle_v32i8_to_v4i8(<32 x i8>* %L, <4 x i8>* %S) nounwind { ; AVX-LABEL: shuffle_v32i8_to_v4i8: ; AVX: # BB#0: -; AVX-NEXT: vmovdqa (%rdi), %ymm0 -; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u> -; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1 -; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0 -; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; AVX-NEXT: vpshufd {{.*#+}} ymm0 = mem[0,2,2,3,4,6,6,7] +; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] +; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] ; AVX-NEXT: vmovd %xmm0, (%rsi) ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq @@ -584,11 +493,8 @@ ; AVX512F-LABEL: shuffle_v32i8_to_v4i8: ; AVX512F: # BB#0: ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u> -; AVX512F-NEXT: vpshufb %xmm2, %xmm1, %xmm1 -; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0 -; AVX512F-NEXT: vpunpcklwd {{.*#+}} xmm0 = 
xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; AVX512F-NEXT: vpmovqd %zmm0, %ymm0 +; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] ; AVX512F-NEXT: vmovd %xmm0, (%rsi) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq @@ -596,30 +502,23 @@ ; AVX512VL-LABEL: shuffle_v32i8_to_v4i8: ; AVX512VL: # BB#0: ; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512VL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; AVX512VL-NEXT: vpmovdb %xmm0, (%rsi) +; AVX512VL-NEXT: vpmovqb %ymm0, (%rsi) ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq ; ; AVX512BW-LABEL: shuffle_v32i8_to_v4i8: ; AVX512BW: # BB#0: ; AVX512BW-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u> -; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1 -; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0 -; AVX512BW-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0 +; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] ; AVX512BW-NEXT: vmovd %xmm0, (%rsi) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; ; AVX512BWVL-LABEL: shuffle_v32i8_to_v4i8: ; AVX512BWVL: # BB#0: -; AVX512BWVL-NEXT: vmovdqu (%rdi), %ymm0 -; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BWVL-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; AVX512BWVL-NEXT: vpmovdb %xmm0, (%rsi) +; AVX512BWVL-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512BWVL-NEXT: vpmovqb %ymm0, (%rsi) ; AVX512BWVL-NEXT: vzeroupper ; AVX512BWVL-NEXT: retq %vec = load <32 x i8>, <32 x i8>* %L Index: test/CodeGen/X86/shuffle-vs-trunc-512.ll =================================================================== --- test/CodeGen/X86/shuffle-vs-trunc-512.ll +++ test/CodeGen/X86/shuffle-vs-trunc-512.ll @@ -11,49 +11,37 @@ define void @shuffle_v64i8_to_v32i8(<64 x i8>* %L, <32 x i8>* %S) nounwind { ; AVX512F-LABEL: shuffle_v64i8_to_v32i8: ; AVX512F: # BB#0: -; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1 -; AVX512F-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30] -; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u] -; AVX512F-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] -; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] +; AVX512F-NEXT: vpmovsxwd (%rdi), %zmm0 +; AVX512F-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512F-NEXT: vpmovsxwd 32(%rdi), %zmm1 +; AVX512F-NEXT: vpmovdb %zmm1, %xmm1 +; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 ; AVX512F-NEXT: vmovdqa %ymm0, (%rsi) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: shuffle_v64i8_to_v32i8: ; AVX512VL: # BB#0: -; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1 -; AVX512VL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30] -; AVX512VL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u] -; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] -; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] +; AVX512VL-NEXT: vpmovsxwd (%rdi), %zmm0 +; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0 +; AVX512VL-NEXT: vpmovsxwd 32(%rdi), %zmm1 +; AVX512VL-NEXT: vpmovdb %zmm1, %xmm1 +; AVX512VL-NEXT: vinserti128 $1, 
%xmm1, %ymm0, %ymm0 ; AVX512VL-NEXT: vmovdqa %ymm0, (%rsi) ; AVX512VL-NEXT: vzeroupper ; AVX512VL-NEXT: retq ; ; AVX512BW-LABEL: shuffle_v64i8_to_v32i8: ; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovdqu8 (%rdi), %zmm0 -; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; AVX512BW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30] -; AVX512BW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u] -; AVX512BW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] -; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] -; AVX512BW-NEXT: vmovdqa %ymm0, (%rsi) +; AVX512BW-NEXT: vmovdqu16 (%rdi), %zmm0 +; AVX512BW-NEXT: vpmovwb %zmm0, (%rsi) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq ; ; AVX512BWVL-LABEL: shuffle_v64i8_to_v32i8: ; AVX512BWVL: # BB#0: -; AVX512BWVL-NEXT: vmovdqu8 (%rdi), %zmm0 -; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; AVX512BWVL-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30] -; AVX512BWVL-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u,16,18,20,22,24,26,28,30,u,u,u,u,u,u,u,u] -; AVX512BWVL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] -; AVX512BWVL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] -; AVX512BWVL-NEXT: vmovdqu %ymm0, (%rsi) +; AVX512BWVL-NEXT: vmovdqu16 (%rdi), %zmm0 +; AVX512BWVL-NEXT: vpmovwb %zmm0, (%rsi) ; AVX512BWVL-NEXT: vzeroupper ; AVX512BWVL-NEXT: retq %vec = load <64 x i8>, <64 x i8>* %L @@ -106,54 +94,12 @@ } define void @shuffle_v32i16_to_v16i16(<32 x i16>* %L, <16 x i16>* %S) nounwind { -; AVX512F-LABEL: shuffle_v32i16_to_v16i16: -; AVX512F: # BB#0: -; AVX512F-NEXT: vpshuflw {{.*#+}} ymm0 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] -; AVX512F-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] -; AVX512F-NEXT: vpshuflw {{.*#+}} ymm1 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] -; AVX512F-NEXT: vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] -; AVX512F-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,2],ymm0[0,2],ymm1[4,6],ymm0[4,6] -; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] -; AVX512F-NEXT: vmovdqa %ymm0, (%rsi) -; AVX512F-NEXT: vzeroupper -; AVX512F-NEXT: retq -; -; AVX512VL-LABEL: shuffle_v32i16_to_v16i16: -; AVX512VL: # BB#0: -; AVX512VL-NEXT: vpshuflw {{.*#+}} ymm0 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] -; AVX512VL-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] -; AVX512VL-NEXT: vpshuflw {{.*#+}} ymm1 = mem[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] -; AVX512VL-NEXT: vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] -; AVX512VL-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,2],ymm0[0,2],ymm1[4,6],ymm0[4,6] -; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] -; AVX512VL-NEXT: vmovdqa %ymm0, (%rsi) -; AVX512VL-NEXT: vzeroupper -; AVX512VL-NEXT: retq -; -; AVX512BW-LABEL: shuffle_v32i16_to_v16i16: -; AVX512BW: # BB#0: -; AVX512BW-NEXT: vmovdqu16 (%rdi), %zmm0 -; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm1 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] -; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] -; AVX512BW-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] -; AVX512BW-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] -; AVX512BW-NEXT: vshufps {{.*#+}} ymm0 = 
ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
-; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX512BW-NEXT: vmovdqa %ymm0, (%rsi)
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: shuffle_v32i16_to_v16i16:
-; AVX512BWVL: # BB#0:
-; AVX512BWVL-NEXT: vmovdqu16 (%rdi), %zmm0
-; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512BWVL-NEXT: vmovdqu {{.*#+}} ymm2 = [0,2,4,6,16,18,20,22,8,10,12,14,24,26,28,30]
-; AVX512BWVL-NEXT: vpermi2w %ymm1, %ymm0, %ymm2
-; AVX512BWVL-NEXT: vpermq {{.*#+}} ymm0 = ymm2[0,2,1,3]
-; AVX512BWVL-NEXT: vmovdqu %ymm0, (%rsi)
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
+; AVX512-LABEL: shuffle_v32i16_to_v16i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqa32 (%rdi), %zmm0
+; AVX512-NEXT: vpmovdw %zmm0, (%rsi)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
 %vec = load <32 x i16>, <32 x i16>* %L
 %strided.vec = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
 store <16 x i16> %strided.vec, <16 x i16>* %S
@@ -177,11 +123,8 @@
 define void @shuffle_v16i32_to_v8i32(<16 x i32>* %L, <8 x i32>* %S) nounwind {
 ; AVX512-LABEL: shuffle_v16i32_to_v8i32:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vmovdqa32 (%rdi), %zmm0
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX512-NEXT: vmovdqa %ymm0, (%rsi)
+; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512-NEXT: vpmovqd %zmm0, (%rsi)
 ; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: retq
 %vec = load <16 x i32>, <16 x i32>* %L
@@ -205,127 +148,12 @@
 }
 
 define void @shuffle_v64i8_to_v16i8(<64 x i8>* %L, <16 x i8>* %S) nounwind {
-; AVX512F-LABEL: shuffle_v64i8_to_v16i8:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
-; AVX512F-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512F-NEXT: vpshufb %xmm3, %xmm1, %xmm1
-; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512F-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: shuffle_v64i8_to_v16i8:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
-; AVX512VL-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512VL-NEXT: vpshufb %xmm3, %xmm1, %xmm1
-; AVX512VL-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512VL-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512VL-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX512VL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX512VL-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
-;
-; AVX512BW-LABEL: shuffle_v64i8_to_v16i8:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovdqu8 (%rdi), %zmm0
-; AVX512BW-NEXT: vpextrb $4, %xmm0, %eax
-; AVX512BW-NEXT: vpextrb $0, 
%xmm0, %ecx -; AVX512BW-NEXT: vmovd %ecx, %xmm1 -; AVX512BW-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 -; AVX512BW-NEXT: vpextrb $8, %xmm0, %eax -; AVX512BW-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 -; AVX512BW-NEXT: vpextrb $12, %xmm0, %eax -; AVX512BW-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 -; AVX512BW-NEXT: vextracti32x4 $1, %zmm0, %xmm2 -; AVX512BW-NEXT: vpextrb $0, %xmm2, %eax -; AVX512BW-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 -; AVX512BW-NEXT: vpextrb $4, %xmm2, %eax -; AVX512BW-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 -; AVX512BW-NEXT: vpextrb $8, %xmm2, %eax -; AVX512BW-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 -; AVX512BW-NEXT: vpextrb $12, %xmm2, %eax -; AVX512BW-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 -; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, %xmm2 -; AVX512BW-NEXT: vpextrb $0, %xmm2, %eax -; AVX512BW-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 -; AVX512BW-NEXT: vpextrb $4, %xmm2, %eax -; AVX512BW-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 -; AVX512BW-NEXT: vpextrb $8, %xmm2, %eax -; AVX512BW-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 -; AVX512BW-NEXT: vpextrb $12, %xmm2, %eax -; AVX512BW-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 -; AVX512BW-NEXT: vextracti32x4 $3, %zmm0, %xmm0 -; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax -; AVX512BW-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 -; AVX512BW-NEXT: vpextrb $4, %xmm0, %eax -; AVX512BW-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 -; AVX512BW-NEXT: vpextrb $8, %xmm0, %eax -; AVX512BW-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 -; AVX512BW-NEXT: vpextrb $12, %xmm0, %eax -; AVX512BW-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0 -; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi) -; AVX512BW-NEXT: vzeroupper -; AVX512BW-NEXT: retq -; -; AVX512BWVL-LABEL: shuffle_v64i8_to_v16i8: -; AVX512BWVL: # BB#0: -; AVX512BWVL-NEXT: vmovdqu8 (%rdi), %zmm0 -; AVX512BWVL-NEXT: vpextrb $4, %xmm0, %eax -; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %ecx -; AVX512BWVL-NEXT: vmovd %ecx, %xmm1 -; AVX512BWVL-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 -; AVX512BWVL-NEXT: vpextrb $8, %xmm0, %eax -; AVX512BWVL-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 -; AVX512BWVL-NEXT: vpextrb $12, %xmm0, %eax -; AVX512BWVL-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 -; AVX512BWVL-NEXT: vextracti32x4 $1, %zmm0, %xmm2 -; AVX512BWVL-NEXT: vpextrb $0, %xmm2, %eax -; AVX512BWVL-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 -; AVX512BWVL-NEXT: vpextrb $4, %xmm2, %eax -; AVX512BWVL-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 -; AVX512BWVL-NEXT: vpextrb $8, %xmm2, %eax -; AVX512BWVL-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 -; AVX512BWVL-NEXT: vpextrb $12, %xmm2, %eax -; AVX512BWVL-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 -; AVX512BWVL-NEXT: vextracti32x4 $2, %zmm0, %xmm2 -; AVX512BWVL-NEXT: vpextrb $0, %xmm2, %eax -; AVX512BWVL-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 -; AVX512BWVL-NEXT: vpextrb $4, %xmm2, %eax -; AVX512BWVL-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 -; AVX512BWVL-NEXT: vpextrb $8, %xmm2, %eax -; AVX512BWVL-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 -; AVX512BWVL-NEXT: vpextrb $12, %xmm2, %eax -; AVX512BWVL-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 -; AVX512BWVL-NEXT: vextracti32x4 $3, %zmm0, %xmm0 -; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %eax -; AVX512BWVL-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 -; AVX512BWVL-NEXT: vpextrb $4, %xmm0, %eax -; AVX512BWVL-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 -; AVX512BWVL-NEXT: vpextrb $8, %xmm0, %eax -; AVX512BWVL-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 -; AVX512BWVL-NEXT: vpextrb $12, %xmm0, %eax -; AVX512BWVL-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0 -; AVX512BWVL-NEXT: vmovdqu %xmm0, (%rsi) -; AVX512BWVL-NEXT: vzeroupper -; AVX512BWVL-NEXT: retq +; AVX512-LABEL: shuffle_v64i8_to_v16i8: +; AVX512: 
# BB#0:
+; AVX512-NEXT: vmovdqa32 (%rdi), %zmm0
+; AVX512-NEXT: vpmovdb %zmm0, (%rsi)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
 %vec = load <64 x i8>, <64 x i8>* %L
 %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
 store <16 x i8> %strided.vec, <16 x i8>* %S
@@ -347,99 +175,12 @@
 }
 
 define void @shuffle_v32i16_to_v8i16(<32 x i16>* %L, <8 x i16>* %S) nounwind {
-; AVX512F-LABEL: shuffle_v32i16_to_v8i16:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; AVX512F-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX512F-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7]
-; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; AVX512F-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
-; AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX512F-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: shuffle_v32i16_to_v8i16:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,0,2,4,5,6,7]
-; AVX512VL-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
-; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512VL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; AVX512VL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX512VL-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
-;
-; AVX512BW-LABEL: shuffle_v32i16_to_v8i16:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovdqu16 (%rdi), %zmm0
-; AVX512BW-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX512BW-NEXT: vmovss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
-; AVX512BW-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512BW-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vextracti32x4 $1, %zmm0, %xmm2
-; AVX512BW-NEXT: vmovd %xmm2, %eax
-; AVX512BW-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrw $4, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, %xmm2
-; AVX512BW-NEXT: vmovd %xmm2, %eax
-; AVX512BW-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrw $4, %xmm2, %eax
-; AVX512BW-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vextracti32x4 $3, %zmm0, %xmm0
-; AVX512BW-NEXT: vmovd %xmm0, %eax
-; AVX512BW-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX512BW-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512BW-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
-; AVX512BW-NEXT: vmovdqa %xmm0, (%rsi)
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: shuffle_v32i16_to_v8i16:
-; AVX512BWVL: # BB#0:
-; AVX512BWVL-NEXT: vmovdqu16 (%rdi), %zmm0
-; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vmovss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
-; AVX512BWVL-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $1, %zmm0, %xmm2
-; AVX512BWVL-NEXT: vmovd %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrw $4, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $2, %zmm0, %xmm2
-; AVX512BWVL-NEXT: vmovd %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrw $4, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $3, %zmm0, %xmm0
-; AVX512BWVL-NEXT: vmovd %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrw $4, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
-; AVX512BWVL-NEXT: vmovdqu %xmm0, (%rsi)
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
+; AVX512-LABEL: shuffle_v32i16_to_v8i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512-NEXT: vpmovqw %zmm0, (%rsi)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
 %vec = load <32 x i16>, <32 x i16>* %L
 %strided.vec = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
 store <8 x i16> %strided.vec, <8 x i16>* %S
@@ -461,95 +202,12 @@
 }
 
 define void @shuffle_v64i8_to_v8i8(<64 x i8>* %L, <8 x i8>* %S) nounwind {
-; AVX512F-LABEL: shuffle_v64i8_to_v8i8:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512F-NEXT: vpshufb %xmm3, %xmm1, %xmm1
-; AVX512F-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512F-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512F-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX512F-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX512F-NEXT: vmovq %xmm0, (%rsi)
-; AVX512F-NEXT: vzeroupper
-; AVX512F-NEXT: retq
-;
-; AVX512VL-LABEL: shuffle_v64i8_to_v8i8:
-; AVX512VL: # BB#0:
-; AVX512VL-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512VL-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,0,8,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512VL-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512VL-NEXT: vpshufb %xmm3, %xmm1, %xmm1
-; AVX512VL-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = <0,8,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512VL-NEXT: vpshufb %xmm3, %xmm2, %xmm2
-; AVX512VL-NEXT: vpshufb %xmm3, %xmm0, %xmm0
-; AVX512VL-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
-; AVX512VL-NEXT: vmovq %xmm0, (%rsi)
-; AVX512VL-NEXT: vzeroupper
-; AVX512VL-NEXT: retq
-;
-; AVX512BW-LABEL: shuffle_v64i8_to_v8i8:
-; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vmovdqu8 (%rdi), %zmm0
-; AVX512BW-NEXT: vextracti32x4 $3, %zmm0, %xmm1
-; AVX512BW-NEXT: vpextrb $8, %xmm1, %r8d
-; AVX512BW-NEXT: vpextrb $0, %xmm1, %r9d
-; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, %xmm1
-; AVX512BW-NEXT: vpextrb $8, %xmm1, %r10d
-; AVX512BW-NEXT: vpextrb $0, %xmm1, %r11d
-; AVX512BW-NEXT: vextracti32x4 $1, %zmm0, %xmm1
-; AVX512BW-NEXT: vpextrb $8, %xmm1, %eax
-; AVX512BW-NEXT: vpextrb $0, %xmm1, %ecx
-; AVX512BW-NEXT: vpextrb $8, %xmm0, %edx
-; AVX512BW-NEXT: vpextrb $0, %xmm0, %edi
-; AVX512BW-NEXT: vmovd %edi, %xmm0
-; AVX512BW-NEXT: vpinsrb $1, %edx, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrb $4, %r11d, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrb $5, %r10d, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrb $6, %r9d, %xmm0, %xmm0
-; AVX512BW-NEXT: vpinsrb $7, %r8d, %xmm0, %xmm0
-; AVX512BW-NEXT: vmovq %xmm0, (%rsi)
-; AVX512BW-NEXT: vzeroupper
-; AVX512BW-NEXT: retq
-;
-; AVX512BWVL-LABEL: shuffle_v64i8_to_v8i8:
-; AVX512BWVL: # BB#0:
-; AVX512BWVL-NEXT: vmovdqu8 (%rdi), %zmm0
-; AVX512BWVL-NEXT: vpextrb $8, %xmm0, %eax
-; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %ecx
-; AVX512BWVL-NEXT: vmovd %ecx, %xmm1
-; AVX512BWVL-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $1, %zmm0, %xmm2
-; AVX512BWVL-NEXT: vpextrb $0, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $8, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $2, %zmm0, %xmm2
-; AVX512BWVL-NEXT: vpextrb $0, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $8, %xmm2, %eax
-; AVX512BWVL-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vextracti32x4 $3, %zmm0, %xmm0
-; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpextrb $8, %xmm0, %eax
-; AVX512BWVL-NEXT: vpinsrb $14, %eax, %xmm1, %xmm0
-; AVX512BWVL-NEXT: vpmovwb %xmm0, (%rsi)
-; AVX512BWVL-NEXT: vzeroupper
-; AVX512BWVL-NEXT: retq
+; AVX512-LABEL: shuffle_v64i8_to_v8i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512-NEXT: vpmovqb %zmm0, (%rsi)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
 %vec = load <64 x i8>, <64 x i8>* %L
 %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <8 x i32> <i32 0, i32 8, i32 16, i32 24, i32 32, i32 40, i32 48, i32 56>
 store <8 x i8> %strided.vec, <8 x i8>* %S
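
For illustration, here is a minimal standalone IR example of the pattern the new combine targets (a hypothetical test, not part of the patch; the function name is made up). After type legalization the body below becomes a BUILD_VECTOR of EXTRACT_VECTOR_ELT nodes with first index zero and stride 2 from a single source, which reduceBuildVecToTrunc rewrites as a bitcast of %v to <4 x i32> followed by a TRUNCATE back to <4 x i16> (the vmovn.i32 seen in the updated vext.ll checks above):

define <4 x i16> @strided_extracts_to_trunc(<8 x i16> %v) {
  ; Extract every second element: indices 0, 2, 4, 6 (stride 2, first index 0).
  %e0 = extractelement <8 x i16> %v, i32 0
  %e1 = extractelement <8 x i16> %v, i32 2
  %e2 = extractelement <8 x i16> %v, i32 4
  %e3 = extractelement <8 x i16> %v, i32 6
  ; Rebuild a half-width vector from the extracted elements.
  %b0 = insertelement <4 x i16> undef, i16 %e0, i32 0
  %b1 = insertelement <4 x i16> %b0, i16 %e1, i32 1
  %b2 = insertelement <4 x i16> %b1, i16 %e2, i32 2
  %b3 = insertelement <4 x i16> %b2, i16 %e3, i32 3
  ret <4 x i16> %b3
}

On a little-endian target the low i16 of each lane of the <4 x i32> bitcast holds elements 0, 2, 4 and 6 of %v, so truncating each i32 lane back to i16 produces exactly this result; that layout assumption is why the combine currently bails out on big-endian data layouts.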