Index: lib/Target/X86/X86InterleavedAccess.cpp
===================================================================
--- lib/Target/X86/X86InterleavedAccess.cpp
+++ lib/Target/X86/X86InterleavedAccess.cpp
@@ -80,6 +80,11 @@
   void deinterleave8bitStride3(ArrayRef InputVectors,
                                SmallVectorImpl &TransposedMatrix,
                                unsigned NumSubVecElems);
+  void concatSubVector(Value **Vec, ArrayRef InVec,
+                       unsigned VecElems);
+  void reorderSubVector(MVT VT, SmallVectorImpl &TransposedMatrix,
+                        Value **InVec, SmallVectorImpl &VPShuf,
+                        unsigned VecElems, unsigned Stride);
 
 public:
   /// In order to form an interleaved access group X86InterleavedAccessGroup
@@ -136,7 +141,7 @@
     return true;
 
   if (ShuffleElemSize == 8 && Factor == 3 &&
-      (WideInstSize == 384 || WideInstSize == 768))
+      (WideInstSize == 384 || WideInstSize == 768 || WideInstSize == 1536))
     return true;
 
   return false;
@@ -178,11 +183,12 @@
   // In the case of stride 3 with a vector of 32 elements load the information
   // in the following way:
   // [0,1...,VF/2-1,VF/2+VF,VF/2+VF+1,...,2VF-1]
-  if (DL.getTypeSizeInBits(VecTy) == 768) {
+  unsigned VecLength = DL.getTypeSizeInBits(VecTy);
+  if (VecLength == 768 || VecLength == 1536) {
     Type *VecTran =
         VectorType::get(Type::getInt8Ty(LI->getContext()), 16)->getPointerTo();
     VecBasePtr = Builder.CreateBitCast(LI->getPointerOperand(), VecTran);
-    NumLoads = NumSubVectors * 2;
+    NumLoads = NumSubVectors * (VecLength / 384);
   } else
     VecBasePtr = Builder.CreateBitCast(LI->getPointerOperand(), VecBasePtrTy);
   // Generate N loads of T type.
@@ -423,6 +429,62 @@
   }
 }
 
+// concatSubVector - Rebuilds the data into the order that the deinterleaving
+// code expects. That code assumes a matrix shape that lets it use in-lane
+// instructions such as 'vpalignr' and 'vpshufb', so this function makes sure
+// the data is laid out correctly for those lane instructions. Each lane
+// inside a vector is 128 bits wide.
+//
+// The 'InVec' argument holds the data in increasing memory order: InVec[0]
+// contains the first 128 bits of data. The number of lanes inside a vector
+// depends on 'VecElems'; in general the formula is VecElems * element-size /
+// 128. The size of the 'InVec' array is likewise determined by 'VecElems'.
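+//
+// For example, with i8 elements and VecElems = 32 each sub-vector spans
+// 32 * 8 / 128 = 2 lanes, and the decomposed load above delivers the three
+// interleaved rows as six 128-bit pieces (InVec[0..5]) that get paired back
+// into 256-bit vectors, as the diagrams below show.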
+
+// For VecElems = 16
+// Invec[0] - |0|              Vec[0] - |0|
+// Invec[1] - |1|          =>  Vec[1] - |1|
+// Invec[2] - |2|              Vec[2] - |2|
+
+// For VecElems = 32
+// Invec[0] - |0|1|            Vec[0] - |0|3|
+// Invec[1] - |2|3|        =>  Vec[1] - |1|4|
+// Invec[2] - |4|5|            Vec[2] - |2|5|
+
+// For VecElems = 64
+// Invec[0] - |0|1|2 |3 |      Vec[0] - |0|3|6|9 |
+// Invec[1] - |4|5|6 |7 |  =>  Vec[1] - |1|4|7|10|
+// Invec[2] - |8|9|10|11|      Vec[2] - |2|5|8|11|
+
+void X86InterleavedAccessGroup::concatSubVector(Value **Vec,
+                                                ArrayRef InVec,
+                                                unsigned VecElems) {
+  SmallVector Concat;
+  if (VecElems == 16) {
+    for (int i = 0; i < 3; i++)
+      Vec[i] = InVec[i];
+    return;
+  }
+
+  for (unsigned i = 0; i < 32; ++i)
+    Concat.push_back(i);
+
+  for (int j = 0; j < VecElems / 32; j++)
+    for (int i = 0; i < 3; i++)
+      Vec[i + j * 3] = Builder.CreateShuffleVector(
+          InVec[j * 6 + i], InVec[j * 6 + i + 3], Concat);
+
+  if (VecElems == 32)
+    return;
+
+  for (unsigned i = 32; i < VecElems; ++i)
+    Concat.push_back(i);
+
+  for (int i = 0; i < 3; i++)
+    Vec[i] = Builder.CreateShuffleVector(Vec[i], Vec[i + 3], Concat);
+
+  return;
+}
+
 void X86InterleavedAccessGroup::deinterleave8bitStride3(
     ArrayRef InVec, SmallVectorImpl &TransposedMatrix,
     unsigned VecElems) {
@@ -433,19 +495,15 @@
   // Matrix[2]= b5 c5 a6 b6 c6 a7 b7 c7
 
   TransposedMatrix.resize(3);
-  SmallVector Concat;
   SmallVector VPShuf;
   SmallVector VPAlign[2];
   SmallVector VPAlign2;
   SmallVector VPAlign3;
   SmallVector GroupSize;
-  Value *Vec[3], *TempVector[3];
+  Value *Vec[6], *TempVector[3];
 
   MVT VT = MVT::getVT(Shuffles[0]->getType());
 
-  for (unsigned i = 0; i < VecElems && VecElems == 32; ++i)
-    Concat.push_back(i);
-
   createShuffleStride(VT, 3, VPShuf);
   setGroupSize(VT, GroupSize);
 
@@ -455,11 +513,7 @@
   DecodePALIGNRMask(VT, GroupSize[2] + GroupSize[1], VPAlign2, true, true);
   DecodePALIGNRMask(VT, GroupSize[1], VPAlign3, true, true);
 
-  for (int i = 0; i < 3; i++)
-    Vec[i] = VecElems == 32
-                 ? Builder.CreateShuffleVector(InVec[i], InVec[i + 3], Concat)
-                 : InVec[i];
-
+  concatSubVector(Vec, InVec, VecElems);
   // Vec[0]= a0 a1 a2 b0 b1 b2 c0 c1
   // Vec[1]= c2 c3 c4 a3 a4 a5 b3 b4
   // Vec[2]= b5 b6 b7 c5 c6 c7 a6 a7
@@ -539,8 +593,8 @@
 static void genShuffleBland(MVT VT, SmallVectorImpl &Mask,
                             SmallVectorImpl &Out, int LowOffset,
                             int HighOffset) {
-  assert(VT.getSizeInBits() == 256 &&
-         "This function works on only width of 256");
+  assert(VT.getSizeInBits() >= 256 &&
+         "This function doesn't accept widths smaller than 256");
   unsigned NumOfElm = VT.getVectorNumElements();
   for (unsigned i = 0; i < Mask.size(); i++)
     Out.push_back(Mask[i] + LowOffset);
@@ -548,6 +602,61 @@
     Out.push_back(Mask[i] + HighOffset + NumOfElm);
 }
 
+// reorderSubVector returns the data to its original state; it is, de facto,
+// the opposite of the function concatSubVector.
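+//
+// For example, with VecElems = 32 each Vec[i] comes in holding two 128-bit
+// blocks that belong to different output rows; the genShuffleBland masks
+// blend lanes from a pair of inputs so that every TransposedMatrix[i] again
+// holds consecutive 128-bit blocks, as the diagrams below show.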
+
+// For VecElems = 16
+// Invec[0] - |0|             TransposedMatrix[0] - |0|
+// Invec[1] - |1|         =>  TransposedMatrix[1] - |1|
+// Invec[2] - |2|             TransposedMatrix[2] - |2|
+
+// For VecElems = 32
+// Invec[0] - |0|3|           TransposedMatrix[0] - |0|1|
+// Invec[1] - |1|4|       =>  TransposedMatrix[1] - |2|3|
+// Invec[2] - |2|5|           TransposedMatrix[2] - |4|5|
+
+// For VecElems = 64
+// Invec[0] - |0|3|6|9 |      TransposedMatrix[0] - |0|1|2 |3 |
+// Invec[1] - |1|4|7|10|  =>  TransposedMatrix[1] - |4|5|6 |7 |
+// Invec[2] - |2|5|8|11|      TransposedMatrix[2] - |8|9|10|11|
+
+void X86InterleavedAccessGroup::reorderSubVector(
+    MVT VT, SmallVectorImpl &TransposedMatrix, Value **Vec,
+    SmallVectorImpl &VPShuf, unsigned VecElems, unsigned Stride) {
+
+  if (VecElems == 16) {
+    for (int i = 0; i < 3; i++)
+      TransposedMatrix[i] = Builder.CreateShuffleVector(
+          Vec[i], UndefValue::get(Vec[i]->getType()), VPShuf);
+    return;
+  }
+
+  SmallVector OptimizeShuf;
+  Value *Temp[6];
+
+  for (int i = 0; i < (VecElems / 16) * Stride; i += 2) {
+    genShuffleBland(VT, VPShuf, OptimizeShuf, (i / Stride) * 16,
+                    (i + 1) / Stride * 16);
+    Temp[i / 2] = Builder.CreateShuffleVector(
+        Vec[i % Stride], Vec[(i + 1) % Stride], OptimizeShuf);
+    OptimizeShuf.clear();
+  }
+
+  SmallVector Concat;
+
+  for (unsigned i = 0; i < 64; ++i)
+    Concat.push_back(i);
+
+  for (int i = 0; i < 3; i++)
+    TransposedMatrix[i] =
+        VecElems == 32
+            ? Temp[i]
+            : Builder.CreateShuffleVector(Temp[2 * i], Temp[2 * i + 1], Concat);
+
+  return;
+}
+
 void X86InterleavedAccessGroup::interleave8bitStride3(
     ArrayRef InVec, SmallVectorImpl &TransposedMatrix,
     unsigned VecElems) {
@@ -563,7 +672,7 @@
   SmallVector VPAlign[3];
   SmallVector VPAlign2;
   SmallVector VPAlign3;
-  SmallVector OptimizeShuf[3];
+
   Value *Vec[3], *TempVector[3];
 
   MVT VT = MVT::getVectorVT(MVT::i8, VecElems);
@@ -605,23 +714,9 @@
   // TransposedMatrix[1] = c2 a3 b3 c3 a4 b4 c4 a5
   // TransposedMatrix[2] = b5 c5 a6 b6 c6 a7 b7 c7
 
-  group2Shuffle(VT, GroupSize, VPShuf);
-
-  if (VT.getSizeInBits() <= 128) {
-    for (int i = 0; i < 3; i++)
-      TransposedMatrix[i] = Builder.CreateShuffleVector(
-          Vec[i], UndefValue::get(Vec[i]->getType()), VPShuf);
-    return;
-  }
-
   unsigned NumOfElm = VT.getVectorNumElements();
-  genShuffleBland(VT, VPShuf, OptimizeShuf[0], 0, 0);
-  genShuffleBland(VT, VPShuf, OptimizeShuf[1], 0, NumOfElm / 2);
-  genShuffleBland(VT, VPShuf, OptimizeShuf[2], NumOfElm / 2, NumOfElm / 2);
-
-  for (int i = 0; i < 3; i++)
-    TransposedMatrix[i] = Builder.CreateShuffleVector(
-        Vec[(i * 2) % 3], Vec[(i * 2 + 1) % 3], OptimizeShuf[i]);
+  group2Shuffle(VT, GroupSize, VPShuf);
+  reorderSubVector(VT, TransposedMatrix, Vec, VPShuf, NumOfElm, 3);
 
   return;
 }
@@ -683,6 +778,7 @@
   case 8:
   case 16:
   case 32:
+  case 64:
     deinterleave8bitStride3(DecomposedVectors, TransposedVectors,
                             NumSubVecElems);
     break;
@@ -722,6 +818,10 @@
     if (Factor == 3)
      interleave8bitStride3(DecomposedVectors, TransposedVectors,
                            NumSubVecElems);
+  case 64:
+    if (Factor == 3)
+      interleave8bitStride3(DecomposedVectors, TransposedVectors,
+                            NumSubVecElems);
     break;
   default:
     return false;
Index: test/CodeGen/X86/x86-interleaved-access.ll
===================================================================
--- test/CodeGen/X86/x86-interleaved-access.ll
+++ test/CodeGen/X86/x86-interleaved-access.ll
@@ -1362,258 +1362,144 @@
 define void @interleaved_store_vf64_i8_stride3(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <192 x i8>* %p) {
 ; AVX1-LABEL: interleaved_store_vf64_i8_stride3:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vmovdqa %ymm4, %ymm11
-; AVX1-NEXT:
vmovdqa %ymm3, %ymm4 -; AVX1-NEXT: vmovdqa %ymm2, %ymm9 -; AVX1-NEXT: vmovdqa %ymm1, %ymm13 -; AVX1-NEXT: vmovdqa %ymm0, %ymm3 -; AVX1-NEXT: vextractf128 $1, %ymm9, %xmm10 -; AVX1-NEXT: vpshufb {{.*#+}} xmm7 = xmm10[0,0,1,1,1,1,2,2,4,4,3,3,4,4,3,3] -; AVX1-NEXT: vpshufb {{.*#+}} xmm8 = xmm9[12,12,11,11,12,12,11,11,13,13,14,14,14,14,15,15] -; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm8, %ymm8 -; AVX1-NEXT: vextractf128 $1, %ymm11, %xmm6 -; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm15 -; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm15[0],xmm6[0],xmm15[1],xmm6[1],xmm15[2],xmm6[2],xmm15[3],xmm6[3],xmm15[4],xmm6[4],xmm15[5],xmm6[5],xmm15[6],xmm6[6],xmm15[7],xmm6[7] -; AVX1-NEXT: vpshufb {{.*#+}} xmm14 = xmm7[0,u,1,2,u,3,4,u,5,6,u,7,8,u,9,10] -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm11[8],xmm3[8],xmm11[9],xmm3[9],xmm11[10],xmm3[10],xmm11[11],xmm3[11],xmm11[12],xmm3[12],xmm11[13],xmm3[13],xmm11[14],xmm3[14],xmm11[15],xmm3[15] -; AVX1-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[4,7,u,6,9,u,8,11,u,10,13,u,12,15,u,14] -; AVX1-NEXT: vinsertf128 $1, %xmm14, %ymm7, %ymm14 -; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255] -; AVX1-NEXT: vandnps %ymm8, %ymm2, %ymm8 -; AVX1-NEXT: vandps %ymm2, %ymm14, %ymm14 -; AVX1-NEXT: vorps %ymm8, %ymm14, %ymm0 -; AVX1-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) # 32-byte Spill -; AVX1-NEXT: vpshufb {{.*#+}} xmm8 = xmm15[10,11,10,11,12,13,12,13,8,9,14,15,14,15,14,15] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm7 = xmm15[0,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,6,5] -; AVX1-NEXT: vinsertf128 $1, %xmm8, %ymm7, %ymm7 -; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm6[5,u],zero,xmm6[6,u],zero,xmm6[7,u],zero,xmm6[8,u],zero,xmm6[9,u],zero -; AVX1-NEXT: vmovdqa {{.*#+}} xmm15 = <5,128,u,6,128,u,7,128,u,8,128,u,9,128,u,10> -; AVX1-NEXT: vpshufb %xmm15, %xmm10, %xmm12 -; AVX1-NEXT: vpor %xmm0, %xmm12, %xmm0 -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm10[8],xmm6[9],xmm10[9],xmm6[10],xmm10[10],xmm6[11],xmm10[11],xmm6[12],xmm10[12],xmm6[13],xmm10[13],xmm6[14],xmm10[14],xmm6[15],xmm10[15] -; AVX1-NEXT: vmovdqa {{.*#+}} xmm12 = <4,u,7,6,u,9,8,u,11,10,u,13,12,u,15,14> -; AVX1-NEXT: vpshufb %xmm12, %xmm6, %xmm6 -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0 -; AVX1-NEXT: vandnps %ymm7, %ymm2, %ymm6 -; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0 -; AVX1-NEXT: vorps %ymm6, %ymm0, %ymm0 -; AVX1-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp) # 32-byte Spill -; AVX1-NEXT: vmovdqa %ymm4, %ymm10 -; AVX1-NEXT: vextractf128 $1, %ymm10, %xmm1 -; AVX1-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[0,0,1,1,1,1,2,2,4,4,3,3,4,4,3,3] -; AVX1-NEXT: vpshufb {{.*#+}} xmm7 = xmm10[12,12,11,11,12,12,11,11,13,13,14,14,14,14,15,15] -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm7, %ymm6 -; AVX1-NEXT: vmovdqa %ymm5, %ymm8 -; AVX1-NEXT: vextractf128 $1, %ymm8, %xmm7 -; AVX1-NEXT: vextractf128 $1, %ymm13, %xmm0 -; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7] -; AVX1-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[0,u,1,2,u,3,4,u,5,6,u,7,8,u,9,10] -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm8[8],xmm13[8],xmm8[9],xmm13[9],xmm8[10],xmm13[10],xmm8[11],xmm13[11],xmm8[12],xmm13[12],xmm8[13],xmm13[13],xmm8[14],xmm13[14],xmm8[15],xmm13[15] -; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[4,7,u,6,9,u,8,11,u,10,13,u,12,15,u,14] -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm4 -; AVX1-NEXT: vandnps %ymm6, %ymm2, %ymm5 -; AVX1-NEXT: vandps %ymm2, 
%ymm4, %ymm4 -; AVX1-NEXT: vorps %ymm5, %ymm4, %ymm14 -; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = xmm0[10,11,10,11,12,13,12,13,8,9,14,15,14,15,14,15] -; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,3,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,5] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0 -; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = zero,xmm7[5,u],zero,xmm7[6,u],zero,xmm7[7,u],zero,xmm7[8,u],zero,xmm7[9,u],zero -; AVX1-NEXT: vpshufb %xmm15, %xmm1, %xmm5 -; AVX1-NEXT: vpor %xmm4, %xmm5, %xmm4 -; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0 -; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm7[8],xmm1[8],xmm7[9],xmm1[9],xmm7[10],xmm1[10],xmm7[11],xmm1[11],xmm7[12],xmm1[12],xmm7[13],xmm1[13],xmm7[14],xmm1[14],xmm7[15],xmm1[15] -; AVX1-NEXT: vpshufb %xmm12, %xmm1, %xmm1 -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm4, %ymm1 -; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1 -; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm12 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = <128,u,6,128,u,7,128,u,8,128,u,9,128,u,10,128> -; AVX1-NEXT: vpshufb %xmm0, %xmm3, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = <5,u,128,6,u,128,7,u,128,8,u,128,9,u,128,10> -; AVX1-NEXT: vpshufb %xmm4, %xmm9, %xmm5 -; AVX1-NEXT: vpor %xmm1, %xmm5, %xmm1 -; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7] -; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = <0,1,u,2,3,u,4,5,u,6,7,u,8,9,u,10> -; AVX1-NEXT: vpshufb %xmm5, %xmm3, %xmm3 -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,0,1,0,1,6,7,2,3,2,3,4,5,4,5] -; AVX1-NEXT: vpshufb %xmm3, %xmm11, %xmm7 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm6 = xmm11[2,1,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,7] +; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6 +; AVX1-NEXT: vpalignr {{.*#+}} xmm8 = xmm6[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5] +; AVX1-NEXT: vpalignr {{.*#+}} xmm9 = xmm1[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5] +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7 +; AVX1-NEXT: vpalignr {{.*#+}} xmm14 = xmm7[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5] +; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5] +; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm6 +; AVX1-NEXT: vpalignr {{.*#+}} xmm11 = xmm6[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vpalignr {{.*#+}} xmm15 = xmm3[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm1 +; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vpalignr {{.*#+}} xmm10 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4] +; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm6 +; AVX1-NEXT: vpalignr {{.*#+}} xmm12 = xmm14[5,6,7,8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm13 = xmm9[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4] +; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm7 +; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm8[5,6,7,8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm2[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm14 = xmm1[5,6,7,8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm9 = xmm15[5,6,7,8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm8 = xmm11[5,6,7,8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm1 
= xmm6[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm7[5,6,7,8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm11 = xmm3[5,6,7,8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm15 = xmm13[5,6,7,8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm6 = xmm12[5,6,7,8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm7 = xmm10[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm8 = xmm8[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm9 = xmm9[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm14 = xmm14[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[5,6,7,8,9,10,11,12,13,14,15],xmm12[0,1,2,3,4] +; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[5,6,7,8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4] +; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5] +; AVX1-NEXT: vpshufb %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vpshufb %xmm5, %xmm7, %xmm7 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm7, %ymm0 +; AVX1-NEXT: vpshufb %xmm5, %xmm6, %xmm6 +; AVX1-NEXT: vpshufb %xmm5, %xmm2, %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm2, %ymm2 +; AVX1-NEXT: vpshufb %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vpshufb %xmm5, %xmm14, %xmm6 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm6, %ymm1 +; AVX1-NEXT: vpshufb %xmm5, %xmm9, %xmm6 +; AVX1-NEXT: vpshufb %xmm5, %xmm15, %xmm7 ; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm7, %ymm6 -; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1 -; AVX1-NEXT: vandnps %ymm6, %ymm2, %ymm6 -; AVX1-NEXT: vorps %ymm6, %ymm1, %ymm1 -; AVX1-NEXT: vpshufb %xmm0, %xmm13, %xmm0 -; AVX1-NEXT: vpshufb %xmm4, %xmm10, %xmm4 -; AVX1-NEXT: vpor %xmm0, %xmm4, %xmm0 -; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3],xmm13[4],xmm10[4],xmm13[5],xmm10[5],xmm13[6],xmm10[6],xmm13[7],xmm10[7] +; AVX1-NEXT: vpshufb %xmm5, %xmm11, %xmm7 ; AVX1-NEXT: vpshufb %xmm5, %xmm4, %xmm4 -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm4, %ymm0 -; AVX1-NEXT: vpshufb %xmm3, %xmm8, %xmm3 -; AVX1-NEXT: vpshuflw {{.*#+}} xmm4 = xmm8[2,1,3,3,4,5,6,7] -; AVX1-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,7] -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3 -; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0 -; AVX1-NEXT: vandnps %ymm3, %ymm2, %ymm2 -; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0 -; AVX1-NEXT: vmovups %ymm12, 160(%rdi) -; AVX1-NEXT: vmovups %ymm14, 128(%rdi) -; AVX1-NEXT: vmovups %ymm0, 96(%rdi) -; AVX1-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload -; AVX1-NEXT: vmovups %ymm0, 64(%rdi) -; AVX1-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload -; AVX1-NEXT: vmovups %ymm0, 32(%rdi) -; AVX1-NEXT: vmovups %ymm1, (%rdi) +; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm4, %ymm4 +; AVX1-NEXT: vpshufb %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vpshufb %xmm5, %xmm8, %xmm5 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3 +; AVX1-NEXT: vmovups %ymm3, 160(%rdi) +; AVX1-NEXT: vmovups %ymm4, 128(%rdi) +; AVX1-NEXT: vmovups %ymm6, 96(%rdi) +; AVX1-NEXT: vmovups %ymm1, 64(%rdi) +; AVX1-NEXT: vmovups %ymm2, 
32(%rdi) +; AVX1-NEXT: vmovups %ymm0, (%rdi) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; ; AVX2-LABEL: interleaved_store_vf64_i8_stride3: ; AVX2: # BB#0: -; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm11 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm8 = <128,5,u,128,6,u,128,7,u,128,8,u,128,9,u,128> -; AVX2-NEXT: vpshufb %xmm8, %xmm11, %xmm9 -; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm7 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm10 = <5,128,u,6,128,u,7,128,u,8,128,u,9,128,u,10> -; AVX2-NEXT: vpshufb %xmm10, %xmm7, %xmm6 -; AVX2-NEXT: vpor %xmm9, %xmm6, %xmm6 -; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm11[8],xmm7[8],xmm11[9],xmm7[9],xmm11[10],xmm7[10],xmm11[11],xmm7[11],xmm11[12],xmm7[12],xmm11[13],xmm7[13],xmm11[14],xmm7[14],xmm11[15],xmm7[15] -; AVX2-NEXT: vmovdqa {{.*#+}} xmm9 = <4,u,7,6,u,9,8,u,11,10,u,13,12,u,15,14> -; AVX2-NEXT: vpshufb %xmm9, %xmm7, %xmm7 -; AVX2-NEXT: vinserti128 $1, %xmm7, %ymm6, %ymm12 -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm7 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm11 = [10,11,10,11,12,13,12,13,8,9,14,15,14,15,14,15] -; AVX2-NEXT: vpshufb %xmm11, %xmm7, %xmm6 -; AVX2-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[0,3,3,3,4,5,6,7] -; AVX2-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,6,5] -; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm7, %ymm6 -; AVX2-NEXT: vmovdqa {{.*#+}} ymm13 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255] -; AVX2-NEXT: vpblendvb %ymm13, %ymm12, %ymm6, %ymm12 -; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm14 -; AVX2-NEXT: vpshufb %xmm8, %xmm14, %xmm8 -; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm7 -; AVX2-NEXT: vpshufb %xmm10, %xmm7, %xmm6 -; AVX2-NEXT: vpor %xmm8, %xmm6, %xmm6 -; AVX2-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm14[8],xmm7[8],xmm14[9],xmm7[9],xmm14[10],xmm7[10],xmm14[11],xmm7[11],xmm14[12],xmm7[12],xmm14[13],xmm7[13],xmm14[14],xmm7[14],xmm14[15],xmm7[15] -; AVX2-NEXT: vpshufb %xmm9, %xmm7, %xmm7 -; AVX2-NEXT: vinserti128 $1, %xmm7, %ymm6, %ymm8 -; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm7 -; AVX2-NEXT: vpshufb %xmm11, %xmm7, %xmm6 -; AVX2-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[0,3,3,3,4,5,6,7] -; AVX2-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,6,5] -; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm7, %ymm6 -; AVX2-NEXT: vpblendvb %ymm13, %ymm8, %ymm6, %ymm8 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm10 = <128,u,6,128,u,7,128,u,8,128,u,9,128,u,10,128> -; AVX2-NEXT: vpshufb %xmm10, %xmm0, %xmm7 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm11 = <5,u,128,6,u,128,7,u,128,8,u,128,9,u,128,10> -; AVX2-NEXT: vpshufb %xmm11, %xmm2, %xmm6 -; AVX2-NEXT: vpor %xmm7, %xmm6, %xmm6 -; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; AVX2-NEXT: vmovdqa {{.*#+}} xmm14 = <0,1,u,2,3,u,4,5,u,6,7,u,8,9,u,10> -; AVX2-NEXT: vpshufb %xmm14, %xmm7, %xmm7 -; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm7, %ymm6 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,0,1,0,1,6,7,2,3,2,3,4,5,4,5] -; AVX2-NEXT: vpshufb %xmm15, %xmm4, %xmm9 -; AVX2-NEXT: vpshuflw {{.*#+}} xmm7 = xmm4[2,1,3,3,4,5,6,7] -; AVX2-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,4,7] -; AVX2-NEXT: vinserti128 $1, %xmm7, %ymm9, %ymm7 -; AVX2-NEXT: vpblendvb %ymm13, %ymm6, %ymm7, %ymm9 -; AVX2-NEXT: vpshufb %xmm10, %xmm1, %xmm6 -; AVX2-NEXT: vpshufb %xmm11, %xmm3, %xmm7 -; AVX2-NEXT: vpor %xmm6, %xmm7, %xmm6 -; AVX2-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] -; AVX2-NEXT: vpshufb %xmm14, %xmm7, 
%xmm7 -; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm7, %ymm10 -; AVX2-NEXT: vpshufb %xmm15, %xmm5, %xmm7 -; AVX2-NEXT: vpshuflw {{.*#+}} xmm6 = xmm5[2,1,3,3,4,5,6,7] -; AVX2-NEXT: vpshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,7] -; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm7, %ymm6 -; AVX2-NEXT: vpblendvb %ymm13, %ymm10, %ymm6, %ymm6 -; AVX2-NEXT: vmovdqa {{.*#+}} ymm7 = <10,11,10,11,u,u,12,13,12,13,u,u,14,15,14,15,u,u,0,1,0,1,u,u,2,3,2,3,u,u,4,5> +; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21] +; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21] +; AVX2-NEXT: vpalignr {{.*#+}} ymm3 = ymm3[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26] +; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm2[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26] +; AVX2-NEXT: vpalignr {{.*#+}} ymm6 = ymm0[5,6,7,8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4],ymm0[21,22,23,24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20] +; AVX2-NEXT: vpalignr {{.*#+}} ymm7 = ymm1[5,6,7,8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4],ymm1[21,22,23,24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20] +; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm2[5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4],ymm2[21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20] +; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm3[5,6,7,8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4],ymm3[21,22,23,24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20] +; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm4[5,6,7,8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4],ymm4[21,22,23,24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20] +; AVX2-NEXT: vpalignr {{.*#+}} ymm3 = ymm5[5,6,7,8,9,10,11,12,13,14,15],ymm3[0,1,2,3,4],ymm5[21,22,23,24,25,26,27,28,29,30,31],ymm3[16,17,18,19,20] +; AVX2-NEXT: vpalignr {{.*#+}} ymm4 = ymm7[5,6,7,8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4],ymm7[21,22,23,24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20] +; AVX2-NEXT: vpalignr {{.*#+}} ymm5 = ymm6[5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4],ymm6[21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20] +; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[5,6,7,8,9,10,11,12,13,14,15],ymm3[0,1,2,3,4],ymm1[21,22,23,24,25,26,27,28,29,30,31],ymm3[16,17,18,19,20] +; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[5,6,7,8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4],ymm0[21,22,23,24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20] +; AVX2-NEXT: vpalignr {{.*#+}} ymm3 = ymm3[5,6,7,8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4],ymm3[21,22,23,24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20] +; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm2[5,6,7,8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4],ymm2[21,22,23,24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20] +; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm5, %ymm6 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm7 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5] +; AVX2-NEXT: vpshufb %ymm7, %ymm6, %ymm6 +; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm2[0,1,2,3],ymm5[4,5,6,7] +; AVX2-NEXT: vpshufb %ymm7, %ymm5, %ymm5 +; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3] +; AVX2-NEXT: vpshufb %ymm7, %ymm0, %ymm0 +; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm4, %ymm2 +; AVX2-NEXT: vpshufb %ymm7, %ymm2, %ymm2 +; AVX2-NEXT: vpblendd {{.*#+}} ymm4 = ymm3[0,1,2,3],ymm4[4,5,6,7] ; AVX2-NEXT: vpshufb %ymm7, %ymm4, %ymm4 -; AVX2-NEXT: vmovdqa {{.*#+}} ymm10 = <10,11,u,u,12,13,12,13,u,u,14,15,14,15,u,u,0,1,0,1,u,u,2,3,2,3,u,u,4,5,4,5> -; AVX2-NEXT: vpshufb %ymm10, %ymm0, %ymm0 -; AVX2-NEXT: vmovdqa 
{{.*#+}} ymm11 = <255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0> -; AVX2-NEXT: vpblendvb %ymm11, %ymm4, %ymm0, %ymm0 -; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = -; AVX2-NEXT: vpshufb %ymm4, %ymm2, %ymm2 -; AVX2-NEXT: vpblendvb %ymm13, %ymm0, %ymm2, %ymm0 -; AVX2-NEXT: vpshufb %ymm7, %ymm5, %ymm2 -; AVX2-NEXT: vpshufb %ymm10, %ymm1, %ymm1 -; AVX2-NEXT: vpblendvb %ymm11, %ymm2, %ymm1, %ymm1 -; AVX2-NEXT: vpshufb %ymm4, %ymm3, %ymm2 -; AVX2-NEXT: vpblendvb %ymm13, %ymm1, %ymm2, %ymm1 -; AVX2-NEXT: vmovdqu %ymm1, 128(%rdi) -; AVX2-NEXT: vmovdqu %ymm0, 32(%rdi) -; AVX2-NEXT: vmovdqu %ymm8, 160(%rdi) -; AVX2-NEXT: vmovdqu %ymm6, 96(%rdi) -; AVX2-NEXT: vmovdqu %ymm12, 64(%rdi) -; AVX2-NEXT: vmovdqu %ymm9, (%rdi) +; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3] +; AVX2-NEXT: vpshufb %ymm7, %ymm1, %ymm1 +; AVX2-NEXT: vmovdqu %ymm1, 160(%rdi) +; AVX2-NEXT: vmovdqu %ymm4, 128(%rdi) +; AVX2-NEXT: vmovdqu %ymm0, 64(%rdi) +; AVX2-NEXT: vmovdqu %ymm5, 32(%rdi) +; AVX2-NEXT: vmovdqu %ymm2, 96(%rdi) +; AVX2-NEXT: vmovdqu %ymm6, (%rdi) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512-LABEL: interleaved_store_vf64_i8_stride3: ; AVX512: # BB#0: -; AVX512-NEXT: vextracti64x4 $1, %zmm2, %ymm3 -; AVX512-NEXT: vmovdqa {{.*#+}} xmm8 = <128,5,u,128,6,u,128,7,u,128,8,u,128,9,u,128> -; AVX512-NEXT: vpshufb %xmm8, %xmm3, %xmm5 -; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm6 -; AVX512-NEXT: vmovdqa {{.*#+}} xmm9 = <5,128,u,6,128,u,7,128,u,8,128,u,9,128,u,10> -; AVX512-NEXT: vpshufb %xmm9, %xmm6, %xmm4 -; AVX512-NEXT: vpor %xmm5, %xmm4, %xmm4 -; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm6[0],xmm3[0],xmm6[1],xmm3[1],xmm6[2],xmm3[2],xmm6[3],xmm3[3],xmm6[4],xmm3[4],xmm6[5],xmm3[5],xmm6[6],xmm3[6],xmm6[7],xmm3[7] -; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,0,1,u,2,3,u,4,5,u,6,7,u,8,9,u] -; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm10 -; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm5 -; AVX512-NEXT: vpshufb %xmm8, %xmm5, %xmm6 -; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm7 -; AVX512-NEXT: vpshufb %xmm9, %xmm7, %xmm4 -; AVX512-NEXT: vpor %xmm6, %xmm4, %xmm4 -; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm7[8],xmm5[9],xmm7[9],xmm5[10],xmm7[10],xmm5[11],xmm7[11],xmm5[12],xmm7[12],xmm5[13],xmm7[13],xmm5[14],xmm7[14],xmm5[15],xmm7[15] -; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[4,u,7,6,u,9,8,u,11,10,u,13,12,u,15,14] -; AVX512-NEXT: vinserti128 $1, %xmm5, %ymm4, %ymm4 -; AVX512-NEXT: vinserti64x4 $1, %ymm10, %zmm4, %zmm11 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = -; AVX512-NEXT: vpermw %zmm0, %zmm5, %zmm5 -; AVX512-NEXT: movabsq $5270498306774157604, %rax # imm = 0x4924924924924924 -; AVX512-NEXT: kmovq %rax, %k1 -; AVX512-NEXT: vmovdqu8 %zmm5, %zmm11 {%k1} -; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = zero,xmm0[u,6],zero,xmm0[u,7],zero,xmm0[u,8],zero,xmm0[u,9],zero,xmm0[u,10],zero -; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[5,u],zero,xmm1[6,u],zero,xmm1[7,u],zero,xmm1[8,u],zero,xmm1[9,u],zero,xmm1[10] -; AVX512-NEXT: vpor %xmm5, %xmm6, %xmm5 -; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[0,1,u,2,3,u,4,5,u,6,7,u,8,9,u,10] -; AVX512-NEXT: vinserti128 $1, %xmm5, %ymm6, %ymm5 -; AVX512-NEXT: vmovdqa {{.*#+}} ymm10 = <10,11,u,u,12,13,12,13,u,u,14,15,14,15,u,u,0,1,0,1,u,u,2,3,2,3,u,u,4,5,4,5> -; AVX512-NEXT: vpshufb %ymm10, %ymm0, %ymm7 -; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = 
ymm1[u,u,11,u,u,12,u,u,13,u,u,14,u,u,15,u,u,16,u,u,17,u,u,18,u,u,19,u,u,20,u,u] -; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = -; AVX512-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7 -; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm5, %zmm5 -; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm7 = -; AVX512-NEXT: vpermw %zmm2, %zmm7, %zmm2 -; AVX512-NEXT: vmovdqu8 %zmm2, %zmm5 {%k1} +; AVX512-NEXT: vpalignr {{.*#+}} zmm0 = zmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21,38,39,40,41,42,43,44,45,46,47,32,33,34,35,36,37,54,55,56,57,58,59,60,61,62,63,48,49,50,51,52,53] +; AVX512-NEXT: vpalignr {{.*#+}} zmm1 = zmm1[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26,43,44,45,46,47,32,33,34,35,36,37,38,39,40,41,42,59,60,61,62,63,48,49,50,51,52,53,54,55,56,57,58] +; AVX512-NEXT: vpalignr {{.*#+}} zmm3 = zmm0[5,6,7,8,9,10,11,12,13,14,15],zmm2[0,1,2,3,4],zmm0[21,22,23,24,25,26,27,28,29,30,31],zmm2[16,17,18,19,20],zmm0[37,38,39,40,41,42,43,44,45,46,47],zmm2[32,33,34,35,36],zmm0[53,54,55,56,57,58,59,60,61,62,63],zmm2[48,49,50,51,52] +; AVX512-NEXT: vpalignr {{.*#+}} zmm0 = zmm1[5,6,7,8,9,10,11,12,13,14,15],zmm0[0,1,2,3,4],zmm1[21,22,23,24,25,26,27,28,29,30,31],zmm0[16,17,18,19,20],zmm1[37,38,39,40,41,42,43,44,45,46,47],zmm0[32,33,34,35,36],zmm1[53,54,55,56,57,58,59,60,61,62,63],zmm0[48,49,50,51,52] +; AVX512-NEXT: vpalignr {{.*#+}} zmm1 = zmm2[5,6,7,8,9,10,11,12,13,14,15],zmm1[0,1,2,3,4],zmm2[21,22,23,24,25,26,27,28,29,30,31],zmm1[16,17,18,19,20],zmm2[37,38,39,40,41,42,43,44,45,46,47],zmm1[32,33,34,35,36],zmm2[53,54,55,56,57,58,59,60,61,62,63],zmm1[48,49,50,51,52] +; AVX512-NEXT: vpalignr {{.*#+}} zmm2 = zmm3[5,6,7,8,9,10,11,12,13,14,15],zmm0[0,1,2,3,4],zmm3[21,22,23,24,25,26,27,28,29,30,31],zmm0[16,17,18,19,20],zmm3[37,38,39,40,41,42,43,44,45,46,47],zmm0[32,33,34,35,36],zmm3[53,54,55,56,57,58,59,60,61,62,63],zmm0[48,49,50,51,52] +; AVX512-NEXT: vpalignr {{.*#+}} zmm0 = zmm0[5,6,7,8,9,10,11,12,13,14,15],zmm1[0,1,2,3,4],zmm0[21,22,23,24,25,26,27,28,29,30,31],zmm1[16,17,18,19,20],zmm0[37,38,39,40,41,42,43,44,45,46,47],zmm1[32,33,34,35,36],zmm0[53,54,55,56,57,58,59,60,61,62,63],zmm1[48,49,50,51,52] +; AVX512-NEXT: vpalignr {{.*#+}} zmm1 = zmm1[5,6,7,8,9,10,11,12,13,14,15],zmm3[0,1,2,3,4],zmm1[21,22,23,24,25,26,27,28,29,30,31],zmm3[16,17,18,19,20],zmm1[37,38,39,40,41,42,43,44,45,46,47],zmm3[32,33,34,35,36],zmm1[53,54,55,56,57,58,59,60,61,62,63],zmm3[48,49,50,51,52] +; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm3 +; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5] +; AVX512-NEXT: vpshufb %ymm4, %ymm3, %ymm3 +; AVX512-NEXT: vpblendd {{.*#+}} ymm5 = ymm1[0,1,2,3],ymm2[4,5,6,7] +; AVX512-NEXT: vpshufb %ymm4, %ymm5, %ymm5 +; AVX512-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm0[2,3],ymm1[2,3] +; AVX512-NEXT: vpshufb %ymm4, %ymm6, %ymm6 +; AVX512-NEXT: vextracti64x4 $1, %zmm2, %ymm2 ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm0 -; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm2 -; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm2[u],zero,xmm2[6,u],zero,xmm2[7,u],zero,xmm2[8,u],zero,xmm2[9,u],zero,xmm2[10,u] -; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm6 -; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm6[u,5],zero,xmm6[u,6],zero,xmm6[u,7],zero,xmm6[u,8],zero,xmm6[u,9],zero,xmm6[u] -; AVX512-NEXT: vpor %xmm7, %xmm4, %xmm4 -; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15] -; AVX512-NEXT: vpshufb 
{{.*#+}} xmm2 = xmm2[4,7,u,6,9,u,8,11,u,10,13,u,12,15,u,14] -; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2 -; AVX512-NEXT: vpshufb %ymm10, %ymm0, %ymm0 -; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[10,11,10,11,u,u,12,13,12,13,u,u,14,15,14,15,u,u,16,17,16,17,u,u,18,19,18,19,u,u,20,21] -; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = <255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0> -; AVX512-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm0 -; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm0, %zmm0 -; AVX512-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,21,16,27,22,17,28,23,18,29,24,19,30,25,20,31,26,37,32,43,38,33,44,39,34,45,40,35,46,41,36,47,42,53,48,59,54,49,60,55,50,61,56,51,62,57,52,63,58] -; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[4,5,6,7,6,7,6,7] -; AVX512-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1} +; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm7 +; AVX512-NEXT: vpshufb %ymm4, %ymm7, %ymm7 +; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm1 +; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4,5,6,7] +; AVX512-NEXT: vpshufb %ymm4, %ymm2, %ymm2 +; AVX512-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; AVX512-NEXT: vpshufb %ymm4, %ymm0, %ymm0 +; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm3, %zmm1 +; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm3 +; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0 ; AVX512-NEXT: vmovdqu32 %zmm0, 128(%rdi) -; AVX512-NEXT: vmovdqu32 %zmm11, 64(%rdi) -; AVX512-NEXT: vmovdqu32 %zmm5, (%rdi) +; AVX512-NEXT: vmovdqu32 %zmm3, 64(%rdi) +; AVX512-NEXT: vmovdqu32 %zmm1, (%rdi) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %1 = shufflevector <64 x i8> %a, <64 x i8> %b, <128 x i32> @@ -1626,355 +1512,169 @@ define <64 x i8> @interleaved_load_vf64_i8_stride3(<192 x i8>* %ptr){ ; AVX1-LABEL: interleaved_load_vf64_i8_stride3: ; AVX1: # BB#0: -; AVX1-NEXT: subq $152, %rsp -; AVX1-NEXT: .Lcfi0: -; AVX1-NEXT: .cfi_def_cfa_offset 160 -; AVX1-NEXT: vmovdqu (%rdi), %ymm2 -; AVX1-NEXT: vmovdqu 32(%rdi), %ymm0 -; AVX1-NEXT: vmovdqu 64(%rdi), %ymm4 -; AVX1-NEXT: vmovdqu %ymm4, -{{[0-9]+}}(%rsp) # 32-byte Spill -; AVX1-NEXT: vmovdqa {{.*#+}} xmm10 = -; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm11 -; AVX1-NEXT: vpshufb %xmm10, %xmm11, %xmm3 -; AVX1-NEXT: vmovdqa %xmm11, -{{[0-9]+}}(%rsp) # 16-byte Spill -; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = -; AVX1-NEXT: vpshufb %xmm9, %xmm4, %xmm6 -; AVX1-NEXT: vpor %xmm3, %xmm6, %xmm3 -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm3 -; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm1 -; AVX1-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill -; AVX1-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,xmm1[2,5,8,11,14,u,u,u,u,u] -; AVX1-NEXT: vmovdqa {{.*#+}} xmm12 = <0,3,6,9,12,15,128,128,128,128,128,u,u,u,u,u> -; AVX1-NEXT: vpshufb %xmm12, %xmm2, %xmm7 -; AVX1-NEXT: vmovdqa %ymm2, %ymm15 -; AVX1-NEXT: vmovdqu %ymm15, (%rsp) # 32-byte Spill -; AVX1-NEXT: vpor %xmm6, %xmm7, %xmm6 -; AVX1-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,6,7,8,9,10],zero,zero,zero,zero,zero -; AVX1-NEXT: vmovdqa {{.*#+}} xmm13 = [128,128,128,128,128,128,128,128,128,128,128,1,4,7,10,13] -; AVX1-NEXT: vmovdqa %ymm0, %ymm2 -; AVX1-NEXT: vpshufb %xmm13, %xmm2, %xmm7 -; AVX1-NEXT: vpor %xmm7, %xmm6, %xmm7 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = <0,3,6,9,12,15,u,u,u,u,u,u,u,u,u,u> -; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm8 -; AVX1-NEXT: vmovdqu %ymm2, -{{[0-9]+}}(%rsp) # 32-byte Spill -; AVX1-NEXT: vpshufb %xmm5, %xmm8, %xmm4 -; AVX1-NEXT: vmovdqa %xmm8, -{{[0-9]+}}(%rsp) # 16-byte Spill -; AVX1-NEXT: 
vinsertf128 $1, %xmm4, %ymm7, %ymm4 -; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,65535,0,0,0,0,0] -; AVX1-NEXT: vandnps %ymm3, %ymm0, %ymm3 -; AVX1-NEXT: vandps %ymm0, %ymm4, %ymm4 -; AVX1-NEXT: vorps %ymm3, %ymm4, %ymm1 -; AVX1-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill -; AVX1-NEXT: vmovdqu 160(%rdi), %ymm14 -; AVX1-NEXT: vextractf128 $1, %ymm14, %xmm7 -; AVX1-NEXT: vpshufb %xmm10, %xmm7, %xmm3 -; AVX1-NEXT: vpshufb %xmm9, %xmm14, %xmm4 -; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm1 -; AVX1-NEXT: vmovdqu 96(%rdi), %ymm9 -; AVX1-NEXT: vextractf128 $1, %ymm9, %xmm3 -; AVX1-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,zero,zero,zero,xmm3[2,5,8,11,14,u,u,u,u,u] -; AVX1-NEXT: vpshufb %xmm12, %xmm9, %xmm6 -; AVX1-NEXT: vpor %xmm4, %xmm6, %xmm4 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,2,3,4,5,6,7,8,9,10,128,128,128,128,128] -; AVX1-NEXT: vpshufb %xmm10, %xmm4, %xmm4 -; AVX1-NEXT: vmovdqu 128(%rdi), %ymm12 -; AVX1-NEXT: vpshufb %xmm13, %xmm12, %xmm6 -; AVX1-NEXT: vpor %xmm6, %xmm4, %xmm6 -; AVX1-NEXT: vextractf128 $1, %ymm12, %xmm4 -; AVX1-NEXT: vpshufb %xmm5, %xmm4, %xmm5 -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5 -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 -; AVX1-NEXT: vandnps %ymm1, %ymm0, %ymm1 -; AVX1-NEXT: vandps %ymm0, %ymm5, %ymm0 -; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0 -; AVX1-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill -; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm11[u,u,u,u,u],zero,zero,zero,zero,zero,zero,xmm11[2,5,8,11,14] -; AVX1-NEXT: vmovdqu -{{[0-9]+}}(%rsp), %ymm11 # 32-byte Reload -; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm11[u,u,u,u,u,0,3,6,9,12,15],zero,zero,zero,zero,zero -; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0 -; AVX1-NEXT: vpshufb {{.*#+}} xmm5 = xmm15[1,4,7,10,13],zero,zero,zero,zero,zero,zero,xmm15[u,u,u,u,u] -; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <128,128,128,128,128,0,3,6,9,12,15,u,u,u,u,u> -; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm6 # 16-byte Reload -; AVX1-NEXT: vpshufb %xmm1, %xmm6, %xmm13 -; AVX1-NEXT: vpor %xmm5, %xmm13, %xmm5 -; AVX1-NEXT: vpshufb %xmm10, %xmm5, %xmm5 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm13 = [128,128,128,128,128,128,128,128,128,128,128,2,5,8,11,14] -; AVX1-NEXT: vpshufb %xmm13, %xmm2, %xmm15 -; AVX1-NEXT: vpor %xmm15, %xmm5, %xmm5 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm15 = <1,4,7,10,13,u,u,u,u,u,u,u,u,u,u,u> -; AVX1-NEXT: vpshufb %xmm15, %xmm8, %xmm10 -; AVX1-NEXT: vinsertf128 $1, %xmm10, %ymm5, %ymm5 -; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm10 -; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0] -; AVX1-NEXT: vandnps %ymm10, %ymm0, %ymm10 -; AVX1-NEXT: vandps %ymm0, %ymm5, %ymm5 -; AVX1-NEXT: vorps %ymm10, %ymm5, %ymm2 -; AVX1-NEXT: vmovups %ymm2, {{[0-9]+}}(%rsp) # 32-byte Spill -; AVX1-NEXT: vpshufb {{.*#+}} xmm5 = xmm7[u,u,u,u,u],zero,zero,zero,zero,zero,zero,xmm7[2,5,8,11,14] -; AVX1-NEXT: vpshufb {{.*#+}} xmm10 = xmm14[u,u,u,u,u,0,3,6,9,12,15],zero,zero,zero,zero,zero -; AVX1-NEXT: vpor %xmm5, %xmm10, %xmm5 -; AVX1-NEXT: vpshufb {{.*#+}} xmm6 = xmm9[1,4,7,10,13],zero,zero,zero,zero,zero,zero,xmm9[u,u,u,u,u] -; AVX1-NEXT: vpshufb %xmm1, %xmm3, %xmm1 -; AVX1-NEXT: vpor %xmm6, %xmm1, %xmm1 -; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,7,8,9,10],zero,zero,zero,zero,zero -; AVX1-NEXT: vpshufb %xmm13, %xmm12, %xmm6 -; AVX1-NEXT: vpor %xmm6, %xmm1, %xmm1 -; AVX1-NEXT: vpshufb %xmm15, %xmm4, %xmm6 -; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm1, %ymm1 -; AVX1-NEXT: 
vinsertf128 $1, %xmm5, %ymm0, %ymm5 -; AVX1-NEXT: vandnps %ymm5, %ymm0, %ymm5 -; AVX1-NEXT: vandps %ymm0, %ymm1, %ymm1 -; AVX1-NEXT: vorps %ymm5, %ymm1, %ymm13 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm10 = -; AVX1-NEXT: vpshufb %xmm10, %xmm14, %xmm5 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm14 = -; AVX1-NEXT: vpshufb %xmm14, %xmm7, %xmm7 -; AVX1-NEXT: vpor %xmm5, %xmm7, %xmm5 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm15 = <128,128,128,128,128,1,4,7,10,13,u,u,u,u,u,u> -; AVX1-NEXT: vpshufb %xmm15, %xmm3, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = <2,5,8,11,14,128,128,128,128,128,u,u,u,u,u,u> -; AVX1-NEXT: vpshufb %xmm8, %xmm9, %xmm6 -; AVX1-NEXT: vpor %xmm3, %xmm6, %xmm3 -; AVX1-NEXT: vpxor %xmm9, %xmm9, %xmm9 -; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm9[5,6,7] -; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [128,128,128,128,128,128,128,128,128,128,0,3,6,9,12,15] -; AVX1-NEXT: vpshufb %xmm7, %xmm12, %xmm1 -; AVX1-NEXT: vpor %xmm1, %xmm3, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = <2,5,8,11,14,u,u,u,u,u,u,u,u,u,u,u> -; AVX1-NEXT: vpshufb %xmm6, %xmm4, %xmm3 -; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1 -; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm3 -; AVX1-NEXT: vandnps %ymm3, %ymm0, %ymm3 -; AVX1-NEXT: vandps %ymm0, %ymm1, %ymm1 -; AVX1-NEXT: vorps %ymm3, %ymm1, %ymm3 -; AVX1-NEXT: vpshufb %xmm10, %xmm11, %xmm1 -; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload +; AVX1-NEXT: vmovdqu (%rdi), %xmm11 +; AVX1-NEXT: vmovdqu 16(%rdi), %xmm10 +; AVX1-NEXT: vmovdqu 32(%rdi), %xmm8 +; AVX1-NEXT: vmovdqu 48(%rdi), %xmm3 +; AVX1-NEXT: vmovdqu 64(%rdi), %xmm12 +; AVX1-NEXT: vmovdqu 80(%rdi), %xmm9 +; AVX1-NEXT: vmovdqu 96(%rdi), %xmm6 +; AVX1-NEXT: vmovdqu 112(%rdi), %xmm14 +; AVX1-NEXT: vmovdqu 128(%rdi), %xmm13 +; AVX1-NEXT: vmovdqu 144(%rdi), %xmm5 +; AVX1-NEXT: vmovdqu 160(%rdi), %xmm1 +; AVX1-NEXT: vmovdqu 176(%rdi), %xmm15 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13] +; AVX1-NEXT: vpshufb %xmm4, %xmm6, %xmm6 +; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm5 +; AVX1-NEXT: vpshufb %xmm4, %xmm11, %xmm2 +; AVX1-NEXT: vpshufb %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpshufb %xmm4, %xmm10, %xmm11 +; AVX1-NEXT: vpshufb %xmm4, %xmm12, %xmm12 +; AVX1-NEXT: vpshufb %xmm4, %xmm14, %xmm14 +; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm1 +; AVX1-NEXT: vpshufb %xmm4, %xmm13, %xmm0 +; AVX1-NEXT: vpshufb %xmm4, %xmm15, %xmm7 +; AVX1-NEXT: vpshufb %xmm4, %xmm8, %xmm13 +; AVX1-NEXT: vpshufb %xmm4, %xmm9, %xmm4 +; AVX1-NEXT: vpalignr {{.*#+}} xmm15 = xmm4[11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vpalignr {{.*#+}} xmm10 = xmm13[11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vpalignr {{.*#+}} xmm9 = xmm7[11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vpalignr {{.*#+}} xmm8 = xmm0[11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vpalignr {{.*#+}} xmm6 = xmm6[11,12,13,14,15],xmm14[0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm7 +; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm3[11,12,13,14,15],xmm12[0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm2[11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm14[11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm14 +; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm12[11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vpalignr 
{{.*#+}} xmm11 = xmm11[11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm11, %ymm12 +; AVX1-NEXT: vmovaps {{.*#+}} ymm13 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0] +; AVX1-NEXT: vandnps %ymm12, %ymm13, %ymm12 +; AVX1-NEXT: vandps %ymm13, %ymm14, %ymm14 +; AVX1-NEXT: vorps %ymm12, %ymm14, %ymm12 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm14 +; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm15[11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vmovdqa %xmm4, -{{[0-9]+}}(%rsp) # 16-byte Spill +; AVX1-NEXT: vandnps %ymm14, %ymm13, %ymm14 +; AVX1-NEXT: vandps %ymm13, %ymm7, %ymm7 +; AVX1-NEXT: vorps %ymm14, %ymm7, %ymm13 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm14 = [128,128,128,128,128,128,11,12,13,14,15,128,128,128,128,128] +; AVX1-NEXT: vpshufb %xmm14, %xmm3, %xmm3 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [5,6,7,8,9,10,128,128,128,128,128,0,1,2,3,4] +; AVX1-NEXT: vpshufb %xmm7, %xmm15, %xmm4 +; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3 +; AVX1-NEXT: vpalignr {{.*#+}} xmm11 = xmm10[11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7,8,9,10] ; AVX1-NEXT: vpshufb %xmm14, %xmm2, %xmm2 -; AVX1-NEXT: vpor %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload -; AVX1-NEXT: vpshufb %xmm15, %xmm2, %xmm2 -; AVX1-NEXT: vmovdqu (%rsp), %ymm4 # 32-byte Reload -; AVX1-NEXT: vpshufb %xmm8, %xmm4, %xmm4 +; AVX1-NEXT: vpshufb %xmm7, %xmm10, %xmm4 ; AVX1-NEXT: vpor %xmm2, %xmm4, %xmm2 -; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm9[5,6,7] -; AVX1-NEXT: vmovdqu -{{[0-9]+}}(%rsp), %ymm4 # 32-byte Reload -; AVX1-NEXT: vpshufb %xmm7, %xmm4, %xmm4 -; AVX1-NEXT: vpor %xmm4, %xmm2, %xmm2 -; AVX1-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm4 # 16-byte Reload -; AVX1-NEXT: vpshufb %xmm6, %xmm4, %xmm4 -; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2 -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 -; AVX1-NEXT: vandnps %ymm1, %ymm0, %ymm1 -; AVX1-NEXT: vandps %ymm0, %ymm2, %ymm0 -; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0 -; AVX1-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm5 # 32-byte Reload -; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm1 -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 -; AVX1-NEXT: vpaddb %xmm2, %xmm1, %xmm1 -; AVX1-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm4 # 32-byte Reload -; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm2 -; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpaddb %xmm0, %xmm5, %xmm0 -; AVX1-NEXT: vpaddb %xmm0, %xmm4, %xmm0 -; AVX1-NEXT: vextractf128 $1, %ymm13, %xmm2 -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm1 -; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vmovdqu {{[0-9]+}}(%rsp), %ymm4 # 32-byte Reload -; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm2 -; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1 -; AVX1-NEXT: vpaddb %xmm3, %xmm13, %xmm2 -; AVX1-NEXT: vpaddb %xmm2, %xmm4, %xmm2 +; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm9[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vpshufb %xmm14, %xmm5, %xmm4 +; AVX1-NEXT: vpshufb %xmm7, %xmm9, %xmm5 +; AVX1-NEXT: vpor %xmm4, %xmm5, %xmm4 +; AVX1-NEXT: vpshufb %xmm14, %xmm6, %xmm5 +; AVX1-NEXT: vpalignr {{.*#+}} xmm6 = xmm8[11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10] +; AVX1-NEXT: vpshufb %xmm7, %xmm8, %xmm0 +; AVX1-NEXT: vpor %xmm5, %xmm0, %xmm5 +; AVX1-NEXT: vextractf128 $1, %ymm13, %xmm0 +; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpaddb %xmm0, %xmm4, %xmm1 +; AVX1-NEXT: vextractf128 $1, %ymm12, %xmm0 +; AVX1-NEXT: vpaddb -{{[0-9]+}}(%rsp), %xmm0, %xmm0 # 16-byte Folded Reload +; 
AVX1-NEXT: vpaddb %xmm0, %xmm3, %xmm0 +; AVX1-NEXT: vpaddb %xmm11, %xmm12, %xmm3 +; AVX1-NEXT: vpaddb %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0 +; AVX1-NEXT: vpaddb %xmm6, %xmm13, %xmm2 +; AVX1-NEXT: vpaddb %xmm2, %xmm5, %xmm2 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 -; AVX1-NEXT: addq $152, %rsp ; AVX1-NEXT: retq ; ; AVX2-LABEL: interleaved_load_vf64_i8_stride3: ; AVX2: # BB#0: -; AVX2-NEXT: vmovdqu 160(%rdi), %ymm7 -; AVX2-NEXT: vmovdqu 128(%rdi), %ymm5 -; AVX2-NEXT: vmovdqu (%rdi), %ymm14 -; AVX2-NEXT: vmovdqu 32(%rdi), %ymm12 -; AVX2-NEXT: vmovdqu 64(%rdi), %ymm3 -; AVX2-NEXT: vmovdqu 96(%rdi), %ymm6 -; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = <255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0> -; AVX2-NEXT: vpblendvb %ymm1, %ymm14, %ymm12, %ymm2 -; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm2[2,3,0,1] -; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255] -; AVX2-NEXT: # ymm9 = mem[0,1,0,1] -; AVX2-NEXT: vpblendvb %ymm9, %ymm2, %ymm8, %ymm2 -; AVX2-NEXT: vmovdqa {{.*#+}} ymm10 = <0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,u,u,u,u,u,u,u,u,u,u> -; AVX2-NEXT: vpshufb %ymm10, %ymm2, %ymm8 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm11 = -; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2 -; AVX2-NEXT: vpshufb %xmm11, %xmm2, %xmm4 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm13 = -; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,3,0,1] -; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm3 -; AVX2-NEXT: vpshufb %xmm13, %xmm3, %xmm0 -; AVX2-NEXT: vpor %xmm4, %xmm0, %xmm0 -; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0 -; AVX2-NEXT: vmovdqa {{.*#+}} ymm15 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0] -; AVX2-NEXT: vpblendvb %ymm15, %ymm8, %ymm0, %ymm0 -; AVX2-NEXT: vmovdqu %ymm0, -{{[0-9]+}}(%rsp) # 32-byte Spill -; AVX2-NEXT: vpblendvb %ymm1, %ymm6, %ymm5, %ymm0 -; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm0[2,3,0,1] -; AVX2-NEXT: vpblendvb %ymm9, %ymm0, %ymm4, %ymm0 -; AVX2-NEXT: vpshufb %ymm10, %ymm0, %ymm0 -; AVX2-NEXT: vextracti128 $1, %ymm7, %xmm10 -; AVX2-NEXT: vpshufb %xmm11, %xmm10, %xmm4 -; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,3,0,1] -; AVX2-NEXT: vextracti128 $1, %ymm7, %xmm7 -; AVX2-NEXT: vpshufb %xmm13, %xmm7, %xmm1 -; AVX2-NEXT: vpor %xmm4, %xmm1, %xmm1 -; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1 -; AVX2-NEXT: vpblendvb %ymm15, %ymm0, %ymm1, %ymm0 -; AVX2-NEXT: vmovdqu %ymm0, -{{[0-9]+}}(%rsp) # 32-byte Spill -; AVX2-NEXT: vmovdqa {{.*#+}} ymm13 = -; AVX2-NEXT: vpblendvb %ymm13, %ymm14, %ymm12, %ymm1 -; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm1[2,3,0,1] -; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0] -; AVX2-NEXT: # ymm11 = mem[0,1,0,1] -; AVX2-NEXT: vpblendvb %ymm11, %ymm1, %ymm4, %ymm1 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm8 = -; AVX2-NEXT: vpshufb %xmm8, %xmm2, %xmm0 -; AVX2-NEXT: vpblendvb %ymm13, %ymm6, %ymm5, %ymm13 -; AVX2-NEXT: vpermq {{.*#+}} ymm15 = ymm13[2,3,0,1] -; AVX2-NEXT: vpblendvb %ymm11, %ymm13, %ymm15, %ymm11 -; AVX2-NEXT: vmovdqa {{.*#+}} xmm13 = -; AVX2-NEXT: vpshufb %xmm13, %xmm3, %xmm4 -; AVX2-NEXT: vpor %xmm0, %xmm4, %xmm0 -; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = <1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,u,u,u,u,u,u,u,u,u,u,u> -; AVX2-NEXT: vpshufb %ymm4, %ymm1, %ymm1 -; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0 -; AVX2-NEXT: vmovdqa {{.*#+}} ymm15 = 
[255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
-; AVX2-NEXT: vpblendvb %ymm15, %ymm1, %ymm0, %ymm9
-; AVX2-NEXT: vpshufb %ymm4, %ymm11, %ymm1
-; AVX2-NEXT: vpshufb %xmm8, %xmm10, %xmm4
-; AVX2-NEXT: vpshufb %xmm13, %xmm7, %xmm0
-; AVX2-NEXT: vpor %xmm4, %xmm0, %xmm0
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-NEXT: vpblendvb %ymm15, %ymm1, %ymm0, %ymm8
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = <255,u,0,255,u,0,255,u,0,255,u,0,255,u,0,255,u,0,255,u,0,255,u,0,255,u,0,255,u,0,255,u>
-; AVX2-NEXT: vpblendvb %ymm1, %ymm5, %ymm6, %ymm4
-; AVX2-NEXT: vpblendvb %ymm1, %ymm12, %ymm14, %ymm1
-; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
-; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
-; AVX2-NEXT: # ymm6 = mem[0,1,0,1]
-; AVX2-NEXT: vpblendvb %ymm6, %ymm4, %ymm5, %ymm4
-; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm1[2,3,0,1]
-; AVX2-NEXT: vpblendvb %ymm6, %ymm1, %ymm5, %ymm1
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 =
-; AVX2-NEXT: vpshufb %xmm5, %xmm10, %xmm6
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 =
-; AVX2-NEXT: vpshufb %xmm0, %xmm7, %xmm7
-; AVX2-NEXT: vpor %xmm7, %xmm6, %xmm6
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm7 = <2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,u,u,u,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %ymm7, %ymm4, %ymm4
-; AVX2-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
-; AVX2-NEXT: vpblendvb %ymm15, %ymm4, %ymm6, %ymm4
-; AVX2-NEXT: vpshufb %ymm7, %ymm1, %ymm1
-; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm2
-; AVX2-NEXT: vpshufb %xmm0, %xmm3, %xmm0
-; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-NEXT: vpblendvb %ymm15, %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpaddb %ymm4, %ymm8, %ymm1
-; AVX2-NEXT: vpaddb -{{[0-9]+}}(%rsp), %ymm1, %ymm1 # 32-byte Folded Reload
-; AVX2-NEXT: vpaddb %ymm0, %ymm9, %ymm0
-; AVX2-NEXT: vpaddb -{{[0-9]+}}(%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; AVX2-NEXT: vmovdqu 16(%rdi), %xmm1
+; AVX2-NEXT: vmovdqu 32(%rdi), %xmm2
+; AVX2-NEXT: vmovdqu 96(%rdi), %xmm3
+; AVX2-NEXT: vmovdqu 112(%rdi), %xmm4
+; AVX2-NEXT: vmovdqu 128(%rdi), %xmm5
+; AVX2-NEXT: vinserti128 $1, 48(%rdi), %ymm0, %ymm0
+; AVX2-NEXT: vinserti128 $1, 64(%rdi), %ymm1, %ymm1
+; AVX2-NEXT: vinserti128 $1, 80(%rdi), %ymm2, %ymm2
+; AVX2-NEXT: vinserti128 $1, 144(%rdi), %ymm3, %ymm3
+; AVX2-NEXT: vinserti128 $1, 160(%rdi), %ymm4, %ymm4
+; AVX2-NEXT: vinserti128 $1, 176(%rdi), %ymm5, %ymm5
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
+; AVX2-NEXT: vpshufb %ymm6, %ymm3, %ymm3
+; AVX2-NEXT: vpshufb %ymm6, %ymm0, %ymm0
+; AVX2-NEXT: vpshufb %ymm6, %ymm1, %ymm1
+; AVX2-NEXT: vpshufb %ymm6, %ymm4, %ymm4
+; AVX2-NEXT: vpshufb %ymm6, %ymm5, %ymm5
+; AVX2-NEXT: vpshufb %ymm6, %ymm2, %ymm2
+; AVX2-NEXT: vpalignr {{.*#+}} ymm6 = ymm2[11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10],ymm2[27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT: vpalignr {{.*#+}} ymm7 = ymm5[11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10],ymm5[27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT: vpalignr {{.*#+}} ymm3 = ymm3[11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10],ymm1[27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm4[11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7,8,9,10],ymm4[27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT: vpalignr {{.*#+}} ymm4 = ymm7[11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10],ymm7[27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT: vpalignr {{.*#+}} ymm5 = ymm6[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm6[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
+; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; AVX2-NEXT: # ymm8 = mem[0,1,0,1]
+; AVX2-NEXT: vpblendvb %ymm8, %ymm0, %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm5, %ymm1, %ymm1
+; AVX2-NEXT: vpblendvb %ymm8, %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpaddb %ymm4, %ymm2, %ymm2
+; AVX2-NEXT: vpblendvb %ymm8, %ymm6, %ymm0, %ymm0
+; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,21,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20]
+; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendvb %ymm8, %ymm7, %ymm3, %ymm1
+; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,21,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20]
+; AVX2-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: interleaved_load_vf64_i8_stride3:
; AVX512: # BB#0:
-; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0
-; AVX512-NEXT: vmovdqu64 64(%rdi), %zmm9
-; AVX512-NEXT: vmovdqu64 128(%rdi), %zmm1
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm10 =
-; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm14
-; AVX512-NEXT: vpblendvb %ymm10, %ymm1, %ymm14, %ymm3
-; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 =
-; AVX512-NEXT: vpblendvb %ymm5, %ymm3, %ymm4, %ymm3
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,1,4,7,10,13,16,19,22,25,28,31,18,21,24,27,30,17,20,23,26,29]
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm11
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = <255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0>
-; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm8
-; AVX512-NEXT: vpblendvb %ymm2, %ymm0, %ymm8, %ymm4
-; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm4[2,3,0,1]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm6 = <255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,u,u,255,u,u,255,u,u,255,u,u,255,u,u,255>
-; AVX512-NEXT: vpblendvb %ymm6, %ymm4, %ymm5, %ymm4
-; AVX512-NEXT: vpshufb {{.*#+}} ymm5 = ymm4[0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,16,19,22,25,28,31,u,u,u,u,u,u,u,u,u,u]
-; AVX512-NEXT: vextracti128 $1, %ymm9, %xmm12
-; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm12[u,u,u,u,u,u],zero,zero,zero,zero,zero,xmm12[1,4,7,10,13]
-; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm9[2,3,0,1]
-; AVX512-NEXT: vextracti128 $1, %ymm6, %xmm13
-; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm13[u,u,u,u,u,u,2,5,8,11,14],zero,zero,zero,zero,zero
-; AVX512-NEXT: vpor %xmm7, %xmm4, %xmm4
-; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0]
-; AVX512-NEXT: vpblendvb %ymm7, %ymm5, %ymm4, %ymm4
-; AVX512-NEXT: vextracti64x4 $1, %zmm9, %ymm7
-; AVX512-NEXT: vextracti128 $1, %ymm7, %xmm5
-; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,xmm5[2,5,8,11,14,u,u,u,u,u]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm7[0,3,6,9,12,15],zero,zero,zero,zero,zero,xmm7[u,u,u,u,u]
-; AVX512-NEXT: vpor %xmm6, %xmm2, %xmm2
-; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm4, %zmm2
-; AVX512-NEXT: movabsq $-8796093022208, %rax # imm = 0xFFFFF80000000000
+; AVX512-NEXT: vmovdqu (%rdi), %xmm0
+; AVX512-NEXT: vmovdqu 16(%rdi), %xmm1
+; AVX512-NEXT: vmovdqu 32(%rdi), %xmm2
+; AVX512-NEXT: vmovdqu 96(%rdi), %xmm3
+; AVX512-NEXT: vmovdqu 112(%rdi), %xmm4
+; AVX512-NEXT: vmovdqu 128(%rdi), %xmm5
+; AVX512-NEXT: vinserti128 $1, 48(%rdi), %ymm0, %ymm0
+; AVX512-NEXT: vinserti128 $1, 64(%rdi), %ymm1, %ymm1
+; AVX512-NEXT: vinserti128 $1, 80(%rdi), %ymm2, %ymm2
+; AVX512-NEXT: vinserti128 $1, 144(%rdi), %ymm3, %ymm3
+; AVX512-NEXT: vinserti128 $1, 160(%rdi), %ymm4, %ymm4
+; AVX512-NEXT: vinserti128 $1, 176(%rdi), %ymm5, %ymm5
+; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
+; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm1, %zmm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm2, %zmm2
+; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
+; AVX512-NEXT: vpshufb %zmm3, %zmm0, %zmm0
+; AVX512-NEXT: vpshufb %zmm3, %zmm1, %zmm1
+; AVX512-NEXT: vpshufb %zmm3, %zmm2, %zmm2
+; AVX512-NEXT: vpalignr {{.*#+}} zmm3 = zmm2[11,12,13,14,15],zmm0[0,1,2,3,4,5,6,7,8,9,10],zmm2[27,28,29,30,31],zmm0[16,17,18,19,20,21,22,23,24,25,26],zmm2[43,44,45,46,47],zmm0[32,33,34,35,36,37,38,39,40,41,42],zmm2[59,60,61,62,63],zmm0[48,49,50,51,52,53,54,55,56,57,58]
+; AVX512-NEXT: vpalignr {{.*#+}} zmm0 = zmm0[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm0[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm0[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm0[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
+; AVX512-NEXT: movabsq $-576188069258921984, %rax # imm = 0xF800F800F800F800
; AVX512-NEXT: kmovq %rax, %k1
-; AVX512-NEXT: vmovdqu8 %zmm11, %zmm2 {%k1}
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = <255,u,0,255,u,0,255,u,0,255,u,0,255,u,0,255,u,0,255,u,0,255,u,0,255,u,0,255,u,0,255,u>
-; AVX512-NEXT: vpblendvb %ymm11, %ymm14, %ymm1, %ymm4
-; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm4[2,3,0,1]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm15 =
-; AVX512-NEXT: vpblendvb %ymm15, %ymm4, %ymm6, %ymm4
-; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,2,5,8,11,14,17,20,23,26,29,16,19,22,25,28,31,18,21,24,27,30]
-; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm16
-; AVX512-NEXT: vpblendvb %ymm10, %ymm0, %ymm8, %ymm6
-; AVX512-NEXT: vpermq {{.*#+}} ymm10 = ymm6[2,3,0,1]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm15 = <0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,u,255,u,u,255,u,u,255,u,u,255,u,u,255,u,u>
-; AVX512-NEXT: vpblendvb %ymm15, %ymm6, %ymm10, %ymm6
-; AVX512-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,17,20,23,26,29,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-NEXT: vextracti128 $1, %ymm9, %xmm4
-; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,u,u,u],zero,zero,zero,zero,zero,zero,xmm4[2,5,8,11,14]
-; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,0,1]
-; AVX512-NEXT: vextracti128 $1, %ymm9, %xmm3
-; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,u,u,0,3,6,9,12,15],zero,zero,zero,zero,zero
-; AVX512-NEXT: vpor %xmm4, %xmm3, %xmm3
-; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,0]
-; AVX512-NEXT: vpblendvb %ymm9, %ymm6, %ymm3, %ymm3
-; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm7[1,4,7,10,13],zero,zero,zero,zero,zero,zero,xmm7[u,u,u,u,u]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,zero,zero,xmm5[0,3,6,9,12,15,u,u,u,u,u]
-; AVX512-NEXT: vpor %xmm6, %xmm4, %xmm4
-; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm3
-; AVX512-NEXT: vmovdqu8 %zmm16, %zmm3 {%k1}
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = <255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0,u,255,0>
-; AVX512-NEXT: vpblendvb %ymm4, %ymm1, %ymm14, %ymm1
-; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm1[2,3,0,1]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm6 = <255,u,u,255,u,u,255,u,u,255,u,u,255,u,u,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255>
-; AVX512-NEXT: vpblendvb %ymm6, %ymm1, %ymm4, %ymm1
-; AVX512-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,0,3,6,9,12,15,18,21,24,27,30,17,20,23,26,29,16,19,22,25,28,31]
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm1
-; AVX512-NEXT: vpblendvb %ymm11, %ymm8, %ymm0, %ymm0
-; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm0[2,3,0,1]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm6 = <255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,u,u,255,u,u,255,u,u,255,u,u,255,u,u,255,u>
-; AVX512-NEXT: vpblendvb %ymm6, %ymm0, %ymm4, %ymm0
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,18,21,24,27,30,u,u,u,u,u,u,u,u,u,u,u]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm12[u,u,u,u,u],zero,zero,zero,zero,zero,xmm12[0,3,6,9,12,15]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm13[u,u,u,u,u,1,4,7,10,13],zero,zero,zero,zero,zero,zero
-; AVX512-NEXT: vpor %xmm6, %xmm4, %xmm4
-; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
-; AVX512-NEXT: vpblendvb %ymm9, %ymm0, %ymm4, %ymm0
-; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,zero,zero,xmm5[1,4,7,10,13,u,u,u,u,u,u]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm7[2,5,8,11,14],zero,zero,zero,zero,zero,xmm7[u,u,u,u,u,u]
-; AVX512-NEXT: vpor %xmm4, %xmm5, %xmm4
-; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm0
-; AVX512-NEXT: movl $-2097152, %eax # imm = 0xFFE00000
-; AVX512-NEXT: kmovd %eax, %k1
-; AVX512-NEXT: vmovdqu16 %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: vpaddb %zmm0, %zmm3, %zmm0
-; AVX512-NEXT: vpaddb %zmm0, %zmm2, %zmm0
+; AVX512-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
+; AVX512-NEXT: # ymm4 = mem[0,1,0,1]
+; AVX512-NEXT: vpblendvb %ymm4, %ymm3, %ymm0, %ymm5
+; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm6
+; AVX512-NEXT: vpalignr {{.*#+}} zmm0 {%k1} = zmm1[11,12,13,14,15],zmm2[0,1,2,3,4,5,6,7,8,9,10],zmm1[27,28,29,30,31],zmm2[16,17,18,19,20,21,22,23,24,25,26],zmm1[43,44,45,46,47],zmm2[32,33,34,35,36,37,38,39,40,41,42],zmm1[59,60,61,62,63],zmm2[48,49,50,51,52,53,54,55,56,57,58]
+; AVX512-NEXT: vpalignr {{.*#+}} zmm1 = zmm1[11,12,13,14,15],zmm2[0,1,2,3,4,5,6,7,8,9,10],zmm1[27,28,29,30,31],zmm2[16,17,18,19,20,21,22,23,24,25,26],zmm1[43,44,45,46,47],zmm2[32,33,34,35,36,37,38,39,40,41,42],zmm1[59,60,61,62,63],zmm2[48,49,50,51,52,53,54,55,56,57,58]
+; AVX512-NEXT: vpalignr {{.*#+}} zmm1 = zmm3[11,12,13,14,15],zmm1[0,1,2,3,4,5,6,7,8,9,10],zmm3[27,28,29,30,31],zmm1[16,17,18,19,20,21,22,23,24,25,26],zmm3[43,44,45,46,47],zmm1[32,33,34,35,36,37,38,39,40,41,42],zmm3[59,60,61,62,63],zmm1[48,49,50,51,52,53,54,55,56,57,58]
+; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vpalignr {{.*#+}} ymm1 = ymm5[5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,21,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20]
+; AVX512-NEXT: vextracti64x4 $1, %zmm3, %ymm2
+; AVX512-NEXT: vpblendvb %ymm4, %ymm2, %ymm6, %ymm2
+; AVX512-NEXT: vpalignr {{.*#+}} ymm2 = ymm2[5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,21,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20]
+; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1
+; AVX512-NEXT: vpaddb %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%wide.vec = load <192 x i8>, <192 x i8>* %ptr, align 1
%v1 = shufflevector <192 x i8> %wide.vec, <192 x i8> undef, <64 x i32>
Index: test/Transforms/InterleavedAccess/X86/interleavedLoad.ll
===================================================================
--- test/Transforms/InterleavedAccess/X86/interleavedLoad.ll
+++ test/Transforms/InterleavedAccess/X86/interleavedLoad.ll
@@ -99,12 +99,53 @@
define <64 x i8> @interleaved_load_vf64_i8_stride3(<192 x i8>* %ptr){
; AVX2-LABEL: @interleaved_load_vf64_i8_stride3(
-; AVX2-NEXT: [[WIDE_VEC:%.*]] = load <192 x i8>, <192 x i8>* [[PTR:%.*]], align 1
-; AVX2-NEXT: [[V1:%.*]] = shufflevector <192 x i8> [[WIDE_VEC]], <192 x i8> undef, <64 x i32>
-; AVX2-NEXT: [[V2:%.*]] = shufflevector <192 x i8> [[WIDE_VEC]], <192 x i8> undef, <64 x i32>
-; AVX2-NEXT: [[V3:%.*]] = shufflevector <192 x i8> [[WIDE_VEC]], <192 x i8> undef, <64 x i32>
-; AVX2-NEXT: [[ADD1:%.*]] = add <64 x i8> [[V1]], [[V2]]
-; AVX2-NEXT: [[ADD2:%.*]] = add <64 x i8> [[V3]], [[ADD1]]
+; AVX2-NEXT: [[TMP1:%.*]] = bitcast <192 x i8>* [[PTR:%.*]] to <16 x i8>*
+; AVX2-NEXT: [[TMP2:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 0
+; AVX2-NEXT: [[TMP3:%.*]] = load <16 x i8>, <16 x i8>* [[TMP2]], align 1
+; AVX2-NEXT: [[TMP4:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 1
+; AVX2-NEXT: [[TMP5:%.*]] = load <16 x i8>, <16 x i8>* [[TMP4]], align 1
+; AVX2-NEXT: [[TMP6:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 2
+; AVX2-NEXT: [[TMP7:%.*]] = load <16 x i8>, <16 x i8>* [[TMP6]], align 1
+; AVX2-NEXT: [[TMP8:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 3
+; AVX2-NEXT: [[TMP9:%.*]] = load <16 x i8>, <16 x i8>* [[TMP8]], align 1
+; AVX2-NEXT: [[TMP10:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 4
+; AVX2-NEXT: [[TMP11:%.*]] = load <16 x i8>, <16 x i8>* [[TMP10]], align 1
+; AVX2-NEXT: [[TMP12:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 5
+; AVX2-NEXT: [[TMP13:%.*]] = load <16 x i8>, <16 x i8>* [[TMP12]], align 1
+; AVX2-NEXT: [[TMP14:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 6
+; AVX2-NEXT: [[TMP15:%.*]] = load <16 x i8>, <16 x i8>* [[TMP14]], align 1
+; AVX2-NEXT: [[TMP16:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 7
+; AVX2-NEXT: [[TMP17:%.*]] = load <16 x i8>, <16 x i8>* [[TMP16]], align 1
+; AVX2-NEXT: [[TMP18:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 8
+; AVX2-NEXT: [[TMP19:%.*]] = load <16 x i8>, <16 x i8>* [[TMP18]], align 1
+; AVX2-NEXT: [[TMP20:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 9
+; AVX2-NEXT: [[TMP21:%.*]] = load <16 x i8>, <16 x i8>* [[TMP20]], align 1
+; AVX2-NEXT: [[TMP22:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 10
+; AVX2-NEXT: [[TMP23:%.*]] = load <16 x i8>, <16 x i8>* [[TMP22]], align 1
+; AVX2-NEXT: [[TMP24:%.*]] = getelementptr <16 x i8>, <16 x i8>* [[TMP1]], i32 11
+; AVX2-NEXT: [[TMP25:%.*]] = load <16 x i8>, <16 x i8>* [[TMP24]], align 1
+; AVX2-NEXT: [[TMP26:%.*]] = shufflevector <16 x i8> [[TMP3]], <16 x i8> [[TMP9]], <32 x i32>
+; AVX2-NEXT: [[TMP27:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> [[TMP11]], <32 x i32>
+; AVX2-NEXT: [[TMP28:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> [[TMP13]], <32 x i32>
+; AVX2-NEXT: [[TMP29:%.*]] = shufflevector <16 x i8> [[TMP15]], <16 x i8> [[TMP21]], <32 x i32>
+; AVX2-NEXT: [[TMP30:%.*]] = shufflevector <16 x i8> [[TMP17]], <16 x i8> [[TMP23]], <32 x i32>
+; AVX2-NEXT: [[TMP31:%.*]] = shufflevector <16 x i8> [[TMP19]], <16 x i8> [[TMP25]], <32 x i32>
+; AVX2-NEXT: [[TMP32:%.*]] = shufflevector <32 x i8> [[TMP26]], <32 x i8> [[TMP29]], <64 x i32>
+; AVX2-NEXT: [[TMP33:%.*]] = shufflevector <32 x i8> [[TMP27]], <32 x i8> [[TMP30]], <64 x i32>
+; AVX2-NEXT: [[TMP34:%.*]] = shufflevector <32 x i8> [[TMP28]], <32 x i8> [[TMP31]], <64 x i32>
+; AVX2-NEXT: [[TMP35:%.*]] = shufflevector <64 x i8> [[TMP32]], <64 x i8> undef, <64 x i32>
+; AVX2-NEXT: [[TMP36:%.*]] = shufflevector <64 x i8> [[TMP33]], <64 x i8> undef, <64 x i32>
+; AVX2-NEXT: [[TMP37:%.*]] = shufflevector <64 x i8> [[TMP34]], <64 x i8> undef, <64 x i32>
+; AVX2-NEXT: [[TMP38:%.*]] = shufflevector <64 x i8> [[TMP37]], <64 x i8> [[TMP35]], <64 x i32>
+; AVX2-NEXT: [[TMP39:%.*]] = shufflevector <64 x i8> [[TMP35]], <64 x i8> [[TMP36]], <64 x i32>
+; AVX2-NEXT: [[TMP40:%.*]] = shufflevector <64 x i8> [[TMP36]], <64 x i8> [[TMP37]], <64 x i32>
+; AVX2-NEXT: [[TMP41:%.*]] = shufflevector <64 x i8> [[TMP39]], <64 x i8> [[TMP38]], <64 x i32>
+; AVX2-NEXT: [[TMP42:%.*]] = shufflevector <64 x i8> [[TMP40]], <64 x i8> [[TMP39]], <64 x i32>
+; AVX2-NEXT: [[TMP43:%.*]] = shufflevector <64 x i8> [[TMP38]], <64 x i8> [[TMP40]], <64 x i32>
+; AVX2-NEXT: [[TMP44:%.*]] = shufflevector <64 x i8> [[TMP42]], <64 x i8> undef, <64 x i32>
+; AVX2-NEXT: [[TMP45:%.*]] = shufflevector <64 x i8> [[TMP41]], <64 x i8> undef, <64 x i32>
+; AVX2-NEXT: [[ADD1:%.*]] = add <64 x i8> [[TMP45]], [[TMP44]]
+; AVX2-NEXT: [[ADD2:%.*]] = add <64 x i8> [[TMP43]], [[ADD1]]
; AVX2-NEXT: ret <64 x i8> [[ADD2]]
;
%wide.vec = load <192 x i8>, <192 x i8>* %ptr, align 1
Index: test/Transforms/InterleavedAccess/X86/interleavedStore.ll
===================================================================
--- test/Transforms/InterleavedAccess/X86/interleavedStore.ll
+++ test/Transforms/InterleavedAccess/X86/interleavedStore.ll
@@ -119,10 +119,21 @@
; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <16 x i8> [[TMP11]], <16 x i8> undef, <16 x i32>
; CHECK-NEXT: [[TMP15:%.*]] = shufflevector <16 x i8> [[TMP12]], <16 x i8> undef, <16 x i32>
; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <16 x i8> [[TMP13]], <16 x i8> undef, <16 x i32>
-; CHECK-NEXT: [[TMP17:%.*]] = shufflevector <16 x i8> [[TMP14]], <16 x i8> [[TMP15]], <32 x i32>
-; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <16 x i8> [[TMP16]], <16 x i8> undef, <32 x i32>
-; CHECK-NEXT: [[TMP19:%.*]] = shufflevector <32 x i8> [[TMP17]], <32 x i8> [[TMP18]], <48 x i32>
-; CHECK-NEXT: store <48 x i8> [[TMP19]], <48 x i8>* [[P:%.*]], align 1
+; CHECK-NEXT: [[TMP17:%.*]] = shufflevector <16 x i8> [[TMP3]], <16 x i8> undef, <16 x i32>
+; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <16 x i8> [[TMP4]], <16 x i8> undef, <16 x i32>
+; CHECK-NEXT: [[TMP19:%.*]] = shufflevector <16 x i8> [[TMP17]], <16 x i8> [[TMP5]], <16 x i32>
+; CHECK-NEXT: [[TMP20:%.*]] = shufflevector <16 x i8> [[TMP18]], <16 x i8> [[TMP17]], <16 x i32>
+; CHECK-NEXT: [[TMP21:%.*]] = shufflevector <16 x i8> [[TMP5]], <16 x i8> [[TMP18]], <16 x i32>
+; CHECK-NEXT: [[TMP22:%.*]] = shufflevector <16 x i8> [[TMP19]], <16 x i8> [[TMP20]], <16 x i32>
+; CHECK-NEXT: [[TMP23:%.*]] = shufflevector <16 x i8> [[TMP20]], <16 x i8> [[TMP21]], <16 x i32>
+; CHECK-NEXT: [[TMP24:%.*]] = shufflevector <16 x i8> [[TMP21]], <16 x i8> [[TMP19]], <16 x i32>
+; CHECK-NEXT: [[TMP25:%.*]] = shufflevector <16 x i8> [[TMP22]], <16 x i8> undef, <16 x i32>
+; CHECK-NEXT: [[TMP26:%.*]] = shufflevector <16 x i8> [[TMP23]], <16 x i8> undef, <16 x i32>
+; CHECK-NEXT: [[TMP27:%.*]] = shufflevector <16 x i8> [[TMP24]], <16 x i8> undef, <16 x i32>
+; CHECK-NEXT: [[TMP28:%.*]] = shufflevector <16 x i8> [[TMP25]], <16 x i8> [[TMP26]], <32 x i32>
+; CHECK-NEXT: [[TMP29:%.*]] = shufflevector <16 x i8> [[TMP27]], <16 x i8> undef, <32 x i32>
+; CHECK-NEXT: [[TMP30:%.*]] = shufflevector <32 x i8> [[TMP28]], <32 x i8> [[TMP29]], <48 x i32>
+; CHECK-NEXT: store <48 x i8> [[TMP30]], <48 x i8>* [[P:%.*]], align 1
; CHECK-NEXT: ret void
;
%1 = shufflevector <16 x i8> %a, <16 x i8> %b, <32 x i32>
@@ -150,10 +161,21 @@
; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <32 x i8> [[TMP11]], <32 x i8> [[TMP12]], <32 x i32>
; CHECK-NEXT: [[TMP15:%.*]] = shufflevector <32 x i8> [[TMP13]], <32 x i8> [[TMP11]], <32 x i32>
; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <32 x i8> [[TMP12]], <32 x i8> [[TMP13]], <32 x i32>
-; CHECK-NEXT: [[TMP17:%.*]] = shufflevector <32 x i8> [[TMP14]], <32 x i8> [[TMP15]], <64 x i32>
-; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <32 x i8> [[TMP16]], <32 x i8> undef, <64 x i32>
-; CHECK-NEXT: [[TMP19:%.*]] = shufflevector <64 x i8> [[TMP17]], <64 x i8> [[TMP18]], <96 x i32>
-; CHECK-NEXT: store <96 x i8> [[TMP19]], <96 x i8>* [[P:%.*]], align 1
+; CHECK-NEXT: [[TMP17:%.*]] = shufflevector <32 x i8> [[TMP3]], <32 x i8> undef, <32 x i32>
+; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <32 x i8> [[TMP4]], <32 x i8> undef, <32 x i32>
+; CHECK-NEXT: [[TMP19:%.*]] = shufflevector <32 x i8> [[TMP17]], <32 x i8> [[TMP5]], <32 x i32>
+; CHECK-NEXT: [[TMP20:%.*]] = shufflevector <32 x i8> [[TMP18]], <32 x i8> [[TMP17]], <32 x i32>
+; CHECK-NEXT: [[TMP21:%.*]] = shufflevector <32 x i8> [[TMP5]], <32 x i8> [[TMP18]], <32 x i32>
+; CHECK-NEXT: [[TMP22:%.*]] = shufflevector <32 x i8> [[TMP19]], <32 x i8> [[TMP20]], <32 x i32>
+; CHECK-NEXT: [[TMP23:%.*]] = shufflevector <32 x i8> [[TMP20]], <32 x i8> [[TMP21]], <32 x i32>
+; CHECK-NEXT: [[TMP24:%.*]] = shufflevector <32 x i8> [[TMP21]], <32 x i8> [[TMP19]], <32 x i32>
+; CHECK-NEXT: [[TMP25:%.*]] = shufflevector <32 x i8> [[TMP22]], <32 x i8> [[TMP23]], <32 x i32>
+; CHECK-NEXT: [[TMP26:%.*]] = shufflevector <32 x i8> [[TMP24]], <32 x i8> [[TMP22]], <32 x i32>
+; CHECK-NEXT: [[TMP27:%.*]] = shufflevector <32 x i8> [[TMP23]], <32 x i8> [[TMP24]], <32 x i32>
+; CHECK-NEXT: [[TMP28:%.*]] = shufflevector <32 x i8> [[TMP25]], <32 x i8> [[TMP26]], <64 x i32>
+; CHECK-NEXT: [[TMP29:%.*]] = shufflevector <32 x i8> [[TMP27]], <32 x i8> undef, <64 x i32>
+; CHECK-NEXT: [[TMP30:%.*]] = shufflevector <64 x i8> [[TMP28]], <64 x i8> [[TMP29]], <96 x i32>
+; CHECK-NEXT: store <96 x i8> [[TMP30]], <96 x i8>* [[P:%.*]], align 1
; CHECK-NEXT: ret void
;
%1 = shufflevector <32 x i8> %a, <32 x i8> %b, <64 x i32>
@@ -167,8 +189,30 @@
; CHECK-LABEL: @interleaved_store_vf64_i8_stride3(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <64 x i8> [[A:%.*]], <64 x i8> [[B:%.*]], <128 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <64 x i8> [[C:%.*]], <64 x i8> undef, <128 x i32>
-; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <128 x i8> [[TMP1]], <128 x i8> [[TMP2]], <192 x i32>
-; CHECK-NEXT: store <192 x i8> [[TMP3]], <192 x i8>* [[P:%.*]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <128 x i8> [[TMP1]], <128 x i8> [[TMP2]], <64 x i32>
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <128 x i8> [[TMP1]], <128 x i8> [[TMP2]], <64 x i32>
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <128 x i8> [[TMP1]], <128 x i8> [[TMP2]], <64 x i32>
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <64 x i8> [[TMP3]], <64 x i8> undef, <64 x i32>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <64 x i8> [[TMP4]], <64 x i8> undef, <64 x i32>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <64 x i8> [[TMP6]], <64 x i8> [[TMP5]], <64 x i32>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <64 x i8> [[TMP7]], <64 x i8> [[TMP6]], <64 x i32>
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <64 x i8> [[TMP5]], <64 x i8> [[TMP7]], <64 x i32>
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <64 x i8> [[TMP8]], <64 x i8> [[TMP9]], <64 x i32>
+; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <64 x i8> [[TMP9]], <64 x i8> [[TMP10]], <64 x i32>
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <64 x i8> [[TMP10]], <64 x i8> [[TMP8]], <64 x i32>
+; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <64 x i8> [[TMP11]], <64 x i8> [[TMP12]], <32 x i32>
+; CHECK-NEXT: [[TMP15:%.*]] = shufflevector <64 x i8> [[TMP13]], <64 x i8> [[TMP11]], <32 x i32>
+; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <64 x i8> [[TMP12]], <64 x i8> [[TMP13]], <32 x i32>
+; CHECK-NEXT: [[TMP17:%.*]] = shufflevector <64 x i8> [[TMP11]], <64 x i8> [[TMP12]], <32 x i32>
+; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <64 x i8> [[TMP13]], <64 x i8> [[TMP11]], <32 x i32>
+; CHECK-NEXT: [[TMP19:%.*]] = shufflevector <64 x i8> [[TMP12]], <64 x i8> [[TMP13]], <32 x i32>
+; CHECK-NEXT: [[TMP20:%.*]] = shufflevector <32 x i8> [[TMP14]], <32 x i8> [[TMP15]], <64 x i32>
+; CHECK-NEXT: [[TMP21:%.*]] = shufflevector <32 x i8> [[TMP16]], <32 x i8> [[TMP17]], <64 x i32>
+; CHECK-NEXT: [[TMP22:%.*]] = shufflevector <32 x i8> [[TMP18]], <32 x i8> [[TMP19]], <64 x i32>
+; CHECK-NEXT: [[TMP23:%.*]] = shufflevector <64 x i8> [[TMP20]], <64 x i8> [[TMP21]], <128 x i32>
+; CHECK-NEXT: [[TMP24:%.*]] = shufflevector <64 x i8> [[TMP22]], <64 x i8> undef, <128 x i32>
+; CHECK-NEXT: [[TMP25:%.*]] = shufflevector <128 x i8> [[TMP23]], <128 x i8> [[TMP24]], <192 x i32>
+; CHECK-NEXT: store <192 x i8> [[TMP25]], <192 x i8>* [[P:%.*]], align 1
; CHECK-NEXT: ret void
;
%1 = shufflevector <64 x i8> %a, <64 x i8> %b, <128 x i32>