Index: lib/Target/X86/X86InterleavedAccess.cpp
===================================================================
--- lib/Target/X86/X86InterleavedAccess.cpp
+++ lib/Target/X86/X86InterleavedAccess.cpp
@@ -70,7 +70,8 @@
   /// Out-V3 = P4, q4, r4, s4
   void transpose_4x4(ArrayRef<Instruction *> InputVectors,
                      SmallVectorImpl<Value *> &TransposedVectors);
-
+  void transposeChar_32x4(ArrayRef<Instruction *> InputVectors,
+                          SmallVectorImpl<Value *> &TransposedVectors);
 public:
   /// In order to form an interleaved access group X86InterleavedAccessGroup
   /// requires a wide-load instruction \p 'I', a group of interleaved-vectors
@@ -101,15 +102,22 @@
   uint64_t ShuffleVecSize = DL.getTypeSizeInBits(ShuffleVecTy);
   Type *ShuffleEltTy = ShuffleVecTy->getVectorElementType();
 
-  // Currently, lowering is supported for 4-element vectors of 64 bits on AVX.
+  // Currently, lowering is supported for the following vectors:
+  // 1. 4-element vectors of 64 bits on AVX.
+  // 2. 32-element vectors of 8 bits on AVX.
   uint64_t ExpectedShuffleVecSize;
   if (isa<LoadInst>(Inst))
     ExpectedShuffleVecSize = 256;
   else
     ExpectedShuffleVecSize = 1024;
 
-  if (!Subtarget.hasAVX() || ShuffleVecSize != ExpectedShuffleVecSize ||
-      DL.getTypeSizeInBits(ShuffleEltTy) != 64 || Factor != 4)
+  // Currently, 32-element vectors of 8 bits are supported by stores only.
+  if ((DL.getTypeSizeInBits(ShuffleEltTy) == 8) && !isa<StoreInst>(Inst))
+    return false;
+
+  if (((DL.getTypeSizeInBits(ShuffleEltTy) != 64) &&
+       (DL.getTypeSizeInBits(ShuffleEltTy) != 8)) || !Subtarget.hasAVX() ||
+      ShuffleVecSize != ExpectedShuffleVecSize || Factor != 4)
     return false;
 
   return true;
@@ -158,6 +166,139 @@
   }
 }
 
+/// Generate an unpacklo/unpackhi shuffle mask.
+static void createUnpackShuffleMask(int NumElts,
+                                    SmallVectorImpl<uint32_t> &Mask, bool Lo,
+                                    bool Unary) {
+  int NumEltsInLane = NumElts / 2;
+  assert(Mask.empty() && "Expected an empty shuffle mask vector");
+  for (int i = 0; i < NumElts; ++i) {
+    unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
+    int Pos = (i % NumEltsInLane) / 2 + LaneStart;
+    Pos += (Unary ? 0 : NumElts * (i % 2));
+    Pos += (Lo ? 0 : NumEltsInLane / 2);
+    Mask.push_back(Pos);
+  }
+}
+
+// Create a shuffle mask for the concatenation of two half vectors.
+// Low = false: the mask generated for the shuffle
+//   shuffle(VEC1, VEC2, {NumElement/2, NumElement/2+1, ..., NumElement-1,
+//                        NumElement+NumElement/2, NumElement+NumElement/2+1,
+//                        ..., 2*NumElement-1})
+//   = concat(high_half(VEC1), high_half(VEC2))
+// Low = true: the mask generated for the shuffle
+//   shuffle(VEC1, VEC2, {0, 1, 2, ..., NumElement/2-1, NumElement,
+//                        NumElement+1, ..., NumElement+NumElement/2-1})
+//   = concat(low_half(VEC1), low_half(VEC2))
+static void createConcatShuffleMask(int NumElement,
+                                    SmallVectorImpl<uint32_t> &Mask,
+                                    bool Low) {
+  int BeginIndex = Low ? 0 : NumElement / 2;
+  int EndIndex = BeginIndex + NumElement / 2;
+  for (int i = 0; i < NumElement; ++i) {
+    if (BeginIndex == EndIndex)
+      BeginIndex += NumElement / 2;
+    Mask.push_back(BeginIndex);
+    BeginIndex++;
+  }
+}
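+
+// For reference, the masks these helpers generate for the sizes used below
+// (derived directly from the loops above):
+//   createUnpackShuffleMask(32, M, /*Lo=*/true, /*Unary=*/false)
+//     -> {0,32, 1,33, ..., 7,39, 16,48, 17,49, ..., 23,55}  (vpunpcklbw)
+//   createUnpackShuffleMask(32, M, /*Lo=*/false, /*Unary=*/false)
+//     -> {8,40, 9,41, ..., 15,47, 24,56, 25,57, ..., 31,63} (vpunpckhbw)
+//   createConcatShuffleMask(16, M, /*Low=*/true)
+//     -> {0..7, 16..23}  = concat(low_half(VEC1), low_half(VEC2))
+//   createConcatShuffleMask(16, M, /*Low=*/false)
+//     -> {8..15, 24..31} = concat(high_half(VEC1), high_half(VEC2))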
+
+void X86InterleavedAccessGroup::transposeChar_32x4(
+    ArrayRef<Instruction *> Matrix,
+    SmallVectorImpl<Value *> &TransposedMatrix) {
+
+  // Example: Assuming we start from the following vectors:
+  // Matrix[0]= c0 c1 c2 c3 c4 ... c31
+  // Matrix[1]= m0 m1 m2 m3 m4 ... m31
+  // Matrix[2]= y0 y1 y2 y3 y4 ... y31
+  // Matrix[3]= k0 k1 k2 k3 k4 ... k31
+
+  TransposedMatrix.resize(4);
+
+  SmallVector<uint32_t, 32> MaskHighTemp;
+  SmallVector<uint32_t, 32> MaskLowTemp;
+  SmallVector<uint32_t, 16> MaskHighTemp1;
+  SmallVector<uint32_t, 16> MaskLowTemp1;
+  SmallVector<uint32_t, 16> ConcatLow;
+  SmallVector<uint32_t, 16> ConcatHigh;
+
+  // MaskHighTemp and MaskLowTemp are built to match the vpunpckhbw and
+  // vpunpcklbw X86 shuffle patterns.
+  createUnpackShuffleMask(32, MaskHighTemp, false, false);
+  createUnpackShuffleMask(32, MaskLowTemp, true, false);
+
+  // MaskHighTemp1 and MaskLowTemp1 are built to match the vpunpckhwd and
+  // vpunpcklwd X86 shuffle patterns.
+  createUnpackShuffleMask(16, MaskLowTemp1, true, false);
+  createUnpackShuffleMask(16, MaskHighTemp1, false, false);
+
+  // ConcatHigh and ConcatLow are built to match the vperm2i128 and
+  // vinserti128 X86 shuffle patterns.
+  createConcatShuffleMask(16, ConcatLow, true);
+  createConcatShuffleMask(16, ConcatHigh, false);
+
+  ArrayRef<uint32_t> MaskHigh = makeArrayRef(MaskHighTemp);
+  ArrayRef<uint32_t> MaskLow = makeArrayRef(MaskLowTemp);
+  ArrayRef<uint32_t> MaskConcatLow = makeArrayRef(ConcatLow);
+  ArrayRef<uint32_t> MaskConcatHigh = makeArrayRef(ConcatHigh);
+  ArrayRef<uint32_t> MaskHighWord = makeArrayRef(MaskHighTemp1);
+  ArrayRef<uint32_t> MaskLowWord = makeArrayRef(MaskLowTemp1);
+
+  // IntrVec1Low  = c0 m0 c1 m1 ... c7 m7   | c16 m16 c17 m17 ... c23 m23
+  // IntrVec1High = c8 m8 c9 m9 ... c15 m15 | c24 m24 c25 m25 ... c31 m31
+  // IntrVec2Low  = y0 k0 y1 k1 ... y7 k7   | y16 k16 y17 k17 ... y23 k23
+  // IntrVec2High = y8 k8 y9 k9 ... y15 k15 | y24 k24 y25 k25 ... y31 k31
+
+  Value *IntrVec1Low =
+      Builder.CreateShuffleVector(Matrix[0], Matrix[1], MaskLow);
+  Value *IntrVec1High =
+      Builder.CreateShuffleVector(Matrix[0], Matrix[1], MaskHigh);
+  Value *IntrVec2Low =
+      Builder.CreateShuffleVector(Matrix[2], Matrix[3], MaskLow);
+  Value *IntrVec2High =
+      Builder.CreateShuffleVector(Matrix[2], Matrix[3], MaskHigh);
+
+  IntrVec1Low = Builder.CreateBitCast(
+      IntrVec1Low,
+      VectorType::get(Type::getInt16Ty(Shuffles[0]->getContext()), 16));
+  IntrVec1High = Builder.CreateBitCast(
+      IntrVec1High,
+      VectorType::get(Type::getInt16Ty(Shuffles[0]->getContext()), 16));
+  IntrVec2Low = Builder.CreateBitCast(
+      IntrVec2Low,
+      VectorType::get(Type::getInt16Ty(Shuffles[0]->getContext()), 16));
+  IntrVec2High = Builder.CreateBitCast(
+      IntrVec2High,
+      VectorType::get(Type::getInt16Ty(Shuffles[0]->getContext()), 16));
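+
+  // The bitcasts to <16 x i16> let the following word-sized unpacks move
+  // each (char, char) pair produced above as a single unit, so unpacking
+  // IntrVec1* with IntrVec2* yields whole c-m-y-k quadruples.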
+
+  // cmyk4  cmyk5  cmyk6  cmyk7  | cmyk20 cmyk21 cmyk22 cmyk23
+  // cmyk12 cmyk13 cmyk14 cmyk15 | cmyk28 cmyk29 cmyk30 cmyk31
+  // cmyk0  cmyk1  cmyk2  cmyk3  | cmyk16 cmyk17 cmyk18 cmyk19
+  // cmyk8  cmyk9  cmyk10 cmyk11 | cmyk24 cmyk25 cmyk26 cmyk27
+
+  Value *High = Builder.CreateShuffleVector(IntrVec1Low, IntrVec2Low,
+                                            MaskHighWord);
+  Value *High1 = Builder.CreateShuffleVector(IntrVec1High, IntrVec2High,
+                                             MaskHighWord);
+  Value *Low = Builder.CreateShuffleVector(IntrVec1Low, IntrVec2Low,
+                                           MaskLowWord);
+  Value *Low1 = Builder.CreateShuffleVector(IntrVec1High, IntrVec2High,
+                                            MaskLowWord);
+
+  // cmyk0  cmyk1  cmyk2  cmyk3  | cmyk4  cmyk5  cmyk6  cmyk7
+  // cmyk8  cmyk9  cmyk10 cmyk11 | cmyk12 cmyk13 cmyk14 cmyk15
+  // cmyk16 cmyk17 cmyk18 cmyk19 | cmyk20 cmyk21 cmyk22 cmyk23
+  // cmyk24 cmyk25 cmyk26 cmyk27 | cmyk28 cmyk29 cmyk30 cmyk31
+
+  TransposedMatrix[0] =
+      Builder.CreateShuffleVector(Low, High, MaskConcatLow);
+  TransposedMatrix[1] =
+      Builder.CreateShuffleVector(Low1, High1, MaskConcatLow);
+  TransposedMatrix[2] =
+      Builder.CreateShuffleVector(Low, High, MaskConcatHigh);
+  TransposedMatrix[3] =
+      Builder.CreateShuffleVector(Low1, High1, MaskConcatHigh);
+}
+
 void X86InterleavedAccessGroup::transpose_4x4(
     ArrayRef<Instruction *> Matrix,
     SmallVectorImpl<Value *> &TransposedMatrix) {
@@ -224,15 +365,37 @@
   // 2. Transpose the interleaved-vectors into vectors of contiguous
   //    elements.
-  transpose_4x4(DecomposedVectors, TransposedVectors);
+  StoreInst *SI = cast<StoreInst>(Inst);
+  switch (NumSubVecElems) {
+  case 4: {
+    transpose_4x4(DecomposedVectors, TransposedVectors);
 
-  // 3. Concatenate the contiguous-vectors back into a wide vector.
-  Value *WideVec = concatenateVectors(Builder, TransposedVectors);
+    // 3. Concatenate the contiguous-vectors back into a wide vector.
+    Value *WideVec = concatenateVectors(Builder, TransposedVectors);
 
-  // 4. Generate a store instruction for wide-vec.
-  StoreInst *SI = cast<StoreInst>(Inst);
-  Builder.CreateAlignedStore(WideVec, SI->getPointerOperand(),
-                             SI->getAlignment());
+    // 4. Generate a store instruction for wide-vec.
+    Builder.CreateAlignedStore(WideVec, SI->getPointerOperand(),
+                               SI->getAlignment());
+    break;
+  }
+  case 32: {
+    transposeChar_32x4(DecomposedVectors, TransposedVectors);
+    // VecInst contains the Ptr argument.
+    Value *VecInst = Inst->getOperand(1);
+    Type *IntOf16 = Type::getInt16Ty(Shuffles[0]->getContext());
+    // Cast the pointer from <128 x i8>* to <16 x i16>*; each transposed
+    // vector is then one <16 x i16> (32-byte) slice of the destination.
+    Type *VecTran = VectorType::get(IntOf16, 16)->getPointerTo();
+    Value *VecBasePtr = Builder.CreateBitCast(VecInst, VecTran);
+    for (unsigned i = 0; i < 4; i++) {
+      Value *NewBasePtr = Builder.CreateGEP(VecBasePtr, Builder.getInt32(i));
+      Builder.CreateAlignedStore(TransposedVectors[i], NewBasePtr,
+                                 SI->getAlignment());
+    }
+    break;
+  }
+  default:
+    return false;
+  }
 
   return true;
 }
@@ -258,6 +421,9 @@
   return Grp.isSupported() && Grp.lowerIntoOptimizedSequence();
 }
 
+// Currently, lowering is supported for the following interleaved accesses:
+// 1. Stride 4 with 64-bit elements and a vector factor of 4, on AVX.
+// 2. Stride 4 with 8-bit elements and a vector factor of 32, on AVX2.
 bool X86TargetLowering::lowerInterleavedStore(StoreInst *SI,
                                               ShuffleVectorInst *SVI,
                                               unsigned Factor) const {
@@ -280,6 +446,5 @@
   IRBuilder<> Builder(SI);
   X86InterleavedAccessGroup Grp(SI, Shuffles, Indices, Factor, Subtarget,
                                 Builder);
-
   return Grp.isSupported() && Grp.lowerIntoOptimizedSequence();
 }
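
To see end to end that these masks interleave correctly, the following
standalone sketch (an illustration, not part of the patch) reimplements the
two mask generators with std::vector standing in for SmallVectorImpl, models
shufflevector on labeled bytes, and expands each word-level mask to byte
indices to stand in for the <32 x i8> -> <16 x i16> bitcasts. It prints
c0 m0 y0 k0 c1 m1 y1 k1 ... across the four 32-byte rows:

#include <cassert>
#include <iostream>
#include <string>
#include <vector>

// Mask generators transcribed from the patch.
static void createUnpackShuffleMask(int NumElts, std::vector<int> &Mask,
                                    bool Lo, bool Unary) {
  int NumEltsInLane = NumElts / 2;
  assert(Mask.empty() && "Expected an empty shuffle mask vector");
  for (int i = 0; i < NumElts; ++i) {
    unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
    int Pos = (i % NumEltsInLane) / 2 + LaneStart;
    Pos += (Unary ? 0 : NumElts * (i % 2));
    Pos += (Lo ? 0 : NumEltsInLane / 2);
    Mask.push_back(Pos);
  }
}

static void createConcatShuffleMask(int NumElement, std::vector<int> &Mask,
                                    bool Low) {
  int BeginIndex = Low ? 0 : NumElement / 2;
  int EndIndex = BeginIndex + NumElement / 2;
  for (int i = 0; i < NumElement; ++i) {
    if (BeginIndex == EndIndex)
      BeginIndex += NumElement / 2;
    Mask.push_back(BeginIndex);
    BeginIndex++;
  }
}

using Vec = std::vector<std::string>; // one label per byte

// shufflevector semantics: indices below A.size() pick from A, the rest
// pick from B.
static Vec shuffle(const Vec &A, const Vec &B, const std::vector<int> &M) {
  Vec R;
  for (int Idx : M)
    R.push_back(Idx < (int)A.size() ? A[Idx] : B[Idx - (int)A.size()]);
  return R;
}

// Expand a word-level mask to byte indices; word w covers bytes 2w, 2w+1.
// This models the <32 x i8> -> <16 x i16> bitcasts in the patch.
static std::vector<int> wordMaskToBytes(const std::vector<int> &M) {
  std::vector<int> R;
  for (int W : M) {
    R.push_back(2 * W);
    R.push_back(2 * W + 1);
  }
  return R;
}

int main() {
  std::vector<Vec> Matrix(4, Vec(32));
  const char Names[] = "cmyk";
  for (int V = 0; V < 4; ++V)
    for (int I = 0; I < 32; ++I)
      Matrix[V][I] = Names[V] + std::to_string(I);

  std::vector<int> MaskHigh, MaskLow, MaskHighWord, MaskLowWord, ConcatLow,
      ConcatHigh;
  createUnpackShuffleMask(32, MaskHigh, false, false);
  createUnpackShuffleMask(32, MaskLow, true, false);
  createUnpackShuffleMask(16, MaskLowWord, true, false);
  createUnpackShuffleMask(16, MaskHighWord, false, false);
  createConcatShuffleMask(16, ConcatLow, true);
  createConcatShuffleMask(16, ConcatHigh, false);

  Vec V1L = shuffle(Matrix[0], Matrix[1], MaskLow);
  Vec V1H = shuffle(Matrix[0], Matrix[1], MaskHigh);
  Vec V2L = shuffle(Matrix[2], Matrix[3], MaskLow);
  Vec V2H = shuffle(Matrix[2], Matrix[3], MaskHigh);

  Vec High = shuffle(V1L, V2L, wordMaskToBytes(MaskHighWord));
  Vec High1 = shuffle(V1H, V2H, wordMaskToBytes(MaskHighWord));
  Vec Low = shuffle(V1L, V2L, wordMaskToBytes(MaskLowWord));
  Vec Low1 = shuffle(V1H, V2H, wordMaskToBytes(MaskLowWord));

  Vec Transposed[4] = {shuffle(Low, High, wordMaskToBytes(ConcatLow)),
                       shuffle(Low1, High1, wordMaskToBytes(ConcatLow)),
                       shuffle(Low, High, wordMaskToBytes(ConcatHigh)),
                       shuffle(Low1, High1, wordMaskToBytes(ConcatHigh))};

  // Expected: row 0 prints cmyk0..cmyk7, row 1 cmyk8..cmyk15, and so on.
  for (const Vec &Row : Transposed) {
    for (const std::string &S : Row)
      std::cout << S << ' ';
    std::cout << '\n';
  }
  return 0;
}
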
Index: test/CodeGen/X86/x86-interleaved-access.ll
===================================================================
--- test/CodeGen/X86/x86-interleaved-access.ll
+++ test/CodeGen/X86/x86-interleaved-access.ll
@@ -196,108 +196,59 @@
 define void @interleaved_store_vf32_i8_stride4(<32 x i8> %x1, <32 x i8> %x2, <32 x i8> %x3, <32 x i8> %x4, <128 x i8>* %p) {
 ; AVX1-LABEL: interleaved_store_vf32_i8_stride4:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm5
-; AVX1-NEXT:    vmovaps {{.*#+}} ymm4 = [65535,0,65535,0,65535,0,65535,0,65535,0,65535,0,65535,0,65535,0]
-; AVX1-NEXT:    vandnps %ymm5, %ymm4, %ymm5
-; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-NEXT:    vandps %ymm4, %ymm6, %ymm6
-; AVX1-NEXT:    vorps %ymm5, %ymm6, %ymm8
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm6, %ymm6
-; AVX1-NEXT:    vandnps %ymm6, %ymm4, %ymm6
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm7 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT:    vandps %ymm4, %ymm5, %ymm5
-; AVX1-NEXT:    vorps %ymm6, %ymm5, %ymm9
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm2
+; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm9 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm8 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm1 = xmm6[8],xmm5[8],xmm6[9],xmm5[9],xmm6[10],xmm5[10],xmm6[11],xmm5[11],xmm6[12],xmm5[12],xmm6[13],xmm5[13],xmm6[14],xmm5[14],xmm6[15],xmm5[15]
 ; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm7, %ymm5, %ymm5
-; AVX1-NEXT:    vandnps %ymm5, %ymm4, %ymm5
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
-; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm7, %ymm6
-; AVX1-NEXT:    vandps %ymm4, %ymm6, %ymm6
-; AVX1-NEXT:    vorps %ymm5, %ymm6, %ymm5
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm6
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm0
+; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
 ; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT:    vandnps %ymm2, %ymm4, %ymm2
-; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
-; AVX1-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vandps %ymm4, %ymm0, %ymm0
-; AVX1-NEXT:    vorps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm6[8],xmm0[9],xmm6[9],xmm0[10],xmm6[10],xmm0[11],xmm6[11],xmm0[12],xmm6[12],xmm0[13],xmm6[13],xmm0[14],xmm6[14],xmm0[15],xmm6[15]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm9[4],xmm5[4],xmm9[5],xmm5[5],xmm9[6],xmm5[6],xmm9[7],xmm5[7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm6, %ymm10
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX1-NEXT:    vpunpckhwd {{.*#+}} xmm11 = xmm8[4],xmm2[4],xmm8[5],xmm2[5],xmm8[6],xmm2[6],xmm8[7],xmm2[7]
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm11, %ymm3
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm9[0],xmm5[0],xmm9[1],xmm5[1],xmm9[2],xmm5[2],xmm9[3],xmm5[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm5, %ymm4
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX1-NEXT:    vpunpcklwd {{.*#+}} xmm1 = xmm8[0],xmm2[0],xmm8[1],xmm2[1],xmm8[2],xmm2[2],xmm8[3],xmm2[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm6, %ymm4, %ymm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm11, %ymm0, %ymm2
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm4 = ymm4[2,3],ymm10[2,3]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm3[2,3]
+; AVX1-NEXT:    vmovaps %ymm1, (%rdi)
+; AVX1-NEXT:    vmovaps %ymm2, 32(%rdi)
+; AVX1-NEXT:    vmovaps %ymm4, 64(%rdi)
 ; AVX1-NEXT:    vmovaps %ymm0, 96(%rdi)
-; AVX1-NEXT:    vmovaps %ymm5, 64(%rdi)
-; AVX1-NEXT:    vmovaps %ymm9, 32(%rdi)
-; AVX1-NEXT:    vmovaps %ymm8, (%rdi)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: interleaved_store_vf32_i8_stride4:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vpunpcklbw {{.*#+}} xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm5 = xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7]
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm4 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; AVX2-NEXT:    vinserti128 $1, %xmm5, %ymm4, %ymm4
-; AVX2-NEXT:    vpunpcklbw {{.*#+}} xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
-; AVX2-NEXT:    vinserti128 $1, %xmm6, %ymm5, %ymm5
-; AVX2-NEXT:    vpblendw {{.*#+}} ymm8 = ymm5[0],ymm4[1],ymm5[2],ymm4[3],ymm5[4],ymm4[5],ymm5[6],ymm4[7],ymm5[8],ymm4[9],ymm5[10],ymm4[11],ymm5[12],ymm4[13],ymm5[14],ymm4[15]
-; AVX2-NEXT:    vpunpckhbw {{.*#+}} xmm5 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm6 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm5 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; AVX2-NEXT:    vinserti128 $1, %xmm6, %ymm5, %ymm5
-; AVX2-NEXT:    vpunpckhbw {{.*#+}} xmm6 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
-; AVX2-NEXT:    vinserti128 $1, %xmm7, %ymm6, %ymm6
-; AVX2-NEXT:    vpblendw {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2],ymm5[3],ymm6[4],ymm5[5],ymm6[6],ymm5[7],ymm6[8],ymm5[9],ymm6[10],ymm5[11],ymm6[12],ymm5[13],ymm6[14],ymm5[15]
-; AVX2-NEXT:    vextracti128 $1, %ymm3, %xmm3
-; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm2
-; AVX2-NEXT:    vpunpcklbw {{.*#+}} xmm6 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7]
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm7 = xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm6 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; AVX2-NEXT:    vinserti128 $1, %xmm7, %ymm6, %ymm6
-; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT:    vpunpcklbw {{.*#+}} xmm7 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm4 = xmm7[4],xmm0[4],xmm7[5],xmm0[5],xmm7[6],xmm0[6],xmm7[7],xmm0[7]
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} xmm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero
-; AVX2-NEXT:    vinserti128 $1, %xmm4, %ymm7, %ymm4
-; AVX2-NEXT:    vpblendw {{.*#+}} ymm4 = ymm4[0],ymm6[1],ymm4[2],ymm6[3],ymm4[4],ymm6[5],ymm4[6],ymm6[7],ymm4[8],ymm6[9],ymm4[10],ymm6[11],ymm4[12],ymm6[13],ymm4[14],ymm6[15]
-; AVX2-NEXT:    vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
-; AVX2-NEXT:    vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
-; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm2, %ymm2
-; AVX2-NEXT:    vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
-; AVX2-NEXT:    vpunpckhwd {{.*#+}} xmm1 = xmm0[4,4,5,5,6,6,7,7]
-; AVX2-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm4 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX2-NEXT:    vpunpckhbw {{.*#+}} ymm0 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
+; AVX2-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[16],ymm3[16],ymm2[17],ymm3[17],ymm2[18],ymm3[18],ymm2[19],ymm3[19],ymm2[20],ymm3[20],ymm2[21],ymm3[21],ymm2[22],ymm3[22],ymm2[23],ymm3[23]
+; AVX2-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15],ymm2[24],ymm3[24],ymm2[25],ymm3[25],ymm2[26],ymm3[26],ymm2[27],ymm3[27],ymm2[28],ymm3[28],ymm2[29],ymm3[29],ymm2[30],ymm3[30],ymm2[31],ymm3[31]
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm3 = ymm4[4],ymm1[4],ymm4[5],ymm1[5],ymm4[6],ymm1[6],ymm4[7],ymm1[7],ymm4[12],ymm1[12],ymm4[13],ymm1[13],ymm4[14],ymm1[14],ymm4[15],ymm1[15]
+; AVX2-NEXT:    vpunpckhwd {{.*#+}} ymm5 = ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15]
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[8],ymm1[8],ymm4[9],ymm1[9],ymm4[10],ymm1[10],ymm4[11],ymm1[11]
+; AVX2-NEXT:    vpunpcklwd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11]
+; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm1, %ymm2
+; AVX2-NEXT:    vinserti128 $1, %xmm5, %ymm0, %ymm4
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm5[2,3]
+; AVX2-NEXT:    vmovdqa %ymm2, (%rdi)
+; AVX2-NEXT:    vmovdqa %ymm4, 32(%rdi)
+; AVX2-NEXT:    vmovdqa %ymm1, 64(%rdi)
 ; AVX2-NEXT:    vmovdqa %ymm0, 96(%rdi)
-; AVX2-NEXT:    vmovdqa %ymm4, 64(%rdi)
-; AVX2-NEXT:    vmovdqa %ymm5, 32(%rdi)
-; AVX2-NEXT:    vmovdqa %ymm8, (%rdi)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
   %v1 = shufflevector <32 x i8> %x1, <32 x i8> %x2, <64 x i32>
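
For reference, the new AVX2 sequence above maps almost line for line onto
integer intrinsics. The following sketch (again an illustration, not part of
the patch) mirrors it with the byte and word unpacks plus vinserti128 and
vperm2i128, and checks the 128-byte result against a scalar interleave;
compile with -mavx2:

#include <immintrin.h>
#include <cstdint>
#include <cstdio>

int main() {
  alignas(32) uint8_t c[32], m[32], y[32], k[32], out[128];
  for (int i = 0; i < 32; ++i) {
    c[i] = i;
    m[i] = 32 + i;
    y[i] = 64 + i;
    k[i] = 96 + i;
  }
  __m256i X0 = _mm256_load_si256((const __m256i *)c);
  __m256i X1 = _mm256_load_si256((const __m256i *)m);
  __m256i X2 = _mm256_load_si256((const __m256i *)y);
  __m256i X3 = _mm256_load_si256((const __m256i *)k);

  // Byte unpacks (vpunpcklbw / vpunpckhbw): form (c,m) and (y,k) pairs.
  __m256i LoCM = _mm256_unpacklo_epi8(X0, X1);
  __m256i HiCM = _mm256_unpackhi_epi8(X0, X1);
  __m256i LoYK = _mm256_unpacklo_epi8(X2, X3);
  __m256i HiYK = _mm256_unpackhi_epi8(X2, X3);

  // Word unpacks (vpunpcklwd / vpunpckhwd): merge pairs into cmyk quads.
  __m256i Low = _mm256_unpacklo_epi16(LoCM, LoYK);
  __m256i High = _mm256_unpackhi_epi16(LoCM, LoYK);
  __m256i Low1 = _mm256_unpacklo_epi16(HiCM, HiYK);
  __m256i High1 = _mm256_unpackhi_epi16(HiCM, HiYK);

  // Lane fix-ups (vinserti128 / vperm2i128): concatenate the matching
  // 128-bit halves.
  __m256i T0 = _mm256_inserti128_si256(Low, _mm256_castsi256_si128(High), 1);
  __m256i T1 = _mm256_inserti128_si256(Low1, _mm256_castsi256_si128(High1), 1);
  __m256i T2 = _mm256_permute2x128_si256(Low, High, 0x31);
  __m256i T3 = _mm256_permute2x128_si256(Low1, High1, 0x31);

  _mm256_storeu_si256((__m256i *)(out + 0), T0);
  _mm256_storeu_si256((__m256i *)(out + 32), T1);
  _mm256_storeu_si256((__m256i *)(out + 64), T2);
  _mm256_storeu_si256((__m256i *)(out + 96), T3);

  // Verify against the scalar stride-4 interleave.
  for (int i = 0; i < 32; ++i) {
    if (out[4 * i] != c[i] || out[4 * i + 1] != m[i] ||
        out[4 * i + 2] != y[i] || out[4 * i + 3] != k[i]) {
      printf("mismatch at element %d\n", i);
      return 1;
    }
  }
  printf("interleave OK\n");
  return 0;
}
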
Index: test/Transforms/InterleavedAccess/X86/interleavedStore.ll
===================================================================
--- test/Transforms/InterleavedAccess/X86/interleavedStore.ll
+++ test/Transforms/InterleavedAccess/X86/interleavedStore.ll
@@ -1,12 +1,39 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -mtriple=x86_64-pc-linux -mattr=+avx -interleaved-access -S | FileCheck %s
+; RUN: opt < %s -mtriple=x86_64-pc-linux -mattr=+avx2 -interleaved-access -S | FileCheck %s
 
 define void @interleaved_store_vf32_i8_stride4(<32 x i8> %x1, <32 x i8> %x2, <32 x i8> %x3, <32 x i8> %x4, <128 x i8>* %p) {
 ; CHECK-LABEL: @interleaved_store_vf32_i8_stride4(
 ; CHECK-NEXT:    [[V1:%.*]] = shufflevector <32 x i8> [[X1:%.*]], <32 x i8> [[X2:%.*]], <64 x i32>
 ; CHECK-NEXT:    [[V2:%.*]] = shufflevector <32 x i8> [[X3:%.*]], <32 x i8> [[X4:%.*]], <64 x i32>
-; CHECK-NEXT:    [[INTERLEAVED_VEC:%.*]] = shufflevector <64 x i8> [[V1]], <64 x i8> [[V2]], <128 x i32>
-; CHECK-NEXT:    store <128 x i8> [[INTERLEAVED_VEC]], <128 x i8>* [[P:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <64 x i8> [[V1]], <64 x i8> [[V2]], <32 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <64 x i8> [[V1]], <64 x i8> [[V2]], <32 x i32>
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <64 x i8> [[V1]], <64 x i8> [[V2]], <32 x i32>
+; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <64 x i8> [[V1]], <64 x i8> [[V2]], <32 x i32>
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <32 x i8> [[TMP1]], <32 x i8> [[TMP2]], <32 x i32>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <32 x i8> [[TMP1]], <32 x i8> [[TMP2]], <32 x i32>
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <32 x i8> [[TMP3]], <32 x i8> [[TMP4]], <32 x i32>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <32 x i8> [[TMP3]], <32 x i8> [[TMP4]], <32 x i32>
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <32 x i8> [[TMP5]] to <16 x i16>
+; CHECK-NEXT:    [[TMP10:%.*]] = bitcast <32 x i8> [[TMP6]] to <16 x i16>
+; CHECK-NEXT:    [[TMP11:%.*]] = bitcast <32 x i8> [[TMP7]] to <16 x i16>
+; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <32 x i8> [[TMP8]] to <16 x i16>
+; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <16 x i16> [[TMP9]], <16 x i16> [[TMP11]], <16 x i32>
+; CHECK-NEXT:    [[TMP14:%.*]] = shufflevector <16 x i16> [[TMP10]], <16 x i16> [[TMP12]], <16 x i32>
+; CHECK-NEXT:    [[TMP15:%.*]] = shufflevector <16 x i16> [[TMP9]], <16 x i16> [[TMP11]], <16 x i32>
+; CHECK-NEXT:    [[TMP16:%.*]] = shufflevector <16 x i16> [[TMP10]], <16 x i16> [[TMP12]], <16 x i32>
+; CHECK-NEXT:    [[TMP17:%.*]] = shufflevector <16 x i16> [[TMP15]], <16 x i16> [[TMP13]], <16 x i32>
+; CHECK-NEXT:    [[TMP18:%.*]] = shufflevector <16 x i16> [[TMP16]], <16 x i16> [[TMP14]], <16 x i32>
+; CHECK-NEXT:    [[TMP19:%.*]] = shufflevector <16 x i16> [[TMP15]], <16 x i16> [[TMP13]], <16 x i32>
+; CHECK-NEXT:    [[TMP20:%.*]] = shufflevector <16 x i16> [[TMP16]], <16 x i16> [[TMP14]], <16 x i32>
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast <128 x i8>* [[P:%.*]] to <16 x i16>*
+; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr <16 x i16>, <16 x i16>* [[TMP21]], i32 0
+; CHECK-NEXT:    store <16 x i16> [[TMP17]], <16 x i16>* [[TMP22]]
+; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr <16 x i16>, <16 x i16>* [[TMP21]], i32 1
+; CHECK-NEXT:    store <16 x i16> [[TMP18]], <16 x i16>* [[TMP23]]
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr <16 x i16>, <16 x i16>* [[TMP21]], i32 2
+; CHECK-NEXT:    store <16 x i16> [[TMP19]], <16 x i16>* [[TMP24]]
+; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr <16 x i16>, <16 x i16>* [[TMP21]], i32 3
+; CHECK-NEXT:    store <16 x i16> [[TMP20]], <16 x i16>* [[TMP25]]
 ; CHECK-NEXT:    ret void
 ;
   %v1 = shufflevector <32 x i8> %x1, <32 x i8> %x2, <64 x i32>