Index: llvm/trunk/lib/Target/X86/X86ISelLowering.h
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.h
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.h
@@ -1070,6 +1070,12 @@
                               ArrayRef<unsigned> Indices,
                               unsigned Factor) const override;
 
+    /// \brief Lower interleaved store(s) into target specific
+    /// instructions/intrinsics.
+    bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
+                               unsigned Factor) const override;
+
+
     void finalizeLowering(MachineFunction &MF) const override;
 
   protected:
Index: llvm/trunk/lib/Target/X86/X86InterleavedAccess.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86InterleavedAccess.cpp
+++ llvm/trunk/lib/Target/X86/X86InterleavedAccess.cpp
@@ -16,6 +16,7 @@
 
 #include "X86ISelLowering.h"
 #include "X86TargetMachine.h"
+#include "llvm/Analysis/VectorUtils.h"
 
 using namespace llvm;
 
@@ -50,9 +51,8 @@
   IRBuilder<> &Builder;
 
   /// \brief Breaks down a vector \p 'Inst' of N elements into \p NumSubVectors
-  /// sub vectors of type \p T. Returns true and the sub-vectors in
-  /// \p DecomposedVectors if it decomposes the Inst, returns false otherwise.
-  bool decompose(Instruction *Inst, unsigned NumSubVectors, VectorType *T,
+  /// sub vectors of type \p T. Returns the sub-vectors in \p DecomposedVectors.
+  void decompose(Instruction *Inst, unsigned NumSubVectors, VectorType *T,
                  SmallVectorImpl<Instruction *> &DecomposedVectors);
 
   /// \brief Performs matrix transposition on a 4x4 matrix \p InputVectors and
@@ -80,8 +80,7 @@
   /// target information \p STarget.
   explicit X86InterleavedAccessGroup(Instruction *I,
                                     ArrayRef<ShuffleVectorInst *> Shuffs,
-                                    ArrayRef<unsigned> Ind,
-                                    const unsigned F,
+                                    ArrayRef<unsigned> Ind, const unsigned F,
                                     const X86Subtarget &STarget,
                                     IRBuilder<> &B)
       : Inst(I), Shuffles(Shuffs), Indices(Ind), Factor(F), Subtarget(STarget),
@@ -102,48 +101,61 @@
   uint64_t ShuffleVecSize = DL.getTypeSizeInBits(ShuffleVecTy);
   Type *ShuffleEltTy = ShuffleVecTy->getVectorElementType();
 
-  if (DL.getTypeSizeInBits(Inst->getType()) < Factor * ShuffleVecSize)
-    return false;
+  // Currently, lowering is supported for 4-element vectors of 64 bits on AVX.
+  uint64_t ExpectedShuffleVecSize;
+  if (isa<LoadInst>(Inst))
+    ExpectedShuffleVecSize = 256;
+  else
+    ExpectedShuffleVecSize = 1024;
 
-  // Currently, lowering is supported for 64 bits on AVX.
-  if (!Subtarget.hasAVX() || ShuffleVecSize != 256 ||
+  if (!Subtarget.hasAVX() || ShuffleVecSize != ExpectedShuffleVecSize ||
       DL.getTypeSizeInBits(ShuffleEltTy) != 64 || Factor != 4)
     return false;
 
   return true;
 }
 
-bool X86InterleavedAccessGroup::decompose(
+void X86InterleavedAccessGroup::decompose(
     Instruction *VecInst, unsigned NumSubVectors, VectorType *SubVecTy,
     SmallVectorImpl<Instruction *> &DecomposedVectors) {
+
+  assert((isa<LoadInst>(VecInst) || isa<ShuffleVectorInst>(VecInst)) &&
+         "Expected Load or Shuffle");
+
   Type *VecTy = VecInst->getType();
   (void)VecTy;
   assert(VecTy->isVectorTy() &&
          DL.getTypeSizeInBits(VecTy) >=
              DL.getTypeSizeInBits(SubVecTy) * NumSubVectors &&
         "Invalid Inst-size!!!");
-  assert(VecTy->getVectorElementType() == SubVecTy->getVectorElementType() &&
-         "Element type mismatched!!!");
-  if (!isa<LoadInst>(VecInst))
-    return false;
+  if (auto *SVI = dyn_cast<ShuffleVectorInst>(VecInst)) {
+    Value *Op0 = SVI->getOperand(0);
+    Value *Op1 = SVI->getOperand(1);
+
+    // Generate N(= NumSubVectors) shuffles of T(= SubVecTy) type.
+    for (unsigned i = 0; i < NumSubVectors; ++i)
+      DecomposedVectors.push_back(
+          cast<ShuffleVectorInst>(Builder.CreateShuffleVector(
+              Op0, Op1,
+              createSequentialMask(Builder, Indices[i],
+                                   SubVecTy->getVectorNumElements(), 0))));
+    return;
+  }
+
+  // Decompose the load instruction.
   LoadInst *LI = cast<LoadInst>(VecInst);
   Type *VecBasePtrTy = SubVecTy->getPointerTo(LI->getPointerAddressSpace());
-
   Value *VecBasePtr =
       Builder.CreateBitCast(LI->getPointerOperand(), VecBasePtrTy);
 
-  // Generate N loads of T type
+  // Generate N loads of T type.
   for (unsigned i = 0; i < NumSubVectors; i++) {
-    // TODO: Support inbounds GEP
+    // TODO: Support inbounds GEP.
    Value *NewBasePtr = Builder.CreateGEP(VecBasePtr, Builder.getInt32(i));
     Instruction *NewLoad =
         Builder.CreateAlignedLoad(NewBasePtr, LI->getAlignment());
     DecomposedVectors.push_back(NewLoad);
   }
-
-  return true;
 }
 
 void X86InterleavedAccessGroup::transpose_4x4(
@@ -181,21 +193,46 @@
 // instructions/intrinsics.
 bool X86InterleavedAccessGroup::lowerIntoOptimizedSequence() {
   SmallVector<Instruction *, 4> DecomposedVectors;
-  VectorType *VecTy = Shuffles[0]->getType();
-  // Try to generate target-sized register(/instruction).
-  if (!decompose(Inst, Factor, VecTy, DecomposedVectors))
-    return false;
-
   SmallVector<Value *, 4> TransposedVectors;
-  // Perform matrix-transposition in order to compute interleaved
-  // results by generating some sort of (optimized) target-specific
-  // instructions.
+  VectorType *ShuffleTy = Shuffles[0]->getType();
+
+  if (isa<LoadInst>(Inst)) {
+    // Try to generate target-sized register(/instruction).
+    decompose(Inst, Factor, ShuffleTy, DecomposedVectors);
+
+    // Perform matrix-transposition in order to compute interleaved
+    // results by generating some sort of (optimized) target-specific
+    // instructions.
+    transpose_4x4(DecomposedVectors, TransposedVectors);
+
+    // Now replace the unoptimized-interleaved-vectors with the
+    // transposed-interleaved vectors.
+    for (unsigned i = 0, e = Shuffles.size(); i < e; ++i)
+      Shuffles[i]->replaceAllUsesWith(TransposedVectors[Indices[i]]);
+
+    return true;
+  }
+
+  Type *ShuffleEltTy = ShuffleTy->getVectorElementType();
+  unsigned NumSubVecElems = ShuffleTy->getVectorNumElements() / Factor;
+
+  // Lower the interleaved stores:
+  //   1. Decompose the interleaved wide shuffle into individual shuffle
+  //      vectors.
+  decompose(Shuffles[0], Factor,
+            VectorType::get(ShuffleEltTy, NumSubVecElems), DecomposedVectors);
+
+  //   2. Transpose the interleaved-vectors into vectors of contiguous
+  //      elements.
   transpose_4x4(DecomposedVectors, TransposedVectors);
 
-  // Now replace the unoptimized-interleaved-vectors with the
-  // transposed-interleaved vectors.
-  for (unsigned i = 0; i < Shuffles.size(); i++)
-    Shuffles[i]->replaceAllUsesWith(TransposedVectors[Indices[i]]);
+  //   3. Concatenate the contiguous-vectors back into a wide vector.
+  Value *WideVec = concatenateVectors(Builder, TransposedVectors);
 
+  //   4. Generate a store instruction for wide-vec.
+  StoreInst *SI = cast<StoreInst>(Inst);
+  Builder.CreateAlignedStore(WideVec, SI->getPointerOperand(),
+                             SI->getAlignment());
 
   return true;
 }
@@ -220,3 +257,30 @@
 
   return Grp.isSupported() && Grp.lowerIntoOptimizedSequence();
 }
+
+bool X86TargetLowering::lowerInterleavedStore(StoreInst *SI,
+                                              ShuffleVectorInst *SVI,
+                                              unsigned Factor) const {
+  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
+         "Invalid interleave factor");
+
+  VectorType *VecTy = SVI->getType();
+  assert(VecTy->getVectorNumElements() % Factor == 0 &&
+         "Invalid interleaved store");
+
+  // Holds the indices of SVI that correspond to the starting index of each
+  // interleaved shuffle.
+  SmallVector<unsigned, 4> Indices;
+  auto Mask = SVI->getShuffleMask();
+  for (unsigned i = 0; i < Factor; i++)
+    Indices.push_back(Mask[i]);
+
+  ArrayRef<ShuffleVectorInst *> Shuffles = makeArrayRef(SVI);
+
+  // Create an interleaved access group.
+  IRBuilder<> Builder(SI);
+  X86InterleavedAccessGroup Grp(SI, Shuffles, Indices, Factor, Subtarget,
+                                Builder);
+
+  return Grp.isSupported() && Grp.lowerIntoOptimizedSequence();
+}
Index: llvm/trunk/test/CodeGen/X86/x86-interleaved-access.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/x86-interleaved-access.ll
+++ llvm/trunk/test/CodeGen/X86/x86-interleaved-access.ll
@@ -129,26 +129,18 @@
 define void @store_factorf64_4(<16 x double>* %ptr, <4 x double> %v0, <4 x double> %v1, <4 x double> %v2, <4 x double> %v3) {
 ; AVX-LABEL: store_factorf64_4:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vunpcklpd {{.*#+}} xmm4 = xmm2[0],xmm3[0]
-; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX-NEXT:    vunpcklpd {{.*#+}} xmm5 = xmm0[0],xmm1[0]
-; AVX-NEXT:    vblendpd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3]
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm2[1],xmm3[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm0[1],xmm1[1]
-; AVX-NEXT:    vblendpd {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3]
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; AVX-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX-NEXT:    vextractf128 $1, %ymm7, %xmm7
-; AVX-NEXT:    vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3]
-; AVX-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
 ; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3]
 ; AVX-NEXT:    vmovupd %ymm0, 96(%rdi)
-; AVX-NEXT:    vmovupd %ymm6, 64(%rdi)
-; AVX-NEXT:    vmovupd %ymm5, 32(%rdi)
-; AVX-NEXT:    vmovupd %ymm4, (%rdi)
+; AVX-NEXT:    vmovupd %ymm3, 64(%rdi)
+; AVX-NEXT:    vmovupd %ymm4, 32(%rdi)
+; AVX-NEXT:    vmovupd %ymm2, (%rdi)
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
   %s0 = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32>
@@ -161,55 +153,35 @@
 define void @store_factori64_4(<16 x i64>* %ptr, <4 x i64> %v0, <4 x i64> %v1, <4 x i64> %v2, <4 x i64> %v3) {
 ; AVX1-LABEL: store_factori64_4:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm2[0],xmm3[0]
-; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm5 = xmm0[0],xmm1[0]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3]
-; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm5 = xmm2[1],xmm3[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
-; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm6 = xmm0[1],xmm1[1]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm7
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3]
-; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
 ; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3]
 ; AVX1-NEXT:    vmovupd %ymm0, 96(%rdi)
-; AVX1-NEXT:    vmovupd %ymm6, 64(%rdi)
-; AVX1-NEXT:    vmovupd %ymm5, 32(%rdi)
-; AVX1-NEXT:    vmovupd %ymm4, (%rdi)
+; AVX1-NEXT:    vmovupd %ymm3, 64(%rdi)
+; AVX1-NEXT:    vmovupd %ymm4, 32(%rdi)
+; AVX1-NEXT:    vmovupd %ymm2, (%rdi)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: store_factori64_4:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm5
-; AVX2-NEXT:    vpermq {{.*#+}} ymm6 = ymm1[0,2,2,3]
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3]
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
-; AVX2-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
-; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm6
-; AVX2-NEXT:    vpermq {{.*#+}} ymm7 = ymm0[3,1,2,3]
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3]
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
-; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm6
-; AVX2-NEXT:    vpbroadcastq %xmm3, %ymm7
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
-; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm7 = xmm0[0],xmm1[0]
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
-; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,1,3]
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
-; AVX2-NEXT:    vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
-; AVX2-NEXT:    vmovdqu %ymm0, 32(%rdi)
-; AVX2-NEXT:    vmovdqu %ymm6, (%rdi)
-; AVX2-NEXT:    vmovdqu %ymm5, 96(%rdi)
-; AVX2-NEXT:    vmovdqu %ymm4, 64(%rdi)
+; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm4
+; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm1, %ymm5
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX2-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-NEXT:    vmovdqu %ymm0, 96(%rdi)
+; AVX2-NEXT:    vmovdqu %ymm3, 64(%rdi)
+; AVX2-NEXT:    vmovdqu %ymm4, 32(%rdi)
+; AVX2-NEXT:    vmovdqu %ymm2, (%rdi)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
   %s0 = shufflevector <4 x i64> %v0, <4 x i64> %v1, <8 x i32>
Index: llvm/trunk/test/Transforms/InterleavedAccess/X86/interleaved-accesses-64bits-avx.ll
===================================================================
--- llvm/trunk/test/Transforms/InterleavedAccess/X86/interleaved-accesses-64bits-avx.ll
+++ llvm/trunk/test/Transforms/InterleavedAccess/X86/interleaved-accesses-64bits-avx.ll
@@ -106,8 +106,22 @@
 ; CHECK-LABEL: @store_factorf64_4(
 ; CHECK-NEXT:    [[S0:%.*]] = shufflevector <4 x double> [[V0:%.*]], <4 x double> [[V1:%.*]], <8 x i32>
 ; CHECK-NEXT:    [[S1:%.*]] = shufflevector <4 x double> [[V2:%.*]], <4 x double> [[V3:%.*]], <8 x i32>
-; CHECK-NEXT:    [[INTERLEAVED_VEC:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <16 x i32>
-; CHECK-NEXT:    store <16 x double> [[INTERLEAVED_VEC]], <16 x double>* [[PTR:%.*]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <4 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <4 x i32>
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <4 x i32>
+; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <4 x i32>
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x double> [[TMP1]], <4 x double> [[TMP3]], <4 x i32>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x double> [[TMP2]], <4 x double> [[TMP4]], <4 x i32>
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <4 x double> [[TMP1]], <4 x double> [[TMP3]], <4 x i32>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x double> [[TMP2]], <4 x double> [[TMP4]], <4 x i32>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <4 x double> [[TMP5]], <4 x double> [[TMP6]], <4 x i32>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <4 x double> [[TMP7]], <4 x double> [[TMP8]], <4 x i32>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x double> [[TMP5]], <4 x double> [[TMP6]], <4 x i32>
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x double> [[TMP7]], <4 x double> [[TMP8]], <4 x i32>
+; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <4 x double> [[TMP9]], <4 x double> [[TMP11]], <8 x i32>
+; CHECK-NEXT:    [[TMP14:%.*]] = shufflevector <4 x double> [[TMP10]], <4 x double> [[TMP12]], <8 x i32>
+; CHECK-NEXT:    [[TMP15:%.*]] = shufflevector <8 x double> [[TMP13]], <8 x double> [[TMP14]], <16 x i32>
+; CHECK-NEXT:    store <16 x double> [[TMP15]], <16 x double>* [[PTR:%.*]], align 16
 ; CHECK-NEXT:    ret void
 ;
   %s0 = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32>
@@ -121,8 +135,22 @@
 ; CHECK-LABEL: @store_factori64_4(
 ; CHECK-NEXT:    [[S0:%.*]] = shufflevector <4 x i64> [[V0:%.*]], <4 x i64> [[V1:%.*]], <8 x i32>
 ; CHECK-NEXT:    [[S1:%.*]] = shufflevector <4 x i64> [[V2:%.*]], <4 x i64> [[V3:%.*]], <8 x i32>
-; CHECK-NEXT:    [[INTERLEAVED_VEC:%.*]] = shufflevector <8 x i64> [[S0]], <8 x i64> [[S1]], <16 x i32>
-; CHECK-NEXT:    store <16 x i64> [[INTERLEAVED_VEC]], <16 x i64>* [[PTR:%.*]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <8 x i64> [[S0]], <8 x i64> [[S1]], <4 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <8 x i64> [[S0]], <8 x i64> [[S1]], <4 x i32>
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <8 x i64> [[S0]], <8 x i64> [[S1]], <4 x i32>
+; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <8 x i64> [[S0]], <8 x i64> [[S1]], <4 x i32>
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP3]], <4 x i32>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i64> [[TMP2]], <4 x i64> [[TMP4]], <4 x i32>
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <4 x i64> [[TMP1]], <4 x i64> [[TMP3]], <4 x i32>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i64> [[TMP2]], <4 x i64> [[TMP4]], <4 x i32>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <4 x i64> [[TMP5]], <4 x i64> [[TMP6]], <4 x i32>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <4 x i64> [[TMP7]], <4 x i64> [[TMP8]], <4 x i32>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x i64> [[TMP5]], <4 x i64> [[TMP6]], <4 x i32>
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x i64> [[TMP7]], <4 x i64> [[TMP8]], <4 x i32>
+; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <4 x i64> [[TMP9]], <4 x i64> [[TMP11]], <8 x i32>
+; CHECK-NEXT:    [[TMP14:%.*]] = shufflevector <4 x i64> [[TMP10]], <4 x i64> [[TMP12]], <8 x i32>
+; CHECK-NEXT:    [[TMP15:%.*]] = shufflevector <8 x i64> [[TMP13]], <8 x i64> [[TMP14]], <16 x i32>
+; CHECK-NEXT:    store <16 x i64> [[TMP15]], <16 x i64>* [[PTR:%.*]], align 16
 ; CHECK-NEXT:    ret void
 ;
   %s0 = shufflevector <4 x i64> %v0, <4 x i64> %v1, <8 x i32>
@@ -136,8 +164,22 @@
 ; CHECK-LABEL: @store_factorf64_4_revMask(
 ; CHECK-NEXT:    [[S0:%.*]] = shufflevector <4 x double> [[V0:%.*]], <4 x double> [[V1:%.*]], <8 x i32>
 ; CHECK-NEXT:    [[S1:%.*]] = shufflevector <4 x double> [[V2:%.*]], <4 x double> [[V3:%.*]], <8 x i32>
-; CHECK-NEXT:    [[INTERLEAVED_VEC:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <16 x i32>
-; CHECK-NEXT:    store <16 x double> [[INTERLEAVED_VEC]], <16 x double>* [[PTR:%.*]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <4 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <4 x i32>
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <4 x i32>
+; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <8 x double> [[S0]], <8 x double> [[S1]], <4 x i32>
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x double> [[TMP1]], <4 x double> [[TMP3]], <4 x i32>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x double> [[TMP2]], <4 x double> [[TMP4]], <4 x i32>
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <4 x double> [[TMP1]], <4 x double> [[TMP3]], <4 x i32>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x double> [[TMP2]], <4 x double> [[TMP4]], <4 x i32>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <4 x double> [[TMP5]], <4 x double> [[TMP6]], <4 x i32>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <4 x double> [[TMP7]], <4 x double> [[TMP8]], <4 x i32>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x double> [[TMP5]], <4 x double> [[TMP6]], <4 x i32>
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x double> [[TMP7]], <4 x double> [[TMP8]], <4 x i32>
+; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <4 x double> [[TMP9]], <4 x double> [[TMP11]], <8 x i32>
+; CHECK-NEXT:    [[TMP14:%.*]] = shufflevector <4 x double> [[TMP10]], <4 x double> [[TMP12]], <8 x i32>
+; CHECK-NEXT:    [[TMP15:%.*]] = shufflevector <8 x double> [[TMP13]], <8 x double> [[TMP14]], <16 x i32>
+; CHECK-NEXT:    store <16 x double> [[TMP15]], <16 x double>* [[PTR:%.*]], align 16
 ; CHECK-NEXT:    ret void
 ;
   %s0 = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32>
@@ -151,8 +193,22 @@
 ; CHECK-LABEL: @store_factorf64_4_arbitraryMask(
 ; CHECK-NEXT:    [[S0:%.*]] = shufflevector <16 x double> [[V0:%.*]], <16 x double> [[V1:%.*]], <32 x i32>
 ; CHECK-NEXT:    [[S1:%.*]] = shufflevector <16 x double> [[V2:%.*]], <16 x double> [[V3:%.*]], <32 x i32>
-; CHECK-NEXT:    [[INTERLEAVED_VEC:%.*]] = shufflevector <32 x double> [[S0]], <32 x double> [[S1]], <16 x i32>
-; CHECK-NEXT:    store <16 x double> [[INTERLEAVED_VEC]], <16 x double>* [[PTR:%.*]], align 16
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <32 x double> [[S0]], <32 x double> [[S1]], <4 x i32>
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <32 x double> [[S0]], <32 x double> [[S1]], <4 x i32>
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <32 x double> [[S0]], <32 x double> [[S1]], <4 x i32>
+; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <32 x double> [[S0]], <32 x double> [[S1]], <4 x i32>
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x double> [[TMP1]], <4 x double> [[TMP3]], <4 x i32>
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x double> [[TMP2]], <4 x double> [[TMP4]], <4 x i32>
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <4 x double> [[TMP1]], <4 x double> [[TMP3]], <4 x i32>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x double> [[TMP2]], <4 x double> [[TMP4]], <4 x i32>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <4 x double> [[TMP5]], <4 x double> [[TMP6]], <4 x i32>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <4 x double> [[TMP7]], <4 x double> [[TMP8]], <4 x i32>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x double> [[TMP5]], <4 x double> [[TMP6]], <4 x i32>
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x double> [[TMP7]], <4 x double> [[TMP8]], <4 x i32>
+; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <4 x double> [[TMP9]], <4 x double> [[TMP11]], <8 x i32>
+; CHECK-NEXT:    [[TMP14:%.*]] = shufflevector <4 x double> [[TMP10]], <4 x double> [[TMP12]], <8 x i32>
+; CHECK-NEXT:    [[TMP15:%.*]] = shufflevector <8 x double> [[TMP13]], <8 x double> [[TMP14]], <16 x i32>
+; CHECK-NEXT:    store <16 x double> [[TMP15]], <16 x double>* [[PTR:%.*]], align 16
 ; CHECK-NEXT:    ret void
 ;
   %s0 = shufflevector <16 x double> %v0, <16 x double> %v1, <32 x i32>
@@ -161,4 +217,3 @@
   store <16 x double> %interleaved.vec, <16 x double>* %ptr, align 16
   ret void
 }
-
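
Note 1 (illustrative sketch, not part of the patch): lowerInterleavedStore()
records the first Factor entries of the wide shuffle mask as the starting
indices (Indices), and decompose() expands each starting index into a
sequential sub-mask via createSequentialMask(). A minimal standalone C++
model of that bookkeeping follows; the WideMask value is an assumption based
on the usual factor-4 interleave pattern, since the mask constants are
elided from the check lines above.

#include <cstdio>
#include <vector>

// Models createSequentialMask(Builder, Start, NumInts, /*NumUndefs=*/0):
// the mask {Start, Start+1, ..., Start+NumInts-1}.
static std::vector<int> sequentialMask(int Start, int NumInts) {
  std::vector<int> Mask;
  for (int i = 0; i < NumInts; ++i)
    Mask.push_back(Start + i);
  return Mask;
}

int main() {
  // Assumed wide factor-4 interleave mask for @store_factorf64_4: v0/v1
  // occupy elements 0-7 of the concatenated shuffle operands, v2/v3
  // elements 8-15, so stored lane k*4+f reads element f*4+k.
  const int WideMask[16] = {0, 4, 8, 12, 1, 5, 9, 13,
                            2, 6, 10, 14, 3, 7, 11, 15};
  const int Factor = 4, NumSubVecElems = 4;

  // lowerInterleavedStore(): Indices = the first Factor mask entries.
  // decompose(): each index becomes a sequential sub-mask, so the four
  // sub-shuffles extract v0, v1, v2 and v3 from the wide shuffle operands.
  for (int i = 0; i < Factor; ++i) {
    std::printf("sub-shuffle %d mask:", i);
    for (int m : sequentialMask(WideMask[i], NumSubVecElems))
      std::printf(" %d", m);
    std::printf("\n"); // e.g. "sub-shuffle 0 mask: 0 1 2 3"
  }
  return 0;
}

For @store_factorf64_4_revMask the first four mask entries would presumably
be 12, 8, 4, 0, so the same loop simply emits the four sub-shuffles in
reversed source order; the rest of the pipeline is unchanged.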
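Note 2 (illustrative sketch, not part of the patch): the TMP1-TMP15 check
lines encode the whole store path, decompose (TMP1-TMP4), transpose_4x4
(TMP5-TMP12), and concatenateVectors plus the wide store (TMP13-TMP15). The
self-contained program below replays that pipeline on element indices and
verifies it produces the interleaved memory layout. The transpose masks are
inferred from the shuffle operand structure in the check lines; since
transpose_4x4 itself is not shown in this patch, treat them as assumptions.

#include <array>
#include <cassert>
#include <cstdio>

using Vec4 = std::array<int, 4>;

// Models IRBuilder::CreateShuffleVector on two 4-element vectors.
static Vec4 shuffle(const Vec4 &A, const Vec4 &B, const Vec4 &Mask) {
  Vec4 R;
  for (int i = 0; i < 4; ++i)
    R[i] = Mask[i] < 4 ? A[Mask[i]] : B[Mask[i] - 4];
  return R;
}

int main() {
  // decompose(): the wide 16-element shuffle is split into four sequential
  // 4-element sub-vectors (sequential masks starting at 0, 4, 8, 12).
  // Element value 4*i+j stands for lane j of source vector vi.
  Vec4 M[4];
  for (int i = 0; i < 4; ++i)
    for (int j = 0; j < 4; ++j)
      M[i][j] = 4 * i + j;

  // transpose_4x4(): two rounds of interleaving shuffles (assumed masks).
  Vec4 I1 = shuffle(M[0], M[2], {0, 1, 4, 5});
  Vec4 I2 = shuffle(M[1], M[3], {0, 1, 4, 5});
  Vec4 I3 = shuffle(M[0], M[2], {2, 3, 6, 7});
  Vec4 I4 = shuffle(M[1], M[3], {2, 3, 6, 7});
  Vec4 T[4];
  T[0] = shuffle(I1, I2, {0, 4, 2, 6});
  T[1] = shuffle(I1, I2, {1, 5, 3, 7});
  T[2] = shuffle(I3, I4, {0, 4, 2, 6});
  T[3] = shuffle(I3, I4, {1, 5, 3, 7});

  // concatenateVectors() + the wide store: row r of the transposed matrix
  // must hold lane r of all four sources, i.e. v0[r], v1[r], v2[r], v3[r],
  // which is exactly the element order the interleaved store writes out.
  for (int r = 0; r < 4; ++r)
    for (int c = 0; c < 4; ++c)
      assert(T[r][c] == 4 * c + r && "not the interleaved layout");
  std::puts("transpose model matches the interleaved store layout");
  return 0;
}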