Index: lib/Target/AArch64/AArch64.h
===================================================================
--- lib/Target/AArch64/AArch64.h
+++ lib/Target/AArch64/AArch64.h
@@ -38,6 +38,7 @@
 ModulePass *createAArch64PromoteConstantPass();
 FunctionPass *createAArch64ConditionOptimizerPass();
 FunctionPass *createAArch64AddressTypePromotionPass();
+FunctionPass *createAArch64InterleavedAccessPass();
 FunctionPass *createAArch64A57FPLoadBalancing();
 FunctionPass *createAArch64A53Fix835769();
Index: lib/Target/AArch64/AArch64InterleavedAccess.cpp
===================================================================
--- /dev/null
+++ lib/Target/AArch64/AArch64InterleavedAccess.cpp
@@ -0,0 +1,390 @@
+//===------------------ AArch64InterleavedAccess.cpp ---------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the AArch64InterleavedAccess pass, which identifies
+// interleaved memory accesses and transforms them into AArch64 ldN/stN
+// intrinsics (N = 2, 3, 4).
+//
+// An interleaved load reads data from memory into several vectors,
+// de-interleaving the data according to a factor. An interleaved store writes
+// several vectors back to memory, re-interleaving the data according to a
+// factor. The interleave factor is equal to the number of vectors. The
+// AArch64 backend supports interleave factors of 2, 3 and 4.
+//
+// E.g. Transform an interleaved load (Factor = 2):
+//        %wide.vec = load <8 x i32>, <8 x i32>* %ptr
+//        %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
+//      Into:
+//        %ld2 = { <4 x i32>, <4 x i32> } call aarch64.neon.ld2(%ptr)
+//        %v0 = extractvalue { <4 x i32>, <4 x i32> } %ld2, 0
+//
+// E.g. Transform an interleaved store (Factor = 2):
+//        %i.vec = shuffle %v0, %v1, <0, 4, 1, 5, 2, 6, 3, 7>  ; Interleaved vec
+//        store <8 x i32> %i.vec, <8 x i32>* %ptr
+//      Into:
+//        %v0 = shuffle %i.vec, undef, <0, 1, 2, 3>
+//        %v1 = shuffle %i.vec, undef, <4, 5, 6, 7>
+//        call void aarch64.neon.st2(%v0, %v1, %ptr)
+//
+// Note that an interleaved load is allowed to have gaps, i.e. not all of the
+// sub-vectors need to be used. The interleaved load in the example above does
+// not use the odd elements.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AArch64.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "aarch64-interleaved-access"
+
+namespace llvm {
+static void initializeAArch64InterleavedAccessPass(PassRegistry &);
+}
+
+namespace {
+
+/// This enum is just used to hold the minimum and maximum interleave factor.
+enum {
+  MIN_FACTOR = 2, /// Minimum interleave factor
+  MAX_FACTOR = 4  /// Maximum interleave factor
+};
+
+class AArch64InterleavedAccess : public FunctionPass {
+
+public:
+  static char ID;
+  AArch64InterleavedAccess() : FunctionPass(ID) {
+    initializeAArch64InterleavedAccessPass(*PassRegistry::getPassRegistry());
+  }
+
+  const char *getPassName() const override {
+    return "AArch64 Interleaved Access Pass";
+  }
+
+  bool runOnFunction(Function &F) override;
+
+private:
+  const DataLayout *DL;
+  Module *M;
+
+  /// \brief Lower an interleaved load into a ldN intrinsic.
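+  /// The load and all of its strided shufflevector users are rewritten to a
+  /// single ldN call; see the example in the file header above for the shape
+  /// of the rewrite.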
+  bool matchInterleavedLoad(ShuffleVectorInst *SVI,
+                            SmallVectorImpl<Instruction *> &DeadInsts);
+
+  /// \brief Lower an interleaved store into a stN intrinsic.
+  bool matchInterleavedStore(ShuffleVectorInst *SVI,
+                             SmallVectorImpl<Instruction *> &DeadInsts);
+};
+} // end anonymous namespace.
+
+char AArch64InterleavedAccess::ID = 0;
+
+INITIALIZE_PASS_BEGIN(AArch64InterleavedAccess, DEBUG_TYPE,
+                      "AArch64 Interleaved Access Pass", false, false)
+INITIALIZE_PASS_END(AArch64InterleavedAccess, DEBUG_TYPE,
+                    "AArch64 Interleaved Access Pass", false, false)
+
+FunctionPass *llvm::createAArch64InterleavedAccessPass() {
+  return new AArch64InterleavedAccess();
+}
+
+static Intrinsic::ID getLdNStNIntrinsic(unsigned Factor, bool IsLoad) {
+  static Intrinsic::ID LoadInt[3] = {Intrinsic::aarch64_neon_ld2,
+                                     Intrinsic::aarch64_neon_ld3,
+                                     Intrinsic::aarch64_neon_ld4};
+  static Intrinsic::ID StoreInt[3] = {Intrinsic::aarch64_neon_st2,
+                                      Intrinsic::aarch64_neon_st3,
+                                      Intrinsic::aarch64_neon_st4};
+
+  assert(Factor >= MIN_FACTOR && Factor <= MAX_FACTOR &&
+         "Invalid interleave factor");
+
+  if (IsLoad)
+    return LoadInt[Factor - 2];
+  else
+    return StoreInt[Factor - 2];
+}
+
+/// \brief Check if the mask is a strided mask of the given factor like:
+///        <Index, Index + Factor, Index + 2 * Factor, ...>
+///
+/// E.g. The strided masks of an interleaved load (Factor = 2):
+///        <0, 2, 4, 6>    (Index 0)
+///        <1, 3, 5, 7>    (Index 1)
+static bool isStridedMaskOfGivenFactor(ArrayRef<int> Mask, unsigned Factor,
+                                       unsigned &Index) {
+  // Check all potential indices from 0 to (Factor - 1).
+  for (Index = 0; Index < Factor; Index++) {
+    unsigned i = 0;
+    for (; i < Mask.size(); i++)
+      if (Mask[i] >= 0 && static_cast<unsigned>(Mask[i]) != Index + i * Factor)
+        break;
+
+    // All elements are strided on this Index and Factor.
+    if (i == Mask.size())
+      return true;
+  }
+
+  return false;
+}
+
+/// \brief Check if the mask is a strided mask and return the factor and index
+/// if it is.
+static bool isStridedMask(ArrayRef<int> Mask, unsigned &Factor,
+                          unsigned &Index) {
+  unsigned NumElts = Mask.size();
+  if (NumElts < 2)
+    return false;
+
+  for (Factor = MIN_FACTOR; Factor <= MAX_FACTOR; Factor++)
+    if (isStridedMaskOfGivenFactor(Mask, Factor, Index))
+      return true;
+
+  return false;
+}
+
+bool AArch64InterleavedAccess::matchInterleavedLoad(
+    ShuffleVectorInst *SVI, SmallVectorImpl<Instruction *> &DeadInsts) {
+  LoadInst *LI = dyn_cast<LoadInst>(SVI->getOperand(0));
+  if (!LI || !LI->isSimple() || !isa<UndefValue>(SVI->getOperand(1)))
+    return false;
+
+  VectorType *VecTy = SVI->getType();
+
+  // Skip illegal vector types.
+  unsigned TypeSize = DL->getTypeAllocSizeInBits(VecTy);
+  if (TypeSize != 64 && TypeSize != 128)
+    return false;
+
+  // Check if the mask is strided and get the start index.
+  unsigned Factor, Index;
+  if (!isStridedMask(SVI->getShuffleMask(), Factor, Index))
+    return false;
+
+  // Holds each strided shufflevector together with its start index.
+  SmallVector<std::pair<ShuffleVectorInst *, unsigned>, 4> StridedShuffles;
+  StridedShuffles.push_back(std::make_pair(SVI, Index));
+
+  // Check whether the other users of this load are also strided
+  // shufflevectors of the same Factor and type.
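+  // E.g. a <8 x i32> load feeding both a <0, 2, 4, 6> and a <1, 3, 5, 7>
+  // shuffle is served by a single ld2; any user that is not such a strided
+  // shuffle of the same type defeats the transformation.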
+  for (Value::user_iterator UI = LI->user_begin(), E = LI->user_end();
+       UI != E;) {
+    ShuffleVectorInst *SV = dyn_cast<ShuffleVectorInst>(*UI++);
+    if (SV == SVI)
+      continue;
+
+    if (!SV || SV->getType() != VecTy)
+      return false;
+
+    unsigned OIndex;
+    if (!isStridedMaskOfGivenFactor(SV->getShuffleMask(), Factor, OIndex))
+      return false;
+
+    StridedShuffles.push_back(std::make_pair(SV, OIndex));
+  }
+
+  DEBUG(dbgs() << "Found an interleaved load:" << *LI << "\n");
+
+  // A pointer vector can not be the return type of the ldN intrinsics. Need
+  // to load integer vectors first and then convert to pointer vectors.
+  Type *EltTy = VecTy->getVectorElementType();
+  if (EltTy->isPointerTy())
+    VecTy = VectorType::get(DL->getIntPtrType(EltTy),
+                            VecTy->getVectorNumElements());
+
+  Type *PtrTy = VecTy->getPointerTo(LI->getPointerAddressSpace());
+  Type *Tys[2] = {VecTy, PtrTy};
+  Function *LdnFunc =
+      Intrinsic::getDeclaration(M, getLdNStNIntrinsic(Factor, true), Tys);
+
+  IRBuilder<> Builder(LI);
+  Value *Ptr = Builder.CreateBitCast(LI->getPointerOperand(), PtrTy);
+
+  CallInst *Ldn = Builder.CreateCall(LdnFunc, Ptr, "ldN");
+  DEBUG(dbgs() << "  Created:" << *Ldn << "\n");
+
+  // Replace each strided shufflevector with the corresponding vector loaded
+  // by ldN.
+  for (auto I : StridedShuffles) {
+    ShuffleVectorInst *SV = I.first;
+    unsigned Index = I.second;
+
+    Value *SubVec = Builder.CreateExtractValue(Ldn, Index);
+
+    // Convert the integer vector to a pointer vector if the element type is
+    // a pointer.
+    if (EltTy->isPointerTy())
+      SubVec = Builder.CreateIntToPtr(SubVec, SV->getType());
+
+    DEBUG(dbgs() << "  Replacing:" << *SV << "\n"
+                 << "       With:" << *SubVec << "\n");
+    SV->replaceAllUsesWith(SubVec);
+
+    // Avoid analyzing it twice.
+    if (SV != SVI)
+      SV->eraseFromParent();
+  }
+
+  // Mark this shufflevector and load as dead.
+  DeadInsts.push_back(SVI);
+  DeadInsts.push_back(LI);
+  return true;
+}
+
+/// \brief Check if the given mask \p Mask is an interleaved mask of the given
+/// \p Factor.
+///
+/// I.e. <0, NumSubElts, ... , NumSubElts*(Factor - 1), 1, NumSubElts + 1, ...>
+static bool isInterleavedMaskOfGivenFactor(ArrayRef<int> Mask,
+                                           unsigned Factor) {
+  unsigned NumElts = Mask.size();
+  if (NumElts % Factor)
+    return false;
+
+  unsigned NumSubElts = NumElts / Factor;
+  if (!isPowerOf2_32(NumSubElts))
+    return false;
+
+  for (unsigned i = 0; i < NumSubElts; i++)
+    for (unsigned j = 0; j < Factor; j++)
+      if (Mask[i * Factor + j] >= 0 &&
+          static_cast<unsigned>(Mask[i * Factor + j]) != j * NumSubElts + i)
+        return false;
+
+  return true;
+}
+
+/// \brief Check if the given mask \p Mask is an interleaved mask like:
+///        <0, NumSubElts, NumSubElts*2, ..., NumSubElts*(Factor-1), 1, ...>
+///
+/// E.g. The interleaved masks of an interleaved store (Factor = 2):
+///        <0, 4, 1, 5, 2, 6, 3, 7>
+static bool isInterleavedMask(ArrayRef<int> Mask, unsigned &Factor) {
+  if (Mask.size() < 4)
+    return false;
+
+  // Check potential Factors and return true if a factor is found for the mask.
+  for (Factor = MIN_FACTOR; Factor <= MAX_FACTOR; Factor++)
+    if (isInterleavedMaskOfGivenFactor(Mask, Factor))
+      return true;
+
+  return false;
+}
+
+/// \brief Get a mask consisting of sequential integers starting from \p Start.
+///
+/// I.e. <Start, Start + 1, ..., Start + NumElts - 1>
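+/// E.g. getSequentialMask(Builder, 4, 4) returns <4, 5, 6, 7>.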
+static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned Start,
+                                   unsigned NumElts) {
+  SmallVector<Constant *, 16> Mask;
+  for (unsigned i = 0; i < NumElts; i++)
+    Mask.push_back(Builder.getInt32(Start + i));
+
+  return ConstantVector::get(Mask);
+}
+
+bool AArch64InterleavedAccess::matchInterleavedStore(
+    ShuffleVectorInst *SVI, SmallVectorImpl<Instruction *> &DeadInsts) {
+  if (!SVI->hasOneUse())
+    return false;
+
+  StoreInst *SI = dyn_cast<StoreInst>(SVI->user_back());
+  if (!SI || !SI->isSimple())
+    return false;
+
+  // Check if the mask is interleaved and get the interleave factor.
+  unsigned Factor;
+  if (!isInterleavedMask(SVI->getShuffleMask(), Factor))
+    return false;
+
+  VectorType *VecTy = SVI->getType();
+  unsigned NumSubElts = VecTy->getVectorNumElements() / Factor;
+  Type *EltTy = VecTy->getVectorElementType();
+  VectorType *SubVecTy = VectorType::get(EltTy, NumSubElts);
+
+  // Skip illegal sub vector types.
+  unsigned TypeSize = DL->getTypeAllocSizeInBits(SubVecTy);
+  if (TypeSize != 64 && TypeSize != 128)
+    return false;
+
+  DEBUG(dbgs() << "Found an interleaved store:" << *SI << "\n");
+
+  Value *Op0 = SVI->getOperand(0);
+  Value *Op1 = SVI->getOperand(1);
+  IRBuilder<> Builder(SI);
+
+  // StN intrinsics don't support pointer vectors as arguments. Convert
+  // pointer vectors to integer vectors.
+  if (EltTy->isPointerTy()) {
+    Type *IntTy = DL->getIntPtrType(EltTy);
+    unsigned NumOpElts =
+        cast<VectorType>(Op0->getType())->getVectorNumElements();
+
+    // The corresponding integer vector type of the same element size.
+    Type *IntVecTy = VectorType::get(IntTy, NumOpElts);
+
+    Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
+    Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);
+    SubVecTy = VectorType::get(IntTy, NumSubElts);
+  }
+
+  Type *PtrTy = SubVecTy->getPointerTo(SI->getPointerAddressSpace());
+  Type *Tys[2] = {SubVecTy, PtrTy};
+  Function *StNFunc =
+      Intrinsic::getDeclaration(M, getLdNStNIntrinsic(Factor, false), Tys);
+
+  SmallVector<Value *, 6> Ops;
+
+  // Split the shufflevector operands into sub vectors for the new stN call.
+  for (unsigned i = 0; i < Factor; i++)
+    Ops.push_back(Builder.CreateShuffleVector(
+        Op0, Op1, getSequentialMask(Builder, NumSubElts * i, NumSubElts)));
+
+  Ops.push_back(Builder.CreateBitCast(SI->getPointerOperand(), PtrTy));
+
+  DEBUG(dbgs() << "  Replacing:" << *SI << "\n");
+
+  CallInst *StN = Builder.CreateCall(StNFunc, Ops);
+
+  DEBUG(dbgs() << "       With:" << *StN << "\n");
+
+  SI->eraseFromParent();
+
+  // Mark this shufflevector as dead.
+  DeadInsts.push_back(SVI);
+  return true;
+}
+
+bool AArch64InterleavedAccess::runOnFunction(Function &F) {
+  DEBUG(dbgs() << "*** " << getPassName() << ": " << F.getName() << "\n");
+
+  M = F.getParent();
+  DL = &M->getDataLayout();
+
+  // Holds dead instructions that will be erased later.
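+  // Deleting them immediately would invalidate the instruction iteration
+  // below, so the matchers only queue them here.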
+  SmallVector<Instruction *, 32> DeadInsts;
+  bool Changed = false;
+  for (auto &I : inst_range(F)) {
+    if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(&I)) {
+      Changed |= matchInterleavedLoad(SVI, DeadInsts);
+      Changed |= matchInterleavedStore(SVI, DeadInsts);
+    }
+  }
+
+  for (auto I : DeadInsts)
+    I->eraseFromParent();
+
+  return Changed;
+}
Index: lib/Target/AArch64/AArch64TargetMachine.cpp
===================================================================
--- lib/Target/AArch64/AArch64TargetMachine.cpp
+++ lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -67,6 +67,11 @@
                 " to make use of cmpxchg flow-based information"),
     cl::init(true));
 
+static cl::opt<bool> AArch64InterleavedAccessOpt(
+    "aarch64-interleaved-access-opt",
+    cl::desc("Optimize interleaved memory accesses in AArch64 backend"),
+    cl::init(false), cl::Hidden);
+
 static cl::opt<bool>
 EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                         cl::desc("Run early if-conversion"),
@@ -225,6 +230,9 @@
   if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
     addPass(createCFGSimplificationPass());
 
+  if (TM->getOptLevel() != CodeGenOpt::None && AArch64InterleavedAccessOpt)
+    addPass(createAArch64InterleavedAccessPass());
+
   TargetPassConfig::addIRPasses();
 
   if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
Index: lib/Target/AArch64/AArch64TargetTransformInfo.h
===================================================================
--- lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -139,6 +139,11 @@
 
   bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info);
 
+  unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
+                                      unsigned Factor,
+                                      ArrayRef<unsigned> Indices,
+                                      unsigned Alignment,
+                                      unsigned AddressSpace);
   /// @}
 };
Index: lib/Target/AArch64/AArch64TargetTransformInfo.cpp
===================================================================
--- lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -407,6 +407,18 @@
   return LT.first;
 }
 
+unsigned AArch64TTIImpl::getInterleavedMemoryOpCost(
+    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
+    unsigned Alignment, unsigned AddressSpace) {
+  assert(isa<VectorType>(VecTy) && "Expect a vector type");
+
+  if (Factor > 1 && Factor < 5 && isTypeLegal(VecTy))
+    return Factor;
+
+  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
+                                           Alignment, AddressSpace);
+}
+
 unsigned AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
   unsigned Cost = 0;
   for (auto *I : Tys) {
Index: lib/Target/AArch64/CMakeLists.txt
===================================================================
--- lib/Target/AArch64/CMakeLists.txt
+++ lib/Target/AArch64/CMakeLists.txt
@@ -38,6 +38,7 @@
   AArch64PBQPRegAlloc.cpp
   AArch64RegisterInfo.cpp
   AArch64SelectionDAGInfo.cpp
+  AArch64InterleavedAccess.cpp
   AArch64StorePairSuppress.cpp
   AArch64Subtarget.cpp
   AArch64TargetMachine.cpp
Index: test/CodeGen/AArch64/aarch64-interleaved-accesses.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AArch64/aarch64-interleaved-accesses.ll
@@ -0,0 +1,197 @@
+; RUN: llc -march=aarch64 -aarch64-interleaved-access-opt=true < %s | FileCheck %s
+
+; CHECK-LABEL: load_factor2:
+; CHECK: ld2 { v0.8b, v1.8b }, [x0]
+define <8 x i8> @load_factor2(<16 x i8>* %ptr) {
+  %wide.vec = load <16 x i8>, <16 x i8>* %ptr, align 4
+  %strided.v0 = shufflevector <16 x i8> %wide.vec, <16 x i8> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+  %strided.v1 = shufflevector <16 x i8> %wide.vec, <16 x i8> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+  %add = add nsw <8 x i8> %strided.v0, %strided.v1
+  ret <8 x i8> %add
+}
+
+; CHECK-LABEL: load_factor3:
+; CHECK: ld3 { v0.4s, v1.4s, v2.4s }, [x0]
+define <4 x i32> @load_factor3(i32* %ptr) {
+  %base = bitcast i32* %ptr to <12 x i32>*
+  %wide.vec = load <12 x i32>, <12 x i32>* %base, align 4
+  %strided.v2 = shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
+  %strided.v1 = shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
+  %add = add nsw <4 x i32> %strided.v2, %strided.v1
+  ret <4 x i32> %add
+}
+
+; CHECK-LABEL: load_factor4:
+; CHECK: ld4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
+define <4 x i32> @load_factor4(i32* %ptr) {
+  %base = bitcast i32* %ptr to <16 x i32>*
+  %wide.vec = load <16 x i32>, <16 x i32>* %base, align 4
+  %strided.v0 = shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %strided.v2 = shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
+  %add = add nsw <4 x i32> %strided.v0, %strided.v2
+  ret <4 x i32> %add
+}
+
+; CHECK-LABEL: store_factor2:
+; CHECK: st2 { v0.8b, v1.8b }, [x0]
+define void @store_factor2(<16 x i8>* %ptr, <8 x i8> %v0, <8 x i8> %v1) {
+  %interleaved.vec = shufflevector <8 x i8> %v0, <8 x i8> %v1, <16 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11, i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+  store <16 x i8> %interleaved.vec, <16 x i8>* %ptr, align 4
+  ret void
+}
+
+; CHECK-LABEL: store_factor3:
+; CHECK: st3 { v0.4s, v1.4s, v2.4s }, [x0]
+define void @store_factor3(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {
+  %base = bitcast i32* %ptr to <12 x i32>*
+  %v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %v2_u = shufflevector <4 x i32> %v2, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+  %interleaved.vec = shufflevector <8 x i32> %v0_v1, <8 x i32> %v2_u, <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
+  store <12 x i32> %interleaved.vec, <12 x i32>* %base, align 4
+  ret void
+}
+
+; CHECK-LABEL: store_factor4:
+; CHECK: st4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
+define void @store_factor4(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
+  %base = bitcast i32* %ptr to <16 x i32>*
+  %v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %v2_v3 = shufflevector <4 x i32> %v2, <4 x i32> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %interleaved.vec = shufflevector <8 x i32> %v0_v1, <8 x i32> %v2_v3, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
+  store <16 x i32> %interleaved.vec, <16 x i32>* %base, align 4
+  ret void
+}
+
+; The following cases test that interleaved accesses with pointer vectors can
+; be matched to ldN/stN instructions.
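+; ldN/stN intrinsics cannot return or take pointer vectors directly, so the
+; pass is expected to go through the pointer-sized integer type instead,
+; roughly:
+;   %ld2 = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2(...)
+;   %v0  = inttoptr of the first result to <2 x i32*>
+; (schematic only; the exact intrinsic mangling is omitted).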
+
+; CHECK-LABEL: load_ptrvec_factor2:
+; CHECK: ld2 { v0.2d, v1.2d }, [x0]
+define <2 x i32*> @load_ptrvec_factor2(i32** %ptr) {
+  %base = bitcast i32** %ptr to <4 x i32*>*
+  %wide.vec = load <4 x i32*>, <4 x i32*>* %base, align 4
+  %strided.v0 = shufflevector <4 x i32*> %wide.vec, <4 x i32*> undef, <2 x i32> <i32 0, i32 2>
+  ret <2 x i32*> %strided.v0
+}
+
+; CHECK-LABEL: load_ptrvec_factor3:
+; CHECK: ld3 { v0.2d, v1.2d, v2.2d }, [x0]
+define void @load_ptrvec_factor3(i32** %ptr, <2 x i32*>* %ptr1, <2 x i32*>* %ptr2) {
+  %base = bitcast i32** %ptr to <6 x i32*>*
+  %wide.vec = load <6 x i32*>, <6 x i32*>* %base, align 4
+  %strided.v2 = shufflevector <6 x i32*> %wide.vec, <6 x i32*> undef, <2 x i32> <i32 2, i32 5>
+  store <2 x i32*> %strided.v2, <2 x i32*>* %ptr1
+  %strided.v1 = shufflevector <6 x i32*> %wide.vec, <6 x i32*> undef, <2 x i32> <i32 1, i32 4>
+  store <2 x i32*> %strided.v1, <2 x i32*>* %ptr2
+  ret void
+}
+
+; CHECK-LABEL: load_ptrvec_factor4:
+; CHECK: ld4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+define void @load_ptrvec_factor4(i32** %ptr, <2 x i32*>* %ptr1, <2 x i32*>* %ptr2) {
+  %base = bitcast i32** %ptr to <8 x i32*>*
+  %wide.vec = load <8 x i32*>, <8 x i32*>* %base, align 4
+  %strided.v1 = shufflevector <8 x i32*> %wide.vec, <8 x i32*> undef, <2 x i32> <i32 1, i32 5>
+  %strided.v3 = shufflevector <8 x i32*> %wide.vec, <8 x i32*> undef, <2 x i32> <i32 3, i32 7>
+  store <2 x i32*> %strided.v1, <2 x i32*>* %ptr1
+  store <2 x i32*> %strided.v3, <2 x i32*>* %ptr2
+  ret void
+}
+
+; CHECK-LABEL: store_ptrvec_factor2:
+; CHECK: st2 { v0.2d, v1.2d }, [x0]
+define void @store_ptrvec_factor2(i32** %ptr, <2 x i32*> %v0, <2 x i32*> %v1) {
+  %base = bitcast i32** %ptr to <4 x i32*>*
+  %interleaved.vec = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+  store <4 x i32*> %interleaved.vec, <4 x i32*>* %base, align 4
+  ret void
+}
+
+; CHECK-LABEL: store_ptrvec_factor3:
+; CHECK: st3 { v0.2d, v1.2d, v2.2d }, [x0]
+define void @store_ptrvec_factor3(i32** %ptr, <2 x i32*> %v0, <2 x i32*> %v1, <2 x i32*> %v2) {
+  %base = bitcast i32** %ptr to <6 x i32*>*
+  %v0_v1 = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %v2_u = shufflevector <2 x i32*> %v2, <2 x i32*> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+  %interleaved.vec = shufflevector <4 x i32*> %v0_v1, <4 x i32*> %v2_u, <6 x i32> <i32 0, i32 2, i32 4, i32 1, i32 3, i32 5>
+  store <6 x i32*> %interleaved.vec, <6 x i32*>* %base, align 4
+  ret void
+}
+
+; CHECK-LABEL: store_ptrvec_factor4:
+; CHECK: st4 { v0.2d, v1.2d, v2.2d, v3.2d }, [x0]
+define void @store_ptrvec_factor4(i32* %ptr, <2 x i32*> %v0, <2 x i32*> %v1, <2 x i32*> %v2, <2 x i32*> %v3) {
+  %base = bitcast i32* %ptr to <8 x i32*>*
+  %v0_v1 = shufflevector <2 x i32*> %v0, <2 x i32*> %v1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %v2_v3 = shufflevector <2 x i32*> %v2, <2 x i32*> %v3, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %interleaved.vec = shufflevector <4 x i32*> %v0_v1, <4 x i32*> %v2_v3, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
+  store <8 x i32*> %interleaved.vec, <8 x i32*>* %base, align 4
+  ret void
+}
+
+; The following cases check that shuffle masks with undef indices can be
+; matched to ldN/stN instructions.
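+; Undef (negative) mask indices are treated as wildcards by the matchers, so
+; e.g. <undef, 2, undef, 6> is still recognized as the even-element (index 0)
+; mask of a factor-2 load.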
+
+; CHECK-LABEL: load_undef_mask_factor2:
+; CHECK: ld2 { v0.4s, v1.4s }, [x0]
+define <4 x i32> @load_undef_mask_factor2(i32* %ptr) {
+  %base = bitcast i32* %ptr to <8 x i32>*
+  %wide.vec = load <8 x i32>, <8 x i32>* %base, align 4
+  %strided.v0 = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 undef, i32 2, i32 undef, i32 6>
+  %strided.v1 = shufflevector <8 x i32> %wide.vec, <8 x i32> undef, <4 x i32> <i32 undef, i32 3, i32 undef, i32 7>
+  %add = add nsw <4 x i32> %strided.v0, %strided.v1
+  ret <4 x i32> %add
+}
+
+; CHECK-LABEL: load_undef_mask_factor3:
+; CHECK: ld3 { v0.4s, v1.4s, v2.4s }, [x0]
+define <4 x i32> @load_undef_mask_factor3(i32* %ptr) {
+  %base = bitcast i32* %ptr to <12 x i32>*
+  %wide.vec = load <12 x i32>, <12 x i32>* %base, align 4
+  %strided.v2 = shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 undef, i32 undef>
+  %strided.v1 = shufflevector <12 x i32> %wide.vec, <12 x i32> undef, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
+  %add = add nsw <4 x i32> %strided.v2, %strided.v1
+  ret <4 x i32> %add
+}
+
+; CHECK-LABEL: load_undef_mask_factor4:
+; CHECK: ld4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
+define <4 x i32> @load_undef_mask_factor4(i32* %ptr) {
+  %base = bitcast i32* %ptr to <16 x i32>*
+  %wide.vec = load <16 x i32>, <16 x i32>* %base, align 4
+  %strided.v0 = shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 0, i32 4, i32 undef, i32 undef>
+  %strided.v2 = shufflevector <16 x i32> %wide.vec, <16 x i32> undef, <4 x i32> <i32 2, i32 undef, i32 10, i32 14>
+  %add = add nsw <4 x i32> %strided.v0, %strided.v2
+  ret <4 x i32> %add
+}
+
+; CHECK-LABEL: store_undef_mask_factor2:
+; CHECK: st2 { v0.4s, v1.4s }, [x0]
+define void @store_undef_mask_factor2(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1) {
+  %base = bitcast i32* %ptr to <8 x i32>*
+  %interleaved.vec = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 undef, i32 4, i32 1, i32 undef, i32 2, i32 6, i32 undef, i32 7>
+  store <8 x i32> %interleaved.vec, <8 x i32>* %base, align 4
+  ret void
+}
+
+; CHECK-LABEL: store_undef_mask_factor3:
+; CHECK: st3 { v0.4s, v1.4s, v2.4s }, [x0]
+define void @store_undef_mask_factor3(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2) {
+  %base = bitcast i32* %ptr to <12 x i32>*
+  %v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %v2_u = shufflevector <4 x i32> %v2, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
+  %interleaved.vec = shufflevector <8 x i32> %v0_v1, <8 x i32> %v2_u, <12 x i32> <i32 0, i32 4, i32 undef, i32 1, i32 undef, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
+  store <12 x i32> %interleaved.vec, <12 x i32>* %base, align 4
+  ret void
+}
+
+; CHECK-LABEL: store_undef_mask_factor4:
+; CHECK: st4 { v0.4s, v1.4s, v2.4s, v3.4s }, [x0]
+define void @store_undef_mask_factor4(i32* %ptr, <4 x i32> %v0, <4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
+  %base = bitcast i32* %ptr to <16 x i32>*
+  %v0_v1 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %v2_v3 = shufflevector <4 x i32> %v2, <4 x i32> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %interleaved.vec = shufflevector <8 x i32> %v0_v1, <8 x i32> %v2_v3, <16 x i32> <i32 0, i32 4, i32 8, i32 undef, i32 undef, i32 5, i32 9, i32 13, i32 2, i32 6, i32 undef, i32 14, i32 3, i32 7, i32 11, i32 undef>
+  store <16 x i32> %interleaved.vec, <16 x i32>* %base, align 4
+  ret void
+}