Index: include/llvm/InitializePasses.h
===================================================================
--- include/llvm/InitializePasses.h
+++ include/llvm/InitializePasses.h
@@ -312,6 +312,7 @@
 void initializeLoopAccessAnalysisPass(PassRegistry&);
 void initializeLoopVectorizePass(PassRegistry&);
 void initializeSLPVectorizerPass(PassRegistry&);
+void initializeLoadStoreVectorizerPass(PassRegistry&);
 void initializeBBVectorizePass(PassRegistry&);
 void initializeMachineFunctionPrinterPassPass(PassRegistry&);
 void initializeMIRPrintingPassPass(PassRegistry&);
Index: include/llvm/LinkAllPasses.h
===================================================================
--- include/llvm/LinkAllPasses.h
+++ include/llvm/LinkAllPasses.h
@@ -182,6 +182,7 @@
       (void) llvm::createInstructionSimplifierPass();
       (void) llvm::createLoopVectorizePass();
       (void) llvm::createSLPVectorizerPass();
+      (void) llvm::createLoadStoreVectorizerPass(128);
       (void) llvm::createBBVectorizePass();
       (void) llvm::createPartiallyInlineLibCallsPass();
       (void) llvm::createScalarizerPass();
Index: include/llvm/Transforms/Vectorize.h
===================================================================
--- include/llvm/Transforms/Vectorize.h
+++ include/llvm/Transforms/Vectorize.h
@@ -139,6 +139,13 @@
 bool vectorizeBasicBlock(Pass *P, BasicBlock &BB,
                          const VectorizeConfig &C = VectorizeConfig());
 
+//===----------------------------------------------------------------------===//
+//
+// LoadStoreVectorizer - Create vector loads and stores, but leave scalar
+// operations.
+//
+Pass *createLoadStoreVectorizerPass(unsigned VecRegSize);
+
 } // End llvm namespace
 
 #endif
Index: lib/Transforms/Vectorize/CMakeLists.txt
===================================================================
--- lib/Transforms/Vectorize/CMakeLists.txt
+++ lib/Transforms/Vectorize/CMakeLists.txt
@@ -1,6 +1,7 @@
 add_llvm_library(LLVMVectorize
   BBVectorize.cpp
   Vectorize.cpp
+  LoadStoreVectorizer.cpp
   LoopVectorize.cpp
   SLPVectorizer.cpp
Index: lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
===================================================================
--- /dev/null
+++ lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -0,0 +1,848 @@
+//===----- LoadStoreVectorizer.cpp - GPU Load & Store Vectorizer ----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
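+// This pass merges adjacent, independent loads (or stores) of scalar and
+// small-vector types into wider vector loads and stores, which GPU targets
+// can service with a single memory transaction. As an illustrative sketch
+// (the value names here are made up; see the AMDGPU tests in this patch for
+// real cases), under the default 128-bit vector width two consecutive i32
+// stores
+//
+//   store i32 %x, i32 addrspace(1)* %ptr
+//   store i32 %y, i32 addrspace(1)* %ptr.1   ; %ptr.1 is %ptr + 1 element
+//
+// become a single
+//
+//   store <2 x i32> %xy, <2 x i32> addrspace(1)* %cast
+//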
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Vectorize.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "load-store-vectorizer"
+STATISTIC(NumVectorInstructions, "Number of vector instructions generated");
+
+namespace {
+
+class Vectorizer {
+  typedef SmallVector<Value *, 8> ValueList;
+  typedef MapVector<Value *, ValueList> ValueListMap;
+
+  Function &F;
+  AliasAnalysis &AA;
+  DominatorTree &DT;
+  ScalarEvolution &SE;
+  const DataLayout &DL;
+  IRBuilder<> Builder;
+  ValueListMap StoreRefs;
+  ValueListMap LoadRefs;
+  unsigned VecRegSize;
+
+public:
+  Vectorizer(Function &F, AliasAnalysis &AA, DominatorTree &DT,
+             ScalarEvolution &SE, unsigned VecRegSize)
+      : F(F), AA(AA), DT(DT), SE(SE), DL(F.getParent()->getDataLayout()),
+        Builder(SE.getContext()), VecRegSize(VecRegSize) {}
+
+  bool run();
+
+private:
+  Value *getPointerOperand(Value *I);
+
+  unsigned getAddressSpaceOperand(Value *I);
+
+  /// Checks if A and B are consecutive memory accesses.
+  bool isConsecutiveAccess(Value *A, Value *B);
+
+  /// Reorders the users of I after vectorization so that I dominates them.
+  void reorder(Instruction *I);
+
+  /// Returns the first and the last instructions in Chain.
+  std::pair<BasicBlock::iterator, BasicBlock::iterator>
+  getBoundaryInstrs(ArrayRef<Value *> Chain);
+
+  /// Erases the original instructions after vectorizing.
+  void eraseInstructions(ArrayRef<Value *> Chain);
+
+  void propagateMetadata(Instruction *I, ArrayRef<Value *> VL);
+
+  /// Splits Chain in two so that the size of the left half is a multiple of
+  /// 4 bytes.
+  std::pair<ArrayRef<Value *>, ArrayRef<Value *>>
+  bisect(ArrayRef<Value *> Chain, unsigned ElementSize);
+
+  /// Checks if there are any memory instructions between From and To which
+  /// may alias with the accesses in Chain.
+  bool isVectorizable(ArrayRef<Value *> Chain, BasicBlock::iterator From,
+                      BasicBlock::iterator To);
+
+  /// Collects load and store instructions to vectorize.
+  void collectInstructions(BasicBlock *BB);
+
+  /// Processes the collected chains in Map.
+  bool vectorizeChains(ValueListMap &Map);
+
+  /// Finds consecutive accesses in Instrs and vectorizes them.
+  bool vectorizeInstructions(ArrayRef<Value *> Instrs);
+
+  /// Vectorizes the load instructions in Chain.
+  bool vectorizeLoadChain(ArrayRef<Value *> Chain);
+
+  /// Vectorizes the store instructions in Chain.
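+  // Note: VecRegSize is the widest vector the pass may form, in bits; the
+  // 128-bit default chosen by the wrapper pass below matches the 16-byte
+  // transactions the AMDGPU tests in this patch expect.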
+  bool vectorizeStoreChain(ArrayRef<Value *> Chain);
+};
+
+class LoadStoreVectorizer : public FunctionPass {
+public:
+  static char ID;
+  unsigned VecRegSize;
+
+  LoadStoreVectorizer(unsigned VecRegSize = 128)
+      : FunctionPass(ID), VecRegSize(VecRegSize) {
+    initializeLoadStoreVectorizerPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnFunction(Function &F) override;
+
+  const char *getPassName() const override {
+    return "GPU Load and Store Vectorizer";
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<AAResultsWrapperPass>();
+    AU.addRequired<ScalarEvolutionWrapperPass>();
+    AU.addRequired<DominatorTreeWrapperPass>();
+    AU.setPreservesCFG();
+  }
+};
+}
+
+INITIALIZE_PASS_BEGIN(LoadStoreVectorizer, DEBUG_TYPE,
+                      "Vectorize load and store instructions", false, false)
+INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
+INITIALIZE_PASS_END(LoadStoreVectorizer, DEBUG_TYPE,
+                    "Vectorize load and store instructions", false, false)
+
+char LoadStoreVectorizer::ID = 0;
+
+Pass *llvm::createLoadStoreVectorizerPass(unsigned VecRegSize) {
+  return new LoadStoreVectorizer(VecRegSize);
+}
+
+bool LoadStoreVectorizer::runOnFunction(Function &F) {
+  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
+  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+  ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
+
+  // Don't vectorize when the attribute NoImplicitFloat is used.
+  if (F.hasFnAttribute(Attribute::NoImplicitFloat))
+    return false;
+
+  Vectorizer V(F, AA, DT, SE, VecRegSize);
+  return V.run();
+}
+
+// Vectorizer Implementation
+bool Vectorizer::run() {
+  bool Changed = false;
+
+  // Scan the blocks in the function in post order.
+  for (po_iterator<BasicBlock *> I = po_begin(&F.getEntryBlock()),
+                                 E = po_end(&F.getEntryBlock());
+       I != E; ++I) {
+    collectInstructions(*I);
+    Changed |= vectorizeChains(LoadRefs);
+    Changed |= vectorizeChains(StoreRefs);
+  }
+
+  return Changed;
+}
+
+Value *Vectorizer::getPointerOperand(Value *I) {
+  if (LoadInst *LI = dyn_cast<LoadInst>(I))
+    return LI->getPointerOperand();
+  if (StoreInst *SI = dyn_cast<StoreInst>(I))
+    return SI->getPointerOperand();
+  return nullptr;
+}
+
+unsigned Vectorizer::getAddressSpaceOperand(Value *I) {
+  if (LoadInst *L = dyn_cast<LoadInst>(I))
+    return L->getPointerAddressSpace();
+  if (StoreInst *S = dyn_cast<StoreInst>(I))
+    return S->getPointerAddressSpace();
+  return -1;
+}
+
+// FIXME: Merge with llvm::isConsecutiveAccess
+bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) {
+  Value *PtrA = getPointerOperand(A);
+  Value *PtrB = getPointerOperand(B);
+  unsigned ASA = getAddressSpaceOperand(A);
+  unsigned ASB = getAddressSpaceOperand(B);
+
+  // Check that the address spaces match and that the pointers are valid.
+  if (!PtrA || !PtrB || (ASA != ASB))
+    return false;
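+
+  // The strategy, sketched on a 4-byte example: strip constant offsets so
+  // the two pointers become base_A + OffsetA and base_B + OffsetB. B follows
+  // A directly when the bases agree and OffsetB - OffsetA equals A's store
+  // size; e.g. two i32 accesses at %base+4 and %base+8 have delta 4, exactly
+  // the size of an i32. If the bases differ syntactically, we instead ask
+  // SCEV whether base_A + (Size - OffsetDelta) is the same expression as
+  // base_B.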
+
+  // Make sure that A and B are different pointers of the same size type.
+  unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
+  Type *PtrATy = PtrA->getType()->getPointerElementType();
+  Type *PtrBTy = PtrB->getType()->getPointerElementType();
+  if (PtrA == PtrB ||
+      DL.getTypeStoreSize(PtrATy) != DL.getTypeStoreSize(PtrBTy) ||
+      DL.getTypeStoreSize(PtrATy->getScalarType()) !=
+          DL.getTypeStoreSize(PtrBTy->getScalarType()))
+    return false;
+
+  APInt Size(PtrBitWidth, DL.getTypeStoreSize(PtrATy));
+
+  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
+  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
+  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
+
+  APInt OffsetDelta = OffsetB - OffsetA;
+
+  // Check if they are based on the same pointer. That makes the offsets
+  // sufficient.
+  if (PtrA == PtrB)
+    return OffsetDelta == Size;
+
+  // Compute the necessary base pointer delta to have the necessary final
+  // delta equal to the size.
+  APInt BaseDelta = Size - OffsetDelta;
+
+  // Otherwise compute the distance with SCEV between the base pointers.
+  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
+  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
+  const SCEV *C = SE.getConstant(BaseDelta);
+  const SCEV *X = SE.getAddExpr(PtrSCEVA, C);
+  if (X == PtrSCEVB)
+    return true;
+
+  // Sometimes even this doesn't work, because SCEV can't always see through
+  // patterns that look like (gep (ext (add (shl X, C1), C2))). Try proving
+  // things the hard way.
+
+  // Look through GEPs after proving they're the same except for the last
+  // index.
+  GetElementPtrInst *GEPA = dyn_cast<GetElementPtrInst>(getPointerOperand(A));
+  GetElementPtrInst *GEPB = dyn_cast<GetElementPtrInst>(getPointerOperand(B));
+  if (!GEPA || !GEPB || GEPA->getNumOperands() != GEPB->getNumOperands())
+    return false;
+  unsigned FinalIndex = GEPA->getNumOperands() - 1;
+  for (unsigned i = 0; i < FinalIndex; i++)
+    if (GEPA->getOperand(i) != GEPB->getOperand(i))
+      return false;
+
+  Instruction *OpA = dyn_cast<Instruction>(GEPA->getOperand(FinalIndex));
+  Instruction *OpB = dyn_cast<Instruction>(GEPB->getOperand(FinalIndex));
+  if (!OpA || !OpB || OpA->getOpcode() != OpB->getOpcode() ||
+      OpA->getType() != OpB->getType())
+    return false;
+
+  // Look through a ZExt/SExt.
+  if (!isa<SExtInst>(OpA) && !isa<ZExtInst>(OpA))
+    return false;
+
+  OpA = dyn_cast<Instruction>(OpA->getOperand(0));
+  OpB = dyn_cast<Instruction>(OpB->getOperand(0));
+  if (!OpA || !OpB || OpA->getType() != OpB->getType())
+    return false;
+
+  // Now we need to prove that adding 1 to OpA won't overflow.
+  unsigned BitWidth = OpA->getType()->getScalarSizeInBits();
+  APInt KnownZero(BitWidth, 0);
+  APInt KnownOne(BitWidth, 0);
+  computeKnownBits(OpA, KnownZero, KnownOne, DL, 0, nullptr, OpA, &DT);
+  // If any bits are known to be zero other than the sign bit in OpA, we can
+  // add 1 to it while guaranteeing no overflow of any sort.
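+  //
+  // The reasoning: unsigned addition of 1 wraps only when OpA is all-ones,
+  // and signed addition of 1 overflows only when OpA is the signed maximum
+  // (all ones below a clear sign bit). Both patterns require every bit other
+  // than the sign bit to be one, so a single known-zero bit below the sign
+  // bit rules both out. For instance (a hypothetical case), if OpA was
+  // produced by `and i32 %x, 255`, bits 8..30 are known zero and OpA + 1
+  // cannot wrap.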
+  KnownZero &= ~APInt::getHighBitsSet(BitWidth, 1);
+  if (KnownZero == 0)
+    return false;
+
+  const SCEV *OffsetSCEVA = SE.getSCEV(OpA);
+  const SCEV *OffsetSCEVB = SE.getSCEV(OpB);
+  const SCEV *One = SE.getConstant(APInt(BitWidth, 1));
+  const SCEV *X2 = SE.getAddExpr(OffsetSCEVA, One);
+  return X2 == OffsetSCEVB;
+}
+
+void Vectorizer::reorder(Instruction *I) {
+  for (User *U : I->users())
+    if (Instruction *User = dyn_cast<Instruction>(U))
+      if (!DT.dominates(I, User) && User->getOpcode() != Instruction::PHI) {
+        User->removeFromParent();
+        User->insertAfter(I);
+        reorder(User);
+      }
+}
+
+std::pair<BasicBlock::iterator, BasicBlock::iterator>
+Vectorizer::getBoundaryInstrs(ArrayRef<Value *> Chain) {
+  Instruction *C0 = cast<Instruction>(Chain[0]);
+  BasicBlock::iterator FirstInstr = C0->getIterator();
+  BasicBlock::iterator LastInstr = C0->getIterator();
+
+  BasicBlock *BB = C0->getParent();
+  unsigned NumFound = 0;
+  for (auto I = BB->begin(), E = BB->end(); I != E; ++I) {
+    if (std::find(Chain.begin(), Chain.end(), &*I) == Chain.end())
+      continue;
+
+    ++NumFound;
+    if (NumFound == 1) {
+      FirstInstr = I;
+    } else if (NumFound == Chain.size()) {
+      LastInstr = I;
+      break;
+    }
+  }
+
+  return std::make_pair(FirstInstr, LastInstr);
+}
+
+void Vectorizer::eraseInstructions(ArrayRef<Value *> Chain) {
+  SmallVector<Instruction *, 16> Instrs;
+  for (Value *V : Chain) {
+    Value *PtrOperand = getPointerOperand(V);
+    assert(PtrOperand && "Instruction must have a pointer operand.");
+    Instrs.push_back(cast<Instruction>(V));
+    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(PtrOperand))
+      Instrs.push_back(GEP);
+  }
+
+  // Erase instructions.
+  for (Value *V : Instrs) {
+    Instruction *Instr = cast<Instruction>(V);
+    if (Instr->use_empty())
+      Instr->eraseFromParent();
+  }
+}
+
+void Vectorizer::propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
+  Instruction *I0 = cast<Instruction>(VL[0]);
+  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
+  I0->getAllMetadataOtherThanDebugLoc(Metadata);
+
+  for (unsigned i = 0, n = Metadata.size(); i != n; ++i) {
+    unsigned Kind = Metadata[i].first;
+    MDNode *MD = Metadata[i].second;
+
+    for (int j = 1, e = VL.size(); MD && j != e; j++) {
+      Instruction *IJ = cast<Instruction>(VL[j]);
+      MDNode *IMD = IJ->getMetadata(Kind);
+
+      switch (Kind) {
+      default:
+        MD = nullptr; // Remove unknown metadata
+        break;
+      case LLVMContext::MD_tbaa:
+        MD = MDNode::getMostGenericTBAA(MD, IMD);
+        break;
+      case LLVMContext::MD_alias_scope:
+      case LLVMContext::MD_noalias:
+        MD = MDNode::intersect(MD, IMD);
+        break;
+      case LLVMContext::MD_fpmath:
+        MD = MDNode::getMostGenericFPMath(MD, IMD);
+        break;
+      }
+    }
+    I->setMetadata(Kind, MD);
+  }
+}
+
+std::pair<ArrayRef<Value *>, ArrayRef<Value *>>
+Vectorizer::bisect(ArrayRef<Value *> Chain, unsigned ElementSize) {
+  unsigned ElemSizeInBytes = ElementSize / 8;
+  unsigned SizeInBytes = ElemSizeInBytes * Chain.size();
+  unsigned NumRight = (SizeInBytes % 4) / ElemSizeInBytes;
+  unsigned NumLeft = Chain.size() - NumRight;
+  return std::make_pair(Chain.slice(0, NumLeft), Chain.slice(NumLeft));
+}
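+
+// For example, a chain of three i16 accesses totals 6 bytes; 6 % 4 leaves 2
+// bytes, i.e. one trailing i16, so bisect() returns a 2-element left chain
+// (a clean 4 bytes) and a 1-element right chain, and the callers below
+// retry each half separately.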
+
+bool Vectorizer::isVectorizable(ArrayRef<Value *> Chain,
+                                BasicBlock::iterator From,
+                                BasicBlock::iterator To) {
+  SmallVector<std::pair<Value *, unsigned>, 16> MemoryInstrs;
+  SmallVector<std::pair<Value *, unsigned>, 16> ChainInstrs;
+
+  unsigned Idx = 0;
+  for (auto I = From, E = To; I != E; ++I, ++Idx) {
+    if (isa<LoadInst>(&*I) || isa<StoreInst>(&*I)) {
+      if (std::find(Chain.begin(), Chain.end(), &*I) == Chain.end())
+        MemoryInstrs.push_back({&*I, Idx});
+      else
+        ChainInstrs.push_back({&*I, Idx});
+    } else if (I->mayHaveSideEffects()) {
+      DEBUG(dbgs() << "GPULSV: Found side-effecting operation: aborting.\n");
+      DEBUG(I->dump());
+      return false;
+    }
+  }
+
+  for (auto EntryMem : MemoryInstrs) {
+    Value *V = EntryMem.first;
+    unsigned VIdx = EntryMem.second;
+    for (auto EntryChain : ChainInstrs) {
+      Value *VV = EntryChain.first;
+      unsigned VVIdx = EntryChain.second;
+      if (isa<LoadInst>(V) && isa<LoadInst>(VV))
+        continue;
+
+      // We can ignore the alias as long as the load comes before the store,
+      // because that means we won't be moving the load past the store to
+      // vectorize it (the loads are inserted at the location of the first
+      // load).
+      if (isa<StoreInst>(V) && isa<LoadInst>(VV) && VVIdx < VIdx)
+        continue;
+
+      // Same case, but in reverse.
+      if (isa<LoadInst>(V) && isa<StoreInst>(VV) && VVIdx > VIdx)
+        continue;
+
+      Instruction *M0 = cast<Instruction>(V);
+      Instruction *M1 = cast<Instruction>(VV);
+      Value *Ptr0 = isa<LoadInst>(M0) ? M0->getOperand(0) : M0->getOperand(1);
+      Value *Ptr1 = isa<LoadInst>(M1) ? M1->getOperand(0) : M1->getOperand(1);
+      unsigned S0 =
+          DL.getTypeStoreSize(Ptr0->getType()->getPointerElementType());
+      unsigned S1 =
+          DL.getTypeStoreSize(Ptr1->getType()->getPointerElementType());
+
+      if (AA.alias(MemoryLocation(Ptr0, S0), MemoryLocation(Ptr1, S1))) {
+        DEBUG(dbgs() << "GPULSV: Found alias.\n"
+                        "  Aliasing instruction and pointer:\n"
+                     << *V << " aliases " << *Ptr0 << '\n'
+                     << "  Aliased instruction and pointer:\n"
+                     << *VV << " aliases " << *Ptr1 << '\n');
+        return false;
+      }
+    }
+  }
+
+  return true;
+}
+
+void Vectorizer::collectInstructions(BasicBlock *BB) {
+  LoadRefs.clear();
+  StoreRefs.clear();
+
+  for (Instruction &I : *BB) {
+    if (!I.mayReadOrWriteMemory())
+      continue;
+
+    if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
+      if (!LI->isSimple())
+        continue;
+
+      Type *Ty = LI->getType();
+      if (!VectorType::isValidElementType(Ty->getScalarType()))
+        continue;
+
+      // No point in looking at these if they're too big to vectorize.
+      if (DL.getTypeSizeInBits(Ty) > VecRegSize / 2)
+        continue;
+
+      // Make sure all the users of a vector are constant-index extracts.
+      if (isa<VectorType>(Ty) &&
+          !std::all_of(LI->user_begin(), LI->user_end(), [](const User *U) {
+            const Instruction *UI = cast<Instruction>(U);
+            return isa<ExtractElementInst>(UI) &&
+                   isa<ConstantInt>(UI->getOperand(1));
+          }))
+        continue;
+
+      // TODO: Target hook to filter types.
+
+      // Save the load locations.
+      Value *Ptr = GetUnderlyingObject(LI->getPointerOperand(), DL);
+      LoadRefs[Ptr].push_back(LI);
+    } else if (StoreInst *SI = dyn_cast<StoreInst>(&I)) {
+      if (!SI->isSimple())
+        continue;
+
+      Type *Ty = SI->getValueOperand()->getType();
+      if (!VectorType::isValidElementType(Ty->getScalarType()))
+        continue;
+
+      if (DL.getTypeSizeInBits(Ty) > VecRegSize / 2)
+        continue;
+
+      if (isa<VectorType>(Ty) &&
+          !std::all_of(SI->user_begin(), SI->user_end(), [](const User *U) {
+            const Instruction *UI = cast<Instruction>(U);
+            return isa<ExtractElementInst>(UI) &&
+                   isa<ConstantInt>(UI->getOperand(1));
+          }))
+        continue;
+
+      // Save store location.
+      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL);
+      StoreRefs[Ptr].push_back(SI);
+    }
+  }
+}
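+
+// Candidate accesses are grouped by the underlying object of their pointer
+// operand, so two GEPs off the same base (say, indices 1 and 2 into %out)
+// land in the same list and can be tested pairwise for adjacency below.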
+
+bool Vectorizer::vectorizeChains(ValueListMap &Map) {
+  bool Changed = false;
+
+  for (const std::pair<Value *, ValueList> &Chain : Map) {
+    unsigned Size = Chain.second.size();
+    if (Size < 2)
+      continue;
+
+    DEBUG(dbgs() << "GPULSV: Analyzing a chain of length " << Size << ".\n");
+
+    // Process the instructions in chunks of 64.
+    for (unsigned CI = 0, CE = Size; CI < CE; CI += 64) {
+      unsigned Len = std::min<unsigned>(CE - CI, 64);
+      ArrayRef<Value *> Chunk(&Chain.second[CI], Len);
+      Changed |= vectorizeInstructions(Chunk);
+    }
+  }
+
+  return Changed;
+}
+
+bool Vectorizer::vectorizeInstructions(ArrayRef<Value *> Instrs) {
+  DEBUG(dbgs() << "GPULSV: Vectorizing " << Instrs.size()
+               << " instructions.\n");
+  SmallSetVector<unsigned, 16> Heads, Tails;
+  unsigned ConsecutiveChain[64];
+  SmallPtrSet<Value *, 16> VectorizedValues;
+  bool Changed = false;
+
+  // Do a quadratic search on all of the given loads/stores and find all of
+  // the pairs of accesses that follow each other.
+  for (int i = 0, e = Instrs.size(); i < e; ++i) {
+    ConsecutiveChain[i] = ~0U;
+    for (int j = e - 1; j >= 0; --j) {
+      if (i == j)
+        continue;
+
+      if (isConsecutiveAccess(Instrs[i], Instrs[j])) {
+        bool ShouldInsert = true;
+        if (ConsecutiveChain[i] != ~0U) {
+          unsigned CurDistance = std::abs((int)ConsecutiveChain[i] - i);
+          unsigned NewDistance = std::abs((int)ConsecutiveChain[i] - j);
+          if (j < i || NewDistance > CurDistance)
+            ShouldInsert = false;
+        }
+
+        if (ShouldInsert) {
+          Tails.insert(j);
+          Heads.insert(i);
+          ConsecutiveChain[i] = j;
+        }
+      }
+    }
+  }
+
+  for (unsigned Head : Heads) {
+    if (Tails.count(Head))
+      continue;
+
+    // We found an instr that starts a chain. Now follow the chain and try
+    // to vectorize it.
+    SmallVector<Value *, 16> Operands;
+    unsigned I = Head;
+    while (I != ~0U && (Tails.count(I) || Heads.count(I))) {
+      if (VectorizedValues.count(Instrs[I]))
+        break;
+      Operands.push_back(Instrs[I]);
+      I = ConsecutiveChain[I];
+    }
+
+    bool Vectorized = false;
+    if (isa<LoadInst>(*Operands.begin()))
+      Vectorized = vectorizeLoadChain(Operands);
+    else
+      Vectorized = vectorizeStoreChain(Operands);
+
+    // Mark the vectorized instructions so that we don't vectorize them
+    // again.
+    if (Vectorized)
+      VectorizedValues.insert(Operands.begin(), Operands.end());
+    Changed |= Vectorized;
+  }
+
+  return Changed;
+}
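+
+// A worked example of the quadratic search above: for accesses A, B, C at
+// i32 offsets 0, 4 and 8, ConsecutiveChain records A->B and B->C. B and C
+// are also tails, so chain collection starts only at A and gathers
+// [A, B, C] before handing it to the chain vectorizers below.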
+
+bool Vectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain) {
+  StoreInst *S0 = cast<StoreInst>(Chain[0]);
+  Type *StoreTy = S0->getValueOperand()->getType();
+  unsigned Sz = DL.getTypeSizeInBits(StoreTy);
+  unsigned VF = VecRegSize / Sz;
+  unsigned ChainSize = Chain.size();
+
+  if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2)
+    return false;
+
+  // Store size should be 1B, 2B or multiple of 4B.
+  // TODO: Target hook for size constraint?
+  unsigned SzInBytes = (Sz / 8) * ChainSize;
+  if (SzInBytes > 2 && SzInBytes % 4 != 0) {
+    DEBUG(dbgs()
+          << "GPULSV: Size should be 1B, 2B or multiple of 4B. Bisecting.\n");
+    if (SzInBytes == 3)
+      return vectorizeStoreChain(Chain.slice(0, ChainSize - 1));
+
+    auto Chains = bisect(Chain, Sz);
+    return vectorizeStoreChain(Chains.first) |
+           vectorizeStoreChain(Chains.second);
+  }
+
+  VectorType *VecTy;
+  VectorType *VecStoreTy = dyn_cast<VectorType>(StoreTy);
+  if (VecStoreTy)
+    VecTy = VectorType::get(StoreTy->getScalarType(),
+                            Chain.size() * VecStoreTy->getNumElements());
+  else
+    VecTy = VectorType::get(StoreTy, Chain.size());
+
+  // If it's more than the max vector size, break it into two pieces.
+  // TODO: Target hook to control types to split to.
+  if (ChainSize > VF) {
+    DEBUG(dbgs() << "GPULSV: Vector factor is too big. "
+                    "Creating two separate arrays.\n");
+    return vectorizeStoreChain(Chain.slice(0, VF)) |
+           vectorizeStoreChain(Chain.slice(VF));
+  }
+
+  DEBUG({
+    dbgs() << "GPULSV: Stores to vectorize:\n";
+    for (Value *V : Chain)
+      V->dump();
+  });
+
+  // Check alignment restrictions.
+  unsigned Alignment = S0->getAlignment();
+
+  // If the store is going to be misaligned, don't vectorize it.
+  // TODO: Check TLI.allowsMisalignedMemoryAccesses
+  if ((Alignment % SzInBytes) != 0 && (Alignment % 4) != 0) {
+    if (S0->getPointerAddressSpace() == 0) {
+      // If we're storing to an object on the stack, we control its
+      // alignment, so we can cheat and change it!
+      Value *V = GetUnderlyingObject(S0->getPointerOperand(), DL);
+      if (AllocaInst *AI = dyn_cast_or_null<AllocaInst>(V)) {
+        AI->setAlignment(4);
+        Alignment = 4;
+      } else {
+        return false;
+      }
+    } else {
+      return false;
+    }
+  }
+
+  auto Boundaries = getBoundaryInstrs(Chain);
+  BasicBlock::iterator First = Boundaries.first;
+  BasicBlock::iterator Last = Boundaries.second;
+
+  if (!isVectorizable(Chain, First, Last))
+    return false;
+
+  // Set insert point (the vectorized store goes where the last store was).
+  Builder.SetInsertPoint(&*Last);
+
+  unsigned AS = S0->getPointerAddressSpace();
+  Value *Vec = UndefValue::get(VecTy);
+
+  if (VecStoreTy) {
+    unsigned VecWidth = VecStoreTy->getNumElements();
+    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
+      StoreInst *Store = cast<StoreInst>(Chain[I]);
+      for (unsigned J = 0, NE = VecStoreTy->getNumElements(); J != NE; ++J) {
+        unsigned NewIdx = J + I * VecWidth;
+        Value *Extract = Builder.CreateExtractElement(Store->getValueOperand(),
+                                                      Builder.getInt32(J));
+        if (Extract->getType() != StoreTy->getScalarType())
+          Extract = Builder.CreateBitCast(Extract, StoreTy->getScalarType());
+
+        Value *Insert = Builder.CreateInsertElement(Vec, Extract,
+                                                    Builder.getInt32(NewIdx));
+        Vec = Insert;
+      }
+    }
+  } else {
+    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
+      StoreInst *Store = cast<StoreInst>(Chain[I]);
+      Value *Extract = Store->getValueOperand();
+      if (Extract->getType() != StoreTy->getScalarType())
+        Extract = Builder.CreateBitCast(Extract, StoreTy->getScalarType());
+
+      Value *Insert = Builder.CreateInsertElement(Vec, Extract,
+                                                  Builder.getInt32(I));
+      Vec = Insert;
+    }
+  }
+
+  Value *Bitcast =
+      Builder.CreateBitCast(S0->getPointerOperand(), VecTy->getPointerTo(AS));
+  StoreInst *SI = cast<StoreInst>(Builder.CreateStore(Vec, Bitcast));
+  propagateMetadata(SI, Chain);
+  SI->setAlignment(Alignment);
+
+  eraseInstructions(Chain);
+  ++NumVectorInstructions;
+  return true;
+}
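+
+// Note that the VecStoreTy path above also flattens chains of small
+// vectors: as a sketch, two adjacent stores of <2 x i32> (64 bits each,
+// within half of the default 128-bit budget) have their four lanes
+// extracted and re-inserted as one <4 x i32> store.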
+
+bool Vectorizer::vectorizeLoadChain(ArrayRef<Value *> Chain) {
+  LoadInst *L0 = cast<LoadInst>(Chain[0]);
+  Type *LoadTy = L0->getType();
+  unsigned Sz = DL.getTypeSizeInBits(LoadTy);
+  unsigned VF = VecRegSize / Sz;
+  unsigned ChainSize = Chain.size();
+
+  if (!isPowerOf2_32(Sz) || VF < 2 || ChainSize < 2)
+    return false;
+
+  // Load size should be 1B, 2B or multiple of 4B.
+  // TODO: Should size constraint be a target hook?
+  unsigned SzInBytes = (Sz / 8) * ChainSize;
+  if (SzInBytes > 2 && SzInBytes % 4 != 0) {
+    DEBUG(dbgs()
+          << "GPULSV: Size should be 1B, 2B or multiple of 4B. Bisecting.\n");
+    if (SzInBytes == 3)
+      return vectorizeLoadChain(Chain.slice(0, ChainSize - 1));
+
+    auto Chains = bisect(Chain, Sz);
+    return vectorizeLoadChain(Chains.first) |
+           vectorizeLoadChain(Chains.second);
+  }
+
+  VectorType *VecTy;
+  VectorType *VecLoadTy = dyn_cast<VectorType>(LoadTy);
+  if (VecLoadTy)
+    VecTy = VectorType::get(LoadTy->getScalarType(),
+                            Chain.size() * VecLoadTy->getNumElements());
+  else
+    VecTy = VectorType::get(LoadTy, Chain.size());
+
+  // If it's more than the max vector size, break it into two pieces.
+  // TODO: Target hook to control types to split to.
+  if (ChainSize > VF) {
+    DEBUG(dbgs() << "GPULSV: Vector factor is too big. "
+                    "Creating two separate arrays.\n");
+    return vectorizeLoadChain(Chain.slice(0, VF)) |
+           vectorizeLoadChain(Chain.slice(VF));
+  }
+
+  // Check alignment restrictions.
+  unsigned Alignment = L0->getAlignment();
+
+  // If the load is going to be misaligned, don't vectorize it.
+  // TODO: Check TLI.allowsMisalignedMemoryAccesses
+  if ((Alignment % SzInBytes) != 0 && (Alignment % 4) != 0) {
+    if (L0->getPointerAddressSpace() == 0) {
+      // If we're loading from an object on the stack, we control its
+      // alignment, so we can cheat and change it!
+      Value *V = GetUnderlyingObject(L0->getPointerOperand(), DL);
+      if (AllocaInst *AI = dyn_cast_or_null<AllocaInst>(V)) {
+        AI->setAlignment(4);
+        Alignment = 4;
+      } else {
+        return false;
+      }
+    } else {
+      return false;
+    }
+  }
+
+  DEBUG({
+    dbgs() << "GPULSV: Loads to vectorize:\n";
+    for (Value *V : Chain)
+      V->dump();
+  });
+
+  auto Boundaries = getBoundaryInstrs(Chain);
+  BasicBlock::iterator First = Boundaries.first;
+  BasicBlock::iterator Last = Boundaries.second;
+
+  if (!isVectorizable(Chain, First, Last))
+    return false;
+
+  // Set insert point. The vectorized load goes where the first load was,
+  // which is what the load-before-store exception in isVectorizable()
+  // assumes.
+  Builder.SetInsertPoint(&*First);
+
+  unsigned AS = L0->getPointerAddressSpace();
+  Value *Bitcast =
+      Builder.CreateBitCast(L0->getPointerOperand(), VecTy->getPointerTo(AS));
+
+  LoadInst *LI = cast<LoadInst>(Builder.CreateLoad(Bitcast));
+  propagateMetadata(LI, Chain);
+  LI->setAlignment(Alignment);
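+
+  // Two rewrite paths from here. If the original loads were themselves of
+  // vector type, every user is a constant-index extract (guaranteed by
+  // collectInstructions), so each extract is remapped onto the new, wider
+  // vector and the old extracts are erased. If they were scalar loads, each
+  // load is simply replaced with an extractelement of the wide load.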
+  SmallVector<Instruction *, 16> InstrsToReorder;
+
+  if (VecLoadTy) {
+    SmallVector<Instruction *, 16> InstrsToErase;
+
+    unsigned VecWidth = VecLoadTy->getNumElements();
+    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
+      for (auto Use : Chain[I]->users()) {
+        Instruction *UI = cast<Instruction>(Use);
+        unsigned Idx = cast<ConstantInt>(UI->getOperand(1))->getZExtValue();
+        unsigned NewIdx = Idx + I * VecWidth;
+        Value *V = Builder.CreateExtractElement(LI, Builder.getInt32(NewIdx));
+        Instruction *Extracted = cast<Instruction>(V);
+        if (Extracted->getType() != UI->getType())
+          Extracted =
+              cast<Instruction>(Builder.CreateBitCast(Extracted, UI->getType()));
+
+        // Replace the old instruction.
+        UI->replaceAllUsesWith(Extracted);
+        InstrsToReorder.push_back(Extracted);
+        InstrsToErase.push_back(UI);
+      }
+    }
+
+    for (Instruction *ModUser : InstrsToReorder)
+      reorder(ModUser);
+
+    for (auto I : InstrsToErase)
+      I->eraseFromParent();
+  } else {
+    for (unsigned I = 0, E = Chain.size(); I != E; ++I) {
+      Value *V = Builder.CreateExtractElement(LI, Builder.getInt32(I));
+      Instruction *Extracted = cast<Instruction>(V);
+      Instruction *UI = cast<Instruction>(Chain[I]);
+      if (Extracted->getType() != UI->getType())
+        Extracted =
+            cast<Instruction>(Builder.CreateBitCast(Extracted, UI->getType()));
+
+      // Replace the old instruction.
+      UI->replaceAllUsesWith(Extracted);
+      InstrsToReorder.push_back(Extracted);
+    }
+
+    for (Instruction *ModUser : InstrsToReorder)
+      reorder(ModUser);
+  }
+
+  eraseInstructions(Chain);
+
+  ++NumVectorInstructions;
+  return true;
+}
Index: lib/Transforms/Vectorize/Vectorize.cpp
===================================================================
--- lib/Transforms/Vectorize/Vectorize.cpp
+++ lib/Transforms/Vectorize/Vectorize.cpp
@@ -29,6 +29,7 @@
   initializeBBVectorizePass(Registry);
   initializeLoopVectorizePass(Registry);
   initializeSLPVectorizerPass(Registry);
+  initializeLoadStoreVectorizerPass(Registry);
 }
 
 void LLVMInitializeVectorization(LLVMPassRegistryRef R) {
Index: test/Transforms/LoadStoreVectorizer/AMDGPU/lit.local.cfg
===================================================================
--- /dev/null
+++ test/Transforms/LoadStoreVectorizer/AMDGPU/lit.local.cfg
@@ -0,0 +1,3 @@
+if not 'AMDGPU' in config.root.targets:
+    config.unsupported = True
+
Index: test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
===================================================================
--- /dev/null
+++ test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
@@ -0,0 +1,635 @@
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -load-store-vectorizer -S -o - %s | FileCheck %s
+; Copy of test/CodeGen/AMDGPU/merge-stores.ll with some additions
+
+; TODO: Vector element tests
+; TODO: Non-zero base offset for load and store combinations
+; TODO: Same base addrspacecasted
+
+
+; CHECK-LABEL: @merge_global_store_2_constants_i8(
+; CHECK: store <2 x i8> <i8 -56, i8 123>, <2 x i8> addrspace(1)* %{{[0-9]+}}, align 2
+define void @merge_global_store_2_constants_i8(i8 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
+
+  store i8 123, i8 addrspace(1)* %out.gep.1
+  store i8 456, i8 addrspace(1)* %out, align 2
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_constants_i8_natural_align
+; CHECK: store <2 x i8> <i8 -56, i8 123>, <2 x i8> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_2_constants_i8_natural_align(i8 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
+
+  store i8 123, i8 addrspace(1)* %out.gep.1
+  store i8 456, i8 addrspace(1)* %out
+  ret void
+}
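+
+; Note: an i8 literal of 456 wraps to -56 when truncated to 8 bits, which
+; is why the vectorized constants above read <i8 -56, i8 123>.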
+
+; CHECK-LABEL: @merge_global_store_2_constants_i16
+; CHECK: store <2 x i16> <i16 456, i16 123>, <2 x i16> addrspace(1)* %{{[0-9]+}}, align 4
+define void @merge_global_store_2_constants_i16(i16 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
+
+  store i16 123, i16 addrspace(1)* %out.gep.1
+  store i16 456, i16 addrspace(1)* %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_constants_0_i16
+; CHECK: store <2 x i16> zeroinitializer, <2 x i16> addrspace(1)* %{{[0-9]+}}, align 4
+define void @merge_global_store_2_constants_0_i16(i16 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
+
+  store i16 0, i16 addrspace(1)* %out.gep.1
+  store i16 0, i16 addrspace(1)* %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_constants_i16_natural_align
+; CHECK: store <2 x i16> <i16 456, i16 123>, <2 x i16> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_2_constants_i16_natural_align(i16 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
+
+  store i16 123, i16 addrspace(1)* %out.gep.1
+  store i16 456, i16 addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_constants_half_natural_align
+; CHECK: store <2 x half> <half 0xH3C00, half 0xH4000>, <2 x half> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_2_constants_half_natural_align(half addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr half, half addrspace(1)* %out, i32 1
+
+  store half 2.0, half addrspace(1)* %out.gep.1
+  store half 1.0, half addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_constants_i32
+; CHECK: store <2 x i32> <i32 456, i32 123>, <2 x i32> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_2_constants_i32(i32 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+
+  store i32 123, i32 addrspace(1)* %out.gep.1
+  store i32 456, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_constants_i32_f32
+; CHECK: store <2 x i32> <i32 456, i32 1065353216>, <2 x i32> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_2_constants_i32_f32(i32 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %out.gep.1.bc = bitcast i32 addrspace(1)* %out.gep.1 to float addrspace(1)*
+  store float 1.0, float addrspace(1)* %out.gep.1.bc
+  store i32 456, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_constants_f32_i32
+; CHECK: store <2 x float> <float 4.000000e+00, float 0x370EC00000000000>, <2 x float> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_2_constants_f32_i32(float addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
+  %out.gep.1.bc = bitcast float addrspace(1)* %out.gep.1 to i32 addrspace(1)*
+  store i32 123, i32 addrspace(1)* %out.gep.1.bc
+  store float 4.0, float addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_constants_i32
+; CHECK: store <4 x i32> <i32 1234, i32 123, i32 456, i32 333>, <4 x i32> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_4_constants_i32(i32 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
+
+  store i32 123, i32 addrspace(1)* %out.gep.1
+  store i32 456, i32 addrspace(1)* %out.gep.2
+  store i32 333, i32 addrspace(1)* %out.gep.3
+  store i32 1234, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_constants_f32_order
+; CHECK: store <4 x float> <float 8.000000e+00, float 1.000000e+00, float 2.000000e+00, float 4.000000e+00>, <4 x float> addrspace(1)* %{{[0-9]+}}
+define void @merge_global_store_4_constants_f32_order(float addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
+
+  store float 8.0, float addrspace(1)* %out
+  store float 1.0, float addrspace(1)* %out.gep.1
+  store float 2.0, float addrspace(1)* %out.gep.2
+  store float 4.0, float addrspace(1)* %out.gep.3
+  ret void
+}
+
+; First store is out of order.
+; CHECK-LABEL: @merge_global_store_4_constants_f32
+; CHECK: store <4 x float> <float 8.000000e+00, float 1.000000e+00, float 2.000000e+00, float 4.000000e+00>, <4 x float> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_4_constants_f32(float addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
+
+  store float 1.0, float addrspace(1)* %out.gep.1
+  store float 2.0, float addrspace(1)* %out.gep.2
+  store float 4.0, float addrspace(1)* %out.gep.3
+  store float 8.0, float addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_constants_mixed_i32_f32
+; CHECK: store <4 x float> <float 8.000000e+00, float 0x36D6000000000000, float 2.000000e+00, float 0x36E1000000000000>, <4 x float> addrspace(1)* %{{[0-9]+}}
+define void @merge_global_store_4_constants_mixed_i32_f32(float addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3
+
+  %out.gep.1.bc = bitcast float addrspace(1)* %out.gep.1 to i32 addrspace(1)*
+  %out.gep.3.bc = bitcast float addrspace(1)* %out.gep.3 to i32 addrspace(1)*
+
+  store i32 11, i32 addrspace(1)* %out.gep.1.bc
+  store float 2.0, float addrspace(1)* %out.gep.2
+  store i32 17, i32 addrspace(1)* %out.gep.3.bc
+  store float 8.0, float addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_3_constants_i32
+; CHECK: store <3 x i32> <i32 1234, i32 123, i32 456>, <3 x i32> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_3_constants_i32(i32 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
+
+  store i32 123, i32 addrspace(1)* %out.gep.1
+  store i32 456, i32 addrspace(1)* %out.gep.2
+  store i32 1234, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_constants_i64
+; CHECK: store <2 x i64> <i64 456, i64 123>, <2 x i64> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_2_constants_i64(i64 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i64, i64 addrspace(1)* %out, i64 1
+
+  store i64 123, i64 addrspace(1)* %out.gep.1
+  store i64 456, i64 addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_constants_i64
+; CHECK: store <2 x i64> <i64 456, i64 333>, <2 x i64> addrspace(1)* %{{[0-9]+$}}
+; CHECK: store <2 x i64> <i64 1234, i64 123>, <2 x i64> addrspace(1)* %{{[0-9]+$}}
+define void @merge_global_store_4_constants_i64(i64 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i64, i64 addrspace(1)* %out, i64 1
+  %out.gep.2 = getelementptr i64, i64 addrspace(1)* %out, i64 2
+  %out.gep.3 = getelementptr i64, i64 addrspace(1)* %out, i64 3
+
+  store i64 123, i64 addrspace(1)* %out.gep.1
+  store i64 456, i64 addrspace(1)* %out.gep.2
+  store i64 333, i64 addrspace(1)* %out.gep.3
+  store i64 1234, i64 addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_adjacent_loads_i32
+; CHECK: [[LOAD:%[0-9]+]] = load <2 x i32>
+; CHECK: [[ELT0:%[0-9]+]] = extractelement <2 x i32> [[LOAD]], i32 0
+; CHECK: [[ELT1:%[0-9]+]] = extractelement <2 x i32> [[LOAD]], i32 1
+; CHECK: [[INSERT0:%[0-9]+]] = insertelement <2 x i32> undef, i32 [[ELT0]], i32 0
+; CHECK: [[INSERT1:%[0-9]+]] = insertelement <2 x i32> [[INSERT0]], i32 [[ELT1]], i32 1
+; CHECK: store <2 x i32> [[INSERT1]]
+define void @merge_global_store_2_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
+
+  %lo = load i32, i32 addrspace(1)* %in
+  %hi = load i32, i32 addrspace(1)* %in.gep.1
+
+  store i32 %lo, i32 addrspace(1)* %out
+  store i32 %hi, i32 addrspace(1)* %out.gep.1
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_adjacent_loads_i32_nonzero_base
+; CHECK: extractelement
+; CHECK: extractelement
+; CHECK: insertelement
+; CHECK: insertelement
+; CHECK: store <2 x i32>
+define void @merge_global_store_2_adjacent_loads_i32_nonzero_base(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %in.gep.0 = getelementptr i32, i32 addrspace(1)* %in, i32 2
+  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 3
+
+  %out.gep.0 = getelementptr i32, i32 addrspace(1)* %out, i32 2
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 3
+  %lo = load i32, i32 addrspace(1)* %in.gep.0
+  %hi = load i32, i32 addrspace(1)* %in.gep.1
+
+  store i32 %lo, i32 addrspace(1)* %out.gep.0
+  store i32 %hi, i32 addrspace(1)* %out.gep.1
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_2_adjacent_loads_shuffle_i32
+; CHECK: [[LOAD:%[0-9]+]] = load <2 x i32>
+; CHECK: [[ELT0:%[0-9]+]] = extractelement <2 x i32> [[LOAD]], i32 0
+; CHECK: [[ELT1:%[0-9]+]] = extractelement <2 x i32> [[LOAD]], i32 1
+; CHECK: [[INSERT0:%[0-9]+]] = insertelement <2 x i32> undef, i32 [[ELT1]], i32 0
+; CHECK: [[INSERT1:%[0-9]+]] = insertelement <2 x i32> [[INSERT0]], i32 [[ELT0]], i32 1
+; CHECK: store <2 x i32> [[INSERT1]]
+define void @merge_global_store_2_adjacent_loads_shuffle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
+
+  %lo = load i32, i32 addrspace(1)* %in
+  %hi = load i32, i32 addrspace(1)* %in.gep.1
+
+  store i32 %hi, i32 addrspace(1)* %out
+  store i32 %lo, i32 addrspace(1)* %out.gep.1
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_adjacent_loads_i32
+; CHECK: load <4 x i32>
+; CHECK: store <4 x i32>
+define void @merge_global_store_4_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
+  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
+  %in.gep.2 = getelementptr i32, i32 addrspace(1)* %in, i32 2
+  %in.gep.3 = getelementptr i32, i32 addrspace(1)* %in, i32 3
+
+  %x = load i32, i32 addrspace(1)* %in
+  %y = load i32, i32 addrspace(1)* %in.gep.1
+  %z = load i32, i32 addrspace(1)* %in.gep.2
+  %w = load i32, i32 addrspace(1)* %in.gep.3
+
+  store i32 %x, i32 addrspace(1)* %out
+  store i32 %y, i32 addrspace(1)* %out.gep.1
+  store i32 %z, i32 addrspace(1)* %out.gep.2
+  store i32 %w, i32 addrspace(1)* %out.gep.3
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_3_adjacent_loads_i32
+; CHECK: load <3 x i32>
+; CHECK: store <3 x i32>
+define void @merge_global_store_3_adjacent_loads_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
+  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
+  %in.gep.2 = getelementptr i32, i32 addrspace(1)* %in, i32 2
+
+  %x = load i32, i32 addrspace(1)* %in
+  %y = load i32, i32 addrspace(1)* %in.gep.1
+  %z = load i32, i32 addrspace(1)* %in.gep.2
+
+  store i32 %x, i32 addrspace(1)* %out
+  store i32 %y, i32 addrspace(1)* %out.gep.1
+  store i32 %z, i32 addrspace(1)* %out.gep.2
+  ret void
+}
%out.gep.2 + ret void +} + +; CHECK-LABEL: @merge_global_store_4_adjacent_loads_f32 +; CHECK: load <4 x float> +; CHECK: store <4 x float> +define void @merge_global_store_4_adjacent_loads_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 { + %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1 + %out.gep.2 = getelementptr float, float addrspace(1)* %out, i32 2 + %out.gep.3 = getelementptr float, float addrspace(1)* %out, i32 3 + %in.gep.1 = getelementptr float, float addrspace(1)* %in, i32 1 + %in.gep.2 = getelementptr float, float addrspace(1)* %in, i32 2 + %in.gep.3 = getelementptr float, float addrspace(1)* %in, i32 3 + + %x = load float, float addrspace(1)* %in + %y = load float, float addrspace(1)* %in.gep.1 + %z = load float, float addrspace(1)* %in.gep.2 + %w = load float, float addrspace(1)* %in.gep.3 + + store float %x, float addrspace(1)* %out + store float %y, float addrspace(1)* %out.gep.1 + store float %z, float addrspace(1)* %out.gep.2 + store float %w, float addrspace(1)* %out.gep.3 + ret void +} + +; CHECK-LABEL: @merge_global_store_4_adjacent_loads_i32_nonzero_base +; CHECK: load <4 x i32> +; CHECK: store <4 x i32> +define void @merge_global_store_4_adjacent_loads_i32_nonzero_base(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 { + %in.gep.0 = getelementptr i32, i32 addrspace(1)* %in, i32 11 + %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 12 + %in.gep.2 = getelementptr i32, i32 addrspace(1)* %in, i32 13 + %in.gep.3 = getelementptr i32, i32 addrspace(1)* %in, i32 14 + %out.gep.0 = getelementptr i32, i32 addrspace(1)* %out, i32 7 + %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 8 + %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 9 + %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 10 + + %x = load i32, i32 addrspace(1)* %in.gep.0 + %y = load i32, i32 addrspace(1)* %in.gep.1 + %z = load i32, i32 addrspace(1)* %in.gep.2 + %w = load i32, i32 addrspace(1)* %in.gep.3 + + store i32 %x, i32 addrspace(1)* %out.gep.0 + store i32 %y, i32 addrspace(1)* %out.gep.1 + store i32 %z, i32 addrspace(1)* %out.gep.2 + store i32 %w, i32 addrspace(1)* %out.gep.3 + ret void +} + +; CHECK-LABEL: @merge_global_store_4_adjacent_loads_inverse_i32 +; CHECK: load <4 x i32> +; CHECK: store <4 x i32> +define void @merge_global_store_4_adjacent_loads_inverse_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 { + %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1 + %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2 + %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3 + %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1 + %in.gep.2 = getelementptr i32, i32 addrspace(1)* %in, i32 2 + %in.gep.3 = getelementptr i32, i32 addrspace(1)* %in, i32 3 + + %x = load i32, i32 addrspace(1)* %in + %y = load i32, i32 addrspace(1)* %in.gep.1 + %z = load i32, i32 addrspace(1)* %in.gep.2 + %w = load i32, i32 addrspace(1)* %in.gep.3 + + ; Make sure the barrier doesn't stop this + tail call void @llvm.amdgcn.s.barrier() #1 + + store i32 %w, i32 addrspace(1)* %out.gep.3 + store i32 %z, i32 addrspace(1)* %out.gep.2 + store i32 %y, i32 addrspace(1)* %out.gep.1 + store i32 %x, i32 addrspace(1)* %out + + ret void +} + +; CHECK-LABEL: @merge_global_store_4_adjacent_loads_shuffle_i32 +; CHECK: load <4 x i32> +; CHECK: store <4 x i32> +define void @merge_global_store_4_adjacent_loads_shuffle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 { + %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1 + 
+
+; CHECK-LABEL: @merge_global_store_4_adjacent_loads_shuffle_i32
+; CHECK: load <4 x i32>
+; CHECK: store <4 x i32>
+define void @merge_global_store_4_adjacent_loads_shuffle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
+  %in.gep.1 = getelementptr i32, i32 addrspace(1)* %in, i32 1
+  %in.gep.2 = getelementptr i32, i32 addrspace(1)* %in, i32 2
+  %in.gep.3 = getelementptr i32, i32 addrspace(1)* %in, i32 3
+
+  %x = load i32, i32 addrspace(1)* %in
+  %y = load i32, i32 addrspace(1)* %in.gep.1
+  %z = load i32, i32 addrspace(1)* %in.gep.2
+  %w = load i32, i32 addrspace(1)* %in.gep.3
+
+  ; Make sure the barrier doesn't stop this
+  tail call void @llvm.amdgcn.s.barrier() #1
+
+  store i32 %w, i32 addrspace(1)* %out
+  store i32 %z, i32 addrspace(1)* %out.gep.1
+  store i32 %y, i32 addrspace(1)* %out.gep.2
+  store i32 %x, i32 addrspace(1)* %out.gep.3
+
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_adjacent_loads_i8
+; CHECK: load <4 x i8>
+; CHECK: extractelement <4 x i8>
+; CHECK: extractelement <4 x i8>
+; CHECK: extractelement <4 x i8>
+; CHECK: extractelement <4 x i8>
+; CHECK: insertelement <4 x i8>
+; CHECK: insertelement <4 x i8>
+; CHECK: insertelement <4 x i8>
+; CHECK: insertelement <4 x i8>
+; CHECK: store <4 x i8>
+define void @merge_global_store_4_adjacent_loads_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i8 1
+  %out.gep.2 = getelementptr i8, i8 addrspace(1)* %out, i8 2
+  %out.gep.3 = getelementptr i8, i8 addrspace(1)* %out, i8 3
+  %in.gep.1 = getelementptr i8, i8 addrspace(1)* %in, i8 1
+  %in.gep.2 = getelementptr i8, i8 addrspace(1)* %in, i8 2
+  %in.gep.3 = getelementptr i8, i8 addrspace(1)* %in, i8 3
+
+  %x = load i8, i8 addrspace(1)* %in, align 4
+  %y = load i8, i8 addrspace(1)* %in.gep.1
+  %z = load i8, i8 addrspace(1)* %in.gep.2
+  %w = load i8, i8 addrspace(1)* %in.gep.3
+
+  store i8 %x, i8 addrspace(1)* %out, align 4
+  store i8 %y, i8 addrspace(1)* %out.gep.1
+  store i8 %z, i8 addrspace(1)* %out.gep.2
+  store i8 %w, i8 addrspace(1)* %out.gep.3
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_adjacent_loads_i8_natural_align
+; CHECK: load <4 x i8>
+; CHECK: store <4 x i8>
+define void @merge_global_store_4_adjacent_loads_i8_natural_align(i8 addrspace(1)* %out, i8 addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i8 1
+  %out.gep.2 = getelementptr i8, i8 addrspace(1)* %out, i8 2
+  %out.gep.3 = getelementptr i8, i8 addrspace(1)* %out, i8 3
+  %in.gep.1 = getelementptr i8, i8 addrspace(1)* %in, i8 1
+  %in.gep.2 = getelementptr i8, i8 addrspace(1)* %in, i8 2
+  %in.gep.3 = getelementptr i8, i8 addrspace(1)* %in, i8 3
+
+  %x = load i8, i8 addrspace(1)* %in
+  %y = load i8, i8 addrspace(1)* %in.gep.1
+  %z = load i8, i8 addrspace(1)* %in.gep.2
+  %w = load i8, i8 addrspace(1)* %in.gep.3
+
+  store i8 %x, i8 addrspace(1)* %out
+  store i8 %y, i8 addrspace(1)* %out.gep.1
+  store i8 %z, i8 addrspace(1)* %out.gep.2
+  store i8 %w, i8 addrspace(1)* %out.gep.3
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_4_vector_elts_loads_v4i32
+; CHECK: load <4 x i32>
+; CHECK: store <4 x i32>
+define void @merge_global_store_4_vector_elts_loads_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
+  %vec = load <4 x i32>, <4 x i32> addrspace(1)* %in
+
+  %x = extractelement <4 x i32> %vec, i32 0
+  %y = extractelement <4 x i32> %vec, i32 1
+  %z = extractelement <4 x i32> %vec, i32 2
+  %w = extractelement <4 x i32> %vec, i32 3
+
+  store i32 %x, i32 addrspace(1)* %out
+  store i32 %y, i32 addrspace(1)* %out.gep.1
+  store i32 %z, i32 addrspace(1)* %out.gep.2
+  store i32 %w, i32 addrspace(1)* %out.gep.3
+  ret void
+}
+
+; CHECK-LABEL: @merge_local_store_2_constants_i8
+; CHECK: store <2 x i8> <i8 -56, i8 123>, <2 x i8> addrspace(3)* %{{[0-9]+}}, align 2
+define void @merge_local_store_2_constants_i8(i8 addrspace(3)* %out) #0 {
+  %out.gep.1 = getelementptr i8, i8 addrspace(3)* %out, i32 1
+
+  store i8 123, i8 addrspace(3)* %out.gep.1
+  store i8 456, i8 addrspace(3)* %out, align 2
+  ret void
+}
+
+; CHECK-LABEL: @merge_local_store_2_constants_i32
+; CHECK: store <2 x i32> <i32 456, i32 123>, <2 x i32> addrspace(3)* %{{[0-9]+$}}
+define void @merge_local_store_2_constants_i32(i32 addrspace(3)* %out) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
+
+  store i32 123, i32 addrspace(3)* %out.gep.1
+  store i32 456, i32 addrspace(3)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_local_store_2_constants_i32_align_2
+; CHECK: store i32
+; CHECK: store i32
+define void @merge_local_store_2_constants_i32_align_2(i32 addrspace(3)* %out) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
+
+  store i32 123, i32 addrspace(3)* %out.gep.1, align 2
+  store i32 456, i32 addrspace(3)* %out, align 2
+  ret void
+}
+
+; CHECK-LABEL: @merge_local_store_4_constants_i32
+; CHECK: store <4 x i32> <i32 1234, i32 123, i32 456, i32 333>, <4 x i32> addrspace(3)*
+define void @merge_local_store_4_constants_i32(i32 addrspace(3)* %out) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
+  %out.gep.2 = getelementptr i32, i32 addrspace(3)* %out, i32 2
+  %out.gep.3 = getelementptr i32, i32 addrspace(3)* %out, i32 3
+
+  store i32 123, i32 addrspace(3)* %out.gep.1
+  store i32 456, i32 addrspace(3)* %out.gep.2
+  store i32 333, i32 addrspace(3)* %out.gep.3
+  store i32 1234, i32 addrspace(3)* %out
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_5_constants_i32
+; CHECK: store <4 x i32> <i32 9, i32 12, i32 16, i32 -12>, <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
+; CHECK: store i32
+define void @merge_global_store_5_constants_i32(i32 addrspace(1)* %out) {
+  store i32 9, i32 addrspace(1)* %out, align 4
+  %idx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
+  store i32 12, i32 addrspace(1)* %idx1, align 4
+  %idx2 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 2
+  store i32 16, i32 addrspace(1)* %idx2, align 4
+  %idx3 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 3
+  store i32 -12, i32 addrspace(1)* %idx3, align 4
+  %idx4 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 4
+  store i32 11, i32 addrspace(1)* %idx4, align 4
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_6_constants_i32
+; CHECK: store <4 x i32> <i32 13, i32 15, i32 62, i32 63>, <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
+; CHECK: store <2 x i32> <i32 11, i32 123>, <2 x i32> addrspace(1)* %{{[0-9]+}}, align 4
+define void @merge_global_store_6_constants_i32(i32 addrspace(1)* %out) {
+  store i32 13, i32 addrspace(1)* %out, align 4
+  %idx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
+  store i32 15, i32 addrspace(1)* %idx1, align 4
+  %idx2 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 2
+  store i32 62, i32 addrspace(1)* %idx2, align 4
+  %idx3 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 3
+  store i32 63, i32 addrspace(1)* %idx3, align 4
+  %idx4 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 4
+  store i32 11, i32 addrspace(1)* %idx4, align 4
+  %idx5 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 5
+  store i32 123, i32 addrspace(1)* %idx5, align 4
+  ret void
+}
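+
+; These tests exercise the power-of-two splitting in vectorizeStoreChain():
+; five i32 stores become a <4 x i32> chunk plus one scalar store, six become
+; <4 x i32> + <2 x i32>, and seven (below) become <4 x i32> + <3 x i32>,
+; since 12 bytes is still a multiple of 4.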
+
+; CHECK-LABEL: @merge_global_store_7_constants_i32
+; CHECK: store <4 x i32> <i32 34, i32 999, i32 65, i32 33>, <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
+; CHECK: store <3 x i32> <i32 98, i32 91, i32 212>, <3 x i32> addrspace(1)* %{{[0-9]+}}, align 4
+define void @merge_global_store_7_constants_i32(i32 addrspace(1)* %out) {
+  store i32 34, i32 addrspace(1)* %out, align 4
+  %idx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
+  store i32 999, i32 addrspace(1)* %idx1, align 4
+  %idx2 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 2
+  store i32 65, i32 addrspace(1)* %idx2, align 4
+  %idx3 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 3
+  store i32 33, i32 addrspace(1)* %idx3, align 4
+  %idx4 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 4
+  store i32 98, i32 addrspace(1)* %idx4, align 4
+  %idx5 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 5
+  store i32 91, i32 addrspace(1)* %idx5, align 4
+  %idx6 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 6
+  store i32 212, i32 addrspace(1)* %idx6, align 4
+  ret void
+}
+
+; CHECK-LABEL: @merge_global_store_8_constants_i32
+; CHECK: store <4 x i32> <i32 34, i32 999, i32 65, i32 33>, <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
+; CHECK: store <4 x i32> <i32 98, i32 91, i32 212, i32 999>, <4 x i32> addrspace(1)* %{{[0-9]+}}, align 4
+define void @merge_global_store_8_constants_i32(i32 addrspace(1)* %out) {
+  store i32 34, i32 addrspace(1)* %out, align 4
+  %idx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
+  store i32 999, i32 addrspace(1)* %idx1, align 4
+  %idx2 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 2
+  store i32 65, i32 addrspace(1)* %idx2, align 4
+  %idx3 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 3
+  store i32 33, i32 addrspace(1)* %idx3, align 4
+  %idx4 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 4
+  store i32 98, i32 addrspace(1)* %idx4, align 4
+  %idx5 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 5
+  store i32 91, i32 addrspace(1)* %idx5, align 4
+  %idx6 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 6
+  store i32 212, i32 addrspace(1)* %idx6, align 4
+  %idx7 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 7
+  store i32 999, i32 addrspace(1)* %idx7, align 4
+  ret void
+}
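+
+; Each <3 x ...> access below is already bigger than half of the 128-bit
+; budget, so collectInstructions() never collects it and these
+; already-vector copies must pass through the vectorizer untouched.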
+
+; CHECK-LABEL: @copy_v3i32_align4
+; CHECK: %vec = load <3 x i32>, <3 x i32> addrspace(1)* %in, align 4
+; CHECK: store <3 x i32> %vec, <3 x i32> addrspace(1)* %out
+define void @copy_v3i32_align4(<3 x i32> addrspace(1)* noalias %out, <3 x i32> addrspace(1)* noalias %in) #0 {
+  %vec = load <3 x i32>, <3 x i32> addrspace(1)* %in, align 4
+  store <3 x i32> %vec, <3 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @copy_v3i64_align4
+; CHECK: %vec = load <3 x i64>, <3 x i64> addrspace(1)* %in, align 4
+; CHECK: store <3 x i64> %vec, <3 x i64> addrspace(1)* %out
+define void @copy_v3i64_align4(<3 x i64> addrspace(1)* noalias %out, <3 x i64> addrspace(1)* noalias %in) #0 {
+  %vec = load <3 x i64>, <3 x i64> addrspace(1)* %in, align 4
+  store <3 x i64> %vec, <3 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @copy_v3f32_align4
+; CHECK: %vec = load <3 x float>, <3 x float> addrspace(1)* %in, align 4
+; CHECK: store <3 x float>
+define void @copy_v3f32_align4(<3 x float> addrspace(1)* noalias %out, <3 x float> addrspace(1)* noalias %in) #0 {
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %in, align 4
+  %fadd = fadd <3 x float> %vec, <float 1.0, float 2.0, float 4.0>
+  store <3 x float> %fadd, <3 x float> addrspace(1)* %out
+  ret void
+}
+
+; CHECK-LABEL: @copy_v3f64_align4
+; CHECK: %vec = load <3 x double>, <3 x double> addrspace(1)* %in, align 4
+; CHECK: store <3 x double> %fadd, <3 x double> addrspace(1)* %out
+define void @copy_v3f64_align4(<3 x double> addrspace(1)* noalias %out, <3 x double> addrspace(1)* noalias %in) #0 {
+  %vec = load <3 x double>, <3 x double> addrspace(1)* %in, align 4
+  %fadd = fadd <3 x double> %vec, <double 1.0, double 2.0, double 4.0>
+  store <3 x double> %fadd, <3 x double> addrspace(1)* %out
+  ret void
+}
+
+declare void @llvm.amdgcn.s.barrier() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { convergent nounwind }
Index: test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll
===================================================================
--- /dev/null
+++ test/Transforms/LoadStoreVectorizer/AMDGPU/no-implicit-float.ll
@@ -0,0 +1,20 @@
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -load-store-vectorizer -S -o - %s | FileCheck %s
+
+; CHECK-LABEL: @no_implicit_float(
+; CHECK: store i32
+; CHECK: store i32
+; CHECK: store i32
+; CHECK: store i32
+define void @no_implicit_float(i32 addrspace(1)* %out) #0 {
+  %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+  %out.gep.2 = getelementptr i32, i32 addrspace(1)* %out, i32 2
+  %out.gep.3 = getelementptr i32, i32 addrspace(1)* %out, i32 3
+
+  store i32 123, i32 addrspace(1)* %out.gep.1
+  store i32 456, i32 addrspace(1)* %out.gep.2
+  store i32 333, i32 addrspace(1)* %out.gep.3
+  store i32 1234, i32 addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind noimplicitfloat }