Index: include/llvm/Analysis/LoopAccessAnalysis.h
===================================================================
--- include/llvm/Analysis/LoopAccessAnalysis.h
+++ include/llvm/Analysis/LoopAccessAnalysis.h
@@ -74,7 +74,7 @@
 /// generates run-time checks to prove independence. This is done by
 /// AccessAnalysis::canCheckPtrAtRT and the checks are maintained by the
 /// RuntimePointerCheck class.
-class LoopAccessAnalysis {
+class LoopAccessInfo {
 public:
   /// \brief Collection of parameters used from the vectorizer.
   struct VectorizerParams {
@@ -137,10 +137,10 @@
     SmallVector<unsigned, 2> AliasSetId;
   };

-  LoopAccessAnalysis(Function *F, Loop *L, ScalarEvolution *SE,
-                     const DataLayout *DL, const TargetLibraryInfo *TLI,
-                     AliasAnalysis *AA, DominatorTree *DT,
-                     const VectorizerParams &VectParams) :
+  LoopAccessInfo(Function *F, Loop *L, ScalarEvolution *SE,
+                 const DataLayout *DL, const TargetLibraryInfo *TLI,
+                 AliasAnalysis *AA, DominatorTree *DT,
+                 const VectorizerParams &VectParams) :
       TheFunction(F), TheLoop(L), SE(SE), DL(DL), TLI(TLI), AA(AA), DT(DT),
       NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1U),
       VectParams(VectParams) {}
Index: lib/Analysis/LoopAccessAnalysis.cpp
===================================================================
--- lib/Analysis/LoopAccessAnalysis.cpp
+++ lib/Analysis/LoopAccessAnalysis.cpp
@@ -73,12 +73,11 @@
   return SE->getSCEV(Ptr);
 }

-void LoopAccessAnalysis::RuntimePointerCheck::insert(ScalarEvolution *SE,
-                                                     Loop *Lp, Value *Ptr,
-                                                     bool WritePtr,
-                                                     unsigned DepSetId,
-                                                     unsigned ASId,
-                                                     ValueToValueMap &Strides) {
+void LoopAccessInfo::RuntimePointerCheck::insert(ScalarEvolution *SE, Loop *Lp,
+                                                 Value *Ptr, bool WritePtr,
+                                                 unsigned DepSetId,
+                                                 unsigned ASId,
+                                                 ValueToValueMap &Strides) {
   // Get the stride replaced scev.
   const SCEV *Sc = replaceSymbolicStrideSCEV(SE, Strides, Ptr);
   const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
@@ -128,7 +127,7 @@

   /// \brief Check whether we can check the pointers at runtime for
   /// non-intersection.
-  bool canCheckPtrAtRT(LoopAccessAnalysis::RuntimePointerCheck &RtCheck,
+  bool canCheckPtrAtRT(LoopAccessInfo::RuntimePointerCheck &RtCheck,
                        unsigned &NumComparisons, ScalarEvolution *SE,
                        Loop *TheLoop, ValueToValueMap &Strides,
@@ -196,7 +195,7 @@
                                 const Loop *Lp, ValueToValueMap &StridesMap);

 bool AccessAnalysis::canCheckPtrAtRT(
-    LoopAccessAnalysis::RuntimePointerCheck &RtCheck,
+    LoopAccessInfo::RuntimePointerCheck &RtCheck,
     unsigned &NumComparisons, ScalarEvolution *SE, Loop *TheLoop,
     ValueToValueMap &StridesMap, bool ShouldCheckStride) {
   // Find pointers with computable bounds. We are going to use this information
@@ -439,7 +438,7 @@
   typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;

   MemoryDepChecker(ScalarEvolution *Se, const DataLayout *Dl, const Loop *L,
-                   const LoopAccessAnalysis::VectorizerParams &VectParams)
+                   const LoopAccessInfo::VectorizerParams &VectParams)
       : SE(Se), DL(Dl), InnermostLoop(L), AccessIdx(0),
         ShouldRetryWithRuntimeCheck(false), VectParams(VectParams) {}
@@ -497,7 +496,7 @@
   bool ShouldRetryWithRuntimeCheck;

   /// \brief Vectorizer parameters used by the analysis.
-  LoopAccessAnalysis::VectorizerParams VectParams;
+  LoopAccessInfo::VectorizerParams VectParams;

   /// \brief Check whether there is a plausible dependence between the two
   /// accesses.
@@ -815,7 +814,7 @@
   return true;
 }

-bool LoopAccessAnalysis::canVectorizeMemory(ValueToValueMap &Strides) {
+bool LoopAccessInfo::canVectorizeMemory(ValueToValueMap &Strides) {
   typedef SmallVector<Value*, 16> ValueVector;
   typedef SmallPtrSet<Value*, 16> ValueSet;

@@ -1069,7 +1068,7 @@
   return CanVecMem;
 }

-bool LoopAccessAnalysis::blockNeedsPredication(BasicBlock *BB) {
+bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB) {
   assert(TheLoop->contains(BB) && "Unknown block used");

   // Blocks that do not dominate the latch need predication.
@@ -1077,11 +1076,11 @@
   return !DT->dominates(BB, Latch);
 }

-void LoopAccessAnalysis::emitAnalysis(VectorizationReport &Message) {
+void LoopAccessInfo::emitAnalysis(VectorizationReport &Message) {
   VectorizationReport::emitAnalysis(Message, TheFunction, TheLoop);
 }

-bool LoopAccessAnalysis::isUniform(Value *V) {
+bool LoopAccessInfo::isUniform(Value *V) {
   return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
 }

@@ -1097,7 +1096,7 @@
 }

 std::pair<Instruction *, Instruction *>
-LoopAccessAnalysis::addRuntimeCheck(Instruction *Loc) {
+LoopAccessInfo::addRuntimeCheck(Instruction *Loc) {
   Instruction *tnullptr = nullptr;
   if (!PtrRtCheck.Need)
     return std::pair<Instruction *, Instruction *>(tnullptr, tnullptr);
Index: lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- lib/Transforms/Vectorize/LoopVectorize.cpp
+++ lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -551,8 +551,8 @@
       : NumPredStores(0), TheLoop(L), SE(SE), DL(DL), TLI(TLI),
         TheFunction(F), TTI(TTI), Induction(nullptr), WidestIndTy(nullptr),
-        LAA(F, L, SE, DL, TLI, AA, DT,
-            LoopAccessAnalysis::VectorizerParams(
+        LAI(F, L, SE, DL, TLI, AA, DT,
+            LoopAccessInfo::VectorizerParams(
                 MaxVectorWidth, VectorizationFactor, VectorizationInterleave,
                 RuntimeMemoryCheckThreshold)),
         HasFunNoNaNAttr(false) {}
@@ -740,19 +740,19 @@
   bool isUniformAfterVectorization(Instruction* I) { return Uniforms.count(I); }

   /// Returns the information that we collected about runtime memory check.
-  LoopAccessAnalysis::RuntimePointerCheck *getRuntimePointerCheck() {
-    return LAA.getRuntimePointerCheck();
+  LoopAccessInfo::RuntimePointerCheck *getRuntimePointerCheck() {
+    return LAI.getRuntimePointerCheck();
   }

-  LoopAccessAnalysis *getLAA() {
-    return &LAA;
+  LoopAccessInfo *getLAI() {
+    return &LAI;
   }

   /// This function returns the identity element (or neutral element) for
   /// the operation K.
   static Constant *getReductionIdentity(ReductionKind K, Type *Tp);

-  unsigned getMaxSafeDepDistBytes() { return LAA.getMaxSafeDepDistBytes(); }
+  unsigned getMaxSafeDepDistBytes() { return LAI.getMaxSafeDepDistBytes(); }

   bool hasStride(Value *V) { return StrideSet.count(V); }
   bool mustCheckStrides() { return !StrideSet.empty(); }
@@ -777,10 +777,10 @@
     return (MaskedOp.count(I) != 0);
   }
   unsigned getNumStores() const {
-    return LAA.getNumStores();
+    return LAI.getNumStores();
   }
   unsigned getNumLoads() const {
-    return LAA.getNumLoads();
+    return LAI.getNumLoads();
   }
   unsigned getNumPredStores() const {
     return NumPredStores;
@@ -874,7 +874,7 @@
   /// This set holds the variables which are known to be uniform after
   /// vectorization.
   SmallPtrSet<Instruction*, 4> Uniforms;
-  LoopAccessAnalysis LAA;
+  LoopAccessInfo LAI;

   /// Can we assume the absence of NaNs.
   bool HasFunNoNaNAttr;
@@ -1658,7 +1658,7 @@
 }

 bool LoopVectorizationLegality::isUniform(Value *V) {
-  return LAA.isUniform(V);
+  return LAI.isUniform(V);
 }

 InnerLoopVectorizer::VectorParts&
@@ -2230,7 +2230,7 @@
   // faster.
  Instruction *MemRuntimeCheck;
  std::tie(FirstCheckInst, MemRuntimeCheck) =
-      Legal->getLAA()->addRuntimeCheck(LastBypassBlock->getTerminator());
+      Legal->getLAI()->addRuntimeCheck(LastBypassBlock->getTerminator());
  if (MemRuntimeCheck) {
    // Create a new block containing the memory check.
    BasicBlock *CheckBlock =
@@ -3398,7 +3398,7 @@
   collectLoopUniforms();

   DEBUG(dbgs() << "LV: We can vectorize this loop" <<
-        (LAA.getRuntimePointerCheck()->Need ? " (with a runtime bound check)" :
+        (LAI.getRuntimePointerCheck()->Need ? " (with a runtime bound check)" :
          "") <<"!\n");

@@ -3824,7 +3824,7 @@
 }

 bool LoopVectorizationLegality::canVectorizeMemory() {
-  return LAA.canVectorizeMemory(Strides);
+  return LAI.canVectorizeMemory(Strides);
 }

 static bool hasMultipleUsesOf(Instruction *I,
@@ -4168,7 +4168,7 @@
 }

 bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
-  return LAA.blockNeedsPredication(BB);
+  return LAI.blockNeedsPredication(BB);
 }

 bool LoopVectorizationLegality::blockCanBePredicated(BasicBlock *BB,
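For orientation, here is a minimal usage sketch of the renamed class, mirroring how LoopVectorizationLegality and InnerLoopVectorizer drive it in the hunks above. It is not part of the patch: the helper name runLoopAccessChecks, its parameter list, and the CheckInsertPt argument are invented for illustration, and the include assumes the tree as of this change.

```cpp
// Illustration only -- not part of the patch. The LoopAccessInfo calls mirror
// the ones exercised by the vectorizer in the diff above.
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include <tuple>

using namespace llvm;

static bool runLoopAccessChecks(Function *F, Loop *L, ScalarEvolution *SE,
                                const DataLayout *DL,
                                const TargetLibraryInfo *TLI,
                                AliasAnalysis *AA, DominatorTree *DT,
                                const LoopAccessInfo::VectorizerParams &Params,
                                ValueToValueMap &Strides,
                                Instruction *CheckInsertPt) {
  // Same construction pattern as LoopVectorizationLegality's initializer list.
  LoopAccessInfo LAI(F, L, SE, DL, TLI, AA, DT, Params);

  // Run the dependence analysis; this may decide that run-time pointer checks
  // are required for the loop's memory accesses to be safe to vectorize.
  if (!LAI.canVectorizeMemory(Strides))
    return false;

  // If checks are required, materialize them before CheckInsertPt, as the
  // vectorizer does at the terminator of its last bypass block.
  if (LAI.getRuntimePointerCheck()->Need) {
    Instruction *FirstCheckInst, *MemRuntimeCheck;
    std::tie(FirstCheckInst, MemRuntimeCheck) =
        LAI.addRuntimeCheck(CheckInsertPt);
    // addRuntimeCheck returns null instructions when no check was emitted.
    (void)FirstCheckInst;
    (void)MemRuntimeCheck;
  }
  return true;
}
```

The contract exercised here is only what is visible in the diff: canVectorizeMemory performs the dependence analysis, the RuntimePointerCheck's Need flag records whether run-time checks are required, and addRuntimeCheck emits them at the requested insertion point.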