Index: include/llvm/Analysis/LoopAccessAnalysis.h =================================================================== --- include/llvm/Analysis/LoopAccessAnalysis.h +++ include/llvm/Analysis/LoopAccessAnalysis.h @@ -24,6 +24,7 @@ #include "llvm/IR/ValueHandle.h" #include "llvm/Pass.h" #include "llvm/Support/raw_ostream.h" +#include "llvm/Transforms/Utils/LoopUtils.h" namespace llvm { @@ -178,10 +179,10 @@ }; MemoryDepChecker(ScalarEvolution *Se, const Loop *L, - SCEVUnionPredicate &Preds) + SCEVPredicatedLayer &PredSE) : SE(Se), InnermostLoop(L), AccessIdx(0), ShouldRetryWithRuntimeCheck(false), SafeForVectorization(true), - RecordInterestingDependences(true), Preds(Preds) {} + RecordInterestingDependences(true), PredSE(PredSE) {} /// \brief Register the location (instructions are given increasing numbers) /// of a write access. @@ -299,7 +300,7 @@ /// be made in order to avoid unknown dependences. For example we might /// assume a unit stride for a pointer in order to prove that a memory access /// is strided and doesn't wrap. - SCEVUnionPredicate &Preds; + SCEVPredicatedLayer &PredSE; }; /// \brief Holds information about the memory runtime legality checks to verify @@ -347,7 +348,7 @@ /// and change \p Preds. void insert(Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId, unsigned ASId, const ValueToValueMap &Strides, - SCEVUnionPredicate &Preds); + SCEVPredicatedLayer &PredSE); /// \brief No run-time memory checking is necessary. bool empty() const { return Pointers.empty(); } @@ -560,7 +561,7 @@ /// re-written (and therefore simplified) according to Preds. /// A user of LoopAccessAnalysis will need to emit the runtime checks /// associated with this predicate. - SCEVUnionPredicate Preds; + SCEVPredicatedLayer PredSE; private: /// \brief Analyze the loop. Substitute symbolic strides using Strides. @@ -616,18 +617,17 @@ /// If \p OrigPtr is not null, use it to look up the stride value instead of \p /// Ptr. 
\p PtrToStride provides the mapping between the pointer value and its /// stride as collected by LoopVectorizationLegality::collectStridedAccess. -const SCEV *replaceSymbolicStrideSCEV(ScalarEvolution *SE, +const SCEV *replaceSymbolicStrideSCEV(SCEVPredicatedLayer &PredSE, const ValueToValueMap &PtrToStride, - SCEVUnionPredicate &Preds, Value *Ptr, - Value *OrigPtr = nullptr); + Value *Ptr, Value *OrigPtr = nullptr); /// \brief Check the stride of the pointer and ensure that it does not wrap in /// the address space, assuming \p Preds is true. /// /// If necessary this method will version the stride of the pointer according /// to \p PtrToStride and therefore add a new predicate to \p Preds. -int isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp, - const ValueToValueMap &StridesMap, SCEVUnionPredicate &Preds); +int isStridedPtr(SCEVPredicatedLayer &PredSE, Value *Ptr, const Loop *Lp, + const ValueToValueMap &StridesMap); /// \brief This analysis provides dependence information for the memory accesses /// of a loop. Index: include/llvm/Analysis/ScalarEvolution.h =================================================================== --- include/llvm/Analysis/ScalarEvolution.h +++ include/llvm/Analysis/ScalarEvolution.h @@ -193,7 +193,7 @@ /// \brief Returns the estimated complexity of this predicate. /// This is roughly measured in the number of run-time checks required. - virtual unsigned getComplexity() { return 1; } + virtual unsigned getComplexity() const { return 1; } /// \brief Returns true if the predicate is always true. This means that no /// assumptions were made and nothing needs to be checked at run-time. @@ -303,7 +303,7 @@ /// \brief We estimate the complexity of a union predicate as the size /// number of predicates in the union. 
- unsigned getComplexity() override { return Preds.size(); } /// Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const SCEVPredicate *P) { Index: include/llvm/Transforms/Utils/LoopUtils.h =================================================================== --- include/llvm/Transforms/Utils/LoopUtils.h +++ include/llvm/Transforms/Utils/LoopUtils.h @@ -374,6 +374,44 @@ /// \brief Returns the instructions that use values defined in the loop. SmallVector<Instruction *, 8> findDefsUsedOutsideOfLoop(Loop *L); + +/// An interface layer with SCEV used to manage how we see SCEV expressions for +/// values in the context of existing predicates. We can add new predicates, +/// but we cannot remove them. +/// +/// This layer has multiple purposes: +/// - provides a simple interface for SCEV versioning. +/// - guarantees that the order of transformations applied on a SCEV +/// expression for a single Value is consistent across two different +/// getSCEV calls. This means that, for example, once we've obtained +/// an AddRec expression for a certain value through expression rewriting, +/// we will continue to get an AddRec expression for that Value. +/// - lowers the number of expression rewrites. +class SCEVPredicatedLayer {public: + SCEVPredicatedLayer(ScalarEvolution &SE); + const SCEVUnionPredicate &getPredicate() const; + /// \brief Returns the SCEV expression of V, in the context of the current + /// SCEV predicate. + /// The order of transformations applied on the expression of V returned + /// by ScalarEvolution is guaranteed to be preserved, even when adding new + /// predicates. + const SCEV *getSCEV(Value *V); + /// \brief Adds a new predicate. 
+ void addPredicate(const SCEVPredicate &Pred); + + ScalarEvolution *getSE() { return &SE; } + +private: + void updateGeneration(); + + typedef std::pair<unsigned, const SCEV *> RewriteEntry; + DenseMap<const SCEV *, RewriteEntry> RewriteMap; + ScalarEvolution &SE; + SCEVUnionPredicate Preds; + unsigned Generation; +}; + } #endif Index: lib/Analysis/LoopAccessAnalysis.cpp =================================================================== --- lib/Analysis/LoopAccessAnalysis.cpp +++ lib/Analysis/LoopAccessAnalysis.cpp @@ -87,11 +87,10 @@ return V; } -const SCEV *llvm::replaceSymbolicStrideSCEV(ScalarEvolution *SE, +const SCEV *llvm::replaceSymbolicStrideSCEV(SCEVPredicatedLayer &PredSE, const ValueToValueMap &PtrToStride, - SCEVUnionPredicate &Preds, Value *Ptr, Value *OrigPtr) { - const SCEV *OrigSCEV = SE->getSCEV(Ptr); + const SCEV *OrigSCEV = PredSE.getSCEV(Ptr); // If there is an entry in the map return the SCEV of the pointer with the // symbolic stride replaced by one. @@ -108,16 +107,17 @@ ValueToValueMap RewriteMap; RewriteMap[StrideVal] = One; + ScalarEvolution *SE = PredSE.getSE(); const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal)); const auto *CT = static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType())); - Preds.add(SE->getEqualPredicate(U, CT)); + PredSE.addPredicate(*SE->getEqualPredicate(U, CT)); + auto *Expr = PredSE.getSCEV(Ptr); - const SCEV *ByOne = SE->rewriteUsingPredicate(OrigSCEV, Preds); - DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *ByOne + DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *Expr << "\n"); - return ByOne; + return Expr; } // Otherwise, just return the SCEV of the original pointer. @@ -127,11 +127,12 @@ void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId, unsigned ASId, const ValueToValueMap &Strides, - SCEVUnionPredicate &Preds) { + SCEVPredicatedLayer &PredSE) { // Get the stride replaced scev. 
- const SCEV *Sc = replaceSymbolicStrideSCEV(SE, Strides, Preds, Ptr); + const SCEV *Sc = replaceSymbolicStrideSCEV(PredSE, Strides, Ptr); const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc); assert(AR && "Invalid addrec expression"); + ScalarEvolution *SE = PredSE.getSE(); const SCEV *Ex = SE->getBackedgeTakenCount(Lp); const SCEV *ScStart = AR->getStart(); @@ -423,9 +424,9 @@ typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet; AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, LoopInfo *LI, - MemoryDepChecker::DepCandidates &DA, SCEVUnionPredicate &Preds) + MemoryDepChecker::DepCandidates &DA, SCEVPredicatedLayer &PredSE) : DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckAnalysisNeeded(false), - Preds(Preds) {} + PredSE(PredSE) {} /// \brief Register a load and whether it is only read from. void addLoad(MemoryLocation &Loc, bool IsReadOnly) { @@ -512,16 +513,16 @@ bool IsRTCheckAnalysisNeeded; /// The SCEV predicate containing all the SCEV-related assumptions. - SCEVUnionPredicate &Preds; + SCEVPredicatedLayer &PredSE; }; } // end anonymous namespace /// \brief Check whether a pointer can participate in a runtime bounds check. -static bool hasComputableBounds(ScalarEvolution *SE, +static bool hasComputableBounds(SCEVPredicatedLayer &PredSE, const ValueToValueMap &Strides, Value *Ptr, - Loop *L, SCEVUnionPredicate &Preds) { - const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, Strides, Preds, Ptr); + Loop *L) { + const SCEV *PtrScev = replaceSymbolicStrideSCEV(PredSE, Strides, Ptr); const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev); if (!AR) return false; @@ -564,11 +565,11 @@ else ++NumReadPtrChecks; - if (hasComputableBounds(SE, StridesMap, Ptr, TheLoop, Preds) && + if (hasComputableBounds(PredSE, StridesMap, Ptr, TheLoop) && // When we run after a failing dependency check we have to make sure // we don't have wrapping pointers. 
(!ShouldCheckStride || - isStridedPtr(SE, Ptr, TheLoop, StridesMap, Preds) == 1)) { + isStridedPtr(PredSE, Ptr, TheLoop, StridesMap) == 1)) { // The id of the dependence set. unsigned DepId; @@ -582,7 +583,7 @@ // Each access has its own dependence set. DepId = RunningDepId++; - RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, Preds); + RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PredSE); DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n'); } else { @@ -812,9 +813,8 @@ } /// \brief Check whether the access through \p Ptr has a constant stride. -int llvm::isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp, - const ValueToValueMap &StridesMap, - SCEVUnionPredicate &Preds) { +int llvm::isStridedPtr(SCEVPredicatedLayer &PredSE, Value *Ptr, const Loop *Lp, + const ValueToValueMap &StridesMap) { Type *Ty = Ptr->getType(); assert(Ty->isPointerTy() && "Unexpected non-ptr"); @@ -826,7 +826,7 @@ return 0; } - const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, StridesMap, Preds, Ptr); + const SCEV *PtrScev = replaceSymbolicStrideSCEV(PredSE, StridesMap, Ptr); const SCEVAddRecExpr *AR = dyn_cast(PtrScev); if (!AR) { @@ -849,7 +849,7 @@ // to access the pointer value "0" which is undefined behavior in address // space 0, therefore we can also vectorize this case. bool IsInBoundsGEP = isInBoundsGep(Ptr); - bool IsNoWrapAddRec = isNoWrapAddRec(Ptr, AR, SE, Lp); + bool IsNoWrapAddRec = isNoWrapAddRec(Ptr, AR, PredSE.getSE(), Lp); bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0; if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) { DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space " @@ -858,7 +858,7 @@ } // Check the step is constant. - const SCEV *Step = AR->getStepRecurrence(*SE); + const SCEV *Step = AR->getStepRecurrence(*PredSE.getSE()); // Calculate the pointer stride and check if it is constant. 
const SCEVConstant *C = dyn_cast(Step); @@ -1037,11 +1037,11 @@ BPtr->getType()->getPointerAddressSpace()) return Dependence::Unknown; - const SCEV *AScev = replaceSymbolicStrideSCEV(SE, Strides, Preds, APtr); - const SCEV *BScev = replaceSymbolicStrideSCEV(SE, Strides, Preds, BPtr); + const SCEV *AScev = replaceSymbolicStrideSCEV(PredSE, Strides, APtr); + const SCEV *BScev = replaceSymbolicStrideSCEV(PredSE, Strides, BPtr); - int StrideAPtr = isStridedPtr(SE, APtr, InnermostLoop, Strides, Preds); - int StrideBPtr = isStridedPtr(SE, BPtr, InnermostLoop, Strides, Preds); + int StrideAPtr = isStridedPtr(PredSE, APtr, InnermostLoop, Strides); + int StrideBPtr = isStridedPtr(PredSE, BPtr, InnermostLoop, Strides); const SCEV *Src = AScev; const SCEV *Sink = BScev; @@ -1440,7 +1440,7 @@ MemoryDepChecker::DepCandidates DependentAccesses; AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(), - AA, LI, DependentAccesses, Preds); + AA, LI, DependentAccesses, PredSE); // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects // multiple times on the same object. If the ptr is accessed twice, once @@ -1492,7 +1492,7 @@ // words may be written to the same address. 
bool IsReadOnlyPtr = false; if (Seen.insert(Ptr).second || - !isStridedPtr(SE, Ptr, TheLoop, Strides, Preds)) { + !isStridedPtr(PredSE, Ptr, TheLoop, Strides)) { ++NumReads; IsReadOnlyPtr = true; } @@ -1742,7 +1742,8 @@ const TargetLibraryInfo *TLI, AliasAnalysis *AA, DominatorTree *DT, LoopInfo *LI, const ValueToValueMap &Strides) - : PtrRtChecking(SE), DepChecker(SE, L, Preds), TheLoop(L), SE(SE), DL(DL), + : PredSE(*SE), PtrRtChecking(SE), DepChecker(SE, L, PredSE), + TheLoop(L), SE(SE), DL(DL), TLI(TLI), AA(AA), DT(DT), LI(LI), NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1U), CanVecMem(false), StoreToLoopInvariantAddress(false) { @@ -1779,7 +1780,7 @@ << "found in loop.\n"; OS.indent(Depth) << "SCEV assumptions:\n"; - Preds.print(OS, Depth); + PredSE.getPredicate().print(OS, Depth); } const LoopAccessInfo & Index: lib/Transforms/Utils/LoopUtils.cpp =================================================================== --- lib/Transforms/Utils/LoopUtils.cpp +++ lib/Transforms/Utils/LoopUtils.cpp @@ -727,3 +727,45 @@ return UsedOutside; } + +SCEVPredicatedLayer::SCEVPredicatedLayer(ScalarEvolution &SE) : SE(SE), + Generation(0) {} + +const SCEV *SCEVPredicatedLayer::getSCEV(Value *V) { + const SCEV *Expr = SE.getSCEV(V); + auto II = RewriteMap.find(Expr); + if (II == RewriteMap.end()) { + const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, Preds); + RewriteMap[Expr] = {Generation, NewSCEV}; + return NewSCEV; + } + + if (Generation == II->second.first) + return II->second.second; + + const SCEV *NewExpr = SE.rewriteUsingPredicate(II->second.second, Preds); + RewriteMap[Expr] = {Generation, NewExpr}; + return NewExpr; +} + +void SCEVPredicatedLayer::addPredicate(const SCEVPredicate &Pred) { + if (Preds.implies(&Pred)) + return; + Preds.add(&Pred); + updateGeneration(); +} + +const SCEVUnionPredicate &SCEVPredicatedLayer::getPredicate() const { + return Preds; +} + +void SCEVPredicatedLayer::updateGeneration() { + Generation++; + // The generation number wrapped. 
Recompute everything. + if (Generation == 0) { + for (auto &II : RewriteMap) { + const SCEV *Rewritten = II.second.second; + II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, Preds)}; + } + } +} Index: lib/Transforms/Vectorize/LoopVectorize.cpp =================================================================== --- lib/Transforms/Vectorize/LoopVectorize.cpp +++ lib/Transforms/Vectorize/LoopVectorize.cpp @@ -313,12 +313,12 @@ InnerLoopVectorizer(Loop *OrigLoop, ScalarEvolution *SE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, unsigned VecWidth, - unsigned UnrollFactor, SCEVUnionPredicate &Preds) + unsigned UnrollFactor, SCEVPredicatedLayer &PredSE) : OrigLoop(OrigLoop), SE(SE), LI(LI), DT(DT), TLI(TLI), TTI(TTI), VF(VecWidth), UF(UnrollFactor), Builder(SE->getContext()), Induction(nullptr), OldInduction(nullptr), WidenMap(UnrollFactor), TripCount(nullptr), VectorTripCount(nullptr), Legal(nullptr), - AddedSafetyChecks(false), Preds(Preds) {} + AddedSafetyChecks(false), PredSE(PredSE) {} // Perform the actual loop widening (vectorization). // MinimumBitWidths maps scalar integer values to the smallest bitwidth they @@ -557,7 +557,7 @@ /// context of existing SCEV assumptions. Since legality checking is /// not done here, we don't need to use this predicate to record /// further assumptions. 
- SCEVUnionPredicate &Preds; + SCEVPredicatedLayer &PredSE; }; class InnerLoopUnroller : public InnerLoopVectorizer { @@ -565,9 +565,9 @@ InnerLoopUnroller(Loop *OrigLoop, ScalarEvolution *SE, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI, unsigned UnrollFactor, - SCEVUnionPredicate &Preds) + SCEVPredicatedLayer &PredSE) : InnerLoopVectorizer(OrigLoop, SE, LI, DT, TLI, TTI, 1, UnrollFactor, - Preds) {} + PredSE) {} private: void scalarizeInstruction(Instruction *Instr, @@ -789,8 +789,8 @@ class InterleavedAccessInfo { public: InterleavedAccessInfo(ScalarEvolution *SE, Loop *L, DominatorTree *DT, - SCEVUnionPredicate &Preds) - : SE(SE), TheLoop(L), DT(DT), Preds(Preds) {} + SCEVPredicatedLayer &PredSE) + : SE(SE), TheLoop(L), DT(DT), PredSE(PredSE) {} ~InterleavedAccessInfo() { SmallSet DelSet; @@ -829,7 +829,7 @@ /// context of existing SCEV assumptions. The interleaved access /// analysis can also add new predicates (for example by versioning /// strides of pointers). - SCEVUnionPredicate &Preds; + SCEVPredicatedLayer &PredSE; /// Holds the relationships between the members and the interleave group. DenseMap InterleaveGroupMap; @@ -1194,12 +1194,12 @@ LoopAccessAnalysis *LAA, LoopVectorizationRequirements *R, const LoopVectorizeHints *H, - SCEVUnionPredicate &Preds) + SCEVPredicatedLayer &PredSE) : NumPredStores(0), TheLoop(L), SE(SE), TLI(TLI), TheFunction(F), TTI(TTI), DT(DT), LAA(LAA), LAI(nullptr), - InterleaveInfo(SE, L, DT, Preds), Induction(nullptr), + InterleaveInfo(SE, L, DT, PredSE), Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false), Requirements(R), Hints(H), - Preds(Preds) {} + PredSE(PredSE) {} /// ReductionList contains the reduction descriptors for all /// of the reductions that were found in the loop. @@ -1405,7 +1405,7 @@ /// context of existing SCEV assumptions. The analysis will also /// add a minimal set of new predicates if this is required to /// enable vectorization/unrolling. 
- SCEVUnionPredicate &Preds; + SCEVPredicatedLayer &PredSE; }; /// LoopVectorizationCostModel - estimates the expected speedups due to @@ -1423,8 +1423,7 @@ const TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC, const Function *F, const LoopVectorizeHints *Hints, - SmallPtrSetImpl &ValuesToIgnore, - SCEVUnionPredicate &Preds) + SmallPtrSetImpl &ValuesToIgnore) : TheLoop(L), SE(SE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB), TheFunction(F), Hints(Hints), ValuesToIgnore(ValuesToIgnore) {} @@ -1754,12 +1753,12 @@ } } - SCEVUnionPredicate Preds; + SCEVPredicatedLayer PredSE(*SE); // Check if it is legal to vectorize the loop. LoopVectorizationRequirements Requirements; LoopVectorizationLegality LVL(L, SE, DT, TLI, AA, F, TTI, LAA, - &Requirements, &Hints, Preds); + &Requirements, &Hints, PredSE); if (!LVL.canVectorize()) { DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n"); emitMissedWarning(F, L, Hints); @@ -1778,7 +1777,7 @@ // Use the cost model. LoopVectorizationCostModel CM(L, SE, LI, &LVL, *TTI, TLI, DB, AC, F, &Hints, - ValuesToIgnore, Preds); + ValuesToIgnore); // Check the function attributes to find out if this function should be // optimized for size. @@ -1889,7 +1888,7 @@ assert(IC > 1 && "interleave count should not be 1 or 0"); // If we decided that it is not legal to vectorize the loop then // interleave it. - InnerLoopUnroller Unroller(L, SE, LI, DT, TLI, TTI, IC, Preds); + InnerLoopUnroller Unroller(L, SE, LI, DT, TLI, TTI, IC, PredSE); Unroller.vectorize(&LVL, CM.MinBWs); emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(), @@ -1897,7 +1896,7 @@ Twine(IC) + ")"); } else { // If we decided that it is *legal* to vectorize the loop then do it. 
- InnerLoopVectorizer LB(L, SE, LI, DT, TLI, TTI, VF.Width, IC, Preds); + InnerLoopVectorizer LB(L, SE, LI, DT, TLI, TTI, VF.Width, IC, PredSE); LB.vectorize(&LVL, CM.MinBWs); ++LoopsVectorized; @@ -2058,7 +2057,7 @@ // %idxprom = zext i32 %mul to i64 << Safe cast. // %arrayidx = getelementptr inbounds i32* %B, i64 %idxprom // - Last = replaceSymbolicStrideSCEV(SE, Strides, Preds, + Last = replaceSymbolicStrideSCEV(PredSE, Strides, Gep->getOperand(InductionOperand), Gep); if (const SCEVCastExpr *C = dyn_cast(Last)) Last = @@ -2760,7 +2759,8 @@ // We want the new basic block to start at the first instruction in a // sequence of instructions that form a check. SCEVExpander Exp(*SE, Bypass->getModule()->getDataLayout(), "scev.check"); - Value *SCEVCheck = Exp.expandCodeForPredicate(&Preds, BB->getTerminator()); + Value *SCEVCheck = Exp.expandCodeForPredicate(&PredSE.getPredicate(), + BB->getTerminator()); if (auto *C = dyn_cast(SCEVCheck)) if (C->isZero()) @@ -4154,7 +4154,7 @@ if (Hints->getForce() == LoopVectorizeHints::FK_Enabled) SCEVThreshold = PragmaVectorizeSCEVCheckThreshold; - if (Preds.getComplexity() > SCEVThreshold) { + if (PredSE.getPredicate().getComplexity() > SCEVThreshold) { emitAnalysis(VectorizationReport() << "Too many SCEV assumptions need to be made and checked " << "at runtime"); @@ -4466,7 +4466,7 @@ } Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks()); - Preds.add(&LAI->Preds); + PredSE.addPredicate(LAI->PredSE.getPredicate()); return true; } @@ -4581,7 +4581,7 @@ StoreInst *SI = dyn_cast(I); Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand(); - int Stride = isStridedPtr(SE, Ptr, TheLoop, Strides, Preds); + int Stride = isStridedPtr(PredSE, Ptr, TheLoop, Strides); // The factor of the corresponding interleave group. 
unsigned Factor = std::abs(Stride); @@ -4590,7 +4590,7 @@ if (Factor < 2 || Factor > MaxInterleaveGroupFactor) continue; - const SCEV *Scev = replaceSymbolicStrideSCEV(SE, Strides, Preds, Ptr); + const SCEV *Scev = replaceSymbolicStrideSCEV(PredSE, Strides, Ptr); PointerType *PtrTy = dyn_cast(Ptr->getType()); unsigned Size = DL.getTypeAllocSize(PtrTy->getElementType());