Index: include/polly/ScopInfo.h
===================================================================
--- include/polly/ScopInfo.h
+++ include/polly/ScopInfo.h
@@ -66,6 +66,44 @@
 class Comparison;
 class SCEVAffFunc;
 
+// TODO: Rename to LoadOrStoreInst/MemAccInst; MemInst could include fences
+// etc.
+class MemInst : public Instruction {
+public:
+  Value *getScalar() {
+    if (isa<StoreInst>(this))
+      return cast<StoreInst>(this)->getValueOperand();
+    else
+      return this;
+  }
+  const Value *getScalar() const {
+    if (isa<StoreInst>(this))
+      return cast<StoreInst>(this)->getValueOperand();
+    else
+      return this;
+  }
+
+  // RENAME: getAccessedAddr
+  Value *getPointerOperand() {
+    return getOperand(isa<StoreInst>(this)
+                          ? StoreInst::getPointerOperandIndex()
+                          : LoadInst::getPointerOperandIndex());
+  }
+  const Value *getPointerOperand() const {
+    return getOperand(isa<StoreInst>(this)
+                          ? StoreInst::getPointerOperandIndex()
+                          : LoadInst::getPointerOperandIndex());
+  }
+
+  static inline bool classof(const LoadInst *I) { return true; }
+  static inline bool classof(const StoreInst *I) { return true; }
+  static inline bool classof(const Instruction *I) {
+    return isa<LoadInst>(I) || isa<StoreInst>(I);
+  }
+  static inline bool classof(const Value *V) {
+    return isa<Instruction>(V) && classof(cast<Instruction>(V));
+  }
+};
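+
+// MemInst is never instantiated; the classof() overloads above only make it a
+// legal isa<>/cast<> target so loads and stores can be handled uniformly,
+// e.g. (usage sketch with hypothetical names, not a call site of this patch):
+//   if (auto *MA = dyn_cast<MemInst>(&I))
+//     Addr = MA->getPointerOperand();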
+
 class Comparison {
   const SCEV *LHS;
   const SCEV *RHS;
@@ -214,6 +252,7 @@
 /// @brief Represent memory accesses in statements.
 class MemoryAccess {
+  friend class ScopInfo;
   friend class Scop;
   friend class ScopStmt;
@@ -1122,6 +1161,70 @@
   }
   //@}
 
+  SmallVector<MemoryAccess *, 4> getAccessFunctionsFor(Instruction *Inst) {
+    SmallVector<MemoryAccess *, 4> Result;
+    auto BB = Inst->getParent();
+    if (!AccFuncMap.count(BB))
+      return Result;
+
+    auto &Accs = AccFuncMap[BB];
+    for (auto &Acc : Accs) {
+      if (Acc.getAccessInstruction() == Inst)
+        Result.push_back(&Acc);
+    }
+
+    return Result;
+  }
+
+  MemoryAccess *getScalarWriteAccessFor(Instruction *Inst) const {
+    auto BB = Inst->getParent();
+    if (!AccFuncMap.count(BB))
+      return nullptr;
+
+    MemoryAccess *Result = nullptr;
+    for (auto &Acc : AccFuncMap[BB]) {
+      if (Acc.getAccessInstruction() != Inst)
+        continue;
+      if (!Acc.isWrite())
+        continue;
+      if (!Acc.isScalar())
+        continue;
+
+      assert(!Result);
+      Result = &Acc;
+#ifdef NDEBUG
+      break;
+#endif
+    }
+    return Result;
+  }
+
+  MemoryAccess *getScalarReadAccessFor(const Use &Use) {
+    auto Inst = cast<Instruction>(Use.getUser());
+
+    auto BB = Inst->getParent();
+    if (!AccFuncMap.count(BB))
+      return nullptr;
+
+    MemoryAccess *Result = nullptr;
+    for (auto &Acc : AccFuncMap[BB]) {
+      if (Acc.getAccessInstruction() != Inst)
+        continue;
+      if (!Acc.isRead())
+        continue;
+      if (!Acc.isScalar())
+        continue;
+      // TODO: Check if matching OperandNo
+
+      assert(!Result);
+      Result = &Acc;
+#ifdef NDEBUG
+      break;
+#endif
+    }
+    return Result;
+  }
+
   /// @brief Print data access information.
   ///
   /// @param OS The output stream the access functions is printed to.
@@ -1409,6 +1512,9 @@
   return O;
 }
 
+typedef std::pair<BasicBlock *, BasicBlock *> EdgeTy;
+typedef DenseSet<EdgeTy> EdgeSetTy;
+
 ///===---------------------------------------------------------------------===//
 /// @brief Build the Polly IR (Scop and ScopStmt) on a Region.
 ///
@@ -1452,13 +1558,20 @@
   // Build the SCoP for Region @p R.
   Scop *buildScop(Region &R, DominatorTree &DT);
 
+  Value *extractBasePointer(Value *Addr);
+  const ScopArrayInfo *getOrCreateSAI(MemInst *SI);
+
+  void addMemoryAccessForMem(Instruction *AccessInstruction, Value *Scalar,
+                             enum MemoryAccess::AccessType Type, MemInst *Inst,
+                             bool IsPHI = false);
+
   /// @brief Build an instance of MemoryAccess from the Load/Store instruction.
   ///
   /// @param Inst       The Load/Store instruction that access the memory
   /// @param L          The parent loop of the instruction
   /// @param R          The region on which to build the data access dictionary.
   /// @param BoxedLoops The set of loops that are overapproximated in @p R.
-  void buildMemoryAccess(Instruction *Inst, Loop *L, Region *R,
+  void buildMemoryAccess(MemInst *Inst, Loop *L, Region *R,
                          const ScopDetection::BoxedLoopsSetTy *BoxedLoops);
 
   /// @brief Analyze and extract the cross-BB scalar dependences (or,
@@ -1536,6 +1649,151 @@
                     AccessValue, Subscripts, Sizes, false);
   }
 
+  void addScalarWriteAccess(Instruction *Inst) {
+    auto Store = getScalarMappedAddr(Inst);
+    if (!Store) {
+      addMemoryAccess(Inst->getParent(), Inst, MemoryAccess::MUST_WRITE, Inst,
+                      ZeroOffset, 1, true, Inst, ArrayRef<const SCEV *>(),
+                      ArrayRef<const SCEV *>(), false);
+      return;
+    }
+
+    if (isa<PHINode>(Inst) && Store == getPHIMappedAddr(cast<PHINode>(Inst)))
+      return;
+    addMemoryAccessForMem(Inst, Inst, MemoryAccess::MUST_WRITE,
+                          cast<MemInst>(Store));
+  }
+
+  void addScalarReadAccess(Instruction *Inst, Instruction *Scalar) {
+    auto Store = getScalarMappedAddr(Scalar);
+    if (!Store) {
+      addMemoryAccess(Inst->getParent(), Inst, MemoryAccess::READ, Scalar,
+                      ZeroOffset, 1, true, Scalar, ArrayRef<const SCEV *>(),
+                      ArrayRef<const SCEV *>(), false);
+      return;
+    }
+
+    // Check whether the read is redundant, i.e. every user of Inst is either
+    // the mapped store itself or a PHI mapped to the same address.
+    bool AllUsersMapped = true;
+    for (auto User : Inst->users()) {
+      if (User == Store)
+        continue;
+
+      auto UserInst = dyn_cast<PHINode>(User);
+      if (!UserInst) {
+        AllUsersMapped = false;
+        break;
+      }
+
+      if (getPHIMappedAddr(UserInst) != Store) {
+        AllUsersMapped = false;
+        break;
+      }
+    }
+    if (AllUsersMapped)
+      return;
+
+    addMemoryAccessForMem(Inst, Scalar, MemoryAccess::READ,
+                          cast<MemInst>(Store));
+  }
+
+  void addPHIWriteAccess(TerminatorInst *Inst, PHINode *PHI,
+                         Value *AccessValue, bool IsPHI = false) {
+    auto AddrAlias = getPHIMappedAddr(PHI);
+    if (!AddrAlias) {
+      addMemoryAccess(Inst->getParent(), Inst, MemoryAccess::MUST_WRITE, PHI,
+                      ZeroOffset, 1, true, AccessValue,
+                      ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(),
+                      IsPHI);
+      return;
+    }
+
+    if (isa<Instruction>(AccessValue) &&
+        getScalarMappedAddr(cast<Instruction>(AccessValue)) == AddrAlias)
+      return;
+    addMemoryAccessForMem(Inst, AccessValue, MemoryAccess::MUST_WRITE,
+                          cast<MemInst>(AddrAlias), IsPHI);
+  }
+  void addPHIReadAccess(PHINode *PHI, bool IsPHI = false) {
+    auto AddrAlias = getPHIMappedAddr(PHI);
+    if (!AddrAlias) {
+      addMemoryAccess(PHI->getParent(), PHI, MemoryAccess::READ, PHI,
+                      ZeroOffset, 1, true, PHI, ArrayRef<const SCEV *>(),
+                      ArrayRef<const SCEV *>(), IsPHI);
+      return;
+    }
+
+    if (AddrAlias == getScalarMappedAddr(PHI))
+      return;
+    addMemoryAccessForMem(PHI, PHI, MemoryAccess::READ,
+                          cast<MemInst>(AddrAlias), IsPHI);
+  }
+
+  DenseMap<Instruction *, StoreInst *> CollapseScalar;
+  DenseMap<PHINode *, StoreInst *> CollapsePHI;
+  DenseSet<StoreInst *> RedundantStores;
+  DenseSet<PHINode *> RedundantPHIs;
+
+  // TODO: Replace the StoreInst by a tuple/object (const SCEV *, llvm::Type,
+  // size, AAMetadata) or MemInst
+  StoreInst *getScalarMappedAddr(Instruction *Inst) {
+    auto Iter = CollapseScalar.find(Inst);
+    if (Iter == CollapseScalar.end())
+      return nullptr;
+    return Iter->getSecond();
+  }
+
+  StoreInst *getPHIMappedAddr(PHINode *PHI) {
+    auto Iter = CollapsePHI.find(PHI);
+    if (Iter == CollapsePHI.end())
+      return nullptr;
+    return Iter->getSecond();
+  }
+
+  bool isStoreRedundant(StoreInst *SI) {
+    auto Scalar = dyn_cast<Instruction>(SI->getValueOperand());
+    if (!Scalar)
+      return false;
+    return getScalarMappedAddr(Scalar) == SI;
+  }
+
+  bool isStoreRedundant(PHINode *PHI) {
+    auto SI = getPHIMappedAddr(PHI);
+    if (!SI)
+      return false;
+
+    for (auto &Incoming : PHI->incoming_values()) {
+      auto UsedValue = dyn_cast<Instruction>(Incoming.get());
+      if (!UsedValue) {
+        // Writing a constant. We could emulate this by writing the constant
+        // to the array, but then the store is not perfectly redundant.
+        // FIXME: Is this method required at all?
+        return false;
+      }
+      auto Loc = getScalarMappedAddr(UsedValue);
+      if (Loc != SI)
+        return false;
+    }
+    return true;
+  }
+
+  void greedyCollapse(Region &R);
+  void greedyCollapseStore(Region &R, StoreInst *Store);
+
+  std::map<Instruction *, EdgeSetTy> AliveValuesCache;
+  EdgeSetTy &getLiveEdges(Instruction *Val);
+
+  // std::map<PHINode *, EdgeSetTy> PHIEdgesCache;
+  EdgeSetTy getPHIEdges(PHINode *PHI);
+
+  bool mayRead(StoreInst *SI, LoadInst *LI);
+
+  /// Edges where writing a value to the array doesn't matter because it will
+  /// be overwritten anyway.
+  EdgeSetTy computeNoUseZones(StoreInst *SI);
+  // EdgeSetTy computeNoChangeZones(Value *Addr);
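+
+  // Example: in  for (i = 0; i < n; i += 1) B[j] += i;  after LICM has
+  // promoted B[j], the load sits in the preheader and the store in the loop
+  // exit. No load of B[j] is reachable along the edges inside the loop, so
+  // all loop edges form a no-use zone of that store, and the loop-carried
+  // scalar chain (the PHI and the add) can be collapsed onto B[j] itself.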
+
 public:
   static char ID;
   explicit ScopInfo();
Index: lib/Analysis/ScopInfo.cpp
===================================================================
--- lib/Analysis/ScopInfo.cpp
+++ lib/Analysis/ScopInfo.cpp
@@ -35,6 +35,7 @@
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/RegionIterator.h"
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/PostDominators.h"
 #include "llvm/Support/Debug.h"
 #include "isl/aff.h"
 #include "isl/constraint.h"
@@ -290,7 +291,6 @@
     }
   }
 
-
 /// @brief Derive the individual index expressions from a GEP instruction
 ///
 /// This function optimistically assumes the GEP references into a fixed size
@@ -599,7 +599,7 @@
     AccessRelation = isl_map_flat_range_product(AccessRelation, SubscriptMap);
   }
 
-  if (Access.Sizes.size() > 1 && !isa<SCEVConstant>(Access.Sizes[0]))
+  if (Sizes.size() > 1 && !isa<SCEVConstant>(Sizes[0]))
     AccessRelation = foldAccess(AccessRelation, Statement);
 
   Space = Statement->getDomainSpace();
@@ -2781,26 +2781,27 @@
       // As we pretend there is a use (or more precise a write) of OpI in OpBB
       // we have to insert a scalar dependence from the definition of OpI to
      // OpBB if the definition is not in OpBB.
+      // FIXME: What happens if this scalar is already demoted, e.g. because
+      // it is used in another BB?
       if (OpIBB != OpBB) {
-        addMemoryAccess(OpBB, PHI, MemoryAccess::READ, OpI, ZeroOffset, 1, true,
-                        OpI);
-        addMemoryAccess(OpIBB, OpI, MemoryAccess::MUST_WRITE, OpI, ZeroOffset,
-                        1, true, OpI);
+        addScalarReadAccess(PHI, OpI);
+        addScalarWriteAccess(OpI);
       }
     }
 
     // Always use the terminator of the incoming basic block as the access
     // instruction.
-    OpI = OpBB->getTerminator();
+    auto WriterInst = OpBB->getTerminator();
 
-    addMemoryAccess(OpBB, OpI, MemoryAccess::MUST_WRITE, PHI, ZeroOffset, 1,
-                    true, Op, /* IsPHI */ !IsExitBlock);
+    addPHIWriteAccess(WriterInst, PHI, Op, /* IsPHI */ !IsExitBlock);
   }
 
   if (!OnlyNonAffineSubRegionOperands) {
-    addMemoryAccess(PHI->getParent(), PHI, MemoryAccess::READ, PHI, ZeroOffset,
-                    1, true, PHI,
-                    /* IsPHI */ !IsExitBlock);
+    addPHIReadAccess(PHI, /* IsPHI */ !IsExitBlock); // TODO: The IsPHI flag is
+                                                     // meaningless here; all
+                                                     // MemoryAccesses used
+                                                     // this way have
+                                                     // IsPHI=true.
   }
 }
@@ -2856,8 +2857,7 @@
     // Do not build a read access that is not in the current SCoP
     // Use the def instruction as base address of the MemoryAccess, so that it
     // will become the name of the scalar access in the polyhedral form.
-    addMemoryAccess(UseParent, UI, MemoryAccess::READ, Inst, ZeroOffset, 1,
-                    true, Inst);
+    addScalarReadAccess(UI, Inst);
   }
 
   if (ModelReadOnlyScalars) {
@@ -2882,9 +2882,112 @@
 
 extern MapInsnToMemAcc InsnToMemAcc;
 
+Value *ScopInfo::extractBasePointer(Value *Addr) {
+  Loop *L = isa<Instruction>(Addr)
+                ? LI->getLoopFor(cast<Instruction>(Addr)->getParent())
+                : nullptr;
+  const SCEV *AccessFunction = SE->getSCEVAtScope(Addr, L);
+  const SCEVUnknown *BasePointer =
+      dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFunction));
+  assert(BasePointer && "Could not find base pointer");
+  return BasePointer->getValue();
+}
+
+const ScopArrayInfo *ScopInfo::getOrCreateSAI(MemInst *SI) {
+  // TODO: Refactor with ScopInfo::buildMemoryAccess
+  auto Addr = SI->getPointerOperand();
+  auto BasePtr = extractBasePointer(Addr);
+  auto EltType = Addr->getType()->getPointerElementType();
+
+  auto AccItr = InsnToMemAcc.find(SI);
+  if (PollyDelinearize && AccItr != InsnToMemAcc.end()) {
+    return scop->getOrCreateScopArrayInfo(
+        BasePtr, EltType, AccItr->second.Shape->DelinearizedSizes);
+  }
+
+  auto Size = TD->getTypeStoreSize(EltType);
+  return scop->getOrCreateScopArrayInfo(
+      BasePtr, EltType,
+      ArrayRef<const SCEV *>(SE->getConstant(ZeroOffset->getType(), Size)));
+}
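+
+/// Create a MemoryAccess of kind @p Type for the memory location addressed
+/// by the load/store @p Inst, attributed to @p AccessInstruction. @p Scalar
+/// may differ from the accessed value when a demoted scalar has been mapped
+/// onto @p Inst's location. Prefers a delinearized multi-dimensional access
+/// and falls back to a single-subscript access, demoting MUST_WRITE to
+/// MAY_WRITE when the subscript is not affine.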
+void ScopInfo::addMemoryAccessForMem(Instruction *AccessInstruction,
+                                     Value *Scalar,
+                                     enum MemoryAccess::AccessType Type,
+                                     MemInst *Inst, bool IsPHI) {
+  auto &R = scop->getRegion();
+
+  Value *Val = Inst->getScalar();
+  auto *SizeType = Val->getType();
+  auto Size = TD->getTypeStoreSize(SizeType);
+  auto Addr = Inst->getPointerOperand();
+
+  Loop *L = isa<Instruction>(Addr)
+                ? LI->getLoopFor(cast<Instruction>(Addr)->getParent())
+                : nullptr;
+  const SCEV *AccessFunction = SE->getSCEVAtScope(Addr, L);
+  const SCEVUnknown *BasePointer =
+      dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFunction));
+
+  assert(BasePointer && "Could not find base pointer");
+  AccessFunction = SE->getMinusSCEV(AccessFunction, BasePointer);
+
+  auto AccItr = InsnToMemAcc.find(Inst);
+  if (PollyDelinearize && AccItr != InsnToMemAcc.end()) {
+    addMemoryAccess(AccessInstruction->getParent(), AccessInstruction, Type,
+                    BasePointer->getValue(), AccessFunction, Size, true,
+                    Scalar, AccItr->second.DelinearizedSubscripts,
+                    AccItr->second.Shape->DelinearizedSizes, IsPHI);
+    return;
+  }
+
+  // Check if the access depends on a loop contained in a non-affine
+  // subregion.
+  auto BoxedLoops = SD->getBoxedLoops(&R);
+  bool isVariantInNonAffineLoop = false;
+  if (BoxedLoops) {
+    SetVector<const Loop *> Loops;
+    findLoops(AccessFunction, Loops);
+    for (const Loop *L : Loops)
+      if (BoxedLoops->count(L))
+        isVariantInNonAffineLoop = true;
+  }
+
+  bool IsAffine =
+      !isVariantInNonAffineLoop &&
+      isAffineExpr(&R, AccessFunction, *SE, BasePointer->getValue());
+
+  if (!IsAffine && Type == MemoryAccess::MUST_WRITE)
+    Type = MemoryAccess::MAY_WRITE;
+
+  // FIXME: Is this correct? This should refer to the number of elements in
+  // the array, not the size of a single element.
+  auto SCEVSize = SE->getConstant(ZeroOffset->getType(), Size);
+
+  addMemoryAccess(AccessInstruction->getParent(), AccessInstruction, Type,
+                  BasePointer->getValue(), AccessFunction, Size, IsAffine,
+                  Scalar, ArrayRef<const SCEV *>(AccessFunction),
+                  ArrayRef<const SCEV *>(SCEVSize), IsPHI);
+}
+
 void ScopInfo::buildMemoryAccess(
-    Instruction *Inst, Loop *L, Region *R,
+    MemInst *Inst, Loop *L, Region *R,
     const ScopDetection::BoxedLoopsSetTy *BoxedLoops) {
+  if (auto SI = dyn_cast<StoreInst>(Inst)) {
+    auto Scalar = dyn_cast<Instruction>(SI->getScalar());
+    if (Scalar) {
+      auto ScalarMapped = getScalarMappedAddr(Scalar);
+      if (ScalarMapped &&
+          SE->getSCEV(ScalarMapped->getPointerOperand()) ==
+              SE->getSCEV(SI->getPointerOperand()))
+        return; // Storing to the same mapped memory, i.e. redundant
+    }
+  }
+
+  addMemoryAccessForMem(Inst, Inst->getScalar(),
+                        isa<LoadInst>(Inst) ? MemoryAccess::READ
+                                            : MemoryAccess::MUST_WRITE,
+                        Inst);
+  return;
+
+  // FIXME: Everything below is now unreachable; the old lowering is kept for
+  // reference only.
   unsigned Size;
   Type *SizeType;
   Value *Val;
@@ -2944,8 +3047,8 @@
       SizesSCEV.push_back(SE->getSCEV(ConstantInt::get(
           IntegerType::getInt64Ty(BasePtr->getContext()), Size)));
 
-      return IRAccess(Type, BasePointer->getValue(), AccessFunction, Size,
-                      true, Subscripts, SizesSCEV, Val);
+      addMemoryAccess(Inst->getParent(), Inst, Type, BasePointer->getValue(),
+                      AccessFunction, Size, true, Subscripts, SizesSCEV, Val);
     }
   }
 }
@@ -2974,7 +3077,10 @@
 
   SmallVector<const SCEV *, 4> Subscripts, Sizes;
   Subscripts.push_back(AccessFunction);
-  Sizes.push_back(SE->getConstant(ZeroOffset->getType(), Size));
+  Sizes.push_back(SE->getConstant(
+      ZeroOffset->getType(), Size)); // TODO: Is this correct? This should
+                                     // refer to the number of elements in the
+                                     // array, not the size of a single element
 
   if (!IsAffine && Type == MemoryAccess::MUST_WRITE)
     Type = MemoryAccess::MAY_WRITE;
@@ -3017,16 +3123,15 @@
       if (!PHI && IsExitBlock)
         break;
 
-      if (isa<LoadInst>(Inst) || isa<StoreInst>(Inst))
-        buildMemoryAccess(Inst, L, &R, BoxedLoops);
+      if (isa<MemInst>(Inst))
+        buildMemoryAccess(cast<MemInst>(Inst), L, &R, BoxedLoops);
 
       if (isIgnoredIntrinsic(Inst))
         continue;
 
       if (buildScalarDependences(Inst, &R, NonAffineSubRegion)) {
         if (!isa<StoreInst>(Inst))
-          addMemoryAccess(&BB, Inst, MemoryAccess::MUST_WRITE, Inst,
-                          ZeroOffset, 1, true, Inst);
+          addScalarWriteAccess(Inst);
       }
     }
   }
@@ -3049,9 +3154,196 @@
                   Subscripts, Sizes, AccessValue, IsPHI, BaseName);
 }
 
+static AliasSet *getAliasSetFor(AliasSetTracker *AST, MemInst *I) {
+  auto Scalar = I->getScalar();
+  auto &DL = I->getModule()->getDataLayout();
+  AAMDNodes AAInfo;
+  I->getAAMetadata(AAInfo);
+  return &AST->getAliasSetForPointer(
+      Scalar, DL.getTypeStoreSize(Scalar->getType()), AAInfo);
+}
+
+void ScopInfo::greedyCollapse(Region &R) {
+  for (auto BB : R.blocks()) {
+    for (auto &I : *BB) {
+      if (!isa<StoreInst>(I))
+        continue;
+      greedyCollapseStore(R, cast<StoreInst>(&I));
+    }
+  }
+}
+
+static bool isSubset(EdgeSetTy &Superset, EdgeSetTy &Subset) {
+  for (auto &Edge : Subset) {
+    if (!Superset.count(Edge))
+      return false;
+  }
+  return true;
+}
+
+static void subtractSet(EdgeSetTy &Set, EdgeSetTy &ToRemove) {
+  for (auto &Edge : ToRemove) {
+    Set.erase(Edge);
+  }
+}
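+
+// Map the scalars feeding @p Store onto the array element the store writes
+// to: starting from the stored value, greedily walk up the def-use chain and
+// collapse every instruction (and PHI) whose live edges still fit into the
+// remaining no-use zone of the store's location.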
+void ScopInfo::greedyCollapseStore(Region &R, StoreInst *Store) {
+  auto SAI = getOrCreateSAI(cast<MemInst>(Store));
+  auto Addr = Store->getPointerOperand();
+  auto StoredVal = dyn_cast<Instruction>(Store->getValueOperand());
+  if (!StoredVal)
+    return;
+
+  auto NoUseZone = computeNoUseZones(Store);
+
+  SmallVector<Instruction *, 8> Worklist;
+  Worklist.push_back(StoredVal);
+
+  do {
+    auto Val = Worklist.pop_back_val();
+
+    if (CollapseScalar.count(Val))
+      continue;
+
+    // Don't try to demote synthesizable values
+    if (canSynthesize(Val, LI, SE, &R))
+      continue;
+
+    auto &Edges = getLiveEdges(Val);
+    if (!isSubset(NoUseZone, Edges))
+      continue;
+
+    CollapseScalar[Val] = Store; // TODO: Only collapse last instr of BB ?!?
+    subtractSet(NoUseZone, Edges);
+
+    if (auto PHI = dyn_cast<PHINode>(Val)) {
+      if (CollapsePHI.count(PHI))
+        continue;
+
+      auto PHIEdges = getPHIEdges(PHI);
+      if (isSubset(NoUseZone, PHIEdges)) {
+        CollapsePHI[PHI] = Store;
+        subtractSet(NoUseZone, PHIEdges);
+      }
+    }
+
+    for (auto &Use : Val->operands()) {
+      auto UseVal = dyn_cast<Instruction>(Use.get());
+      if (UseVal)
+        Worklist.push_back(UseVal);
+    }
+
+  } while (!Worklist.empty());
+}
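+
+// Approximate the set of edges on which @p Val is live: for every user, walk
+// the immediate-dominator chain from the user's block up to the defining
+// block and record the traversed (idom, block) pairs. Note these are
+// dominator-tree edges standing in for the CFG edges along which the value
+// is actually live.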
+EdgeSetTy &ScopInfo::getLiveEdges(Instruction *Val) {
+  auto It = AliveValuesCache.find(Val);
+  if (It != AliveValuesCache.end())
+    return It->second;
+
+  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+
+  auto &Edges = AliveValuesCache[Val];
+  for (auto User : Val->users()) {
+    if (!isa<Instruction>(User))
+      continue;
+    auto UserInst = cast<Instruction>(User);
+    auto UserNode = DT.getNode(UserInst->getParent());
+
+    while (UserNode->getBlock() != Val->getParent()) {
+      auto ParentNode = UserNode->getIDom();
+      Edges.insert(
+          std::make_pair(ParentNode->getBlock(), UserNode->getBlock()));
+      UserNode = ParentNode;
+    }
+  }
+  return Edges;
+}
+
+EdgeSetTy ScopInfo::getPHIEdges(PHINode *PHI) {
+  // TODO: Maybe we need to mark all edges from the incoming value definition
+  EdgeSetTy Result;
+  auto Parent = PHI->getParent();
+  for (auto Incoming :
+       llvm::make_range(PHI->block_begin(), PHI->block_end())) {
+    Result.insert(std::make_pair(Incoming, Parent));
+  }
+  return Result;
+}
+
+bool ScopInfo::mayRead(StoreInst *SI, LoadInst *LI) {
+  auto AATest = AA->alias(MemoryLocation::get(SI), MemoryLocation::get(LI));
+  if (AATest == NoAlias)
+    return false;
+
+  auto LoadSAI = getOrCreateSAI(cast<MemInst>(LI));
+  if (LoadSAI->getBasePtrOriginSAI())
+    LoadSAI = LoadSAI->getBasePtrOriginSAI();
+
+  auto WriteSAI = getOrCreateSAI(cast<MemInst>(SI));
+  if (WriteSAI->getBasePtrOriginSAI())
+    WriteSAI = WriteSAI->getBasePtrOriginSAI();
+
+  return LoadSAI == WriteSAI;
+}
+
+EdgeSetTy ScopInfo::computeNoUseZones(StoreInst *SI) {
+  auto Addrs = SI->getPointerOperand();
+  auto SAI = getOrCreateSAI(cast<MemInst>(SI));
+
+  auto &R = scop->getRegion();
+  auto RI = R.getRegionInfo();
+
+  EdgeSetTy Result;
+
+  auto StoreBB = SI->getParent();
+  auto &PD = getAnalysis<PostDominatorTree>();
+  auto BBNode = PD.getNode(StoreBB);
+
+  SmallVector<BasicBlock *, 8> Worklist;
+  SmallVector<BasicBlock *, 8> Postdominated;
+  DenseSet<BasicBlock *> ReachableRead;
+  // Postdominated.push_back(SI->getParent());
+  PD.getDescendants(SI->getParent(), Postdominated);
+  for (auto Node : Postdominated) {
+    for (auto &Inst : *Node) {
+      if (!isa<LoadInst>(Inst))
+        continue;
+      auto &LI = cast<LoadInst>(Inst);
+      if (mayRead(SI, &LI)) {
+        Worklist.push_back(Node);
+        break;
+      }
+    }
+  }
+
+  while (!Worklist.empty()) {
+    auto BB = Worklist.pop_back_val();
+    if (ReachableRead.count(BB))
+      continue;
+    if (!PD.dominates(StoreBB, BB))
+      continue;
+    ReachableRead.insert(BB);
+    for (auto Pred : predecessors(BB)) {
+      Worklist.push_back(Pred);
+    }
+  }
+
+  for (auto Node : Postdominated) {
+    if (ReachableRead.count(Node))
+      continue;
+
+    for (auto Pred : predecessors(Node))
+      Result.insert(std::make_pair(Pred, Node));
+  }
+
+  return Result;
+}
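+
+// Note: greedyCollapse runs before buildAccessFunctions, so the Collapse*
+// maps are already populated when the addScalar*/addPHI* helpers and
+// buildMemoryAccess decide whether an access is mapped or redundant.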
+
 Scop *ScopInfo::buildScop(Region &R, DominatorTree &DT) {
   unsigned MaxLoopDepth = getMaxLoopDepthInRegion(R, *LI, *SD);
-  Scop *S = new Scop(R, AccFuncMap, *SE, DT, ctx, MaxLoopDepth);
+  scop = new Scop(R, AccFuncMap, *SE, DT, ctx, MaxLoopDepth);
+
+  greedyCollapse(R);
 
   buildAccessFunctions(R, R);
 
@@ -3065,8 +3357,8 @@
   if (!R.getExitingBlock())
     buildAccessFunctions(R, *R.getExit(), nullptr, /* IsExitBlock */ true);
 
-  S->init(*LI, *SD, *AA);
-  return S;
+  scop->init(*LI, *SD, *AA);
+  return scop;
 }
 
 void ScopInfo::print(raw_ostream &OS, const Module *) const {
@@ -3103,6 +3395,7 @@
   AU.addRequired<LoopInfoWrapperPass>();
   AU.addRequired<RegionInfoPass>();
   AU.addRequired<DominatorTreeWrapperPass>();
+  AU.addRequired<PostDominatorTree>();
   AU.addRequiredTransitive<ScalarEvolution>();
   AU.addRequiredTransitive<ScopDetection>();
   AU.addRequired<AliasAnalysis>();
Index: test/ScopInfo/licm_reduction.ll
===================================================================
--- test/ScopInfo/licm_reduction.ll
+++ test/ScopInfo/licm_reduction.ll
@@ -1,7 +1,4 @@
-; RUN: opt %loadPolly -basicaa -loop-rotate -indvars -polly-prepare -polly-scops -analyze < %s | FileCheck %s
-; RUN: opt %loadPolly -basicaa -loop-rotate -indvars -licm -polly-prepare -polly-scops -analyze < %s | FileCheck %s
-;
-; XFAIL: *
+; RUN: opt %loadPolly -polly-detect-unprofitable -basicaa -loop-rotate -indvars -licm -polly-prepare -polly-scops -analyze < %s | FileCheck %s
 ;
 ; void test(int n, double B[static const restrict n], int j) {
 ;   for (int i = 0; i < n; i += 1) {