diff --git a/llvm/include/llvm/Analysis/MemorySSA.h b/llvm/include/llvm/Analysis/MemorySSA.h
--- a/llvm/include/llvm/Analysis/MemorySSA.h
+++ b/llvm/include/llvm/Analysis/MemorySSA.h
@@ -723,6 +723,8 @@
     return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
   }
 
+  auto &getDT() const { return *DT; }
+
   void dump() const;
   void print(raw_ostream &) const;
 
@@ -1179,9 +1181,9 @@
   using BaseT = upward_defs_iterator::iterator_facade_base;
 
 public:
-  upward_defs_iterator(const MemoryAccessPair &Info)
+  upward_defs_iterator(const MemoryAccessPair &Info, DominatorTree *DT)
       : DefIterator(Info.first), Location(Info.second),
-        OriginalAccess(Info.first) {
+        OriginalAccess(Info.first), DT(DT) {
     CurrentPair.first = nullptr;
 
     WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
@@ -1220,12 +1222,13 @@
           const_cast<Value *>(Location.Ptr),
           OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
       if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
-                                        DefIterator.getPhiArgBlock(), nullptr,
-                                        false))
+                                        DefIterator.getPhiArgBlock(), DT,
+                                        false)) {
         if (Translator.getAddr() != Location.Ptr) {
           CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
           return;
         }
+      }
     }
     CurrentPair.second = Location;
   }
@@ -1235,17 +1238,19 @@
   MemoryLocation Location;
   MemoryAccess *OriginalAccess = nullptr;
   bool WalkingPhi = false;
+  DominatorTree *DT = nullptr;
 };
 
-inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
-  return upward_defs_iterator(Pair);
+inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair,
+                                              DominatorTree &DT) {
+  return upward_defs_iterator(Pair, &DT);
 }
 
 inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
 
 inline iterator_range<upward_defs_iterator>
-upward_defs(const MemoryAccessPair &Pair) {
-  return make_range(upward_defs_begin(Pair), upward_defs_end());
+upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT) {
+  return make_range(upward_defs_begin(Pair, DT), upward_defs_end());
 }
 
 /// Walks the defining accesses of MemoryDefs. Stops after we hit something that
diff --git a/llvm/lib/Analysis/MemorySSA.cpp b/llvm/lib/Analysis/MemorySSA.cpp
--- a/llvm/lib/Analysis/MemorySSA.cpp
+++ b/llvm/lib/Analysis/MemorySSA.cpp
@@ -466,7 +466,8 @@
 
       assert(isa<MemoryPhi>(MA));
       Worklist.append(
-          upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}),
+          upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second},
+                            MSSA.getDT()),
           upward_defs_end());
     }
   }
@@ -595,8 +596,8 @@
 
   void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                    ListIndex PriorNode) {
-    auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
-                                 upward_defs_end());
+    auto UpwardDefs = make_range(
+        upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT), upward_defs_end());
     for (const MemoryAccessPair &P : UpwardDefs) {
       PausedSearches.push_back(Paths.size());
       Paths.emplace_back(P.second, P.first, PriorNode);
diff --git a/llvm/test/Analysis/MemorySSA/phi-translation.ll b/llvm/test/Analysis/MemorySSA/phi-translation.ll
--- a/llvm/test/Analysis/MemorySSA/phi-translation.ll
+++ b/llvm/test/Analysis/MemorySSA/phi-translation.ll
@@ -193,3 +193,45 @@
   br label %while.cond
 }
 
+; CHECK-LABEL: define i32 @use_not_optimized_due_to_backedge
+define i32 @use_not_optimized_due_to_backedge(i32* nocapture %m_i_strides, i32* nocapture readonly %eval_left_dims) {
+entry:
+; CHECK: 1 = MemoryDef(liveOnEntry)
+; CHECK-NEXT: store i32 1, i32* %m_i_strides, align 4
+  store i32 1, i32* %m_i_strides, align 4
+  br label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.inc
+  ret i32 %m_i_size.1
+
+for.body:                                         ; preds = %entry, %for.inc
+; CHECK: 4 = MemoryPhi({entry,1},{for.inc,3})
+; CHECK-NEXT: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
+  %m_i_size.022 = phi i32 [ 1, %entry ], [ %m_i_size.1, %for.inc ]
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp1 = icmp eq i64 %indvars.iv, 0
+  %arrayidx2 = getelementptr inbounds i32, i32* %m_i_strides, i64 %indvars.iv
+; CHECK: MemoryUse(4) MayAlias
+; CHECK-NEXT: %0 = load i32, i32* %arrayidx2, align 4
+  %0 = load i32, i32* %arrayidx2, align 4
+  %arrayidx4 = getelementptr inbounds i32, i32* %eval_left_dims, i64 %indvars.iv
+; CHECK: MemoryUse(4) MayAlias
+; CHECK-NEXT: %1 = load i32, i32* %arrayidx4, align 4
+  %1 = load i32, i32* %arrayidx4, align 4
+  %mul = mul nsw i32 %1, %0
+  br i1 %cmp1, label %if.then, label %for.inc
+
+if.then:                                          ; preds = %for.body
+  %arrayidx7 = getelementptr inbounds i32, i32* %m_i_strides, i64 %indvars.iv.next
+; CHECK: 2 = MemoryDef(4)
+; CHECK-NEXT: store i32 %mul, i32* %arrayidx7, align 4
+  store i32 %mul, i32* %arrayidx7, align 4
+  br label %for.inc
+
+for.inc:                                          ; preds = %for.body, %if.then
+; CHECK: 3 = MemoryPhi({for.body,4},{if.then,2})
+; CHECK-NEXT: %m_i_size.1 = phi i32 [ %m_i_size.022, %if.then ], [ %mul, %for.body ]
+  %m_i_size.1 = phi i32 [ %m_i_size.022, %if.then ], [ %mul, %for.body ]
+  br i1 %cmp1, label %for.body, label %for.cond.cleanup
+}
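
Note: a minimal caller sketch of the API change above, not part of the patch. The wrapper function visitUpwardDefs and its name are made up for illustration; it only shows that upward_defs / upward_defs_begin now require a DominatorTree so the iterator can phi-translate the pointer instead of passing nullptr to PHITranslateValue.

// Sketch only: the helper below is hypothetical; the MemorySSA and
// DominatorTree types come from the headers touched in this patch.
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Dominators.h"

using namespace llvm;

static void visitUpwardDefs(MemoryPhi *Phi, const MemoryLocation &Loc,
                            DominatorTree &DT) {
  // Each pair is (defining access on an incoming edge, memory location to
  // keep querying with). With DT threaded through, fillInCurrentPair() can
  // translate Loc.Ptr across the phi's incoming edges.
  for (const MemoryAccessPair &P : upward_defs({Phi, Loc}, DT)) {
    (void)P.first;
    (void)P.second;
  }
}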