diff --git a/llvm/include/llvm/Analysis/AliasAnalysis.h b/llvm/include/llvm/Analysis/AliasAnalysis.h
--- a/llvm/include/llvm/Analysis/AliasAnalysis.h
+++ b/llvm/include/llvm/Analysis/AliasAnalysis.h
@@ -847,6 +847,13 @@
   FunctionModRefBehavior getModRefBehavior(const CallBase *Call) {
     return AA.getModRefBehavior(Call);
   }
+  bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
+    return alias(LocA, LocB) == MustAlias;
+  }
+  bool isMustAlias(const Value *V1, const Value *V2) {
+    return alias(MemoryLocation(V1, LocationSize::precise(1)),
+                 MemoryLocation(V2, LocationSize::precise(1))) == MustAlias;
+  }
 };
 
 /// Temporary typedef for legacy code that uses a generic \c AliasAnalysis
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -383,12 +383,12 @@
 /// write to the same underlying object. In that case, use isPartialOverwrite to
 /// check if \p Later partially overwrites \p Earlier. Returns 'OW_Unknown' if
 /// nothing can be determined.
-static OverwriteResult isOverwrite(const MemoryLocation &Later,
-                                   const MemoryLocation &Earlier,
-                                   const DataLayout &DL,
-                                   const TargetLibraryInfo &TLI,
-                                   int64_t &EarlierOff, int64_t &LaterOff,
-                                   AliasAnalysis &AA, const Function *F) {
+template <typename AATy>
+static OverwriteResult
+isOverwrite(const MemoryLocation &Later, const MemoryLocation &Earlier,
+            const DataLayout &DL, const TargetLibraryInfo &TLI,
+            int64_t &EarlierOff, int64_t &LaterOff, AATy &AA,
+            const Function *F) {
   // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
   // get imprecise values here, though (except for unknown sizes).
   if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise())
@@ -643,11 +643,10 @@
 /// modified between the first and the second instruction.
 /// Precondition: Second instruction must be dominated by the first
 /// instruction.
-static bool memoryIsNotModifiedBetween(Instruction *FirstI,
-                                       Instruction *SecondI,
-                                       AliasAnalysis *AA,
-                                       const DataLayout &DL,
-                                       DominatorTree *DT) {
+template <typename AATy>
+static bool
+memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI, AATy &AA,
+                           const DataLayout &DL, DominatorTree *DT) {
   // Do a backwards scan through the CFG from SecondI to FirstI. Look for
   // instructions which can modify the memory location accessed by SecondI.
   //
@@ -696,7 +695,7 @@
     for (; BI != EI; ++BI) {
       Instruction *I = &*BI;
       if (I->mayWriteToMemory() && I != SecondI)
-        if (isModSet(AA->getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
+        if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
           return false;
     }
     if (B != FirstBB) {
@@ -1132,7 +1131,7 @@
     if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
       if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
           isRemovable(SI) &&
-          memoryIsNotModifiedBetween(DepLoad, SI, AA, DL, DT)) {
+          memoryIsNotModifiedBetween(DepLoad, SI, *AA, DL, DT)) {
 
         LLVM_DEBUG(
             dbgs() << "DSE: Remove Store Of Load from same pointer:\n  LOAD: "
@@ -1151,7 +1150,7 @@
         dyn_cast<Instruction>(getUnderlyingObject(SI->getPointerOperand()));
 
     if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
-        memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA, DL, DT)) {
+        memoryIsNotModifiedBetween(UnderlyingPointer, SI, *AA, DL, DT)) {
       LLVM_DEBUG(
           dbgs() << "DSE: Remove null store to the calloc'ed object:\n  DEAD: "
                  << *Inst << "\n  OBJECT: " << *UnderlyingPointer << '\n');
@@ -1164,11 +1163,10 @@
   return false;
 }
 
-static Constant *
-tryToMergePartialOverlappingStores(StoreInst *Earlier, StoreInst *Later,
-                                   int64_t InstWriteOffset,
-                                   int64_t DepWriteOffset, const DataLayout &DL,
-                                   AliasAnalysis *AA, DominatorTree *DT) {
+template <typename AATy>
+static Constant *tryToMergePartialOverlappingStores(
+    StoreInst *Earlier, StoreInst *Later, int64_t InstWriteOffset,
+    int64_t DepWriteOffset, const DataLayout &DL, AATy &AA, DominatorTree *DT) {
 
   if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
       DL.typeSizeEqualsStoreSize(Earlier->getValueOperand()->getType()) &&
@@ -1361,7 +1359,7 @@
       auto *Earlier = dyn_cast<StoreInst>(DepWrite);
       auto *Later = dyn_cast<StoreInst>(Inst);
       if (Constant *C = tryToMergePartialOverlappingStores(
-              Earlier, Later, InstWriteOffset, DepWriteOffset, DL, AA,
+              Earlier, Later, InstWriteOffset, DepWriteOffset, DL, *AA,
               DT)) {
         auto *SI = new StoreInst(
             C, Earlier->getPointerOperand(), false, Earlier->getAlign(),
@@ -1507,6 +1505,16 @@
 struct DSEState {
   Function &F;
   AliasAnalysis &AA;
+
+  /// The single BatchAA instance that is used to cache AA queries. It will
+  /// not be invalidated over the whole run. This is safe, because:
+  /// 1. Only memory writes are removed, so the alias cache for memory
+  ///    locations remains valid.
+  /// 2. No new instructions are added (only instructions removed), so cached
+  ///    information for a deleted value cannot be accessed by a re-used new
+  ///    value pointer.
+  BatchAAResults BatchAA;
+
   MemorySSA &MSSA;
   DominatorTree &DT;
   PostDominatorTree &PDT;
@@ -1534,7 +1542,7 @@
 
   DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
            PostDominatorTree &PDT, const TargetLibraryInfo &TLI)
-      : F(F), AA(AA), MSSA(MSSA), DT(DT), PDT(PDT), TLI(TLI) {}
+      : F(F), AA(AA), BatchAA(AA), MSSA(MSSA), DT(DT), PDT(PDT), TLI(TLI) {}
 
   static DSEState get(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
                       DominatorTree &DT, PostDominatorTree &PDT,
@@ -1623,7 +1631,7 @@
   }
 
   /// Returns true if \p Use completely overwrites \p DefLoc.
-  bool isCompleteOverwrite(MemoryLocation DefLoc, Instruction *UseInst) const {
+  bool isCompleteOverwrite(MemoryLocation DefLoc, Instruction *UseInst) {
     // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
     // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
    // MemoryDef.
@@ -1638,7 +1646,7 @@
     auto CC = getLocForWriteEx(UseInst);
     const DataLayout &DL = F.getParent()->getDataLayout();
     return CC && isOverwrite(*CC, DefLoc, DL, TLI, DepWriteOffset,
-                             InstWriteOffset, AA, &F) == OW_Complete;
+                             InstWriteOffset, BatchAA, &F) == OW_Complete;
   }
 
   /// Returns true if \p Def is not read before returning from the function.
@@ -1717,7 +1725,7 @@
 
   /// Returns true if \p MaybeTerm is a memory terminator for the same
   /// underlying object as \p DefLoc.
-  bool isMemTerminator(MemoryLocation DefLoc, Instruction *MaybeTerm) const {
+  bool isMemTerminator(MemoryLocation DefLoc, Instruction *MaybeTerm) {
     Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
         getLocForTerminator(MaybeTerm);
 
@@ -1730,11 +1738,11 @@
       DataLayout DL = MaybeTerm->getParent()->getModule()->getDataLayout();
       DefLoc = MemoryLocation(getUnderlyingObject(DefLoc.Ptr));
     }
-    return AA.isMustAlias(MaybeTermLoc->first, DefLoc);
+    return BatchAA.isMustAlias(MaybeTermLoc->first, DefLoc);
   }
 
   // Returns true if \p Use may read from \p DefLoc.
-  bool isReadClobber(MemoryLocation DefLoc, Instruction *UseInst) const {
+  bool isReadClobber(MemoryLocation DefLoc, Instruction *UseInst) {
     if (!UseInst->mayReadFromMemory())
       return false;
 
@@ -1742,7 +1750,7 @@
       if (CB->onlyAccessesInaccessibleMemory())
         return false;
 
-    ModRefInfo MR = AA.getModRefInfo(UseInst, DefLoc);
+    ModRefInfo MR = BatchAA.getModRefInfo(UseInst, DefLoc);
     // If necessary, perform additional analysis.
     if (isRefSet(MR))
       MR = AA.callCapturesBefore(UseInst, DefLoc, &DT);
@@ -1758,7 +1766,7 @@
   Optional<MemoryAccess *>
   getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *Current,
                   MemoryLocation DefLoc, bool DefVisibleToCallerBeforeRet,
-                  bool DefVisibleToCallerAfterRet, unsigned &ScanLimit) const {
+                  bool DefVisibleToCallerAfterRet, unsigned &ScanLimit) {
     if (ScanLimit == 0) {
       LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
       return None;
@@ -2285,7 +2293,7 @@
       // Check if NI overwrites SI.
       int64_t InstWriteOffset, DepWriteOffset;
       OverwriteResult OR = isOverwrite(SILoc, NILoc, DL, TLI, DepWriteOffset,
-                                       InstWriteOffset, State.AA, &F);
+                                       InstWriteOffset, State.BatchAA, &F);
       if (OR == OW_MaybePartial) {
         auto Iter = State.IOLs.insert(
             std::make_pair<BasicBlock *, InstOverlapIntervalsTy>(
@@ -2303,8 +2311,8 @@
           // TODO: implement tryToMergeParialOverlappingStores using MemorySSA.
          if (Earlier && Later && DT.dominates(Earlier, Later)) {
            if (Constant *Merged = tryToMergePartialOverlappingStores(
-                    Earlier, Later, InstWriteOffset, DepWriteOffset, DL, &AA,
-                    &DT)) {
+                    Earlier, Later, InstWriteOffset, DepWriteOffset, DL,
+                    State.BatchAA, &DT)) {
 
              // Update stored value of earlier store to merged constant.
              Earlier->setOperand(0, Merged);
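
Not part of the patch: the sketch below is a minimal illustration, assuming it is compiled against this revision, of how the BatchAAResults wrapper extended above is intended to be used. The helper name cachedMustAlias is hypothetical; only the BatchAAResults constructor and the isMustAlias() overload added in this diff are existing API.

// Hypothetical usage sketch (not part of the patch). BatchAAResults keeps one
// AAQueryInfo alive across queries, so repeated queries on the same locations
// can be answered from that cached state instead of re-running the full AA
// stack each time -- the same reason DSEState holds a single BatchAA member.
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"

using namespace llvm;

// Hypothetical helper; demonstrates the isMustAlias() overload added above.
static bool cachedMustAlias(AAResults &AA, const MemoryLocation &LocA,
                            const MemoryLocation &LocB) {
  BatchAAResults BatchAA(AA);                    // wraps AA with a query cache
  bool First = BatchAA.isMustAlias(LocA, LocB);  // may run the underlying AA stack
  bool Second = BatchAA.isMustAlias(LocA, LocB); // repeat query, served from cache
  return First && Second;
}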