diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -1394,284 +1394,345 @@
 // The above MemDep based methods will eventually be removed (if they are unused
 // by those below).
-enum class WalkType { Bad, Next };
-struct WalkResult {
-  WalkType Type;
-  MemoryDef *Next;
+/// Container for state required for MemorySSA-driven DSE and related functions.
+struct DSEState {
+  Function &F;
+  AliasAnalysis &AA;
+  MemorySSA &MSSA;
+  DominatorTree &DT;
+  PostDominatorTree &PDT;
+  const TargetLibraryInfo &TLI;
+
+  DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
+           PostDominatorTree &PDT, const TargetLibraryInfo &TLI)
+      : F(F), AA(AA), MSSA(MSSA), DT(DT), PDT(PDT), TLI(TLI) {}

-  operator bool() { return Type != WalkType::Bad; }
-};
+  // Any that should be skipped as they are already deleted.
+  SmallPtrSet<MemoryAccess *, 4> SkipStores;
+  // A map of interval maps representing partially-overwritten value parts.
+  InstOverlapIntervalsTy IOL;
+  // Keep track of all of the objects that are invisible to the caller until
+  // the function returns.
+  SmallSetVector<const Value *, 16> InvisibleToCaller;
+  // Keep track of blocks with throwing instructions not modeled in MemorySSA.
+  // For each block with throwing instructions, we number all instructions in
+  // the block, starting with 0 and incrementing the counter for throwing
+  // instructions. If 2 instructions have the same number there are no throwing
+  // instructions between them.
+  DenseMap<BasicBlock *, DenseMap<Instruction *, unsigned>> ThrowingBlocks;
+
+  // Post-order numbers for basic blocks in the function.
+  DenseMap<BasicBlock *, unsigned> PostOrderNumbers;

-Optional<MemoryLocation> getLocForWriteEx(Instruction *I,
-                                          const TargetLibraryInfo &TLI) {
-  if (!I->mayWriteToMemory())
-    return None;
+  // All MemoryDef stores
+  SmallVector<MemoryDef *, 64> Stores;

-  if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I))
-    return {MemoryLocation::getForDest(MTI)};
+  void init() {
+    unsigned PO = 0;
+    // Collect blocks with throwing instructions, alloc-like objects and
+    // post-order numbers for basic blocks.
+    for (BasicBlock *BB : post_order(&F)) {
+      PostOrderNumbers[BB] = PO++;
+
+      unsigned ThrowingCnt = 0;
+      DenseMap<Instruction *, unsigned> BlockMap;
+      for (Instruction &I : *BB) {
+        if (I.mayThrow() && !MSSA.getMemoryAccess(&I))
+          ThrowingCnt++;
+
+        BlockMap[&I] = ThrowingCnt;
+
+        auto *MD = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(&I));
+        if (MD && hasAnalyzableMemoryWrite(&I, TLI) && isRemovable(&I))
+          Stores.push_back(MD);
+
+        // Track alloca and alloca-like objects. Here we care about objects not
+        // visible to the caller during function execution. Alloca objects are
+        // invalid in the caller; for alloca-like objects we ensure that they
+        // are not captured throughout the function.
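+        // (For example, a malloc'ed buffer whose address is never stored or
+        // otherwise captured cannot be observed by the caller, so stores into
+        // it that are later overwritten become candidates for elimination.)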
+        if (isa<AllocaInst>(&I) ||
+            (isAllocLikeFn(&I, &TLI) && !PointerMayBeCaptured(&I, false, true)))
+          InvisibleToCaller.insert(&I);
+      }

-  if (auto CS = CallSite(I)) {
-    if (Function *F = CS.getCalledFunction()) {
-      StringRef FnName = F->getName();
-      if (TLI.has(LibFunc_strcpy) && FnName == TLI.getName(LibFunc_strcpy))
-        return {MemoryLocation(CS.getArgument(0))};
-      if (TLI.has(LibFunc_strncpy) && FnName == TLI.getName(LibFunc_strncpy))
-        return {MemoryLocation(CS.getArgument(0))};
-      if (TLI.has(LibFunc_strcat) && FnName == TLI.getName(LibFunc_strcat))
-        return {MemoryLocation(CS.getArgument(0))};
-      if (TLI.has(LibFunc_strncat) && FnName == TLI.getName(LibFunc_strncat))
-        return {MemoryLocation(CS.getArgument(0))};
+      // Only add the mapping for blocks with any throwing instructions.
+      if (ThrowingCnt != 0)
+        ThrowingBlocks[BB] = BlockMap;
     }
-    return None;
+    // Treat byval or inalloca arguments the same as Allocas: stores to them
+    // are dead at the end of the function.
+    for (Argument &AI : F.args())
+      if (AI.hasByValOrInAllocaAttr())
+        InvisibleToCaller.insert(&AI);
   }
-  return MemoryLocation::getOrNone(I);
-}

+  // Check for any extra throws between SI and NI that block DSE. This only
+  // checks extra maythrows (those that aren't MemoryDefs). MemoryDefs that may
+  // throw are handled during the walk from one def to the next.
+  bool mayThrowBetween(Instruction *SI, Instruction *NI,
+                       const Value *SILocUnd) {
+    // First see if we can ignore it by using the fact that SI writes to an
+    // alloca/alloca-like object that is not visible to the caller during
+    // execution of the function.
+    if (SILocUnd && InvisibleToCaller.count(SILocUnd))
+      return false;

-// Returns true if \p Use may read from \p DefLoc.
-bool isReadClobber(MemoryLocation DefLoc, Instruction *UseInst,
-                   AliasAnalysis &AA, const TargetLibraryInfo &TLI,
-                   DominatorTree &DT) {
-  if (!UseInst->mayReadFromMemory())
-    return false;
+    if (SI->getParent() == NI->getParent()) {
+      auto BlockMap = ThrowingBlocks.find(SI->getParent());
+      if (BlockMap == ThrowingBlocks.end())
+        return false;
+      return BlockMap->second[SI] !=
+             BlockMap->second[&*std::prev(NI->getIterator())];
+    }
+    return !ThrowingBlocks.empty();
+  }

-  if (auto CS = CallSite(UseInst))
-    if (CS.onlyAccessesInaccessibleMemory())
-      return false;
+  // Check if \p NI acts as a DSE barrier for \p SI. The following instructions
+  // act as barriers:
+  //  * A memory instruction that may throw and \p SI accesses a non-stack
+  //    object.
+  //  * Atomic instructions stronger than monotonic.
+  bool isDSEBarrier(Instruction *SI, MemoryLocation &SILoc,
+                    const Value *SILocUnd, Instruction *NI,
+                    MemoryLocation &NILoc) {
+    // If NI may throw it acts as a barrier, unless \p SI writes to an
+    // alloca/alloca-like object that does not escape.
+    if (NI->mayThrow() && !InvisibleToCaller.count(SILocUnd))
+      return true;

-  ModRefInfo MR = AA.getModRefInfo(UseInst, DefLoc);
-  // If necessary, perform additional analysis.
-  if (isRefSet(MR))
-    MR = AA.callCapturesBefore(UseInst, DefLoc, &DT);
-  MR = clearMust(MR);
-  return isRefSet(MR);
-}
+    if (NI->isAtomic()) {
+      if (auto *NSI = dyn_cast<StoreInst>(NI)) {
+        if (isStrongerThanMonotonic(NSI->getOrdering()))
+          return true;
+      } else if (auto *NLI = dyn_cast<LoadInst>(NI)) {
+        if (isStrongerThanMonotonic(NLI->getOrdering()) ||
+            !AA.isNoAlias(MemoryLocation::get(NLI), SILoc))
+          return true;
+      } else if (isa<FenceInst>(NI))
+        // We can remove the dead stores, irrespective of the fence and its
+        // ordering (release/acquire/seq_cst). Fences only constrain the
+        // ordering of already visible stores; they do not make a store
+        // visible to other threads. So, skipping over a fence does not change
+        // a store from being dead. We treat fences as maythrows to be
+        // conservative, so that they block optimizations in the same way.
+        return false;
+      else
+        return true;
+    }
-// Returns true if \p Use may write to \p DefLoc.
-bool isWriteClobber(MemoryLocation DefLoc, Instruction *UseInst,
-                    AliasAnalysis &AA, const TargetLibraryInfo &TLI,
-                    DominatorTree &DT) {
-  if (!UseInst->mayWriteToMemory())
     return false;
+  }

-  if (auto CS = CallSite(UseInst))
-    if (CS.onlyAccessesInaccessibleMemory())
-      return false;
+  // Delete dead memory defs
+  void deleteDeadInstruction(Instruction *SI) {
+    MemorySSAUpdater Updater(&MSSA);
+    SmallVector<Instruction *, 32> NowDeadInsts;
+    NowDeadInsts.push_back(SI);
+    --NumFastOther;

-  ModRefInfo MR = AA.getModRefInfo(UseInst, DefLoc);
-  // If necessary, perform additional analysis.
-  if (isModSet(MR))
-    MR = AA.callCapturesBefore(UseInst, DefLoc, &DT);
-  MR = clearMust(MR);
-  return isModSet(MR);
-}
+    while (!NowDeadInsts.empty()) {
+      Instruction *DeadInst = NowDeadInsts.pop_back_val();
+      ++NumFastOther;

-// Returns true if \p M is an intrisnic that does not read or write memory.
-bool isNoopIntrinsic(MemoryUseOrDef *M) {
-  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(M->getMemoryInst())) {
-    switch (II->getIntrinsicID()) {
-    case Intrinsic::lifetime_start:
-    case Intrinsic::lifetime_end:
-    case Intrinsic::invariant_end:
-    case Intrinsic::assume:
-    case Intrinsic::dbg_addr:
-    case Intrinsic::dbg_declare:
-    case Intrinsic::dbg_label:
-    case Intrinsic::dbg_value:
-      return true;
-    default:
-      return false;
+      // Remove the Instruction from MSSA and IOL
+      if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) {
+        Updater.removeMemoryAccess(MA);
+        if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) {
+          SkipStores.insert(MD);
+        }
+      }
+      IOL.erase(DeadInst);
+
+      // Remove its operands
+      for (Use &O : DeadInst->operands())
+        if (Instruction *OpI = dyn_cast<Instruction>(O)) {
+          O = nullptr;
+          if (isInstructionTriviallyDead(OpI, &TLI))
+            NowDeadInsts.push_back(OpI);
+        }
+
+      DeadInst->eraseFromParent();
     }
   }
-  return false;
-}

-// Find the next MemoryDef clobbering Current, if there is any, skipping things
-// that do not alias. WalkType::Bad indiciates that the walk hit one of the
-// following:
-// * An aliasing MemoryUse (read).
-// * A MemoryPHI that comes before Current.
-// * Multiple MemoryDefs, which indicates a split in the control flow.
-WalkResult getNextMemoryDef(MemoryAccess *Current, MemoryLocation DefLoc,
-                            Instruction *DefObj, bool DefVisibleToCaller,
-                            int ScanLimit, AliasAnalysis &AA,
-                            const TargetLibraryInfo &TLI, DominatorTree &DT,
-                            DenseMap<BasicBlock *, unsigned> &PostOrderMap) {
-  if (--ScanLimit < 0)
-    return {WalkType::Bad, nullptr};
-
-  MemoryAccess *NextAccess = nullptr;
-  // Scan the uses to see if we have a single interesting MemoryAccess.
-  for (Use &U : Current->uses()) {
-    MemoryAccess *UseAccess = cast<MemoryAccess>(U.getUser());
-
-    // For MemoryPhis, bail out if we already found a NextAccess or if the
-    // post-order number of the MemoryPhi's block is greater or equal than the
-    // number of the current access. This ensures we do not visit accesses that
-    // may be executed before the current access.
-    if (isa<MemoryPhi>(UseAccess)) {
-      if (NextAccess || PostOrderMap[UseAccess->getBlock()] >=
-                            PostOrderMap[Current->getBlock()])
-        return {WalkType::Bad, nullptr};
-      NextAccess = UseAccess;
-      continue;
+  enum class WalkType { Bad, Next };
+  struct WalkResult {
+    WalkType Type;
+    MemoryDef *Next;
+
+    operator bool() { return Type != WalkType::Bad; }
+  };
+
+  Optional<MemoryLocation> getLocForWriteEx(Instruction *I) {
+    if (!I->mayWriteToMemory())
+      return None;
+
+    if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I))
+      return {MemoryLocation::getForDest(MTI)};
+
+    if (auto CS = CallSite(I)) {
+      if (Function *F = CS.getCalledFunction()) {
+        StringRef FnName = F->getName();
+        if (TLI.has(LibFunc_strcpy) && FnName == TLI.getName(LibFunc_strcpy))
+          return {MemoryLocation(CS.getArgument(0))};
+        if (TLI.has(LibFunc_strncpy) && FnName == TLI.getName(LibFunc_strncpy))
+          return {MemoryLocation(CS.getArgument(0))};
+        if (TLI.has(LibFunc_strcat) && FnName == TLI.getName(LibFunc_strcat))
+          return {MemoryLocation(CS.getArgument(0))};
+        if (TLI.has(LibFunc_strncat) && FnName == TLI.getName(LibFunc_strncat))
+          return {MemoryLocation(CS.getArgument(0))};
+      }
+      return None;
     }

-    Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
-    LLVM_DEBUG(dbgs() << " Checking use " << *UseInst << "\n");
-    // We can remove the dead stores, irrespective of the fence and its ordering
-    // (release/acquire/seq_cst). Fences only constraints the ordering of
-    // already visible stores, it does not make a store visible to other
-    // threads. So, skipping over a fence does not change a store from being
-    // dead.
-    if (isa<FenceInst>(UseInst))
-      continue;
+    return MemoryLocation::getOrNone(I);
+  }

-    // We cannot eliminate stores to locations visible to the caller across
-    // throwing instructions. We can also eliminate stores across throwing calls
-    // for alloc-like objects that do not escape before the current access.
-    if (UseInst->mayThrow() && DefVisibleToCaller) {
-      if (!DefObj ||
-          (!isAllocLikeFn(DefObj, &TLI) ||
-           PointerMayBeCapturedBefore(DefObj, false, true, UseInst, &DT)))
-        return {WalkType::Bad, nullptr};
-    }
+  // Returns true if \p Use may read from \p DefLoc.
+  bool isReadClobber(MemoryLocation DefLoc, Instruction *UseInst) const {
+    if (!UseInst->mayReadFromMemory())
+      return false;

-    // Skip intrinsics that do not really read or modify memory.
-    if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess))) {
-      if (isa<MemoryDef>(UseAccess))
-        NextAccess = UseAccess;
-      continue;
-    }
+    if (auto CS = CallSite(UseInst))
+      if (CS.onlyAccessesInaccessibleMemory())
+        return false;

-    // Uses which may read the original MemoryDef mean we cannot eliminate the
-    // original MD. Stop walk.
-    if (isReadClobber(DefLoc, UseInst, AA, TLI, DT))
-      return {WalkType::Bad, nullptr};
+    ModRefInfo MR = AA.getModRefInfo(UseInst, DefLoc);
+    // If necessary, perform additional analysis.
+    if (isRefSet(MR))
+      MR = AA.callCapturesBefore(UseInst, DefLoc, &DT);
+    MR = clearMust(MR);
+    return isRefSet(MR);
+  }

-    if (isa<MemoryUse>(UseAccess)) {
-      // Skip non-aliasing MemoryUses.
-      continue;
-    }
+  // Returns true if \p Use may write to \p DefLoc.
+  bool isWriteClobber(MemoryLocation DefLoc, Instruction *UseInst) const {
+    if (!UseInst->mayWriteToMemory())
+      return false;

-    // Multiple definitions, bail out.
-    if (NextAccess)
-      return {WalkType::Bad, nullptr};
-    NextAccess = UseAccess;
+    if (auto CS = CallSite(UseInst))
+      if (CS.onlyAccessesInaccessibleMemory())
+        return false;
+
+    ModRefInfo MR = AA.getModRefInfo(UseInst, DefLoc);
+    // If necessary, perform additional analysis.
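+    // (callCapturesBefore re-checks the call against DefLoc, taking pointer
+    // captures before UseInst into account, which can refine a conservative
+    // Mod answer; clearMust then drops the Must bit so only the may-write bit
+    // is tested below.)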
+    if (isModSet(MR))
+      MR = AA.callCapturesBefore(UseInst, DefLoc, &DT);
+    MR = clearMust(MR);
+    return isModSet(MR);
   }

-  // If we found a next MemoryAccess, we are done if it clobbers the original
-  // def or it is an unrelated MemoryAccess and we continue the walk starting at
-  // NextAccess.
-  if (NextAccess) {
-    if (auto *NextDef = dyn_cast<MemoryDef>(NextAccess)) {
-      if (!isNoopIntrinsic(NextDef)) {
-        if (isWriteClobber(DefLoc, NextDef->getMemoryInst(), AA, TLI, DT)) {
-          return {WalkType::Next, NextDef};
-        }
+  // Returns true if \p M is an intrinsic that does not read or write memory.
+  bool isNoopIntrinsic(MemoryUseOrDef *M) const {
+    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(M->getMemoryInst())) {
+      switch (II->getIntrinsicID()) {
+      case Intrinsic::lifetime_start:
+      case Intrinsic::invariant_end:
+      case Intrinsic::assume:
+      case Intrinsic::dbg_addr:
+      case Intrinsic::dbg_declare:
+      case Intrinsic::dbg_label:
+      case Intrinsic::dbg_value:
+        return true;
+      default:
+        return false;
      }
    }
-    assert(isa<MemoryDef>(NextAccess) || isa<MemoryPhi>(NextAccess));
-    return getNextMemoryDef(NextAccess, DefLoc, DefObj, DefVisibleToCaller,
-                            ScanLimit, AA, TLI, DT, PostOrderMap);
+    return false;
   }

-  // No aliasing definition found, bail out.
-  return {WalkType::Bad, nullptr};
-}
+  // Find the next MemoryDef clobbering Current, if there is any, skipping
+  // things that do not alias. WalkType::Bad indicates that the walk hit one of
+  // the following:
+  //  * An aliasing MemoryUse (read).
+  //  * A MemoryPHI that comes before Current.
+  //  * Multiple MemoryDefs, which indicates a split in the control flow.
+  WalkResult getNextMemoryDef(MemoryAccess *Current, MemoryLocation DefLoc,
+                              Instruction *DefObj, bool DefVisibleToCaller,
+                              int ScanLimit) {
+    if (--ScanLimit < 0)
+      return {WalkType::Bad, nullptr};

-// Delete dead memory defs
-void deleteDeadInstruction(Instruction *SI, InstOverlapIntervalsTy &IOL,
-                           MemorySSA &MSSA, const TargetLibraryInfo &TLI,
-                           SmallPtrSetImpl<MemoryAccess *> &SkipStores) {
-  MemorySSAUpdater Updater(&MSSA);
-  SmallVector<Instruction *, 32> NowDeadInsts;
-  NowDeadInsts.push_back(SI);
-  --NumFastOther;
+    MemoryAccess *NextAccess = nullptr;
+    // Scan the uses to see if we have a single interesting MemoryAccess.
+    for (Use &U : Current->uses()) {
+      MemoryAccess *UseAccess = cast<MemoryAccess>(U.getUser());
+
+      // For MemoryPhis, bail out if we already found a NextAccess or if the
+      // post-order number of the MemoryPhi's block is greater than or equal to
+      // the number of the current access. This ensures we do not visit
+      // accesses that may be executed before the current access.
+      if (isa<MemoryPhi>(UseAccess)) {
+        if (NextAccess || PostOrderNumbers[UseAccess->getBlock()] >=
+                              PostOrderNumbers[Current->getBlock()])
+          return {WalkType::Bad, nullptr};
+        NextAccess = UseAccess;
+        continue;
+      }

-  while (!NowDeadInsts.empty()) {
-    Instruction *DeadInst = NowDeadInsts.pop_back_val();
-    ++NumFastOther;
+      Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
+      LLVM_DEBUG(dbgs() << " Checking use " << *UseInst << "\n");
+      // We can remove the dead stores, irrespective of the fence and its
+      // ordering (release/acquire/seq_cst). Fences only constrain the
+      // ordering of already visible stores; they do not make a store visible
+      // to other threads. So, skipping over a fence does not change a store
+      // from being dead.
+      if (isa<FenceInst>(UseInst))
+        continue;

-    // Remove the Instruction from MSSA and IOL
-    if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) {
-      Updater.removeMemoryAccess(MA);
-      if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) {
-        SkipStores.insert(MD);
+      // We cannot eliminate stores to locations visible to the caller across
+      // throwing instructions. We can also eliminate stores across throwing
+      // calls for alloc-like objects that do not escape before the current
+      // access.
+      if (UseInst->mayThrow() && DefVisibleToCaller) {
+        if (!DefObj ||
+            (!isAllocLikeFn(DefObj, &TLI) ||
+             PointerMayBeCapturedBefore(DefObj, false, true, UseInst, &DT)))
+          return {WalkType::Bad, nullptr};
      }
-    }
-    IOL.erase(DeadInst);
+      // Skip intrinsics that do not really read or modify memory.
+      if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess))) {
+        if (isa<MemoryDef>(UseAccess))
+          NextAccess = UseAccess;
+        continue;
      }
-    // Remove it's operands
-    for (Use &O : DeadInst->operands())
-      if (Instruction *OpI = dyn_cast<Instruction>(O)) {
-        O = nullptr;
-        if (isInstructionTriviallyDead(OpI, &TLI))
-          NowDeadInsts.push_back(OpI);
+      // Uses which may read the original MemoryDef mean we cannot eliminate
+      // the original MD. Stop walk.
+      if (isReadClobber(DefLoc, UseInst))
+        return {WalkType::Bad, nullptr};

-    DeadInst->eraseFromParent();
-  }
-}
+      if (isa<MemoryUse>(UseAccess)) {
+        // Skip non-aliasing MemoryUses.
+        continue;
+      }

-// Check for any extra throws between SI and NI that block DSE. This only
-// checks extra maythrows (those that arn't MemoryDef's). MemoryDef that may
-// throw are handled during the walk from one def to the next.
-bool mayThrowBetween(
-    Instruction *SI, Instruction *NI, const Value *SILocUnd,
-    SmallSetVector<const Value *, 16> &InvisibleToCaller,
-    DenseMap<BasicBlock *, DenseMap<Instruction *, unsigned>> &ThrowingBlocks) {
-  // First see if we can ignore it by using the fact that SI is an alloca/alloca
-  // like object that is not visible to the caller during execution of the
-  // function.
-  if (SILocUnd && InvisibleToCaller.count(SILocUnd))
-    return false;

-  if (SI->getParent() == NI->getParent()) {
-    auto BlockMap = ThrowingBlocks.find(SI->getParent());
-    if (BlockMap == ThrowingBlocks.end())
-      return false;
-    return BlockMap->second[SI] !=
-           BlockMap->second[&*std::prev(NI->getIterator())];
-  }
-  return !ThrowingBlocks.empty();
-}
+      // Multiple definitions, bail out.
+      if (NextAccess)
+        return {WalkType::Bad, nullptr};
+      NextAccess = UseAccess;
+    }

-// Check if \p NI acts as a DSE barrier for \p SI. The following instructions
-// act as barriers:
-// * A memory instruction that may throw and \p SI accesses a non-stack object.
-// * Atomic instructions stronger that monotonic.
-bool isDSEBarrier(Instruction *SI, MemoryLocation &SILoc, const Value *SILocUnd,
-                  Instruction *NI, MemoryLocation &NILoc,
-                  SmallSetVector<const Value *, 16> &InvisibleToCaller,
-                  AliasAnalysis &AA, const TargetLibraryInfo &TLI) {
-  // If NI may throw it acts as a barrier, unless we are to an alloca/alloca
-  // like object that does not escape.
-  if (NI->mayThrow() && !InvisibleToCaller.count(SILocUnd))
-    return true;
+    // If we found a next MemoryAccess, we are done if it clobbers the original
+    // def or it is an unrelated MemoryAccess and we continue the walk starting
+    // at NextAccess.
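+    // (Sketch of the two outcomes: a MemoryDef whose instruction writes over
+    // DefLoc is returned as WalkType::Next; an unrelated MemoryDef or a
+    // MemoryPhi causes the walk to recurse with NextAccess as the new starting
+    // point, bounded by ScanLimit.)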
+    if (NextAccess) {
+      if (auto *NextDef = dyn_cast<MemoryDef>(NextAccess)) {
+        if (!isNoopIntrinsic(NextDef)) {
+          if (isWriteClobber(DefLoc, NextDef->getMemoryInst()))
+            return {WalkType::Next, NextDef};
+        }
+      }
+      assert(isa<MemoryDef>(NextAccess) || isa<MemoryPhi>(NextAccess));
+      return getNextMemoryDef(NextAccess, DefLoc, DefObj, DefVisibleToCaller,
+                              ScanLimit);
+    }
-  if (NI->isAtomic()) {
-    if (auto *NSI = dyn_cast<StoreInst>(NI)) {
-      if (isStrongerThanMonotonic(NSI->getOrdering()))
-        return true;
-    } else if (auto *NLI = dyn_cast<LoadInst>(NI)) {
-      if (isStrongerThanMonotonic(NLI->getOrdering()) ||
-          !AA.isNoAlias(MemoryLocation::get(NLI), SILoc))
-        return true;
-    } else if (isa<FenceInst>(NI))
-      // We can remove the dead stores, irrespective of the fence and its
-      // ordering (release/acquire/seq_cst). Fences only constraints the
-      // ordering of already visible stores, it does not make a store visible to
-      // other threads. So, skipping over a fence does not change a store from
-      // being dead. We treat fences as maythrows, to be conservative so that
-      // they block optimizations in the same way
-      return false;
-    else
-      return true;
+    // No aliasing definition found; we reached the end of the walk.
+    return {WalkType::Bad, nullptr};
   }
-
-  return false;
-}
+};

 bool eliminateDeadStoresMemorySSA(Function &F, AliasAnalysis &AA,
                                   MemorySSA &MSSA, DominatorTree &DT,
@@ -1680,81 +1741,28 @@
   const DataLayout &DL = F.getParent()->getDataLayout();
   bool MadeChange = false;

-  // All MemoryDef stores
-  SmallVector<MemoryDef *, 64> Stores;
-  // Any that should be skipped as they are already deleted
-  SmallPtrSet<MemoryAccess *, 4> SkipStores;
-  // A map of interval maps representing partially-overwritten value parts
-  InstOverlapIntervalsTy IOL;
-  // Keep track of all of the objects that are invisible to the caller until the
-  // function returns.
-  SmallSetVector<const Value *, 16> InvisibleToCaller;
-  // Keep track of blocks with throwing instructions no modeled in MemorySSA. For each block with
-  // throwing instructions, we number all instructions in the block, starting
-  // with 0 and incrementing the counter for throwing instructions. If 2
-  // instructions have the same number there are no throwing instructions
-  // between them.
-  DenseMap<BasicBlock *, DenseMap<Instruction *, unsigned>> ThrowingBlocks;
-
-  // Post-order numbers for basic blocks in the function.
-  DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
-  unsigned PO = 0;
-  // Collect blocks with throwing instructions, alloc-like objects and
-  // post-order numbers for basic blocks.
-  for (BasicBlock *BB : post_order(&F)) {
-    PostOrderNumbers[BB] = PO++;
-
-    unsigned ThrowingCnt = 0;
-    DenseMap<Instruction *, unsigned> BlockMap;
-    for (Instruction &I : *BB) {
-      if (I.mayThrow() && !MSSA.getMemoryAccess(&I))
-        ThrowingCnt++;
-      BlockMap[&I] = ThrowingCnt;
-
-      auto *MD = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(&I));
-      if (MD && hasAnalyzableMemoryWrite(&I, TLI) && isRemovable(&I))
-        Stores.push_back(MD);
-
-      // Track alloca and alloca-like objects. Here we care about objects not
-      // visible to the caller during function execution. Alloca objects are
-      // invalid in the caller, for alloca-like objects we ensure that they are
-      // not captured throughout the function.
-      if (isa<AllocaInst>(&I) ||
-          (isAllocLikeFn(&I, &TLI) && !PointerMayBeCaptured(&I, false, true)))
-        InvisibleToCaller.insert(&I);
-    }
-
-    // Only add the mapping for blocks with any throwing instructions.
-    if (ThrowingCnt != 0)
-      ThrowingBlocks[BB] = BlockMap;
-  }
-  // Treat byval or inalloca arguments the same as Allocas, stores to them are
-  // dead at the end of the function.
-  for (Argument &AI : F.args())
-    if (AI.hasByValOrInAllocaAttr())
-      InvisibleToCaller.insert(&AI);
-
+  DSEState State(F, AA, MSSA, DT, PDT, TLI);
+  State.init();
   // For each store:
-  while (!Stores.empty()) {
-    MemoryDef *SIMD = Stores.back();
-    Stores.pop_back();
-    if (SkipStores.count(SIMD))
+  while (!State.Stores.empty()) {
+    MemoryDef *SIMD = State.Stores.back();
+    State.Stores.pop_back();
+    if (State.SkipStores.count(SIMD))
       continue;
     Instruction *SI = SIMD->getMemoryInst();
-    MemoryLocation SILoc = *getLocForWriteEx(SI, TLI);
+    MemoryLocation SILoc = *State.getLocForWriteEx(SI);
     assert(SILoc.Ptr && "SILoc should not be null");
     const Value *SILocUnd = GetUnderlyingObject(SILoc.Ptr, DL);
     Instruction *DefObj =
         const_cast<Instruction *>(dyn_cast<Instruction>(SILocUnd));
-    bool LocVisibleToCaller = !InvisibleToCaller.count(SILocUnd);
+    bool LocVisibleToCaller = !State.InvisibleToCaller.count(SILocUnd);
     LLVM_DEBUG(dbgs() << "Trying to eliminate " << *SI << "\n");

     // Walk MemorySSA forward to find a MemoryDef that kills SI.
-    WalkResult Next;
-    while ((Next = getNextMemoryDef(SIMD, SILoc, DefObj, LocVisibleToCaller,
-                                    MemorySSAScanLimit, AA, TLI, DT,
-                                    PostOrderNumbers))) {
+    DSEState::WalkResult Next;
+    while ((Next = State.getNextMemoryDef(
+                SIMD, SILoc, DefObj, LocVisibleToCaller, MemorySSAScanLimit))) {
       MemoryAccess *NextAccess = Next.Next;
       LLVM_DEBUG(dbgs() << " Checking " << *NextAccess << "\n");
       MemoryDef *ND = dyn_cast<MemoryDef>(NextAccess);
       Instruction *NI = ND->getMemoryInst();
@@ -1763,11 +1771,10 @@
       if (!hasAnalyzableMemoryWrite(NI, TLI))
         break;
-      MemoryLocation NILoc = *getLocForWriteEx(NI, TLI);
+      MemoryLocation NILoc = *State.getLocForWriteEx(NI);

       // Check for anything that looks like it will be a barrier to further
       // removal
-      if (isDSEBarrier(SI, SILoc, SILocUnd, NI, NILoc, InvisibleToCaller, AA,
-                       TLI)) {
+      if (State.isDSEBarrier(SI, SILoc, SILocUnd, NI, NILoc)) {
         LLVM_DEBUG(dbgs() << " stop, barrier\n");
         break;
       }
@@ -1781,8 +1788,7 @@
       // Before we try to remove anything, check for any extra throwing
       // instructions that block us from DSEing
-      if (mayThrowBetween(SI, NI, SILocUnd, InvisibleToCaller,
-                          ThrowingBlocks)) {
+      if (State.mayThrowBetween(SI, NI, SILocUnd)) {
         LLVM_DEBUG(dbgs() << " stop, may throw!\n");
         break;
       }
@@ -1792,13 +1798,14 @@
       // Get what type of overwrite this might be
       int64_t InstWriteOffset, DepWriteOffset;
-      OverwriteResult OR = isOverwrite(NILoc, SILoc, DL, TLI, DepWriteOffset,
-                                       InstWriteOffset, SI, IOL, AA, &F);
+      OverwriteResult OR =
+          isOverwrite(NILoc, SILoc, DL, State.TLI, DepWriteOffset,
+                      InstWriteOffset, SI, State.IOL, State.AA, &F);

       if (OR == OW_Complete) {
         LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *SI
                           << "\n  KILLER: " << *NI << '\n');
-        deleteDeadInstruction(SI, IOL, MSSA, TLI, SkipStores);
+        State.deleteDeadInstruction(SI);
         ++NumFastStores;
         MadeChange = true;
         break;