diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1157,6 +1157,9 @@
 def int_ptrmask: Intrinsic<[llvm_anyptr_ty], [llvm_anyptr_ty, llvm_anyint_ty],
                            [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
 
+// Intrinsic that does nothing, except that it may read memory.
+def int_donothing_mayread : Intrinsic<[], [], [IntrReadMem]>;
+
 //===---------------- Vector Predication Intrinsics --------------===//
 
 // Binary operators
@@ -1229,7 +1232,6 @@
 }
 
-
 //===-------------------------- Masked Intrinsics -------------------------===//
 //
 def int_masked_store : Intrinsic<[], [llvm_anyvector_ty,
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -32,7 +32,6 @@
 #include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/Analysis/MemorySSA.h"
 #include "llvm/Analysis/MemorySSAUpdater.h"
-#include "llvm/Analysis/PostDominators.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/Argument.h"
@@ -1475,7 +1474,6 @@
   AliasAnalysis &AA;
   MemorySSA &MSSA;
   DominatorTree &DT;
-  PostDominatorTree &PDT;
   const TargetLibraryInfo &TLI;
 
   // All MemoryDefs that potentially could kill other MemDefs.
@@ -1495,14 +1493,19 @@
   /// basic block.
   DenseMap<BasicBlock *, InstOverlapIntervalsTy> IOLs;
 
+  Function *ExitUseFn;
+
   DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
-           PostDominatorTree &PDT, const TargetLibraryInfo &TLI)
-      : F(F), AA(AA), MSSA(MSSA), DT(DT), PDT(PDT), TLI(TLI) {}
+           const TargetLibraryInfo &TLI)
+      : F(F), AA(AA), MSSA(MSSA), DT(DT), TLI(TLI) {}
 
   static DSEState get(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
-                      DominatorTree &DT, PostDominatorTree &PDT,
-                      const TargetLibraryInfo &TLI) {
-    DSEState State(F, AA, MSSA, DT, PDT, TLI);
+                      DominatorTree &DT, const TargetLibraryInfo &TLI) {
+    DSEState State(F, AA, MSSA, DT, TLI);
+    Module *M = F.getParent();
+    State.ExitUseFn =
+        Intrinsic::getDeclaration(M, Intrinsic::donothing_mayread);
+
     // Collect blocks with throwing instructions not modeled in MemorySSA and
     // alloc-like objects.
     unsigned PO = 0;
@@ -1525,6 +1528,25 @@
             (isAllocLikeFn(&I, &TLI) && !PointerMayBeCaptured(&I, false, true)))
           State.InvisibleToCaller.insert(&I);
       }
+      if (isa<ReturnInst>(BB->getTerminator()) && DT.isReachableFromEntry(BB)) {
+        // Add dummy readonly uses at all function exits and add them to MSSA.
+        // These extra uses model potential accesses after the function returns
+        // and allow us to rely on our MemorySSA traversal to ensure all paths
+        // from a killed store to any function exit do not contain any reads,
+        // without requiring post-dominance.
+        MemorySSAUpdater MSSAU(&MSSA);
+        CallInst *CI =
+            CallInst::Create(State.ExitUseFn->getFunctionType(),
+                             State.ExitUseFn, {}, "", BB->getTerminator());
+        MemoryAccess *NewMemAcc = MSSAU.createMemoryAccessInBB(
+            CI, nullptr, CI->getParent(), MemorySSA::End);
+        if (isa<MemoryUse>(NewMemAcc))
+          MSSAU.insertUse(cast<MemoryUse>(NewMemAcc), false);
+        else
+          // With the new pass manager, AA does not know about the new calls and
+          // assumes ModRef.
+          MSSAU.insertDef(cast<MemoryDef>(NewMemAcc), false);
+      }
     }
 
     // Treat byval or inalloca arguments the same as Allocas, stores to them are
@@ -1561,7 +1583,7 @@
   }
 
   /// Returns true if \p Use completely overwrites \p DefLoc.
-  bool isCompleteOverwrite(MemoryLocation DefLoc, Instruction *UseInst) const {
+  bool isMustWriteClobber(MemoryLocation DefLoc, Instruction *UseInst) const {
     // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
     // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
     // MemoryDef.
@@ -1573,13 +1595,10 @@
       return false;
 
     ModRefInfo MR = AA.getModRefInfo(UseInst, DefLoc);
-    // If necessary, perform additional analysis.
-    if (isModSet(MR))
-      MR = AA.callCapturesBefore(UseInst, DefLoc, &DT);
-
     Optional<MemoryLocation> UseLoc = getLocForWriteEx(UseInst);
     return isModSet(MR) && isMustSet(MR) &&
-           UseLoc->Size.getValue() >= DefLoc.Size.getValue();
+           (UseLoc->Size.hasValue() && DefLoc.Size.hasValue() &&
+            UseLoc->Size.getValue() >= DefLoc.Size.getValue());
   }
 
   /// Returns true if \p Use may read from \p DefLoc.
@@ -1587,10 +1606,19 @@
     if (!UseInst->mayReadFromMemory())
       return false;
 
-    if (auto CS = CallSite(UseInst))
+    if (auto CS = CallSite(UseInst)) {
      if (CS.onlyAccessesInaccessibleMemory())
        return false;
 
+      if (CS.getCalledFunction() == ExitUseFn) {
+        const DataLayout &DL = F.getParent()->getDataLayout();
+        const Value *UO = GetUnderlyingObject(DefLoc.Ptr, DL);
+        // TODO: Maybe pre-compute this.
+        return !UO || InvisibleToCaller.find(UO) == InvisibleToCaller.end() ||
+               (!isa<AllocaInst>(UO) && PointerMayBeCaptured(UO, true, false));
+      }
+    }
+
     ModRefInfo MR = AA.getModRefInfo(UseInst, DefLoc);
     // If necessary, perform additional analysis.
     if (isRefSet(MR))
@@ -1599,7 +1627,8 @@
   }
 
   // Find a MemoryDef writing to \p DefLoc and dominating \p Current, with no
-  // read access in between or return None otherwise. The returned value may not
+  // read access between them or on any other path to a function exit block. If
+  // there is no such MemoryDef, return None. The returned value may not
   // (completely) overwrite \p DefLoc. Currently we bail out when we encounter
   // an aliasing MemoryUse (read).
   Optional<MemoryAccess *> getDomMemoryDef(MemoryDef *KillingDef,
@@ -1632,12 +1661,9 @@
       if (isa<MemoryPhi>(DomAccess))
         break;
 
-      // Check if we can skip DomDef for DSE. We also require the KillingDef
-      // execute whenever DomDef executes and use post-dominance to ensure that.
-
+      // Check if we can skip DomDef for DSE.
       MemoryDef *DomDef = dyn_cast<MemoryDef>(DomAccess);
-      if ((DomDef && canSkipDef(DomDef, DefVisibleToCaller)) ||
-          !PDT.dominates(KillingDef->getBlock(), DomDef->getBlock())) {
+      if (DomDef && canSkipDef(DomDef, DefVisibleToCaller)) {
         StepAgain = true;
         Current = DomDef->getDefiningAccess();
       }
@@ -1705,8 +1731,19 @@
       // 3 = Def(1)   ; <---- Current  (3, 2) = NoAlias, (3,1) = MayAlias,
       //                  stores [0,1]
       if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
-        if (!isCompleteOverwrite(DefLoc, UseInst))
+        int64_t InstWriteOffset, DepWriteOffset;
+        auto CC = getLocForWriteEx(UseInst);
+        InstOverlapIntervalsTy IOL;
+
+        const DataLayout &DL = F.getParent()->getDataLayout();
+
+        if (!isMustWriteClobber(DefLoc, UseInst) ||
+            (CC &&
+             isOverwrite(DefLoc, *CC, DL, TLI, DepWriteOffset, InstWriteOffset,
+                         UseInst, IOL, AA, &F) != OW_Complete)) {
+          LLVM_DEBUG(dbgs() << "  ... found non-aliasing MemoryDef\n");
           PushMemUses(UseDef);
+        }
       }
     }
 
@@ -1795,12 +1832,11 @@
 bool eliminateDeadStoresMemorySSA(Function &F, AliasAnalysis &AA,
                                   MemorySSA &MSSA, DominatorTree &DT,
-                                  PostDominatorTree &PDT,
                                   const TargetLibraryInfo &TLI) {
   const DataLayout &DL = F.getParent()->getDataLayout();
   bool MadeChange = false;
 
-  DSEState State = DSEState::get(F, AA, MSSA, DT, PDT, TLI);
+  DSEState State = DSEState::get(F, AA, MSSA, DT, TLI);
   // For each store:
   for (unsigned I = 0; I < State.MemDefs.size(); I++) {
     MemoryDef *KillingDef = State.MemDefs[I];
@@ -1919,6 +1955,17 @@
   for (auto &KV : State.IOLs)
     MadeChange |= removePartiallyOverlappedStores(&AA, DL, KV.second);
 
+  MemorySSAUpdater MSSAU(&MSSA);
+  for (auto &BB : F) {
+    if (!DT.isReachableFromEntry(&BB))
+      continue;
+    if (isa<ReturnInst>(BB.getTerminator())) {
+      Instruction *C = &*std::prev(BB.getTerminator()->getIterator());
+      MSSAU.removeMemoryAccess(C);
+      C->eraseFromParent();
+    }
+  }
+
   return MadeChange;
 }
 } // end anonymous namespace
@@ -1933,9 +1980,7 @@
   if (EnableMemorySSA) {
     MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
-    PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
-
-    if (!eliminateDeadStoresMemorySSA(F, AA, MSSA, DT, PDT, TLI))
+    if (!eliminateDeadStoresMemorySSA(F, AA, MSSA, DT, TLI))
       return PreservedAnalyses::all();
   } else {
     MemoryDependenceResults &MD = AM.getResult<MemoryDependenceAnalysis>(F);
@@ -1976,10 +2021,7 @@
     if (EnableMemorySSA) {
       MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
-      PostDominatorTree &PDT =
-          getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
-
-      return eliminateDeadStoresMemorySSA(F, AA, MSSA, DT, PDT, TLI);
+      return eliminateDeadStoresMemorySSA(F, AA, MSSA, DT, TLI);
     } else {
       MemoryDependenceResults &MD =
           getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
@@ -1997,9 +2039,7 @@
     AU.addPreserved<DominatorTreeWrapperPass>();
 
     if (EnableMemorySSA) {
-      AU.addRequired<PostDominatorTreeWrapperPass>();
       AU.addRequired<MemorySSAWrapperPass>();
-      AU.addPreserved<PostDominatorTreeWrapperPass>();
       AU.addPreserved<MemorySSAWrapperPass>();
     } else {
       AU.addRequired<MemoryDependenceWrapperPass>();
@@ -2015,7 +2055,6 @@
 INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
                       false)
 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
diff --git a/llvm/test/Transforms/DeadStoreElimination/MSSA/memset-missing-debugloc.ll b/llvm/test/Transforms/DeadStoreElimination/MSSA/memset-missing-debugloc.ll
--- a/llvm/test/Transforms/DeadStoreElimination/MSSA/memset-missing-debugloc.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/MSSA/memset-missing-debugloc.ll
@@ -21,10 +21,9 @@
 ; }
 
-define dso_local i32 @_Z1av() !dbg !7 {
+define dso_local i32 @_Z1av([5 x i64]* %b) !dbg !7 {
 entry:
   %retval = alloca i32, align 4
-  %b = alloca [5 x i64], align 16
   call void @llvm.dbg.declare(metadata [5 x i64]* %b, metadata !11, metadata !DIExpression()), !dbg !16
   %0 = bitcast [5 x i64]* %b to i8*, !dbg !16
   call void @llvm.memset.p0i8.i64(i8* align 16 %0, i8 0, i64 40, i1 false), !dbg !16
@@ -37,7 +36,7 @@
   store i64 2, i64* %4, align 16, !dbg !16
   %5 = getelementptr inbounds [5 x i64], [5 x i64]* %1, i32 0, i32 3, !dbg !16
   store i64 2, i64* %5, align 8, !dbg !16
-  %call = call i32 @_Z1av(), !dbg !17
+  %call = call i32 @_Z1av([5 x i64]* %b), !dbg !17
   %tobool = icmp ne i32 %call, 0, !dbg !17
   br i1 %tobool, label %if.then, label %if.end, !dbg !19
diff --git a/llvm/test/Transforms/DeadStoreElimination/MSSA/memset-unknown-sizes.ll b/llvm/test/Transforms/DeadStoreElimination/MSSA/memset-unknown-sizes.ll
--- a/llvm/test/Transforms/DeadStoreElimination/MSSA/memset-unknown-sizes.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/MSSA/memset-unknown-sizes.ll
@@ -13,7 +13,7 @@
 ; CHECK:       cond.true.i.i.i:
 ; CHECK-NEXT:    ret void
 ; CHECK:       cond.end.i.i.i:
-; CHECK-NEXT:    [[CALL_I_I_I_I_I:%.*]] = tail call noalias nonnull i8* @_Znam() #2
+; CHECK-NEXT:    [[CALL_I_I_I_I_I:%.*]] = tail call noalias nonnull i8* @_Znam()
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[CALL_I_I_I_I_I]] to i64*
 ; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* nonnull align 8 [[CALL_I_I_I_I_I]], i8 0, i64 undef, i1 false)
 ; CHECK-NEXT:    store i64 0, i64* [[TMP0]], align 8
@@ -42,7 +42,7 @@
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[CLEANUP_CONT104:%.*]], label [[IF_THEN:%.*]]
 ; CHECK:       if.then:
 ; CHECK-NEXT:    [[MUL_I_I_I_I:%.*]] = shl nuw nsw i64 undef, 3
-; CHECK-NEXT:    [[CALL_I_I_I_I_I_I131:%.*]] = call noalias nonnull i8* @_Znwm() #2
+; CHECK-NEXT:    [[CALL_I_I_I_I_I_I131:%.*]] = call noalias nonnull i8* @_Znwm()
 ; CHECK-NEXT:    [[DOTCAST_I_I:%.*]] = bitcast i8* [[CALL_I_I_I_I_I_I131]] to i64*
 ; CHECK-NEXT:    store i64 0, i64* [[DOTCAST_I_I]], align 8
 ; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* nonnull align 8 [[CALL_I_I_I_I_I_I131]], i8 0, i64 [[MUL_I_I_I_I]], i1 false)
diff --git a/llvm/test/Transforms/DeadStoreElimination/MSSA/multiblock-malloc-free.ll b/llvm/test/Transforms/DeadStoreElimination/MSSA/multiblock-malloc-free.ll
--- a/llvm/test/Transforms/DeadStoreElimination/MSSA/multiblock-malloc-free.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/MSSA/multiblock-malloc-free.ll
@@ -1,8 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; XFAIL: *
-; TODO: Handling of free not implemented yet.
-
 ; RUN: opt < %s -basicaa -dse -enable-dse-memoryssa -S | FileCheck %s
 
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
diff --git a/llvm/test/Transforms/DeadStoreElimination/MSSA/multiblock-memoryphis.ll b/llvm/test/Transforms/DeadStoreElimination/MSSA/multiblock-memoryphis.ll
--- a/llvm/test/Transforms/DeadStoreElimination/MSSA/multiblock-memoryphis.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/MSSA/multiblock-memoryphis.ll
@@ -179,11 +179,11 @@
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[IF_THEN:%.*]], label [[EXIT:%.*]]
 ; CHECK:       if.then:
-; CHECK-NEXT:    tail call void @fn2_test11() #0
+; CHECK-NEXT:    tail call void @fn2_test11() #1
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    store i8* null, i8** [[PTR_1:%.*]], align 8
-; CHECK-NEXT:    tail call void @fn2_test11() #0
+; CHECK-NEXT:    tail call void @fn2_test11() #1
 ; CHECK-NEXT:    ret void
 ;
 entry:
diff --git a/llvm/test/Transforms/DeadStoreElimination/MSSA/multiblock-multipath.ll b/llvm/test/Transforms/DeadStoreElimination/MSSA/multiblock-multipath.ll
--- a/llvm/test/Transforms/DeadStoreElimination/MSSA/multiblock-multipath.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/MSSA/multiblock-multipath.ll
@@ -7,10 +7,9 @@
 
 define void @test4(i32* noalias %P, i1 %c1) {
 ; CHECK-LABEL: @test4(
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]]
 ; CHECK-NEXT:    br i1 [[C1:%.*]], label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 0, i32* [[P]]
+; CHECK-NEXT:    store i32 0, i32* [[P:%.*]]
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    store i32 3, i32* [[P]]
@@ -36,10 +35,9 @@
 
 define void @test5(i32* noalias %P) {
 ; CHECK-LABEL: @test5(
-; CHECK-NEXT:    store i32 1, i32* [[P:%.*]]
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 0, i32* [[P]]
+; CHECK-NEXT:    store i32 0, i32* [[P:%.*]]
 ; CHECK-NEXT:    br label [[BB5:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    br i1 undef, label [[BB3:%.*]], label [[BB4:%.*]]
diff --git a/llvm/test/Transforms/DeadStoreElimination/MSSA/multiblock-simple.ll b/llvm/test/Transforms/DeadStoreElimination/MSSA/multiblock-simple.ll
--- a/llvm/test/Transforms/DeadStoreElimination/MSSA/multiblock-simple.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/MSSA/multiblock-simple.ll
@@ -175,7 +175,6 @@
 define void @test11() {
 ; CHECK-LABEL: @test11(
 ; CHECK-NEXT:    [[P:%.*]] = alloca i32
-; CHECK-NEXT:    store i32 0, i32* [[P]]
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
 ; CHECK-NEXT:    store i32 0, i32* [[P]]
@@ -200,10 +199,9 @@
 
 define void @test12(i32* %P) {
 ; CHECK-LABEL: @test12(
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]]
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 1, i32* [[P]]
+; CHECK-NEXT:    store i32 1, i32* [[P:%.*]]
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    store i32 1, i32* [[P]]
@@ -226,10 +224,9 @@
 
 define void @test13(i32* %P) {
 ; CHECK-LABEL: @test13(
-; CHECK-NEXT:    store i32 0, i32* [[P:%.*]]
 ; CHECK-NEXT:    br i1 true, label [[BB1:%.*]], label [[BB2:%.*]]
 ; CHECK:       bb1:
-; CHECK-NEXT:    store i32 1, i32* [[P]]
+; CHECK-NEXT:    store i32 1, i32* [[P:%.*]]
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb2:
 ; CHECK-NEXT:    store i32 1, i32* [[P]]
diff --git a/llvm/test/Transforms/DeadStoreElimination/MSSA/simple.ll b/llvm/test/Transforms/DeadStoreElimination/MSSA/simple.ll
--- a/llvm/test/Transforms/DeadStoreElimination/MSSA/simple.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/MSSA/simple.ll
@@ -244,7 +244,7 @@
 ; CHECK-NEXT:    [[P_4:%.*]] = getelementptr i8, i8* [[P:%.*]], i64 4
 ; CHECK-NEXT:    [[TMP:%.*]] = load i8, i8* [[P_4]], align 1
 ; CHECK-NEXT:    store i8 0, i8* [[P_4]], align 1
-; CHECK-NEXT:    [[Q:%.*]] = call i8* @strdup(i8* [[P]]) #6
+; CHECK-NEXT:    [[Q:%.*]] = call i8* @strdup(i8* [[P]]) #7
 ; CHECK-NEXT:    store i8 [[TMP]], i8* [[P_4]], align 1
 ; CHECK-NEXT:    ret i8* [[Q]]
 ;
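For illustration only, not part of the patch: the sketch below mirrors @test4 from multiblock-multipath.ll above and shows the kind of store the change targets once the post-dominance requirement is dropped. Function and block names are invented, and the @llvm.donothing.mayread call appears only in comments because DSE inserts and removes it internally.

; A store that is overwritten on every path to a return can be deleted even
; though no single overwriting store post-dominates it.
define void @overwritten_on_all_paths(i32* noalias %p, i1 %c) {
entry:
  store i32 1, i32* %p        ; dead: clobbered in both %left and %right
  br i1 %c, label %left, label %right

left:
  store i32 2, i32* %p        ; kept: read by the dummy exit use
  br label %exit

right:
  store i32 3, i32* %p        ; kept: read by the dummy exit use
  br label %exit

exit:
  ; While the pass runs, a call to @llvm.donothing.mayread sits here, right
  ; before the ret. Its MemoryUse is reached from the stores in %left and
  ; %right (caller-visible memory through %p), but not from the store in
  ; %entry, whose only MemorySSA uses are the two complete overwrites.
  ret void
}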