Index: lib/Analysis/MemoryDependenceAnalysis.cpp =================================================================== --- lib/Analysis/MemoryDependenceAnalysis.cpp +++ lib/Analysis/MemoryDependenceAnalysis.cpp @@ -978,6 +978,11 @@ Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad, BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) { + bool isInvariantLoad = false; + + if (LoadInst *LI = dyn_cast_or_null<LoadInst>(QueryInst)) + isInvariantLoad = LI->getMetadata(LLVMContext::MD_invariant_load); + // Do a binary search to see if we already have an entry for this block in // the cache set. If so, find it. NonLocalDepInfo::iterator Entry = std::upper_bound( @@ -989,6 +994,13 @@ if (Entry != Cache->begin() + NumSortedEntries && Entry->getBB() == BB) ExistingResult = &*Entry; + // Use cached result for invariant load only if there is no dependency for non + // invariant load. In this case invariant load can not have any dependency as + // well. + if (ExistingResult && isInvariantLoad && + !ExistingResult->getResult().isNonFuncLocal()) + ExistingResult = nullptr; + // If we have a cached entry, and it is non-dirty, use it as the value for // this dependency. if (ExistingResult && !ExistingResult->getResult().isDirty()) { @@ -1017,6 +1029,10 @@ MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB, QueryInst); + // Don't cache results for invariant load. + if (isInvariantLoad) + return Dep; + // If we had a dirty entry for the block, update it. Otherwise, just add // a new entry. 
if (ExistingResult) @@ -1453,7 +1469,6 @@ if (SkipFirstBlock) return false; - bool foundBlock = false; for (NonLocalDepEntry &I : llvm::reverse(*Cache)) { if (I.getBB() != BB) continue; @@ -1461,14 +1476,12 @@ assert((GotWorklistLimit || I.getResult().isNonLocal() || !DT.isReachableFromEntry(BB)) && "Should only be here with transparent block"); - foundBlock = true; I.setResult(MemDepResult::getUnknown()); - Result.push_back( - NonLocalDepResult(I.getBB(), I.getResult(), Pointer.getAddr())); break; } - (void)foundBlock; (void)GotWorklistLimit; - assert((foundBlock || GotWorklistLimit) && "Current block not in cache?"); + // Go ahead and report unknown dependence. + Result.push_back( + NonLocalDepResult(BB, MemDepResult::getUnknown(), Pointer.getAddr())); } // Okay, we're done now. If we added new values to the cache, re-sort it. Index: test/Analysis/DependenceAnalysis/InvariantLoad.ll =================================================================== --- /dev/null +++ test/Analysis/DependenceAnalysis/InvariantLoad.ll @@ -0,0 +1,136 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -gvn -S | FileCheck %s + +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:1" +target triple = "x86_64-unknown-linux-gnu" + +declare void @llvm.memset.p0i8.i8(i8*, i8, i32, i1) +declare void @foo(i8*) + +define i8 @test(i1 %cmp) { +; CHECK-LABEL: @test( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[P:%.*]] = alloca i8 +; CHECK-NEXT: store i8 5, i8* [[P]] +; CHECK-NEXT: br label [[HEADER:%.*]] +; CHECK: header: +; CHECK-NEXT: [[V:%.*]] = phi i8 [ 5, [[ENTRY:%.*]] ], [ -5, [[ALIVE:%.*]] ] +; CHECK-NEXT: [[I:%.*]] = phi i8 [ 0, [[ENTRY]] ], [ [[I_INC:%.*]], [[ALIVE]] ] +; CHECK-NEXT: br i1 [[CMP:%.*]], label [[ALIVE]], label [[DEAD:%.*]] +; CHECK: dead: +; CHECK-NEXT: call void @foo(i8* [[P]]) +; CHECK-NEXT: [[I_1:%.*]] = add i8 [[I]], [[V]] +; CHECK-NEXT: br label [[ALIVE]] +; CHECK: alive: +; CHECK-NEXT: [[I_2:%.*]] = phi i8 [ [[I]], 
[[HEADER]] ], [ [[I_1]], [[DEAD]] ] +; CHECK-NEXT: store i8 -5, i8* [[P]] +; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* align 1 [[P]], i8 0, i32 1, i1 false) +; CHECK-NEXT: [[I_INC]] = add i8 [[I_2]], 1 +; CHECK-NEXT: [[CMP_LOOP:%.*]] = icmp ugt i8 [[I_INC]], 100 +; CHECK-NEXT: br i1 [[CMP_LOOP]], label [[EXIT:%.*]], label [[HEADER]] +; CHECK: exit: +; CHECK-NEXT: ret i8 0 +; + +entry: + %p = alloca i8 + %addr = getelementptr inbounds i8, i8* %p, i64 0 + store i8 5, i8* %addr + br label %header +header: + %i = phi i8 [0, %entry], [%i.inc, %backedge] + br i1 %cmp, label %alive, label %dead +dead: + call void @foo(i8* %p) + %v = load i8, i8* %addr, !invariant.load !1 + %i.1 = add i8 %i, %v + br label %alive +alive: + %i.2 = phi i8 [%i, %header], [%i.1, %dead] + store i8 -5, i8* %addr + br label %backedge +backedge: + call void @llvm.memset.p0i8.i8(i8 * align 1 %p, i8 0, i32 1, i1 false) + %i.inc = add i8 %i.2, 1 + %cmp.loop = icmp ugt i8 %i.inc, 100 + br i1 %cmp.loop, label %exit, label %header +exit: + %res = load i8, i8* %addr + ret i8 %res +} + +; Check that first two loads are not optimized out while the one marked with +; invariant.load reuses %res1 +define i8 @test2(i1 %cmp, i8 *%p) { +; CHECK-LABEL: @test2( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[RES1:%.*]] = load i8, i8* [[P:%.*]] +; CHECK-NEXT: call void @foo(i8* [[P]]) +; CHECK-NEXT: br i1 [[CMP:%.*]], label [[B2:%.*]], label [[B1:%.*]] +; CHECK: b1: +; CHECK-NEXT: [[RES2:%.*]] = load i8, i8* [[P]] +; CHECK-NEXT: [[RES3:%.*]] = add i8 [[RES1]], [[RES2]] +; CHECK-NEXT: br label [[ALIVE:%.*]] +; CHECK: b2: +; CHECK-NEXT: [[RES_DEAD:%.*]] = add i8 [[RES1]], [[RES1]] +; CHECK-NEXT: br label [[ALIVE]] +; CHECK: alive: +; CHECK-NEXT: [[RES_PHI:%.*]] = phi i8 [ [[RES3]], [[B1]] ], [ [[RES_DEAD]], [[B2]] ] +; CHECK-NEXT: ret i8 [[RES_PHI]] +; + +entry: + %res1 = load i8, i8* %p + call void @foo(i8 *%p) + br i1 %cmp, label %b2, label %b1 +b1: + %res2 = load i8, i8* %p + %res3 = add i8 %res1, %res2 + br label 
%alive +b2: + %v = load i8, i8* %p, !invariant.load !1 + %res.dead = add i8 %v, %res1 + br label %alive +alive: + %res.phi = phi i8 [%res3, %b1], [%res.dead, %b2] + ret i8 %res.phi +} + +; This is essentially the same test case as the above one but with %b1 and %b2 +; swapped in "br i1 %cmp, label %b1, label %b2" instruction. That helps us to +; ensure that results doesn't depend on visiting order. +define i8 @test3(i1 %cmp, i8 *%p) { +; CHECK-LABEL: @test3( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[RES1:%.*]] = load i8, i8* [[P:%.*]] +; CHECK-NEXT: call void @foo(i8* [[P]]) +; CHECK-NEXT: br i1 [[CMP:%.*]], label [[B1:%.*]], label [[B2:%.*]] +; CHECK: b1: +; CHECK-NEXT: [[RES2:%.*]] = load i8, i8* [[P]] +; CHECK-NEXT: [[RES3:%.*]] = add i8 [[RES1]], [[RES2]] +; CHECK-NEXT: br label [[ALIVE:%.*]] +; CHECK: b2: +; CHECK-NEXT: [[RES_DEAD:%.*]] = add i8 [[RES1]], [[RES1]] +; CHECK-NEXT: br label [[ALIVE]] +; CHECK: alive: +; CHECK-NEXT: [[RES_PHI:%.*]] = phi i8 [ [[RES3]], [[B1]] ], [ [[RES_DEAD]], [[B2]] ] +; CHECK-NEXT: ret i8 [[RES_PHI]] +; +entry: + %res1 = load i8, i8* %p + call void @foo(i8 *%p) + br i1 %cmp, label %b1, label %b2 +b1: + %res2 = load i8, i8* %p + %res3 = add i8 %res1, %res2 + br label %alive +b2: + %v = load i8, i8* %p, !invariant.load !1 + %res.dead = add i8 %v, %res1 + br label %alive +alive: + %res.phi = phi i8 [%res3, %b1], [%res.dead, %b2] + ret i8 %res.phi +} + +!1 = !{}