diff --git a/llvm/include/llvm/Analysis/LazyCallGraph.h b/llvm/include/llvm/Analysis/LazyCallGraph.h --- a/llvm/include/llvm/Analysis/LazyCallGraph.h +++ b/llvm/include/llvm/Analysis/LazyCallGraph.h @@ -1054,6 +1054,30 @@ /// fully visited by the DFS prior to calling this routine. void removeDeadFunction(Function &F); + /// Add a new function split/outlined from an existing function. + /// + /// The new function may only reference other functions that the original + /// function did. + /// + /// The original function must reference (either directly or indirectly) the + /// new function. + /// + /// The new function may also reference the original function. + /// It may end up in a parent SCC in the case that the original function's + /// edge to the new function is a ref edge, and the edge back is a call edge. + void addSplitFunction(Function &OriginalFunction, Function &NewFunction); + + /// Add new ref-recursive functions split/outlined from an existing function. + /// + /// The new functions may only reference other functions that the original + /// function did. The new functions may reference (not call) the original + /// function. + /// + /// The original function must reference (not call) all new functions. + /// All new functions must reference (not call) each other. + void addSplitRefRecursiveFunctions(Function &OriginalFunction, + ArrayRef NewFunctions); + ///@} ///@{ diff --git a/llvm/include/llvm/Transforms/Utils/CallGraphUpdater.h b/llvm/include/llvm/Transforms/Utils/CallGraphUpdater.h --- a/llvm/include/llvm/Transforms/Utils/CallGraphUpdater.h +++ b/llvm/include/llvm/Transforms/Utils/CallGraphUpdater.h @@ -87,7 +87,7 @@ /// If a new function was created by outlining, this method can be called /// to update the call graph for the new function. Note that the old one /// still needs to be re-analyzed or manually updated. 
- void registerOutlinedFunction(Function &NewFn); + void registerOutlinedFunction(Function &OriginalFn, Function &NewFn); /// Replace \p OldFn in the call graph (and SCC) with \p NewFn. The uses /// outside the call graph and the function \p OldFn are not modified. diff --git a/llvm/lib/Analysis/CGSCCPassManager.cpp b/llvm/lib/Analysis/CGSCCPassManager.cpp --- a/llvm/lib/Analysis/CGSCCPassManager.cpp +++ b/llvm/lib/Analysis/CGSCCPassManager.cpp @@ -913,7 +913,6 @@ SmallSetVector DemotedCallTargets; SmallSetVector NewCallEdges; SmallSetVector NewRefEdges; - SmallSetVector NewNodes; // First walk the function and handle all called functions. We do this first // because if there is a single call edge, whether there are ref edges is @@ -923,10 +922,8 @@ if (Function *Callee = CB->getCalledFunction()) { if (Visited.insert(Callee).second && !Callee->isDeclaration()) { Node *CalleeN = G.lookup(*Callee); - if (!CalleeN) { - CalleeN = &G.get(*Callee); - NewNodes.insert(CalleeN); - } + assert(CalleeN && + "Visited function should already have an associated node"); Edge *E = N->lookup(*CalleeN); assert((E || !FunctionPass) && "No function transformations should introduce *new* " @@ -961,10 +958,8 @@ auto VisitRef = [&](Function &Referee) { Node *RefereeN = G.lookup(Referee); - if (!RefereeN) { - RefereeN = &G.get(Referee); - NewNodes.insert(RefereeN); - } + assert(RefereeN && + "Visited function should already have an associated node"); Edge *E = N->lookup(*RefereeN); assert((E || !FunctionPass) && "No function transformations should introduce *new* ref " @@ -980,9 +975,6 @@ }; LazyCallGraph::visitReferences(Worklist, Visited, VisitRef); - for (Node *NewNode : NewNodes) - G.initNode(*NewNode, *C); - // Handle new ref edges. 
for (Node *RefTarget : NewRefEdges) { SCC &TargetC = *G.lookupSCC(*RefTarget); diff --git a/llvm/lib/Analysis/LazyCallGraph.cpp b/llvm/lib/Analysis/LazyCallGraph.cpp --- a/llvm/lib/Analysis/LazyCallGraph.cpp +++ b/llvm/lib/Analysis/LazyCallGraph.cpp @@ -19,6 +19,7 @@ #include "llvm/Config/llvm-config.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/InstIterator.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Module.h" #include "llvm/IR/PassManager.h" @@ -255,6 +256,24 @@ "Must set low link to -1 when adding a node to an SCC!"); for (Edge &E : **N) assert(E.getNode().isPopulated() && "Can't have an unpopulated node!"); + +#ifdef EXPENSIVE_CHECKS + // Verify that all nodes in this SCC can reach all other nodes. + SmallVector Worklist; + SmallPtrSet Visited; + Worklist.push_back(N); + while (!Worklist.empty()) { + Node *VisitingNode = Worklist.pop_back_val(); + if (!Visited.insert(VisitingNode).second) + continue; + for (Edge &E : (*VisitingNode)->calls()) + Worklist.push_back(&E.getNode()); + } + for (Node *NodeToVisit : Nodes) { + assert(Visited.contains(NodeToVisit) && + "Cannot reach all nodes within SCC"); + } +#endif } } #endif @@ -357,6 +376,31 @@ } } } + +#ifdef EXPENSIVE_CHECKS + // Verify that all nodes in this RefSCC can reach all other nodes. + SmallVector Nodes; + for (SCC *C : SCCs) { + for (Node &N : *C) + Nodes.push_back(&N); + } + for (Node *N : Nodes) { + SmallVector Worklist; + SmallPtrSet Visited; + Worklist.push_back(N); + while (!Worklist.empty()) { + Node *VisitingNode = Worklist.pop_back_val(); + if (!Visited.insert(VisitingNode).second) + continue; + for (Edge &E : **VisitingNode) + Worklist.push_back(&E.getNode()); + } + for (Node *NodeToVisit : Nodes) { + assert(Visited.contains(NodeToVisit) && + "Cannot reach all nodes within RefSCC"); + } + } +#endif } #endif @@ -1565,6 +1609,221 @@ // allocators. 
} +// Gets the Edge::Kind from one function to another by looking at the function's +// instructions. Asserts if there is no edge. +static LazyCallGraph::Edge::Kind getEdgeKind(Function &OriginalFunction, + Function &NewFunction) { + // In release builds, assume that if there are no direct calls to the new + // function, then there is a ref edge. In debug builds, keep track of + // references to assert that there is actually a ref edge if there is no call + // edge. +#ifndef NDEBUG + SmallVector Worklist; + SmallPtrSet Visited; +#endif + + for (Instruction &I : instructions(OriginalFunction)) { + if (auto *CB = dyn_cast(&I)) { + if (Function *Callee = CB->getCalledFunction()) { + if (Callee == &NewFunction) + return LazyCallGraph::Edge::Kind::Call; + } + } +#ifndef NDEBUG + for (Value *Op : I.operand_values()) { + if (Constant *C = dyn_cast(Op)) { + if (Visited.insert(C).second) + Worklist.push_back(C); + } + } +#endif + } + +#ifndef NDEBUG + bool FoundNewFunction = false; + LazyCallGraph::visitReferences(Worklist, Visited, [&](Function &F) { + if (&F == &NewFunction) + FoundNewFunction = true; + }); + assert(FoundNewFunction && "No edge from original function to new function"); +#endif + + return LazyCallGraph::Edge::Kind::Ref; +} + +void LazyCallGraph::addSplitFunction(Function &OriginalFunction, + Function &NewFunction) { + assert(lookup(OriginalFunction) && + "Original function's node should already exist"); + Node &OriginalN = get(OriginalFunction); + SCC *OriginalC = lookupSCC(OriginalN); + RefSCC *OriginalRC = lookupRefSCC(OriginalN); + +#ifndef NDEBUG + OriginalRC->verify(); + auto VerifyOnExit = make_scope_exit([&]() { OriginalRC->verify(); }); +#endif + + assert(!lookup(NewFunction) && + "New function's node should not already exist"); + Node &NewN = get(NewFunction); + NewN.populate(); + NewN.DFSNumber = -1; + NewN.LowLink = -1; + NodeMap[&NewFunction] = &NewN; + + Edge::Kind EK = getEdgeKind(OriginalFunction, NewFunction); + + SCC *NewC = nullptr; + for 
(Edge &E : *NewN) { + Node &EN = E.getNode(); + if (EK == Edge::Kind::Call && E.isCall() && lookupSCC(EN) == OriginalC) { + // If the edge to the new function is a call edge and there is a call edge + // from the new function to any function in the original function's SCC, + // it is in the same SCC (and RefSCC) as the original function. + NewC = lookupSCC(OriginalN); + NewC->Nodes.push_back(&NewN); + break; + } + } + + if (!NewC) { + for (Edge &E : *NewN) { + Node &EN = E.getNode(); + if (lookupRefSCC(EN) == OriginalRC) { + // If there is any edge from the new function to any function in the + // original function's RefSCC, it is in the same RefSCC as the original + // function but a new SCC. + RefSCC *NewRC = lookupRefSCC(OriginalN); + NewC = createSCC(*NewRC, SmallVector({&NewN})); + + // The new function's SCC is not the same as the original function's + // SCC, since that case was handled earlier. If the edge from the + // original function to the new function was a call edge, then we need + // to insert the newly created function's SCC before the original + // function's SCC. Otherwise either the new SCC comes after the original + // function's SCC, or it doesn't matter, and in both cases we can add it + // to the very end. + int InsertIndex = EK == Edge::Kind::Call ? NewRC->SCCIndices[OriginalC] + : NewRC->SCCIndices.size(); + NewRC->SCCs.insert(NewRC->SCCs.begin() + InsertIndex, NewC); + for (int i = InsertIndex, Size = NewRC->SCCs.size(); i < Size; ++i) + NewRC->SCCIndices[NewRC->SCCs[i]] = i; + + break; + } + } + } + + if (!NewC) { + // We didn't find any edges back to the original function's RefSCC, so the + // new function belongs in a new RefSCC. The new RefSCC goes before the + // original function's RefSCC. 
+ RefSCC *NewRC = createRefSCC(*this); + NewC = createSCC(*NewRC, SmallVector({&NewN})); + NewRC->SCCIndices[NewC] = 0; + NewRC->SCCs.push_back(NewC); + auto OriginalRCIndex = RefSCCIndices.find(OriginalRC)->second; + PostOrderRefSCCs.insert(PostOrderRefSCCs.begin() + OriginalRCIndex, NewRC); + for (int i = OriginalRCIndex, Size = PostOrderRefSCCs.size(); i < Size; ++i) + RefSCCIndices[PostOrderRefSCCs[i]] = i; + } + + SCCMap[&NewN] = NewC; + + OriginalN->insertEdgeInternal(NewN, EK); +} + +void LazyCallGraph::addSplitRefRecursiveFunctions( + Function &OriginalFunction, ArrayRef NewFunctions) { + assert(!NewFunctions.empty() && "Can't add zero functions"); + assert(lookup(OriginalFunction) && + "Original function's node should already exist"); + Node &OriginalN = get(OriginalFunction); + RefSCC *OriginalRC = lookupRefSCC(OriginalN); + +#ifndef NDEBUG + OriginalRC->verify(); + auto VerifyOnExit = make_scope_exit([&]() { + OriginalRC->verify(); +#ifdef EXPENSIVE_CHECKS + for (Function *NewFunction : NewFunctions) + lookupRefSCC(get(*NewFunction))->verify(); +#endif + }); +#endif + + bool ExistsRefToOriginalRefSCC = false; + + for (Function *NewFunction : NewFunctions) { + Node &NewN = get(*NewFunction); + NewN.populate(); + NewN.DFSNumber = -1; + NewN.LowLink = -1; + NodeMap[NewFunction] = &NewN; + + OriginalN->insertEdgeInternal(NewN, Edge::Kind::Ref); + + // Check if there is any edge from any new function back to any function in + // the original function's RefSCC. + for (Edge &E : *NewN) { + if (lookupRefSCC(E.getNode()) == OriginalRC) { + ExistsRefToOriginalRefSCC = true; + break; + } + } + } + + RefSCC *NewRC; + if (ExistsRefToOriginalRefSCC) { + // If there is any edge from any new function to any function in the + // original function's RefSCC, all new functions will be in the same RefSCC + // as the original function. + NewRC = OriginalRC; + } else { + // Otherwise the new functions are in their own RefSCC. 
+ NewRC = createRefSCC(*this); + // The new RefSCC goes before the original function's RefSCC in postorder + // since there are only edges from the original function's RefSCC to the new + // RefSCC. + auto OriginalRCIndex = RefSCCIndices.find(OriginalRC)->second; + PostOrderRefSCCs.insert(PostOrderRefSCCs.begin() + OriginalRCIndex, NewRC); + for (int i = OriginalRCIndex, Size = PostOrderRefSCCs.size(); i < Size; ++i) + RefSCCIndices[PostOrderRefSCCs[i]] = i; + } + + for (Function *NewFunction : NewFunctions) { + Node &NewN = get(*NewFunction); + // Each new function is in its own new SCC. The original function can only + // have a ref edge to new functions, and no other existing functions can + // have references to new functions. Each new function only has a ref edge + // to the other new functions. + SCC *NewC = createSCC(*NewRC, SmallVector({&NewN})); + // The new SCCs are either sibling SCCs or parent SCCs to all other existing + // SCCs in the RefSCC. Either way, they can go at the back of the postorder + // SCC list. 
+ auto Index = NewRC->SCCIndices.size(); + NewRC->SCCIndices[NewC] = Index; + NewRC->SCCs.push_back(NewC); + SCCMap[&NewN] = NewC; + } + +#ifndef NDEBUG + for (Function *F1 : NewFunctions) { + assert(getEdgeKind(OriginalFunction, *F1) == Edge::Kind::Ref && + "Expected ref edges from original function to every new function"); + Node &N1 = get(*F1); + for (Function *F2 : NewFunctions) { + if (F1 == F2) + continue; + Node &N2 = get(*F2); + assert(!N1->lookup(N2)->isCall() && + "Edges between new functions must be ref edges"); + } + } +#endif +} + LazyCallGraph::Node &LazyCallGraph::insertInto(Function &F, Node *&MappedN) { return *new (MappedN = BPA.Allocate()) Node(*this, F); } diff --git a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp --- a/llvm/lib/Transforms/Coroutines/CoroSplit.cpp +++ b/llvm/lib/Transforms/Coroutines/CoroSplit.cpp @@ -28,6 +28,7 @@ #include "llvm/ADT/Twine.h" #include "llvm/Analysis/CallGraph.h" #include "llvm/Analysis/CallGraphSCCPass.h" +#include "llvm/Analysis/LazyCallGraph.h" #include "llvm/IR/Argument.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/BasicBlock.h" @@ -1747,21 +1748,29 @@ End->eraseFromParent(); } - postSplitCleanup(N.getFunction()); + switch (Shape.ABI) { + case coro::ABI::Switch: + // Each clone in the Switch lowering is independent of the other clones. Let + // the LazyCallGraph know about each one separately. + for (Function *Clone : Clones) + CG.addSplitFunction(N.getFunction(), *Clone); + break; + case coro::ABI::Async: + case coro::ABI::Retcon: + case coro::ABI::RetconOnce: + // Each clone in the Async/Retcon lowering references all of the other clones. + // Let the LazyCallGraph know about all of them at once. + CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones); + break; + } - // We've inserted instructions into coroutine 'f' that reference the three new - // coroutine funclets.
We must now update the call graph so that reference - // edges between 'f' and its funclets are added to it. LazyCallGraph only - // allows CGSCC passes to insert "trivial" reference edges. We've ensured - // above, by inserting the funclets into the same SCC as the corutine, that - // the edges are trivial. - // - // N.B.: If we didn't update the call graph here, a CGSCCToFunctionPassAdaptor - // later in this CGSCC pass pipeline may be run, triggering a call graph - // update of its own. Function passes run by the adaptor are not permitted to - // add new edges of any kind to the graph, and the new edges inserted by this - // pass would be misattributed to that unrelated function pass. + // Let the CGSCC infra handle the changes to the original function. updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM); + + // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges + // to the split functions. + postSplitCleanup(N.getFunction()); + updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM); } // When we see the coroutine the first time, we insert an indirect call to a @@ -2006,17 +2015,7 @@ LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName() << "' state: " << Value << "\n"); if (Value == UNPREPARED_FOR_SPLIT) { - // Enqueue a second iteration of the CGSCC pipeline. - // N.B.: - // The CoroSplitLegacy pass "triggers" a restart of the CGSCC pass - // pipeline by inserting an indirect function call that the - // CoroElideLegacy pass then replaces with a direct function call. The - // legacy CGSCC pipeline's implicit behavior was as if wrapped in the new - // pass manager abstraction DevirtSCCRepeatedPass. - // - // This pass does not need to "trigger" another run of the pipeline. - // Instead, it simply enqueues the same RefSCC onto the pipeline's - // worklist. + // Enqueue a second iteration of the CGSCC pipeline on this SCC. 
UR.CWorklist.insert(&C); F.addFnAttr(CORO_PRESPLIT_ATTR, PREPARED_FOR_SPLIT); continue; @@ -2027,9 +2026,12 @@ const coro::Shape Shape = splitCoroutine(F, Clones, ReuseFrameSlot); updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM); - if (Shape.ABI == coro::ABI::Async && !Shape.CoroSuspends.empty()) { - // We want the inliner to be run on the newly inserted functions. - UR.CWorklist.insert(&C); + if ((Shape.ABI == coro::ABI::Async || Shape.ABI == coro::ABI::Retcon || + Shape.ABI == coro::ABI::RetconOnce) && + !Shape.CoroSuspends.empty()) { + // Run the CGSCC pipeline on the newly split functions. + // All clones will be in the same RefSCC, so choose a random clone. + UR.RCWorklist.insert(CG.lookupRefSCC(CG.get(*Clones[0]))); } } diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp --- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp +++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp @@ -750,7 +750,7 @@ } assert(OutlinedFn != OriginalFn && "Outlining failed"); - CGUpdater.registerOutlinedFunction(*OutlinedFn); + CGUpdater.registerOutlinedFunction(*OriginalFn, *OutlinedFn); CGUpdater.reanalyzeFunction(*OriginalFn); NumOpenMPParallelRegionsMerged += MergableCIs.size(); diff --git a/llvm/lib/Transforms/Utils/CallGraphUpdater.cpp b/llvm/lib/Transforms/Utils/CallGraphUpdater.cpp --- a/llvm/lib/Transforms/Utils/CallGraphUpdater.cpp +++ b/llvm/lib/Transforms/Utils/CallGraphUpdater.cpp @@ -96,9 +96,12 @@ } } -void CallGraphUpdater::registerOutlinedFunction(Function &NewFn) { +void CallGraphUpdater::registerOutlinedFunction(Function &OriginalFn, + Function &NewFn) { if (CG) CG->addToCallGraph(&NewFn); + else if (LCG) + LCG->addSplitFunction(OriginalFn, NewFn); } void CallGraphUpdater::removeFunction(Function &DeadFn) { diff --git a/llvm/test/Transforms/Coroutines/coro-async.ll b/llvm/test/Transforms/Coroutines/coro-async.ll --- a/llvm/test/Transforms/Coroutines/coro-async.ll +++ b/llvm/test/Transforms/Coroutines/coro-async.ll @@ 
-1,4 +1,5 @@ ; RUN: opt < %s -enable-coroutines -O2 -S | FileCheck --check-prefixes=CHECK %s +; RUN: opt < %s -enable-coroutines -passes='default' -S | FileCheck --check-prefixes=CHECK %s target datalayout = "p:64:64:64" diff --git a/llvm/test/Transforms/Coroutines/coro-retcon-resume-values2.ll b/llvm/test/Transforms/Coroutines/coro-retcon-resume-values2.ll --- a/llvm/test/Transforms/Coroutines/coro-retcon-resume-values2.ll +++ b/llvm/test/Transforms/Coroutines/coro-retcon-resume-values2.ll @@ -1,4 +1,5 @@ ; RUN: opt < %s -coro-split -coro-cleanup -S | FileCheck %s +; RUN: opt < %s -passes='coro-split,coro-cleanup' -S | FileCheck %s define i8* @f(i8* %buffer, i32 %n) "coroutine.presplit"="1" { entry: diff --git a/llvm/test/Transforms/Coroutines/coro-split-recursive.ll b/llvm/test/Transforms/Coroutines/coro-split-recursive.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/Coroutines/coro-split-recursive.ll @@ -0,0 +1,36 @@ +; RUN: opt -passes='default' -enable-coroutines -S < %s | FileCheck %s + +declare token @llvm.coro.id(i32, i8* readnone, i8* nocapture readonly, i8*) + +declare i8* @llvm.coro.begin(token, i8* writeonly) + +declare token @llvm.coro.save(i8*) + +declare i8 @llvm.coro.suspend(token, i1) + +; CHECK-LABEL: define void @foo() +; CHECK-LABEL: define {{.*}}void @foo.resume( +; CHECK: call void @foo() +; CHECK-LABEL: define {{.*}}void @foo.destroy( + +define void @foo() { +entry: + %__promise = alloca i32, align 8 + %0 = bitcast i32* %__promise to i8* + %1 = call token @llvm.coro.id(i32 16, i8* %0, i8* null, i8* null) + %2 = call i8* @llvm.coro.begin(token %1, i8* null) + br i1 undef, label %if.then154, label %init.suspend + +init.suspend: ; preds = %entry + %save = call token @llvm.coro.save(i8* null) + %3 = call i8 @llvm.coro.suspend(token %save, i1 false) + %cond = icmp eq i8 %3, 0 + br i1 %cond, label %if.then154, label %invoke.cont163 + +if.then154: ; preds = %init.suspend, %entry + call void @foo() + br label %invoke.cont163 + 
+invoke.cont163: ; preds = %if.then154, %init.suspend + ret void +} diff --git a/llvm/unittests/Analysis/CGSCCPassManagerTest.cpp b/llvm/unittests/Analysis/CGSCCPassManagerTest.cpp --- a/llvm/unittests/Analysis/CGSCCPassManagerTest.cpp +++ b/llvm/unittests/Analysis/CGSCCPassManagerTest.cpp @@ -1490,15 +1490,15 @@ BasicBlock *BB = BasicBlock::Create(FnewF->getContext(), "", FnewF); ReturnInst::Create(FnewF->getContext(), BB); + // And insert a call to `newF` + Instruction *IP = &FnF->getEntryBlock().front(); + (void)CallInst::Create(FnewF, {}, "", IP); + // Use the CallGraphUpdater to update the call graph for the new // function. CallGraphUpdater CGU; CGU.initialize(CG, C, AM, UR); - CGU.registerOutlinedFunction(*FnewF); - - // And insert a call to `newF` - Instruction *IP = &FnF->getEntryBlock().front(); - (void)CallInst::Create(FnewF, {}, "", IP); + CGU.registerOutlinedFunction(*FnF, *FnewF); auto &FN = *llvm::find_if( C, [](LazyCallGraph::Node &N) { return N.getName() == "f"; }); @@ -1533,7 +1533,6 @@ // function. CallGraphUpdater CGU; CGU.initialize(CG, C, AM, UR); - CGU.registerOutlinedFunction(*FnewF); // And insert a call to `newF` Instruction *IP = &FnF->getEntryBlock().front(); @@ -1542,9 +1541,8 @@ auto &FN = *llvm::find_if( C, [](LazyCallGraph::Node &N) { return N.getName() == "f"; }); - ASSERT_DEATH( - updateCGAndAnalysisManagerForFunctionPass(CG, C, FN, AM, UR, FAM), - "Any new calls should be modeled as"); + ASSERT_DEATH(updateCGAndAnalysisManagerForCGSCCPass(CG, C, FN, AM, UR, FAM), + "should already have an associated node"); })); ModulePassManager MPM(/*DebugLogging*/ true); @@ -1767,6 +1765,12 @@ (void)CastInst::CreatePointerCast( &F, Type::getInt8PtrTy(F.getContext()), "f.ref", &F.getEntryBlock().front()); + // 3. Insert a ref edge from 'f' to 'g'. 
+ (void)CastInst::CreatePointerCast( + G, Type::getInt8PtrTy(F.getContext()), "g.ref", + &F.getEntryBlock().front()); + + CG.addSplitFunction(F, *G); ASSERT_FALSE(verifyModule(*F.getParent(), &errs())); @@ -1786,7 +1790,7 @@ ASSERT_TRUE(Ran); } -// Start with f, end with f -> (g1 <-> g2) and f -ref-> (h1 <-> h2). +// Start with f, end with f -> g1, f -> g2, and f -ref-> (h1 <-ref-> h2). TEST_F(CGSCCPassManagerTest, TestInsertionOfNewFunctions2) { std::unique_ptr M = parseIR("define void @f() {\n" "entry:\n" @@ -1811,7 +1815,7 @@ if (F.getName() != "f") continue; - // Create mutually recursive functions (call) 'g1' and 'g2'. + // Create g1 and g2. auto *G1 = Function::Create(F.getFunctionType(), F.getLinkage(), F.getAddressSpace(), "g1", F.getParent()); auto *G2 = Function::Create(F.getFunctionType(), F.getLinkage(), @@ -1820,9 +1824,7 @@ BasicBlock::Create(F.getParent()->getContext(), "entry", G1); BasicBlock *G2BB = BasicBlock::Create(F.getParent()->getContext(), "entry", G2); - (void)CallInst::Create(G2, {}, "", G1BB); (void)ReturnInst::Create(G1->getContext(), G1BB); - (void)CallInst::Create(G1, {}, "", G2BB); (void)ReturnInst::Create(G2->getContext(), G2BB); // Add 'f -> g1' call edge. @@ -1830,6 +1832,9 @@ // Add 'f -> g2' call edge. (void)CallInst::Create(G2, {}, "", &F.getEntryBlock().front()); + CG.addSplitFunction(F, *G1); + CG.addSplitFunction(F, *G2); + // Create mutually recursive functions (ref only) 'h1' and 'h2'. 
auto *H1 = Function::Create(F.getFunctionType(), F.getLinkage(), F.getAddressSpace(), "h1", F.getParent()); @@ -1853,6 +1858,8 @@ (void)CastInst::CreatePointerCast(H2, Type::getInt8PtrTy(F.getContext()), "h2.ref", &F.getEntryBlock().front()); + CG.addSplitRefRecursiveFunctions(F, SmallVector({H1, H2})); + ASSERT_FALSE(verifyModule(*F.getParent(), &errs())); ASSERT_NO_FATAL_FAILURE( diff --git a/llvm/unittests/Analysis/LazyCallGraphTest.cpp b/llvm/unittests/Analysis/LazyCallGraphTest.cpp --- a/llvm/unittests/Analysis/LazyCallGraphTest.cpp +++ b/llvm/unittests/Analysis/LazyCallGraphTest.cpp @@ -13,6 +13,7 @@ #include "llvm/IR/Instructions.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" +#include "llvm/IR/Verifier.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/SourceMgr.h" #include "gtest/gtest.h" @@ -2170,4 +2171,685 @@ EXPECT_EQ(&RC2, &*I++); EXPECT_EQ(CG.postorder_ref_scc_end(), I); } + +TEST(LazyCallGraphTest, AddSplitFunction1) { + LLVMContext Context; + std::unique_ptr M = parseAssembly(Context, "define void @f() {\n" + " ret void\n" + "}\n"); + LazyCallGraph CG = buildCG(*M); + + Function &F = lookupFunction(*M, "f"); + LazyCallGraph::Node &FN = CG.get(F); + + // Force the graph to be fully expanded. + CG.buildRefSCCs(); + auto I = CG.postorder_ref_scc_begin(); + LazyCallGraph::RefSCC *ORC = &*I++; + EXPECT_EQ(CG.postorder_ref_scc_end(), I); + + auto *G = Function::Create(F.getFunctionType(), F.getLinkage(), + F.getAddressSpace(), "g", F.getParent()); + BasicBlock *GBB = BasicBlock::Create(Context, "", G); + (void)ReturnInst::Create(Context, GBB); + + // Create f -call-> g. 
+ (void)CallInst::Create(G, {}, "", &*F.getEntryBlock().begin()); + + EXPECT_FALSE(verifyModule(*M, &errs())); + + CG.addSplitFunction(F, *G); + + LazyCallGraph::Node *GN = CG.lookup(*G); + EXPECT_TRUE(GN); + + I = CG.postorder_ref_scc_begin(); + LazyCallGraph::RefSCC *RC1 = &*I++; + EXPECT_EQ(RC1, CG.lookupRefSCC(*GN)); + LazyCallGraph::RefSCC *RC2 = &*I++; + EXPECT_EQ(RC2, ORC); + EXPECT_EQ(RC2, CG.lookupRefSCC(FN)); + EXPECT_EQ(CG.postorder_ref_scc_end(), I); +} + +TEST(LazyCallGraphTest, AddSplitFunction2) { + LLVMContext Context; + std::unique_ptr M = parseAssembly(Context, "define void @f() {\n" + " ret void\n" + "}\n"); + LazyCallGraph CG = buildCG(*M); + + Function &F = lookupFunction(*M, "f"); + LazyCallGraph::Node &FN = CG.get(F); + + // Force the graph to be fully expanded. + CG.buildRefSCCs(); + auto I = CG.postorder_ref_scc_begin(); + LazyCallGraph::RefSCC *ORC = &*I++; + EXPECT_EQ(CG.postorder_ref_scc_end(), I); + + auto *G = Function::Create(F.getFunctionType(), F.getLinkage(), + F.getAddressSpace(), "g", F.getParent()); + BasicBlock *GBB = BasicBlock::Create(Context, "", G); + (void)ReturnInst::Create(Context, GBB); + + // Create f -ref-> g. 
+ (void)CastInst::CreatePointerCast(G, Type::getInt8PtrTy(Context), "", + &*F.getEntryBlock().begin()); + + EXPECT_FALSE(verifyModule(*M, &errs())); + + CG.addSplitFunction(F, *G); + + LazyCallGraph::Node *GN = CG.lookup(*G); + EXPECT_TRUE(GN); + + I = CG.postorder_ref_scc_begin(); + LazyCallGraph::RefSCC *RC1 = &*I++; + EXPECT_EQ(RC1, CG.lookupRefSCC(*GN)); + LazyCallGraph::RefSCC *RC2 = &*I++; + EXPECT_EQ(RC2, ORC); + EXPECT_EQ(RC2, CG.lookupRefSCC(FN)); + EXPECT_EQ(CG.postorder_ref_scc_end(), I); +} + +TEST(LazyCallGraphTest, AddSplitFunction3) { + LLVMContext Context; + std::unique_ptr M = parseAssembly(Context, "define void @f() {\n" + " ret void\n" + "}\n"); + LazyCallGraph CG = buildCG(*M); + + Function &F = lookupFunction(*M, "f"); + LazyCallGraph::Node &FN = CG.get(F); + + // Force the graph to be fully expanded. + CG.buildRefSCCs(); + auto I = CG.postorder_ref_scc_begin(); + LazyCallGraph::RefSCC *ORC = &*I++; + EXPECT_EQ(CG.postorder_ref_scc_end(), I); + + auto *G = Function::Create(F.getFunctionType(), F.getLinkage(), + F.getAddressSpace(), "g", F.getParent()); + BasicBlock *GBB = BasicBlock::Create(Context, "", G); + // Create g -ref-> f. + (void)CastInst::CreatePointerCast(&F, Type::getInt8PtrTy(Context), "", GBB); + (void)ReturnInst::Create(Context, GBB); + + // Create f -call-> g. 
+ (void)CallInst::Create(G, {}, "", &*F.getEntryBlock().begin()); + + EXPECT_FALSE(verifyModule(*M, &errs())); + + CG.addSplitFunction(F, *G); + + LazyCallGraph::Node *GN = CG.lookup(*G); + EXPECT_TRUE(GN); + + I = CG.postorder_ref_scc_begin(); + LazyCallGraph::RefSCC *RC = &*I++; + EXPECT_EQ(RC, CG.lookupRefSCC(*GN)); + EXPECT_EQ(RC, ORC); + EXPECT_EQ(CG.postorder_ref_scc_end(), I); + + EXPECT_EQ(2, RC->size()); + EXPECT_EQ(CG.lookupSCC(*GN), &(*RC)[0]); + EXPECT_EQ(CG.lookupSCC(FN), &(*RC)[1]); +} + +TEST(LazyCallGraphTest, AddSplitFunction4) { + LLVMContext Context; + std::unique_ptr M = parseAssembly(Context, "define void @f() {\n" + " ret void\n" + "}\n"); + LazyCallGraph CG = buildCG(*M); + + Function &F = lookupFunction(*M, "f"); + LazyCallGraph::Node &FN = CG.get(F); + + // Force the graph to be fully expanded. + CG.buildRefSCCs(); + auto I = CG.postorder_ref_scc_begin(); + LazyCallGraph::RefSCC *ORC = &*I++; + EXPECT_EQ(CG.postorder_ref_scc_end(), I); + + auto *G = Function::Create(F.getFunctionType(), F.getLinkage(), + F.getAddressSpace(), "g", F.getParent()); + BasicBlock *GBB = BasicBlock::Create(Context, "", G); + // Create g -ref-> f. + (void)CastInst::CreatePointerCast(&F, Type::getInt8PtrTy(Context), "", GBB); + (void)ReturnInst::Create(Context, GBB); + + // Create f -ref-> g. + (void)CastInst::CreatePointerCast(G, Type::getInt8PtrTy(Context), "", + &*F.getEntryBlock().begin()); + + EXPECT_FALSE(verifyModule(*M, &errs())); + + CG.addSplitFunction(F, *G); + + LazyCallGraph::Node *GN = CG.lookup(*G); + EXPECT_TRUE(GN); + + I = CG.postorder_ref_scc_begin(); + LazyCallGraph::RefSCC *RC = &*I++; + EXPECT_EQ(RC, CG.lookupRefSCC(*GN)); + EXPECT_EQ(RC, ORC); + EXPECT_EQ(CG.postorder_ref_scc_end(), I); + + // Order doesn't matter for sibling SCCs. 
+  EXPECT_EQ(2, RC->size());
+  EXPECT_EQ(&CG.lookupSCC(*GN)->getOuterRefSCC(), RC);
+  EXPECT_EQ(&CG.lookupSCC(FN)->getOuterRefSCC(), RC);
+}
+
+// f refs the new function g and g calls back into f: both stay in the
+// original RefSCC but in two distinct SCCs, with f's SCC (g's callee)
+// ordered first.
+TEST(LazyCallGraphTest, AddSplitFunction5) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M = parseAssembly(Context, "define void @f() {\n"
+                                                     "  ret void\n"
+                                                     "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  Function &F = lookupFunction(*M, "f");
+  LazyCallGraph::Node &FN = CG.get(F);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *ORC = &*I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  auto *G = Function::Create(F.getFunctionType(), F.getLinkage(),
+                             F.getAddressSpace(), "g", F.getParent());
+  BasicBlock *GBB = BasicBlock::Create(Context, "", G);
+  // Create g -call-> f.
+  (void)CallInst::Create(&F, {}, "", GBB);
+  (void)ReturnInst::Create(Context, GBB);
+
+  // Create f -ref-> g.
+  (void)CastInst::CreatePointerCast(G, Type::getInt8PtrTy(Context), "",
+                                    &*F.getEntryBlock().begin());
+
+  EXPECT_FALSE(verifyModule(*M, &errs()));
+
+  CG.addSplitFunction(F, *G);
+
+  LazyCallGraph::Node *GN = CG.lookup(*G);
+  EXPECT_TRUE(GN);
+
+  I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *RC = &*I++;
+  EXPECT_EQ(RC, CG.lookupRefSCC(*GN));
+  EXPECT_EQ(RC, ORC);
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  EXPECT_EQ(2, RC->size());
+  EXPECT_EQ(CG.lookupSCC(FN), &(*RC)[0]);
+  EXPECT_EQ(CG.lookupSCC(*GN), &(*RC)[1]);
+}
+
+// f and g mutually call each other: they collapse into a single SCC inside
+// the original RefSCC.
+TEST(LazyCallGraphTest, AddSplitFunction6) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M = parseAssembly(Context, "define void @f() {\n"
+                                                     "  ret void\n"
+                                                     "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  Function &F = lookupFunction(*M, "f");
+  LazyCallGraph::Node &FN = CG.get(F);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *ORC = &*I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  auto *G = Function::Create(F.getFunctionType(), F.getLinkage(),
+                             F.getAddressSpace(), "g", F.getParent());
+  BasicBlock *GBB = BasicBlock::Create(Context, "", G);
+  // Create g -call-> f.
+  (void)CallInst::Create(&F, {}, "", GBB);
+  (void)ReturnInst::Create(Context, GBB);
+
+  // Create f -call-> g.
+  (void)CallInst::Create(G, {}, "", &*F.getEntryBlock().begin());
+
+  EXPECT_FALSE(verifyModule(*M, &errs()));
+
+  CG.addSplitFunction(F, *G);
+
+  LazyCallGraph::Node *GN = CG.lookup(*G);
+  EXPECT_TRUE(GN);
+
+  I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *RC = &*I++;
+  EXPECT_EQ(RC, CG.lookupRefSCC(*GN));
+  EXPECT_EQ(RC, ORC);
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  EXPECT_EQ(1, RC->size());
+  EXPECT_EQ(CG.lookupSCC(FN), &(*RC)[0]);
+  EXPECT_EQ(CG.lookupSCC(*GN), &(*RC)[0]);
+}
+
+// f and f2 are mutually recursive; g calls f2 and f calls g, closing a call
+// cycle through all three: one SCC holds f, f2 and g.
+TEST(LazyCallGraphTest, AddSplitFunction7) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M = parseAssembly(Context, "define void @f() {\n"
+                                                     "  call void @f2()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @f2() {\n"
+                                                     "  call void @f()\n"
+                                                     "  ret void\n"
+                                                     "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  Function &F = lookupFunction(*M, "f");
+  LazyCallGraph::Node &FN = CG.get(F);
+  Function &F2 = lookupFunction(*M, "f2");
+  LazyCallGraph::Node &F2N = CG.get(F2);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *ORC = &*I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  auto *G = Function::Create(F.getFunctionType(), F.getLinkage(),
+                             F.getAddressSpace(), "g", F.getParent());
+  BasicBlock *GBB = BasicBlock::Create(Context, "", G);
+  // Create g -call-> f2.
+  (void)CallInst::Create(&F2, {}, "", GBB);
+  (void)ReturnInst::Create(Context, GBB);
+
+  // Create f -call-> g.
+  (void)CallInst::Create(G, {}, "", &*F.getEntryBlock().begin());
+
+  EXPECT_FALSE(verifyModule(*M, &errs()));
+
+  CG.addSplitFunction(F, *G);
+
+  LazyCallGraph::Node *GN = CG.lookup(*G);
+  EXPECT_TRUE(GN);
+
+  I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *RC = &*I++;
+  EXPECT_EQ(RC, CG.lookupRefSCC(*GN));
+  EXPECT_EQ(RC, ORC);
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  EXPECT_EQ(1, RC->size());
+  EXPECT_EQ(CG.lookupSCC(FN), &(*RC)[0]);
+  EXPECT_EQ(CG.lookupSCC(F2N), &(*RC)[0]);
+  EXPECT_EQ(CG.lookupSCC(*GN), &(*RC)[0]);
+}
+
+// g calls f2 but f only refs g: g gets its own SCC, ordered after the
+// {f, f2} call-cycle SCC, all inside the original RefSCC.
+TEST(LazyCallGraphTest, AddSplitFunction8) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M = parseAssembly(Context, "define void @f() {\n"
+                                                     "  call void @f2()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @f2() {\n"
+                                                     "  call void @f()\n"
+                                                     "  ret void\n"
+                                                     "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  Function &F = lookupFunction(*M, "f");
+  LazyCallGraph::Node &FN = CG.get(F);
+  Function &F2 = lookupFunction(*M, "f2");
+  LazyCallGraph::Node &F2N = CG.get(F2);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *ORC = &*I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  auto *G = Function::Create(F.getFunctionType(), F.getLinkage(),
+                             F.getAddressSpace(), "g", F.getParent());
+  BasicBlock *GBB = BasicBlock::Create(Context, "", G);
+  // Create g -call-> f2.
+  (void)CallInst::Create(&F2, {}, "", GBB);
+  (void)ReturnInst::Create(Context, GBB);
+
+  // Create f -ref-> g.
+  (void)CastInst::CreatePointerCast(G, Type::getInt8PtrTy(Context), "",
+                                    &*F.getEntryBlock().begin());
+
+  EXPECT_FALSE(verifyModule(*M, &errs()));
+
+  CG.addSplitFunction(F, *G);
+
+  LazyCallGraph::Node *GN = CG.lookup(*G);
+  EXPECT_TRUE(GN);
+
+  I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *RC = &*I++;
+  EXPECT_EQ(RC, CG.lookupRefSCC(*GN));
+  EXPECT_EQ(RC, ORC);
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  EXPECT_EQ(2, RC->size());
+  EXPECT_EQ(CG.lookupSCC(FN), &(*RC)[0]);
+  EXPECT_EQ(CG.lookupSCC(F2N), &(*RC)[0]);
+  EXPECT_EQ(CG.lookupSCC(*GN), &(*RC)[1]);
+}
+
+// g only refs f2 while f calls g: g's SCC is a callee of {f, f2} and so is
+// ordered first in the shared RefSCC.
+TEST(LazyCallGraphTest, AddSplitFunction9) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M = parseAssembly(Context, "define void @f() {\n"
+                                                     "  call void @f2()\n"
+                                                     "  ret void\n"
+                                                     "}\n"
+                                                     "define void @f2() {\n"
+                                                     "  call void @f()\n"
+                                                     "  ret void\n"
+                                                     "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  Function &F = lookupFunction(*M, "f");
+  LazyCallGraph::Node &FN = CG.get(F);
+  Function &F2 = lookupFunction(*M, "f2");
+  LazyCallGraph::Node &F2N = CG.get(F2);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *ORC = &*I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  auto *G = Function::Create(F.getFunctionType(), F.getLinkage(),
+                             F.getAddressSpace(), "g", F.getParent());
+  BasicBlock *GBB = BasicBlock::Create(Context, "", G);
+  // Create g -ref-> f2.
+  (void)CastInst::CreatePointerCast(&F2, Type::getInt8PtrTy(Context), "", GBB);
+  (void)ReturnInst::Create(Context, GBB);
+
+  // Create f -call-> g.
+  (void)CallInst::Create(G, {}, "", &*F.getEntryBlock().begin());
+
+  EXPECT_FALSE(verifyModule(*M, &errs()));
+
+  CG.addSplitFunction(F, *G);
+
+  LazyCallGraph::Node *GN = CG.lookup(*G);
+  EXPECT_TRUE(GN);
+
+  I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *RC = &*I++;
+  EXPECT_EQ(RC, CG.lookupRefSCC(*GN));
+  EXPECT_EQ(RC, ORC);
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  EXPECT_EQ(2, RC->size());
+  EXPECT_EQ(CG.lookupSCC(*GN), &(*RC)[0]);
+  EXPECT_EQ(CG.lookupSCC(FN), &(*RC)[1]);
+  EXPECT_EQ(CG.lookupSCC(F2N), &(*RC)[1]);
+}
+
+// A single new ref-recursive function with no edge back to f lands in its
+// own RefSCC, ordered before f's.
+TEST(LazyCallGraphTest, AddSplitFunctions1) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M = parseAssembly(Context, "define void @f() {\n"
+                                                     "  ret void\n"
+                                                     "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  Function &F = lookupFunction(*M, "f");
+  LazyCallGraph::Node &FN = CG.get(F);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *ORC = &*I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  auto *G = Function::Create(F.getFunctionType(), F.getLinkage(),
+                             F.getAddressSpace(), "g", F.getParent());
+  BasicBlock *GBB = BasicBlock::Create(Context, "", G);
+  (void)ReturnInst::Create(Context, GBB);
+
+  // Create f -ref-> g.
+  (void)CastInst::CreatePointerCast(G, Type::getInt8PtrTy(Context), "",
+                                    &*F.getEntryBlock().begin());
+
+  EXPECT_FALSE(verifyModule(*M, &errs()));
+
+  CG.addSplitRefRecursiveFunctions(F, SmallVector<Function *, 1>({G}));
+
+  LazyCallGraph::Node *GN = CG.lookup(*G);
+  EXPECT_TRUE(GN);
+
+  I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *RC1 = &*I++;
+  EXPECT_EQ(RC1, CG.lookupRefSCC(*GN));
+  LazyCallGraph::RefSCC *RC2 = &*I++;
+  EXPECT_EQ(RC2, ORC);
+  EXPECT_EQ(RC2, CG.lookupRefSCC(FN));
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+}
+
+// f and g reference each other: one RefSCC containing two sibling SCCs.
+TEST(LazyCallGraphTest, AddSplitFunctions2) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M = parseAssembly(Context, "define void @f() {\n"
+                                                     "  ret void\n"
+                                                     "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  Function &F = lookupFunction(*M, "f");
+  LazyCallGraph::Node &FN = CG.get(F);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *ORC = &*I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  auto *G = Function::Create(F.getFunctionType(), F.getLinkage(),
+                             F.getAddressSpace(), "g", F.getParent());
+  BasicBlock *GBB = BasicBlock::Create(Context, "", G);
+  // Create g -ref-> f.
+  (void)CastInst::CreatePointerCast(&F, Type::getInt8PtrTy(Context), "", GBB);
+  (void)ReturnInst::Create(Context, GBB);
+
+  // Create f -ref-> g.
+  (void)CastInst::CreatePointerCast(G, Type::getInt8PtrTy(Context), "",
+                                    &*F.getEntryBlock().begin());
+
+  EXPECT_FALSE(verifyModule(*M, &errs()));
+
+  CG.addSplitRefRecursiveFunctions(F, SmallVector<Function *, 1>({G}));
+
+  LazyCallGraph::Node *GN = CG.lookup(*G);
+  EXPECT_TRUE(GN);
+
+  I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *RC = &*I++;
+  EXPECT_EQ(RC, CG.lookupRefSCC(*GN));
+  EXPECT_EQ(RC, ORC);
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  // Order doesn't matter for sibling SCCs.
+  EXPECT_EQ(2, RC->size());
+  EXPECT_EQ(&CG.lookupSCC(*GN)->getOuterRefSCC(), RC);
+  EXPECT_EQ(&CG.lookupSCC(FN)->getOuterRefSCC(), RC);
+}
+
+// Two mutually-referencing new functions with no edge back to f form their
+// own RefSCC, ordered before f's.
+TEST(LazyCallGraphTest, AddSplitFunctions3) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M = parseAssembly(Context, "define void @f() {\n"
+                                                     "  ret void\n"
+                                                     "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  Function &F = lookupFunction(*M, "f");
+  LazyCallGraph::Node &FN = CG.get(F);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *ORC = &*I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  auto *G1 = Function::Create(F.getFunctionType(), F.getLinkage(),
+                              F.getAddressSpace(), "g1", F.getParent());
+  auto *G2 = Function::Create(F.getFunctionType(), F.getLinkage(),
+                              F.getAddressSpace(), "g2", F.getParent());
+  BasicBlock *G1BB = BasicBlock::Create(Context, "", G1);
+  BasicBlock *G2BB = BasicBlock::Create(Context, "", G2);
+  // Create g1 -ref-> g2 and g2 -ref-> g1.
+  (void)CastInst::CreatePointerCast(G2, Type::getInt8PtrTy(Context), "", G1BB);
+  (void)CastInst::CreatePointerCast(G1, Type::getInt8PtrTy(Context), "", G2BB);
+  (void)ReturnInst::Create(Context, G1BB);
+  (void)ReturnInst::Create(Context, G2BB);
+
+  // Create f -ref-> g1 and f -ref-> g2.
+  (void)CastInst::CreatePointerCast(G1, Type::getInt8PtrTy(Context), "",
+                                    &*F.getEntryBlock().begin());
+  (void)CastInst::CreatePointerCast(G2, Type::getInt8PtrTy(Context), "",
+                                    &*F.getEntryBlock().begin());
+
+  EXPECT_FALSE(verifyModule(*M, &errs()));
+
+  CG.addSplitRefRecursiveFunctions(F, SmallVector<Function *, 2>({G1, G2}));
+
+  LazyCallGraph::Node *G1N = CG.lookup(*G1);
+  EXPECT_TRUE(G1N);
+  LazyCallGraph::Node *G2N = CG.lookup(*G2);
+  EXPECT_TRUE(G2N);
+
+  I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *RC1 = &*I++;
+  EXPECT_EQ(2, RC1->size());
+  EXPECT_EQ(RC1, CG.lookupRefSCC(*G1N));
+  EXPECT_EQ(RC1, CG.lookupRefSCC(*G2N));
+  LazyCallGraph::RefSCC *RC2 = &*I++;
+  EXPECT_EQ(RC2, ORC);
+  EXPECT_EQ(RC2, CG.lookupRefSCC(FN));
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+}
+
+// As AddSplitFunctions3, but g2 also refs f, so f, g1 and g2 all merge into
+// the original RefSCC as three sibling SCCs.
+TEST(LazyCallGraphTest, AddSplitFunctions4) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M = parseAssembly(Context, "define void @f() {\n"
+                                                     "  ret void\n"
+                                                     "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  Function &F = lookupFunction(*M, "f");
+  LazyCallGraph::Node &FN = CG.get(F);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *ORC = &*I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  auto *G1 = Function::Create(F.getFunctionType(), F.getLinkage(),
+                              F.getAddressSpace(), "g1", F.getParent());
+  auto *G2 = Function::Create(F.getFunctionType(), F.getLinkage(),
+                              F.getAddressSpace(), "g2", F.getParent());
+  BasicBlock *G1BB = BasicBlock::Create(Context, "", G1);
+  BasicBlock *G2BB = BasicBlock::Create(Context, "", G2);
+  // Create g1 -ref-> g2 and g2 -ref-> g1.
+  (void)CastInst::CreatePointerCast(G2, Type::getInt8PtrTy(Context), "", G1BB);
+  (void)CastInst::CreatePointerCast(G1, Type::getInt8PtrTy(Context), "", G2BB);
+  // Create g2 -ref-> f.
+  (void)CastInst::CreatePointerCast(&F, Type::getInt8PtrTy(Context), "", G2BB);
+  (void)ReturnInst::Create(Context, G1BB);
+  (void)ReturnInst::Create(Context, G2BB);
+
+  // Create f -ref-> g1 and f -ref-> g2.
+  (void)CastInst::CreatePointerCast(G1, Type::getInt8PtrTy(Context), "",
+                                    &*F.getEntryBlock().begin());
+  (void)CastInst::CreatePointerCast(G2, Type::getInt8PtrTy(Context), "",
+                                    &*F.getEntryBlock().begin());
+
+  EXPECT_FALSE(verifyModule(*M, &errs()));
+
+  CG.addSplitRefRecursiveFunctions(F, SmallVector<Function *, 2>({G1, G2}));
+
+  LazyCallGraph::Node *G1N = CG.lookup(*G1);
+  EXPECT_TRUE(G1N);
+  LazyCallGraph::Node *G2N = CG.lookup(*G2);
+  EXPECT_TRUE(G2N);
+
+  I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *RC = &*I++;
+  EXPECT_EQ(RC, ORC);
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  // Order doesn't matter for sibling SCCs.
+  EXPECT_EQ(3, RC->size());
+  EXPECT_EQ(&CG.lookupSCC(FN)->getOuterRefSCC(), RC);
+  EXPECT_EQ(&CG.lookupSCC(*G1N)->getOuterRefSCC(), RC);
+  EXPECT_EQ(&CG.lookupSCC(*G2N)->getOuterRefSCC(), RC);
+  EXPECT_EQ(RC, CG.lookupRefSCC(*G1N));
+  EXPECT_EQ(RC, CG.lookupRefSCC(*G2N));
+}
+
+// f refs f2 (which calls f); the new g1/g2 pair refs back into f2, merging
+// all four functions into the original RefSCC.
+TEST(LazyCallGraphTest, AddSplitFunctions5) {
+  LLVMContext Context;
+  std::unique_ptr<Module> M =
+      parseAssembly(Context, "define void @f() {\n"
+                             "  %1 = bitcast void ()* @f2 to i8*\n"
+                             "  ret void\n"
+                             "}\n"
+                             "define void @f2() {\n"
+                             "  call void @f()\n"
+                             "  ret void\n"
+                             "}\n");
+  LazyCallGraph CG = buildCG(*M);
+
+  Function &F = lookupFunction(*M, "f");
+  LazyCallGraph::Node &FN = CG.get(F);
+  Function &F2 = lookupFunction(*M, "f2");
+  // Look up f2's node from F2 (not F, which would alias FN and make the
+  // F2N assertions below vacuous).
+  LazyCallGraph::Node &F2N = CG.get(F2);
+
+  // Force the graph to be fully expanded.
+  CG.buildRefSCCs();
+  auto I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *ORC = &*I++;
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+
+  auto *G1 = Function::Create(F.getFunctionType(), F.getLinkage(),
+                              F.getAddressSpace(), "g1", F.getParent());
+  auto *G2 = Function::Create(F.getFunctionType(), F.getLinkage(),
+                              F.getAddressSpace(), "g2", F.getParent());
+  BasicBlock *G1BB = BasicBlock::Create(Context, "", G1);
+  BasicBlock *G2BB = BasicBlock::Create(Context, "", G2);
+  // Create g1 -ref-> g2 and g2 -ref-> g1.
+  (void)CastInst::CreatePointerCast(G2, Type::getInt8PtrTy(Context), "", G1BB);
+  (void)CastInst::CreatePointerCast(G1, Type::getInt8PtrTy(Context), "", G2BB);
+  // Create g2 -ref-> f2.
+  (void)CastInst::CreatePointerCast(&F2, Type::getInt8PtrTy(Context), "", G2BB);
+  (void)ReturnInst::Create(Context, G1BB);
+  (void)ReturnInst::Create(Context, G2BB);
+
+  // Create f -ref-> g1 and f -ref-> g2.
+  (void)CastInst::CreatePointerCast(G1, Type::getInt8PtrTy(Context), "",
+                                    &*F.getEntryBlock().begin());
+  (void)CastInst::CreatePointerCast(G2, Type::getInt8PtrTy(Context), "",
+                                    &*F.getEntryBlock().begin());
+
+  EXPECT_FALSE(verifyModule(*M, &errs()));
+
+  CG.addSplitRefRecursiveFunctions(F, SmallVector<Function *, 2>({G1, G2}));
+
+  LazyCallGraph::Node *G1N = CG.lookup(*G1);
+  EXPECT_TRUE(G1N);
+  LazyCallGraph::Node *G2N = CG.lookup(*G2);
+  EXPECT_TRUE(G2N);
+
+  I = CG.postorder_ref_scc_begin();
+  LazyCallGraph::RefSCC *RC = &*I++;
+  EXPECT_EQ(4, RC->size());
+  EXPECT_EQ(RC, ORC);
+  EXPECT_EQ(RC, CG.lookupRefSCC(*G1N));
+  EXPECT_EQ(RC, CG.lookupRefSCC(*G2N));
+  EXPECT_EQ(RC, CG.lookupRefSCC(FN));
+  EXPECT_EQ(RC, CG.lookupRefSCC(F2N));
+  EXPECT_EQ(CG.postorder_ref_scc_end(), I);
+}
 }