Index: lib/Transforms/IPO/Inliner.cpp =================================================================== --- lib/Transforms/IPO/Inliner.cpp +++ lib/Transforms/IPO/Inliner.cpp @@ -1145,6 +1145,7 @@ // re-use the exact same logic for updating the call graph to reflect the // change. LazyCallGraph::SCC *OldC = C; + unsigned CWSizeBeforeUpdate = UR.CWorklist.size(); C = &updateCGAndAnalysisManagerForFunctionPass(CG, *C, N, AM, UR); LLVM_DEBUG(dbgs() << "Updated inlining SCC: " << *C << "\n"); RC = &C->getOuterRefSCC(); @@ -1159,9 +1160,20 @@ // node and the SCC containing the call edge. This is a slight over // approximation of the possible inlining decisions that must be avoided, // but is relatively efficient to store. + // + // It is possible that even if no new SCC is generated (i.e., C == OldC), + // the original SCC could be split and then merged into the same one as + // itself. During this process, the original SCC will be added into + // UR.CWorklist again; we want to cache such a case too. + // + // So if a split has ever happened, the size of UR.CWorklist will be + // larger than it was before the SCC update, and we will cache the history + // then. + // // FIXME: This seems like a very heavyweight way of retaining the inline // history, we should look for a more efficient way of tracking it. - if (C != OldC && llvm::any_of(InlinedCallees, [&](Function *Callee) { + if (CWSizeBeforeUpdate < UR.CWorklist.size() && + llvm::any_of(InlinedCallees, [&](Function *Callee) { return CG.lookupSCC(*CG.lookup(*Callee)) == OldC; })) { LLVM_DEBUG(dbgs() << "Inlined an internal call edge and split an SCC, " Index: test/Transforms/Inline/cgscc-cycle-2.ll =================================================================== --- test/Transforms/Inline/cgscc-cycle-2.ll +++ test/Transforms/Inline/cgscc-cycle-2.ll @@ -0,0 +1,436 @@ +; This test contains another tricky call graph structure for the inliner to +; handle correctly. 
The callgraph is like following: +; +; foo <---> goo +; | ^ +; v | +; moo <---> noo +; +; For all the call edges in the call graph, only moo and noo can be inlined +; into foo, and no other call edge can be inlined. +; +; After moo is inlined into foo, the original call edge foo->moo will be +; removed, a new call edge will be added and the call graph becomes: +; +; foo <---> goo +; \ ^ +; v / +; moo <---> noo +; But foo, goo, moo and noo still belong to the same SCC. +; +; Then after foo->noo is inlined, when foo->noo is converted to a ref edge, +; the original SCC will be split into two: {moo, noo} and {foo, goo}, +; immediately the newly added ref edge foo->moo will be converted to a call +; edge, and the two SCCs will be merged into the original one again. During +; this cycle, the original SCC will be added into UR.CWorklist again and +; this creates an infinite loop. +; +; RUN: opt < %s -passes='cgscc(inline,function(sroa,instcombine))' -S | FileCheck %s + +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +@a = dso_local local_unnamed_addr global i64 0, align 8 + +define dso_local void @_Z3mooi(i32 %i) local_unnamed_addr #1 { +entry: + %cmp = icmp eq i32 %i, 5 + br i1 %cmp, label %if.end, label %if.then + +if.then: ; preds = %entry + %call = tail call i64 @random() #2 + %0 = load i64, i64* @a, align 8, !tbaa !2 + %add = add nsw i64 %0, %call + store i64 %add, i64* @a, align 8, !tbaa !2 + %call1 = tail call i64 @random() #2 + %1 = load i64, i64* @a, align 8, !tbaa !2 + %add2 = add nsw i64 %1, %call1 + store i64 %add2, i64* @a, align 8, !tbaa !2 + %call3 = tail call i64 @random() #2 + %2 = load i64, i64* @a, align 8, !tbaa !2 + %add4 = add nsw i64 %2, %call3 + store i64 %add4, i64* @a, align 8, !tbaa !2 + %call5 = tail call i64 @random() #2 + %3 = load i64, i64* @a, align 8, !tbaa !2 + %add6 = add nsw i64 %3, %call5 + store i64 %add6, i64* @a, align 8, !tbaa !2 + %call7 = tail call i64 @random() #2 + %4 = 
load i64, i64* @a, align 8, !tbaa !2 + %add8 = add nsw i64 %4, %call7 + store i64 %add8, i64* @a, align 8, !tbaa !2 + %call9 = tail call i64 @random() #2 + %5 = load i64, i64* @a, align 8, !tbaa !2 + %add10 = add nsw i64 %5, %call9 + store i64 %add10, i64* @a, align 8, !tbaa !2 + %call11 = tail call i64 @random() #2 + %6 = load i64, i64* @a, align 8, !tbaa !2 + %add12 = add nsw i64 %6, %call11 + store i64 %add12, i64* @a, align 8, !tbaa !2 + %call13 = tail call i64 @random() #2 + %7 = load i64, i64* @a, align 8, !tbaa !2 + %add14 = add nsw i64 %7, %call13 + store i64 %add14, i64* @a, align 8, !tbaa !2 + %call15 = tail call i64 @random() #2 + %8 = load i64, i64* @a, align 8, !tbaa !2 + %add16 = add nsw i64 %8, %call15 + store i64 %add16, i64* @a, align 8, !tbaa !2 + %call17 = tail call i64 @random() #2 + %9 = load i64, i64* @a, align 8, !tbaa !2 + %add18 = add nsw i64 %9, %call17 + store i64 %add18, i64* @a, align 8, !tbaa !2 + %call19 = tail call i64 @random() #2 + %10 = load i64, i64* @a, align 8, !tbaa !2 + %add20 = add nsw i64 %10, %call19 + store i64 %add20, i64* @a, align 8, !tbaa !2 + %call21 = tail call i64 @random() #2 + %11 = load i64, i64* @a, align 8, !tbaa !2 + %add22 = add nsw i64 %11, %call21 + store i64 %add22, i64* @a, align 8, !tbaa !2 + %call23 = tail call i64 @random() #2 + %12 = load i64, i64* @a, align 8, !tbaa !2 + %add24 = add nsw i64 %12, %call23 + store i64 %add24, i64* @a, align 8, !tbaa !2 + %call25 = tail call i64 @random() #2 + %13 = load i64, i64* @a, align 8, !tbaa !2 + %add26 = add nsw i64 %13, %call25 + store i64 %add26, i64* @a, align 8, !tbaa !2 + %call27 = tail call i64 @random() #2 + %14 = load i64, i64* @a, align 8, !tbaa !2 + %add28 = add nsw i64 %14, %call27 + store i64 %add28, i64* @a, align 8, !tbaa !2 + %call29 = tail call i64 @random() #2 + %15 = load i64, i64* @a, align 8, !tbaa !2 + %add30 = add nsw i64 %15, %call29 + store i64 %add30, i64* @a, align 8, !tbaa !2 + %call31 = tail call i64 @random() #2 + %16 = load i64, 
i64* @a, align 8, !tbaa !2 + %add32 = add nsw i64 %16, %call31 + store i64 %add32, i64* @a, align 8, !tbaa !2 + %call33 = tail call i64 @random() #2 + %17 = load i64, i64* @a, align 8, !tbaa !2 + %add34 = add nsw i64 %17, %call33 + store i64 %add34, i64* @a, align 8, !tbaa !2 + %call35 = tail call i64 @random() #2 + %18 = load i64, i64* @a, align 8, !tbaa !2 + %add36 = add nsw i64 %18, %call35 + store i64 %add36, i64* @a, align 8, !tbaa !2 + %call37 = tail call i64 @random() #2 + %19 = load i64, i64* @a, align 8, !tbaa !2 + %add38 = add nsw i64 %19, %call37 + store i64 %add38, i64* @a, align 8, !tbaa !2 + %call39 = tail call i64 @random() #2 + %20 = load i64, i64* @a, align 8, !tbaa !2 + %add40 = add nsw i64 %20, %call39 + store i64 %add40, i64* @a, align 8, !tbaa !2 + %call41 = tail call i64 @random() #2 + %21 = load i64, i64* @a, align 8, !tbaa !2 + %add42 = add nsw i64 %21, %call41 + store i64 %add42, i64* @a, align 8, !tbaa !2 + %call43 = tail call i64 @random() #2 + %22 = load i64, i64* @a, align 8, !tbaa !2 + %add44 = add nsw i64 %22, %call43 + store i64 %add44, i64* @a, align 8, !tbaa !2 + %call45 = tail call i64 @random() #2 + %23 = load i64, i64* @a, align 8, !tbaa !2 + %add46 = add nsw i64 %23, %call45 + store i64 %add46, i64* @a, align 8, !tbaa !2 + %call47 = tail call i64 @random() #2 + %24 = load i64, i64* @a, align 8, !tbaa !2 + %add48 = add nsw i64 %24, %call47 + store i64 %add48, i64* @a, align 8, !tbaa !2 + %call49 = tail call i64 @random() #2 + %25 = load i64, i64* @a, align 8, !tbaa !2 + %add50 = add nsw i64 %25, %call49 + store i64 %add50, i64* @a, align 8, !tbaa !2 + %call51 = tail call i64 @random() #2 + %26 = load i64, i64* @a, align 8, !tbaa !2 + %add52 = add nsw i64 %26, %call51 + store i64 %add52, i64* @a, align 8, !tbaa !2 + %call53 = tail call i64 @random() #2 + %27 = load i64, i64* @a, align 8, !tbaa !2 + %add54 = add nsw i64 %27, %call53 + store i64 %add54, i64* @a, align 8, !tbaa !2 + %call55 = tail call i64 @random() #2 + %28 = load 
i64, i64* @a, align 8, !tbaa !2 + %add56 = add nsw i64 %28, %call55 + store i64 %add56, i64* @a, align 8, !tbaa !2 + %call57 = tail call i64 @random() #2 + %29 = load i64, i64* @a, align 8, !tbaa !2 + %add58 = add nsw i64 %29, %call57 + store i64 %add58, i64* @a, align 8, !tbaa !2 + %call59 = tail call i64 @random() #2 + %30 = load i64, i64* @a, align 8, !tbaa !2 + %add60 = add nsw i64 %30, %call59 + store i64 %add60, i64* @a, align 8, !tbaa !2 + %call61 = tail call i64 @random() #2 + %31 = load i64, i64* @a, align 8, !tbaa !2 + %add62 = add nsw i64 %31, %call61 + store i64 %add62, i64* @a, align 8, !tbaa !2 + %call63 = tail call i64 @random() #2 + %32 = load i64, i64* @a, align 8, !tbaa !2 + %add64 = add nsw i64 %32, %call63 + store i64 %add64, i64* @a, align 8, !tbaa !2 + %call65 = tail call i64 @random() #2 + %33 = load i64, i64* @a, align 8, !tbaa !2 + %add66 = add nsw i64 %33, %call65 + store i64 %add66, i64* @a, align 8, !tbaa !2 + %call67 = tail call i64 @random() #2 + %34 = load i64, i64* @a, align 8, !tbaa !2 + %add68 = add nsw i64 %34, %call67 + store i64 %add68, i64* @a, align 8, !tbaa !2 + %call69 = tail call i64 @random() #2 + %35 = load i64, i64* @a, align 8, !tbaa !2 + %add70 = add nsw i64 %35, %call69 + store i64 %add70, i64* @a, align 8, !tbaa !2 + %call71 = tail call i64 @random() #2 + %36 = load i64, i64* @a, align 8, !tbaa !2 + %add72 = add nsw i64 %36, %call71 + store i64 %add72, i64* @a, align 8, !tbaa !2 + %call73 = tail call i64 @random() #2 + %37 = load i64, i64* @a, align 8, !tbaa !2 + %add74 = add nsw i64 %37, %call73 + store i64 %add74, i64* @a, align 8, !tbaa !2 + %call75 = tail call i64 @random() #2 + %38 = load i64, i64* @a, align 8, !tbaa !2 + %add76 = add nsw i64 %38, %call75 + store i64 %add76, i64* @a, align 8, !tbaa !2 + %call77 = tail call i64 @random() #2 + %39 = load i64, i64* @a, align 8, !tbaa !2 + %add78 = add nsw i64 %39, %call77 + store i64 %add78, i64* @a, align 8, !tbaa !2 + %call79 = tail call i64 @random() #2 + %40 = 
load i64, i64* @a, align 8, !tbaa !2 + %add80 = add nsw i64 %40, %call79 + store i64 %add80, i64* @a, align 8, !tbaa !2 + %call81 = tail call i64 @random() #2 + %41 = load i64, i64* @a, align 8, !tbaa !2 + %add82 = add nsw i64 %41, %call81 + store i64 %add82, i64* @a, align 8, !tbaa !2 + %call83 = tail call i64 @random() #2 + %42 = load i64, i64* @a, align 8, !tbaa !2 + %add84 = add nsw i64 %42, %call83 + store i64 %add84, i64* @a, align 8, !tbaa !2 + br label %if.end + +if.end: ; preds = %entry, %if.then + tail call void @_Z3nooi(i32 %i) + %43 = load i64, i64* @a, align 8, !tbaa !2 + %add85 = add nsw i64 %43, 1 + store i64 %add85, i64* @a, align 8, !tbaa !2 + ret void +} + +; Function Attrs: nounwind +declare dso_local i64 @random() local_unnamed_addr + +define dso_local void @_Z3nooi(i32 %i) local_unnamed_addr #1 { +entry: + %cmp = icmp eq i32 %i, 5 + br i1 %cmp, label %if.end, label %if.then + +if.then: ; preds = %entry + %call = tail call i64 @random() #2 + %0 = load i64, i64* @a, align 8, !tbaa !2 + %add = add nsw i64 %0, %call + store i64 %add, i64* @a, align 8, !tbaa !2 + %call1 = tail call i64 @random() #2 + %1 = load i64, i64* @a, align 8, !tbaa !2 + %add2 = add nsw i64 %1, %call1 + store i64 %add2, i64* @a, align 8, !tbaa !2 + %call3 = tail call i64 @random() #2 + %2 = load i64, i64* @a, align 8, !tbaa !2 + %add4 = add nsw i64 %2, %call3 + store i64 %add4, i64* @a, align 8, !tbaa !2 + %call5 = tail call i64 @random() #2 + %3 = load i64, i64* @a, align 8, !tbaa !2 + %add6 = add nsw i64 %3, %call5 + store i64 %add6, i64* @a, align 8, !tbaa !2 + %call7 = tail call i64 @random() #2 + %4 = load i64, i64* @a, align 8, !tbaa !2 + %add8 = add nsw i64 %4, %call7 + store i64 %add8, i64* @a, align 8, !tbaa !2 + %call9 = tail call i64 @random() #2 + %5 = load i64, i64* @a, align 8, !tbaa !2 + %add10 = add nsw i64 %5, %call9 + store i64 %add10, i64* @a, align 8, !tbaa !2 + %call11 = tail call i64 @random() #2 + %6 = load i64, i64* @a, align 8, !tbaa !2 + %add12 = add 
nsw i64 %6, %call11 + store i64 %add12, i64* @a, align 8, !tbaa !2 + %call13 = tail call i64 @random() #2 + %7 = load i64, i64* @a, align 8, !tbaa !2 + %add14 = add nsw i64 %7, %call13 + store i64 %add14, i64* @a, align 8, !tbaa !2 + %call15 = tail call i64 @random() #2 + %8 = load i64, i64* @a, align 8, !tbaa !2 + %add16 = add nsw i64 %8, %call15 + store i64 %add16, i64* @a, align 8, !tbaa !2 + %call17 = tail call i64 @random() #2 + %9 = load i64, i64* @a, align 8, !tbaa !2 + %add18 = add nsw i64 %9, %call17 + store i64 %add18, i64* @a, align 8, !tbaa !2 + %call19 = tail call i64 @random() #2 + %10 = load i64, i64* @a, align 8, !tbaa !2 + %add20 = add nsw i64 %10, %call19 + store i64 %add20, i64* @a, align 8, !tbaa !2 + %call21 = tail call i64 @random() #2 + %11 = load i64, i64* @a, align 8, !tbaa !2 + %add22 = add nsw i64 %11, %call21 + store i64 %add22, i64* @a, align 8, !tbaa !2 + %call23 = tail call i64 @random() #2 + %12 = load i64, i64* @a, align 8, !tbaa !2 + %add24 = add nsw i64 %12, %call23 + store i64 %add24, i64* @a, align 8, !tbaa !2 + %call25 = tail call i64 @random() #2 + %13 = load i64, i64* @a, align 8, !tbaa !2 + %add26 = add nsw i64 %13, %call25 + store i64 %add26, i64* @a, align 8, !tbaa !2 + %call27 = tail call i64 @random() #2 + %14 = load i64, i64* @a, align 8, !tbaa !2 + %add28 = add nsw i64 %14, %call27 + store i64 %add28, i64* @a, align 8, !tbaa !2 + %call29 = tail call i64 @random() #2 + %15 = load i64, i64* @a, align 8, !tbaa !2 + %add30 = add nsw i64 %15, %call29 + store i64 %add30, i64* @a, align 8, !tbaa !2 + %call31 = tail call i64 @random() #2 + %16 = load i64, i64* @a, align 8, !tbaa !2 + %add32 = add nsw i64 %16, %call31 + store i64 %add32, i64* @a, align 8, !tbaa !2 + %call33 = tail call i64 @random() #2 + %17 = load i64, i64* @a, align 8, !tbaa !2 + %add34 = add nsw i64 %17, %call33 + store i64 %add34, i64* @a, align 8, !tbaa !2 + %call35 = tail call i64 @random() #2 + %18 = load i64, i64* @a, align 8, !tbaa !2 + %add36 = add 
nsw i64 %18, %call35 + store i64 %add36, i64* @a, align 8, !tbaa !2 + %call37 = tail call i64 @random() #2 + %19 = load i64, i64* @a, align 8, !tbaa !2 + %add38 = add nsw i64 %19, %call37 + store i64 %add38, i64* @a, align 8, !tbaa !2 + %call39 = tail call i64 @random() #2 + %20 = load i64, i64* @a, align 8, !tbaa !2 + %add40 = add nsw i64 %20, %call39 + store i64 %add40, i64* @a, align 8, !tbaa !2 + %call41 = tail call i64 @random() #2 + %21 = load i64, i64* @a, align 8, !tbaa !2 + %add42 = add nsw i64 %21, %call41 + store i64 %add42, i64* @a, align 8, !tbaa !2 + %call43 = tail call i64 @random() #2 + %22 = load i64, i64* @a, align 8, !tbaa !2 + %add44 = add nsw i64 %22, %call43 + store i64 %add44, i64* @a, align 8, !tbaa !2 + %call45 = tail call i64 @random() #2 + %23 = load i64, i64* @a, align 8, !tbaa !2 + %add46 = add nsw i64 %23, %call45 + store i64 %add46, i64* @a, align 8, !tbaa !2 + %call47 = tail call i64 @random() #2 + %24 = load i64, i64* @a, align 8, !tbaa !2 + %add48 = add nsw i64 %24, %call47 + store i64 %add48, i64* @a, align 8, !tbaa !2 + %call49 = tail call i64 @random() #2 + %25 = load i64, i64* @a, align 8, !tbaa !2 + %add50 = add nsw i64 %25, %call49 + store i64 %add50, i64* @a, align 8, !tbaa !2 + %call51 = tail call i64 @random() #2 + %26 = load i64, i64* @a, align 8, !tbaa !2 + %add52 = add nsw i64 %26, %call51 + store i64 %add52, i64* @a, align 8, !tbaa !2 + %call53 = tail call i64 @random() #2 + %27 = load i64, i64* @a, align 8, !tbaa !2 + %add54 = add nsw i64 %27, %call53 + store i64 %add54, i64* @a, align 8, !tbaa !2 + %call55 = tail call i64 @random() #2 + %28 = load i64, i64* @a, align 8, !tbaa !2 + %add56 = add nsw i64 %28, %call55 + store i64 %add56, i64* @a, align 8, !tbaa !2 + %call57 = tail call i64 @random() #2 + %29 = load i64, i64* @a, align 8, !tbaa !2 + %add58 = add nsw i64 %29, %call57 + store i64 %add58, i64* @a, align 8, !tbaa !2 + %call59 = tail call i64 @random() #2 + %30 = load i64, i64* @a, align 8, !tbaa !2 + %add60 = 
add nsw i64 %30, %call59 + store i64 %add60, i64* @a, align 8, !tbaa !2 + %call61 = tail call i64 @random() #2 + %31 = load i64, i64* @a, align 8, !tbaa !2 + %add62 = add nsw i64 %31, %call61 + store i64 %add62, i64* @a, align 8, !tbaa !2 + %call63 = tail call i64 @random() #2 + %32 = load i64, i64* @a, align 8, !tbaa !2 + %add64 = add nsw i64 %32, %call63 + store i64 %add64, i64* @a, align 8, !tbaa !2 + %call65 = tail call i64 @random() #2 + %33 = load i64, i64* @a, align 8, !tbaa !2 + %add66 = add nsw i64 %33, %call65 + store i64 %add66, i64* @a, align 8, !tbaa !2 + %call67 = tail call i64 @random() #2 + %34 = load i64, i64* @a, align 8, !tbaa !2 + %add68 = add nsw i64 %34, %call67 + store i64 %add68, i64* @a, align 8, !tbaa !2 + %call69 = tail call i64 @random() #2 + %35 = load i64, i64* @a, align 8, !tbaa !2 + %add70 = add nsw i64 %35, %call69 + store i64 %add70, i64* @a, align 8, !tbaa !2 + %call71 = tail call i64 @random() #2 + %36 = load i64, i64* @a, align 8, !tbaa !2 + %add72 = add nsw i64 %36, %call71 + store i64 %add72, i64* @a, align 8, !tbaa !2 + %call73 = tail call i64 @random() #2 + %37 = load i64, i64* @a, align 8, !tbaa !2 + %add74 = add nsw i64 %37, %call73 + store i64 %add74, i64* @a, align 8, !tbaa !2 + %call75 = tail call i64 @random() #2 + %38 = load i64, i64* @a, align 8, !tbaa !2 + %add76 = add nsw i64 %38, %call75 + store i64 %add76, i64* @a, align 8, !tbaa !2 + %call77 = tail call i64 @random() #2 + %39 = load i64, i64* @a, align 8, !tbaa !2 + %add78 = add nsw i64 %39, %call77 + store i64 %add78, i64* @a, align 8, !tbaa !2 + br label %if.end + +if.end: ; preds = %entry, %if.then + tail call void @_Z3mooi(i32 %i) + tail call void @_Z3goov() + %40 = load i64, i64* @a, align 8, !tbaa !2 + %add79 = add nsw i64 %40, 3 + store i64 %add79, i64* @a, align 8, !tbaa !2 + ret void +} + +; Function Attrs: noinline nounwind uwtable +define dso_local void @_Z3goov() local_unnamed_addr #0 { +entry: + tail call void @_Z3foov() + %0 = load i64, i64* @a, 
align 8, !tbaa !2 + %add = add nsw i64 %0, 2 + store i64 %add, i64* @a, align 8, !tbaa !2 + ret void +} + +; Function Attrs: noinline nounwind uwtable +define dso_local void @_Z3foov() local_unnamed_addr #0 { +entry: + tail call void @_Z3goov() + tail call void @_Z3mooi(i32 5) + %0 = load i64, i64* @a, align 8, !tbaa !2 + %add = add nsw i64 %0, 5 + store i64 %add, i64* @a, align 8, !tbaa !2 + ret void +} + +attributes #0 = { noinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #2 = { nounwind } + +!llvm.module.flags = !{!0} +!llvm.ident = !{!1} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{!"clang version 8.0.0 (trunk 342257)"} +!2 = !{!3, !3, i64 0} +!3 = !{!"long", !4, i64 0} +!4 = !{!"omnipotent char", !5, i64 0} +!5 = !{!"Simple C++ TBAA"}