diff --git a/llvm/include/llvm/Transforms/IPO/Attributor.h b/llvm/include/llvm/Transforms/IPO/Attributor.h
--- a/llvm/include/llvm/Transforms/IPO/Attributor.h
+++ b/llvm/include/llvm/Transforms/IPO/Attributor.h
@@ -103,6 +103,7 @@
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SetOperations.h"
 #include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/iterator.h"
 #include "llvm/Analysis/AssumeBundleQueries.h"
 #include "llvm/Analysis/CFG.h"
@@ -132,6 +133,7 @@
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/ModRef.h"
 #include "llvm/Support/TimeProfiler.h"
+#include "llvm/Support/TypeSize.h"
 #include "llvm/TargetParser/Triple.h"
 #include "llvm/Transforms/Utils/CallGraphUpdater.h"
@@ -6112,6 +6114,12 @@
   /// See AbstractAttribute::getIdAddr()
   const char *getIdAddr() const override { return &ID; }
 
+  using OffsetBinsTy = DenseMap<AA::RangeTy, SmallSet<unsigned, 4>>;
+  using const_bin_iterator = OffsetBinsTy::const_iterator;
+  virtual const_bin_iterator begin() const = 0;
+  virtual const_bin_iterator end() const = 0;
+  virtual int64_t numOffsetBins() const = 0;
+
   /// Call \p CB on all accesses that might interfere with \p Range and return
   /// true if all such accesses were known and the callback returned true for
   /// all of them, false otherwise. An access interferes with an offset-size
@@ -6265,6 +6273,41 @@
   static const char ID;
 };
 
+struct AAAllocationInfo : public StateWrapper<BooleanState, AbstractAttribute> {
+  AAAllocationInfo(const IRPosition &IRP, Attributor &A)
+      : StateWrapper<BooleanState, AbstractAttribute>(IRP) {}
+
+  /// See AbstractAttribute::isValidIRPositionForInit
+  static bool isValidIRPositionForInit(Attributor &A, const IRPosition &IRP) {
+    if (!IRP.getAssociatedType()->isPtrOrPtrVectorTy())
+      return false;
+    return AbstractAttribute::isValidIRPositionForInit(A, IRP);
+  }
+
+  /// Create an abstract attribute view for the position \p IRP.
+  static AAAllocationInfo &createForPosition(const IRPosition &IRP,
+                                             Attributor &A);
+
+  virtual std::optional<TypeSize> getAllocatedSize() const = 0;
+
+  /// See AbstractAttribute::getName()
+  const std::string getName() const override { return "AAAllocationInfo"; }
+
+  /// See AbstractAttribute::getIdAddr()
+  const char *getIdAddr() const override { return &ID; }
+
+  /// This function should return true if the type of the \p AA is
+  /// AAAllocationInfo
+  static bool classof(const AbstractAttribute *AA) {
+    return (AA->getIdAddr() == &ID);
+  }
+
+  constexpr static const std::optional<TypeSize> HasNoAllocationSize =
+      std::optional<TypeSize>(TypeSize(-1, true));
+
+  static const char ID;
+};
+
 /// An abstract interface for llvm::GlobalValue information interference.
 struct AAGlobalValueInfo
     : public StateWrapper<BooleanState, AbstractAttribute> {
diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp
--- a/llvm/lib/Transforms/IPO/Attributor.cpp
+++ b/llvm/lib/Transforms/IPO/Attributor.cpp
@@ -3595,14 +3595,13 @@
   };
 
   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
-  bool Success;
+  [[maybe_unused]] bool Success;
   bool UsedAssumedInformation = false;
   Success = checkForAllInstructionsImpl(
       nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
       {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call},
       UsedAssumedInformation);
-  (void)Success;
   assert(Success && "Expected the check call to be successful!");
 
   auto LoadStorePred = [&](Instruction &I) -> bool {
@@ -3628,7 +3627,18 @@
       nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
       {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
       UsedAssumedInformation);
-  (void)Success;
+  assert(Success && "Expected the check call to be successful!");
+
+  // Seed AAAllocationInfo for allocas and (potentially malloc-like) calls.
+  auto AAAllocationInfoPred = [&](Instruction &I) -> bool {
+    getOrCreateAAFor<AAAllocationInfo>(IRPosition::value(I));
+    return true;
+  };
+
+  Success = checkForAllInstructionsImpl(
+      nullptr, OpcodeInstMap, AAAllocationInfoPred, nullptr, nullptr,
+      {(unsigned)Instruction::Alloca, (unsigned)Instruction::Call},
+      UsedAssumedInformation);
   assert(Success && "Expected the check call to be successful!");
 }
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -65,6 +65,7 @@
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/GraphWriter.h"
 #include "llvm/Support/MathExtras.h"
+#include "llvm/Support/TypeSize.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/CallPromotionUtils.h"
@@ -192,6 +193,7 @@
 PIPE_OPERATOR(AAAssumptionInfo)
 PIPE_OPERATOR(AAUnderlyingObjects)
 PIPE_OPERATOR(AAAddressSpace)
+PIPE_OPERATOR(AAAllocationInfo)
 PIPE_OPERATOR(AAIndirectCallInfo)
 PIPE_OPERATOR(AAGlobalValueInfo)
 PIPE_OPERATOR(AADenormalFPMath)
@@ -880,6 +882,7 @@
   using const_bin_iterator = OffsetBinsTy::const_iterator;
   const_bin_iterator begin() const { return OffsetBins.begin(); }
   const_bin_iterator end() const { return OffsetBins.end(); }
+  int64_t numOffsetBins() const { return OffsetBins.size(); }
 
   const AAPointerInfo::Access &getAccess(unsigned Index) const {
     return AccessList[Index];
@@ -1103,6 +1106,14 @@
     return AAPointerInfo::manifest(A);
   }
 
+  using OffsetBinsTy = DenseMap<AA::RangeTy, SmallSet<unsigned, 4>>;
+  using const_bin_iterator = OffsetBinsTy::const_iterator;
+  virtual const_bin_iterator begin() const override { return State::begin(); }
+  virtual const_bin_iterator end() const override { return State::end(); }
+  virtual int64_t numOffsetBins() const override {
+    return State::numOffsetBins();
+  }
+
   bool forallInterferingAccesses(
       AA::RangeTy Range,
       function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
@@ -12647,6 +12658,268 @@
 };
 } // namespace
 
+/// ----------- Allocation Info ----------
+namespace {
+struct AAAllocationInfoImpl : public AAAllocationInfo {
+  AAAllocationInfoImpl(const IRPosition &IRP, Attributor &A)
+      : AAAllocationInfo(IRP, A) {}
+
+  std::optional<TypeSize> getAllocatedSize() const override {
+    assert(isValidState() && "the AA is invalid");
+    return AssumedAllocatedSize;
+  }
+
+  /// Return true if \p I is a direct call to the library function "malloc".
+  bool isaMallocInst(Instruction *I) {
+    auto *Call = dyn_cast<CallInst>(I);
+    if (!Call)
+      return false;
+    if (Function *Callee = Call->getCalledFunction())
+      return Callee->getName() == "malloc";
+    return false;
+  }
+
+  ChangeStatus updateImpl(Attributor &A) override {
+
+    Instruction *I = getIRPosition().getCtxI();
+
+    const IRPosition &IRP = getIRPosition();
+
+    if (!(isa<AllocaInst>(I) || isaMallocInst(I)))
+      return indicatePessimisticFixpoint();
+
+    // The accessed byte ranges of the allocation come from AAPointerInfo.
+    const AAPointerInfo *PI =
+        A.getOrCreateAAFor<AAPointerInfo>(IRP, *this, DepClassTy::REQUIRED);
+
+    if (!PI)
+      return indicatePessimisticFixpoint();
+
+    if (!PI->getState().isValidState())
+      return indicatePessimisticFixpoint();
+
+    int64_t BinSize = PI->numOffsetBins();
+
+    // TODO: Handle more than one offset bin.
+    if (BinSize > 1)
+      return indicatePessimisticFixpoint();
+
+    const auto &It = PI->begin();
+
+    if (BinSize == 0)
+      return indicatePessimisticFixpoint();
+
+    if (It->first.Offset != 0)
+      return indicatePessimisticFixpoint();
+
+    uint64_t OffsetEnd = It->first.Offset + It->first.Size;
+    const DataLayout &DL = A.getDataLayout();
+
+    switch (I->getOpcode()) {
+    case Instruction::Alloca: {
+      AllocaInst *AI = dyn_cast<AllocaInst>(getIRPosition().getCtxI());
+      if (!AI)
+        return indicatePessimisticFixpoint();
+      const auto &AllocationSize = AI->getAllocationSize(DL);
+
+      if (!AllocationSize || AllocationSize == 0)
+        return indicatePessimisticFixpoint();
+
+      // The alloca is already no larger than the accessed range.
+      if (OffsetEnd == *AllocationSize)
+        return indicatePessimisticFixpoint();
+
+      break;
+    }
+    case Instruction::Call: {
+
+      if (!isaMallocInst(I))
+        return indicatePessimisticFixpoint();
+
+      Value *ValueOperand = I->getOperand(0);
+
+      if (!ValueOperand)
+        return indicatePessimisticFixpoint();
+
+      ConstantInt *IntOperand = dyn_cast<ConstantInt>(ValueOperand);
+
+      if (!IntOperand) {
+        const AAPotentialConstantValues *PotentialConstant =
+            A.getOrCreateAAFor<AAPotentialConstantValues>(
+                IRPosition::value(*ValueOperand), *this, DepClassTy::REQUIRED);
+
+        if (!PotentialConstant || !PotentialConstant->isValidState())
+          return indicatePessimisticFixpoint();
+
+        const auto &AssumedConstant = PotentialConstant->getAssumedConstant(A);
+        if (!AssumedConstant || !*AssumedConstant)
+          return indicatePessimisticFixpoint();
+
+        IntOperand = dyn_cast<ConstantInt>(*AssumedConstant);
+
+        if (!IntOperand)
+          return indicatePessimisticFixpoint();
+      }
+
+      // The malloc already allocates exactly the accessed range.
+      if (OffsetEnd == IntOperand->getZExtValue())
+        return indicatePessimisticFixpoint();
+
+      break;
+    }
+    default:
+      return indicatePessimisticFixpoint();
+    }
+
+    auto SizeOfTypeInBits =
+        std::optional<TypeSize>(TypeSize(OffsetEnd * 8, false));
+
+    if (!changeAllocationSize(SizeOfTypeInBits))
+      return ChangeStatus::UNCHANGED;
+
+    return ChangeStatus::CHANGED;
+  }
+
+  /// See AbstractAttribute::manifest(...).
+  ChangeStatus manifest(Attributor &A) override {
+
+    assert(isValidState() &&
+           "Manifest should only be called if the state is valid.");
+
+    Instruction *I = getIRPosition().getCtxI();
+
+    auto FixedAllocatedSizeInBits = getAllocatedSize()->getFixedValue();
+
+    int NumBytesToAllocate = (FixedAllocatedSizeInBits + 7) / 8;
+
+    Type *CharType = Type::getInt8Ty(I->getContext());
+
+    auto *NumBytesToValue = llvm::ConstantInt::get(
+        I->getContext(), llvm::APInt(32, NumBytesToAllocate));
+
+    switch (I->getOpcode()) {
+    case Instruction::Alloca: {
+
+      // Shrink the alloca to an i8 array covering exactly the used bytes.
+      AllocaInst *AI = dyn_cast<AllocaInst>(I);
+
+      AllocaInst *NewAllocaInst =
+          new AllocaInst(CharType, AI->getAddressSpace(), NumBytesToValue,
+                         AI->getAlign(), AI->getName(), AI->getNextNode());
+
+      if (A.changeAfterManifest(IRPosition::inst(*AI), *NewAllocaInst))
+        return ChangeStatus::CHANGED;
+
+      break;
+    }
+    case Instruction::Call: {
+
+      if (!isaMallocInst(I))
+        return ChangeStatus::UNCHANGED;
+
+      // Replace the malloc with one that allocates only the used bytes.
+      Type *IntPtrTy = IntegerType::getInt32Ty(I->getContext());
+      Instruction *NewMallocInst = CallInst::CreateMalloc(
+          I->getNextNode(), IntPtrTy, CharType, NumBytesToValue, nullptr,
+          nullptr, I->getName());
+
+      if (A.changeAfterManifest(IRPosition::inst(*I), *NewMallocInst)) {
+        A.deleteAfterManifest(*I);
+        return ChangeStatus::CHANGED;
+      }
+
+      break;
+    }
+    default:
+      break;
+    }
+
+    return llvm::ChangeStatus::UNCHANGED;
+  }
+
+  /// See AbstractAttribute::getAsStr().
+  const std::string getAsStr(Attributor *A) const override {
+    if (!isValidState())
+      return "allocationinfo()";
+    return "allocationinfo(" +
+           (AssumedAllocatedSize == HasNoAllocationSize
+                ? "none"
+                : std::to_string(AssumedAllocatedSize->getFixedValue())) +
+           ")";
+  }
+
+private:
+  std::optional<TypeSize> AssumedAllocatedSize = HasNoAllocationSize;
+
+  /// Record the new assumed size; returns true if the size changed.
+  bool changeAllocationSize(std::optional<TypeSize> Size) {
+    if (AssumedAllocatedSize == HasNoAllocationSize ||
+        AssumedAllocatedSize != Size) {
+      AssumedAllocatedSize = Size;
+      return true;
+    }
+    return false;
+  }
+};
+
+struct AAAllocationInfoFloating : AAAllocationInfoImpl {
+  AAAllocationInfoFloating(const IRPosition &IRP, Attributor &A)
+      : AAAllocationInfoImpl(IRP, A) {}
+
+  void trackStatistics() const override {
+    STATS_DECLTRACK_FLOATING_ATTR(allocationinfo);
+  }
+};
+
+struct AAAllocationInfoReturned : AAAllocationInfoImpl {
+  AAAllocationInfoReturned(const IRPosition &IRP, Attributor &A)
+      : AAAllocationInfoImpl(IRP, A) {}
+
+  /// See AbstractAttribute::initialize(...).
+  void initialize(Attributor &A) override {
+    // TODO: we don't rewrite function argument for now because it will need to
+    // rewrite the function signature and all call sites
+    (void)indicatePessimisticFixpoint();
+  }
+
+  void trackStatistics() const override {
+    STATS_DECLTRACK_FNRET_ATTR(allocationinfo);
+  }
+};
+
+struct AAAllocationInfoCallSiteReturned : AAAllocationInfoImpl {
+  AAAllocationInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
+      : AAAllocationInfoImpl(IRP, A) {}
+
+  void trackStatistics() const override {
+    STATS_DECLTRACK_CSRET_ATTR(allocationinfo);
+  }
+};
+
+struct AAAllocationInfoArgument : AAAllocationInfoImpl {
+  AAAllocationInfoArgument(const IRPosition &IRP, Attributor &A)
+      : AAAllocationInfoImpl(IRP, A) {}
+
+  void trackStatistics() const override {
+    STATS_DECLTRACK_ARG_ATTR(allocationinfo);
+  }
+};
+
+struct AAAllocationInfoCallSiteArgument : AAAllocationInfoImpl {
+  AAAllocationInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
+      : AAAllocationInfoImpl(IRP, A) {}
+
+  /// See AbstractAttribute::initialize(...).
+ void initialize(Attributor &A) override { + + (void)indicatePessimisticFixpoint(); + } + + void trackStatistics() const override { + STATS_DECLTRACK_CSARG_ATTR(allocationinfo); + } +}; +} // namespace + const char AANoUnwind::ID = 0; const char AANoSync::ID = 0; const char AANoFree::ID = 0; @@ -12680,6 +12953,7 @@ const char AAAssumptionInfo::ID = 0; const char AAUnderlyingObjects::ID = 0; const char AAAddressSpace::ID = 0; +const char AAAllocationInfo::ID = 0; const char AAIndirectCallInfo::ID = 0; const char AAGlobalValueInfo::ID = 0; const char AADenormalFPMath::ID = 0; @@ -12813,6 +13087,7 @@ CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFPClass) CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo) CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAddressSpace) +CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAllocationInfo) CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify) CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead) diff --git a/llvm/test/Transforms/Attributor/allocator.ll b/llvm/test/Transforms/Attributor/allocator.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/Attributor/allocator.ll @@ -0,0 +1,430 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --check-globals --version 2 +; RUN: opt -aa-pipeline=basic-aa -passes=attributor -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,TUNIT +; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,CGSCC + +%struct.Foo = type { i32, i32, i8 } + +@.str = private unnamed_addr constant [17 x i8] c"The value is %d\0A\00", align 1 + +;. +; CHECK: @[[_STR:[a-zA-Z0-9_$"\\.-]+]] = private unnamed_addr constant [17 x i8] c"The value is %d\0A\00", align 1 +;. +define dso_local void @positive_alloca_1(i32 noundef %val) #0 { +; CHECK-LABEL: define dso_local void @positive_alloca_1 +; CHECK-SAME: (i32 noundef [[VAL:%.*]]) { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[VAL_ADDR1:%.*]] = alloca i8, i32 4, align 4 +; CHECK-NEXT: [[F2:%.*]] = alloca i8, i32 4, align 4 +; CHECK-NEXT: store i32 [[VAL]], ptr [[VAL_ADDR1]], align 4 +; CHECK-NEXT: store i32 10, ptr [[F2]], align 4 +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[F2]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 +; CHECK-NEXT: store i32 [[ADD]], ptr [[F2]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[F2]], align 4 +; CHECK-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP1]], [[VAL]] +; CHECK-NEXT: [[CALL:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[ADD3]]) +; CHECK-NEXT: ret void +; +entry: + %val.addr = alloca i64, align 4 + %f = alloca %struct.Foo, align 4 + store i32 %val, ptr %val.addr, align 4 + %field1 = getelementptr inbounds %struct.Foo, ptr %f, i32 0, i32 0 + store i32 10, ptr %field1, align 4 + %field11 = getelementptr inbounds %struct.Foo, ptr %f, i32 0, i32 0 + %0 = load i32, ptr %field11, align 4 + %add = add nsw i32 %0, 1 + store i32 %add, ptr %field11, align 4 + %field12 = getelementptr inbounds %struct.Foo, ptr %f, i32 0, i32 0 + %1 = load i32, ptr %field12, align 4 + %2 = load i32, ptr %val.addr, align 4 + %add3 = add nsw i32 %1, %2 + %call = call i32 (ptr, ...) 
@printf(ptr noundef @.str, i32 noundef %add3) + ret void +} + +; Function Attrs: noinline nounwind uwtable +define dso_local void @positive_malloc_1(ptr noundef %val) #0 { +; CHECK-LABEL: define dso_local void @positive_malloc_1 +; CHECK-SAME: (ptr nocapture nofree noundef readonly [[VAL:%.*]]) { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[VAL_ADDR:%.*]] = alloca ptr, align 8 +; CHECK-NEXT: [[F:%.*]] = alloca ptr, align 8 +; CHECK-NEXT: store ptr [[VAL]], ptr [[VAL_ADDR]], align 8 +; CHECK-NEXT: [[MALLOCCALL:%.*]] = tail call ptr @malloc(i32 4) +; CHECK-NEXT: store ptr [[MALLOCCALL]], ptr [[F]], align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[VAL]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 10 +; CHECK-NEXT: store i32 [[ADD]], ptr [[MALLOCCALL]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[MALLOCCALL]], align 4 +; CHECK-NEXT: [[CALL2:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[TMP1]]) +; CHECK-NEXT: ret void +; +entry: + %val.addr = alloca ptr, align 8 + %f = alloca ptr, align 8 + store ptr %val, ptr %val.addr, align 8 + %call = call noalias ptr @malloc(i64 noundef 12) #3 + store ptr %call, ptr %f, align 8 + %0 = load ptr, ptr %val.addr, align 8 + %1 = load i32, ptr %0, align 4 + %add = add nsw i32 %1, 10 + %2 = load ptr, ptr %f, align 8 + %a = getelementptr inbounds %struct.Foo, ptr %2, i32 0, i32 0 + store i32 %add, ptr %a, align 4 + %3 = load ptr, ptr %f, align 8 + %a1 = getelementptr inbounds %struct.Foo, ptr %3, i32 0, i32 0 + %4 = load i32, ptr %a1, align 4 + %call2 = call i32 (ptr, ...) @printf(ptr noundef @.str, i32 noundef %4) + ret void +} + +; Function Attrs: noinline nounwind uwtable +define dso_local void @positive_malloc_2(ptr noundef %val) #0 { +; CHECK-LABEL: define dso_local void @positive_malloc_2 +; CHECK-SAME: (ptr nocapture nofree noundef readonly [[VAL:%.*]]) { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[VAL_ADDR:%.*]] = alloca ptr, align 8 +; CHECK-NEXT: [[F:%.*]] = alloca ptr, align 8 +; CHECK-NEXT: store ptr [[VAL]], ptr [[VAL_ADDR]], align 8 +; CHECK-NEXT: [[MALLOCCALL:%.*]] = tail call ptr @malloc(i32 4) +; CHECK-NEXT: store ptr [[MALLOCCALL]], ptr [[F]], align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[VAL]], align 4 +; CHECK-NEXT: store i32 [[TMP0]], ptr [[MALLOCCALL]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[MALLOCCALL]], align 4 +; CHECK-NEXT: [[CALL2:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[TMP1]]) +; CHECK-NEXT: ret void +; +entry: + %val.addr = alloca ptr, align 8 + %x = alloca i32, align 4 + %f = alloca ptr, align 8 + store ptr %val, ptr %val.addr, align 8 + store i32 15, ptr %x, align 4 + %0 = load i32, ptr %x, align 4 + %conv = sext i32 %0 to i64 + %mul = mul i64 4, %conv + %call = call noalias ptr @malloc(i64 noundef %mul) + store ptr %call, ptr %f, align 8 + %1 = load ptr, ptr %val.addr, align 8 + %2 = load i32, ptr %1, align 4 + %3 = load ptr, ptr %f, align 8 + %arrayidx = getelementptr inbounds i32, ptr %3, i64 0 + store i32 %2, ptr %arrayidx, align 4 + %4 = load ptr, ptr %f, align 8 + %arrayidx1 = getelementptr inbounds i32, ptr %4, i64 0 + %5 = load i32, ptr %arrayidx1, align 4 + %call2 = call i32 (ptr, ...) 
@printf(ptr noundef @.str, i32 noundef %5) + ret void +} + +; Function Attrs: noinline nounwind uwtable +define dso_local ptr @negative_test_escaping_pointer(i32 noundef %val) #0 { +; CHECK-LABEL: define dso_local ptr @negative_test_escaping_pointer +; CHECK-SAME: (i32 noundef [[VAL:%.*]]) { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[VAL_ADDR:%.*]] = alloca i32, align 4 +; CHECK-NEXT: [[F:%.*]] = alloca ptr, align 8 +; CHECK-NEXT: store i32 [[VAL]], ptr [[VAL_ADDR]], align 4 +; CHECK-NEXT: [[CALL:%.*]] = call noalias ptr @malloc(i64 noundef 16) +; CHECK-NEXT: store ptr [[CALL]], ptr [[F]], align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[F]], align 8 +; CHECK-NEXT: store i32 2, ptr [[TMP0]], align 8 +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 10, [[VAL]] +; CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[F]], align 8 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 8 +; CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP2]], [[ADD]] +; CHECK-NEXT: store i32 [[ADD2]], ptr [[TMP1]], align 8 +; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[F]], align 8 +; CHECK-NEXT: ret ptr [[TMP3]] +; +entry: + %val.addr = alloca i32, align 4 + %f = alloca ptr, align 8 + store i32 %val, ptr %val.addr, align 4 + %call = call noalias ptr @malloc(i64 noundef 16) #2 + store ptr %call, ptr %f, align 8 + %0 = load ptr, ptr %f, align 8 + %field1 = getelementptr inbounds %struct.Foo, ptr %0, i32 0, i32 0 + store i32 2, ptr %field1, align 8 + %1 = load i32, ptr %val.addr, align 4 + %add = add nsw i32 10, %1 + %2 = load ptr, ptr %f, align 8 + %field11 = getelementptr inbounds %struct.Foo, ptr %2, i32 0, i32 0 + %3 = load i32, ptr %field11, align 8 + %add2 = add nsw i32 %3, %add + store i32 %add2, ptr %field11, align 8 + %4 = load ptr, ptr %f, align 8 + ret ptr %4 +} + +; Function Attrs: noinline nounwind uwtable +define dso_local { i64, ptr } @positive_test_not_a_single_start_offset(i32 noundef %val) #0 { +; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(none) +; CHECK-LABEL: define dso_local { i64, ptr } @positive_test_not_a_single_start_offset +; CHECK-SAME: (i32 noundef [[VAL:%.*]]) #[[ATTR0:[0-9]+]] { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_FOO:%.*]], align 8 +; CHECK-NEXT: [[VAL_ADDR:%.*]] = alloca i32, align 4 +; CHECK-NEXT: store i32 [[VAL]], ptr [[VAL_ADDR]], align 4 +; CHECK-NEXT: store i32 2, ptr [[RETVAL]], align 8 +; CHECK-NEXT: [[FIELD3:%.*]] = getelementptr inbounds [[STRUCT_FOO]], ptr [[RETVAL]], i32 0, i32 2 +; CHECK-NEXT: store ptr [[VAL_ADDR]], ptr [[FIELD3]], align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load { i64, ptr }, ptr [[RETVAL]], align 8 +; CHECK-NEXT: ret { i64, ptr } [[TMP0]] +; +entry: + ;TODO: The allocation can be reduced here. + ;However, the offsets (load/store etc.) Need to be changed. 
+ %retval = alloca %struct.Foo, align 8 + %val.addr = alloca i32, align 4 + store i32 %val, ptr %val.addr, align 4 + %field1 = getelementptr inbounds %struct.Foo, ptr %retval, i32 0, i32 0 + store i32 2, ptr %field1, align 8 + %field3 = getelementptr inbounds %struct.Foo, ptr %retval, i32 0, i32 2 + store ptr %val.addr, ptr %field3, align 8 + %0 = load { i64, ptr }, ptr %retval, align 8 + ret { i64, ptr } %0 +} + +; Function Attrs: noinline nounwind uwtable +define dso_local void @positive_test_reduce_array_allocation_1() { +; CHECK-LABEL: define dso_local void @positive_test_reduce_array_allocation_1() { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[ARRAY1:%.*]] = alloca i8, i32 4, align 8 +; CHECK-NEXT: store i32 0, ptr [[ARRAY1]], align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAY1]], align 8 +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], 2 +; CHECK-NEXT: store i32 [[TMP1]], ptr [[ARRAY1]], align 8 +; CHECK-NEXT: [[TMP2:%.*]] = add i32 1, 2 +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAY1]], align 8 +; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[TMP2]], [[TMP3]] +; CHECK-NEXT: store i32 [[TMP4]], ptr [[ARRAY1]], align 8 +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAY1]], align 8 +; CHECK-NEXT: [[CALL:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[TMP5]]) +; CHECK-NEXT: ret void +; +entry: + %array = alloca ptr, i32 10 + store i32 0, ptr %array + %0 = load i32, ptr %array + %1 = add i32 %0, 2 + store i32 %1, ptr %array + %2 = add i32 1, 2 + %3 = load i32, ptr %array + %4 = add i32 %2, %3 + store i32 %4, ptr %array + %5 = load i32, ptr %array + %call = call i32 (ptr, ...) @printf(ptr noundef @.str, i32 noundef %5) + ret void +} + + +; Function Attrs: noinline nounwind uwtable +; TODO: Here the array size is not known at compile time. +; However the array does not escape and is only partially used. +; Should the optimization reduce the allocation size regardless? Based on AAPointerInfo. +define dso_local void @baz(ptr noundef %val, i32 noundef %arrayLength) #0 { +; CHECK-LABEL: define dso_local void @baz +; CHECK-SAME: (ptr nocapture nofree noundef readonly [[VAL:%.*]], i32 noundef [[ARRAYLENGTH:%.*]]) { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[VAL_ADDR:%.*]] = alloca ptr, align 8 +; CHECK-NEXT: [[ARRAYLENGTH_ADDR:%.*]] = alloca i32, align 4 +; CHECK-NEXT: [[F:%.*]] = alloca ptr, align 8 +; CHECK-NEXT: store ptr [[VAL]], ptr [[VAL_ADDR]], align 8 +; CHECK-NEXT: store i32 [[ARRAYLENGTH]], ptr [[ARRAYLENGTH_ADDR]], align 4 +; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[ARRAYLENGTH]] to i64 +; CHECK-NEXT: [[MUL:%.*]] = mul i64 4, [[CONV]] +; CHECK-NEXT: [[CALL:%.*]] = call noalias ptr @malloc(i64 noundef [[MUL]]) +; CHECK-NEXT: store ptr [[CALL]], ptr [[F]], align 8 +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[VAL]], align 4 +; CHECK-NEXT: store i32 [[TMP0]], ptr [[CALL]], align 4 +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[CALL]], align 4 +; CHECK-NEXT: [[CALL2:%.*]] = call i32 (ptr, ...) 
@printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[TMP1]]) +; CHECK-NEXT: ret void +; +entry: + %val.addr = alloca ptr, align 8 + %arrayLength.addr = alloca i32, align 4 + %f = alloca ptr, align 8 + store ptr %val, ptr %val.addr, align 8 + store i32 %arrayLength, ptr %arrayLength.addr, align 4 + %0 = load i32, ptr %arrayLength.addr, align 4 + %conv = sext i32 %0 to i64 + %mul = mul i64 4, %conv + %call = call noalias ptr @malloc(i64 noundef %mul) #3 + store ptr %call, ptr %f, align 8 + %1 = load ptr, ptr %val.addr, align 8 + %2 = load i32, ptr %1, align 4 + %3 = load ptr, ptr %f, align 8 + %arrayidx = getelementptr inbounds i32, ptr %3, i64 0 + store i32 %2, ptr %arrayidx, align 4 + %4 = load ptr, ptr %f, align 8 + %arrayidx1 = getelementptr inbounds i32, ptr %4, i64 0 + %5 = load i32, ptr %arrayidx1, align 4 + %call2 = call i32 (ptr, ...) @printf(ptr noundef @.str, i32 noundef %5) + ret void +} + + +; Function Attrs: noinline nounwind uwtable +define dso_local void @positive_test_reduce_array_allocation_2() #0 { +; CHECK-LABEL: define dso_local void @positive_test_reduce_array_allocation_2() { +; CHECK-NEXT: entry: +; CHECK-NEXT: [[ARRAY:%.*]] = alloca ptr, align 8 +; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4 +; CHECK-NEXT: [[CALL:%.*]] = call noalias ptr @malloc(i64 noundef 40000) +; CHECK-NEXT: store ptr [[CALL]], ptr [[ARRAY]], align 8 +; CHECK-NEXT: store i32 0, ptr [[I]], align 4 +; CHECK-NEXT: br label [[FOR_COND:%.*]] +; CHECK: for.cond: +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[I]], align 4 +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], 10000 +; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[I]], align 4 +; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4 +; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[CALL]], i64 [[IDXPROM]] +; CHECK-NEXT: store i32 [[TMP1]], ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: br label [[FOR_INC:%.*]] +; CHECK: for.inc: +; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], 2 +; CHECK-NEXT: store i32 [[ADD]], ptr [[I]], align 4 +; CHECK-NEXT: br label [[FOR_COND]] +; CHECK: for.end: +; CHECK-NEXT: store i32 0, ptr [[I]], align 4 +; CHECK-NEXT: br label [[FOR_COND1:%.*]] +; CHECK: for.cond1: +; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I]], align 4 +; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP4]], 10000 +; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY3:%.*]], label [[FOR_END9:%.*]] +; CHECK: for.body3: +; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[I]], align 4 +; CHECK-NEXT: [[IDXPROM4:%.*]] = sext i32 [[TMP5]] to i64 +; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, ptr [[CALL]], i64 [[IDXPROM4]] +; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4 +; CHECK-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP6]], 1 +; CHECK-NEXT: store i32 [[ADD6]], ptr [[ARRAYIDX5]], align 4 +; CHECK-NEXT: br label [[FOR_INC7:%.*]] +; CHECK: for.inc7: +; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[I]], align 4 +; CHECK-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP7]], 2 +; CHECK-NEXT: store i32 [[ADD8]], ptr [[I]], align 4 +; CHECK-NEXT: br label [[FOR_COND1]] +; CHECK: for.end9: +; CHECK-NEXT: store i32 0, ptr [[I]], align 4 +; CHECK-NEXT: br label [[FOR_COND10:%.*]] +; CHECK: for.cond10: +; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[I]], align 4 +; CHECK-NEXT: [[CMP11:%.*]] = icmp slt i32 [[TMP8]], 10000 +; 
CHECK-NEXT: br i1 [[CMP11]], label [[FOR_BODY12:%.*]], label [[FOR_END18:%.*]] +; CHECK: for.body12: +; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4 +; CHECK-NEXT: [[IDXPROM13:%.*]] = sext i32 [[TMP9]] to i64 +; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, ptr [[CALL]], i64 [[IDXPROM13]] +; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX14]], align 4 +; CHECK-NEXT: [[CALL15:%.*]] = call i32 (ptr, ...) @printf(ptr noundef nonnull dereferenceable(17) @.str, i32 noundef [[TMP10]]) +; CHECK-NEXT: br label [[FOR_INC16:%.*]] +; CHECK: for.inc16: +; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4 +; CHECK-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP11]], 2 +; CHECK-NEXT: store i32 [[ADD17]], ptr [[I]], align 4 +; CHECK-NEXT: br label [[FOR_COND10]] +; CHECK: for.end18: +; CHECK-NEXT: ret void +; +entry: + ;TODO: Here since only even indexes of the array are part of the output + ;We can reduce the allocation by half and make an array that's accessed contiguously + %array = alloca ptr, align 8 + %i = alloca i32, align 4 + %call = call noalias ptr @malloc(i64 noundef 40000) #3 + store ptr %call, ptr %array, align 8 + store i32 0, ptr %i, align 4 + br label %for.cond + +for.cond: + %0 = load i32, ptr %i, align 4 + %cmp = icmp slt i32 %0, 10000 + br i1 %cmp, label %for.body, label %for.end + +for.body: + %1 = load i32, ptr %i, align 4 + %2 = load ptr, ptr %array, align 8 + %3 = load i32, ptr %i, align 4 + %idxprom = sext i32 %3 to i64 + %arrayidx = getelementptr inbounds i32, ptr %2, i64 %idxprom + store i32 %1, ptr %arrayidx, align 4 + br label %for.inc + +for.inc: + %4 = load i32, ptr %i, align 4 + %add = add nsw i32 %4, 2 + store i32 %add, ptr %i, align 4 + br label %for.cond + +for.end: + store i32 0, ptr %i, align 4 + br label %for.cond1 + +for.cond1: + %5 = load i32, ptr %i, align 4 + %cmp2 = icmp slt i32 %5, 10000 + br i1 %cmp2, label %for.body3, label %for.end9 + +for.body3: + %6 = load ptr, ptr %array, align 8 + %7 = load i32, ptr %i, align 4 + %idxprom4 = sext i32 %7 to i64 + %arrayidx5 = getelementptr inbounds i32, ptr %6, i64 %idxprom4 + %8 = load i32, ptr %arrayidx5, align 4 + %add6 = add nsw i32 %8, 1 + store i32 %add6, ptr %arrayidx5, align 4 + br label %for.inc7 + +for.inc7: + %9 = load i32, ptr %i, align 4 + %add8 = add nsw i32 %9, 2 + store i32 %add8, ptr %i, align 4 + br label %for.cond1 + +for.end9: + store i32 0, ptr %i, align 4 + br label %for.cond10 + +for.cond10: + %10 = load i32, ptr %i, align 4 + %cmp11 = icmp slt i32 %10, 10000 + br i1 %cmp11, label %for.body12, label %for.end18 + +for.body12: + %11 = load ptr, ptr %array, align 8 + %12 = load i32, ptr %i, align 4 + %idxprom13 = sext i32 %12 to i64 + %arrayidx14 = getelementptr inbounds i32, ptr %11, i64 %idxprom13 + %13 = load i32, ptr %arrayidx14, align 4 + %call15 = call i32 (ptr, ...) @printf(ptr noundef @.str, i32 noundef %13) + br label %for.inc16 + +for.inc16: + %14 = load i32, ptr %i, align 4 + %add17 = add nsw i32 %14, 2 + store i32 %add17, ptr %i, align 4 + br label %for.cond10 + +for.end18: + ret void +} + +declare i32 @printf(ptr noundef, ...) #1 + +; Function Attrs: nounwind allocsize(0) +declare noalias ptr @malloc(i64 noundef) #1 +;. +; CHECK: attributes #[[ATTR0]] = { mustprogress nofree norecurse nosync nounwind willreturn memory(none) } +;. +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; CGSCC: {{.*}} +; TUNIT: {{.*}} diff --git a/llvm/test/Transforms/Attributor/depgraph.ll b/llvm/test/Transforms/Attributor/depgraph.ll --- a/llvm/test/Transforms/Attributor/depgraph.ll +++ b/llvm/test/Transforms/Attributor/depgraph.ll @@ -1,8 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-attributes --check-globals ; RUN: opt -passes=attributor-cgscc -S < %s 2>&1 | FileCheck %s --check-prefixes=CHECK -; RUN: opt -passes=attributor-cgscc -disable-output -attributor-print-dep < %s 2>&1 | FileCheck %s --check-prefixes=GRAPH -; RUN: opt -passes=attributor-cgscc -disable-output -attributor-dump-dep-graph -attributor-depgraph-dot-filename-prefix=%t < %s 2>/dev/null -; RUN: FileCheck %s -input-file=%t_0.dot --check-prefix=DOT ; Test 0 ; @@ -46,298 +43,7 @@ %.0 = phi ptr [ %6, %4 ], [ %0, %7 ] ret ptr %.0 } - -; -; Check for graph -; - -; GRAPH: [AAIsDead] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state Live[#BB 4/4][#TBEP 0][#KDE 1] -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAPotentialValues] for CtxI ' %3 = icmp eq i32 %2, 0' at position {flt: [@-1]} with state set-state(< { %3 = icmp eq i32 %2, 0[3], } >) -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAPotentialValues] for CtxI ' %2 = load i32, ptr %0, align 4' at position {flt: [@-1]} with state set-state(< { %2 = load i32, ptr %0, align 4[3], } >) -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAUnderlyingObjects] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state UnderlyingObjects -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAPotentialValues] for CtxI <> at position {flt: [@-1]} with state set-state(< {i32 0[3], } >) -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoReturn] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state may-return -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoReturn] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state may-return -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAIsDead] for CtxI ' ret ptr %.0' at position {flt: [@-1]} with state assumed-live -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAUndefinedBehavior] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state undefined-behavior -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAPotentialValues] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state set-state(< {ptr %0[3], } >) -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAIsDead] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {flt: [@-1]} with state assumed-live -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoUnwind] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state nounwind -; GRAPH-NEXT: updates [AAIsDead] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {flt: [@-1]} with state assumed-live -; GRAPH-NEXT: updates [AANoUnwind] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state nounwind -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoUnwind] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state nounwind -; GRAPH-NEXT: updates [AANoUnwind] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state nounwind -; GRAPH-NEXT: updates [AANoCapture] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state assumed not-captured-maybe-returned -; GRAPH-EMPTY: -; GRAPH-NEXT: 
[AAMemoryBehavior] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state readonly -; GRAPH-NEXT: updates [AAIsDead] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {flt: [@-1]} with state assumed-live -; GRAPH-NEXT: updates [AAMemoryBehavior] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state readonly -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAMemoryBehavior] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state readonly -; GRAPH-NEXT: updates [AAMemoryBehavior] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state readonly -; GRAPH-NEXT: updates [AANoCapture] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state assumed not-captured-maybe-returned -; GRAPH-NEXT: updates [AAMemoryBehavior] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state readonly -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAIsDead] for CtxI ' %2 = load i32, ptr %0, align 4' at position {flt: [@-1]} with state assumed-live -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAIsDead] for CtxI ' %3 = icmp eq i32 %2, 0' at position {flt: [@-1]} with state assumed-live -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAIsDead] for CtxI ' br i1 %3, label %4, label %7' at position {flt: [@-1]} with state assumed-live -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAPotentialValues] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_ret: [@-1]} with state set-state(< { %5 = getelementptr inbounds i32, ptr %0, i64 4[3], %5 = getelementptr inbounds i32, ptr %0, i64 4[3], } >) -; GRAPH-NEXT: updates [AAPotentialValues] for CtxI ' %.0 = phi ptr [ %6, %4 ], [ %0, %7 ]' at position {flt:.0 [.0@-1]} with state set-state(< {ptr %0[3], %5 = getelementptr inbounds i32, ptr %0, i64 4[3], %5 = getelementptr inbounds i32, ptr %0, i64 4[3], } >) -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAPotentialValues] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance@-1]} with state set-state(< { %.0 = phi ptr [ %6, %4 ], [ %0, %7 ][3], } >) -; GRAPH-NEXT: updates [AAPotentialValues] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_ret: [@-1]} with state set-state(< { %5 = getelementptr inbounds i32, ptr %0, i64 4[3], %5 = getelementptr inbounds i32, ptr %0, i64 4[3], } >) -; GRAPH-NEXT: updates [AANoUndef] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance@-1]} with state may-undef-or-poison -; GRAPH-NEXT: updates [AANoCapture] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state assumed not-captured-maybe-returned -; GRAPH-NEXT: updates [AAAlign] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance@-1]} with state align<1-16> -; GRAPH-NEXT: updates [AANonNull] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance@-1]} with state nonnull -; GRAPH-NEXT: updates [AADereferenceable] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance@-1]} with state unknown-dereferenceable -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAPotentialValues] for CtxI ' %.0 = phi ptr [ %6, %4 ], [ %0, %7 ]' at position {flt:.0 [.0@-1]} with state set-state(< {ptr %0[3], %5 = getelementptr inbounds i32, ptr %0, i64 4[3], %5 = getelementptr inbounds i32, ptr %0, i64 4[3], } >) -; GRAPH-NEXT: updates [AAPotentialValues] for CtxI ' %2 = load i32, ptr %0, align 4' at 
position {fn_ret:checkAndAdvance [checkAndAdvance@-1]} with state set-state(< { %.0 = phi ptr [ %6, %4 ], [ %0, %7 ][3], } >) -; GRAPH-NEXT: updates [AAPotentialValues] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_ret: [@-1]} with state set-state(< { %5 = getelementptr inbounds i32, ptr %0, i64 4[3], %5 = getelementptr inbounds i32, ptr %0, i64 4[3], } >) -; GRAPH-NEXT: updates [AANoCapture] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state assumed not-captured-maybe-returned -; GRAPH-NEXT: updates [AAAlign] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance@-1]} with state align<1-16> -; GRAPH-NEXT: updates [AANonNull] for CtxI ' %.0 = phi ptr [ %6, %4 ], [ %0, %7 ]' at position {flt:.0 [.0@-1]} with state nonnull -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAPotentialValues] for CtxI ' %5 = getelementptr inbounds i32, ptr %0, i64 4' at position {flt: [@-1]} with state set-state(< { %5 = getelementptr inbounds i32, ptr %0, i64 4[3], } >) -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAPotentialValues] for CtxI <> at position {flt: [@-1]} with state set-state(< {i64 4[3], } >) -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAPotentialValues] for CtxI ' %2 = load i32, ptr %0, align 4' at position {flt:checkAndAdvance [checkAndAdvance@-1]} with state set-state(< {@checkAndAdvance[3], } >) -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAPotentialValues] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state set-state(< { %5 = getelementptr inbounds i32, ptr %0, i64 4[3], } >) -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAInstanceInfo] for CtxI ' %5 = getelementptr inbounds i32, ptr %0, i64 4' at position {flt: [@-1]} with state -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoRecurse] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state may-recurse -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAInterFnReachability] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state #queries(1) -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAIntraFnReachability] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state #queries(1) -; GRAPH-EMPTY: -; GRAPH-NEXT: [AACallEdges] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state CallEdges[0,1] -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAIsDead] for CtxI ' br label %8' at position {flt: [@-1]} with state assumed-live -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoUndef] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state may-undef-or-poison -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoUndef] for CtxI ' %5 = getelementptr inbounds i32, ptr %0, i64 4' at position {flt: [@-1]} with state may-undef-or-poison -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAIsDead] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance@-1]} with state assumed-live -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoUndef] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance@-1]} with state may-undef-or-poison -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAHeapToStack] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state [H2S] Mallocs Good/Bad: 0/0 -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAMustProgress] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state may-not-progress -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAWillReturn] for CtxI 
' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state may-noreturn -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAWillReturn] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state may-noreturn -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoRecurse] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state may-recurse -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoFree] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state nofree -; GRAPH-NEXT: updates [AANoFree] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state nofree -; GRAPH-NEXT: updates [AANoFree] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state nofree -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoFree] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state nofree -; GRAPH-NEXT: updates [AANoFree] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state nofree -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoSync] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state nosync -; GRAPH-NEXT: updates [AANoSync] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state nosync -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoSync] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state nosync -; GRAPH-NEXT: updates [AANoSync] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state nosync -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAMemoryLocation] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state memory:argument -; GRAPH-NEXT: updates [AAMemoryLocation] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state memory:argument -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAMemoryLocation] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state memory:argument -; GRAPH-NEXT: updates [AAMemoryLocation] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state memory:argument -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAMemoryBehavior] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state readonly -; GRAPH-NEXT: updates [AAMemoryBehavior] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state readonly -; GRAPH-NEXT: updates [AAMemoryLocation] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state memory:argument -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAMemoryBehavior] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state readonly -; GRAPH-NEXT: updates [AAMemoryBehavior] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state readonly -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoCapture] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state assumed not-captured-maybe-returned -; GRAPH-NEXT: updates [AANoCapture] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state assumed not-captured-maybe-returned -; GRAPH-NEXT: updates [AAMemoryBehavior] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state readonly -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAIsDead] for CtxI ' %5 = getelementptr inbounds i32, ptr %0, 
i64 4' at position {flt: [@-1]} with state assumed-live -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAIsDead] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state assumed-live -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAIsDead] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state assumed-live -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoCapture] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state assumed not-captured-maybe-returned -; GRAPH-NEXT: updates [AANoCapture] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state assumed not-captured-maybe-returned -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAIsDead] for CtxI ' br label %8' at position {flt: [@-1]} with state assumed-live -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAUnderlyingObjects] for CtxI ' %5 = getelementptr inbounds i32, ptr %0, i64 4' at position {flt: [@-1]} with state UnderlyingObjects inter #1 objs, intra #1 objs -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAAssumptionInfo] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn:checkAndAdvance [checkAndAdvance@-1]} with state Known [], Assumed [] -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAAlign] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance@-1]} with state align<1-16> -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAAlign] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state align<16-16> -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAAlign] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state align<16-16> -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAAlign] for CtxI ' %5 = getelementptr inbounds i32, ptr %0, i64 4' at position {flt: [@-1]} with state align<16-16> -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANonNull] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance@-1]} with state nonnull -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANonNull] for CtxI ' %.0 = phi ptr [ %6, %4 ], [ %0, %7 ]' at position {flt:.0 [.0@-1]} with state nonnull -; GRAPH-NEXT: updates [AANonNull] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance@-1]} with state nonnull -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANonNull] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state nonnull -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoAlias] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance@-1]} with state may-alias -; GRAPH-EMPTY: -; GRAPH-NEXT: [AADereferenceable] for CtxI ' %2 = load i32, ptr %0, align 4' at position {fn_ret:checkAndAdvance [checkAndAdvance@-1]} with state unknown-dereferenceable -; GRAPH-EMPTY: -; GRAPH-NEXT: [AADereferenceable] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state dereferenceable_or_null<4-4> [non-null is unknown] -; GRAPH-NEXT: updates [AADereferenceable] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state unknown-dereferenceable -; GRAPH-EMPTY: -; GRAPH-NEXT: [AADereferenceable] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state unknown-dereferenceable -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoFree] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state nofree -; GRAPH-NEXT: updates [AANoFree] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state nofree -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAPrivatizablePtr] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with 
state [no-priv] -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAAssumptionInfo] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs: [@-1]} with state Known [], Assumed [] -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoAlias] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state may-alias -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoAlias] for CtxI ' %5 = getelementptr inbounds i32, ptr %0, i64 4' at position {flt: [@-1]} with state may-alias -; GRAPH-EMPTY: -; GRAPH-NEXT: [AANoFree] for CtxI ' %6 = call ptr @checkAndAdvance(ptr %5)' at position {cs_arg: [@0]} with state nofree -; GRAPH-EMPTY: -; GRAPH-NEXT: [AAAddressSpace] for CtxI ' %2 = load i32, ptr %0, align 4' at position {arg: [@0]} with state addrspace() -; GRAPH-EMPTY: -; GRAPH-NEXT: [AADereferenceable] for CtxI ' %5 = getelementptr inbounds i32, ptr %0, i64 4' at position {flt: [@-1]} with state unknown-dereferenceable - -; GRAPH-NOT: update - -; -; Check for .dot file -; -; DOT-DAG: Node[[Node0:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead] -; DOT-DAG: Node[[Node1:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues] -; DOT-DAG: Node[[Node2:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues] -; DOT-DAG: Node[[Node3:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues] -; DOT-DAG: Node[[Node4:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues] -; DOT-DAG: Node[[Node5:0x[a-z0-9]+]] [shape=record,label="{[AANoReturn] -; DOT-DAG: Node[[Node6:0x[a-z0-9]+]] [shape=record,label="{[AANoReturn] -; DOT-DAG: Node[[Node7:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead] -; DOT-DAG: Node[[Node8:0x[a-z0-9]+]] [shape=record,label="{[AAWillReturn] -; DOT-DAG: Node[[Node9:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead] -; DOT-DAG: Node[[Node10:0x[a-z0-9]+]] [shape=record,label="{[AANoUnwind] -; DOT-DAG: Node[[Node11:0x[a-z0-9]+]] [shape=record,label="{[AANoUnwind] -; DOT-DAG: Node[[Node12:0x[a-z0-9]+]] [shape=record,label="{[AAMemoryLocation] -; DOT-DAG: Node[[Node13:0x[a-z0-9]+]] [shape=record,label="{[AAMemoryLocation] -; DOT-DAG: Node[[Node14:0x[a-z0-9]+]] [shape=record,label="{[AAMemoryBehavior] -; DOT-DAG: Node[[Node15:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead] -; DOT-DAG: Node[[Node16:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead] -; DOT-DAG: Node[[Node17:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead] -; DOT-DAG: Node[[Node18:0x[a-z0-9]+]] [shape=record,label="{[AAMemoryBehavior] -; DOT-DAG: Node[[Node19:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues] -; DOT-DAG: Node[[Node20:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues] -; DOT-DAG: Node[[Node22:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues] -; DOT-DAG: Node[[Node23:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues] -; DOT-DAG: Node[[Node24:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues] -; DOT-DAG: Node[[Node25:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues] -; DOT-DAG: Node[[Node26:0x[a-z0-9]+]] [shape=record,label="{[AAPotentialValues] -; DOT-DAG: Node[[Node27:0x[a-z0-9]+]] [shape=record,label="{[AAInstanceInfo] -; DOT-DAG: Node[[Node28:0x[a-z0-9]+]] [shape=record,label="{[AANoRecurse] -; DOT-DAG: Node[[Node29:0x[a-z0-9]+]] [shape=record,label="{[AAInterFnReachability] -; DOT-DAG: Node[[Node30:0x[a-z0-9]+]] [shape=record,label="{[AAIntraFnReachability] -; DOT-DAG: Node[[Node31:0x[a-z0-9]+]] [shape=record,label="{[AACallEdges] -; DOT-DAG: Node[[Node32:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead] -; DOT-DAG: Node[[Node33:0x[a-z0-9]+]] [shape=record,label="{[AAWillReturn] -; DOT-DAG: Node[[Node34:0x[a-z0-9]+]] 
[shape=record,label="{[AANoRecurse] -; DOT-DAG: Node[[Node35:0x[a-z0-9]+]] [shape=record,label="{[AAUndefinedBehavior] -; DOT-DAG: Node[[Node36:0x[a-z0-9]+]] [shape=record,label="{[AANoUndef] -; DOT-DAG: Node[[Node37:0x[a-z0-9]+]] [shape=record,label="{[AANoUndef] -; DOT-DAG: Node[[Node38:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead] -; DOT-DAG: Node[[Node39:0x[a-z0-9]+]] [shape=record,label="{[AANoUndef] -; DOT-DAG: Node[[Node41:0x[a-z0-9]+]] [shape=record,label="{[AANoSync] -; DOT-DAG: Node[[Node42:0x[a-z0-9]+]] [shape=record,label="{[AANoSync] -; DOT-DAG: Node[[Node43:0x[a-z0-9]+]] [shape=record,label="{[AANoFree] -; DOT-DAG: Node[[Node44:0x[a-z0-9]+]] [shape=record,label="{[AANoFree] -; DOT-DAG: Node[[Node45:0x[a-z0-9]+]] [shape=record,label="{[AAAssumptionInfo] -; DOT-DAG: Node[[Node46:0x[a-z0-9]+]] [shape=record,label="{[AAHeapToStack] -; DOT-DAG: Node[[Node47:0x[a-z0-9]+]] [shape=record,label="{[AAAlign] -; DOT-DAG: Node[[Node48:0x[a-z0-9]+]] [shape=record,label="{[AAAlign] -; DOT-DAG: Node[[Node49:0x[a-z0-9]+]] [shape=record,label="{[AAAlign] -; DOT-DAG: Node[[Node50:0x[a-z0-9]+]] [shape=record,label="{[AAAlign] -; DOT-DAG: Node[[Node51:0x[a-z0-9]+]] [shape=record,label="{[AANonNull] -; DOT-DAG: Node[[Node52:0x[a-z0-9]+]] [shape=record,label="{[AANonNull] -; DOT-DAG: Node[[Node53:0x[a-z0-9]+]] [shape=record,label="{[AANoAlias] -; DOT-DAG: Node[[Node54:0x[a-z0-9]+]] [shape=record,label="{[AADereferenceable] -; DOT-DAG: Node[[Node55:0x[a-z0-9]+]] [shape=record,label="{[AADereferenceable] -; DOT-DAG: Node[[Node56:0x[a-z0-9]+]] [shape=record,label="{[AADereferenceable] -; DOT-DAG: Node[[Node59:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead] -; DOT-DAG: Node[[Node60:0x[a-z0-9]+]] [shape=record,label="{[AANoAlias] -; DOT-DAG: Node[[Node61:0x[a-z0-9]+]] [shape=record,label="{[AANoCapture] -; DOT-DAG: Node[[Node62:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead] -; DOT-DAG: Node[[Node63:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead] -; DOT-DAG: Node[[Node64:0x[a-z0-9]+]] [shape=record,label="{[AANoCapture] -; DOT-DAG: Node[[Node65:0x[a-z0-9]+]] [shape=record,label="{[AAIsDead] -; DOT-DAG: Node[[Node66:0x[a-z0-9]+]] [shape=record,label="{[AAMemoryBehavior] -; DOT-DAG: Node[[Node67:0x[a-z0-9]+]] [shape=record,label="{[AAMemoryBehavior] -; DOT-DAG: Node[[Node68:0x[a-z0-9]+]] [shape=record,label="{[AANoFree] -; DOT-DAG: Node[[Node69:0x[a-z0-9]+]] [shape=record,label="{[AAPrivatizablePtr] -; DOT-DAG: Node[[Node70:0x[a-z0-9]+]] [shape=record,label="{[AAAssumptionInfo] -; DOT-DAG: Node[[Node71:0x[a-z0-9]+]] [shape=record,label="{[AANoAlias] -; DOT-DAG: Node[[Node73:0x[a-z0-9]+]] [shape=record,label="{[AANoFree] -; DOT-DAG: Node[[Node75:0x[a-z0-9]+]] [shape=record,label="{[AAAddressSpace] -; DOT-DAG: Node[[Node74:0x[a-z0-9]+]] [shape=record,label="{[AADereferenceable] - -; DOT-DAG: Node[[Node20]] -> Node[[Node19]]; -; DOT-DAG: Node[[Node13]] -> Node[[Node12]]; -; DOT-DAG: Node[[Node55]] -> Node[[Node56]]; -; DOT-DAG: Node[[Node68]] -> Node[[Node73]]; -; DOT-DAG: Node[[Node64]] -> Node[[Node61]]; -; DOT-DAG: Node[[Node61]] -> Node[[Node64]]; -; DOT-DAG: Node[[Node12]] -> Node[[Node13]]; -; DOT-DAG: Node[[Node11]] -> Node[[Node61]]; -; DOT-DAG: Node[[Node14]] -> Node[[Node18]]; -; DOT-DAG: Node[[Node43]] -> Node[[Node68]]; -; DOT-DAG: Node[[Node19]] -> Node[[Node22]]; -; DOT-DAG: Node[[Node10]] -> Node[[Node11]]; -; DOT-DAG: Node[[Node41]] -> Node[[Node42]]; -; DOT-DAG: Node[[Node42]] -> Node[[Node41]]; -; DOT-DAG: Node[[Node11]] -> Node[[Node10]]; -; DOT-DAG: Node[[Node67]] -> Node[[Node66]]; -; 
DOT-DAG: Node[[Node18]] -> Node[[Node14]]; -; DOT-DAG: Node[[Node66]] -> Node[[Node67]]; -; DOT-DAG: Node[[Node44]] -> Node[[Node43]]; -; DOT-DAG: Node[[Node43]] -> Node[[Node44]]; ;. ; CHECK: attributes #[[ATTR0]] = { nofree nosync nounwind memory(argmem: read) } ; CHECK: attributes #[[ATTR1]] = { nofree nosync nounwind memory(read) } ;. -;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: -; GRAPH: {{.*}}