diff --git a/llvm/lib/IR/KnowledgeRetention.cpp b/llvm/lib/IR/KnowledgeRetention.cpp
--- a/llvm/lib/IR/KnowledgeRetention.cpp
+++ b/llvm/lib/IR/KnowledgeRetention.cpp
@@ -8,6 +8,7 @@
 #include "llvm/IR/KnowledgeRetention.h"
 #include "llvm/ADT/DenseSet.h"
+#include "llvm/IR/Function.h"
 #include "llvm/IR/InstIterator.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Module.h"
@@ -173,9 +174,45 @@
         FnAssume, ArrayRef<Value *>({ConstantInt::getTrue(C)}), OpBundle));
   }
 
-  void addInstruction(const Instruction *I) {
+  void addAttr(Attribute::AttrKind Kind, Value *Val, unsigned Argument = 0) {
+    AssumedKnowledge AK;
+    AK.Name = Attribute::getNameFromAttrKind(Kind).data();
+    AK.WasOn.setPointer(Val);
+    if (Attribute::doesAttrKindHaveArgument(Kind)) {
+      AK.Argument =
+          ConstantInt::get(Type::getInt64Ty(M->getContext()), Argument);
+    } else {
+      AK.Argument = nullptr;
+      assert(Argument == 0 && "there should be no argument");
+    }
+    AssumedKnowledgeSet.insert(AK);
+  }
+
+  void addAccessedPtr(Instruction *MemInst, Value *Pointer, Type *AccType,
+                      MaybeAlign MA) {
+    uint64_t DerefSize = MemInst->getModule()
+                             ->getDataLayout()
+                             .getTypeStoreSize(AccType)
+                             .getKnownMinSize();
+    if (DerefSize != 0) {
+      addAttr(Attribute::Dereferenceable, Pointer, DerefSize);
+      if (!NullPointerIsDefined(MemInst->getFunction(),
+                                Pointer->getType()->getPointerAddressSpace()))
+        addAttr(Attribute::NonNull, Pointer);
+    }
+    if (MA.valueOrOne() > 1)
+      addAttr(Attribute::Alignment, Pointer, MA.valueOrOne().value());
+  }
+
+  void addInstruction(Instruction *I) {
     if (auto *Call = dyn_cast<CallBase>(I))
-      addCall(Call);
+      return addCall(Call);
+    if (auto *Load = dyn_cast<LoadInst>(I))
+      return addAccessedPtr(I, Load->getPointerOperand(), Load->getType(), Load->getAlign());
+    if (auto *Store = dyn_cast<StoreInst>(I))
+      return addAccessedPtr(I, Store->getPointerOperand(),
+                            Store->getValueOperand()->getType(),
+                            Store->getAlign());
     // TODO: Add support for the other Instructions.
     // TODO: Maybe we should look around and merge with other llvm.assume.
   }
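Note: a minimal sketch of the new behavior, not part of the patch (%p and %v are hypothetical names). Given a load such as

  %v = load i32, i32* %p, align 4

the builder now records the access's stated alignment (when greater than 1), the store size of the accessed type as dereferenceability, and nonnull when null is not a defined pointer value in the pointer's address space. Under the datalayout used by the test below, that knowledge materializes as:

  ; hypothetical output for the i32 load above
  call void @llvm.assume(i1 true) [ "align"(i32* %p, i64 4), "dereferenceable"(i32* %p, i64 4), "nonnull"(i32* %p) ]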
diff --git a/llvm/test/IR/assume-builder.ll b/llvm/test/IR/assume-builder.ll
--- a/llvm/test/IR/assume-builder.ll
+++ b/llvm/test/IR/assume-builder.ll
@@ -2,6 +2,8 @@
 ; RUN: opt -passes='assume-builder,verify' --enable-knowledge-retention -S %s | FileCheck %s --check-prefixes=BASIC
 ; RUN: opt -passes='assume-builder,verify' --enable-knowledge-retention --assume-preserve-all -S %s | FileCheck %s --check-prefixes=ALL
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+
 
 declare void @func(i32*, i32*)
 declare void @func_cold(i32*) cold
 declare void @func_strbool(i32*) "no-jump-tables"
@@ -64,3 +66,259 @@
   call void @func(i32* nonnull %P1, i32* nonnull %P)
   ret void
 }
+
+%struct.S = type { i32, i8, i32* }
+
+define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
+; BASIC-LABEL: @test2(
+; BASIC-NEXT:    [[TMP4:%.*]] = alloca %struct.S*, align 8
+; BASIC-NEXT:    [[TMP5:%.*]] = alloca i32*, align 8
+; BASIC-NEXT:    [[TMP6:%.*]] = alloca i8*, align 8
+; BASIC-NEXT:    [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; BASIC-NEXT:    store %struct.S* [[TMP0:%.*]], %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]) ]
+; BASIC-NEXT:    store i32* [[TMP1:%.*]], i32** [[TMP5]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]) ]
+; BASIC-NEXT:    store i8* [[TMP2:%.*]], i8** [[TMP6]]
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]) ]
+; BASIC-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP8]], i64 4), "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]) ]
+; BASIC-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
+; BASIC-NEXT:    [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8** [[TMP6]], i64 8), "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]) ]
+; BASIC-NEXT:    [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]) ]
+; BASIC-NEXT:    store i8 [[TMP10]], i8* [[TMP11]], align 1
+; BASIC-NEXT:    [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; BASIC-NEXT:    [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]]
+; BASIC-NEXT:    [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
+; BASIC-NEXT:    [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; BASIC-NEXT:    [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP17]], i64 8), "dereferenceable"(i32* [[TMP17]], i64 4), "nonnull"(i32* [[TMP17]]) ]
+; BASIC-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; BASIC-NEXT:    [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[TMP20]], i64 4), "dereferenceable"(i8* [[TMP20]], i64 1), "nonnull"(i8* [[TMP20]]) ]
+; BASIC-NEXT:    [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
+; BASIC-NEXT:    [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
+; BASIC-NEXT:    [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; BASIC-NEXT:    [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP25]], i64 8), "dereferenceable"(i32** [[TMP25]], i64 8), "nonnull"(i32** [[TMP25]]) ]
+; BASIC-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP26]], i64 4), "dereferenceable"(i32* [[TMP26]], i64 4), "nonnull"(i32* [[TMP26]]) ]
+; BASIC-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
+; BASIC-NEXT:    [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
+; BASIC-NEXT:    ret i32 [[TMP28]]
+;
+; ALL-LABEL: @test2(
+; ALL-NEXT:    [[TMP4:%.*]] = alloca %struct.S*, align 8
+; ALL-NEXT:    [[TMP5:%.*]] = alloca i32*, align 8
+; ALL-NEXT:    [[TMP6:%.*]] = alloca i8*, align 8
+; ALL-NEXT:    [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; ALL-NEXT:    store %struct.S* [[TMP0:%.*]], %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]) ]
+; ALL-NEXT:    store i32* [[TMP1:%.*]], i32** [[TMP5]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]) ]
+; ALL-NEXT:    store i8* [[TMP2:%.*]], i8** [[TMP6]]
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]) ]
+; ALL-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP8]], i64 4), "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]) ]
+; ALL-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
+; ALL-NEXT:    [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8** [[TMP6]], i64 8), "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]) ]
+; ALL-NEXT:    [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]) ]
+; ALL-NEXT:    store i8 [[TMP10]], i8* [[TMP11]], align 1
+; ALL-NEXT:    [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; ALL-NEXT:    [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]]
+; ALL-NEXT:    [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
+; ALL-NEXT:    [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; ALL-NEXT:    [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP17]], i64 8), "dereferenceable"(i32* [[TMP17]], i64 4), "nonnull"(i32* [[TMP17]]) ]
+; ALL-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; ALL-NEXT:    [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[TMP20]], i64 4), "dereferenceable"(i8* [[TMP20]], i64 1), "nonnull"(i8* [[TMP20]]) ]
+; ALL-NEXT:    [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
+; ALL-NEXT:    [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
+; ALL-NEXT:    [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; ALL-NEXT:    [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP25]], i64 8), "dereferenceable"(i32** [[TMP25]], i64 8), "nonnull"(i32** [[TMP25]]) ]
+; ALL-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP26]], i64 4), "dereferenceable"(i32* [[TMP26]], i64 4), "nonnull"(i32* [[TMP26]]) ]
+; ALL-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
+; ALL-NEXT:    [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
+; ALL-NEXT:    ret i32 [[TMP28]]
+;
+  %4 = alloca %struct.S*, align 8
+  %5 = alloca i32*, align 8
+  %6 = alloca i8*, align 8
+  %7 = alloca %struct.S, align 8
+  store %struct.S* %0, %struct.S** %4, align 8
+  store i32* %1, i32** %5, align 8
+  store i8* %2, i8** %6
+  %8 = load i32*, i32** %5, align 8
+  %9 = load i32, i32* %8, align 4
+  %10 = trunc i32 %9 to i8
+  %11 = load i8*, i8** %6, align 8
+  store i8 %10, i8* %11, align 1
+  %12 = bitcast %struct.S* %7 to i8*
+  %13 = load %struct.S*, %struct.S** %4
+  %14 = bitcast %struct.S* %13 to i8*
+  %15 = bitcast %struct.S* %7 to i8*
+  %16 = load %struct.S*, %struct.S** %4, align 8
+  %17 = getelementptr inbounds %struct.S, %struct.S* %16, i32 0, i32 0
+  %18 = load i32, i32* %17, align 8
+  %19 = load %struct.S*, %struct.S** %4, align 8
+  %20 = getelementptr inbounds %struct.S, %struct.S* %19, i32 0, i32 1
+  %21 = load i8, i8* %20, align 4
+  %22 = sext i8 %21 to i32
+  %23 = add nsw i32 %18, %22
+  %24 = load %struct.S*, %struct.S** %4, align 8
+  %25 = getelementptr inbounds %struct.S, %struct.S* %24, i32 0, i32 2
+  %26 = load i32*, i32** %25, align 8
+  %27 = load i32, i32* %26, align 4
+  %28 = add nsw i32 %23, %27
+  ret i32 %28
+}
+
+define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true" {
+; BASIC-LABEL: @test3(
+; BASIC-NEXT:    [[TMP4:%.*]] = alloca %struct.S*, align 8
+; BASIC-NEXT:    [[TMP5:%.*]] = alloca i32*, align 8
+; BASIC-NEXT:    [[TMP6:%.*]] = alloca i8*, align 8
+; BASIC-NEXT:    [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; BASIC-NEXT:    store %struct.S* [[TMP0:%.*]], %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8) ]
+; BASIC-NEXT:    store i32* [[TMP1:%.*]], i32** [[TMP5]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8** [[TMP6]], i64 8), "dereferenceable"(i8** [[TMP6]], i64 8) ]
+; BASIC-NEXT:    store i8* [[TMP2:%.*]], i8** [[TMP6]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8) ]
+; BASIC-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP8]], i64 4), "dereferenceable"(i32* [[TMP8]], i64 4) ]
+; BASIC-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
+; BASIC-NEXT:    [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8) ]
+; BASIC-NEXT:    [[TMP11:%.*]] = load i8*, i8** [[TMP6]]
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1) ]
+; BASIC-NEXT:    store i8 [[TMP10]], i8* [[TMP11]], align 1
+; BASIC-NEXT:    [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; BASIC-NEXT:    [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
+; BASIC-NEXT:    [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; BASIC-NEXT:    [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP17]], i64 8), "dereferenceable"(i32* [[TMP17]], i64 4) ]
+; BASIC-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; BASIC-NEXT:    [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[TMP20]], i64 4), "dereferenceable"(i8* [[TMP20]], i64 1) ]
+; BASIC-NEXT:    [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
+; BASIC-NEXT:    [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
+; BASIC-NEXT:    [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; BASIC-NEXT:    [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8) ]
+; BASIC-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP25]]
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP26]], i64 4), "dereferenceable"(i32* [[TMP26]], i64 4) ]
+; BASIC-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
+; BASIC-NEXT:    [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
+; BASIC-NEXT:    ret i32 [[TMP28]]
+;
+; ALL-LABEL: @test3(
+; ALL-NEXT:    [[TMP4:%.*]] = alloca %struct.S*, align 8
+; ALL-NEXT:    [[TMP5:%.*]] = alloca i32*, align 8
+; ALL-NEXT:    [[TMP6:%.*]] = alloca i8*, align 8
+; ALL-NEXT:    [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; ALL-NEXT:    store %struct.S* [[TMP0:%.*]], %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8) ]
+; ALL-NEXT:    store i32* [[TMP1:%.*]], i32** [[TMP5]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8** [[TMP6]], i64 8), "dereferenceable"(i8** [[TMP6]], i64 8) ]
+; ALL-NEXT:    store i8* [[TMP2:%.*]], i8** [[TMP6]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8) ]
+; ALL-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP8]], i64 4), "dereferenceable"(i32* [[TMP8]], i64 4) ]
+; ALL-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
+; ALL-NEXT:    [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8) ]
+; ALL-NEXT:    [[TMP11:%.*]] = load i8*, i8** [[TMP6]]
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1) ]
+; ALL-NEXT:    store i8 [[TMP10]], i8* [[TMP11]], align 1
+; ALL-NEXT:    [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; ALL-NEXT:    [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
+; ALL-NEXT:    [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; ALL-NEXT:    [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP17]], i64 8), "dereferenceable"(i32* [[TMP17]], i64 4) ]
+; ALL-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; ALL-NEXT:    [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[TMP20]], i64 4), "dereferenceable"(i8* [[TMP20]], i64 1) ]
+; ALL-NEXT:    [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
+; ALL-NEXT:    [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
+; ALL-NEXT:    [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; ALL-NEXT:    [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8) ]
+; ALL-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP25]]
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP26]], i64 4), "dereferenceable"(i32* [[TMP26]], i64 4) ]
+; ALL-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
+; ALL-NEXT:    [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
+; ALL-NEXT:    ret i32 [[TMP28]]
+;
+  %4 = alloca %struct.S*, align 8
+  %5 = alloca i32*, align 8
+  %6 = alloca i8*, align 8
+  %7 = alloca %struct.S, align 8
+  store %struct.S* %0, %struct.S** %4, align 8
+  store i32* %1, i32** %5, align 8
+  store i8* %2, i8** %6, align 8
+  %8 = load i32*, i32** %5, align 8
+  %9 = load i32, i32* %8, align 4
+  %10 = trunc i32 %9 to i8
+  %11 = load i8*, i8** %6
+  store i8 %10, i8* %11, align 1
+  %12 = bitcast %struct.S* %7 to i8*
+  %13 = load %struct.S*, %struct.S** %4, align 8
+  %14 = bitcast %struct.S* %13 to i8*
+  %15 = bitcast %struct.S* %7 to i8*
+  %16 = load %struct.S*, %struct.S** %4, align 8
+  %17 = getelementptr inbounds %struct.S, %struct.S* %16, i32 0, i32 0
+  %18 = load i32, i32* %17, align 8
+  %19 = load %struct.S*, %struct.S** %4, align 8
+  %20 = getelementptr inbounds %struct.S, %struct.S* %19, i32 0, i32 1
+  %21 = load i8, i8* %20, align 4
+  %22 = sext i8 %21 to i32
+  %23 = add nsw i32 %18, %22
+  %24 = load %struct.S*, %struct.S** %4, align 8
+  %25 = getelementptr inbounds %struct.S, %struct.S* %24, i32 0, i32 2
+  %26 = load i32*, i32** %25
+  %27 = load i32, i32* %26, align 4
+  %28 = add nsw i32 %23, %27
+  ret i32 %28
+}