Index: include/llvm/Analysis/MemoryBuiltins.h
===================================================================
--- include/llvm/Analysis/MemoryBuiltins.h
+++ include/llvm/Analysis/MemoryBuiltins.h
@@ -93,6 +93,11 @@
/// reallocates memory (e.g., realloc).
bool isReallocLikeFn(const Function *F, const TargetLibraryInfo *TLI);

+/// Tests if a value is a call or invoke to a library function that
+/// allocates memory and throws if an allocation failed (e.g., new).
+bool isOpNewLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+                   bool LookThroughBitCast = false);
+
//===----------------------------------------------------------------------===//
// malloc Call Utility Functions.
//
Index: lib/Analysis/MemoryBuiltins.cpp
===================================================================
--- lib/Analysis/MemoryBuiltins.cpp
+++ lib/Analysis/MemoryBuiltins.cpp
@@ -276,6 +276,13 @@
  return getAllocationDataForFunction(F, ReallocLike, TLI).hasValue();
}

+/// Tests if a value is a call or invoke to a library function that
+/// allocates memory and throws if an allocation failed (e.g., new).
+bool llvm::isOpNewLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+                         bool LookThroughBitCast) {
+  return getAllocationData(V, OpNewLike, TLI, LookThroughBitCast).hasValue();
+}
+
/// extractMallocCall - Returns the corresponding CallInst if the instruction
/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
/// ignore InvokeInst here.
Index: lib/Transforms/InstCombine/InstCombineCalls.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -4177,8 +4177,43 @@
  return nullptr;
}

+static void annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI) {
+  CallInst *CI = dyn_cast<CallInst>(&Call);
+  if (!CI)
+    return;
+  ConstantInt *Op0C = dyn_cast<ConstantInt>(CI->getOperand(0));
+  ConstantInt *Op1C = (CI->getNumArgOperands() == 1)
+                          ? nullptr
+                          : dyn_cast<ConstantInt>(CI->getOperand(1));
+  if (isMallocLikeFn(CI, TLI) && Op0C && !Op0C->isNullValue()) {
+    CI->addAttribute(AttributeList::ReturnIndex,
+                     Attribute::getWithDereferenceableOrNullBytes(
+                         CI->getContext(), Op0C->getZExtValue()));
+  } else if (isOpNewLikeFn(CI, TLI) && Op0C && !Op0C->isNullValue()) {
+    CI->addAttribute(AttributeList::ReturnIndex,
+                     Attribute::getWithDereferenceableBytes(
+                         CI->getContext(), Op0C->getZExtValue()));
+  } else if (isReallocLikeFn(CI, TLI) && Op1C && !Op1C->isNullValue()) {
+    CI->addAttribute(AttributeList::ReturnIndex,
+                     Attribute::getWithDereferenceableOrNullBytes(
+                         CI->getContext(), Op1C->getZExtValue()));
+  } else if (isCallocLikeFn(CI, TLI) && Op0C && !Op0C->isNullValue() && Op1C &&
+             !Op1C->isNullValue()) {
+    bool Overflow;
+    const APInt &N = Op0C->getValue();
+    APInt Size = N.umul_ov(Op1C->getValue(), Overflow);
+    if (!Overflow)
+      CI->addAttribute(AttributeList::ReturnIndex,
+                       Attribute::getWithDereferenceableOrNullBytes(
+                           CI->getContext(), Size.getZExtValue()));
+  }
+}
+
/// Improvements for call, callbr and invoke instructions.
Instruction *InstCombiner::visitCallBase(CallBase &Call) {
+  if (isAllocationFn(&Call, &TLI))
+    annotateAnyAllocSite(Call, &TLI);
+
  if (isAllocLikeFn(&Call, &TLI))
    return visitAllocSite(Call);

Index: test/Transforms/InstCombine/compare-unescaped.ll
===================================================================
--- test/Transforms/InstCombine/compare-unescaped.ll
+++ test/Transforms/InstCombine/compare-unescaped.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -S < %s | FileCheck %s

@gp = global i32* null, align 8
@@ -5,23 +6,25 @@
declare i8* @malloc(i64) #1

define i1 @compare_global_trivialeq() {
+; CHECK-LABEL: @compare_global_trivialeq(
+; CHECK-NEXT: ret i1 false
+;
  %m = call i8* @malloc(i64 4)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp, align 8
  %cmp = icmp eq i32* %bc, %lgp
  ret i1 %cmp
-; CHECK-LABEL: compare_global_trivialeq
-; CHECK: ret i1 false
}

define i1 @compare_global_trivialne() {
+; CHECK-LABEL: @compare_global_trivialne(
+; CHECK-NEXT: ret i1 true
+;
  %m = call i8* @malloc(i64 4)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp, align 8
  %cmp = icmp ne i32* %bc, %lgp
  ret i1 %cmp
-; CHECK-LABEL: compare_global_trivialne
-; CHECK: ret i1 true
}


@@ -30,45 +33,68 @@
; The comparison should fold to false irrespective of whether the call to malloc can be elided or not
declare void @f()
define i1 @compare_and_call_with_deopt() {
-; CHECK-LABEL: compare_and_call_with_deopt
+; CHECK-LABEL: @compare_and_call_with_deopt(
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(24) i8* @malloc(i64 24)
+; CHECK-NEXT: tail call void @f() [ "deopt"(i8* [[M]]) ]
+; CHECK-NEXT: ret i1 false
+;
  %m = call i8* @malloc(i64 24)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp, align 8, !nonnull !0
  %cmp = icmp eq i32* %lgp, %bc
  tail call void @f() [ "deopt"(i8* %m) ]
  ret i1 %cmp
-; CHECK: ret i1 false
}

; Same functon as above with deopt operand in function f, but comparison is NE
define i1 @compare_ne_and_call_with_deopt() {
-; CHECK-LABEL: compare_ne_and_call_with_deopt
+; CHECK-LABEL: @compare_ne_and_call_with_deopt(
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(24) i8* @malloc(i64 24)
+; CHECK-NEXT: tail call void @f() [ "deopt"(i8* [[M]]) ]
+; CHECK-NEXT: ret i1 true
+;
  %m = call i8* @malloc(i64 24)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp, align 8, !nonnull !0
  %cmp = icmp ne i32* %lgp, %bc
  tail call void @f() [ "deopt"(i8* %m) ]
  ret i1 %cmp
-; CHECK: ret i1 true
}

; Same function as above, but global not marked nonnull, and we cannot fold the comparison
define i1 @compare_ne_global_maybe_null() {
-; CHECK-LABEL: compare_ne_global_maybe_null
+; CHECK-LABEL: @compare_ne_global_maybe_null(
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(24) i8* @malloc(i64 24)
+; CHECK-NEXT: [[BC:%.*]] = bitcast i8* [[M]] to i32*
+; CHECK-NEXT: [[LGP:%.*]] = load i32*, i32** @gp, align 8
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32* [[LGP]], [[BC]]
+; CHECK-NEXT: tail call void @f() [ "deopt"(i8* [[M]]) ]
+; CHECK-NEXT: ret i1 [[CMP]]
+;
  %m = call i8* @malloc(i64 24)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp
  %cmp = icmp ne i32* %lgp, %bc
  tail call void @f() [ "deopt"(i8* %m) ]
  ret i1 %cmp
-; CHECK: ret i1 %cmp
}

; FIXME: The comparison should fold to false since %m escapes (call to function escape)
; after the comparison.
declare void @escape(i8*)
define i1 @compare_and_call_after() {
-; CHECK-LABEL: compare_and_call_after
+; CHECK-LABEL: @compare_and_call_after(
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(24) i8* @malloc(i64 24)
+; CHECK-NEXT: [[BC:%.*]] = bitcast i8* [[M]] to i32*
+; CHECK-NEXT: [[LGP:%.*]] = load i32*, i32** @gp, align 8, !nonnull !0
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32* [[LGP]], [[BC]]
+; CHECK-NEXT: br i1 [[CMP]], label [[ESCAPE_CALL:%.*]], label [[JUST_RETURN:%.*]]
+; CHECK: escape_call:
+; CHECK-NEXT: call void @escape(i8* [[M]])
+; CHECK-NEXT: ret i1 true
+; CHECK: just_return:
+; CHECK-NEXT: ret i1 [[CMP]]
+;
  %m = call i8* @malloc(i64 24)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp, align 8, !nonnull !0
@@ -76,56 +102,74 @@
  br i1 %cmp, label %escape_call, label %just_return

escape_call:
-   call void @escape(i8* %m)
-   ret i1 true
+  call void @escape(i8* %m)
+  ret i1 true

just_return:
-   ret i1 %cmp
+  ret i1 %cmp
}

define i1 @compare_distinct_mallocs() {
+; CHECK-LABEL: @compare_distinct_mallocs(
+; CHECK-NEXT: ret i1 false
+;
  %m = call i8* @malloc(i64 4)
  %n = call i8* @malloc(i64 4)
  %cmp = icmp eq i8* %m, %n
  ret i1 %cmp
-  ; CHECK-LABEL: compare_distinct_mallocs
-  ; CHECK: ret i1 false
}

-; the compare is folded to true since the folding compare looks through bitcasts. 
-; call to malloc and the bitcast instructions are elided after that since there are no uses of the malloc 
+; the compare is folded to true since the folding compare looks through bitcasts.
+; call to malloc and the bitcast instructions are elided after that since there are no uses of the malloc
define i1 @compare_samepointer_under_bitcast() {
+; CHECK-LABEL: @compare_samepointer_under_bitcast(
+; CHECK-NEXT: ret i1 true
+;
  %m = call i8* @malloc(i64 4)
  %bc = bitcast i8* %m to i32*
  %bcback = bitcast i32* %bc to i8*
  %cmp = icmp eq i8* %m, %bcback
  ret i1 %cmp
-; CHECK-LABEL: compare_samepointer_under_bitcast
-; CHECK: ret i1 true
}

-; the compare is folded to true since the folding compare looks through bitcasts. 
+; the compare is folded to true since the folding compare looks through bitcasts.
; The malloc call for %m cannot be elided since it is used in the call to function f.
define i1 @compare_samepointer_escaped() {
+; CHECK-LABEL: @compare_samepointer_escaped(
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
+; CHECK-NEXT: call void @f() [ "deopt"(i8* [[M]]) ]
+; CHECK-NEXT: ret i1 true
+;
  %m = call i8* @malloc(i64 4)
  %bc = bitcast i8* %m to i32*
  %bcback = bitcast i32* %bc to i8*
  %cmp = icmp eq i8* %m, %bcback
  call void @f() [ "deopt"(i8* %m) ]
  ret i1 %cmp
-; CHECK-LABEL: compare_samepointer_escaped
-; CHECK-NEXT: %m = call i8* @malloc(i64 4)
-; CHECK-NEXT: call void @f() [ "deopt"(i8* %m) ]
-; CHECK: ret i1 true
}

; Technically, we can fold the %cmp2 comparison, even though %m escapes through
; the ret statement since `ret` terminates the function and we cannot reach from
-; the ret to cmp. 
+; the ret to cmp.
; FIXME: Folding this %cmp2 when %m escapes through ret could be an issue with
; cross-threading data dependencies since we do not make the distinction between
; atomic and non-atomic loads in capture tracking.
define i8* @compare_ret_escape(i8* %c) {
+; CHECK-LABEL: @compare_ret_escape(
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
+; CHECK-NEXT: [[N:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8* [[N]], [[C:%.*]]
+; CHECK-NEXT: br i1 [[CMP]], label [[RETST:%.*]], label [[CHK:%.*]]
+; CHECK: retst:
+; CHECK-NEXT: ret i8* [[M]]
+; CHECK: chk:
+; CHECK-NEXT: [[BC:%.*]] = bitcast i8* [[M]] to i32*
+; CHECK-NEXT: [[LGP:%.*]] = load i32*, i32** @gp, align 8, !nonnull !0
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32* [[LGP]], [[BC]]
+; CHECK-NEXT: br i1 [[CMP2]], label [[RETST]], label [[CHK2:%.*]]
+; CHECK: chk2:
+; CHECK-NEXT: ret i8* [[N]]
+;
  %m = call i8* @malloc(i64 4)
  %n = call i8* @malloc(i64 4)
  %cmp = icmp eq i8* %n, %c
@@ -142,23 +186,21 @@

chk2:
  ret i8* %n
-; CHECK-LABEL: compare_ret_escape
-; CHECK: %cmp = icmp eq i8* %n, %c
-; CHECK: %cmp2 = icmp eq i32* %lgp, %bc
}

; The malloc call for %m cannot be elided since it is used in the call to function f.
; However, the cmp can be folded to true as %n doesnt escape and %m, %n are distinct allocations
define i1 @compare_distinct_pointer_escape() {
+; CHECK-LABEL: @compare_distinct_pointer_escape(
+; CHECK-NEXT: [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
+; CHECK-NEXT: tail call void @f() [ "deopt"(i8* [[M]]) ]
+; CHECK-NEXT: ret i1 true
+;
  %m = call i8* @malloc(i64 4)
  %n = call i8* @malloc(i64 4)
  tail call void @f() [ "deopt"(i8* %m) ]
  %cmp = icmp ne i8* %m, %n
  ret i1 %cmp
-; CHECK-LABEL: compare_distinct_pointer_escape
-; CHECK-NEXT: %m = call i8* @malloc(i64 4)
-; CHECK-NEXT: tail call void @f() [ "deopt"(i8* %m) ]
-; CHECK-NEXT: ret i1 true
}

!0 = !{}
Index: test/Transforms/InstCombine/deref-alloc-fns.ll
===================================================================
--- test/Transforms/InstCombine/deref-alloc-fns.ll
+++ test/Transforms/InstCombine/deref-alloc-fns.ll
@@ -17,7 +17,7 @@

define noalias i8* @malloc_constant_size() {
; CHECK-LABEL: @malloc_constant_size(
-; CHECK-NEXT: [[CALL:%.*]] = tail call noalias i8* @malloc(i64 40)
+; CHECK-NEXT: [[CALL:%.*]] = tail call noalias dereferenceable_or_null(40) i8* @malloc(i64 40)
; CHECK-NEXT: ret i8* [[CALL]]
;
  %call = tail call noalias i8* @malloc(i64 40)
@@ -35,7 +35,7 @@

define noalias i8* @malloc_constant_size3() {
; CHECK-LABEL: @malloc_constant_size3(
-; CHECK-NEXT: [[CALL:%.*]] = tail call noalias dereferenceable(80) i8* @malloc(i64 40)
+; CHECK-NEXT: [[CALL:%.*]] = tail call noalias dereferenceable(80) dereferenceable_or_null(40) i8* @malloc(i64 40)
; CHECK-NEXT: ret i8* [[CALL]]
;
  %call = tail call noalias dereferenceable(80) i8* @malloc(i64 40)
@@ -72,7 +72,7 @@

define noalias i8* @realloc_constant_size(i8* %p) {
; CHECK-LABEL: @realloc_constant_size(
-; CHECK-NEXT: [[CALL:%.*]] = tail call noalias i8* @realloc(i8* [[P:%.*]], i64 40)
+; CHECK-NEXT: [[CALL:%.*]] = tail call noalias dereferenceable_or_null(40) i8* @realloc(i8* [[P:%.*]], i64 40)
; CHECK-NEXT: ret i8* [[CALL]]
;
  %call = tail call noalias i8* @realloc(i8* %p, i64 40)
@@ -136,7 +136,7 @@

define noalias i8* @calloc_constant_size() {
; CHECK-LABEL: @calloc_constant_size(
-; CHECK-NEXT: [[CALL:%.*]] = tail call noalias i8* @calloc(i64 16, i64 8)
+; CHECK-NEXT: [[CALL:%.*]] = tail call noalias dereferenceable_or_null(128) i8* @calloc(i64 16, i64 8)
; CHECK-NEXT: ret i8* [[CALL]]
;
  %call = tail call noalias i8* @calloc(i64 16, i64 8)
@@ -152,7 +152,6 @@
  ret i8* %call
}

-
define noalias i8* @op_new_nonconstant_size(i64 %n) {
; CHECK-LABEL: @op_new_nonconstant_size(
; CHECK-NEXT: [[CALL:%.*]] = tail call i8* @_Znam(i64 [[N:%.*]])
@@ -162,17 +161,17 @@
  ret i8* %call
}

-define noalias i8* @op_new_constant_zero_size() {
-; CHECK-LABEL: @op_new_constant_zero_size(
-; CHECK-NEXT: [[CALL:%.*]] = tail call i8* @_Znam(i64 40)
+define noalias i8* @op_new_constant_size() {
+; CHECK-LABEL: @op_new_constant_size(
+; CHECK-NEXT: [[CALL:%.*]] = tail call dereferenceable_or_null(40) i8* @_Znam(i64 40)
; CHECK-NEXT: ret i8* [[CALL]]
;
  %call = tail call i8* @_Znam(i64 40)
  ret i8* %call
}

-define noalias i8* @op_new_constant_size() {
-; CHECK-LABEL: @op_new_constant_size(
+define noalias i8* @op_new_constant_zero_size() {
+; CHECK-LABEL: @op_new_constant_zero_size(
; CHECK-NEXT: [[CALL:%.*]] = tail call i8* @_Znam(i64 0)
; CHECK-NEXT: ret i8* [[CALL]]
;
Index: test/Transforms/InstCombine/malloc-free-delete.ll
===================================================================
--- test/Transforms/InstCombine/malloc-free-delete.ll
+++ test/Transforms/InstCombine/malloc-free-delete.ll
@@ -1,14 +1,16 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
; PR1201
define i32 @main(i32 %argc, i8** %argv) {
; CHECK-LABEL: @main(
-    %c_19 = alloca i8*
-    %malloc_206 = tail call i8* @malloc(i32 mul (i32 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i32), i32 10))
-    store i8* %malloc_206, i8** %c_19
-    %tmp_207 = load i8*, i8** %c_19
-    tail call void @free(i8* %tmp_207)
-    ret i32 0
-; CHECK-NEXT: ret i32 0
+; CHECK-NEXT: ret i32 0
+;
+  %c_19 = alloca i8*
+  %malloc_206 = tail call i8* @malloc(i32 mul (i32 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i32), i32 10))
+  store i8* %malloc_206, i8** %c_19
+  %tmp_207 = load i8*, i8** %c_19
+  tail call void @free(i8* %tmp_207)
+  ret i32 0
}

declare noalias i8* @calloc(i32, i32) nounwind
@@ -17,7 +19,8 @@

define i1 @foo() {
; CHECK-LABEL: @foo(
-; CHECK-NEXT: ret i1 false
+; CHECK-NEXT: ret i1 false
+;
  %m = call i8* @malloc(i32 1)
  %z = icmp eq i8* %m, null
  call void @free(i8* %m)
@@ -33,7 +36,8 @@

define void @test3(i8* %src) {
; CHECK-LABEL: @test3(
-; CHECK-NEXT: ret void
+; CHECK-NEXT: ret void
+;
  %a = call noalias i8* @malloc(i32 10)
  call void @llvm.lifetime.start.p0i8(i64 10, i8* %a)
  call void @llvm.lifetime.end.p0i8(i64 10, i8* %a)
@@ -50,7 +54,8 @@

;; This used to crash.
define void @test4() {
; CHECK-LABEL: @test4(
-; CHECK-NEXT: ret void
+; CHECK-NEXT: ret void
+;
  %A = call i8* @malloc(i32 16000)
  %B = bitcast i8* %A to double*
  %C = bitcast double* %B to i8*
@@ -58,23 +63,24 @@
  ret void
}

-; CHECK-LABEL: @test5(
define void @test5(i8* %ptr, i8** %esc) {
-; CHECK-NEXT: call i8* @malloc
-; CHECK-NEXT: call i8* @malloc
-; CHECK-NEXT: call i8* @malloc
-; CHECK-NEXT: call i8* @malloc
-; CHECK-NEXT: call i8* @malloc
-; CHECK-NEXT: call i8* @malloc
-; CHECK-NEXT: call i8* @malloc
-; CHECK-NEXT: call void @llvm.memcpy
-; CHECK-NEXT: call void @llvm.memmove
-; CHECK-NEXT: store
-; CHECK-NEXT: call void @llvm.memcpy
-; CHECK-NEXT: call void @llvm.memmove
-; CHECK-NEXT: call void @llvm.memset
-; CHECK-NEXT: store volatile
-; CHECK-NEXT: ret
+; CHECK-LABEL: @test5(
+; CHECK-NEXT: [[A:%.*]] = call dereferenceable_or_null(700) i8* @malloc(i32 700)
+; CHECK-NEXT: [[B:%.*]] = call dereferenceable_or_null(700) i8* @malloc(i32 700)
+; CHECK-NEXT: [[C:%.*]] = call dereferenceable_or_null(700) i8* @malloc(i32 700)
+; CHECK-NEXT: [[D:%.*]] = call dereferenceable_or_null(700) i8* @malloc(i32 700)
+; CHECK-NEXT: [[E:%.*]] = call dereferenceable_or_null(700) i8* @malloc(i32 700)
+; CHECK-NEXT: [[F:%.*]] = call dereferenceable_or_null(700) i8* @malloc(i32 700)
+; CHECK-NEXT: [[G:%.*]] = call dereferenceable_or_null(700) i8* @malloc(i32 700)
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 dereferenceable(32) [[PTR:%.*]], i8* align 1 dereferenceable(32) [[A]], i32 32, i1 false)
+; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i32(i8* align 1 dereferenceable(32) [[PTR]], i8* align 1 dereferenceable(32) [[B]], i32 32, i1 false)
+; CHECK-NEXT: store i8* [[C]], i8** [[ESC:%.*]], align 8
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[D]], i8* [[PTR]], i32 32, i1 true)
+; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i32(i8* [[E]], i8* [[PTR]], i32 32, i1 true)
+; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[F]], i8 5, i32 32, i1 true)
+; CHECK-NEXT: store volatile i8 4, i8* [[G]], align 1
+; CHECK-NEXT: ret void
+;
  %a = call i8* @malloc(i32 700)
  %b = call i8* @malloc(i32 700)
  %c = call i8* @malloc(i32 700)
@@ -98,17 +104,19 @@
;; Using simplifycfg will remove the empty basic block and the branch operation
;; Then, performing a dead elimination will remove the comparison.
;; This is what happens with -O1 and upper.
-; CHECK-LABEL: @test6(
define void @test6(i8* %foo) minsize {
-; CHECK: %tobool = icmp eq i8* %foo, null
+; CHECK-LABEL: @test6(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8* [[FOO:%.*]], null
+; CHECK-NEXT: tail call void @free(i8* [[FOO]])
+; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: ret void
+;
;; Call to free moved
-; CHECK-NEXT: tail call void @free(i8* %foo)
-; CHECK-NEXT: br i1 %tobool, label %if.end, label %if.then
-; CHECK: if.then:
;; Block is now empty and may be simplified by simplifycfg
-; CHECK-NEXT: br label %if.end
-; CHECK: if.end:
-; CHECK-NEXT: ret void
entry:
  %tobool = icmp eq i8* %foo, null
  br i1 %tobool, label %if.end, label %if.then
@@ -126,27 +134,40 @@

declare i32 @__gxx_personality_v0(...)
declare void @_ZN1AC2Ev(i8* %this)
-; CHECK-LABEL: @test7(
define void @test7() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+; CHECK-LABEL: @test7(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: invoke void @_ZN1AC2Ev(i8* undef)
+; CHECK-NEXT: to label [[DOTNOEXC_I:%.*]] unwind label [[LPAD_I:%.*]]
+; CHECK: .noexc.i:
+; CHECK-NEXT: unreachable
+; CHECK: lpad.i:
+; CHECK-NEXT: [[TMP0:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT: cleanup
+; CHECK-NEXT: resume { i8*, i32 } [[TMP0]]
+;
entry:
  %nt = alloca i8
-  ; CHECK-NOT: call {{.*}}@_ZnwmRKSt9nothrow_t(
  %call.i = tail call i8* @_ZnwmRKSt9nothrow_t(i64 1, i8* %nt) builtin nounwind
  invoke void @_ZN1AC2Ev(i8* undef)
-          to label %.noexc.i unwind label %lpad.i
+    to label %.noexc.i unwind label %lpad.i

.noexc.i: ; preds = %entry
  unreachable

lpad.i: ; preds = %entry
  %0 = landingpad { i8*, i32 }
    cleanup
-  ; CHECK-NOT: call {{.*}}@_ZdlPvRKSt9nothrow_t(
  call void @_ZdlPvRKSt9nothrow_t(i8* %call.i, i8* %nt) builtin nounwind
  resume { i8*, i32 } %0
}

declare i8* @_Znwm(i64) nobuiltin
define i8* @_Znwj(i32 %n) nobuiltin {
+; CHECK-LABEL: @_Znwj(
+; CHECK-NEXT: [[Z:%.*]] = zext i32 [[N:%.*]] to i64
+; CHECK-NEXT: [[M:%.*]] = call i8* @_Znwm(i64 [[Z]])
+; CHECK-NEXT: ret i8* [[M]]
+;
  %z = zext i32 %n to i64
  %m = call i8* @_Znwm(i64 %z)
  ret i8* %m
@@ -157,18 +178,34 @@
declare void @_ZdaPv(i8*) nobuiltin

define linkonce void @_ZdlPvm(i8* %p, i64) nobuiltin {
+; CHECK-LABEL: @_ZdlPvm(
+; CHECK-NEXT: call void @_ZdlPv(i8* [[P:%.*]])
+; CHECK-NEXT: ret void
+;
  call void @_ZdlPv(i8* %p)
  ret void
}
define linkonce void @_ZdlPvj(i8* %p, i32) nobuiltin {
+; CHECK-LABEL: @_ZdlPvj(
+; CHECK-NEXT: call void @_ZdlPv(i8* [[P:%.*]])
+; CHECK-NEXT: ret void
+;
  call void @_ZdlPv(i8* %p)
  ret void
}
define linkonce void @_ZdaPvm(i8* %p, i64) nobuiltin {
+; CHECK-LABEL: @_ZdaPvm(
+; CHECK-NEXT: call void @_ZdaPv(i8* [[P:%.*]])
+; CHECK-NEXT: ret void
+;
  call void @_ZdaPv(i8* %p)
  ret void
}
define linkonce void @_ZdaPvj(i8* %p, i32) nobuiltin {
+; CHECK-LABEL: @_ZdaPvj(
+; CHECK-NEXT: call void @_ZdaPv(i8* [[P:%.*]])
+; CHECK-NEXT: ret void
+;
  call void @_ZdaPv(i8* %p)
  ret void
}
@@ -196,9 +233,10 @@

declare void @_ZdaPvSt11align_val_tRKSt9nothrow_t(i8*, i64, i8*) nobuiltin

-; CHECK-LABEL: @test8(
define void @test8() {
-  ; CHECK-NOT: call
+; CHECK-LABEL: @test8(
+; CHECK-NEXT: ret void
+;
  %nt = alloca i8
  %nw = call i8* @_Znwm(i64 32) builtin
  call void @_ZdlPv(i8* %nw) builtin
@@ -234,25 +272,30 @@
declare noalias i8* @"\01??2@YAPEAX_K@Z"(i64) nobuiltin
declare void @"\01??3@YAXPEAX@Z"(i8*) nobuiltin

-; CHECK-LABEL: @test9(
define void @test9() {
-  ; CHECK-NOT: call
+; CHECK-LABEL: @test9(
+; CHECK-NEXT: ret void
+;
  %new_long_long = call noalias i8* @"\01??2@YAPEAX_K@Z"(i64 32) builtin
  call void @"\01??3@YAXPEAX@Z"(i8* %new_long_long) builtin
  ret void
}

define void @test10() {
-; CHECK-LABEL: @test10
-; CHECK: call void @_ZdlPv
+; CHECK-LABEL: @test10(
+; CHECK-NEXT: call void @_ZdlPv(i8* null)
+; CHECK-NEXT: ret void
+;
  call void @_ZdlPv(i8* null)
  ret void
}

define void @test11() {
-; CHECK-LABEL: @test11
-; CHECK: call i8* @_Znwm
-; CHECK: call void @_ZdlPv
+; CHECK-LABEL: @test11(
+; CHECK-NEXT: [[CALL:%.*]] = call dereferenceable_or_null(8) i8* @_Znwm(i64 8) #5
+; CHECK-NEXT: call void @_ZdlPv(i8* [[CALL]])
+; CHECK-NEXT: ret void
+;
  %call = call i8* @_Znwm(i64 8) builtin
  call void @_ZdlPv(i8* %call)
  ret void
@@ -260,19 +303,21 @@

;; Check that the optimization that moves a call to free in its predecessor
;; block (see test6) also happens when noop casts are involved.
-; CHECK-LABEL: @test12(
define void @test12(i32* %foo) minsize {
-; CHECK: %tobool = icmp eq i32* %foo, null
+; CHECK-LABEL: @test12(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32* [[FOO:%.*]], null
+; CHECK-NEXT: [[BITCAST:%.*]] = bitcast i32* [[FOO]] to i8*
+; CHECK-NEXT: tail call void @free(i8* [[BITCAST]])
+; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: br label [[IF_END]]
+; CHECK: if.end:
+; CHECK-NEXT: ret void
+;
;; Everything before the call to free should have been moved as well.
-; CHECK-NEXT: %bitcast = bitcast i32* %foo to i8*
;; Call to free moved
-; CHECK-NEXT: tail call void @free(i8* %bitcast)
-; CHECK-NEXT: br i1 %tobool, label %if.end, label %if.then
-; CHECK: if.then:
;; Block is now empty and may be simplified by simplifycfg
-; CHECK-NEXT: br label %if.end
-; CHECK: if.end:
-; CHECK-NEXT: ret void
entry:
  %tobool = icmp eq i32* %foo, null
  br i1 %tobool, label %if.end, label %if.then
Index: test/Transforms/InstCombine/objsize.ll
===================================================================
--- test/Transforms/InstCombine/objsize.ll
+++ test/Transforms/InstCombine/objsize.ll
@@ -161,7 +161,7 @@
define i8* @test5(i32 %n) nounwind ssp {
; CHECK-LABEL: @test5(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = tail call noalias i8* @malloc(i32 20) #0
+; CHECK-NEXT: [[TMP0:%.*]] = tail call noalias dereferenceable_or_null(20) i8* @malloc(i32 20) #0
; CHECK-NEXT: [[TMP1:%.*]] = load i8*, i8** @s, align 8
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 dereferenceable(10) [[TMP0]], i8* align 1 dereferenceable(10) [[TMP1]], i32 10, i1 false)
; CHECK-NEXT: ret i8* [[TMP0]]
@@ -177,7 +177,7 @@
define void @test6(i32 %n) nounwind ssp {
; CHECK-LABEL: @test6(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = tail call noalias i8* @malloc(i32 20) #0
+; CHECK-NEXT: [[TMP0:%.*]] = tail call noalias dereferenceable_or_null(20) i8* @malloc(i32 20) #0
; CHECK-NEXT: [[TMP1:%.*]] = load i8*, i8** @s, align 8
; CHECK-NEXT: [[TMP2:%.*]] = tail call i8* @__memcpy_chk(i8* [[TMP0]], i8* [[TMP1]], i32 30, i32 20) #0
; CHECK-NEXT: ret void
@@ -196,7 +196,7 @@

define i32 @test7(i8** %esc) {
; CHECK-LABEL: @test7(
-; CHECK-NEXT: [[ALLOC:%.*]] = call noalias i8* @malloc(i32 48) #0
+; CHECK-NEXT: [[ALLOC:%.*]] = call noalias dereferenceable_or_null(48) i8* @malloc(i32 48) #0
; CHECK-NEXT: store i8* [[ALLOC]], i8** [[ESC:%.*]], align 4
; CHECK-NEXT: ret i32 32
;
@@ -211,7 +211,7 @@

define i32 @test8(i8** %esc) {
; CHECK-LABEL: @test8(
-; CHECK-NEXT: [[ALLOC:%.*]] = call noalias i8* @calloc(i32 5, i32 7) #0
+; CHECK-NEXT: [[ALLOC:%.*]] = call noalias dereferenceable_or_null(35) i8* @calloc(i32 5, i32 7) #0
; CHECK-NEXT: store i8* [[ALLOC]], i8** [[ESC:%.*]], align 4
; CHECK-NEXT: ret i32 30
;
Index: test/Transforms/InstCombine/realloc.ll
===================================================================
--- test/Transforms/InstCombine/realloc.ll
+++ test/Transforms/InstCombine/realloc.ll
@@ -7,7 +7,7 @@

define i8* @realloc_null_ptr() #0 {
; CHECK-LABEL: @realloc_null_ptr(
-; CHECK-NEXT: [[MALLOC:%.*]] = call i8* @malloc(i64 100)
+; CHECK-NEXT: [[MALLOC:%.*]] = call dereferenceable_or_null(100) i8* @malloc(i64 100)
; CHECK-NEXT: ret i8* [[MALLOC]]
;
  %call = call i8* @realloc(i8* null, i64 100) #2
  ret i8* %call
}
@@ -16,7 +16,7 @@

define i8* @realloc_unknown_ptr(i8* %ptr) #0 {
; CHECK-LABEL: @realloc_unknown_ptr(
-; CHECK-NEXT: [[CALL:%.*]] = call i8* @realloc(i8* [[PTR:%.*]], i64 100)
+; CHECK-NEXT: [[CALL:%.*]] = call dereferenceable_or_null(100) i8* @realloc(i8* [[PTR:%.*]], i64 100)
; CHECK-NEXT: ret i8* [[CALL]]
;
  %call = call i8* @realloc(i8* %ptr, i64 100) #2
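
Note for readers of the patch: the sketch below is illustration only and is not part of the change. It shows how the new isOpNewLikeFn() query composes with the existing Attribute helpers used by annotateAnyAllocSite above. The wrapper name annotateNewCall and its surrounding boilerplate are invented for this example; the APIs it calls (isOpNewLikeFn, Attribute::getWithDereferenceableBytes, CallBase::addAttribute) come from the patch or from existing LLVM headers.

// Minimal sketch, assuming the isOpNewLikeFn() declaration added by this patch.
// annotateNewCall is a hypothetical helper, not an existing LLVM function.
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"

using namespace llvm;

// If V is a throwing operator-new-like call with a non-zero constant size,
// mark its return value as dereferenceable for that many bytes. The throwing
// variants never return null, so the stronger dereferenceable attribute is
// legal here, whereas malloc/realloc/calloc results only get
// dereferenceable_or_null in annotateAnyAllocSite.
static void annotateNewCall(Value *V, const TargetLibraryInfo *TLI) {
  auto *CB = dyn_cast<CallBase>(V);
  if (!CB || !isOpNewLikeFn(CB, TLI))
    return;
  auto *Size = dyn_cast<ConstantInt>(CB->getArgOperand(0));
  if (!Size || Size->isNullValue())
    return;
  CB->addAttribute(AttributeList::ReturnIndex,
                   Attribute::getWithDereferenceableBytes(
                       CB->getContext(), Size->getZExtValue()));
}

As the updated tests show, a constant-size malloc such as @malloc(i64 40) now carries dereferenceable_or_null(40) on its return value, while the calloc path multiplies its two constant arguments and only annotates when that unsigned multiplication does not overflow.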