diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1908,6 +1908,7 @@
   if (!CtxI || !DT)
     return false;
 
+  const auto ValueAddressSpace = V->getType()->getPointerAddressSpace();
   unsigned NumUsesExplored = 0;
   for (auto *U : V->users()) {
     // Avoid massive lists
@@ -1924,6 +1925,14 @@
         Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
       return true;
 
+    // If the value is used as a load/store, then the pointer must be non-null.
+    if (V == getLoadStorePointerOperand(U)) {
+      const Instruction *I = cast<Instruction>(U);
+      if (!NullPointerIsDefined(I->getFunction(), ValueAddressSpace) &&
+          DT->dominates(I, CtxI))
+        return true;
+    }
+
     // Consider only compare instructions uniquely controlling a branch
     CmpInst::Predicate Pred;
     if (!match(const_cast<User *>(U),
diff --git a/llvm/test/Analysis/ValueTracking/known-nonnull-at.ll b/llvm/test/Analysis/ValueTracking/known-nonnull-at.ll
--- a/llvm/test/Analysis/ValueTracking/known-nonnull-at.ll
+++ b/llvm/test/Analysis/ValueTracking/known-nonnull-at.ll
@@ -119,3 +119,22 @@
   ret i1 %null_check
 }
 
+; Make sure that a dominating load/store through the pointer proves it nonnull.
+define i32 @test_null_after_store(i32* %0) {
+; CHECK-LABEL: @test_null_after_store(
+; CHECK-NEXT: store i32 123, i32* %0, align 4
+; CHECK-NEXT: ret i32 2
+  store i32 123, i32* %0, align 4
+  %2 = icmp eq i32* %0, null
+  %3 = select i1 %2, i32 1, i32 2
+  ret i32 %3
+}
+
+define i32 @test_null_after_load(i32* %0) {
+; CHECK-LABEL: @test_null_after_load(
+; CHECK-NEXT: ret i32 1
+  %2 = load i32, i32* %0, align 4
+  %3 = icmp eq i32* %0, null
+  %4 = select i1 %3, i32 %2, i32 1
+  ret i32 %4
+}
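Note: the new code path only fires when the load or store dominates the query point (the DT->dominates(I, CtxI) check above). A minimal IR sketch, not part of this patch and with hypothetical names, of a case the fold must leave alone because the store sits on only one path:

define i32 @test_null_after_conditional_store(i32* %p, i1 %cond) {
entry:
  br i1 %cond, label %bb1, label %merge

bb1:
  store i32 123, i32* %p, align 4
  br label %merge

merge:
  ; The store in %bb1 does not dominate this compare, so %p may still be null
  ; and the select must survive.
  %null_check = icmp eq i32* %p, null
  %res = select i1 %null_check, i32 1, i32 2
  ret i32 %res
}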
diff --git a/llvm/test/CodeGen/PowerPC/pr39815.ll b/llvm/test/CodeGen/PowerPC/pr39815.ll
--- a/llvm/test/CodeGen/PowerPC/pr39815.ll
+++ b/llvm/test/CodeGen/PowerPC/pr39815.ll
@@ -20,11 +20,10 @@
 ; CHECK: # %bb.0:
 ; CHECK-DAG: addis [[REG1:[0-9]+]], [[REG2:[0-9]+]], [[VAR1:[a-z0-9A-Z_.]+]]@toc@ha
 ; CHECK-DAG: ld [[REG3:[0-9]+]], [[VAR1]]@toc@l([[REG1]])
-; CHECK-DAG: lwz [[REG4:[0-9]+]], 0([[REG3]])
-; CHECK-DAG: addic [[REG5:[0-9]+]], [[REG3]], -1
-; CHECK-DAG: addze [[REG7:[0-9]+]], [[REG4]]
-; CHECK-DAG: addis [[REG8:[0-9]+]], [[REG2]], [[VAR2:[a-z0-9A-Z_.]+]]@toc@ha
+; CHECK-DAG: lbz [[REG4:[0-9]+]], 0([[REG3]])
+; CHECK-DAG: addi [[REG7:[0-9]+]], [[REG4]], 1
 ; CHECK-DAG: andi. [[REG9:[0-9]+]], [[REG7]], 5
+; CHECK-DAG: addis [[REG8:[0-9]+]], [[REG2]], [[VAR2:[a-z0-9A-Z_.]+]]@toc@ha
 ; CHECK-DAG: stb [[REG9]], [[VAR2]]@toc@l([[REG8]])
 ; CHECK: blr
 }
diff --git a/llvm/test/Transforms/Coroutines/coro-swifterror.ll b/llvm/test/Transforms/Coroutines/coro-swifterror.ll
--- a/llvm/test/Transforms/Coroutines/coro-swifterror.ll
+++ b/llvm/test/Transforms/Coroutines/coro-swifterror.ll
@@ -33,7 +33,7 @@
 ; CHECK-NEXT: call void @print(i32 %n)
 ; TODO: figure out a way to eliminate this
 ; CHECK-NEXT: store i8* null, i8** %errorslot
-; CHECK-NEXT: call void @maybeThrow(i8** swifterror %errorslot)
+; CHECK-NEXT: call void @maybeThrow(i8** nonnull swifterror %errorslot)
 ; CHECK-NEXT: [[T1:%.*]] = load i8*, i8** %errorslot
 ; CHECK-NEXT: call void @logError(i8* [[T1]])
 ; CHECK-NEXT: store i8* [[T1]], i8** %errorslot
@@ -51,7 +51,7 @@
 ; CHECK-NEXT: store i32 %inc, i32* [[T0]], align 4
 ; CHECK-NEXT: call void @print(i32 %inc)
 ; CHECK-NEXT: store i8* [[ERROR]], i8** %2
-; CHECK-NEXT: call void @maybeThrow(i8** swifterror %2)
+; CHECK-NEXT: call void @maybeThrow(i8** nonnull swifterror %2)
 ; CHECK-NEXT: [[T2:%.*]] = load i8*, i8** %2
 ; CHECK-NEXT: call void @logError(i8* [[T2]])
 ; CHECK-NEXT: store i8* [[T2]], i8** %2
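Note: the coroutine CHECK updates are a byproduct of the same analysis: the store of null through %errorslot that dominates each call proves the slot pointer itself is non-null, so InstCombine now tags the call-site argument nonnull. A reduced sketch of the pattern, with hypothetical names:

declare void @maybeThrow(i8** swifterror)

define void @sketch(i8** swifterror %slot) {
  ; Storing through %slot would be UB if %slot were null...
  store i8* null, i8** %slot
  ; ...so the argument can be marked nonnull at this call.
  call void @maybeThrow(i8** swifterror %slot)
  ret void
}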
diff --git a/llvm/test/Transforms/InstCombine/element-atomic-memintrins.ll b/llvm/test/Transforms/InstCombine/element-atomic-memintrins.ll
--- a/llvm/test/Transforms/InstCombine/element-atomic-memintrins.ll
+++ b/llvm/test/Transforms/InstCombine/element-atomic-memintrins.ll
@@ -15,10 +15,10 @@
 define void @test_memset_to_store(i8* %dest) {
 ; CHECK-LABEL: @test_memset_to_store(
 ; CHECK-NEXT: store atomic i8 1, i8* [[DEST:%.*]] unordered, align 1
-; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 2, i32 1)
-; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 4, i32 1)
-; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 8, i32 1)
-; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 16, i32 1)
+; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 1 [[DEST]], i8 1, i32 2, i32 1)
+; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 1 [[DEST]], i8 1, i32 4, i32 1)
+; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 1 [[DEST]], i8 1, i32 8, i32 1)
+; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 1 [[DEST]], i8 1, i32 16, i32 1)
 ; CHECK-NEXT: ret void
 ;
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 1, i32 1)
@@ -34,9 +34,9 @@
 ; CHECK-NEXT: store atomic i8 1, i8* [[DEST:%.*]] unordered, align 2
 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast i8* [[DEST]] to i16*
 ; CHECK-NEXT: store atomic i16 257, i16* [[TMP1]] unordered, align 2
-; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 [[DEST]], i8 1, i32 4, i32 2)
-; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 [[DEST]], i8 1, i32 8, i32 2)
-; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 [[DEST]], i8 1, i32 16, i32 2)
+; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 2 [[DEST]], i8 1, i32 4, i32 2)
+; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 2 [[DEST]], i8 1, i32 8, i32 2)
+; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 2 [[DEST]], i8 1, i32 16, i32 2)
 ; CHECK-NEXT: ret void
 ;
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 %dest, i8 1, i32 1, i32 1)
@@ -54,8 +54,8 @@
 ; CHECK-NEXT: store atomic i16 257, i16* [[TMP1]] unordered, align 4
 ; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[DEST]] to i32*
 ; CHECK-NEXT: store atomic i32 16843009, i32* [[TMP2]] unordered, align 4
-; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 [[DEST]], i8 1, i32 8, i32 4)
-; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 [[DEST]], i8 1, i32 16, i32 4)
+; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 4 [[DEST]], i8 1, i32 8, i32 4)
+; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 4 [[DEST]], i8 1, i32 16, i32 4)
 ; CHECK-NEXT: ret void
 ;
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %dest, i8 1, i32 1, i32 1)
@@ -75,7 +75,7 @@
 ; CHECK-NEXT: store atomic i32 16843009, i32* [[TMP2]] unordered, align 8
 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT: store atomic i64 72340172838076673, i64* [[TMP3]] unordered, align 8
-; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 [[DEST]], i8 1, i32 16, i32 8)
+; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 8 [[DEST]], i8 1, i32 16, i32 8)
 ; CHECK-NEXT: ret void
 ;
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 %dest, i8 1, i32 1, i32 1)
@@ -95,7 +95,7 @@
 ; CHECK-NEXT: store atomic i32 16843009, i32* [[TMP2]] unordered, align 16
 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT: store atomic i64 72340172838076673, i64* [[TMP3]] unordered, align 16
-; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 [[DEST]], i8 1, i32 16, i32 16)
+; CHECK-NEXT: call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 16 [[DEST]], i8 1, i32 16, i32 16)
 ; CHECK-NEXT: ret void
 ;
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 %dest, i8 1, i32 1, i32 1)
@@ -154,10 +154,10 @@
 ; CHECK-LABEL: @test_memmove_loadstore(
 ; CHECK-NEXT: [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 1
 ; CHECK-NEXT: store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 1
-; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 2, i32 1)
-; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 4, i32 1)
-; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 8, i32 1)
-; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 16, i32 1)
+; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 2, i32 1)
+; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 4, i32 1)
+; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 8, i32 1)
+; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 16, i32 1)
 ; CHECK-NEXT: ret void
 ;
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 1, i32 1)
@@ -176,9 +176,9 @@
 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
 ; CHECK-NEXT: [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 2
 ; CHECK-NEXT: store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 2
-; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 4, i32 2)
-; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 8, i32 2)
-; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 16, i32 2)
+; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 4, i32 2)
+; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 8, i32 2)
+; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 16, i32 2)
 ; CHECK-NEXT: ret void
 ;
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 1, i32 1)
@@ -201,8 +201,8 @@
 ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8* [[DEST]] to i32*
 ; CHECK-NEXT: [[TMP7:%.*]] = load atomic i32, i32* [[TMP5]] unordered, align 4
 ; CHECK-NEXT: store atomic i32 [[TMP7]], i32* [[TMP6]] unordered, align 4
-; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 8, i32 4)
-; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 16, i32 4)
+; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 4 [[DEST]], i8* nonnull align 4 [[SRC]], i32 8, i32 4)
+; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 4 [[DEST]], i8* nonnull align 4 [[SRC]], i32 16, i32 4)
 ; CHECK-NEXT: ret void
 ;
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 1, i32 1)
@@ -229,7 +229,7 @@
 ; CHECK-NEXT: [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT: [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 8
 ; CHECK-NEXT: store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 8
-; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i32 16, i32 8)
+; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 8 [[DEST]], i8* nonnull align 8 [[SRC]], i32 16, i32 8)
 ; CHECK-NEXT: ret void
 ;
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 1, i32 1)
@@ -256,7 +256,7 @@
 ; CHECK-NEXT: [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT: [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 16
 ; CHECK-NEXT: store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 16
-; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 [[DEST:%.*]], i8* align 16 [[SRC:%.*]], i32 16, i32 16)
+; CHECK-NEXT: call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 16 [[DEST]], i8* nonnull align 16 [[SRC]], i32 16, i32 16)
 ; CHECK-NEXT: ret void
 ;
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 1, i32 1)
@@ -302,10 +302,10 @@
 ; CHECK-LABEL: @test_memcpy_loadstore(
 ; CHECK-NEXT: [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 1
 ; CHECK-NEXT: store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 1
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 2, i32 1)
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 4, i32 1)
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 8, i32 1)
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 16, i32 1)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 2, i32 1)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 4, i32 1)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 8, i32 1)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 16, i32 1)
 ; CHECK-NEXT: ret void
 ;
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 1, i32 1)
@@ -324,9 +324,9 @@
 ; CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
 ; CHECK-NEXT: [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 2
 ; CHECK-NEXT: store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 2
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 4, i32 2)
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 8, i32 2)
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 16, i32 2)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 4, i32 2)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 8, i32 2)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 16, i32 2)
 ; CHECK-NEXT: ret void
 ;
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 1, i32 1)
@@ -349,8 +349,8 @@
 ; CHECK-NEXT: [[TMP6:%.*]] = bitcast i8* [[DEST]] to i32*
 ; CHECK-NEXT: [[TMP7:%.*]] = load atomic i32, i32* [[TMP5]] unordered, align 4
 ; CHECK-NEXT: store atomic i32 [[TMP7]], i32* [[TMP6]] unordered, align 4
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 8, i32 4)
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 16, i32 4)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 4 [[DEST]], i8* nonnull align 4 [[SRC]], i32 8, i32 4)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 4 [[DEST]], i8* nonnull align 4 [[SRC]], i32 16, i32 4)
 ; CHECK-NEXT: ret void
 ;
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 1, i32 1)
@@ -377,7 +377,7 @@
 ; CHECK-NEXT: [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT: [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 8
 ; CHECK-NEXT: store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 8
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i32 16, i32 8)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 8 [[DEST]], i8* nonnull align 8 [[SRC]], i32 16, i32 8)
 ; CHECK-NEXT: ret void
 ;
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 1, i32 1)
@@ -404,7 +404,7 @@
 ; CHECK-NEXT: [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT: [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 16
 ; CHECK-NEXT: store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 16
-; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 [[DEST:%.*]], i8* align 16 [[SRC:%.*]], i32 16, i32 16)
+; CHECK-NEXT: call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 16 [[DEST]], i8* nonnull align 16 [[SRC]], i32 16, i32 16)
 ; CHECK-NEXT: ret void
 ;
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 1, i32 1)
diff --git a/llvm/test/Transforms/InstCombine/memcpy-addrspace.ll b/llvm/test/Transforms/InstCombine/memcpy-addrspace.ll
--- a/llvm/test/Transforms/InstCombine/memcpy-addrspace.ll
+++ b/llvm/test/Transforms/InstCombine/memcpy-addrspace.ll
@@ -101,7 +101,7 @@
 ; CHECK: alloca
 ; CHECK: call void @llvm.memcpy.p0i8.p2i8.i64
 ; CHECK: load i32, i32* %{{.*}}
-; CHECK: call i32 @foo(i32* %{{.*}})
+; CHECK: call i32 @foo(i32* nonnull %{{.*}})
 ; CHECK-NOT: addrspacecast
 ; CHECK-NOT: load i32, i32 addrspace(2)*
 define void @test_load_and_call_no_null_opt(i32 addrspace(1)* %out, i64 %x, i64 %y) #0 {
diff --git a/llvm/test/Transforms/InstCombine/phi-equal-incoming-pointers.ll b/llvm/test/Transforms/InstCombine/phi-equal-incoming-pointers.ll
--- a/llvm/test/Transforms/InstCombine/phi-equal-incoming-pointers.ll
+++ b/llvm/test/Transforms/InstCombine/phi-equal-incoming-pointers.ll
@@ -474,13 +474,13 @@
 ; ALL-NEXT: [[PTR1:%.*]] = getelementptr i8, i8* [[OBJ]], i64 16
 ; ALL-NEXT: [[PTR1_TYPED:%.*]] = bitcast i8* [[PTR1]] to i32*
 ; ALL-NEXT: [[RES1:%.*]] = load i32, i32* [[PTR1_TYPED]], align 4
-; ALL-NEXT: call void @foo.i32(i32* [[PTR1_TYPED]])
+; ALL-NEXT: call void @foo.i32(i32* nonnull [[PTR1_TYPED]])
 ; ALL-NEXT: br label [[EXIT:%.*]]
 ; ALL: bb2:
 ; ALL-NEXT: [[PTR2:%.*]] = getelementptr i8, i8* [[OBJ]], i64 16
 ; ALL-NEXT: [[PTR2_TYPED:%.*]] = bitcast i8* [[PTR2]] to i32*
 ; ALL-NEXT: [[RES2:%.*]] = load i32, i32* [[PTR2_TYPED]], align 4
-; ALL-NEXT: call void @foo.i32(i32* [[PTR2_TYPED]])
+; ALL-NEXT: call void @foo.i32(i32* nonnull [[PTR2_TYPED]])
 ; ALL-NEXT: br label [[EXIT]]
 ; ALL: exit:
 ; ALL-NEXT: [[PTR_TYPED:%.*]] = phi i32* [ [[PTR1_TYPED]], [[BB1]] ], [ [[PTR2_TYPED]], [[BB2]] ]
diff --git a/llvm/test/Transforms/InstCombine/sink-alloca.ll b/llvm/test/Transforms/InstCombine/sink-alloca.ll
--- a/llvm/test/Transforms/InstCombine/sink-alloca.ll
+++ b/llvm/test/Transforms/InstCombine/sink-alloca.ll
@@ -47,6 +47,6 @@
 ; CHECK: %p = call i32* @use_and_return(i32* nonnull %argmem)
 ; CHECK: store i32 13, i32* %p
 ; CHECK: call void @llvm.stackrestore(i8* %sp)
-; CHECK: %0 = call i32* @use_and_return(i32* %p)
+; CHECK: %0 = call i32* @use_and_return(i32* nonnull %p)
 
 attributes #0 = { nounwind }
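Note: the NullPointerIsDefined(I->getFunction(), ValueAddressSpace) guard is what keeps this fold out of functions where null is a dereferenceable address (and out of address spaces where the target defines null); that is why @test_load_and_call_no_null_opt above is only trailing context and gains no nonnull. A hypothetical counterexample, assuming the "null-pointer-is-valid" string-attribute spelling in use at the time of this patch:

define i32 @no_fold_when_null_is_valid(i32* %p) "null-pointer-is-valid"="true" {
  ; Null is a valid pointer in this function, so the store proves nothing
  ; about %p and the compare must survive.
  store i32 123, i32* %p, align 4
  %c = icmp eq i32* %p, null
  %r = select i1 %c, i32 1, i32 2
  ret i32 %r
}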