Index: llvm/trunk/include/llvm/Analysis/ValueTracking.h
===================================================================
--- llvm/trunk/include/llvm/Analysis/ValueTracking.h
+++ llvm/trunk/include/llvm/Analysis/ValueTracking.h
@@ -367,19 +367,6 @@
   /// operands are not memory dependent.
   bool mayBeMemoryDependent(const Instruction &I);
 
-  /// Return true if this pointer couldn't possibly be null by its definition.
-  /// This returns true for allocas, non-extern-weak globals, and byval
-  /// arguments.
-  bool isKnownNonNull(const Value *V);
-
-  /// Return true if this pointer couldn't possibly be null. If the context
-  /// instruction and dominator tree are specified, perform context-sensitive
-  /// analysis and return true if the pointer couldn't possibly be null at the
-  /// specified instruction.
-  bool isKnownNonNullAt(const Value *V,
-                        const Instruction *CtxI = nullptr,
-                        const DominatorTree *DT = nullptr);
-
   /// Return true if it is valid to use the assumptions provided by an
   /// assume intrinsic, I, at the point in the control-flow identified by the
   /// context instruction, CxtI.
Index: llvm/trunk/lib/Analysis/InstructionSimplify.cpp
===================================================================
--- llvm/trunk/lib/Analysis/InstructionSimplify.cpp
+++ llvm/trunk/lib/Analysis/InstructionSimplify.cpp
@@ -2063,13 +2063,14 @@
 static Constant *
 computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
                    const DominatorTree *DT, CmpInst::Predicate Pred,
-                   const Instruction *CxtI, Value *LHS, Value *RHS) {
+                   AssumptionCache *AC, const Instruction *CxtI,
+                   Value *LHS, Value *RHS) {
   // First, skip past any trivial no-ops.
   LHS = LHS->stripPointerCasts();
   RHS = RHS->stripPointerCasts();
 
   // A non-null pointer is not equal to a null pointer.
-  if (llvm::isKnownNonNull(LHS) && isa<ConstantPointerNull>(RHS) &&
+  if (llvm::isKnownNonZero(LHS, DL) && isa<ConstantPointerNull>(RHS) &&
       (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
     return ConstantInt::get(GetCompareTy(LHS),
                             !CmpInst::isTrueWhenEqual(Pred));
@@ -2224,9 +2225,11 @@
   // cannot be elided. We cannot fold malloc comparison to null. Also, the
   // dynamic allocation call could be either of the operands.
   Value *MI = nullptr;
-  if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonNullAt(RHS, CxtI, DT))
+  if (isAllocLikeFn(LHS, TLI) &&
+      llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
     MI = LHS;
-  else if (isAllocLikeFn(RHS, TLI) && llvm::isKnownNonNullAt(LHS, CxtI, DT))
+  else if (isAllocLikeFn(RHS, TLI) &&
+           llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
     MI = RHS;
   // FIXME: We should also fold the compare when the pointer escapes, but the
   // compare dominates the pointer escape
@@ -3313,7 +3316,8 @@
   // Simplify comparisons of related pointers using a powerful, recursive
   // GEP-walk when we have target data available..
   if (LHS->getType()->isPointerTy())
-    if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.CxtI, LHS, RHS))
+    if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI, LHS,
+                                     RHS))
       return C;
   if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
     if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
@@ -3321,7 +3325,7 @@
               Q.DL.getTypeSizeInBits(CLHS->getType()) &&
           Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
               Q.DL.getTypeSizeInBits(CRHS->getType()))
-        if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.CxtI,
+        if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI,
                                          CLHS->getPointerOperand(),
                                          CRHS->getPointerOperand()))
           return C;
Index: llvm/trunk/lib/Analysis/LazyValueInfo.cpp
===================================================================
--- llvm/trunk/lib/Analysis/LazyValueInfo.cpp
+++ llvm/trunk/lib/Analysis/LazyValueInfo.cpp
@@ -817,12 +817,12 @@
   // definition. We could easily extend this to look through geps, bitcasts,
   // and the like to prove non-nullness, but it's not clear that's worth it
   // compile time wise. The context-insensitive value walk done inside
-  // isKnownNonNull gets most of the profitable cases at much less expense.
+  // isKnownNonZero gets most of the profitable cases at much less expense.
   // This does mean that we have a sensativity to where the defining
   // instruction is placed, even if it could legally be hoisted much higher.
   // That is unfortunate.
   PointerType *PT = dyn_cast<PointerType>(BBI->getType());
-  if (PT && isKnownNonNull(BBI)) {
+  if (PT && isKnownNonZero(BBI, DL)) {
     Res = LVILatticeVal::getNot(ConstantPointerNull::get(PT));
     return true;
   }
@@ -901,7 +901,7 @@
     // Before giving up, see if we can prove the pointer non-null local to
     // this particular block.
     if (Val->getType()->isPointerTy() &&
-        (isKnownNonNull(Val) || isObjectDereferencedInBlock(Val, BB))) {
+        (isKnownNonZero(Val, DL) || isObjectDereferencedInBlock(Val, BB))) {
       PointerType *PTy = cast<PointerType>(Val->getType());
       Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
     } else {
@@ -1886,17 +1886,17 @@
 LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
                               Instruction *CxtI) {
   // Is or is not NonNull are common predicates being queried. If
-  // isKnownNonNull can tell us the result of the predicate, we can
+  // isKnownNonZero can tell us the result of the predicate, we can
   // return it quickly. But this is only a fastpath, and falling
   // through would still be correct.
+  const DataLayout &DL = CxtI->getModule()->getDataLayout();
   if (V->getType()->isPointerTy() && C->isNullValue() &&
-      isKnownNonNull(V->stripPointerCasts())) {
+      isKnownNonZero(V->stripPointerCasts(), DL)) {
     if (Pred == ICmpInst::ICMP_EQ)
       return LazyValueInfo::False;
     else if (Pred == ICmpInst::ICMP_NE)
       return LazyValueInfo::True;
   }
-  const DataLayout &DL = CxtI->getModule()->getDataLayout();
   LVILatticeVal Result = getImpl(PImpl, AC, &DL, DT).getValueAt(V, CxtI);
   Tristate Ret = getPredicateResult(Pred, C, Result, DL, TLI);
   if (Ret != Unknown)
Index: llvm/trunk/lib/Analysis/Loads.cpp
===================================================================
--- llvm/trunk/lib/Analysis/Loads.cpp
+++ llvm/trunk/lib/Analysis/Loads.cpp
@@ -72,7 +72,7 @@
       V->getPointerDereferenceableBytes(DL, CheckForNonNull));
   if (KnownDerefBytes.getBoolValue()) {
     if (KnownDerefBytes.uge(Size))
-      if (!CheckForNonNull || isKnownNonNullAt(V, CtxI, DT))
+      if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT))
         return isAligned(V, Align, DL);
   }
 
Index: llvm/trunk/lib/Analysis/ValueTracking.cpp
===================================================================
--- llvm/trunk/lib/Analysis/ValueTracking.cpp
+++ llvm/trunk/lib/Analysis/ValueTracking.cpp
@@ -1749,6 +1749,58 @@
   return false;
 }
 
+static bool isKnownNonNullFromDominatingCondition(const Value *V,
+                                                  const Instruction *CtxI,
+                                                  const DominatorTree *DT) {
+  assert(V->getType()->isPointerTy() && "V must be pointer type");
+  assert(!isa<ConstantPointerNull>(V) && "Did not expect ConstantPointerNull");
+
+  if (!CtxI || !DT)
+    return false;
+
+  unsigned NumUsesExplored = 0;
+  for (auto *U : V->users()) {
+    // Avoid massive lists
+    if (NumUsesExplored >= DomConditionsMaxUses)
+      break;
+    NumUsesExplored++;
+
+    // If the value is used as an argument to a call or invoke, then argument
+    // attributes may provide an answer about null-ness.
+    if (auto CS = ImmutableCallSite(U))
+      if (auto *CalledFunc = CS.getCalledFunction())
+        for (const Argument &Arg : CalledFunc->args())
+          if (CS.getArgOperand(Arg.getArgNo()) == V &&
+              Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
+            return true;
+
+    // Consider only compare instructions uniquely controlling a branch
+    CmpInst::Predicate Pred;
+    if (!match(const_cast<User *>(U),
+               m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
+        (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
+      continue;
+
+    for (auto *CmpU : U->users()) {
+      if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
+        assert(BI->isConditional() && "uses a comparison!");
+
+        BasicBlock *NonNullSuccessor =
+            BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
+        BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
+        if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
+          return true;
+      } else if (Pred == ICmpInst::ICMP_NE &&
+                 match(CmpU, m_Intrinsic<Intrinsic::assume>()) &&
+                 DT->dominates(cast<Instruction>(CmpU), CtxI)) {
+        return true;
+      }
+    }
+  }
+
+  return false;
+}
+
 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
 /// ensure that the value it's attached to is never Value? 'RangeType' is
 /// is the type of the value described by the range.
@@ -1794,7 +1846,15 @@
       return true;
     }
 
-    return false;
+    // A global variable in address space 0 is non null unless extern weak
+    // or an absolute symbol reference. Other address spaces may have null as a
+    // valid address for a global, so we can't assume anything.
+    if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+      if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
+          GV->getType()->getAddressSpace() == 0)
+        return true;
+    } else
+      return false;
   }
 
   if (auto *I = dyn_cast<Instruction>(V)) {
@@ -1809,14 +1869,36 @@
     }
   }
 
+  // Check for pointer simplifications.
+  if (V->getType()->isPointerTy()) {
+    // Alloca never returns null, malloc might.
+    if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
+      return true;
+
+    // A byval, inalloca, or nonnull argument is never null.
+    if (const Argument *A = dyn_cast<Argument>(V))
+      if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
+        return true;
+
+    // A Load tagged with nonnull metadata is never null.
+    if (const LoadInst *LI = dyn_cast<LoadInst>(V))
+      if (LI->getMetadata(LLVMContext::MD_nonnull))
+        return true;
+
+    if (auto CS = ImmutableCallSite(V))
+      if (CS.isReturnNonNull())
+        return true;
+  }
+
   // The remaining tests are all recursive, so bail out if we hit the limit.
   if (Depth++ >= MaxDepth)
     return false;
 
-  // Check for pointer simplifications.
+  // Check for recursive pointer simplifications.
   if (V->getType()->isPointerTy()) {
-    if (isKnownNonNullAt(V, Q.CxtI, Q.DT))
+    if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
       return true;
+
     if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
       if (isGEPKnownNonNull(GEP, Depth, Q))
         return true;
@@ -3482,100 +3564,6 @@
   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
 }
 
-/// Return true if we know that the specified value is never null.
-bool llvm::isKnownNonNull(const Value *V) {
-  assert(V->getType()->isPointerTy() && "V must be pointer type");
-
-  // Alloca never returns null, malloc might.
-  if (isa<AllocaInst>(V)) return true;
-
-  // A byval, inalloca, or nonnull argument is never null.
-  if (const Argument *A = dyn_cast<Argument>(V))
-    return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
-
-  // A global variable in address space 0 is non null unless extern weak
-  // or an absolute symbol reference. Other address spaces may have null as a
-  // valid address for a global, so we can't assume anything.
-  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
-    return !GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
-           GV->getType()->getAddressSpace() == 0;
-
-  // A Load tagged with nonnull metadata is never null.
-  if (const LoadInst *LI = dyn_cast<LoadInst>(V))
-    return LI->getMetadata(LLVMContext::MD_nonnull);
-
-  if (auto CS = ImmutableCallSite(V))
-    if (CS.isReturnNonNull())
-      return true;
-
-  return false;
-}
-
-static bool isKnownNonNullFromDominatingCondition(const Value *V,
-                                                  const Instruction *CtxI,
-                                                  const DominatorTree *DT) {
-  assert(V->getType()->isPointerTy() && "V must be pointer type");
-  assert(!isa<ConstantPointerNull>(V) && "Did not expect ConstantPointerNull");
-  assert(CtxI && "Context instruction required for analysis");
-  assert(DT && "Dominator tree required for analysis");
-
-  unsigned NumUsesExplored = 0;
-  for (auto *U : V->users()) {
-    // Avoid massive lists
-    if (NumUsesExplored >= DomConditionsMaxUses)
-      break;
-    NumUsesExplored++;
-
-    // If the value is used as an argument to a call or invoke, then argument
-    // attributes may provide an answer about null-ness.
-    if (auto CS = ImmutableCallSite(U))
-      if (auto *CalledFunc = CS.getCalledFunction())
-        for (const Argument &Arg : CalledFunc->args())
-          if (CS.getArgOperand(Arg.getArgNo()) == V &&
-              Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
-            return true;
-
-    // Consider only compare instructions uniquely controlling a branch
-    CmpInst::Predicate Pred;
-    if (!match(const_cast<User *>(U),
-               m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
-        (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
-      continue;
-
-    for (auto *CmpU : U->users()) {
-      if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
-        assert(BI->isConditional() && "uses a comparison!");
-
-        BasicBlock *NonNullSuccessor =
-            BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
-        BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
-        if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
-          return true;
-      } else if (Pred == ICmpInst::ICMP_NE &&
-                 match(CmpU, m_Intrinsic<Intrinsic::assume>()) &&
-                 DT->dominates(cast<Instruction>(CmpU), CtxI)) {
-        return true;
-      }
-    }
-  }
-
-  return false;
-}
-
-bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
-                            const DominatorTree *DT) {
-  if (isa<ConstantPointerNull>(V) || isa<UndefValue>(V))
-    return false;
-
-  if (isKnownNonNull(V))
-    return true;
-
-  if (!CtxI || !DT)
-    return false;
-
-  return ::isKnownNonNullFromDominatingCondition(V, CtxI, DT);
-}
-
 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
                                                    const Value *RHS,
                                                    const DataLayout &DL,
Index: llvm/trunk/lib/Transforms/IPO/FunctionAttrs.cpp
===================================================================
--- llvm/trunk/lib/Transforms/IPO/FunctionAttrs.cpp
+++ llvm/trunk/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -884,11 +884,13 @@
     if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
       FlowsToReturn.insert(Ret->getReturnValue());
 
+  auto &DL = F->getParent()->getDataLayout();
+
   for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
     Value *RetVal = FlowsToReturn[i];
 
     // If this value is locally known to be non-null, we're good
-    if (isKnownNonNull(RetVal))
+    if (isKnownNonZero(RetVal, DL))
       continue;
 
     // Otherwise, we need to look upwards since we can't make any local
Index: llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
===================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3743,7 +3743,7 @@
         return replaceInstUsesWith(*II, ConstantPointerNull::get(PT));
 
       // isKnownNonNull -> nonnull attribute
-      if (isKnownNonNullAt(DerivedPtr, II, &DT))
+      if (isKnownNonZero(DerivedPtr, DL, 0, &AC, II, &DT))
         II->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
     }
 
@@ -3932,7 +3932,7 @@
   for (Value *V : CS.args()) {
     if (V->getType()->isPointerTy() &&
         !CS.paramHasAttr(ArgNo, Attribute::NonNull) &&
-        isKnownNonNullAt(V, CS.getInstruction(), &DT))
+        isKnownNonZero(V, DL, 0, &AC, CS.getInstruction(), &DT))
       ArgNos.push_back(ArgNo);
     ArgNo++;
   }
Index: llvm/trunk/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ llvm/trunk/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -338,8 +338,8 @@
 /// and thus must be phi-ed with undef. We fall back to the standard alloca
 /// promotion algorithm in that case.
 static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
-                                     LargeBlockInfo &LBI, DominatorTree &DT,
-                                     AssumptionCache *AC) {
+                                     LargeBlockInfo &LBI, const DataLayout &DL,
+                                     DominatorTree &DT, AssumptionCache *AC) {
   StoreInst *OnlyStore = Info.OnlyStore;
   bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
   BasicBlock *StoreBB = OnlyStore->getParent();
@@ -395,7 +395,7 @@
     // that information when we erase this Load. So we preserve
     // it with an assume.
     if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
-        !llvm::isKnownNonNullAt(ReplVal, LI, &DT))
+        !llvm::isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
       addAssumeNonNull(AC, LI);
 
     LI->replaceAllUsesWith(ReplVal);
@@ -442,6 +442,7 @@
 /// }
 static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
                                      LargeBlockInfo &LBI,
+                                     const DataLayout &DL,
                                      DominatorTree &DT,
                                      AssumptionCache *AC) {
   // The trickiest case to handle is when we have large blocks. Because of this,
@@ -490,7 +491,7 @@
       // information when we erase it. So we preserve it with an assume.
       Value *ReplVal = std::prev(I)->second->getOperand(0);
       if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
-          !llvm::isKnownNonNullAt(ReplVal, LI, &DT))
+          !llvm::isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
         addAssumeNonNull(AC, LI);
 
       LI->replaceAllUsesWith(ReplVal);
@@ -560,7 +561,7 @@
     // If there is only a single store to this value, replace any loads of
     // it that are directly dominated by the definition with the value stored.
     if (Info.DefiningBlocks.size() == 1) {
-      if (rewriteSingleStoreAlloca(AI, Info, LBI, DT, AC)) {
+      if (rewriteSingleStoreAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
        // The alloca has been processed, move on.
         RemoveFromAllocasList(AllocaNum);
         ++NumSingleStore;
@@ -571,7 +572,7 @@
     // If the alloca is only read and written in one basic block, just perform a
     // linear sweep over the block to eliminate it.
     if (Info.OnlyUsedInOneBlock &&
-        promoteSingleBlockAlloca(AI, Info, LBI, DT, AC)) {
+        promoteSingleBlockAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
       // The alloca has been processed, move on.
       RemoveFromAllocasList(AllocaNum);
       continue;
@@ -931,7 +932,7 @@
       // that information when we erase this Load. So we preserve
       // it with an assume.
      if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
-          !llvm::isKnownNonNullAt(V, LI, &DT))
+          !llvm::isKnownNonZero(V, SQ.DL, 0, AC, LI, &DT))
        addAssumeNonNull(AC, LI);
 
      // Anything using the load now uses the current value.
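
All of the call-site updates above follow the same migration: queries that previously went through the two removed entry points are now phrased as calls to isKnownNonZero, which additionally takes the DataLayout and, optionally, an AssumptionCache. The following is a minimal illustrative sketch of that pattern (it is not part of the patch, and the helper name isNonNullAtPoint is hypothetical):

    // Sketch only: how a caller that used isKnownNonNull()/isKnownNonNullAt()
    // expresses the same non-null queries after this change.
    #include "llvm/Analysis/AssumptionCache.h"
    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Dominators.h"
    using namespace llvm;

    static bool isNonNullAtPoint(const Value *V, const DataLayout &DL,
                                 AssumptionCache *AC, const Instruction *CtxI,
                                 const DominatorTree *DT) {
      // Old: isKnownNonNull(V)             New: isKnownNonZero(V, DL)
      // Old: isKnownNonNullAt(V, CtxI, DT) New: isKnownNonZero(V, DL, 0, AC, CtxI, DT)
      // Passing a null CtxI/DT simply disables the dominating-condition reasoning.
      return isKnownNonZero(V, DL, /*Depth=*/0, AC, CtxI, DT);
    }

The test updates below show the observable effect of routing these callers through the stronger analysis: InstCombine and FunctionAttrs now prove more pointers non-null (for example, inbounds GEPs and allocas), so additional nonnull markings appear in the expected output.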
Index: llvm/trunk/test/Other/cgscc-libcall-update.ll
===================================================================
--- llvm/trunk/test/Other/cgscc-libcall-update.ll
+++ llvm/trunk/test/Other/cgscc-libcall-update.ll
@@ -18,7 +18,7 @@
   %tmp3 = call i64 @llvm.objectsize.i64.p0i8(i8* %tmp2, i1 false, i1 true)
   %tmp4 = call i8* @__strncpy_chk(i8* %arg2, i8* %tmp2, i64 1023, i64 %tmp3)
 ; CHECK-NOT: call
-; CHECK: call i8* @strncpy(i8* %arg2, i8* %tmp2, i64 1023)
+; CHECK: call i8* @strncpy(i8* %arg2, i8* nonnull %tmp2, i64 1023)
 ; CHECK-NOT: call
 
   ret i8* %tmp4
Index: llvm/trunk/test/Transforms/FunctionAttrs/nonnull.ll
===================================================================
--- llvm/trunk/test/Transforms/FunctionAttrs/nonnull.ll
+++ llvm/trunk/test/Transforms/FunctionAttrs/nonnull.ll
@@ -216,3 +216,14 @@
   unreachable
 }
 
+; CHECK: define nonnull i32* @gep1(
+define i32* @gep1(i32* %p) {
+  %q = getelementptr inbounds i32, i32* %p, i32 1
+  ret i32* %q
+}
+
+; CHECK: define i32 addrspace(3)* @gep2(
+define i32 addrspace(3)* @gep2(i32 addrspace(3)* %p) {
+  %q = getelementptr inbounds i32, i32 addrspace(3)* %p, i32 1
+  ret i32 addrspace(3)* %q
+}
Index: llvm/trunk/test/Transforms/InstCombine/lifetime.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/lifetime.ll
+++ llvm/trunk/test/Transforms/InstCombine/lifetime.ll
@@ -17,11 +17,11 @@
 ; CHECK: bb3:
 ; CHECK-NEXT: call void @llvm.dbg.declare
 ; CHECK-NEXT: br label %fin
-; CHECK: call void @llvm.lifetime.start.p0i8(i64 1, i8* %[[T]])
-; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 1, i8* %[[B]])
-; CHECK-NEXT: call void @foo(i8* %[[B]], i8* %[[T]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* %[[B]])
-; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* %[[T]])
+; CHECK: call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %[[T]])
+; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %[[B]])
+; CHECK-NEXT: call void @foo(i8* nonnull %[[B]], i8* nonnull %[[T]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %[[B]])
+; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %[[T]])
   %text = alloca [1 x i8], align 1
   %buff = alloca [1 x i8], align 1
   %0 = getelementptr inbounds [1 x i8], [1 x i8]* %text, i64 0, i64 0
Index: llvm/trunk/test/Transforms/InstCombine/memcpy-addrspace.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/memcpy-addrspace.ll
+++ llvm/trunk/test/Transforms/InstCombine/memcpy-addrspace.ll
@@ -45,7 +45,7 @@
 ; CHECK: alloca
 ; CHECK: call void @llvm.memcpy.p0i8.p2i8.i64
 ; CHECK-NOT: addrspacecast
-; CHECK: call i32 @foo(i32* %{{.*}})
+; CHECK: call i32 @foo(i32* nonnull %{{.*}})
 define void @test_call(i32 addrspace(1)* %out, i64 %x) {
 entry:
   %data = alloca [8 x i32], align 4
@@ -62,7 +62,7 @@
 ; CHECK: alloca
 ; CHECK: call void @llvm.memcpy.p0i8.p2i8.i64
 ; CHECK: load i32, i32* %{{.*}}
-; CHECK: call i32 @foo(i32* %{{.*}})
+; CHECK: call i32 @foo(i32* nonnull %{{.*}})
 ; CHECK-NOT: addrspacecast
 ; CHECK-NOT: load i32, i32 addrspace(2)*
 define void @test_load_and_call(i32 addrspace(1)* %out, i64 %x, i64 %y) {
Index: llvm/trunk/test/Transforms/InstCombine/memcpy-from-global.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/memcpy-from-global.ll
+++ llvm/trunk/test/Transforms/InstCombine/memcpy-from-global.ll
@@ -60,7 +60,7 @@
 ; CHECK-NEXT: getelementptr inbounds [124 x i8], [124 x i8]*
 
 ; use @G instead of %A
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %{{.*}}, i8* getelementptr inbounds (%T, %T* @G, i64 0, i32 0)
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull %{{.*}}, i8* getelementptr inbounds (%T, %T* @G, i64 0, i32 0)
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast (%T* @G to i8*), i64 124, i32 4, i1 false)
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %b, i8* %a, i64 124, i32 4, i1 false)
   call void @bar(i8* %b)
Index: llvm/trunk/test/Transforms/InstCombine/strcpy_chk-64.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/strcpy_chk-64.ll
+++ llvm/trunk/test/Transforms/InstCombine/strcpy_chk-64.ll
@@ -4,7 +4,7 @@
 
 define void @func(i8* %i) nounwind ssp {
 ; CHECK-LABEL: @func(
-; CHECK: @__strcpy_chk(i8* %arraydecay, i8* %i, i64 32)
+; CHECK: @__strcpy_chk(i8* nonnull %arraydecay, i8* %i, i64 32)
 entry:
   %s = alloca [32 x i8], align 16
   %arraydecay = getelementptr inbounds [32 x i8], [32 x i8]* %s, i32 0, i32 0
Index: llvm/trunk/test/Transforms/InstCombine/strlen-1.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/strlen-1.ll
+++ llvm/trunk/test/Transforms/InstCombine/strlen-1.ll
@@ -154,7 +154,7 @@
 define i32 @test_no_simplify2(i32 %x) {
 ; CHECK-LABEL: @test_no_simplify2(
 ; CHECK-NEXT:    [[HELLO_P:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* @null_hello, i32 0, i32 %x
-; CHECK-NEXT:    [[HELLO_L:%.*]] = call i32 @strlen(i8* [[HELLO_P]])
+; CHECK-NEXT:    [[HELLO_L:%.*]] = call i32 @strlen(i8* nonnull [[HELLO_P]])
 ; CHECK-NEXT:    ret i32 [[HELLO_L]]
 ;
   %hello_p = getelementptr inbounds [7 x i8], [7 x i8]* @null_hello, i32 0, i32 %x
@@ -168,7 +168,7 @@
 ; CHECK-LABEL: @test_no_simplify3(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 %x, 15
 ; CHECK-NEXT:    [[HELLO_P:%.*]] = getelementptr inbounds [13 x i8], [13 x i8]* @null_hello_mid, i32 0, i32 [[AND]]
-; CHECK-NEXT:    [[HELLO_L:%.*]] = call i32 @strlen(i8* [[HELLO_P]])
+; CHECK-NEXT:    [[HELLO_L:%.*]] = call i32 @strlen(i8* nonnull [[HELLO_P]])
 ; CHECK-NEXT:    ret i32 [[HELLO_L]]
 ;
   %and = and i32 %x, 15
Index: llvm/trunk/test/Transforms/InstCombine/wcslen-1.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/wcslen-1.ll
+++ llvm/trunk/test/Transforms/InstCombine/wcslen-1.ll
@@ -155,7 +155,7 @@
 ; CHECK-LABEL: @test_no_simplify2(
 ; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[X:%.*]] to i64
 ; CHECK-NEXT:    [[HELLO_P:%.*]] = getelementptr inbounds [7 x i32], [7 x i32]* @null_hello, i64 0, i64 [[TMP1]]
-; CHECK-NEXT:    [[HELLO_L:%.*]] = call i64 @wcslen(i32* [[HELLO_P]])
+; CHECK-NEXT:    [[HELLO_L:%.*]] = call i64 @wcslen(i32* nonnull [[HELLO_P]])
 ; CHECK-NEXT:    ret i64 [[HELLO_L]]
 ;
   %hello_p = getelementptr inbounds [7 x i32], [7 x i32]* @null_hello, i32 0, i32 %x
@@ -170,7 +170,7 @@
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 15
 ; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[AND]] to i64
 ; CHECK-NEXT:    [[HELLO_P:%.*]] = getelementptr inbounds [13 x i32], [13 x i32]* @null_hello_mid, i64 0, i64 [[TMP1]]
-; CHECK-NEXT:    [[HELLO_L:%.*]] = call i64 @wcslen(i32* [[HELLO_P]])
+; CHECK-NEXT:    [[HELLO_L:%.*]] = call i64 @wcslen(i32* nonnull [[HELLO_P]])
 ; CHECK-NEXT:    ret i64 [[HELLO_L]]
 ;
   %and = and i32 %x, 15
Index: llvm/trunk/test/Transforms/InstCombine/wcslen-3.ll
===================================================================
--- llvm/trunk/test/Transforms/InstCombine/wcslen-3.ll
+++ llvm/trunk/test/Transforms/InstCombine/wcslen-3.ll
@@ -159,7 +159,7 @@
 ; CHECK-LABEL: @test_no_simplify2(
 ; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 [[X:%.*]] to i64
 ; CHECK-NEXT:    [[HELLO_P:%.*]] = getelementptr inbounds [7 x i16], [7 x i16]* @null_hello, i64 0, i64 [[TMP1]]
-; CHECK-NEXT:    [[HELLO_L:%.*]] = call i64 @wcslen(i16* [[HELLO_P]])
+; CHECK-NEXT:    [[HELLO_L:%.*]] = call i64 @wcslen(i16* nonnull [[HELLO_P]])
 ; CHECK-NEXT:    ret i64 [[HELLO_L]]
 ;
   %hello_p = getelementptr inbounds [7 x i16], [7 x i16]* @null_hello, i16 0, i16 %x
@@ -174,7 +174,7 @@
 ; CHECK-NEXT:    [[AND:%.*]] = and i16 [[X:%.*]], 15
 ; CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[AND]] to i64
 ; CHECK-NEXT:    [[HELLO_P:%.*]] = getelementptr inbounds [13 x i16], [13 x i16]* @null_hello_mid, i64 0, i64 [[TMP1]]
-; CHECK-NEXT:    [[HELLO_L:%.*]] = call i64 @wcslen(i16* [[HELLO_P]])
+; CHECK-NEXT:    [[HELLO_L:%.*]] = call i64 @wcslen(i16* nonnull [[HELLO_P]])
 ; CHECK-NEXT:    ret i64 [[HELLO_L]]
 ;
   %and = and i16 %x, 15