Index: llvm/include/llvm/Analysis/Loads.h =================================================================== --- llvm/include/llvm/Analysis/Loads.h +++ llvm/include/llvm/Analysis/Loads.h @@ -19,6 +19,7 @@ namespace llvm { class AAResults; +class AssumptionCache; class DataLayout; class DominatorTree; class Instruction; @@ -31,9 +32,9 @@ /// Return true if this is always a dereferenceable pointer. If the context /// instruction is specified perform context-sensitive analysis and return true /// if the pointer is dereferenceable at the specified instruction. -bool isDereferenceablePointer(const Value *V, Type *Ty, - const DataLayout &DL, +bool isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI = nullptr, + AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr, const TargetLibraryInfo *TLI = nullptr); @@ -44,6 +45,7 @@ bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI = nullptr, + AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr, const TargetLibraryInfo *TLI = nullptr); @@ -54,6 +56,7 @@ bool isDereferenceableAndAlignedPointer(const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL, const Instruction *CtxI = nullptr, + AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr, const TargetLibraryInfo *TLI = nullptr); @@ -79,8 +82,8 @@ /// if desired.) This is more powerful than the variants above when the /// address loaded from is analyzeable by SCEV. bool isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L, - ScalarEvolution &SE, - DominatorTree &DT); + ScalarEvolution &SE, DominatorTree &DT, + AssumptionCache *AC = nullptr); /// Return true if we know that executing a load from this value cannot trap. 
/// Index: llvm/lib/Analysis/Loads.cpp =================================================================== --- llvm/lib/Analysis/Loads.cpp +++ llvm/lib/Analysis/Loads.cpp @@ -38,7 +38,7 @@ /// a simple load or store. static bool isDereferenceableAndAlignedPointer( const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL, - const Instruction *CtxI, const DominatorTree *DT, + const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT, const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited, unsigned MaxDepth) { assert(V->getType()->isPointerTy() && "Base must be pointer"); @@ -57,19 +57,19 @@ // Recurse into both hands of select. if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) { return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment, - Size, DL, CtxI, DT, TLI, Visited, - MaxDepth) && + Size, DL, CtxI, AC, DT, TLI, + Visited, MaxDepth) && isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment, - Size, DL, CtxI, DT, TLI, Visited, - MaxDepth); + Size, DL, CtxI, AC, DT, TLI, + Visited, MaxDepth); } // bitcast instructions are no-ops as far as dereferenceability is concerned. if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) { if (BC->getSrcTy()->isPointerTy()) - return isDereferenceableAndAlignedPointer( - BC->getOperand(0), Alignment, Size, DL, CtxI, DT, TLI, - Visited, MaxDepth); + return isDereferenceableAndAlignedPointer(BC->getOperand(0), Alignment, + Size, DL, CtxI, AC, DT, TLI, + Visited, MaxDepth); } bool CheckForNonNull, CheckForFreed; @@ -94,7 +94,7 @@ RetainedKnowledge AlignRK; RetainedKnowledge DerefRK; if (getKnowledgeForValue( - V, {Attribute::Dereferenceable, Attribute::Alignment}, nullptr, + V, {Attribute::Dereferenceable, Attribute::Alignment}, AC, [&](RetainedKnowledge RK, Instruction *Assume, auto) { if (!isValidAssumeForContext(Assume, CtxI)) return false; @@ -133,24 +133,24 @@ // addrspacecast, so we can't do arithmetic directly on the APInt values. 
return isDereferenceableAndAlignedPointer( Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL, - CtxI, DT, TLI, Visited, MaxDepth); + CtxI, AC, DT, TLI, Visited, MaxDepth); } // For gc.relocate, look through relocations if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V)) return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(), - Alignment, Size, DL, CtxI, DT, + Alignment, Size, DL, CtxI, AC, DT, TLI, Visited, MaxDepth); if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V)) return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment, - Size, DL, CtxI, DT, TLI, + Size, DL, CtxI, AC, DT, TLI, Visited, MaxDepth); if (const auto *Call = dyn_cast<CallBase>(V)) { if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true)) return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI, - DT, TLI, Visited, MaxDepth); + AC, DT, TLI, Visited, MaxDepth); // If we have a call we can't recurse through, check to see if this is an // allocation function for which we can establish an minimum object size. @@ -173,7 +173,7 @@ // As we recursed through GEPs to get here, we've incrementally // checked that each step advanced by a multiple of the alignment. If // our base is properly aligned, then the original offset accessed - // must also be. + // must also be. Type *Ty = V->getType(); assert(Ty->isSized() && "must be sized"); APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0); @@ -186,28 +186,24 @@ return false; } -bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Align Alignment, - const APInt &Size, - const DataLayout &DL, - const Instruction *CtxI, - const DominatorTree *DT, - const TargetLibraryInfo *TLI) { +bool llvm::isDereferenceableAndAlignedPointer( + const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL, + const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT, + const TargetLibraryInfo *TLI) { // Note: At the moment, Size can be zero. 
This ends up being interpreted as // a query of whether [Base, V] is dereferenceable and V is aligned (since // that's what the implementation happened to do). It's unclear if this is // the desired semantic, but at least SelectionDAG does exercise this case. SmallPtrSet<const Value *, 32> Visited; - return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, DT, - TLI, Visited, 16); + return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, + DT, TLI, Visited, 16); } -bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, - Align Alignment, - const DataLayout &DL, - const Instruction *CtxI, - const DominatorTree *DT, - const TargetLibraryInfo *TLI) { +bool llvm::isDereferenceableAndAlignedPointer( + const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, + const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT, + const TargetLibraryInfo *TLI) { // For unsized types or scalable vectors we don't know exactly how many bytes // are dereferenced, so bail out. if (!Ty->isSized() || isa<ScalableVectorType>(Ty)) @@ -221,15 +217,17 @@ APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()), DL.getTypeStoreSize(Ty)); return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI, - DT, TLI); + AC, DT, TLI); } bool llvm::isDereferenceablePointer(const Value *V, Type *Ty, const DataLayout &DL, const Instruction *CtxI, + AssumptionCache *AC, const DominatorTree *DT, const TargetLibraryInfo *TLI) { - return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, DT, TLI); + return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT, + TLI); } /// Test if A and B will obviously have the same value. 
@@ -265,7 +263,8 @@ bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L, ScalarEvolution &SE, - DominatorTree &DT) { + DominatorTree &DT, + AssumptionCache *AC) { auto &DL = LI->getModule()->getDataLayout(); Value *Ptr = LI->getPointerOperand(); @@ -279,7 +278,7 @@ // access is safe within the loop w/o needing predication. if (L->isLoopInvariant(Ptr)) return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL, - HeaderFirstNonPHI, &DT); + HeaderFirstNonPHI, AC, &DT); // Otherwise, check to see if we have a repeating access pattern where we can // prove that all accesses are well aligned and dereferenceable. @@ -311,7 +310,7 @@ if (EltSize.urem(Alignment.value()) != 0) return false; return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL, - HeaderFirstNonPHI, &DT); + HeaderFirstNonPHI, AC, &DT); } /// Check if executing a load of this pointer value cannot trap. @@ -332,7 +333,8 @@ const TargetLibraryInfo *TLI) { // If DT is not specified we can't make context-sensitive query const Instruction* CtxI = DT ? 
ScanFrom : nullptr; - if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, DT, TLI)) + if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, nullptr, + DT, TLI)) return true; if (!ScanFrom) Index: llvm/lib/Analysis/ValueTracking.cpp =================================================================== --- llvm/lib/Analysis/ValueTracking.cpp +++ llvm/lib/Analysis/ValueTracking.cpp @@ -4785,8 +4785,9 @@ return false; const DataLayout &DL = LI->getModule()->getDataLayout(); return isDereferenceableAndAlignedPointer( - LI->getPointerOperand(), LI->getType(), LI->getAlign(), DL, CtxI, DT, - TLI); + LI->getPointerOperand(), LI->getType(), LI->getAlign(), DL, CtxI, + nullptr, // FIXME + DT, TLI); } case Instruction::Call: { auto *CI = dyn_cast<CallInst>(Inst); Index: llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp =================================================================== --- llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -1313,7 +1313,7 @@ if (!(Flags & MachineMemOperand::MODereferenceable)) { if (isDereferenceableAndAlignedPointer(Ptr, LI.getType(), LI.getAlign(), - *DL, &LI, nullptr, LibInfo)) + *DL, &LI, nullptr, nullptr, LibInfo)) Flags |= MachineMemOperand::MODereferenceable; } Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp =================================================================== --- llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -4139,7 +4139,7 @@ } if (isDereferenceableAndAlignedPointer(SV, Ty, Alignment, DAG.getDataLayout(), - &I, nullptr, LibInfo)) + &I, nullptr, nullptr, LibInfo)) MMOFlags |= MachineMemOperand::MODereferenceable; SDLoc dl = getCurSDLoc(); Index: llvm/lib/Transforms/Scalar/LICM.cpp =================================================================== --- llvm/lib/Transforms/Scalar/LICM.cpp +++ llvm/lib/Transforms/Scalar/LICM.cpp @@ -2090,7 +2090,9 @@ if (!DereferenceableInPH) { 
DereferenceableInPH = isDereferenceableAndAlignedPointer( Store->getPointerOperand(), Store->getValueOperand()->getType(), - Store->getAlign(), MDL, Preheader->getTerminator(), DT, TLI); + Store->getAlign(), MDL, Preheader->getTerminator(), + nullptr, // FIXME + DT, TLI); } } else continue; // Not a load or store. Index: llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp =================================================================== --- llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp +++ llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp @@ -920,12 +920,11 @@ // trap. Otherwise the transform is invalid since it might cause a trap // to occur earlier than it otherwise would. if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpySize), - DL, C, DT)) { + DL, C, nullptr, DT)) { LLVM_DEBUG(dbgs() << "Call Slot: Dest pointer not dereferenceable\n"); return false; } - // Make sure that nothing can observe cpyDest being written early. There are // a number of cases to consider: // 1. cpyDest cannot be accessed between C and cpyStore as a precondition of Index: llvm/lib/Transforms/Utils/LoopPeel.cpp =================================================================== --- llvm/lib/Transforms/Utils/LoopPeel.cpp +++ llvm/lib/Transforms/Utils/LoopPeel.cpp @@ -202,7 +202,7 @@ if (auto *LI = dyn_cast<LoadInst>(&I)) { Value *Ptr = LI->getPointerOperand(); if (DT.dominates(BB, Latch) && L.isLoopInvariant(Ptr) && - !isDereferenceablePointer(Ptr, LI->getType(), DL, LI, &DT)) + !isDereferenceablePointer(Ptr, LI->getType(), DL, LI, nullptr, &DT)) for (Value *U : I.users()) LoadUsers.insert(U); }