Index: include/llvm/Analysis/ValueTracking.h
===================================================================
--- include/llvm/Analysis/ValueTracking.h
+++ include/llvm/Analysis/ValueTracking.h
@@ -237,13 +237,8 @@
   bool onlyUsedByLifetimeMarkers(const Value *V);
 
   /// isDereferenceablePointer - Return true if this is always a dereferenceable
-  /// pointer. If the context instruction is specified perform context-sensitive
-  /// analysis and return true if the pointer is dereferenceable at the
-  /// specified instruction.
-  bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
-                                const Instruction *CtxI = nullptr,
-                                const DominatorTree *DT = nullptr,
-                                const TargetLibraryInfo *TLI = nullptr);
+  /// pointer.
+  bool isDereferenceablePointer(const Value *V, const DataLayout &DL);
 
   /// Returns true if V is always a dereferenceable pointer with alignment
   /// greater or equal than requested. If the context instruction is specified
Index: include/llvm/IR/Value.h
===================================================================
--- include/llvm/IR/Value.h
+++ include/llvm/IR/Value.h
@@ -498,6 +498,15 @@
     return const_cast<Value *>(this)->stripInBoundsOffsets();
   }
 
+  /// \brief Check if this is always a dereferenceable pointer.
+  /// Sets CanBeNull if the pointer can be either dereferenceable or null.
+  bool isDereferenceablePointer(bool &CanBeNull) const;
+
+  /// \brief Returns the number of bytes that can be safely dereferenced
+  /// via the pointer.
+  /// Sets CanBeNull if the pointer can be either dereferenceable or null.
+  uint64_t getDereferenceableBytes(const DataLayout &DL, bool &CanBeNull) const;
+
   /// \brief Translate PHI node to its predecessor from the given basic block.
   ///
   /// If this value is a PHI node with CurBB as its parent, return the value in
Index: lib/Analysis/ValueTracking.cpp
===================================================================
--- lib/Analysis/ValueTracking.cpp
+++ lib/Analysis/ValueTracking.cpp
@@ -3122,169 +3122,66 @@
   return true;
 }
 
-static bool isDereferenceableFromAttribute(const Value *BV, APInt Offset,
-                                           Type *Ty, const DataLayout &DL,
-                                           const Instruction *CtxI,
-                                           const DominatorTree *DT,
-                                           const TargetLibraryInfo *TLI) {
-  assert(Offset.isNonNegative() && "offset can't be negative");
-  assert(Ty->isSized() && "must be sized");
-
-  APInt DerefBytes(Offset.getBitWidth(), 0);
-  bool CheckForNonNull = false;
-  if (const Argument *A = dyn_cast<Argument>(BV)) {
-    DerefBytes = A->getDereferenceableBytes();
-    if (!DerefBytes.getBoolValue()) {
-      DerefBytes = A->getDereferenceableOrNullBytes();
-      CheckForNonNull = true;
-    }
-  } else if (auto CS = ImmutableCallSite(BV)) {
-    DerefBytes = CS.getDereferenceableBytes(0);
-    if (!DerefBytes.getBoolValue()) {
-      DerefBytes = CS.getDereferenceableOrNullBytes(0);
-      CheckForNonNull = true;
-    }
-  } else if (const LoadInst *LI = dyn_cast<LoadInst>(BV)) {
-    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
-      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
-      DerefBytes = CI->getLimitedValue();
-    }
-    if (!DerefBytes.getBoolValue()) {
-      if (MDNode *MD =
-              LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
-        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
-        DerefBytes = CI->getLimitedValue();
-      }
-      CheckForNonNull = true;
-    }
-  }
-
-  if (DerefBytes.getBoolValue())
-    if (DerefBytes.uge(Offset + DL.getTypeStoreSize(Ty)))
-      if (!CheckForNonNull || isKnownNonNullAt(BV, CtxI, DT, TLI))
-        return true;
-
-  return false;
-}
-
-static bool isDereferenceableFromAttribute(const Value *V, const DataLayout &DL,
-                                           const Instruction *CtxI,
-                                           const DominatorTree *DT,
-                                           const TargetLibraryInfo *TLI) {
-  Type *VTy = V->getType();
-  Type *Ty = VTy->getPointerElementType();
-  if (!Ty->isSized())
-    return false;
-
-  APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
-  return isDereferenceableFromAttribute(V, Offset, Ty, DL, CtxI, DT, TLI);
-}
-
-static bool isAligned(const Value *Base, APInt Offset, unsigned Align,
+static bool isAligned(const Value *Base, uint64_t Offset, unsigned Align,
                       const DataLayout &DL) {
-  APInt BaseAlign(Offset.getBitWidth(), getAlignment(Base, DL));
+  unsigned BaseAlign = getAlignment(Base, DL);
 
-  if (!BaseAlign) {
+  if (BaseAlign == 0) {
     Type *Ty = Base->getType()->getPointerElementType();
     if (!Ty->isSized())
       return false;
     BaseAlign = DL.getABITypeAlignment(Ty);
   }
 
-  APInt Alignment(Offset.getBitWidth(), Align);
-
-  assert(Alignment.isPowerOf2() && "must be a power of 2!");
-  return BaseAlign.uge(Alignment) && !(Offset & (Alignment-1));
+  assert(isPowerOf2_32(Align) && "must be a power of 2!");
+  return BaseAlign >= Align && !(Offset & (Align-1));
 }
 
 static bool isAligned(const Value *Base, unsigned Align, const DataLayout &DL) {
-  Type *Ty = Base->getType();
-  assert(Ty->isSized() && "must be sized");
-  APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
-  return isAligned(Base, Offset, Align, DL);
+  return isAligned(Base, 0, Align, DL);
}
 
-/// Test if V is always a pointer to allocated and suitably aligned memory for
-/// a simple load or store.
-static bool isDereferenceableAndAlignedPointer(
-    const Value *V, unsigned Align, const DataLayout &DL,
-    const Instruction *CtxI, const DominatorTree *DT,
-    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited) {
-  // Note that it is not safe to speculate into a malloc'd region because
-  // malloc may return null.
-
-  // These are obviously ok if aligned.
-  if (isa<AllocaInst>(V))
-    return isAligned(V, Align, DL);
-
-  // It's not always safe to follow a bitcast, for example:
-  //   bitcast i8* (alloca i8) to i32*
-  // would result in a 4-byte load from a 1-byte alloca. However,
-  // if we're casting from a pointer from a type of larger size
-  // to a type of smaller size (or the same size), and the alignment
-  // is at least as large as for the resulting pointer type, then
-  // we can look through the bitcast.
-  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
-    Type *STy = BC->getSrcTy()->getPointerElementType(),
-         *DTy = BC->getDestTy()->getPointerElementType();
-    if (STy->isSized() && DTy->isSized() &&
-        (DL.getTypeStoreSize(STy) >= DL.getTypeStoreSize(DTy)) &&
-        (DL.getABITypeAlignment(STy) >= DL.getABITypeAlignment(DTy)))
-      return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, DL,
-                                                CtxI, DT, TLI, Visited);
-  }
-
-  // Global variables which can't collapse to null are ok.
-  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
-    if (!GV->hasExternalWeakLinkage())
-      return isAligned(V, Align, DL);
-
-  // byval arguments are okay.
-  if (const Argument *A = dyn_cast<Argument>(V))
-    if (A->hasByValAttr())
-      return isAligned(V, Align, DL);
-
-  if (isDereferenceableFromAttribute(V, DL, CtxI, DT, TLI))
-    return isAligned(V, Align, DL);
-
-  // For GEPs, determine if the indexing lands within the allocated object.
-  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
-    Type *VTy = GEP->getType();
-    Type *Ty = VTy->getPointerElementType();
-    const Value *Base = GEP->getPointerOperand();
-
-    // Conservatively require that the base pointer be fully dereferenceable
-    // and aligned.
-    if (!Visited.insert(Base).second)
-      return false;
-    if (!isDereferenceableAndAlignedPointer(Base, Align, DL, CtxI, DT, TLI,
-                                            Visited))
-      return false;
+static const Value *stripForDereferenceable(const Value *V) {
+  // Even though we don't look through PHI nodes, we could be called on an
+  // instruction in an unreachable block, which may be on a cycle.
+  SmallPtrSet<const Value *, 4> Visited;
+  Visited.insert(V);
+  do {
+    V = V->stripPointerCasts();
 
-    APInt Offset(DL.getPointerTypeSizeInBits(VTy), 0);
-    if (!GEP->accumulateConstantOffset(DL, Offset))
-      return false;
+    if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V)) {
+      V = RelocateInst->getDerivedPtr();
+    } else {
+      return V;
+    }
+    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
+  } while (Visited.insert(V).second);
 
-    // Check if the load is within the bounds of the underlying object
-    // and offset is aligned.
-    uint64_t LoadSize = DL.getTypeStoreSize(Ty);
-    Type *BaseType = Base->getType()->getPointerElementType();
-    assert(isPowerOf2_32(Align) && "must be a power of 2!");
-    return (Offset + LoadSize).ule(DL.getTypeAllocSize(BaseType)) &&
-           !(Offset & APInt(Offset.getBitWidth(), Align-1));
-  }
+  return V;
+}
 
-  // For gc.relocate, look through relocations
-  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
-    return isDereferenceableAndAlignedPointer(
-        RelocateInst->getDerivedPtr(), Align, DL, CtxI, DT, TLI, Visited);
+static const Value *
+stripAndAccumulateOffsetForDereferenceable(const Value *V,
+                                           const DataLayout &DL,
+                                           uint64_t &Offset) {
+  // Even though we don't look through PHI nodes, we could be called on an
+  // instruction in an unreachable block, which may be on a cycle.
+  SmallPtrSet<const Value *, 4> Visited;
+  Visited.insert(V);
+  do {
+    int64_t CurrentOffset = 0;
+    V = GetPointerBaseWithConstantOffset(V, CurrentOffset, DL);
+    Offset += CurrentOffset;
 
-  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
-    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, DL,
-                                              CtxI, DT, TLI, Visited);
+    if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V)) {
+      V = RelocateInst->getDerivedPtr();
+    } else {
+      return V;
+    }
+    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
+  } while (Visited.insert(V).second);
 
-  // If we don't know, assume the worst.
-  return false;
+  return V;
 }
 
 bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
@@ -3292,10 +3189,6 @@
                                               const Instruction *CtxI,
                                               const DominatorTree *DT,
                                               const TargetLibraryInfo *TLI) {
-  // When dereferenceability information is provided by a dereferenceable
-  // attribute, we know exactly how many bytes are dereferenceable. If we can
-  // determine the exact offset to the attributed variable, we can use that
-  // information here.
   Type *VTy = V->getType();
   Type *Ty = VTy->getPointerElementType();
 
@@ -3303,26 +3196,40 @@
   if (Align == 0)
     Align = DL.getABITypeAlignment(Ty);
 
-  if (Ty->isSized()) {
-    APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
-    const Value *BV = V->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
+  bool CanBeNull = false;
 
-    if (Offset.isNonNegative())
-      if (isDereferenceableFromAttribute(BV, Offset, Ty, DL, CtxI, DT, TLI) &&
-          isAligned(BV, Offset, Align, DL))
-        return true;
+  // (1) Check if we can say if V is dereferenceable without dealing with type
+  // sizes. Strip casts and GC relocations. If the resulting pointer is fully
+  // dereferenceable, we are done.
+  const Value *BV = stripForDereferenceable(V);
+  if (BV->getType() == V->getType())
+    if (BV->isDereferenceablePointer(CanBeNull)) {
+      if (CanBeNull && !isKnownNonNullAt(BV, CtxI, DT, TLI))
+        return false;
+      return Align == 1 || isAligned(BV, Align, DL);
+    }
+
+  // If the accessed type is not sized, there is nothing we can do.
+  if (!Ty->isSized())
+    return false;
+
+  // (2) Strip casts, relocations, GEPs and accumulate V's offset along the way.
+  // Check if the access is within the dereferenceable portion of the base pointer.
+  uint64_t Offset = 0;
+  BV = stripAndAccumulateOffsetForDereferenceable(V, DL, Offset);
+
+  uint64_t DerefBytes = BV->getDereferenceableBytes(DL, CanBeNull);
+  if (DerefBytes >= Offset + DL.getTypeStoreSize(Ty)) {
+    if (CanBeNull && !isKnownNonNullAt(BV, CtxI, DT, TLI))
+      return false;
+    return Align == 1 || isAligned(BV, Offset, Align, DL);
   }
 
-  SmallPtrSet<const Value *, 32> Visited;
-  return ::isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT, TLI,
-                                              Visited);
+  return false;
 }
 
-bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
-                                    const Instruction *CtxI,
-                                    const DominatorTree *DT,
-                                    const TargetLibraryInfo *TLI) {
-  return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT, TLI);
+bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL) {
+  return isDereferenceableAndAlignedPointer(V, 1, DL);
 }
 
 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
Index: lib/IR/Value.cpp
===================================================================
--- lib/IR/Value.cpp
+++ lib/IR/Value.cpp
@@ -517,6 +517,84 @@
   return stripPointerCastsAndOffsets<PSK_InBounds>(this);
 }
 
+bool Value::isDereferenceablePointer(bool &CanBeNull) const {
+  CanBeNull = false;
+
+  if (isa<AllocaInst>(this))
+    return true;
+
+  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(this)) {
+    // TODO: set CanBeNull and return true instead
+    return !GV->hasExternalWeakLinkage();
+  }
+
+  if (const Argument *A = dyn_cast<Argument>(this))
+    if (A->hasByValAttr())
+      return true;
+
+  return false;
+}
+
+uint64_t Value::getDereferenceableBytes(const DataLayout &DL,
+                                        bool &CanBeNull) const {
+  CanBeNull = false;
+
+  Type *Ty = getType()->getPointerElementType();
+  uint64_t TyStoreSize = Ty->isSized() ? DL.getTypeStoreSize(Ty) : 0;
+
+  if (isa<AllocaInst>(this)) {
+    return TyStoreSize;
+  }
+
+  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(this)) {
+    // TODO: set CanBeNull and return TyStoreSize instead
+    if (GV->hasExternalWeakLinkage())
+      return 0;
+    return TyStoreSize;
+  }
+
+  uint64_t DerefBytes = 0;
+  if (const Argument *A = dyn_cast<Argument>(this)) {
+    if (A->hasByValAttr()) {
+      DerefBytes = TyStoreSize;
+    } else {
+      DerefBytes = A->getDereferenceableBytes();
+      if (DerefBytes == 0) {
+        DerefBytes = A->getDereferenceableOrNullBytes();
+        CanBeNull = true;
+      }
+    }
+    return DerefBytes;
+  }
+
+  if (auto CS = ImmutableCallSite(this)) {
+    DerefBytes = CS.getDereferenceableBytes(0);
+    if (DerefBytes == 0) {
+      DerefBytes = CS.getDereferenceableOrNullBytes(0);
+      CanBeNull = true;
+    }
+    return DerefBytes;
+  }
+
+  if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
+    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
+      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
+      DerefBytes = CI->getLimitedValue();
+    }
+    if (DerefBytes == 0) {
+      if (MDNode *MD =
+              LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
+        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
+        DerefBytes = CI->getLimitedValue();
+      }
+      CanBeNull = true;
+    }
+    return DerefBytes;
+  }
+
+  return 0;
+}
+
 Value *Value::DoPHITranslation(const BasicBlock *CurBB,
                                const BasicBlock *PredBB) {
   PHINode *PN = dyn_cast<PHINode>(this);
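
Illustrative usage (not part of the patch): a minimal sketch of how a client might query the Value::getDereferenceableBytes API added in lib/IR/Value.cpp above. The helper name isSpeculatableLoad and its surrounding setup are hypothetical; only the new Value method and the existing LLVM calls (LoadInst::getPointerOperand, DataLayout::getTypeStoreSize) come from the patch or current API.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Hypothetical caller: decide whether a load can be speculated using only the
// statically known dereferenceable bytes of its pointer operand, mirroring the
// context-free isDereferenceablePointer(V, DL) signature introduced above.
static bool isSpeculatableLoad(const LoadInst &LI, const DataLayout &DL) {
  const Value *Ptr = LI.getPointerOperand();

  bool CanBeNull = false;
  uint64_t DerefBytes = Ptr->getDereferenceableBytes(DL, CanBeNull);

  // The load reads getTypeStoreSize(loaded type) bytes at offset 0, so it is
  // safe only if at least that many bytes are known dereferenceable and the
  // pointer cannot be null (otherwise a non-null proof would be required,
  // as the patch does via isKnownNonNullAt).
  uint64_t LoadSize = DL.getTypeStoreSize(LI.getType());
  return DerefBytes >= LoadSize && !CanBeNull;
}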