Index: llvm/trunk/include/llvm/Analysis/Loads.h
===================================================================
--- llvm/trunk/include/llvm/Analysis/Loads.h
+++ llvm/trunk/include/llvm/Analysis/Loads.h
@@ -23,6 +23,25 @@
 class DataLayout;
 class MDNode;
 
+/// isDereferenceablePointer - Return true if this is always a dereferenceable
+/// pointer. If the context instruction is specified perform context-sensitive
+/// analysis and return true if the pointer is dereferenceable at the
+/// specified instruction.
+bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
+                              const Instruction *CtxI = nullptr,
+                              const DominatorTree *DT = nullptr,
+                              const TargetLibraryInfo *TLI = nullptr);
+
+/// Returns true if V is always a dereferenceable pointer with alignment
+/// greater or equal than requested. If the context instruction is specified
+/// performs context-sensitive analysis and returns true if the pointer is
+/// dereferenceable at the specified instruction.
+bool isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
+                                        const DataLayout &DL,
+                                        const Instruction *CtxI = nullptr,
+                                        const DominatorTree *DT = nullptr,
+                                        const TargetLibraryInfo *TLI = nullptr);
+
 /// isSafeToLoadUnconditionally - Return true if we know that executing a load
 /// from this value cannot trap.
 ///
Index: llvm/trunk/include/llvm/Analysis/ValueTracking.h
===================================================================
--- llvm/trunk/include/llvm/Analysis/ValueTracking.h
+++ llvm/trunk/include/llvm/Analysis/ValueTracking.h
@@ -236,25 +236,6 @@
   /// are lifetime markers.
   bool onlyUsedByLifetimeMarkers(const Value *V);
 
-  /// isDereferenceablePointer - Return true if this is always a dereferenceable
-  /// pointer. If the context instruction is specified perform context-sensitive
-  /// analysis and return true if the pointer is dereferenceable at the
-  /// specified instruction.
-  bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
-                                const Instruction *CtxI = nullptr,
-                                const DominatorTree *DT = nullptr,
-                                const TargetLibraryInfo *TLI = nullptr);
-
-  /// Returns true if V is always a dereferenceable pointer with alignment
-  /// greater or equal than requested. If the context instruction is specified
-  /// performs context-sensitive analysis and returns true if the pointer is
-  /// dereferenceable at the specified instruction.
-  bool isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
-                                          const DataLayout &DL,
-                                          const Instruction *CtxI = nullptr,
-                                          const DominatorTree *DT = nullptr,
-                                          const TargetLibraryInfo *TLI = nullptr);
-
   /// isSafeToSpeculativelyExecute - Return true if the instruction does not
   /// have any effects besides calculating the result and does not have
   /// undefined behavior.
Index: llvm/trunk/lib/Analysis/Loads.cpp
===================================================================
--- llvm/trunk/lib/Analysis/Loads.cpp
+++ llvm/trunk/lib/Analysis/Loads.cpp
@@ -21,8 +21,212 @@
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
 #include "llvm/IR/Operator.h"
+#include "llvm/IR/Statepoint.h"
+
 using namespace llvm;
 
+static bool isDereferenceableFromAttribute(const Value *BV, APInt Offset,
+                                           Type *Ty, const DataLayout &DL,
+                                           const Instruction *CtxI,
+                                           const DominatorTree *DT,
+                                           const TargetLibraryInfo *TLI) {
+  assert(Offset.isNonNegative() && "offset can't be negative");
+  assert(Ty->isSized() && "must be sized");
+
+  APInt DerefBytes(Offset.getBitWidth(), 0);
+  bool CheckForNonNull = false;
+  if (const Argument *A = dyn_cast<Argument>(BV)) {
+    DerefBytes = A->getDereferenceableBytes();
+    if (!DerefBytes.getBoolValue()) {
+      DerefBytes = A->getDereferenceableOrNullBytes();
+      CheckForNonNull = true;
+    }
+  } else if (auto CS = ImmutableCallSite(BV)) {
+    DerefBytes = CS.getDereferenceableBytes(0);
+    if (!DerefBytes.getBoolValue()) {
+      DerefBytes = CS.getDereferenceableOrNullBytes(0);
+      CheckForNonNull = true;
+    }
+  } else if (const LoadInst *LI = dyn_cast<LoadInst>(BV)) {
+    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
+      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
+      DerefBytes = CI->getLimitedValue();
+    }
+    if (!DerefBytes.getBoolValue()) {
+      if (MDNode *MD =
+              LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
+        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
+        DerefBytes = CI->getLimitedValue();
+      }
+      CheckForNonNull = true;
+    }
+  }
+
+  if (DerefBytes.getBoolValue())
+    if (DerefBytes.uge(Offset + DL.getTypeStoreSize(Ty)))
+      if (!CheckForNonNull || isKnownNonNullAt(BV, CtxI, DT, TLI))
+        return true;
+
+  return false;
+}
+
+static bool isDereferenceableFromAttribute(const Value *V, const DataLayout &DL,
+                                           const Instruction *CtxI,
+                                           const DominatorTree *DT,
+                                           const TargetLibraryInfo *TLI) {
+  Type *VTy = V->getType();
+  Type *Ty = VTy->getPointerElementType();
+  if (!Ty->isSized())
+    return false;
+
+  APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
+  return isDereferenceableFromAttribute(V, Offset, Ty, DL, CtxI, DT, TLI);
+}
+
+static bool isAligned(const Value *Base, APInt Offset, unsigned Align,
+                      const DataLayout &DL) {
+  APInt BaseAlign(Offset.getBitWidth(), Base->getPointerAlignment(DL));
+
+  if (!BaseAlign) {
+    Type *Ty = Base->getType()->getPointerElementType();
+    if (!Ty->isSized())
+      return false;
+    BaseAlign = DL.getABITypeAlignment(Ty);
+  }
+
+  APInt Alignment(Offset.getBitWidth(), Align);
+
+  assert(Alignment.isPowerOf2() && "must be a power of 2!");
+  return BaseAlign.uge(Alignment) && !(Offset & (Alignment-1));
+}
+
+static bool isAligned(const Value *Base, unsigned Align, const DataLayout &DL) {
+  Type *Ty = Base->getType();
+  assert(Ty->isSized() && "must be sized");
+  APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
+  return isAligned(Base, Offset, Align, DL);
+}
+
+/// Test if V is always a pointer to allocated and suitably aligned memory for
+/// a simple load or store.
+static bool isDereferenceableAndAlignedPointer(
+    const Value *V, unsigned Align, const DataLayout &DL,
+    const Instruction *CtxI, const DominatorTree *DT,
+    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited) {
+  // Note that it is not safe to speculate into a malloc'd region because
+  // malloc may return null.
+
+  // These are obviously ok if aligned.
+  if (isa<AllocaInst>(V))
+    return isAligned(V, Align, DL);
+
+  // It's not always safe to follow a bitcast, for example:
+  //   bitcast i8* (alloca i8) to i32*
+  // would result in a 4-byte load from a 1-byte alloca. However,
+  // if we're casting from a pointer from a type of larger size
+  // to a type of smaller size (or the same size), and the alignment
+  // is at least as large as for the resulting pointer type, then
+  // we can look through the bitcast.
+  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
+    Type *STy = BC->getSrcTy()->getPointerElementType(),
+         *DTy = BC->getDestTy()->getPointerElementType();
+    if (STy->isSized() && DTy->isSized() &&
+        (DL.getTypeStoreSize(STy) >= DL.getTypeStoreSize(DTy)) &&
+        (DL.getABITypeAlignment(STy) >= DL.getABITypeAlignment(DTy)))
+      return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, DL,
+                                                CtxI, DT, TLI, Visited);
+  }
+
+  // Global variables which can't collapse to null are ok.
+  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
+    if (!GV->hasExternalWeakLinkage())
+      return isAligned(V, Align, DL);
+
+  // byval arguments are okay.
+  if (const Argument *A = dyn_cast<Argument>(V))
+    if (A->hasByValAttr())
+      return isAligned(V, Align, DL);
+
+  if (isDereferenceableFromAttribute(V, DL, CtxI, DT, TLI))
+    return isAligned(V, Align, DL);
+
+  // For GEPs, determine if the indexing lands within the allocated object.
+  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
+    Type *Ty = GEP->getResultElementType();
+    const Value *Base = GEP->getPointerOperand();
+
+    // Conservatively require that the base pointer be fully dereferenceable
+    // and aligned.
+    if (!Visited.insert(Base).second)
+      return false;
+    if (!isDereferenceableAndAlignedPointer(Base, Align, DL, CtxI, DT, TLI,
+                                            Visited))
+      return false;
+
+    APInt Offset(DL.getPointerTypeSizeInBits(GEP->getType()), 0);
+    if (!GEP->accumulateConstantOffset(DL, Offset))
+      return false;
+
+    // Check if the load is within the bounds of the underlying object
+    // and offset is aligned.
+    uint64_t LoadSize = DL.getTypeStoreSize(Ty);
+    Type *BaseType = GEP->getSourceElementType();
+    assert(isPowerOf2_32(Align) && "must be a power of 2!");
+    return (Offset + LoadSize).ule(DL.getTypeAllocSize(BaseType)) &&
+           !(Offset & APInt(Offset.getBitWidth(), Align-1));
+  }
+
+  // For gc.relocate, look through relocations
+  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
+    return isDereferenceableAndAlignedPointer(
+        RelocateInst->getDerivedPtr(), Align, DL, CtxI, DT, TLI, Visited);
+
+  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
+    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, DL,
+                                              CtxI, DT, TLI, Visited);
+
+  // If we don't know, assume the worst.
+  return false;
+}
+
+bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
+                                              const DataLayout &DL,
+                                              const Instruction *CtxI,
+                                              const DominatorTree *DT,
+                                              const TargetLibraryInfo *TLI) {
+  // When dereferenceability information is provided by a dereferenceable
+  // attribute, we know exactly how many bytes are dereferenceable. If we can
+  // determine the exact offset to the attributed variable, we can use that
+  // information here.
+  Type *VTy = V->getType();
+  Type *Ty = VTy->getPointerElementType();
+
+  // Require ABI alignment for loads without alignment specification
+  if (Align == 0)
+    Align = DL.getABITypeAlignment(Ty);
+
+  if (Ty->isSized()) {
+    APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
+    const Value *BV = V->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
+
+    if (Offset.isNonNegative())
+      if (isDereferenceableFromAttribute(BV, Offset, Ty, DL, CtxI, DT, TLI) &&
+          isAligned(BV, Offset, Align, DL))
+        return true;
+  }
+
+  SmallPtrSet<const Value *, 32> Visited;
+  return ::isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT, TLI,
+                                              Visited);
+}
+
+bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
+                                    const Instruction *CtxI,
+                                    const DominatorTree *DT,
+                                    const TargetLibraryInfo *TLI) {
+  return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT, TLI);
+}
+
 /// \brief Test if A and B will obviously have the same value.
 ///
 /// This includes recognizing that %t0 and %t1 will have the same
Index: llvm/trunk/lib/Analysis/MemDerefPrinter.cpp
===================================================================
--- llvm/trunk/lib/Analysis/MemDerefPrinter.cpp
+++ llvm/trunk/lib/Analysis/MemDerefPrinter.cpp
@@ -10,7 +10,7 @@
 #include "llvm/Analysis/Passes.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
-#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/Loads.h"
 #include "llvm/IR/CallSite.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/IR/InstIterator.h"
Index: llvm/trunk/lib/Analysis/ValueTracking.cpp
===================================================================
--- llvm/trunk/lib/Analysis/ValueTracking.cpp
+++ llvm/trunk/lib/Analysis/ValueTracking.cpp
@@ -18,6 +18,7 @@
 #include "llvm/Analysis/AssumptionCache.h"
 #include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/IR/CallSite.h"
 #include "llvm/IR/ConstantRange.h"
@@ -3106,208 +3107,6 @@
   return true;
 }
 
-static bool isDereferenceableFromAttribute(const Value *BV, APInt Offset,
-                                           Type *Ty, const DataLayout &DL,
-                                           const Instruction *CtxI,
-                                           const DominatorTree *DT,
-                                           const TargetLibraryInfo *TLI) {
-  assert(Offset.isNonNegative() && "offset can't be negative");
-  assert(Ty->isSized() && "must be sized");
-
-  APInt DerefBytes(Offset.getBitWidth(), 0);
-  bool CheckForNonNull = false;
-  if (const Argument *A = dyn_cast<Argument>(BV)) {
-    DerefBytes = A->getDereferenceableBytes();
-    if (!DerefBytes.getBoolValue()) {
-      DerefBytes = A->getDereferenceableOrNullBytes();
-      CheckForNonNull = true;
-    }
-  } else if (auto CS = ImmutableCallSite(BV)) {
-    DerefBytes = CS.getDereferenceableBytes(0);
-    if (!DerefBytes.getBoolValue()) {
-      DerefBytes = CS.getDereferenceableOrNullBytes(0);
-      CheckForNonNull = true;
-    }
-  } else if (const LoadInst *LI = dyn_cast<LoadInst>(BV)) {
-    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
-      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
-      DerefBytes = CI->getLimitedValue();
-    }
-    if (!DerefBytes.getBoolValue()) {
-      if (MDNode *MD =
-              LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
-        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
-        DerefBytes = CI->getLimitedValue();
-      }
-      CheckForNonNull = true;
-    }
-  }
-
-  if (DerefBytes.getBoolValue())
-    if (DerefBytes.uge(Offset + DL.getTypeStoreSize(Ty)))
-      if (!CheckForNonNull || isKnownNonNullAt(BV, CtxI, DT, TLI))
-        return true;
-
-  return false;
-}
-
-static bool isDereferenceableFromAttribute(const Value *V, const DataLayout &DL,
-                                           const Instruction *CtxI,
-                                           const DominatorTree *DT,
-                                           const TargetLibraryInfo *TLI) {
-  Type *VTy = V->getType();
-  Type *Ty = VTy->getPointerElementType();
-  if (!Ty->isSized())
-    return false;
-
-  APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
-  return isDereferenceableFromAttribute(V, Offset, Ty, DL, CtxI, DT, TLI);
-}
-
-static bool isAligned(const Value *Base, APInt Offset, unsigned Align,
-                      const DataLayout &DL) {
-  APInt BaseAlign(Offset.getBitWidth(), Base->getPointerAlignment(DL));
-
-  if (!BaseAlign) {
-    Type *Ty = Base->getType()->getPointerElementType();
-    if (!Ty->isSized())
-      return false;
-    BaseAlign = DL.getABITypeAlignment(Ty);
-  }
-
-  APInt Alignment(Offset.getBitWidth(), Align);
-
-  assert(Alignment.isPowerOf2() && "must be a power of 2!");
-  return BaseAlign.uge(Alignment) && !(Offset & (Alignment-1));
-}
-
-static bool isAligned(const Value *Base, unsigned Align, const DataLayout &DL) {
-  Type *Ty = Base->getType();
-  assert(Ty->isSized() && "must be sized");
-  APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
-  return isAligned(Base, Offset, Align, DL);
-}
-
-/// Test if V is always a pointer to allocated and suitably aligned memory for
-/// a simple load or store.
-static bool isDereferenceableAndAlignedPointer(
-    const Value *V, unsigned Align, const DataLayout &DL,
-    const Instruction *CtxI, const DominatorTree *DT,
-    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited) {
-  // Note that it is not safe to speculate into a malloc'd region because
-  // malloc may return null.
-
-  // These are obviously ok if aligned.
-  if (isa<AllocaInst>(V))
-    return isAligned(V, Align, DL);
-
-  // It's not always safe to follow a bitcast, for example:
-  //   bitcast i8* (alloca i8) to i32*
-  // would result in a 4-byte load from a 1-byte alloca. However,
-  // if we're casting from a pointer from a type of larger size
-  // to a type of smaller size (or the same size), and the alignment
-  // is at least as large as for the resulting pointer type, then
-  // we can look through the bitcast.
-  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
-    Type *STy = BC->getSrcTy()->getPointerElementType(),
-         *DTy = BC->getDestTy()->getPointerElementType();
-    if (STy->isSized() && DTy->isSized() &&
-        (DL.getTypeStoreSize(STy) >= DL.getTypeStoreSize(DTy)) &&
-        (DL.getABITypeAlignment(STy) >= DL.getABITypeAlignment(DTy)))
-      return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, DL,
-                                                CtxI, DT, TLI, Visited);
-  }
-
-  // Global variables which can't collapse to null are ok.
-  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
-    if (!GV->hasExternalWeakLinkage())
-      return isAligned(V, Align, DL);
-
-  // byval arguments are okay.
-  if (const Argument *A = dyn_cast<Argument>(V))
-    if (A->hasByValAttr())
-      return isAligned(V, Align, DL);
-
-  if (isDereferenceableFromAttribute(V, DL, CtxI, DT, TLI))
-    return isAligned(V, Align, DL);
-
-  // For GEPs, determine if the indexing lands within the allocated object.
-  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
-    Type *Ty = GEP->getResultElementType();
-    const Value *Base = GEP->getPointerOperand();
-
-    // Conservatively require that the base pointer be fully dereferenceable
-    // and aligned.
-    if (!Visited.insert(Base).second)
-      return false;
-    if (!isDereferenceableAndAlignedPointer(Base, Align, DL, CtxI, DT, TLI,
-                                            Visited))
-      return false;
-
-    APInt Offset(DL.getPointerTypeSizeInBits(GEP->getType()), 0);
-    if (!GEP->accumulateConstantOffset(DL, Offset))
-      return false;
-
-    // Check if the load is within the bounds of the underlying object
-    // and offset is aligned.
-    uint64_t LoadSize = DL.getTypeStoreSize(Ty);
-    Type *BaseType = GEP->getSourceElementType();
-    assert(isPowerOf2_32(Align) && "must be a power of 2!");
-    return (Offset + LoadSize).ule(DL.getTypeAllocSize(BaseType)) &&
-           !(Offset & APInt(Offset.getBitWidth(), Align-1));
-  }
-
-  // For gc.relocate, look through relocations
-  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
-    return isDereferenceableAndAlignedPointer(
-        RelocateInst->getDerivedPtr(), Align, DL, CtxI, DT, TLI, Visited);
-
-  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
-    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, DL,
-                                              CtxI, DT, TLI, Visited);
-
-  // If we don't know, assume the worst.
-  return false;
-}
-
-bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
-                                              const DataLayout &DL,
-                                              const Instruction *CtxI,
-                                              const DominatorTree *DT,
-                                              const TargetLibraryInfo *TLI) {
-  // When dereferenceability information is provided by a dereferenceable
-  // attribute, we know exactly how many bytes are dereferenceable. If we can
-  // determine the exact offset to the attributed variable, we can use that
-  // information here.
-  Type *VTy = V->getType();
-  Type *Ty = VTy->getPointerElementType();
-
-  // Require ABI alignment for loads without alignment specification
-  if (Align == 0)
-    Align = DL.getABITypeAlignment(Ty);
-
-  if (Ty->isSized()) {
-    APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
-    const Value *BV = V->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
-
-    if (Offset.isNonNegative())
-      if (isDereferenceableFromAttribute(BV, Offset, Ty, DL, CtxI, DT, TLI) &&
-          isAligned(BV, Offset, Align, DL))
-        return true;
-  }
-
-  SmallPtrSet<const Value *, 32> Visited;
-  return ::isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT, TLI,
-                                              Visited);
-}
-
-bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
-                                    const Instruction *CtxI,
-                                    const DominatorTree *DT,
-                                    const TargetLibraryInfo *TLI) {
-  return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT, TLI);
-}
-
 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
                                         const Instruction *CtxI,
                                         const DominatorTree *DT,
Index: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -20,6 +20,7 @@
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/BranchProbabilityInfo.h"
 #include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/Analysis/VectorUtils.h"
Index: llvm/trunk/lib/Transforms/IPO/ArgumentPromotion.cpp
===================================================================
--- llvm/trunk/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ llvm/trunk/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -38,6 +38,7 @@
 #include "llvm/Analysis/BasicAliasAnalysis.h"
 #include "llvm/Analysis/CallGraph.h"
 #include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/CFG.h"
Index: llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
===================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -14,6 +14,7 @@
 #include "InstCombineInternal.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/MemoryBuiltins.h"
 #include "llvm/IR/CallSite.h"
 #include "llvm/IR/Dominators.h"