diff --git a/llvm/include/llvm/Analysis/Loads.h b/llvm/include/llvm/Analysis/Loads.h
--- a/llvm/include/llvm/Analysis/Loads.h
+++ b/llvm/include/llvm/Analysis/Loads.h
@@ -74,6 +74,20 @@
                                     Instruction *ScanFrom = nullptr,
                                     const DominatorTree *DT = nullptr);
 
+/// Returns the number of bytes known to be dereferenceable for the
+/// pointer value \p V.
+///
+/// If \p CanBeNull is set by this function, the pointer can either be null
+/// or be dereferenceable up to the returned number of bytes (or both).
+uint64_t getPointerDereferenceableBytes(const Value *V, const DataLayout &DL,
+                                        bool &CanBeNull);
+
+/// Returns the alignment of the pointer value \p V.
+///
+/// Returns an alignment which is either specified explicitly, e.g. via the
+/// align attribute of a function argument, or guaranteed by DataLayout.
+unsigned getPointerAlignment(const Value *V, const DataLayout &DL);
+
 /// The default number of maximum instructions to scan in the block, used by
 /// FindAvailableLoadedValue().
 extern cl::opt<unsigned> DefMaxInstsToScan;
diff --git a/llvm/include/llvm/IR/Value.h b/llvm/include/llvm/IR/Value.h
--- a/llvm/include/llvm/IR/Value.h
+++ b/llvm/include/llvm/IR/Value.h
@@ -620,20 +620,6 @@
         static_cast<const Value *>(this)->stripInBoundsOffsets());
   }
 
-  /// Returns the number of bytes known to be dereferenceable for the
-  /// pointer value.
-  ///
-  /// If CanBeNull is set by this function the pointer can either be null or be
-  /// dereferenceable up to the returned number of bytes.
-  uint64_t getPointerDereferenceableBytes(const DataLayout &DL,
-                                          bool &CanBeNull) const;
-
-  /// Returns an alignment of the pointer value.
-  ///
-  /// Returns an alignment which is either specified explicitly, e.g. via
-  /// align attribute of a function argument, or guaranteed by DataLayout.
-  unsigned getPointerAlignment(const DataLayout &DL) const;
-
   /// Translate PHI node to its predecessor from the given basic block.
   ///
   /// If this value is a PHI node with CurBB as its parent, return the value in
diff --git a/llvm/lib/Analysis/CaptureTracking.cpp b/llvm/lib/Analysis/CaptureTracking.cpp
--- a/llvm/lib/Analysis/CaptureTracking.cpp
+++ b/llvm/lib/Analysis/CaptureTracking.cpp
@@ -20,6 +20,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/CFG.h"
+#include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/OrderedBasicBlock.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/Constants.h"
@@ -46,7 +47,7 @@
   if (GEP->isInBounds())
     return true;
   bool CanBeNull;
-  return O->getPointerDereferenceableBytes(DL, CanBeNull);
+  return getPointerDereferenceableBytes(O, DL, CanBeNull);
 }
 
 namespace {
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -22,11 +22,13 @@
 #include "llvm/IR/Operator.h"
 #include "llvm/IR/Statepoint.h"
 
+#include <limits>
+
 using namespace llvm;
 
 static bool isAligned(const Value *Base, const APInt &Offset, unsigned Align,
                       const DataLayout &DL) {
-  APInt BaseAlign(Offset.getBitWidth(), Base->getPointerAlignment(DL));
+  APInt BaseAlign(Offset.getBitWidth(), getPointerAlignment(Base, DL));
   if (!BaseAlign) {
     Type *Ty = Base->getType()->getPointerElementType();
@@ -48,71 +50,244 @@
   return isAligned(Base, Offset, Align, DL);
 }
 
-/// Test if V is always a pointer to allocated and suitably aligned memory for
-/// a simple load or store.
-static bool isDereferenceableAndAlignedPointer( - const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL, - const Instruction *CtxI, const DominatorTree *DT, - SmallPtrSetImpl &Visited) { - // Already visited? Bail out, we've likely hit unreachable code. - if (!Visited.insert(V).second) - return false; - - // Note that it is not safe to speculate into a malloc'd region because - // malloc may return null. +static unsigned getPointerAlignment(const Value *V, const DataLayout &DL, + SmallPtrSetImpl &Visited) { + assert(V->getType()->isPointerTy() && "must be pointer"); - // bitcast instructions are no-ops as far as dereferenceability is concerned. - if (const BitCastOperator *BC = dyn_cast(V)) - return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size, - DL, CtxI, DT, Visited); + static const unsigned MAX_ALIGN = 1U << 29; + if (!Visited.insert(V).second) + return MAX_ALIGN; + + unsigned Align = 0; + if (auto *GO = dyn_cast(V)) { + if (isa(GO)) { + MaybeAlign FunctionPtrAlign = DL.getFunctionPtrAlign(); + unsigned Align = FunctionPtrAlign ? FunctionPtrAlign->value() : 0; + switch (DL.getFunctionPtrAlignType()) { + case DataLayout::FunctionPtrAlignType::Independent: + return Align; + case DataLayout::FunctionPtrAlignType::MultipleOfFunctionAlign: + return std::max(Align, GO->getAlignment()); + } + } + Align = GO->getAlignment(); + if (Align == 0) { + if (auto *GVar = dyn_cast(GO)) { + Type *ObjectType = GVar->getValueType(); + if (ObjectType->isSized()) { + // If the object is defined in the current Module, we'll be giving + // it the preferred alignment. Otherwise, we have to assume that it + // may only have the minimum ABI alignment. + if (GVar->isStrongDefinitionForLinker()) + Align = DL.getPreferredAlignment(GVar); + else + Align = DL.getABITypeAlignment(ObjectType); + } + } + } + } else if (auto *A = dyn_cast(V)) { + Align = A->getParamAlignment(); + + if (!Align && A->hasStructRetAttr()) { + // An sret parameter has at least the ABI alignment of the return type. 
+ Type *EltTy = cast(A->getType())->getElementType(); + if (EltTy->isSized()) + Align = DL.getABITypeAlignment(EltTy); + } + } else if (auto *AI = dyn_cast(V)) { + Align = AI->getAlignment(); + if (Align == 0) { + Type *AllocatedType = AI->getAllocatedType(); + if (AllocatedType->isSized()) + Align = DL.getPrefTypeAlignment(AllocatedType); + } + } else if (auto *Call = dyn_cast(V)) { + Align = Call->getRetAlignment(); + if (Align == 0 && Call->getCalledFunction()) + Align = Call->getCalledFunction()->getAttributes().getRetAlignment(); + if (Align == 0) + if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true)) + Align = getPointerAlignment(RP, DL, Visited); + } else if (auto *LI = dyn_cast(V)) { + if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) { + ConstantInt *CI = mdconst::extract(MD->getOperand(0)); + Align = CI->getLimitedValue(); + } + } else if (auto *RI = dyn_cast(V)) { + // For gc.relocate, look through relocations + Align = getPointerAlignment(RI->getDerivedPtr(), DL, Visited); + } else if (auto *ASC = dyn_cast(V)) { + Align = getPointerAlignment(ASC->getPointerOperand(), DL, Visited); + } else if (auto *GEP = dyn_cast(V)) { + const Value *Base = GEP->getPointerOperand(); - bool CheckForNonNull = false; - APInt KnownDerefBytes(Size.getBitWidth(), - V->getPointerDereferenceableBytes(DL, CheckForNonNull)); - if (KnownDerefBytes.getBoolValue()) { - if (KnownDerefBytes.uge(Size)) - if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT)) - return isAligned(V, Align, DL); + APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0); + if (GEP->accumulateConstantOffset(DL, Offset) && Offset != 0) { + auto BaseAlign = getPointerAlignment(Base, DL, Visited); + if (BaseAlign > 1) { + // For the purpose of alignment computation we treat negative offsets as + // if they were positive, so we only discuss positive offsets below. + // We assume k_x values are existentially quantified. + // + // Base has alignment BA, thus: + // Base = k_0 * BA + // GEP equals Base + Offset, thus: + // GEP = k_0 * BA + Offset + // With GCD = gcd(BA, Offset), BA = k_1 * GCD, and Offset = k_2 * GCD we + // can express GEP as follows: + // GEP = k_0 * (k_1 * GCD) + k_2 * GCD + // The common factor in both terms is GCD, so we can express it as: + // GEP = GCD * (k_0 * k_1 + k_2) + // Which implies that the GEP has GCD alignment. + APInt GCD = APIntOps::GreatestCommonDivisor( + Offset.abs(), APInt(Offset.getBitWidth(), BaseAlign)); + Align = GCD.getZExtValue(); + } + } } - // For GEPs, determine if the indexing lands within the allocated object. - if (const GEPOperator *GEP = dyn_cast(V)) { - const Value *Base = GEP->getPointerOperand(); + return Align; +} - APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0); - if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() || - !Offset.urem(APInt(Offset.getBitWidth(), Align)).isMinValue()) - return false; +unsigned llvm::getPointerAlignment(const Value *V, const DataLayout &DL) { + SmallPtrSet Visited; + return ::getPointerAlignment(V, DL, Visited); +} - // If the base pointer is dereferenceable for Offset+Size bytes, then the - // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base - // pointer is aligned to Align bytes, and the Offset is divisible by Align - // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also - // aligned to Align bytes. 
- - // Offset and Size may have different bit widths if we have visited an - // addrspacecast, so we can't do arithmetic directly on the APInt values. - return isDereferenceableAndAlignedPointer( - Base, Align, Offset + Size.sextOrTrunc(Offset.getBitWidth()), - DL, CtxI, DT, Visited); +static uint64_t +getPointerDereferenceableBytes(const Value *V, const DataLayout &DL, + bool &CanBeNull, + SmallPtrSetImpl &Visited) { + assert(V->getType()->isPointerTy() && "must be pointer"); + if (!Visited.insert(V).second) + return std::numeric_limits::max(); + + const Value *Stripped = V->stripPointerCastsSameRepresentation(); + if (Stripped != V) + return getPointerDereferenceableBytes(Stripped, DL, CanBeNull, Visited); + + const Function *F = nullptr; + uint64_t DerefBytes = 0; + CanBeNull = false; + if (const Argument *A = dyn_cast(V)) { + F = A->getParent(); + DerefBytes = A->getDereferenceableBytes(); + if (DerefBytes == 0 && (A->hasByValAttr() || A->hasStructRetAttr())) { + Type *PT = cast(A->getType())->getElementType(); + if (PT->isSized()) + DerefBytes = DL.getTypeStoreSize(PT); + } + if (DerefBytes == 0) { + DerefBytes = A->getDereferenceableOrNullBytes(); + CanBeNull = true; + } + } else if (const auto *Call = dyn_cast(V)) { + DerefBytes = Call->getDereferenceableBytes(AttributeList::ReturnIndex); + if (DerefBytes == 0) { + DerefBytes = + Call->getDereferenceableOrNullBytes(AttributeList::ReturnIndex); + CanBeNull = true; + } + if (DerefBytes == 0) + if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true)) + return getPointerDereferenceableBytes(RP, DL, CanBeNull, Visited); + } else if (const LoadInst *LI = dyn_cast(V)) { + if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) { + ConstantInt *CI = mdconst::extract(MD->getOperand(0)); + DerefBytes = CI->getLimitedValue(); + } + if (DerefBytes == 0) { + if (MDNode *MD = + LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) { + ConstantInt *CI = mdconst::extract(MD->getOperand(0)); + DerefBytes = CI->getLimitedValue(); + } + CanBeNull = true; + } + } else if (auto *IP = dyn_cast(V)) { + if (MDNode *MD = IP->getMetadata(LLVMContext::MD_dereferenceable)) { + ConstantInt *CI = mdconst::extract(MD->getOperand(0)); + DerefBytes = CI->getLimitedValue(); + } + if (DerefBytes == 0) { + if (MDNode *MD = + IP->getMetadata(LLVMContext::MD_dereferenceable_or_null)) { + ConstantInt *CI = mdconst::extract(MD->getOperand(0)); + DerefBytes = CI->getLimitedValue(); + } + CanBeNull = true; + } + } else if (auto *AI = dyn_cast(V)) { + if (!AI->isArrayAllocation()) { + DerefBytes = DL.getTypeStoreSize(AI->getAllocatedType()); + CanBeNull = false; + } + } else if (auto *GV = dyn_cast(V)) { + if (GV->getValueType()->isSized() && !GV->hasExternalWeakLinkage()) { + // TODO: Don't outright reject hasExternalWeakLinkage but set the + // CanBeNull flag. + DerefBytes = DL.getTypeStoreSize(GV->getValueType()); + CanBeNull = false; + } + } else if (auto *RI = dyn_cast(V)) { + // For gc.relocate, look through relocations + return getPointerDereferenceableBytes(RI->getDerivedPtr(), DL, CanBeNull, + Visited); + } else if (auto *ASC = dyn_cast(V)) { + DerefBytes = getPointerDereferenceableBytes(ASC->getPointerOperand(), DL, + CanBeNull, Visited); + } else if (auto *GEP = dyn_cast(V)) { + APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0); + // Give up on non-constant GEPs. 
+    if (!GEP->accumulateConstantOffset(DL, Offset))
+      return 0;
+
+    // If Base has N dereferenceable bytes and the GEP is O (=Offset) bytes
+    // away from it, then:
+    //  - if O is 0, we simply take the base result.
+    //  - if O is positive, the GEP has max(0, N - O) dereferenceable bytes
+    //    because it is O bytes advanced from the base.
+    //  - if O is negative and the GEP is inbounds, the GEP has N + abs(O)
+    //    dereferenceable bytes because an inbounds GEP has to stay within the
+    //    same allocation.
+    //  - if O is negative and the GEP is not inbounds, the GEP has 0
+    //    dereferenceable bytes because we do not know whether we are still
+    //    inside the allocation.
+    //
+    // CanBeNull is set to false if the offset is non-zero and the GEP is
+    // inbounds. Note that this is later overwritten if null pointers are
+    // defined.
+    const Value *Base = GEP->getPointerOperand();
+    uint64_t BaseDerefBytes =
+        getPointerDereferenceableBytes(Base, DL, CanBeNull, Visited);
+    if (Offset == 0) {
+      DerefBytes = BaseDerefBytes;
+    } else if (Offset.getSExtValue() > 0) {
+      DerefBytes =
+          std::max(int64_t(BaseDerefBytes - Offset.getZExtValue()), int64_t(0));
+      CanBeNull = !GEP->isInBounds();
+    } else {
+      assert(Offset.getSExtValue() < 0 && "Did not expect zero offset!");
+      if (GEP->isInBounds())
+        DerefBytes = BaseDerefBytes + Offset.abs().getZExtValue();
+      else
+        DerefBytes = 0;
+      CanBeNull = !GEP->isInBounds();
+    }
   }
 
-  // For gc.relocate, look through relocations
-  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
-    return isDereferenceableAndAlignedPointer(
-        RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);
-
-  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
-    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
-                                              DL, CtxI, DT, Visited);
+  if (auto *I = dyn_cast<Instruction>(V))
+    F = I->getFunction();
 
-  if (const auto *Call = dyn_cast<CallBase>(V))
-    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
-      return isDereferenceableAndAlignedPointer(RP, Align, Size, DL, CtxI, DT,
-                                                Visited);
+  CanBeNull |= NullPointerIsDefined(F, V->getType()->getPointerAddressSpace());
+  return DerefBytes;
+}
 
-  // If we don't know, assume the worst.
-  return false;
+uint64_t llvm::getPointerDereferenceableBytes(const Value *V,
+                                              const DataLayout &DL,
+                                              bool &CanBeNull) {
+  SmallPtrSet<const Value *, 4> Visited;
+  return ::getPointerDereferenceableBytes(V, DL, CanBeNull, Visited);
 }
 
 bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
@@ -120,9 +295,16 @@
                                               const DataLayout &DL,
                                               const Instruction *CtxI,
                                               const DominatorTree *DT) {
-  SmallPtrSet<const Value *, 32> Visited;
-  return ::isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT,
-                                              Visited);
+  bool CheckForNonNull = false;
+  APInt KnownDerefBytes(Size.getBitWidth(),
+                        getPointerDereferenceableBytes(V, DL, CheckForNonNull));
+  if (KnownDerefBytes.getBoolValue())
+    if (KnownDerefBytes.uge(Size))
+      if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT))
+        return ::isAligned(V, Align, DL);
+
+  // If we don't know, assume the worst.
+ return false; } bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, @@ -142,11 +324,10 @@ if (!Ty->isSized()) return false; - SmallPtrSet Visited; return ::isDereferenceableAndAlignedPointer( V, Align, APInt(DL.getIndexTypeSizeInBits(V->getType()), DL.getTypeStoreSize(Ty)), - DL, CtxI, DT, Visited); + DL, CtxI, DT); } bool llvm::isDereferenceablePointer(const Value *V, Type *Ty, diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -1721,7 +1721,7 @@ // Aligned pointers have trailing zeros - refine Known.Zero set if (V->getType()->isPointerTy()) { - unsigned Align = V->getPointerAlignment(Q.DL); + unsigned Align = getPointerAlignment(V, Q.DL); if (Align) Known.Zero.setLowBits(countTrailingZeros(Align)); } diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -25,6 +25,7 @@ #include "llvm/Analysis/BranchProbabilityInfo.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/InstructionSimplify.h" +#include "llvm/Analysis/Loads.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Analysis/ProfileSummaryInfo.h" @@ -1819,7 +1820,7 @@ // forbidden. GlobalVariable *GV; if ((GV = dyn_cast(Val)) && GV->canIncreaseAlignment() && - GV->getPointerAlignment(*DL) < PrefAlign && + getPointerAlignment(GV, *DL) < PrefAlign && DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2) GV->setAlignment(PrefAlign); @@ -1857,7 +1858,7 @@ case Intrinsic::experimental_widenable_condition: { // Give up on future widening oppurtunties so that we can fold away dead // paths and merge blocks before going into block-local instruction - // selection. + // selection. if (II->use_empty()) { II->eraseFromParent(); return true; diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp --- a/llvm/lib/IR/ConstantFold.cpp +++ b/llvm/lib/IR/ConstantFold.cpp @@ -19,6 +19,7 @@ #include "ConstantFold.h" #include "llvm/ADT/APSInt.h" #include "llvm/ADT/SmallVector.h" +#include "llvm/Analysis/Loads.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" @@ -1127,8 +1128,12 @@ unsigned GVAlign; - if (Module *TheModule = GV->getParent()) { - GVAlign = GV->getPointerAlignment(TheModule->getDataLayout()); + if (GV->getParent()) { + // Deduction of alignment is done via getPointerAlignment but that + // function is not accessible from the IR library so we do only look + // at the annotated alignment. Alternatively we can copy the logic + // for global objects here. + GVAlign = GV->getAlignment(); // If the function alignment is not specified then assume that it // is 4. 
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp --- a/llvm/lib/IR/Value.cpp +++ b/llvm/lib/IR/Value.cpp @@ -15,6 +15,7 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SetVector.h" +#include "llvm/Analysis/ValueTracking.h" #include "llvm/IR/Constant.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" @@ -609,139 +610,6 @@ return stripPointerCastsAndOffsets(this); } -uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL, - bool &CanBeNull) const { - assert(getType()->isPointerTy() && "must be pointer"); - - auto AS = getType()->getPointerAddressSpace(); - bool NullIsValid = (AS != 0); - if (auto *I = dyn_cast(this)) - NullIsValid = NullPointerIsDefined(I->getFunction(), AS); - - uint64_t DerefBytes = 0; - CanBeNull = false; - if (const Argument *A = dyn_cast(this)) { - NullIsValid = NullPointerIsDefined(A->getParent(), AS); - DerefBytes = A->getDereferenceableBytes(); - if (DerefBytes == 0 && (A->hasByValAttr() || A->hasStructRetAttr())) { - Type *PT = cast(A->getType())->getElementType(); - if (PT->isSized()) - DerefBytes = DL.getTypeStoreSize(PT); - } - if (DerefBytes == 0) { - DerefBytes = A->getDereferenceableOrNullBytes(); - CanBeNull = true; - } - } else if (const auto *Call = dyn_cast(this)) { - DerefBytes = Call->getDereferenceableBytes(AttributeList::ReturnIndex); - if (DerefBytes == 0) { - DerefBytes = - Call->getDereferenceableOrNullBytes(AttributeList::ReturnIndex); - CanBeNull = true; - } - } else if (const LoadInst *LI = dyn_cast(this)) { - if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) { - ConstantInt *CI = mdconst::extract(MD->getOperand(0)); - DerefBytes = CI->getLimitedValue(); - } - if (DerefBytes == 0) { - if (MDNode *MD = - LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) { - ConstantInt *CI = mdconst::extract(MD->getOperand(0)); - DerefBytes = CI->getLimitedValue(); - } - CanBeNull = true; - } - } else if (auto *IP = dyn_cast(this)) { - if (MDNode *MD = IP->getMetadata(LLVMContext::MD_dereferenceable)) { - ConstantInt *CI = mdconst::extract(MD->getOperand(0)); - DerefBytes = CI->getLimitedValue(); - } - if (DerefBytes == 0) { - if (MDNode *MD = - IP->getMetadata(LLVMContext::MD_dereferenceable_or_null)) { - ConstantInt *CI = mdconst::extract(MD->getOperand(0)); - DerefBytes = CI->getLimitedValue(); - } - CanBeNull = true; - } - } else if (auto *AI = dyn_cast(this)) { - if (!AI->isArrayAllocation()) { - DerefBytes = DL.getTypeStoreSize(AI->getAllocatedType()); - CanBeNull = false; - } - } else if (auto *GV = dyn_cast(this)) { - if (GV->getValueType()->isSized() && !GV->hasExternalWeakLinkage()) { - // TODO: Don't outright reject hasExternalWeakLinkage but set the - // CanBeNull flag. - DerefBytes = DL.getTypeStoreSize(GV->getValueType()); - CanBeNull = false; - } - } - - CanBeNull = !NullIsValid; - return DerefBytes; -} - -unsigned Value::getPointerAlignment(const DataLayout &DL) const { - assert(getType()->isPointerTy() && "must be pointer"); - - unsigned Align = 0; - if (auto *GO = dyn_cast(this)) { - if (isa(GO)) { - MaybeAlign FunctionPtrAlign = DL.getFunctionPtrAlign(); - unsigned Align = FunctionPtrAlign ? 
FunctionPtrAlign->value() : 0; - switch (DL.getFunctionPtrAlignType()) { - case DataLayout::FunctionPtrAlignType::Independent: - return Align; - case DataLayout::FunctionPtrAlignType::MultipleOfFunctionAlign: - return std::max(Align, GO->getAlignment()); - } - } - Align = GO->getAlignment(); - if (Align == 0) { - if (auto *GVar = dyn_cast(GO)) { - Type *ObjectType = GVar->getValueType(); - if (ObjectType->isSized()) { - // If the object is defined in the current Module, we'll be giving - // it the preferred alignment. Otherwise, we have to assume that it - // may only have the minimum ABI alignment. - if (GVar->isStrongDefinitionForLinker()) - Align = DL.getPreferredAlignment(GVar); - else - Align = DL.getABITypeAlignment(ObjectType); - } - } - } - } else if (const Argument *A = dyn_cast(this)) { - Align = A->getParamAlignment(); - - if (!Align && A->hasStructRetAttr()) { - // An sret parameter has at least the ABI alignment of the return type. - Type *EltTy = cast(A->getType())->getElementType(); - if (EltTy->isSized()) - Align = DL.getABITypeAlignment(EltTy); - } - } else if (const AllocaInst *AI = dyn_cast(this)) { - Align = AI->getAlignment(); - if (Align == 0) { - Type *AllocatedType = AI->getAllocatedType(); - if (AllocatedType->isSized()) - Align = DL.getPrefTypeAlignment(AllocatedType); - } - } else if (const auto *Call = dyn_cast(this)) { - Align = Call->getRetAlignment(); - if (Align == 0 && Call->getCalledFunction()) - Align = Call->getCalledFunction()->getAttributes().getRetAlignment(); - } else if (const LoadInst *LI = dyn_cast(this)) - if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) { - ConstantInt *CI = mdconst::extract(MD->getOperand(0)); - Align = CI->getLimitedValue(); - } - - return Align; -} - const Value *Value::DoPHITranslation(const BasicBlock *CurBB, const BasicBlock *PredBB) const { auto *PN = dyn_cast(this); diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td @@ -2188,7 +2188,7 @@ def alignedglobal : PatLeaf<(iPTR iPTR:$label), [{ if (auto *G = dyn_cast(N)) { const DataLayout &DL = MF->getDataLayout(); - unsigned Align = G->getGlobal()->getPointerAlignment(DL); + unsigned Align = getPointerAlignment(G->getGlobal(), DL); return Align >= 4 && G->getOffset() % 4 == 0; } if (auto *C = dyn_cast(N)) diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp --- a/llvm/lib/Transforms/IPO/Attributor.cpp +++ b/llvm/lib/Transforms/IPO/Attributor.cpp @@ -2059,7 +2059,7 @@ // Use IR information if we did not strip anything. // TODO: track globally. bool CanBeNull; - DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull); + DerefBytes = getPointerDereferenceableBytes(Base, DL, CanBeNull); T.GlobalState.indicatePessimisticFixpoint(); } else { const DerefState &DS = static_cast(AA.getState()); @@ -2208,7 +2208,7 @@ const auto &AA = A.getAAFor(*this, IRPosition::value(V)); if (!Stripped && this == &AA) { // Use only IR information if we did not strip anything. - T.takeKnownMaximum(V.getPointerAlignment(DL)); + T.takeKnownMaximum(getPointerAlignment(&V, DL)); T.indicatePessimisticFixpoint(); } else { // Use abstract attribute information. 
diff --git a/llvm/test/Transforms/InstCombine/constant-fold-gep.ll b/llvm/test/Transforms/InstCombine/constant-fold-gep.ll --- a/llvm/test/Transforms/InstCombine/constant-fold-gep.ll +++ b/llvm/test/Transforms/InstCombine/constant-fold-gep.ll @@ -17,7 +17,7 @@ store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 2), align 4 ; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 1, i64 0), align 4 store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 3), align 4 -; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 1, i64 1), align 4 +; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 1, i64 1), align 16 store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 4), align 4 ; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 1, i64 2), align 4 store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 5), align 4 @@ -25,11 +25,11 @@ store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 6), align 4 ; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 1, i32 0, i64 1), align 4 store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 7), align 4 -; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 1, i32 0, i64 2), align 8 +; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 1, i32 0, i64 2), align 16 store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 8), align 4 ; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 1, i32 1, i64 0), align 4 store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 9), align 4 -; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 1, i32 1, i64 1), align 4 +; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 1, i32 1, i64 1), align 8 store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 10), align 4 ; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 1, i32 1, i64 2), align 4 store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 11), align 4 @@ -41,7 +41,7 @@ store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 14), align 8 ; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 2, i32 1, i64 0), align 8 store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 15), align 8 -; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 2, i32 1, i64 1), align 8 +; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 2, i32 1, i64 1), align 16 store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 16), align 8 ; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X], [3 
x %struct.X]* @Y, i64 0, i64 2, i32 1, i64 2), align 8
   store i32 1, i32* getelementptr ([3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 17), align 8
diff --git a/llvm/unittests/IR/FunctionTest.cpp b/llvm/unittests/IR/FunctionTest.cpp
--- a/llvm/unittests/IR/FunctionTest.cpp
+++ b/llvm/unittests/IR/FunctionTest.cpp
@@ -8,6 +8,7 @@
 #include "llvm/IR/Function.h"
 #include "llvm/IR/Module.h"
+#include "llvm/Analysis/Loads.h"
 #include "gtest/gtest.h"
 
 using namespace llvm;
@@ -143,23 +144,23 @@
   FunctionType *FuncType(FunctionType::get(VoidType, false));
   std::unique_ptr<Function> Func(Function::Create(
       FuncType, GlobalValue::ExternalLinkage));
-  EXPECT_EQ(0U, Func->getPointerAlignment(DataLayout("")));
-  EXPECT_EQ(1U, Func->getPointerAlignment(DataLayout("Fi8")));
-  EXPECT_EQ(1U, Func->getPointerAlignment(DataLayout("Fn8")));
-  EXPECT_EQ(2U, Func->getPointerAlignment(DataLayout("Fi16")));
-  EXPECT_EQ(2U, Func->getPointerAlignment(DataLayout("Fn16")));
-  EXPECT_EQ(4U, Func->getPointerAlignment(DataLayout("Fi32")));
-  EXPECT_EQ(4U, Func->getPointerAlignment(DataLayout("Fn32")));
+  EXPECT_EQ(0U, getPointerAlignment(Func.get(), DataLayout("")));
+  EXPECT_EQ(1U, getPointerAlignment(Func.get(), DataLayout("Fi8")));
+  EXPECT_EQ(1U, getPointerAlignment(Func.get(), DataLayout("Fn8")));
+  EXPECT_EQ(2U, getPointerAlignment(Func.get(), DataLayout("Fi16")));
+  EXPECT_EQ(2U, getPointerAlignment(Func.get(), DataLayout("Fn16")));
+  EXPECT_EQ(4U, getPointerAlignment(Func.get(), DataLayout("Fi32")));
+  EXPECT_EQ(4U, getPointerAlignment(Func.get(), DataLayout("Fn32")));
 
   Func->setAlignment(4U);
-  EXPECT_EQ(0U, Func->getPointerAlignment(DataLayout("")));
-  EXPECT_EQ(1U, Func->getPointerAlignment(DataLayout("Fi8")));
-  EXPECT_EQ(4U, Func->getPointerAlignment(DataLayout("Fn8")));
-  EXPECT_EQ(2U, Func->getPointerAlignment(DataLayout("Fi16")));
-  EXPECT_EQ(4U, Func->getPointerAlignment(DataLayout("Fn16")));
-  EXPECT_EQ(4U, Func->getPointerAlignment(DataLayout("Fi32")));
-  EXPECT_EQ(4U, Func->getPointerAlignment(DataLayout("Fn32")));
+  EXPECT_EQ(0U, getPointerAlignment(Func.get(), DataLayout("")));
+  EXPECT_EQ(1U, getPointerAlignment(Func.get(), DataLayout("Fi8")));
+  EXPECT_EQ(4U, getPointerAlignment(Func.get(), DataLayout("Fn8")));
+  EXPECT_EQ(2U, getPointerAlignment(Func.get(), DataLayout("Fi16")));
+  EXPECT_EQ(4U, getPointerAlignment(Func.get(), DataLayout("Fn16")));
+  EXPECT_EQ(4U, getPointerAlignment(Func.get(), DataLayout("Fi32")));
+  EXPECT_EQ(4U, getPointerAlignment(Func.get(), DataLayout("Fn32")));
 }
 
 } // end namespace
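
For reference, the GCD-based alignment propagation and the constant-offset handling for
dereferenceable bytes documented in the Loads.cpp comments above can be exercised in
isolation. The following standalone sketch is not part of the patch: plain integers stand
in for APInt, std::gcd replaces APIntOps::GreatestCommonDivisor, and gepAlignment /
gepDerefBytes are illustrative names, not LLVM APIs.

// Standalone sketch (assumptions as stated above), C++17.
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <numeric>

// Base = k0 * BaseAlign, so Base + O = gcd(BaseAlign, |O|) * (k0 * k1 + k2),
// i.e. a GEP at constant byte offset O is at least gcd(BaseAlign, |O|)-aligned.
static uint64_t gepAlignment(uint64_t BaseAlign, int64_t O) {
  if (BaseAlign <= 1 || O == 0)
    return BaseAlign;
  return std::gcd(BaseAlign, static_cast<uint64_t>(std::llabs(O)));
}

// If the base is dereferenceable for N bytes:
//  - O == 0: the GEP inherits N.
//  - O > 0 : the GEP is O bytes into the object, so max(0, N - O) bytes remain.
//  - O < 0 : only an inbounds GEP is known to stay inside the same allocation,
//            giving N + |O|; otherwise nothing is known.
static uint64_t gepDerefBytes(uint64_t N, int64_t O, bool InBounds) {
  if (O == 0)
    return N;
  if (O > 0)
    return static_cast<uint64_t>(O) >= N ? 0 : N - static_cast<uint64_t>(O);
  return InBounds ? N + static_cast<uint64_t>(-O) : 0;
}

int main() {
  std::cout << gepAlignment(16, 24) << "\n";         // 8 = gcd(16, 24)
  std::cout << gepAlignment(8, -2) << "\n";          // 2, |-2| treated as 2
  std::cout << gepDerefBytes(64, 16, false) << "\n"; // 48
  std::cout << gepDerefBytes(64, -8, true) << "\n";  // 72
  std::cout << gepDerefBytes(64, -8, false) << "\n"; // 0
}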