diff --git a/llvm/include/llvm/IR/Operator.h b/llvm/include/llvm/IR/Operator.h
--- a/llvm/include/llvm/IR/Operator.h
+++ b/llvm/include/llvm/IR/Operator.h
@@ -571,6 +571,11 @@
   bool accumulateConstantOffset(
       const DataLayout &DL, APInt &Offset,
       function_ref<bool(Value &, APInt &)> ExternalAnalysis = nullptr) const;
+
+  static bool accumulateConstantOffset(
+      Type *SourceType, ArrayRef<const Value *> Index, const DataLayout &DL,
+      APInt &Offset,
+      function_ref<bool(Value &, APInt &)> ExternalAnalysis = nullptr);
 };
 
 class PtrToIntOperator
diff --git a/llvm/lib/IR/Operator.cpp b/llvm/lib/IR/Operator.cpp
--- a/llvm/lib/IR/Operator.cpp
+++ b/llvm/lib/IR/Operator.cpp
@@ -61,10 +61,17 @@
 bool GEPOperator::accumulateConstantOffset(
     const DataLayout &DL, APInt &Offset,
     function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
-  assert(Offset.getBitWidth() ==
-             DL.getIndexSizeInBits(getPointerAddressSpace()) &&
-         "The offset bit width does not match DL specification.");
-
+  assert(Offset.getBitWidth() ==
+             DL.getIndexSizeInBits(getPointerAddressSpace()) &&
+         "The offset bit width does not match DL specification.");
+  SmallVector<const Value *> Index(value_op_begin() + 1, value_op_end());
+  return GEPOperator::accumulateConstantOffset(getSourceElementType(), Index,
+                                               DL, Offset, ExternalAnalysis);
+}
+
+bool GEPOperator::accumulateConstantOffset(
+    Type *SourceType, ArrayRef<const Value *> Index, const DataLayout &DL,
+    APInt &Offset, function_ref<bool(Value &, APInt &)> ExternalAnalysis) {
   bool UsedExternalAnalysis = false;
   auto AccumulateOffset = [&](APInt Index, uint64_t Size) -> bool {
     Index = Index.sextOrTrunc(Offset.getBitWidth());
@@ -85,9 +92,10 @@
     }
     return true;
   };
-
-  for (gep_type_iterator GTI = gep_type_begin(this), GTE = gep_type_end(this);
-       GTI != GTE; ++GTI) {
+  auto begin = generic_gep_type_iterator<decltype(Index.begin())>::begin(
+      SourceType, Index.begin());
+  auto end = generic_gep_type_iterator<decltype(Index.end())>::end(Index.end());
+  for (auto GTI = begin, GTE = end; GTI != GTE; ++GTI) {
     // Scalable vectors are multiplied by a runtime constant.
     bool ScalableType = false;
     if (isa<ScalableVectorType>(GTI.getIndexedType()))
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -3382,13 +3382,11 @@
       LoadInst *Load =
           IRB.CreateAlignedLoad(Ty, GEP, Alignment, Name + ".load");
 
-      // Make a temporary GEP to compute the offset in case its constant folded
-      auto GEPToCompute = GetElementPtrInst::Create(BaseTy, Ptr, GEPIndices);
       APInt Offset(
-          DL.getIndexSizeInBits(GEPToCompute->getPointerAddressSpace()), 0);
-      if (AATags && GEPToCompute->accumulateConstantOffset(DL, Offset))
+          DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace()), 0);
+      if (AATags &&
+          GEPOperator::accumulateConstantOffset(BaseTy, GEPIndices, DL, Offset))
         Load->setAAMetadata(AATags.shift(Offset.getZExtValue()));
-      delete GEPToCompute;
 
       Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
       LLVM_DEBUG(dbgs() << "          to: " << *Load << "\n");
@@ -3436,13 +3434,12 @@
       StoreInst *Store =
          IRB.CreateAlignedStore(ExtractValue, InBoundsGEP, Alignment);
 
-      // Make a temporary GEP to compute the offset in case its constant folded
-      auto GEPToCompute = GetElementPtrInst::Create(BaseTy, Ptr, GEPIndices);
       APInt Offset(
-          DL.getIndexSizeInBits(GEPToCompute->getPointerAddressSpace()), 0);
-      if (AATags && GEPToCompute->accumulateConstantOffset(DL, Offset))
+          DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace()), 0);
+      if (AATags &&
+          GEPOperator::accumulateConstantOffset(BaseTy, GEPIndices, DL, Offset))
         Store->setAAMetadata(AATags.shift(Offset.getZExtValue()));
-      delete GEPToCompute;
+
      LLVM_DEBUG(dbgs() << "          to: " << *Store << "\n");
     }
   };
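
Usage note (illustrative, not part of the patch): the new static overload lets callers fold GEP indices into a constant byte offset without creating and then deleting a throwaway GetElementPtrInst, which is exactly what the SROA change above relies on. The sketch below is a minimal, hypothetical example of calling the overload directly; the helper name offsetOfSecondField, the struct type, and the index list are invented for illustration and a default-constructed 64-bit-style DataLayout is assumed.

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

// Hypothetical helper: compute the byte offset of field 1 of { i32, i64 }
// from raw GEP indices, without materializing a GetElementPtrInst.
static uint64_t offsetOfSecondField(LLVMContext &Ctx, const DataLayout &DL) {
  Type *I32 = Type::getInt32Ty(Ctx);
  Type *I64 = Type::getInt64Ty(Ctx);
  StructType *STy = StructType::get(Ctx, {I32, I64});

  // Indices equivalent to: getelementptr %STy, %STy* %p, i32 0, i32 1
  SmallVector<const Value *, 2> Indices = {ConstantInt::get(I32, 0),
                                           ConstantInt::get(I32, 1)};

  APInt Offset(DL.getIndexSizeInBits(/*AddressSpace=*/0), 0);
  if (GEPOperator::accumulateConstantOffset(STy, Indices, DL, Offset))
    return Offset.getZExtValue(); // 8 with a typical 64-bit data layout.
  return 0;
}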