Index: llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
===================================================================
--- llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -482,10 +482,7 @@
     int64_t BaseOffset = 0;
     int64_t Scale = 0;
 
-    // Assumes the address space is 0 when Ptr is nullptr.
-    unsigned AS =
-        (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
-    auto GTI = gep_type_begin(PointeeType, AS, Operands);
+    auto GTI = gep_type_begin(PointeeType, Operands);
     for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
       // We assume that the cost of Scalar GEP with constant index and the
       // cost of Vector GEP with splat constant index are the same.
@@ -493,7 +490,12 @@
       if (!ConstIdx)
         if (auto Splat = getSplatValue(*I))
           ConstIdx = dyn_cast<ConstantInt>(Splat);
-      if (isa<SequentialType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
+        // For structures the index is always splat or scalar constant
+        assert(ConstIdx && "Unexpected GEP index");
+        uint64_t Field = ConstIdx->getZExtValue();
+        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
+      } else {
         int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
         if (ConstIdx)
           BaseOffset += ConstIdx->getSExtValue() * ElementSize;
@@ -504,17 +506,15 @@
             return TTI::TCC_Basic;
           Scale = ElementSize;
         }
-      } else {
-        StructType *STy = cast<StructType>(*GTI);
-        // For structures the index is always splat or scalar constant
-        assert(ConstIdx && "Unexpected GEP index");
-        uint64_t Field = ConstIdx->getZExtValue();
-        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
-      }
+      }
     }
 
+    // Assumes the address space is 0 when Ptr is nullptr.
+    unsigned AS =
+        (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
     if (static_cast<T *>(this)->isLegalAddressingMode(
-            PointerType::get(*GTI, AS), const_cast<GlobalValue *>(BaseGV),
+            PointerType::get(Type::getInt8Ty(PointeeType->getContext()), AS),
+            const_cast<GlobalValue *>(BaseGV),
             BaseOffset, HasBaseReg, Scale, AS)) {
       return TTI::TCC_Free;
     }
Index: llvm/include/llvm/IR/GetElementPtrTypeIterator.h
===================================================================
--- llvm/include/llvm/IR/GetElementPtrTypeIterator.h
+++ llvm/include/llvm/IR/GetElementPtrTypeIterator.h
@@ -18,7 +18,7 @@
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/Operator.h"
 #include "llvm/IR/User.h"
-#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
 
 namespace llvm {
   template<typename ItTy = User::const_op_iterator>
@@ -28,17 +28,15 @@
                           Type *, ptrdiff_t> super;
 
     ItTy OpIt;
-    PointerIntPair<Type *, 1> CurTy;
-    unsigned AddrSpace;
+    PointerUnion<StructType *, Type *> CurTy;
+    enum { Unbounded = -1ull };
+    uint64_t ArrayBound = Unbounded;
     generic_gep_type_iterator() {}
 
   public:
-    static generic_gep_type_iterator begin(Type *Ty, unsigned AddrSpace,
-                                           ItTy It) {
+    static generic_gep_type_iterator begin(Type *Ty, ItTy It) {
       generic_gep_type_iterator I;
-      I.CurTy.setPointer(Ty);
-      I.CurTy.setInt(true);
-      I.AddrSpace = AddrSpace;
+      I.CurTy = Ty;
       I.OpIt = It;
       return I;
     }
@@ -55,34 +53,49 @@
       return !operator==(x);
     }
 
-    Type *operator*() const {
-      if (CurTy.getInt())
-        return CurTy.getPointer()->getPointerTo(AddrSpace);
-      return CurTy.getPointer();
+    Type *getIndexedType() const {
+      if (CurTy.is<Type *>())
+        return CurTy.get<Type *>();
+      return CurTy.get<StructType *>()->getTypeAtIndex(getOperand());
     }
 
-    Type *getIndexedType() const {
-      if (CurTy.getInt())
-        return CurTy.getPointer();
-      CompositeType *CT = cast<CompositeType>(CurTy.getPointer());
-      return CT->getTypeAtIndex(getOperand());
+    bool isStruct() const {
+      return CurTy.is<StructType *>();
+    }
+
+    StructType *getStructType() const {
+      return CurTy.get<StructType *>();
+    }
 
-    // This is a non-standard operator->.  It allows you to call methods on the
-    // current type directly.
-    Type *operator->() const { return operator*(); }
+    StructType *getStructTypeOrNull() const {
+      return CurTy.dyn_cast<StructType *>();
+    }
 
     Value *getOperand() const { return const_cast<Value *>(&**OpIt); }
 
+    bool isArray() const {
+      return CurTy.is<Type *>();
+    }
+
+    bool isBoundedArray() const {
+      return isArray() && ArrayBound != Unbounded;
+    }
+
+    uint64_t getArrayBound() const {
+      assert(isBoundedArray());
+      return ArrayBound;
+    }
+
     generic_gep_type_iterator& operator++() {   // Preincrement
-      if (CurTy.getInt()) {
-        CurTy.setInt(false);
-      } else if (CompositeType *CT =
-                   dyn_cast<CompositeType>(CurTy.getPointer())) {
-        CurTy.setPointer(CT->getTypeAtIndex(getOperand()));
-      } else {
-        CurTy.setPointer(nullptr);
-      }
+      Type *Ty = getIndexedType();
+      if (auto *ATy = dyn_cast<ArrayType>(Ty)) {
+        CurTy = ATy->getElementType();
+        ArrayBound = ATy->getNumElements();
+      } else if (auto *VTy = dyn_cast<VectorType>(Ty)) {
+        CurTy = VTy->getElementType();
+        ArrayBound = VTy->getNumElements();
+      } else
+        CurTy = dyn_cast<StructType>(Ty);
       ++OpIt;
       return *this;
     }
@@ -98,8 +111,6 @@
   auto *GEPOp = cast<GEPOperator>(GEP);
   return gep_type_iterator::begin(
       GEPOp->getSourceElementType(),
-      cast<PointerType>(GEPOp->getPointerOperandType()->getScalarType())
-          ->getAddressSpace(),
       GEP->op_begin() + 1);
 }
 inline gep_type_iterator gep_type_end(const User *GEP) {
@@ -109,8 +120,6 @@
   auto &GEPOp = cast<GEPOperator>(GEP);
   return gep_type_iterator::begin(
       GEPOp.getSourceElementType(),
-      cast<PointerType>(GEPOp.getPointerOperandType()->getScalarType())
-          ->getAddressSpace(),
       GEP.op_begin() + 1);
 }
 inline gep_type_iterator gep_type_end(const User &GEP) {
@@ -119,13 +128,13 @@
 
   template<typename T>
   inline generic_gep_type_iterator<const T *>
-  gep_type_begin(Type *Op0, unsigned AS, ArrayRef<T> A) {
-    return generic_gep_type_iterator<const T *>::begin(Op0, AS, A.begin());
+  gep_type_begin(Type *Op0, ArrayRef<T> A) {
+    return generic_gep_type_iterator<const T *>::begin(Op0, A.begin());
   }
 
   template<typename T>
   inline generic_gep_type_iterator<const T *>
-  gep_type_end(Type * /*Op0*/, unsigned /*AS*/, ArrayRef<T> A) {
+  gep_type_end(Type * /*Op0*/, ArrayRef<T> A) {
     return generic_gep_type_iterator<const T *>::end(A.end());
   }
 } // end namespace llvm
Index: llvm/include/llvm/Transforms/Utils/Local.h
===================================================================
--- llvm/include/llvm/Transforms/Utils/Local.h
+++ llvm/include/llvm/Transforms/Utils/Local.h
@@ -217,7 +217,7 @@
       continue;
 
     // Handle a struct index, which adds its field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       if (OpC->getType()->isVectorTy())
         OpC = OpC->getSplatValue();
Index: llvm/lib/Analysis/BasicAliasAnalysis.cpp
===================================================================
--- llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -412,10 +412,10 @@
     // Assume all GEP operands are constants until proven otherwise.
     bool GepHasConstantOffset = true;
     for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
-         I != E; ++I) {
+         I != E; ++I, ++GTI) {
       const Value *Index = *I;
       // Compute the (potentially symbolic) offset in bytes for this index.
-      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
         // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
@@ -431,13 +431,13 @@
         if (CIdx->isZero())
           continue;
         Decomposed.OtherOffset +=
-            DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
+            DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
         continue;
       }
 
       GepHasConstantOffset = false;
 
-      uint64_t Scale = DL.getTypeAllocSize(*GTI);
+      uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
       unsigned ZExtBits = 0, SExtBits = 0;
 
       // If the integer type is smaller than the pointer size, it is implicitly
Index: llvm/lib/Analysis/InlineCost.cpp
===================================================================
--- llvm/lib/Analysis/InlineCost.cpp
+++ llvm/lib/Analysis/InlineCost.cpp
@@ -318,7 +318,7 @@
       continue;
 
     // Handle a struct index, which adds its field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       unsigned ElementIdx = OpC->getZExtValue();
       const StructLayout *SL = DL.getStructLayout(STy);
       Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
Index: llvm/lib/Analysis/ValueTracking.cpp
===================================================================
--- llvm/lib/Analysis/ValueTracking.cpp
+++ llvm/lib/Analysis/ValueTracking.cpp
@@ -1232,7 +1232,7 @@
     gep_type_iterator GTI = gep_type_begin(I);
     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
       Value *Index = I->getOperand(i);
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
         // Handle struct member offset arithmetic.
 
         // Handle case when index is vector zeroinitializer
@@ -1731,7 +1731,7 @@
   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
        GTI != GTE; ++GTI) {
     // Struct types are easy -- they must always be indexed by a constant.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
       unsigned ElementIdx = OpC->getZExtValue();
       const StructLayout *SL = Q.DL.getStructLayout(STy);
Index: llvm/lib/Analysis/VectorUtils.cpp
===================================================================
--- llvm/lib/Analysis/VectorUtils.cpp
+++ llvm/lib/Analysis/VectorUtils.cpp
@@ -107,11 +107,11 @@
   while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
     // Find the type we're currently indexing into.
     gep_type_iterator GEPTI = gep_type_begin(Gep);
-    std::advance(GEPTI, LastOperand - 1);
+    std::advance(GEPTI, LastOperand - 2);
 
     // If it's a type with the same allocation size as the result of the GEP we
     // can peel off the zero index.
-    if (DL.getTypeAllocSize(*GEPTI) != GEPAllocSize)
+    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
       break;
     --LastOperand;
   }
Index: llvm/lib/CodeGen/CodeGenPrepare.cpp
===================================================================
--- llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -3253,7 +3253,7 @@
   int64_t ConstantOffset = 0;
   gep_type_iterator GTI = gep_type_begin(AddrInst);
   for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       const StructLayout *SL = DL.getStructLayout(STy);
       unsigned Idx =
           cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
Index: llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -488,7 +488,7 @@
   for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
        GTI != E; ++GTI) {
     const Value *Idx = GTI.getOperand();
-    if (auto *StTy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *StTy = GTI.getStructTypeOrNull()) {
       uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
       if (Field) {
         // N = N + Offset
Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3275,7 +3275,7 @@
   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
        GTI != E; ++GTI) {
     const Value *Idx = GTI.getOperand();
-    if (StructType *StTy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *StTy = GTI.getStructTypeOrNull()) {
       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
       if (Field) {
         // N = N + Offset
Index: llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
===================================================================
--- llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -999,7 +999,7 @@
   uint64_t Total = 0;
 
   for (; I != E; ++I) {
-    if (StructType *STy = dyn_cast<StructType>(*I)) {
+    if (StructType *STy = I.getStructTypeOrNull()) {
       const StructLayout *SLO = getDataLayout().getStructLayout(STy);
 
       const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
@@ -1007,7 +1006,6 @@
       Total += SLO->getElementOffset(Index);
 
     } else {
-      SequentialType *ST = cast<SequentialType>(*I);
       // Get the index number for the array... which must be long type...
       GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
 
@@ -1020,7 +1019,7 @@
       assert(BitWidth == 64 && "Invalid index type for getelementptr");
       Idx = (int64_t)IdxGV.IntVal.getZExtValue();
     }
-    Total += getDataLayout().getTypeAllocSize(ST->getElementType()) * Idx;
+    Total += getDataLayout().getTypeAllocSize(I.getIndexedType()) * Idx;
   }
 }
Index: llvm/lib/IR/ConstantFold.cpp
===================================================================
--- llvm/lib/IR/ConstantFold.cpp
+++ llvm/lib/IR/ConstantFold.cpp
@@ -2019,22 +2019,8 @@
 }
 
 /// Test whether a given ConstantInt is in-range for a SequentialType.
-static bool isIndexInRangeOfSequentialType(SequentialType *STy,
-                                           const ConstantInt *CI) {
-  // And indices are valid when indexing along a pointer
-  if (isa<PointerType>(STy))
-    return true;
-
-  uint64_t NumElements = 0;
-  // Determine the number of elements in our sequential type.
-  if (auto *ATy = dyn_cast<ArrayType>(STy))
-    NumElements = ATy->getNumElements();
-  else if (auto *VTy = dyn_cast<VectorType>(STy))
-    NumElements = VTy->getNumElements();
-
-  assert((isa<ArrayType>(STy) || NumElements > 0) &&
-         "didn't expect non-array type to have zero elements!");
-
+static bool isIndexInRangeOfArrayType(uint64_t NumElements,
+                                      const ConstantInt *CI) {
   // We cannot bounds check the index if it doesn't fit in an int64_t.
   if (CI->getValue().getActiveBits() > 64)
     return false;
@@ -2089,10 +2075,10 @@
   // getelementptr instructions into a single instruction.
   //
   if (CE->getOpcode() == Instruction::GetElementPtr) {
-    Type *LastTy = nullptr;
+    gep_type_iterator LastI = gep_type_end(CE);
     for (gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
          I != E; ++I)
-      LastTy = *I;
+      LastI = I;
 
     // We cannot combine indices if doing so would take us outside of an
     // array or vector.  Doing otherwise could trick us if we evaluated such a
@@ -2115,9 +2101,10 @@
     bool PerformFold = false;
     if (Idx0->isNullValue())
       PerformFold = true;
-    else if (SequentialType *STy = dyn_cast_or_null<SequentialType>(LastTy))
+    else if (LastI.isArray())
       if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx0))
-        PerformFold = isIndexInRangeOfSequentialType(STy, CI);
+        PerformFold = !LastI.isBoundedArray() ||
+                      isIndexInRangeOfArrayType(LastI.getArrayBound(), CI);
 
     if (PerformFold) {
       SmallVector<Value *, 16> NewIndices;
@@ -2228,7 +2215,10 @@
         Unknown = true;
         continue;
       }
-      if (isIndexInRangeOfSequentialType(STy, CI))
+      if (isIndexInRangeOfArrayType(isa<ArrayType>(STy)
+                                        ? cast<ArrayType>(STy)->getNumElements()
+                                        : cast<VectorType>(STy)->getNumElements(),
+                                    CI))
         // It's in range, skip to the next index.
         continue;
       if (!isa<SequentialType>(Prev)) {
Index: llvm/lib/IR/Constants.cpp
===================================================================
--- llvm/lib/IR/Constants.cpp
+++ llvm/lib/IR/Constants.cpp
@@ -1073,19 +1073,15 @@
   gep_type_iterator GEPI = gep_type_begin(this), E = gep_type_end(this);
   User::const_op_iterator OI = std::next(this->op_begin());
 
-  // Skip the first index, as it has no static limit.
-  ++GEPI;
-  ++OI;
-
   // The remaining indices must be compile-time known integers within the
   // bounds of the corresponding notional static array types.
   for (; GEPI != E; ++GEPI, ++OI) {
     ConstantInt *CI = dyn_cast<ConstantInt>(*OI);
-    if (!CI) return false;
-    if (ArrayType *ATy = dyn_cast<ArrayType>(*GEPI))
-      if (CI->getValue().getActiveBits() > 64 ||
-          CI->getZExtValue() >= ATy->getNumElements())
-        return false;
+    if (!CI ||
+        (GEPI.isBoundedArray() &&
+         (CI->getValue().getActiveBits() > 64 ||
+          CI->getZExtValue() >= GEPI.getArrayBound())))
+      return false;
   }
 
   // All the indices checked out.
Index: llvm/lib/IR/DataLayout.cpp
===================================================================
--- llvm/lib/IR/DataLayout.cpp
+++ llvm/lib/IR/DataLayout.cpp
@@ -737,15 +737,12 @@
                                            ArrayRef<Value *> Indices) const {
   int64_t Result = 0;
 
-  // We can use 0 as the address space as we don't need
-  // to get pointer types back from gep_type_iterator.
-  unsigned AS = 0;
   generic_gep_type_iterator<Value *const *>
-    GTI = gep_type_begin(ElemTy, AS, Indices),
-    GTE = gep_type_end(ElemTy, AS, Indices);
+    GTI = gep_type_begin(ElemTy, Indices),
+    GTE = gep_type_end(ElemTy, Indices);
   for (; GTI != GTE; ++GTI) {
     Value *Idx = GTI.getOperand();
-    if (auto *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       assert(Idx->getType()->isIntegerTy(32) && "Illegal struct idx");
       unsigned FieldNo = cast<ConstantInt>(Idx)->getZExtValue();
Index: llvm/lib/IR/Operator.cpp
===================================================================
--- llvm/lib/IR/Operator.cpp
+++ llvm/lib/IR/Operator.cpp
@@ -33,7 +33,7 @@
       continue;
 
     // Handle a struct index, which adds its field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       unsigned ElementIdx = OpC->getZExtValue();
       const StructLayout *SL = DL.getStructLayout(STy);
       Offset += APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
Index: llvm/lib/Target/AArch64/AArch64FastISel.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -557,7 +557,7 @@
   for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
        GTI != E; ++GTI) {
     const Value *Op = GTI.getOperand();
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
      const StructLayout *SL = DL.getStructLayout(STy);
       unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
       TmpOffset += SL->getElementOffset(Idx);
@@ -4885,7 +4885,7 @@
   for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
        GTI != E; ++GTI) {
     const Value *Idx = GTI.getOperand();
-    if (auto *StTy = dyn_cast<StructType>(*GTI)) {
+    if (auto *StTy = GTI.getStructTypeOrNull()) {
       unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
       // N = N + Offset
       if (Field)
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7112,8 +7112,8 @@
   case Instruction::GetElementPtr: {
     gep_type_iterator GTI = gep_type_begin(Instr);
     auto &DL = Ext->getModule()->getDataLayout();
-    std::advance(GTI, U.getOperandNo());
-    Type *IdxTy = *GTI;
+    std::advance(GTI, U.getOperandNo() - 1);
+    Type *IdxTy = GTI.getIndexedType();
     // This extension will end up with a shift because of the scaling factor.
     // 8-bit sized types have a scaling factor of 1, thus a shift amount of 0.
    // Get the shift amount based on the scaling factor:
Index: llvm/lib/Target/ARM/ARMFastISel.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMFastISel.cpp
+++ llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -733,7 +733,7 @@
   for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
        i != e; ++i, ++GTI) {
     const Value *Op = *i;
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       const StructLayout *SL = DL.getStructLayout(STy);
       unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
       TmpOffset += SL->getElementOffset(Idx);
Index: llvm/lib/Target/Mips/MipsFastISel.cpp
===================================================================
--- llvm/lib/Target/Mips/MipsFastISel.cpp
+++ llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -445,7 +445,7 @@
   for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
        i != e; ++i, ++GTI) {
     const Value *Op = *i;
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       const StructLayout *SL = DL.getStructLayout(STy);
       unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
       TmpOffset += SL->getElementOffset(Idx);
Index: llvm/lib/Target/PowerPC/PPCFastISel.cpp
===================================================================
--- llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -358,7 +358,7 @@
   for (User::const_op_iterator II = U->op_begin() + 1, IE = U->op_end();
        II != IE; ++II, ++GTI) {
     const Value *Op = *II;
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       const StructLayout *SL = DL.getStructLayout(STy);
       unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
       TmpOffset += SL->getElementOffset(Idx);
Index: llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
===================================================================
--- llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
+++ llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
@@ -241,7 +241,7 @@
   for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
        GTI != E; ++GTI) {
     const Value *Op = GTI.getOperand();
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       const StructLayout *SL = DL.getStructLayout(STy);
       unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
       TmpOffset += SL->getElementOffset(Idx);
Index: llvm/lib/Target/X86/X86FastISel.cpp
===================================================================
--- llvm/lib/Target/X86/X86FastISel.cpp
+++ llvm/lib/Target/X86/X86FastISel.cpp
@@ -936,7 +936,7 @@
   for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
        i != e; ++i, ++GTI) {
     const Value *Op = *i;
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       const StructLayout *SL = DL.getStructLayout(STy);
       Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
       continue;
Index: llvm/lib/Transforms/IPO/GlobalOpt.cpp
===================================================================
--- llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -371,14 +371,13 @@
   ++GEPI;  // Skip over the pointer index.
 
   // If this is a use of an array allocation, do a bit more checking for sanity.
-  if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
-    uint64_t NumElements = AT->getNumElements();
+  if (GEPI.isArray()) {
     ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));
 
     // Check to make sure that index falls within the array.  If not,
    // something funny is going on, so we won't do the optimization.
    //
-    if (Idx->getZExtValue() >= NumElements)
+    if (GEPI.isBoundedArray() && Idx->getZExtValue() >= GEPI.getArrayBound())
       return false;
 
     // We cannot scalar repl this level of the array unless any array
@@ -391,19 +390,12 @@
     for (++GEPI; // Skip array index.
          GEPI != E;
          ++GEPI) {
-      uint64_t NumElements;
-      if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
-        NumElements = SubArrayTy->getNumElements();
-      else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
-        NumElements = SubVectorTy->getNumElements();
-      else {
-        assert((*GEPI)->isStructTy() &&
-               "Indexed GEP type is not array, vector, or struct!");
+      if (GEPI.isStruct())
         continue;
-      }
 
       ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
-      if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
+      if (!IdxVal || (GEPI.isBoundedArray() &&
+                      IdxVal->getZExtValue() >= GEPI.getArrayBound()))
         return false;
     }
   }
Index: llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
===================================================================
--- llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -517,7 +517,7 @@
     if (CI->isZero())
       continue;
 
     // Handle a struct index, which adds its field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
     } else {
       uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
@@ -547,7 +547,7 @@
     if (CI->isZero())
       continue;
 
     // Handle a struct index, which adds its field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
     } else {
       uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
Index: llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
===================================================================
--- llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1397,7 +1397,7 @@
   for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
        ++I, ++GTI) {
     // Skip indices into struct types.
-    if (isa<StructType>(*GTI))
+    if (GTI.isStruct())
       continue;
 
     // Index type should have the same width as IntPtr
@@ -1554,7 +1554,7 @@
     bool EndsWithSequential = false;
     for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
          I != E; ++I)
-      EndsWithSequential = !(*I)->isStructTy();
+      EndsWithSequential = I.isArray();
 
     // Can we combine the two pointer arithmetics offsets?
     if (EndsWithSequential) {
Index: llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -52,7 +52,7 @@
     if (OpC->isZero()) continue;  // No offset.
 
     // Handle struct indices, which add their field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
       continue;
     }
Index: llvm/lib/Transforms/Scalar/NaryReassociate.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/NaryReassociate.cpp
+++ llvm/lib/Transforms/Scalar/NaryReassociate.cpp
@@ -282,8 +282,9 @@
   gep_type_iterator GTI = gep_type_begin(*GEP);
-  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
-    if (isa<SequentialType>(*GTI++)) {
-      if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1, *GTI)) {
+  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+    if (GTI.isArray()) {
+      if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1,
+                                                  GTI.getIndexedType())) {
         return NewGEP;
       }
     }
Index: llvm/lib/Transforms/Scalar/SROA.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/SROA.cpp
+++ llvm/lib/Transforms/Scalar/SROA.cpp
@@ -692,7 +692,7 @@
         break;
 
       // Handle a struct index, which adds its field offset to the pointer.
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
        unsigned ElementIdx = OpC->getZExtValue();
         const StructLayout *SL = DL.getStructLayout(STy);
         GEPOffset +=
Index: llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -722,7 +722,7 @@
   for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
        I != E; ++I, ++GTI) {
     // Skip struct member indices which must be i32.
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isArray()) {
       if ((*I)->getType() != IntPtrTy) {
         *I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP);
         Changed = true;
@@ -739,7 +739,7 @@
   int64_t AccumulativeByteOffset = 0;
   gep_type_iterator GTI = gep_type_begin(*GEP);
   for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isArray()) {
       // Tries to extract a constant offset from this GEP index.
       int64_t ConstantOffset =
           ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP, DT);
@@ -752,7 +752,7 @@
             ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
       }
     } else if (LowerGEP) {
-      StructType *StTy = cast<StructType>(*GTI);
+      StructType *StTy = GTI.getStructType();
       uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
       // Skip field 0 as the offset is always 0.
       if (Field != 0) {
@@ -787,7 +787,7 @@
   // Create an ugly GEP for each sequential index. We don't create GEPs for
   // structure indices, as they are accumulated in the constant offset index.
   for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isArray()) {
       Value *Idx = Variadic->getOperand(I);
       // Skip zero indices.
       if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
@@ -848,7 +848,7 @@
   // don't create arithmetics for structure indices, as they are accumulated
   // in the constant offset index.
   for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isArray()) {
       Value *Idx = Variadic->getOperand(I);
       // Skip zero indices.
       if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
@@ -928,7 +928,7 @@
   // handle the constant offset and won't need a new structure index.
   gep_type_iterator GTI = gep_type_begin(*GEP);
   for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isArray()) {
       // Splits this GEP index into a variadic part and a constant offset, and
       // uses the variadic part as the new index.
       Value *OldIdx = GEP->getOperand(I);
Index: llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
+++ llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
@@ -490,8 +490,8 @@
     IndexExprs.push_back(SE->getSCEV(*I));
 
   gep_type_iterator GTI = gep_type_begin(GEP);
-  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
-    if (!isa<SequentialType>(*GTI++))
+  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+    if (GTI.isStruct())
       continue;
 
     const SCEV *OrigIndexExpr = IndexExprs[I - 1];
@@ -501,7 +501,7 @@
     // indices except this current one.
     const SCEV *BaseExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);
     Value *ArrayIdx = GEP->getOperand(I);
-    uint64_t ElementSize = DL->getTypeAllocSize(*GTI);
+    uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
     if (ArrayIdx->getType()->getIntegerBitWidth() <=
         DL->getPointerSizeInBits(GEP->getAddressSpace())) {
       // Skip factoring if ArrayIdx is wider than the pointer size, because
Index: llvm/lib/Transforms/Utils/SimplifyCFG.cpp
===================================================================
--- llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1385,7 +1385,7 @@
     if (OpIdx == 0)
       return true;
     gep_type_iterator It = std::next(gep_type_begin(I), OpIdx - 1);
-    return !It->isStructTy();
+    return It.isArray();
   }
 }
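
Usage note (illustrative; not part of the patch): after this change, callers no
longer dereference the iterator to obtain a pointer-wrapped "current type";
they query it directly with getStructTypeOrNull()/getIndexedType(), mirroring
the hunks above. A minimal sketch of the intended caller pattern, assuming a
DataLayout `DL` and a GetElementPtrInst pointer `GEP` are in scope:

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(GEP), E = gep_type_end(GEP);
       GTI != E; ++GTI) {
    auto *CI = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!CI)
      break; // Variable index; a real caller would fall back to a scaled term.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      // Struct field: look the byte offset up in the struct layout.
      Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
    } else {
      // Array/vector/first index: scale by the indexed type's alloc size.
      Offset += CI->getSExtValue() * DL.getTypeAllocSize(GTI.getIndexedType());
    }
  }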