Index: llvm/trunk/include/llvm/Analysis/TargetTransformInfoImpl.h
===================================================================
--- llvm/trunk/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ llvm/trunk/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -482,10 +482,7 @@
     int64_t BaseOffset = 0;
     int64_t Scale = 0;
 
-    // Assumes the address space is 0 when Ptr is nullptr.
-    unsigned AS =
-        (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
-    auto GTI = gep_type_begin(PointeeType, AS, Operands);
+    auto GTI = gep_type_begin(PointeeType, Operands);
     for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
       // We assume that the cost of Scalar GEP with constant index and the
       // cost of Vector GEP with splat constant index are the same.
@@ -493,7 +490,12 @@
       if (!ConstIdx)
         if (auto Splat = getSplatValue(*I))
           ConstIdx = dyn_cast<ConstantInt>(Splat);
-      if (isa<SequentialType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
+        // For structures the index is always splat or scalar constant
+        assert(ConstIdx && "Unexpected GEP index");
+        uint64_t Field = ConstIdx->getZExtValue();
+        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
+      } else {
         int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
         if (ConstIdx)
           BaseOffset += ConstIdx->getSExtValue() * ElementSize;
@@ -504,17 +506,15 @@
             return TTI::TCC_Basic;
           Scale = ElementSize;
         }
-      } else {
-        StructType *STy = cast<StructType>(*GTI);
-        // For structures the index is always splat or scalar constant
-        assert(ConstIdx && "Unexpected GEP index");
-        uint64_t Field = ConstIdx->getZExtValue();
-        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
       }
     }
 
+    // Assumes the address space is 0 when Ptr is nullptr.
+    unsigned AS =
+        (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
     if (static_cast<T *>(this)->isLegalAddressingMode(
-            PointerType::get(*GTI, AS), const_cast<GlobalValue *>(BaseGV),
+            PointerType::get(Type::getInt8Ty(PointeeType->getContext()), AS),
+            const_cast<GlobalValue *>(BaseGV),
             BaseOffset, HasBaseReg, Scale, AS)) {
       return TTI::TCC_Free;
     }
Index: llvm/trunk/include/llvm/IR/GetElementPtrTypeIterator.h
===================================================================
--- llvm/trunk/include/llvm/IR/GetElementPtrTypeIterator.h
+++ llvm/trunk/include/llvm/IR/GetElementPtrTypeIterator.h
@@ -16,7 +16,7 @@
 #define LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
 
 #include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/Operator.h"
 #include "llvm/IR/User.h"
@@ -33,18 +33,15 @@
                         Type *, ptrdiff_t> super;
 
   ItTy OpIt;
-  PointerIntPair<Type *, 1> CurTy;
-  unsigned AddrSpace;
-
+  PointerUnion<StructType *, Type *> CurTy;
+  enum { Unbounded = -1ull };
+  uint64_t NumElements = Unbounded;
   generic_gep_type_iterator() = default;
 
 public:
-  static generic_gep_type_iterator begin(Type *Ty, unsigned AddrSpace,
-                                         ItTy It) {
+  static generic_gep_type_iterator begin(Type *Ty, ItTy It) {
     generic_gep_type_iterator I;
-    I.CurTy.setPointer(Ty);
-    I.CurTy.setInt(true);
-    I.AddrSpace = AddrSpace;
+    I.CurTy = Ty;
     I.OpIt = It;
     return I;
   }
@@ -63,34 +60,28 @@
     return !operator==(x);
   }
 
-  Type *operator*() const {
-    if (CurTy.getInt())
-      return CurTy.getPointer()->getPointerTo(AddrSpace);
-    return CurTy.getPointer();
-  }
-
+  // FIXME: Make this the iterator's operator*() after the 4.0 release.
+  // operator*() had a different meaning in earlier releases, so we're
+  // temporarily not giving this iterator an operator*() to avoid a subtle
+  // semantics break.
   Type *getIndexedType() const {
-    if (CurTy.getInt())
-      return CurTy.getPointer();
-    CompositeType *CT = cast<CompositeType>(CurTy.getPointer());
-    return CT->getTypeAtIndex(getOperand());
+    if (auto *T = CurTy.dyn_cast<Type *>())
+      return T;
+    return CurTy.get<StructType *>()->getTypeAtIndex(getOperand());
   }
 
-  // This is a non-standard operator->. It allows you to call methods on the
-  // current type directly.
-  Type *operator->() const { return operator*(); }
-
   Value *getOperand() const { return const_cast<Value *>(&**OpIt); }
 
   generic_gep_type_iterator& operator++() {   // Preincrement
-    if (CurTy.getInt()) {
-      CurTy.setInt(false);
-    } else if (CompositeType *CT =
-                   dyn_cast<CompositeType>(CurTy.getPointer())) {
-      CurTy.setPointer(CT->getTypeAtIndex(getOperand()));
-    } else {
-      CurTy.setPointer(nullptr);
-    }
+    Type *Ty = getIndexedType();
+    if (auto *ATy = dyn_cast<ArrayType>(Ty)) {
+      CurTy = ATy->getElementType();
+      NumElements = ATy->getNumElements();
+    } else if (auto *VTy = dyn_cast<VectorType>(Ty)) {
+      CurTy = VTy->getElementType();
+      NumElements = VTy->getNumElements();
+    } else
+      CurTy = dyn_cast<StructType>(Ty);
     ++OpIt;
     return *this;
  }
@@ -98,6 +89,39 @@
   generic_gep_type_iterator operator++(int) { // Postincrement
     generic_gep_type_iterator tmp = *this; ++*this; return tmp;
   }
+
+  // All of the below API is for querying properties of the "outer type", i.e.
+  // the type that contains the indexed type. Most of the time this is just
+  // the type that was visited immediately prior to the indexed type, but for
+  // the first element this is an unbounded array of the GEP's source element
+  // type, for which there is no clearly corresponding IR type (we've
+  // historically used a pointer type as the outer type in this case, but
+  // pointers will soon lose their element type).
+  //
+  // FIXME: Most current users of this class are just interested in byte
+  // offsets (a few need to know whether the outer type is a struct because
+  // they are trying to replace a constant with a variable, which is only
+  // legal for arrays, e.g. canReplaceOperandWithVariable in SimplifyCFG.cpp);
+  // we should provide a more minimal API here that exposes not much more than
+  // that.
+
+  bool isStruct() const { return CurTy.is<StructType *>(); }
+  bool isSequential() const { return CurTy.is<Type *>(); }
+
+  StructType *getStructType() const { return CurTy.get<StructType *>(); }
+
+  StructType *getStructTypeOrNull() const {
+    return CurTy.dyn_cast<StructType *>();
+  }
+
+  bool isBoundedSequential() const {
+    return isSequential() && NumElements != Unbounded;
+  }
+
+  uint64_t getSequentialNumElements() const {
+    assert(isBoundedSequential());
+    return NumElements;
+  }
 };

 typedef generic_gep_type_iterator<> gep_type_iterator;

@@ -106,8 +130,6 @@
   auto *GEPOp = cast<GEPOperator>(GEP);
   return gep_type_iterator::begin(
       GEPOp->getSourceElementType(),
-      cast<PointerType>(GEPOp->getPointerOperandType()->getScalarType())
-          ->getAddressSpace(),
       GEP->op_begin() + 1);
 }

@@ -119,8 +141,6 @@
   auto &GEPOp = cast<GEPOperator>(GEP);
   return gep_type_iterator::begin(
       GEPOp.getSourceElementType(),
-      cast<PointerType>(GEPOp.getPointerOperandType()->getScalarType())
-          ->getAddressSpace(),
       GEP.op_begin() + 1);
 }

 template<typename T>
 inline generic_gep_type_iterator<const T *>
-gep_type_begin(Type *Op0, unsigned AS, ArrayRef<T *> A) {
-  return generic_gep_type_iterator<const T *>::begin(Op0, AS, A.begin());
+gep_type_begin(Type *Op0, ArrayRef<T *> A) {
+  return generic_gep_type_iterator<const T *>::begin(Op0, A.begin());
 }

 template<typename T>
 inline generic_gep_type_iterator<const T *>
-gep_type_end(Type * /*Op0*/, unsigned /*AS*/, ArrayRef<T *> A) {
+gep_type_end(Type * /*Op0*/, ArrayRef<T *> A) {
   return generic_gep_type_iterator<const T *>::end(A.end());
 }
Index: llvm/trunk/include/llvm/Transforms/Utils/Local.h
===================================================================
--- llvm/trunk/include/llvm/Transforms/Utils/Local.h
+++ llvm/trunk/include/llvm/Transforms/Utils/Local.h
@@ -217,7 +217,7 @@
       continue;

     // Handle a struct index, which adds its field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       if (OpC->getType()->isVectorTy())
         OpC = OpC->getSplatValue();
Index: llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp
===================================================================
--- llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp
+++ llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp
@@ -412,10 +412,10 @@
     // Assume all GEP operands are constants until proven otherwise.
     bool GepHasConstantOffset = true;
     for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
-         I != E; ++I) {
+         I != E; ++I, ++GTI) {
       const Value *Index = *I;
       // Compute the (potentially symbolic) offset in bytes for this index.
-      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
         // For a struct, add the member offset.
         unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
         if (FieldNo == 0)
@@ -431,13 +431,13 @@
         if (CIdx->isZero())
           continue;
         Decomposed.OtherOffset +=
-          DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
+          DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
         continue;
       }

       GepHasConstantOffset = false;

-      uint64_t Scale = DL.getTypeAllocSize(*GTI);
+      uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
       unsigned ZExtBits = 0, SExtBits = 0;

       // If the integer type is smaller than the pointer size, it is implicitly
Index: llvm/trunk/lib/Analysis/InlineCost.cpp
===================================================================
--- llvm/trunk/lib/Analysis/InlineCost.cpp
+++ llvm/trunk/lib/Analysis/InlineCost.cpp
@@ -318,7 +318,7 @@
       continue;

     // Handle a struct index, which adds its field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       unsigned ElementIdx = OpC->getZExtValue();
       const StructLayout *SL = DL.getStructLayout(STy);
       Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
Index: llvm/trunk/lib/Analysis/ValueTracking.cpp
===================================================================
--- llvm/trunk/lib/Analysis/ValueTracking.cpp
+++ llvm/trunk/lib/Analysis/ValueTracking.cpp
@@ -1231,7 +1231,7 @@
   gep_type_iterator GTI = gep_type_begin(I);
   for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
     Value *Index = I->getOperand(i);
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       // Handle struct member offset arithmetic.

       // Handle case when index is vector zeroinitializer
@@ -1730,7 +1730,7 @@
   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
        GTI != GTE; ++GTI) {
     // Struct types are easy -- they must always be indexed by a constant.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
       unsigned ElementIdx = OpC->getZExtValue();
       const StructLayout *SL = Q.DL.getStructLayout(STy);
Index: llvm/trunk/lib/Analysis/VectorUtils.cpp
===================================================================
--- llvm/trunk/lib/Analysis/VectorUtils.cpp
+++ llvm/trunk/lib/Analysis/VectorUtils.cpp
@@ -107,11 +107,11 @@
   while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
     // Find the type we're currently indexing into.
     gep_type_iterator GEPTI = gep_type_begin(Gep);
-    std::advance(GEPTI, LastOperand - 1);
+    std::advance(GEPTI, LastOperand - 2);

     // If it's a type with the same allocation size as the result of the GEP we
     // can peel off the zero index.
-    if (DL.getTypeAllocSize(*GEPTI) != GEPAllocSize)
+    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
       break;
     --LastOperand;
   }
Index: llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp
+++ llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp
@@ -3261,7 +3261,7 @@
     int64_t ConstantOffset = 0;
     gep_type_iterator GTI = gep_type_begin(AddrInst);
     for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
         const StructLayout *SL = DL.getStructLayout(STy);
         unsigned Idx =
             cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
Index: llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -488,7 +488,7 @@
   for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
        GTI != E; ++GTI) {
     const Value *Idx = GTI.getOperand();
-    if (auto *StTy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *StTy = GTI.getStructTypeOrNull()) {
       uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
       if (Field) {
         // N = N + Offset
Index: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3274,7 +3274,7 @@
   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
        GTI != E; ++GTI) {
     const Value *Idx = GTI.getOperand();
-    if (StructType *StTy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *StTy = GTI.getStructTypeOrNull()) {
       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
       if (Field) {
         // N = N + Offset
Index: llvm/trunk/lib/ExecutionEngine/Interpreter/Execution.cpp
===================================================================
--- llvm/trunk/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ llvm/trunk/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -999,7 +999,7 @@
   uint64_t Total = 0;

   for (; I != E; ++I) {
-    if (StructType *STy = dyn_cast<StructType>(*I)) {
+    if (StructType *STy = I.getStructTypeOrNull()) {
       const StructLayout *SLO = getDataLayout().getStructLayout(STy);

       const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
@@ -1007,7 +1007,6 @@
       Total += SLO->getElementOffset(Index);

     } else {
-      SequentialType *ST = cast<SequentialType>(*I);
       // Get the index number for the array... which must be long type...
       GenericValue IdxGV = getOperandValue(I.getOperand(), SF);

@@ -1020,7 +1019,7 @@
         assert(BitWidth == 64 && "Invalid index type for getelementptr");
         Idx = (int64_t)IdxGV.IntVal.getZExtValue();
       }
-      Total += getDataLayout().getTypeAllocSize(ST->getElementType()) * Idx;
+      Total += getDataLayout().getTypeAllocSize(I.getIndexedType()) * Idx;
     }
   }
Index: llvm/trunk/lib/IR/ConstantFold.cpp
===================================================================
--- llvm/trunk/lib/IR/ConstantFold.cpp
+++ llvm/trunk/lib/IR/ConstantFold.cpp
@@ -2019,22 +2019,8 @@
 }

 /// Test whether a given ConstantInt is in-range for a SequentialType.
-static bool isIndexInRangeOfSequentialType(SequentialType *STy,
-                                           const ConstantInt *CI) {
-  // And indices are valid when indexing along a pointer
-  if (isa<PointerType>(STy))
-    return true;
-
-  uint64_t NumElements = 0;
-  // Determine the number of elements in our sequential type.
-  if (auto *ATy = dyn_cast<ArrayType>(STy))
-    NumElements = ATy->getNumElements();
-  else if (auto *VTy = dyn_cast<VectorType>(STy))
-    NumElements = VTy->getNumElements();
-
-  assert((isa<ArrayType>(STy) || NumElements > 0) &&
-         "didn't expect non-array type to have zero elements!");
-
+static bool isIndexInRangeOfArrayType(uint64_t NumElements,
+                                      const ConstantInt *CI) {
   // We cannot bounds check the index if it doesn't fit in an int64_t.
   if (CI->getValue().getActiveBits() > 64)
     return false;
@@ -2089,10 +2075,10 @@
   // getelementptr instructions into a single instruction.
   //
   if (CE->getOpcode() == Instruction::GetElementPtr) {
-    Type *LastTy = nullptr;
+    gep_type_iterator LastI = gep_type_end(CE);
     for (gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
          I != E; ++I)
-      LastTy = *I;
+      LastI = I;

     // We cannot combine indices if doing so would take us outside of an
     // array or vector.  Doing otherwise could trick us if we evaluated such a
@@ -2115,9 +2101,11 @@
     bool PerformFold = false;
     if (Idx0->isNullValue())
       PerformFold = true;
-    else if (SequentialType *STy = dyn_cast_or_null<SequentialType>(LastTy))
+    else if (LastI.isSequential())
       if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx0))
-        PerformFold = isIndexInRangeOfSequentialType(STy, CI);
+        PerformFold =
+            !LastI.isBoundedSequential() ||
+            isIndexInRangeOfArrayType(LastI.getSequentialNumElements(), CI);

     if (PerformFold) {
       SmallVector<Constant *, 16> NewIndices;
@@ -2228,7 +2216,10 @@
         Unknown = true;
         continue;
       }
-      if (isIndexInRangeOfSequentialType(STy, CI))
+      if (isIndexInRangeOfArrayType(isa<ArrayType>(STy)
+                                        ? cast<ArrayType>(STy)->getNumElements()
+                                        : cast<VectorType>(STy)->getNumElements(),
+                                    CI))
         // It's in range, skip to the next index.
         continue;
       if (!isa<ConstantInt>(Prev)) {
Index: llvm/trunk/lib/IR/Constants.cpp
===================================================================
--- llvm/trunk/lib/IR/Constants.cpp
+++ llvm/trunk/lib/IR/Constants.cpp
@@ -1073,19 +1073,14 @@
   gep_type_iterator GEPI = gep_type_begin(this), E = gep_type_end(this);
   User::const_op_iterator OI = std::next(this->op_begin());

-  // Skip the first index, as it has no static limit.
-  ++GEPI;
-  ++OI;
-
   // The remaining indices must be compile-time known integers within the
   // bounds of the corresponding notional static array types.
   for (; GEPI != E; ++GEPI, ++OI) {
     ConstantInt *CI = dyn_cast<ConstantInt>(*OI);
-    if (!CI) return false;
-    if (ArrayType *ATy = dyn_cast<ArrayType>(*GEPI))
-      if (CI->getValue().getActiveBits() > 64 ||
-          CI->getZExtValue() >= ATy->getNumElements())
-        return false;
+    if (!CI) return false;
+    if (GEPI.isBoundedSequential() &&
+        (CI->getValue().getActiveBits() > 64 ||
+         CI->getZExtValue() >= GEPI.getSequentialNumElements()))
+      return false;
   }

   // All the indices checked out.
Index: llvm/trunk/lib/IR/DataLayout.cpp
===================================================================
--- llvm/trunk/lib/IR/DataLayout.cpp
+++ llvm/trunk/lib/IR/DataLayout.cpp
@@ -737,15 +737,12 @@
                                       ArrayRef<Value *> Indices) const {
   int64_t Result = 0;

-  // We can use 0 as the address space as we don't need
-  // to get pointer types back from gep_type_iterator.
-  unsigned AS = 0;
   generic_gep_type_iterator<Value *const *>
-      GTI = gep_type_begin(ElemTy, AS, Indices),
-      GTE = gep_type_end(ElemTy, AS, Indices);
+      GTI = gep_type_begin(ElemTy, Indices),
+      GTE = gep_type_end(ElemTy, Indices);
   for (; GTI != GTE; ++GTI) {
     Value *Idx = GTI.getOperand();
-    if (auto *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       assert(Idx->getType()->isIntegerTy(32) && "Illegal struct idx");
       unsigned FieldNo = cast<ConstantInt>(Idx)->getZExtValue();
Index: llvm/trunk/lib/IR/Operator.cpp
===================================================================
--- llvm/trunk/lib/IR/Operator.cpp
+++ llvm/trunk/lib/IR/Operator.cpp
@@ -33,7 +33,7 @@
       continue;

     // Handle a struct index, which adds its field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       unsigned ElementIdx = OpC->getZExtValue();
       const StructLayout *SL = DL.getStructLayout(STy);
       Offset += APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
Index: llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64FastISel.cpp
@@ -557,7 +557,7 @@
   for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
        GTI != E; ++GTI) {
     const Value *Op = GTI.getOperand();
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       const StructLayout *SL = DL.getStructLayout(STy);
       unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
       TmpOffset += SL->getElementOffset(Idx);
@@ -4885,7 +4885,7 @@
   for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
        GTI != E; ++GTI) {
     const Value *Idx = GTI.getOperand();
-    if (auto *StTy = dyn_cast<StructType>(*GTI)) {
+    if (auto *StTy = GTI.getStructTypeOrNull()) {
       unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
       // N = N + Offset
       if (Field)
Index: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7157,8 +7157,8 @@
   case Instruction::GetElementPtr: {
     gep_type_iterator GTI = gep_type_begin(Instr);
     auto &DL = Ext->getModule()->getDataLayout();
-    std::advance(GTI, U.getOperandNo());
-    Type *IdxTy = *GTI;
+    std::advance(GTI, U.getOperandNo()-1);
+    Type *IdxTy = GTI.getIndexedType();
     // This extension will end up with a shift because of the scaling factor.
     // 8-bit sized types have a scaling factor of 1, thus a shift amount of 0.
     // Get the shift amount based on the scaling factor:
Index: llvm/trunk/lib/Target/ARM/ARMFastISel.cpp
===================================================================
--- llvm/trunk/lib/Target/ARM/ARMFastISel.cpp
+++ llvm/trunk/lib/Target/ARM/ARMFastISel.cpp
@@ -733,7 +733,7 @@
   for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
        i != e; ++i, ++GTI) {
     const Value *Op = *i;
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       const StructLayout *SL = DL.getStructLayout(STy);
       unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
       TmpOffset += SL->getElementOffset(Idx);
Index: llvm/trunk/lib/Target/Mips/MipsFastISel.cpp
===================================================================
--- llvm/trunk/lib/Target/Mips/MipsFastISel.cpp
+++ llvm/trunk/lib/Target/Mips/MipsFastISel.cpp
@@ -445,7 +445,7 @@
   for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
        i != e; ++i, ++GTI) {
     const Value *Op = *i;
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       const StructLayout *SL = DL.getStructLayout(STy);
       unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
       TmpOffset += SL->getElementOffset(Idx);
Index: llvm/trunk/lib/Target/PowerPC/PPCFastISel.cpp
===================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCFastISel.cpp
+++ llvm/trunk/lib/Target/PowerPC/PPCFastISel.cpp
@@ -358,7 +358,7 @@
   for (User::const_op_iterator II = U->op_begin() + 1, IE = U->op_end();
        II != IE; ++II, ++GTI) {
     const Value *Op = *II;
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       const StructLayout *SL = DL.getStructLayout(STy);
       unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
       TmpOffset += SL->getElementOffset(Idx);
Index: llvm/trunk/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
===================================================================
--- llvm/trunk/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
+++ llvm/trunk/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
@@ -241,7 +241,7 @@
   for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
        GTI != E; ++GTI) {
     const Value *Op = GTI.getOperand();
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       const StructLayout *SL = DL.getStructLayout(STy);
       unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
       TmpOffset += SL->getElementOffset(Idx);
Index: llvm/trunk/lib/Target/X86/X86FastISel.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86FastISel.cpp
+++ llvm/trunk/lib/Target/X86/X86FastISel.cpp
@@ -936,7 +936,7 @@
   for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
        i != e; ++i, ++GTI) {
     const Value *Op = *i;
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       const StructLayout *SL = DL.getStructLayout(STy);
       Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
       continue;
Index: llvm/trunk/lib/Transforms/IPO/GlobalOpt.cpp
===================================================================
--- llvm/trunk/lib/Transforms/IPO/GlobalOpt.cpp
+++ llvm/trunk/lib/Transforms/IPO/GlobalOpt.cpp
@@ -371,14 +371,14 @@
     ++GEPI;  // Skip over the pointer index.

     // If this is a use of an array allocation, do a bit more checking for sanity.
-    if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
-      uint64_t NumElements = AT->getNumElements();
+    if (GEPI.isSequential()) {
       ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));

       // Check to make sure that index falls within the array.  If not,
      // something funny is going on, so we won't do the optimization.
      //
-      if (Idx->getZExtValue() >= NumElements)
+      if (GEPI.isBoundedSequential() &&
+          Idx->getZExtValue() >= GEPI.getSequentialNumElements())
        return false;

      // We cannot scalar repl this level of the array unless any array
@@ -391,19 +391,13 @@
      for (++GEPI;  // Skip array index.
           GEPI != E;
           ++GEPI) {
-        uint64_t NumElements;
-        if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
-          NumElements = SubArrayTy->getNumElements();
-        else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
-          NumElements = SubVectorTy->getNumElements();
-        else {
-          assert((*GEPI)->isStructTy() &&
-                 "Indexed GEP type is not array, vector, or struct!");
+        if (GEPI.isStruct())
          continue;
-        }

        ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
-        if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
+        if (!IdxVal ||
+            (GEPI.isBoundedSequential() &&
+             IdxVal->getZExtValue() >= GEPI.getSequentialNumElements()))
          return false;
      }
    }
Index: llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp
===================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -517,7 +517,7 @@
      if (CI->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
        Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
@@ -547,7 +547,7 @@
      if (CI->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
        Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
Index: llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
===================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ llvm/trunk/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1389,7 +1389,7 @@
  for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
       ++I, ++GTI) {
    // Skip indices into struct types.
-    if (isa<StructType>(*GTI))
+    if (GTI.isStruct())
      continue;

    // Index type should have the same width as IntPtr
@@ -1546,7 +1546,7 @@
    bool EndsWithSequential = false;
    for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
         I != E; ++I)
-      EndsWithSequential = !(*I)->isStructTy();
+      EndsWithSequential = I.isSequential();

    // Can we combine the two pointer arithmetics offsets?
    if (EndsWithSequential) {
Index: llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ llvm/trunk/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -52,7 +52,7 @@
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }
Index: llvm/trunk/lib/Transforms/Scalar/NaryReassociate.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Scalar/NaryReassociate.cpp
+++ llvm/trunk/lib/Transforms/Scalar/NaryReassociate.cpp
@@ -281,9 +281,10 @@
    return nullptr;

  gep_type_iterator GTI = gep_type_begin(*GEP);
-  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
-    if (isa<SequentialType>(*GTI++)) {
-      if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1, *GTI)) {
+  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+    if (GTI.isSequential()) {
+      if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1,
+                                                  GTI.getIndexedType())) {
        return NewGEP;
      }
    }
Index: llvm/trunk/lib/Transforms/Scalar/SROA.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Scalar/SROA.cpp
+++ llvm/trunk/lib/Transforms/Scalar/SROA.cpp
@@ -692,7 +692,7 @@
        break;

      // Handle a struct index, which adds its field offset to the pointer.
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
        unsigned ElementIdx = OpC->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        GEPOffset +=
Index: llvm/trunk/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ llvm/trunk/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -722,7 +722,7 @@
  for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
       I != E; ++I, ++GTI) {
    // Skip struct member indices which must be i32.
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
      if ((*I)->getType() != IntPtrTy) {
        *I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP);
        Changed = true;
@@ -739,7 +739,7 @@
  int64_t AccumulativeByteOffset = 0;
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
      // Tries to extract a constant offset from this GEP index.
      int64_t ConstantOffset =
          ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP, DT);
@@ -752,7 +752,7 @@
            ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
      }
    } else if (LowerGEP) {
-      StructType *StTy = cast<StructType>(*GTI);
+      StructType *StTy = GTI.getStructType();
      uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
      // Skip field 0 as the offset is always 0.
      if (Field != 0) {
@@ -787,7 +787,7 @@
  // Create an ugly GEP for each sequential index. We don't create GEPs for
  // structure indices, as they are accumulated in the constant offset index.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
      Value *Idx = Variadic->getOperand(I);
      // Skip zero indices.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
@@ -848,7 +848,7 @@
  // don't create arithmetics for structure indices, as they are accumulated
  // in the constant offset index.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
      Value *Idx = Variadic->getOperand(I);
      // Skip zero indices.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
@@ -928,7 +928,7 @@
  // handle the constant offset and won't need a new structure index.
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
      // Splits this GEP index into a variadic part and a constant offset, and
      // uses the variadic part as the new index.
      Value *OldIdx = GEP->getOperand(I);
Index: llvm/trunk/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
+++ llvm/trunk/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
@@ -490,8 +490,8 @@
    IndexExprs.push_back(SE->getSCEV(*I));

  gep_type_iterator GTI = gep_type_begin(GEP);
-  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
-    if (!isa<SequentialType>(*GTI++))
+  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+    if (GTI.isStruct())
      continue;

    const SCEV *OrigIndexExpr = IndexExprs[I - 1];
@@ -501,7 +501,7 @@
    // indices except this current one.
    const SCEV *BaseExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);
    Value *ArrayIdx = GEP->getOperand(I);
-    uint64_t ElementSize = DL->getTypeAllocSize(*GTI);
+    uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
    if (ArrayIdx->getType()->getIntegerBitWidth() <=
        DL->getPointerSizeInBits(GEP->getAddressSpace())) {
      // Skip factoring if ArrayIdx is wider than the pointer size, because
Index: llvm/trunk/lib/Transforms/Utils/SimplifyCFG.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Utils/SimplifyCFG.cpp
+++ llvm/trunk/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1416,7 +1416,7 @@
    if (OpIdx == 0)
      return true;
    gep_type_iterator It = std::next(gep_type_begin(I), OpIdx - 1);
-    return !It->isStructTy();
+    return It.isSequential();
  }
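
Usage note: the sketch below (not part of the patch) shows how a client walks a
GEP with the revised iterator API. The helper computeConstantGEPOffset is
hypothetical, but the calls it makes -- gep_type_begin, getOperand,
getStructTypeOrNull, getIndexedType -- are the ones this patch introduces or
keeps, and the struct-vs-sequential split mirrors the updated callers above
(BasicAliasAnalysis, InlineCost, SROA, and friends).

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/GetElementPtrTypeIterator.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Accumulate the constant byte offset of a GEP whose indices are all
  // ConstantInts; bail out on the first variable index.
  static bool computeConstantGEPOffset(const DataLayout &DL,
                                       const GetElementPtrInst *GEP,
                                       int64_t &Offset) {
    Offset = 0;
    for (gep_type_iterator GTI = gep_type_begin(GEP), E = gep_type_end(GEP);
         GTI != E; ++GTI) {
      auto *CI = dyn_cast<ConstantInt>(GTI.getOperand());
      if (!CI)
        return false;
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Struct field: add the field offset from the struct layout.
        Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        // Sequential index (array, vector, or the first GEP index): scale the
        // index by the alloc size of the indexed type.
        Offset += CI->getSExtValue() *
                  int64_t(DL.getTypeAllocSize(GTI.getIndexedType()));
      }
    }
    return true;
  }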