Index: include/llvm/Analysis/TargetTransformInfoImpl.h =================================================================== --- include/llvm/Analysis/TargetTransformInfoImpl.h +++ include/llvm/Analysis/TargetTransformInfoImpl.h @@ -421,7 +421,7 @@ // Assumes the address space is 0 when Ptr is nullptr. unsigned AS = (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace()); - auto GTI = gep_type_begin(PointerType::get(PointeeType, AS), Operands); + auto GTI = gep_type_begin(PointeeType, AS, Operands); for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) { // We assume that the cost of Scalar GEP with constant index and the // cost of Vector GEP with splat constant index are the same. Index: include/llvm/IR/DataLayout.h =================================================================== --- include/llvm/IR/DataLayout.h +++ include/llvm/IR/DataLayout.h @@ -441,8 +441,9 @@ /// \brief Returns the offset from the beginning of the type for the specified /// indices. /// + /// Note that this takes the element type, not the pointer type. /// This is used to implement getelementptr. - uint64_t getIndexedOffset(Type *Ty, ArrayRef<Value *> Indices) const; + uint64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef<Value *> Indices) const; /// \brief Returns a StructLayout object, indicating the alignment of the /// struct, its size, and the offsets of its fields. 
Index: include/llvm/IR/GetElementPtrTypeIterator.h =================================================================== --- include/llvm/IR/GetElementPtrTypeIterator.h +++ include/llvm/IR/GetElementPtrTypeIterator.h @@ -33,12 +33,6 @@ generic_gep_type_iterator() {} public: - static generic_gep_type_iterator begin(Type *Ty, ItTy It) { - generic_gep_type_iterator I; - I.CurTy.setPointer(Ty); - I.OpIt = It; - return I; - } static generic_gep_type_iterator begin(Type *Ty, unsigned AddrSpace, ItTy It) { generic_gep_type_iterator I; @@ -125,13 +119,13 @@ template<typename T> inline generic_gep_type_iterator<const T *> - gep_type_begin(Type *Op0, ArrayRef<const T *> A) { - return generic_gep_type_iterator<const T *>::begin(Op0, A.begin()); + gep_type_begin(Type *Op0, unsigned AS, ArrayRef<const T *> A) { + return generic_gep_type_iterator<const T *>::begin(Op0, AS, A.begin()); } template<typename T> inline generic_gep_type_iterator<const T *> - gep_type_end(Type * /*Op0*/, ArrayRef<const T *> A) { + gep_type_end(Type * /*Op0*/, unsigned /*AS*/, ArrayRef<const T *> A) { return generic_gep_type_iterator<const T *>::end(A.end()); } } // end namespace llvm Index: lib/Analysis/ConstantFolding.cpp =================================================================== --- lib/Analysis/ConstantFolding.cpp +++ lib/Analysis/ConstantFolding.cpp @@ -763,8 +763,8 @@ unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy); APInt Offset = APInt(BitWidth, - DL.getIndexedOffset( - Ptr->getType(), + DL.getIndexedOffsetInType( + SrcTy, makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1))); Ptr = StripPtrCastKeepAS(Ptr, SrcTy); @@ -783,7 +783,8 @@ break; Ptr = cast<Constant>(GEP->getOperand(0)); - Offset += APInt(BitWidth, DL.getIndexedOffset(Ptr->getType(), NestedOps)); + SrcTy = GEP->getSourceElementType(); + Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcTy, NestedOps)); Ptr = StripPtrCastKeepAS(Ptr, SrcTy); } Index: lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp =================================================================== --- lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp +++ 
lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp @@ -192,14 +192,14 @@ addToAccelTable = true; // GV is a merged global. DIELoc *Loc = new (DIEValueAllocator) DIELoc; - Value *Ptr = CE->getOperand(0); - MCSymbol *Sym = Asm->getSymbol(cast<GlobalValue>(Ptr)); + auto *Ptr = cast<GlobalValue>(CE->getOperand(0)); + MCSymbol *Sym = Asm->getSymbol(Ptr); DD->addArangeLabel(SymbolCU(this, Sym)); addOpAddress(*Loc, Sym); addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_constu); SmallVector<Value *, 3> Idx(CE->op_begin() + 1, CE->op_end()); addUInt(*Loc, dwarf::DW_FORM_udata, - Asm->getDataLayout().getIndexedOffset(Ptr->getType(), Idx)); + Asm->getDataLayout().getIndexedOffsetInType(Ptr->getValueType(), Idx)); addUInt(*Loc, dwarf::DW_FORM_data1, dwarf::DW_OP_plus); addBlock(*VariableDIE, dwarf::DW_AT_location, Loc); } Index: lib/IR/DataLayout.cpp =================================================================== --- lib/IR/DataLayout.cpp +++ lib/IR/DataLayout.cpp @@ -723,36 +723,33 @@ return Max != LegalIntWidths.end() ? *Max : 0; } -uint64_t DataLayout::getIndexedOffset(Type *ptrTy, - ArrayRef<Value *> Indices) const { - Type *Ty = ptrTy; - assert(Ty->isPointerTy() && "Illegal argument for getIndexedOffset()"); +uint64_t DataLayout::getIndexedOffsetInType(Type *ElemTy, + ArrayRef<Value *> Indices) const { uint64_t Result = 0; + // We can use 0 as the address space as we don't need + // to get pointer types back from gep_type_iterator. 
+ unsigned AS = 0; generic_gep_type_iterator<Value *const *> - TI = gep_type_begin(ptrTy, Indices); - for (unsigned CurIDX = 0, EndIDX = Indices.size(); CurIDX != EndIDX; - ++CurIDX, ++TI) { - if (StructType *STy = dyn_cast<StructType>(*TI)) { - assert(Indices[CurIDX]->getType() == - Type::getInt32Ty(ptrTy->getContext()) && + GTI = gep_type_begin(ElemTy, AS, Indices), + GTE = gep_type_end(ElemTy, AS, Indices); + for (; GTI != GTE; ++GTI) { + Value *Idx = GTI.getOperand(); + if (StructType *STy = dyn_cast<StructType>(*GTI)) { + assert(Idx->getType() == + Type::getInt32Ty(ElemTy->getContext()) && "Illegal struct idx"); - unsigned FieldNo = cast<ConstantInt>(Indices[CurIDX])->getZExtValue(); + unsigned FieldNo = cast<ConstantInt>(Idx)->getZExtValue(); // Get structure layout information... const StructLayout *Layout = getStructLayout(STy); // Add in the offset, as calculated by the structure layout info... Result += Layout->getElementOffset(FieldNo); - - // Update Ty to refer to current element - Ty = STy->getElementType(FieldNo); } else { - // Update Ty to refer to current element - Ty = cast<SequentialType>(Ty)->getElementType(); - + Type *Ty = GTI.getIndexedType(); // Get the array index and the size of each array element. - if (int64_t arrayIdx = cast<ConstantInt>(Indices[CurIDX])->getSExtValue()) + if (int64_t arrayIdx = cast<ConstantInt>(Idx)->getSExtValue()) Result += (uint64_t)arrayIdx * getTypeAllocSize(Ty); } } Index: lib/Transforms/Scalar/ScalarReplAggregates.cpp =================================================================== --- lib/Transforms/Scalar/ScalarReplAggregates.cpp +++ lib/Transforms/Scalar/ScalarReplAggregates.cpp @@ -524,8 +524,8 @@ HadDynamicAccess = true; } else GEPNonConstantIdx = NonConstantIdx; - uint64_t GEPOffset = DL.getIndexedOffset(GEP->getPointerOperandType(), - Indices); + uint64_t GEPOffset = DL.getIndexedOffsetInType(GEP->getSourceElementType(), + Indices); // See if all uses can be converted. 
if (!CanConvertToScalar(GEP, Offset+GEPOffset, GEPNonConstantIdx)) return false; @@ -619,8 +619,8 @@ GEPNonConstantIdx = Indices.pop_back_val(); } else GEPNonConstantIdx = NonConstantIdx; - uint64_t GEPOffset = DL.getIndexedOffset(GEP->getPointerOperandType(), - Indices); + uint64_t GEPOffset = DL.getIndexedOffsetInType(GEP->getSourceElementType(), + Indices); ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8, GEPNonConstantIdx); GEP->eraseFromParent(); continue; @@ -1736,7 +1736,7 @@ Indices.pop_back(); const DataLayout &DL = GEPI->getModule()->getDataLayout(); - Offset += DL.getIndexedOffset(GEPI->getPointerOperandType(), Indices); + Offset += DL.getIndexedOffsetInType(GEPI->getSourceElementType(), Indices); if (!TypeHasComponent(Info.AI->getAllocatedType(), Offset, NonConstantIdxSize, DL)) MarkUnsafe(Info, GEPI); @@ -2052,7 +2052,7 @@ Value* NonConstantIdx = nullptr; if (!GEPI->hasAllConstantIndices()) NonConstantIdx = Indices.pop_back_val(); - Offset += DL.getIndexedOffset(GEPI->getPointerOperandType(), Indices); + Offset += DL.getIndexedOffsetInType(GEPI->getSourceElementType(), Indices); RewriteForScalarRepl(GEPI, AI, Offset, NewElts);