diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -1050,12 +1050,12 @@
   llvm::Type *OrigTy = constant->getType();
   if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
     return constStructWithPadding(CGM, isPattern, STy, constant);
-  if (auto *STy = dyn_cast<llvm::SequentialType>(OrigTy)) {
+  if (isa<llvm::ArrayType>(OrigTy) || isa<llvm::VectorType>(OrigTy)) {
     llvm::SmallVector<llvm::Constant *, 8> Values;
-    unsigned Size = STy->getNumElements();
+    uint64_t Size = OrigTy->getSequentialNumElements();
     if (!Size)
       return constant;
-    llvm::Type *ElemTy = STy->getElementType();
+    llvm::Type *ElemTy = OrigTy->getSequentialElementType();
     bool ZeroInitializer = constant->isZeroValue();
     llvm::Constant *OpValue, *PaddedOp;
     if (ZeroInitializer) {
diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp
--- a/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/clang/lib/CodeGen/CGExprConstant.cpp
@@ -321,9 +321,10 @@
     replace(Elems, Index, Index + 1,
             llvm::map_range(llvm::seq(0u, CA->getNumOperands()),
                             [&](unsigned Op) { return CA->getOperand(Op); }));
-    if (auto *Seq = dyn_cast<llvm::SequentialType>(CA->getType())) {
+    if (isa<llvm::ArrayType>(CA->getType()) ||
+        isa<llvm::VectorType>(CA->getType())) {
       // Array or vector.
-      CharUnits ElemSize = getSize(Seq->getElementType());
+      CharUnits ElemSize = getSize(CA->getType()->getSequentialElementType());
       replace(
           Offsets, Index, Index + 1,
           llvm::map_range(llvm::seq(0u, CA->getNumOperands()),
diff --git a/llvm/include/llvm/IR/Constants.h b/llvm/include/llvm/IR/Constants.h
--- a/llvm/include/llvm/IR/Constants.h
+++ b/llvm/include/llvm/IR/Constants.h
@@ -44,7 +44,6 @@
 class ArrayType;
 class IntegerType;
 class PointerType;
-class SequentialType;
 class StructType;
 class VectorType;
 template <class ConstantClass> struct ConstantAggrKeyType;
@@ -631,12 +630,6 @@
   /// efficient as getElementAsInteger/Float/Double.
   Constant *getElementAsConstant(unsigned i) const;
 
-  /// Specialize the getType() method to always return a SequentialType, which
-  /// reduces the amount of casting needed in parts of the compiler.
-  inline SequentialType *getType() const {
-    return cast<SequentialType>(Value::getType());
-  }
-
   /// Return the element type of the array/vector.
   Type *getElementType() const;
 
diff --git a/llvm/include/llvm/IR/DerivedTypes.h b/llvm/include/llvm/IR/DerivedTypes.h
--- a/llvm/include/llvm/IR/DerivedTypes.h
+++ b/llvm/include/llvm/IR/DerivedTypes.h
@@ -354,47 +354,19 @@
   return cast<StructType>(this)->getElementType(N);
 }
 
-/// This is the superclass of the array and vector type classes. Both of these
-/// represent "arrays" in memory. The array type represents a specifically sized
-/// array, and the vector type represents a specifically sized array that allows
-/// for use of SIMD instructions. SequentialType holds the common features of
-/// both, which stem from the fact that both lay their components out in memory
-/// identically.
-class SequentialType : public Type {
+/// Class to represent array types.
+class ArrayType : public Type {
   Type *ContainedType;               ///< Storage for the single contained type.
   uint64_t NumElements;
-
-protected:
-  SequentialType(TypeID TID, Type *ElType, uint64_t NumElements)
-    : Type(ElType->getContext(), TID), ContainedType(ElType),
-      NumElements(NumElements) {
-    ContainedTys = &ContainedType;
-    NumContainedTys = 1;
-  }
-
-public:
-  SequentialType(const SequentialType &) = delete;
-  SequentialType &operator=(const SequentialType &) = delete;
-
-  /// For scalable vectors, this will return the minimum number of elements
-  /// in the vector.
-  uint64_t getNumElements() const { return NumElements; }
-  Type *getElementType() const { return ContainedType; }
-
-  /// Methods for support type inquiry through isa, cast, and dyn_cast.
-  static bool classof(const Type *T) {
-    return T->getTypeID() == ArrayTyID || T->getTypeID() == VectorTyID;
-  }
-};
-
-/// Class to represent array types.
-class ArrayType : public SequentialType {
   ArrayType(Type *ElType, uint64_t NumEl);
 
 public:
   ArrayType(const ArrayType &) = delete;
   ArrayType &operator=(const ArrayType &) = delete;
 
+  uint64_t getNumElements() const { return NumElements; }
+  Type *getElementType() const { return ContainedType; }
+
   /// This static method is the primary way to construct an ArrayType
   static ArrayType *get(Type *ElementType, uint64_t NumElements);
 
@@ -412,7 +384,7 @@
 }
 
 /// Class to represent vector types.
-class VectorType : public SequentialType {
+class VectorType : public Type {
   /// A fully specified VectorType is of the form <vscale x n x Ty>. 'n' is the
   /// minimum number of elements of type Ty contained within the vector, and
   /// 'vscale x' indicates that the total element count is an integer multiple
@@ -426,6 +398,9 @@
   /// <vscale x 4 x i32> - a vector containing an unknown integer multiple
   ///                      of 4 i32s
 
+  Type *ContainedType;               ///< Storage for the single contained type.
+  uint64_t NumElements;
+
   VectorType(Type *ElType, unsigned NumEl, bool Scalable = false);
   VectorType(Type *ElType, ElementCount EC);
 
@@ -438,6 +413,11 @@
   VectorType(const VectorType &) = delete;
   VectorType &operator=(const VectorType &) = delete;
 
+  /// For scalable vectors, this will return the minimum number of elements
+  /// in the vector.
+  uint64_t getNumElements() const { return NumElements; }
+  Type *getElementType() const { return ContainedType; }
+
   /// This static method is the primary way to construct an VectorType.
   static VectorType *get(Type *ElementType, ElementCount EC);
   static VectorType *get(Type *ElementType, unsigned NumElements,
@@ -558,6 +538,14 @@
   return cast<VectorType>(this)->getElementCount();
 }
 
+uint64_t Type::getSequentialNumElements() const {
+  if (const ArrayType *ATy = dyn_cast<ArrayType>(this))
+    return ATy->getNumElements();
+  const VectorType *VTy = cast<VectorType>(this);
+  assert(!VTy->isScalable());
+  return VTy->getNumElements();
+}
+
 /// Class to represent pointers.
 class PointerType : public Type {
   explicit PointerType(Type *ElType, unsigned AddrSpace);
diff --git a/llvm/include/llvm/IR/GetElementPtrTypeIterator.h b/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
--- a/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
+++ b/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
@@ -75,9 +75,15 @@
 
   generic_gep_type_iterator& operator++() {   // Preincrement
     Type *Ty = getIndexedType();
-    if (auto *STy = dyn_cast<SequentialType>(Ty)) {
-      CurTy = STy->getElementType();
-      NumElements = STy->getNumElements();
+    if (auto *ATy = dyn_cast<ArrayType>(Ty)) {
+      CurTy = ATy->getElementType();
+      NumElements = ATy->getNumElements();
+    } else if (auto *VTy = dyn_cast<VectorType>(Ty)) {
+      CurTy = VTy->getElementType();
+      if (VTy->isScalable())
+        NumElements = Unbounded;
+      else
+        NumElements = VTy->getNumElements();
     } else
       CurTy = dyn_cast<StructType>(Ty);
     ++OpIt;
diff --git a/llvm/include/llvm/IR/Type.h b/llvm/include/llvm/IR/Type.h
--- a/llvm/include/llvm/IR/Type.h
+++ b/llvm/include/llvm/IR/Type.h
@@ -363,6 +363,8 @@
     return ContainedTys[0];
   }
 
+  inline uint64_t getSequentialNumElements() const;
+
   inline uint64_t getArrayNumElements() const;
 
   Type *getArrayElementType() const {
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -1145,7 +1145,7 @@
       GEP1->getSourceElementType(), IntermediateIndices);
   StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);
 
-  if (isa<SequentialType>(Ty)) {
+  if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
     // We know that:
     // - both GEPs begin indexing from the exact same pointer;
     // - the last indices in both GEPs are constants, indexing into a sequential
@@ -1158,7 +1158,7 @@
     // partially overlap. We also need to check that the loaded size matches
     // the element size, otherwise we could still have overlap.
     const uint64_t ElementSize =
-        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
+        DL.getTypeStoreSize(Ty->getSequentialElementType());
     if (V1Size != ElementSize || V2Size != ElementSize)
       return MayAlias;
 
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -940,8 +940,10 @@
       // Only handle pointers to sized types, not pointers to functions.
      if (!Ty->isSized())
        return nullptr;
-    } else if (auto *ATy = dyn_cast<SequentialType>(Ty)) {
+    } else if (auto *ATy = dyn_cast<ArrayType>(Ty)) {
      Ty = ATy->getElementType();
+    } else if (auto *VTy = dyn_cast<VectorType>(Ty)) {
+      Ty = VTy->getElementType();
    } else {
      // We've reached some non-indexable type.
      break;
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -3519,7 +3519,7 @@
       CurTy = STy->getTypeAtIndex(Index);
     } else {
       // Update CurTy to its element type.
-      CurTy = cast<SequentialType>(CurTy)->getElementType();
+      CurTy = CurTy->getSequentialElementType();
       // For an array, add the element offset, explicitly scaled.
       const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
       // Getelementptr indices are signed.
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -2471,7 +2471,7 @@
       if (Record.empty())
         return error("Invalid record");
 
-      Type *EltTy = cast<SequentialType>(CurTy)->getElementType();
+      Type *EltTy = CurTy->getSequentialElementType();
       if (EltTy->isIntegerTy(8)) {
         SmallVector<uint8_t, 16> Elts(Record.begin(), Record.end());
         if (isa<VectorType>(CurTy))
diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
--- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -2417,7 +2417,7 @@
   } else if (const ConstantDataSequential *CDS =
                  dyn_cast<ConstantDataSequential>(C)) {
     Code = bitc::CST_CODE_DATA;
-    Type *EltTy = CDS->getType()->getElementType();
+    Type *EltTy = CDS->getElementType();
     if (isa<IntegerType>(EltTy)) {
       for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i)
         Record.push_back(CDS->getElementAsInteger(i));
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2476,7 +2476,7 @@
   }
 
   unsigned Size = DL.getTypeAllocSize(CDS->getType());
-  unsigned EmittedSize = DL.getTypeAllocSize(CDS->getType()->getElementType()) *
+  unsigned EmittedSize = DL.getTypeAllocSize(CDS->getElementType()) *
                          CDS->getNumElements();
   assert(EmittedSize <= Size && "Size cannot be less than EmittedSize!");
   if (unsigned Padding = Size - EmittedSize)
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -119,18 +119,9 @@
         Constant::getNullValue(Type::getInt32Ty(DPTy->getContext()));
       IdxList.push_back(Zero);
       Type *ElTy = PTy->getElementType();
-      while (ElTy != DPTy->getElementType()) {
-        if (StructType *STy = dyn_cast<StructType>(ElTy)) {
-          if (STy->getNumElements() == 0) break;
-          ElTy = STy->getElementType(0);
-          IdxList.push_back(Zero);
-        } else if (SequentialType *STy =
-                     dyn_cast<SequentialType>(ElTy)) {
-          ElTy = STy->getElementType();
-          IdxList.push_back(Zero);
-        } else {
-          break;
-        }
+      while (ElTy && ElTy != DPTy->getElementType()) {
+        ElTy = GetElementPtrInst::getIndexedTypeStep(ElTy, (uint64_t)0);
+        IdxList.push_back(Zero);
       }
 
       if (ElTy == DPTy->getElementType())
@@ -936,7 +927,7 @@
   if (StructType *ST = dyn_cast<StructType>(Agg->getType()))
     NumElts = ST->getNumElements();
   else
-    NumElts = cast<SequentialType>(Agg->getType())->getNumElements();
+    NumElts = cast<ArrayType>(Agg->getType())->getNumElements();
 
   SmallVector<Constant*, 32> Result;
   for (unsigned i = 0; i != NumElts; ++i) {
@@ -951,9 +942,7 @@
 
   if (StructType *ST = dyn_cast<StructType>(Agg->getType()))
     return ConstantStruct::get(ST, Result);
-  if (ArrayType *AT = dyn_cast<ArrayType>(Agg->getType()))
-    return ConstantArray::get(AT, Result);
-  return ConstantVector::get(Result);
+  return ConstantArray::get(cast<ArrayType>(Agg->getType()), Result);
 }
 
 Constant *llvm::ConstantFoldUnaryInstruction(unsigned Opcode, Constant *C) {
@@ -2408,12 +2397,12 @@
       // The verify makes sure that GEPs into a struct are in range.
       continue;
     }
-    auto *STy = cast<SequentialType>(Ty);
-    if (isa<VectorType>(STy)) {
+    if (isa<VectorType>(Ty)) {
      // There can be awkward padding in after a non-power of two vector.
      Unknown = true;
      continue;
    }
+    auto *STy = cast<ArrayType>(Ty);
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Idxs[i])) {
      if (isIndexInRangeOfArrayType(STy->getNumElements(), CI))
        // It's in range, skip to the next index.
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -931,13 +931,13 @@
 }
 
 Constant *ConstantAggregateZero::getElementValue(Constant *C) const {
-  if (isa<SequentialType>(getType()))
+  if (isa<ArrayType>(getType()) || isa<VectorType>(getType()))
     return getSequentialElement();
   return getStructElement(cast<ConstantInt>(C)->getZExtValue());
 }
 
 Constant *ConstantAggregateZero::getElementValue(unsigned Idx) const {
-  if (isa<SequentialType>(getType()))
+  if (isa<ArrayType>(getType()) || isa<VectorType>(getType()))
     return getSequentialElement();
   return getStructElement(Idx);
 }
@@ -964,21 +964,23 @@
 }
 
 UndefValue *UndefValue::getElementValue(Constant *C) const {
-  if (isa<SequentialType>(getType()))
+  if (isa<ArrayType>(getType()) || isa<VectorType>(getType()))
     return getSequentialElement();
   return getStructElement(cast<ConstantInt>(C)->getZExtValue());
 }
 
 UndefValue *UndefValue::getElementValue(unsigned Idx) const {
-  if (isa<SequentialType>(getType()))
+  if (isa<ArrayType>(getType()) || isa<VectorType>(getType()))
     return getSequentialElement();
   return getStructElement(Idx);
 }
 
 unsigned UndefValue::getNumElements() const {
   Type *Ty = getType();
-  if (auto *ST = dyn_cast<SequentialType>(Ty))
-    return ST->getNumElements();
+  if (auto *AT = dyn_cast<ArrayType>(Ty))
+    return AT->getNumElements();
+  if (auto *VT = dyn_cast<VectorType>(Ty))
+    return VT->getNumElements();
   return Ty->getStructNumElements();
 }
@@ -2506,7 +2508,7 @@
 //                      ConstantData* implementations
 
 Type *ConstantDataSequential::getElementType() const {
-  return getType()->getElementType();
+  return getType()->getSequentialElementType();
 }
 
 StringRef ConstantDataSequential::getRawDataValues() const {
diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp
--- a/llvm/lib/IR/Core.cpp
+++ b/llvm/lib/IR/Core.cpp
@@ -753,7 +753,7 @@
   auto *Ty = unwrap(WrappedTy);
   if (auto *PTy = dyn_cast<PointerType>(Ty))
     return wrap(PTy->getElementType());
-  return wrap(cast<SequentialType>(Ty)->getElementType());
+  return wrap(Ty->getSequentialElementType());
 }
 
 unsigned LLVMGetNumContainedTypes(LLVMTypeRef Tp) {
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1617,11 +1617,12 @@
     if (!Struct->indexValid(Idx))
       return nullptr;
     return Struct->getTypeAtIndex(Idx);
   }
-  if (auto Sequential = dyn_cast<SequentialType>(Ty)) {
-    if (!Idx->getType()->isIntOrIntVectorTy())
-      return nullptr;
-    return Sequential->getElementType();
-  }
+  if (!Idx->getType()->isIntOrIntVectorTy())
+    return nullptr;
+  if (auto Array = dyn_cast<ArrayType>(Ty))
+    return Array->getElementType();
+  if (auto Vector = dyn_cast<VectorType>(Ty))
+    return Vector->getElementType();
   return nullptr;
 }
@@ -1631,8 +1632,10 @@
       return nullptr;
     return Struct->getElementType(Idx);
   }
-  if (auto Sequential = dyn_cast<SequentialType>(Ty))
-    return Sequential->getElementType();
+  if (auto Array = dyn_cast<ArrayType>(Ty))
+    return Array->getElementType();
+  if (auto Vector = dyn_cast<VectorType>(Ty))
+    return Vector->getElementType();
   return nullptr;
 }
diff --git a/llvm/lib/IR/Type.cpp b/llvm/lib/IR/Type.cpp
--- a/llvm/lib/IR/Type.cpp
+++ b/llvm/lib/IR/Type.cpp
@@ -553,7 +553,11 @@
 //===----------------------------------------------------------------------===//
 
 ArrayType::ArrayType(Type *ElType, uint64_t NumEl)
-    : SequentialType(ArrayTyID, ElType, NumEl) {}
+    : Type(ElType->getContext(), ArrayTyID), ContainedType(ElType),
+      NumElements(NumEl) {
+  ContainedTys = &ContainedType;
+  NumContainedTys = 1;
+}
 
 ArrayType *ArrayType::get(Type *ElementType, uint64_t NumElements) {
   assert(isValidElementType(ElementType) && "Invalid type for array element!");
@@ -580,7 +584,11 @@
 //===----------------------------------------------------------------------===//
 
 VectorType::VectorType(Type *ElType, ElementCount EC)
-    : SequentialType(VectorTyID, ElType, EC.Min), Scalable(EC.Scalable) {}
+    : Type(ElType->getContext(), VectorTyID), ContainedType(ElType),
+      NumElements(EC.Min), Scalable(EC.Scalable) {
+  ContainedTys = &ContainedType;
+  NumContainedTys = 1;
+}
 
 VectorType *VectorType::get(Type *ElementType, ElementCount EC) {
   assert(EC.Min > 0 && "#Elements of a VectorType must be greater than 0");
diff --git a/llvm/lib/Linker/IRMover.cpp b/llvm/lib/Linker/IRMover.cpp
--- a/llvm/lib/Linker/IRMover.cpp
+++ b/llvm/lib/Linker/IRMover.cpp
@@ -173,9 +173,11 @@
     if (DSTy->isLiteral() != SSTy->isLiteral() ||
         DSTy->isPacked() != SSTy->isPacked())
       return false;
-  } else if (auto *DSeqTy = dyn_cast<SequentialType>(DstTy)) {
-    if (DSeqTy->getNumElements() !=
-        cast<SequentialType>(SrcTy)->getNumElements())
+  } else if (auto *DArrTy = dyn_cast<ArrayType>(DstTy)) {
+    if (DArrTy->getNumElements() != cast<ArrayType>(SrcTy)->getNumElements())
+      return false;
+  } else if (auto *DVecTy = dyn_cast<VectorType>(DstTy)) {
+    if (DVecTy->getElementCount() != cast<VectorType>(SrcTy)->getElementCount())
       return false;
   }
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -364,8 +364,11 @@
     return false;
   }
 
-  Type *AT = Alloca->getAllocatedType();
-  SequentialType *AllocaTy = dyn_cast<SequentialType>(AT);
+  Type *AllocaTy = Alloca->getAllocatedType();
+  VectorType *VectorTy = dyn_cast<VectorType>(AllocaTy);
+  if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy))
+    if (VectorType::isValidElementType(ArrayTy->getElementType()))
+      VectorTy = arrayTypeToVecType(ArrayTy);
 
   LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");
 
@@ -373,10 +376,9 @@
   // are just being conservative for now.
   // FIXME: We also reject alloca's of the form [ 2 x [ 2 x i32 ]] or equivalent. Potentially these
   // could also be promoted but we don't currently handle this case
-  if (!AllocaTy ||
-      AllocaTy->getNumElements() > 16 ||
-      AllocaTy->getNumElements() < 2 ||
-      !VectorType::isValidElementType(AllocaTy->getElementType())) {
+  if (!VectorTy ||
+      VectorTy->getNumElements() > 16 ||
+      VectorTy->getNumElements() < 2) {
     LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
     return false;
   }
@@ -412,10 +414,6 @@
     }
   }
 
-  VectorType *VectorTy = dyn_cast<VectorType>(AllocaTy);
-  if (!VectorTy)
-    VectorTy = arrayTypeToVecType(cast<ArrayType>(AllocaTy));
-
   LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                     << *VectorTy << '\n');
 
@@ -424,7 +422,7 @@
     IRBuilder<> Builder(Inst);
     switch (Inst->getOpcode()) {
     case Instruction::Load: {
-      if (Inst->getType() == AT)
+      if (Inst->getType() == AllocaTy)
         break;
 
       Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
@@ -440,7 +438,7 @@
     }
     case Instruction::Store: {
       StoreInst *SI = cast<StoreInst>(Inst);
-      if (SI->getValueOperand()->getType() == AT)
+      if (SI->getValueOperand()->getType() == AllocaTy)
         break;
 
       Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
diff --git a/llvm/lib/Target/Hexagon/HexagonCommonGEP.cpp b/llvm/lib/Target/Hexagon/HexagonCommonGEP.cpp
--- a/llvm/lib/Target/Hexagon/HexagonCommonGEP.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonCommonGEP.cpp
@@ -204,17 +204,7 @@
   Type *next_type(Type *Ty, Value *Idx) {
     if (auto *PTy = dyn_cast<PointerType>(Ty))
       return PTy->getElementType();
-    // Advance the type.
-    if (!Ty->isStructTy()) {
-      Type *NexTy = cast<SequentialType>(Ty)->getElementType();
-      return NexTy;
-    }
-    // Otherwise it is a struct type.
-    ConstantInt *CI = dyn_cast<ConstantInt>(Idx);
-    assert(CI && "Struct type with non-constant index");
-    int64_t i = CI->getValue().getSExtValue();
-    Type *NextTy = cast<StructType>(Ty)->getElementType(i);
-    return NextTy;
+    return GetElementPtrInst::getIndexedTypeStep(Ty, Idx);
   }
 
   raw_ostream &operator<< (raw_ostream &OS, const GepNode &GN) {
diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
--- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -784,13 +784,17 @@
   if (DL.getTypeSizeInBits(type) != DL.getTypeAllocSizeInBits(type))
     return false;
 
-  if (!isa<StructType>(type) && !isa<SequentialType>(type))
-    return true;
-
-  // For homogenous sequential types, check for padding within members.
-  if (SequentialType *seqTy = dyn_cast<SequentialType>(type))
+  // FIXME: This isn't the right way to check for padding in vectors with
+  // non-byte-size elements.
+  if (VectorType *seqTy = dyn_cast<VectorType>(type))
+    return isDenselyPacked(seqTy->getElementType(), DL);
+
+  if (ArrayType *seqTy = dyn_cast<ArrayType>(type))
     return isDenselyPacked(seqTy->getElementType(), DL);
 
+  if (!isa<StructType>(type))
+    return true;
+
   // Check for padding within and between elements of a struct.
   StructType *StructTy = cast<StructType>(type);
   const StructLayout *Layout = DL.getStructLayout(StructTy);
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -131,8 +131,7 @@
     case Type::PointerTyID:
       return true;
     case Type::ArrayTyID:
    case Type::VectorTyID: {
-      SequentialType *STy = cast<SequentialType>(Ty);
-      Types.push_back(STy->getElementType());
+      Types.push_back(Ty->getSequentialElementType());
      break;
    }
    case Type::StructTyID: {
@@ -142,7 +141,8 @@
            E = STy->element_end(); I != E; ++I) {
         Type *InnerTy = *I;
         if (isa<PointerType>(InnerTy)) return true;
-        if (isa<SequentialType>(InnerTy) || isa<StructType>(InnerTy))
+        if (isa<ArrayType>(InnerTy) || isa<StructType>(InnerTy) ||
+            isa<VectorType>(InnerTy))
           Types.push_back(InnerTy);
       }
       break;
@@ -438,8 +438,10 @@
 
   if (isa<StructType>(Init->getType())) {
     // nothing to check
-  } else if (SequentialType *STy = dyn_cast<SequentialType>(Init->getType())) {
-    if (STy->getNumElements() > 16 && GV->hasNUsesOrMore(16))
+  } else if (isa<ArrayType>(Init->getType()) ||
+             isa<VectorType>(Init->getType())) {
+    if (Init->getType()->getSequentialNumElements() > 16 &&
+        GV->hasNUsesOrMore(16))
       return false;  // It's not worth it.
   } else
     return false;
@@ -509,8 +511,8 @@
     Type *ElTy = nullptr;
     if (StructType *STy = dyn_cast<StructType>(Ty))
       ElTy = STy->getElementType(ElementIdx);
-    else if (SequentialType *STy = dyn_cast<SequentialType>(Ty))
-      ElTy = STy->getElementType();
+    else
+      ElTy = Ty->getSequentialElementType();
     assert(ElTy);
 
     Constant *In = Init->getAggregateElement(ElementIdx);
@@ -541,7 +543,7 @@
       uint64_t FragmentOffsetInBits = Layout.getElementOffsetInBits(ElementIdx);
       transferSRADebugInfo(GV, NGV, FragmentOffsetInBits, Size,
                            STy->getNumElements());
-    } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
+    } else {
       uint64_t EltSize = DL.getTypeAllocSize(ElTy);
       Align EltAlign(DL.getABITypeAlignment(ElTy));
       uint64_t FragmentSizeInBits = DL.getTypeAllocSizeInBits(ElTy);
@@ -553,7 +555,8 @@
       if (NewAlign > EltAlign)
         NGV->setAlignment(NewAlign);
       transferSRADebugInfo(GV, NGV, FragmentSizeInBits * ElementIdx,
-                           FragmentSizeInBits, STy->getNumElements());
+                           FragmentSizeInBits,
+                           Ty->getSequentialNumElements());
     }
   }
 
@@ -2427,8 +2430,7 @@
   }
 
   ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
-  SequentialType *InitTy = cast<SequentialType>(Init->getType());
-  uint64_t NumElts = InitTy->getNumElements();
+  uint64_t NumElts = Init->getType()->getSequentialNumElements();
 
   // Break up the array into elements.
   for (uint64_t i = 0, e = NumElts; i != e; ++i)
@@ -2439,7 +2441,7 @@
   EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);
 
   if (Init->getType()->isArrayTy())
-    return ConstantArray::get(cast<ArrayType>(InitTy), Elts);
+    return ConstantArray::get(cast<ArrayType>(Init->getType()), Elts);
   return ConstantVector::get(Elts);
 }
 
@@ -2562,7 +2564,7 @@
       if (auto *STy = dyn_cast<StructType>(Ty))
         NumElts = STy->getNumElements();
       else
-        NumElts = cast<SequentialType>(Ty)->getNumElements();
+        NumElts = Ty->getSequentialNumElements();
       for (unsigned i = 0, e = NumElts; i != e; ++i)
         Elts.push_back(Init->getAggregateElement(i));
     }
diff --git a/llvm/lib/Transforms/IPO/StripSymbols.cpp b/llvm/lib/Transforms/IPO/StripSymbols.cpp
--- a/llvm/lib/Transforms/IPO/StripSymbols.cpp
+++ b/llvm/lib/Transforms/IPO/StripSymbols.cpp
@@ -150,7 +150,8 @@
   } else if (!isa<Function>(C)) {
     // FIXME: Why does the type of the constant matter here?
-    if (isa<SequentialType>(C->getType()) || isa<StructType>(C->getType()))
+    if (isa<ArrayType>(C->getType()) || isa<StructType>(C->getType()) ||
+        isa<VectorType>(C->getType()))
       C->destroyConstant();
   }
 
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -3507,11 +3507,11 @@
       (DL.getTypeAllocSize(Ty) - Offset) < Size)
     return nullptr;
 
-  if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
-    Type *ElementTy = SeqTy->getElementType();
+  if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
+    Type *ElementTy = Ty->getSequentialElementType();
     uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
     uint64_t NumSkippedElements = Offset / ElementSize;
-    if (NumSkippedElements >= SeqTy->getNumElements())
+    if (NumSkippedElements >= Ty->getSequentialNumElements())
       return nullptr;
     Offset -= NumSkippedElements * ElementSize;
 
diff --git a/llvm/lib/Transforms/Utils/FunctionComparator.cpp b/llvm/lib/Transforms/Utils/FunctionComparator.cpp
--- a/llvm/lib/Transforms/Utils/FunctionComparator.cpp
+++ b/llvm/lib/Transforms/Utils/FunctionComparator.cpp
@@ -476,10 +476,17 @@
     return 0;
   }
 
-  case Type::ArrayTyID:
+  case Type::ArrayTyID: {
+    auto *STyL = cast<ArrayType>(TyL);
+    auto *STyR = cast<ArrayType>(TyR);
+    if (STyL->getNumElements() != STyR->getNumElements())
+      return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());
+    return cmpTypes(STyL->getElementType(), STyR->getElementType());
+  }
   case Type::VectorTyID: {
-    auto *STyL = cast<SequentialType>(TyL);
-    auto *STyR = cast<SequentialType>(TyR);
+    auto *STyL = cast<VectorType>(TyL);
+    auto *STyR = cast<VectorType>(TyR);
+    // FIXME: Handle scalable vectors
     if (STyL->getNumElements() != STyR->getNumElements())
       return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());
     return cmpTypes(STyL->getElementType(), STyR->getElementType());
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -3116,7 +3116,8 @@
 
   unsigned N = 1;
   Type *EltTy = T;
-  while (isa<StructType>(EltTy) || isa<SequentialType>(EltTy)) {
+  while (isa<StructType>(EltTy) || isa<ArrayType>(EltTy) ||
+         isa<VectorType>(EltTy)) {
     if (auto *ST = dyn_cast<StructType>(EltTy)) {
       // Check that struct is homogeneous.
       for (const auto *Ty : ST->elements())
@@ -3125,9 +3126,8 @@
       N *= ST->getNumElements();
       EltTy = *ST->element_begin();
     } else {
-      auto *SeqT = cast<SequentialType>(EltTy);
-      N *= SeqT->getNumElements();
-      EltTy = SeqT->getElementType();
+      N *= EltTy->getSequentialNumElements();
+      EltTy = EltTy->getSequentialElementType();
     }
   }
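
A minimal sketch of the caller-side pattern this patch implies, assuming the Type::getSequentialNumElements()/getSequentialElementType() helpers introduced above; the function name numSequentialElements is hypothetical and only illustrates branching on the two concrete types that used to share SequentialType:

// Hypothetical example, not part of the patch: code that previously wrote
//   if (auto *Seq = dyn_cast<SequentialType>(Ty)) return Seq->getNumElements();
// now branches on ArrayType and VectorType explicitly.
#include "llvm/IR/DerivedTypes.h"

using namespace llvm;

static uint64_t numSequentialElements(Type *Ty) {
  if (auto *ATy = dyn_cast<ArrayType>(Ty))
    return ATy->getNumElements();
  if (auto *VTy = dyn_cast<VectorType>(Ty))
    return VTy->getNumElements(); // minimum element count for scalable vectors
  return 0; // neither an array nor a vector
}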