diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -119,8 +119,9 @@
       ValueTy = lvalue.getType();
       ValueSizeInBits = C.getTypeSize(ValueTy);
       AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
-          lvalue.getType(), lvalue.getExtVectorAddress()
-                                .getElementType()->getVectorNumElements());
+          lvalue.getType(), cast<llvm::VectorType>(
+                                lvalue.getExtVectorAddress().getElementType())
+                                ->getNumElements());
       AtomicSizeInBits = C.getTypeSize(AtomicTy);
       AtomicAlign = ValueAlign = lvalue.getAlignment();
       LVal = lvalue;
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -4502,7 +4502,7 @@
 }
 
 Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
-  ElementCount EC = V->getType()->getVectorElementCount();
+  ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount();
   return EmitNeonSplat(V, C, EC);
 }
@@ -5426,8 +5426,8 @@
   assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
   // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
   // it before inserting.
-  Ops[j] =
-      CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType());
+  Ops[j] = CGF.Builder.CreateTruncOrBitCast(
+      Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
   Ops[j] =
       CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
 }
@@ -5715,7 +5715,7 @@
   case NEON::BI__builtin_neon_vld1q_x3_v:
   case NEON::BI__builtin_neon_vld1_x4_v:
   case NEON::BI__builtin_neon_vld1q_x4_v: {
-    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
+    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
     Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
     llvm::Type *Tys[2] = { VTy, PTy };
     Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
@@ -5827,8 +5827,9 @@
     llvm::Type *RTy = Ty;
     if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
         BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
-      RTy = llvm::VectorType::get(Ty->getVectorElementType(),
-                                  Ty->getVectorNumElements() * 2);
+      RTy = llvm::VectorType::get(cast<llvm::VectorType>(Ty)->getElementType(),
+                                  cast<llvm::VectorType>(Ty)->getNumElements() *
+                                      2);
     llvm::Type *Tys[2] = {
         RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
                                              /*isQuad*/ false))};
@@ -5917,7 +5918,7 @@
   case NEON::BI__builtin_neon_vst1q_x3_v:
   case NEON::BI__builtin_neon_vst1_x4_v:
   case NEON::BI__builtin_neon_vst1q_x4_v: {
-    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
+    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
     // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
     // in AArch64 it comes last. We may want to stick to one or another.
     if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
@@ -7063,8 +7064,9 @@
   // equal to the lane size. In LLVM IR, an LShr with that parameter would be
   // undefined behavior, but in MVE it's legal, so we must convert it to code
   // that is not undefined in IR.
-  unsigned LaneBits =
-      V->getType()->getVectorElementType()->getPrimitiveSizeInBits();
+  unsigned LaneBits = cast<llvm::VectorType>(V->getType())
+                          ->getElementType()
+                          ->getPrimitiveSizeInBits();
   if (Shift == LaneBits) {
     // An unsigned shift of the full lane size always generates zero, so we can
     // simply emit a zero vector. A signed shift of the full lane size does the
@@ -7115,7 +7117,8 @@
   // Make a shufflevector that extracts every other element of a vector (evens
   // or odds, as desired).
SmallVector Indices; - unsigned InputElements = V->getType()->getVectorNumElements(); + unsigned InputElements = + cast(V->getType())->getNumElements(); for (unsigned i = 0; i < InputElements; i += 2) Indices.push_back(i + Odd); return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()), @@ -7127,7 +7130,8 @@ // Make a shufflevector that interleaves two vectors element by element. assert(V0->getType() == V1->getType() && "Can't zip different vector types"); SmallVector Indices; - unsigned InputElements = V0->getType()->getVectorNumElements(); + unsigned InputElements = + cast(V0->getType())->getNumElements(); for (unsigned i = 0; i < InputElements; i++) { Indices.push_back(i); Indices.push_back(i + InputElements); @@ -7139,7 +7143,7 @@ static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) { // MVE-specific helper function to make a vector splat of a constant such as // UINT_MAX or INT_MIN, in which all bits below the highest one are equal. - llvm::Type *T = VT->getVectorElementType(); + llvm::Type *T = cast(VT)->getElementType(); unsigned LaneBits = T->getPrimitiveSizeInBits(); uint32_t Value = HighBit << (LaneBits - 1); if (OtherBits) @@ -7472,8 +7476,7 @@ // The vector type that is returned may be different from the // eventual type loaded from memory. auto VectorTy = cast(ReturnTy); - auto MemoryTy = - llvm::VectorType::get(MemEltTy, VectorTy->getVectorElementCount()); + auto MemoryTy = llvm::VectorType::get(MemEltTy, VectorTy->getElementCount()); Value *Offset = Builder.getInt32(0); Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy); @@ -9984,8 +9987,8 @@ Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType())); - Value *MaskVec = getMaskVecValue(CGF, Ops[2], - Ops[1]->getType()->getVectorNumElements()); + Value *MaskVec = getMaskVecValue( + CGF, Ops[2], cast(Ops[1]->getType())->getNumElements()); return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec); } @@ -9996,23 +9999,22 @@ Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType())); - Value *MaskVec = getMaskVecValue(CGF, Ops[2], - Ops[1]->getType()->getVectorNumElements()); + Value *MaskVec = getMaskVecValue( + CGF, Ops[2], cast(Ops[1]->getType())->getNumElements()); return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]); } static Value *EmitX86ExpandLoad(CodeGenFunction &CGF, ArrayRef Ops) { - llvm::Type *ResultTy = Ops[1]->getType(); - llvm::Type *PtrTy = ResultTy->getVectorElementType(); + auto *ResultTy = cast(Ops[1]->getType()); + llvm::Type *PtrTy = ResultTy->getElementType(); // Cast the pointer to element type. Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(PtrTy)); - Value *MaskVec = getMaskVecValue(CGF, Ops[2], - ResultTy->getVectorNumElements()); + Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements()); llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload, ResultTy); @@ -10022,10 +10024,9 @@ static Value *EmitX86CompressExpand(CodeGenFunction &CGF, ArrayRef Ops, bool IsCompress) { - llvm::Type *ResultTy = Ops[1]->getType(); + auto *ResultTy = cast(Ops[1]->getType()); - Value *MaskVec = getMaskVecValue(CGF, Ops[2], - ResultTy->getVectorNumElements()); + Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements()); Intrinsic::ID IID = IsCompress ? 
Intrinsic::x86_avx512_mask_compress : Intrinsic::x86_avx512_mask_expand; @@ -10035,15 +10036,14 @@ static Value *EmitX86CompressStore(CodeGenFunction &CGF, ArrayRef Ops) { - llvm::Type *ResultTy = Ops[1]->getType(); - llvm::Type *PtrTy = ResultTy->getVectorElementType(); + auto *ResultTy = cast(Ops[1]->getType()); + llvm::Type *PtrTy = ResultTy->getElementType(); // Cast the pointer to element type. Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(PtrTy)); - Value *MaskVec = getMaskVecValue(CGF, Ops[2], - ResultTy->getVectorNumElements()); + Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements()); llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore, ResultTy); @@ -10072,7 +10072,7 @@ // Funnel shifts amounts are treated as modulo and types are all power-of-2 so // we only care about the lowest log2 bits anyway. if (Amt->getType() != Ty) { - unsigned NumElts = Ty->getVectorNumElements(); + unsigned NumElts = cast(Ty)->getNumElements(); Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false); Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt); } @@ -10130,7 +10130,8 @@ if (C->isAllOnesValue()) return Op0; - Mask = getMaskVecValue(CGF, Mask, Op0->getType()->getVectorNumElements()); + Mask = getMaskVecValue( + CGF, Mask, cast(Op0->getType())->getNumElements()); return CGF.Builder.CreateSelect(Mask, Op0, Op1); } @@ -10177,7 +10178,8 @@ bool Signed, ArrayRef Ops) { assert((Ops.size() == 2 || Ops.size() == 4) && "Unexpected number of arguments"); - unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); + unsigned NumElts = + cast(Ops[0]->getType())->getNumElements(); Value *Cmp; if (CC == 3) { @@ -10454,7 +10456,7 @@ static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op, llvm::Type *DstTy) { - unsigned NumberOfElements = DstTy->getVectorNumElements(); + unsigned NumberOfElements = cast(DstTy)->getNumElements(); Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements); return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2"); } @@ -10492,11 +10494,11 @@ return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]}); } - unsigned NumDstElts = DstTy->getVectorNumElements(); + unsigned NumDstElts = cast(DstTy)->getNumElements(); Value *Src = Ops[0]; // Extract the subvector. - if (NumDstElts != Src->getType()->getVectorNumElements()) { + if (NumDstElts != cast(Src->getType())->getNumElements()) { assert(NumDstElts == 4 && "Unexpected vector size"); uint32_t ShuffleMask[4] = {0, 1, 2, 3}; Src = CGF.Builder.CreateShuffleVector(Src, UndefValue::get(Src->getType()), @@ -10796,7 +10798,8 @@ case X86::BI__builtin_ia32_vec_ext_v16hi: case X86::BI__builtin_ia32_vec_ext_v8si: case X86::BI__builtin_ia32_vec_ext_v4di: { - unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); + unsigned NumElts = + cast(Ops[0]->getType())->getNumElements(); uint64_t Index = cast(Ops[1])->getZExtValue(); Index &= NumElts - 1; // These builtins exist so we can ensure the index is an ICE and in range. @@ -10811,7 +10814,8 @@ case X86::BI__builtin_ia32_vec_set_v16hi: case X86::BI__builtin_ia32_vec_set_v8si: case X86::BI__builtin_ia32_vec_set_v4di: { - unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); + unsigned NumElts = + cast(Ops[0]->getType())->getNumElements(); unsigned Index = cast(Ops[2])->getZExtValue(); Index &= NumElts - 1; // These builtins exist so we can ensure the index is an ICE and in range. 
@@ -11237,8 +11241,9 @@ break; } - unsigned MinElts = std::min(Ops[0]->getType()->getVectorNumElements(), - Ops[2]->getType()->getVectorNumElements()); + unsigned MinElts = + std::min(cast(Ops[0]->getType())->getNumElements(), + cast(Ops[2]->getType())->getNumElements()); Ops[3] = getMaskVecValue(*this, Ops[3], MinElts); Function *Intr = CGM.getIntrinsic(IID); return Builder.CreateCall(Intr, Ops); @@ -11345,8 +11350,9 @@ break; } - unsigned MinElts = std::min(Ops[2]->getType()->getVectorNumElements(), - Ops[3]->getType()->getVectorNumElements()); + unsigned MinElts = + std::min(cast(Ops[2]->getType())->getNumElements(), + cast(Ops[3]->getType())->getNumElements()); Ops[1] = getMaskVecValue(*this, Ops[1], MinElts); Function *Intr = CGM.getIntrinsic(IID); return Builder.CreateCall(Intr, Ops); @@ -11368,9 +11374,10 @@ case X86::BI__builtin_ia32_extracti64x2_256_mask: case X86::BI__builtin_ia32_extractf64x2_512_mask: case X86::BI__builtin_ia32_extracti64x2_512_mask: { - llvm::Type *DstTy = ConvertType(E->getType()); - unsigned NumElts = DstTy->getVectorNumElements(); - unsigned SrcNumElts = Ops[0]->getType()->getVectorNumElements(); + auto *DstTy = cast(ConvertType(E->getType())); + unsigned NumElts = DstTy->getNumElements(); + unsigned SrcNumElts = + cast(Ops[0]->getType())->getNumElements(); unsigned SubVectors = SrcNumElts / NumElts; unsigned Index = cast(Ops[1])->getZExtValue(); assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors"); @@ -11407,8 +11414,10 @@ case X86::BI__builtin_ia32_inserti64x2_256: case X86::BI__builtin_ia32_insertf64x2_512: case X86::BI__builtin_ia32_inserti64x2_512: { - unsigned DstNumElts = Ops[0]->getType()->getVectorNumElements(); - unsigned SrcNumElts = Ops[1]->getType()->getVectorNumElements(); + unsigned DstNumElts = + cast(Ops[0]->getType())->getNumElements(); + unsigned SrcNumElts = + cast(Ops[1]->getType())->getNumElements(); unsigned SubVectors = DstNumElts / SrcNumElts; unsigned Index = cast(Ops[2])->getZExtValue(); assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors"); @@ -11472,7 +11481,8 @@ case X86::BI__builtin_ia32_pblendw256: case X86::BI__builtin_ia32_pblendd128: case X86::BI__builtin_ia32_pblendd256: { - unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); + unsigned NumElts = + cast(Ops[0]->getType())->getNumElements(); unsigned Imm = cast(Ops[2])->getZExtValue(); uint32_t Indices[16]; @@ -11489,8 +11499,8 @@ case X86::BI__builtin_ia32_pshuflw256: case X86::BI__builtin_ia32_pshuflw512: { uint32_t Imm = cast(Ops[1])->getZExtValue(); - llvm::Type *Ty = Ops[0]->getType(); - unsigned NumElts = Ty->getVectorNumElements(); + auto *Ty = cast(Ops[0]->getType()); + unsigned NumElts = Ty->getNumElements(); // Splat the 8-bits of immediate 4 times to help the loop wrap around. Imm = (Imm & 0xff) * 0x01010101; @@ -11513,8 +11523,8 @@ case X86::BI__builtin_ia32_pshufhw256: case X86::BI__builtin_ia32_pshufhw512: { uint32_t Imm = cast(Ops[1])->getZExtValue(); - llvm::Type *Ty = Ops[0]->getType(); - unsigned NumElts = Ty->getVectorNumElements(); + auto *Ty = cast(Ops[0]->getType()); + unsigned NumElts = Ty->getNumElements(); // Splat the 8-bits of immediate 4 times to help the loop wrap around. 
Imm = (Imm & 0xff) * 0x01010101; @@ -11543,8 +11553,8 @@ case X86::BI__builtin_ia32_vpermilpd512: case X86::BI__builtin_ia32_vpermilps512: { uint32_t Imm = cast(Ops[1])->getZExtValue(); - llvm::Type *Ty = Ops[0]->getType(); - unsigned NumElts = Ty->getVectorNumElements(); + auto *Ty = cast(Ops[0]->getType()); + unsigned NumElts = Ty->getNumElements(); unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128; unsigned NumLaneElts = NumElts / NumLanes; @@ -11570,8 +11580,8 @@ case X86::BI__builtin_ia32_shufps256: case X86::BI__builtin_ia32_shufps512: { uint32_t Imm = cast(Ops[2])->getZExtValue(); - llvm::Type *Ty = Ops[0]->getType(); - unsigned NumElts = Ty->getVectorNumElements(); + auto *Ty = cast(Ops[0]->getType()); + unsigned NumElts = Ty->getNumElements(); unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128; unsigned NumLaneElts = NumElts / NumLanes; @@ -11598,8 +11608,8 @@ case X86::BI__builtin_ia32_permdi512: case X86::BI__builtin_ia32_permdf512: { unsigned Imm = cast(Ops[1])->getZExtValue(); - llvm::Type *Ty = Ops[0]->getType(); - unsigned NumElts = Ty->getVectorNumElements(); + auto *Ty = cast(Ops[0]->getType()); + unsigned NumElts = Ty->getNumElements(); // These intrinsics operate on 256-bit lanes of four 64-bit elements. uint32_t Indices[8]; @@ -11616,7 +11626,8 @@ case X86::BI__builtin_ia32_palignr512: { unsigned ShiftVal = cast(Ops[2])->getZExtValue() & 0xff; - unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); + unsigned NumElts = + cast(Ops[0]->getType())->getNumElements(); assert(NumElts % 16 == 0); // If palignr is shifting the pair of vectors more than the size of two @@ -11653,7 +11664,8 @@ case X86::BI__builtin_ia32_alignq128: case X86::BI__builtin_ia32_alignq256: case X86::BI__builtin_ia32_alignq512: { - unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); + unsigned NumElts = + cast(Ops[0]->getType())->getNumElements(); unsigned ShiftVal = cast(Ops[2])->getZExtValue() & 0xff; // Mask the shift amount to width of two vectors. @@ -11676,8 +11688,8 @@ case X86::BI__builtin_ia32_shuf_i32x4: case X86::BI__builtin_ia32_shuf_i64x2: { unsigned Imm = cast(Ops[2])->getZExtValue(); - llvm::Type *Ty = Ops[0]->getType(); - unsigned NumElts = Ty->getVectorNumElements(); + auto *Ty = cast(Ops[0]->getType()); + unsigned NumElts = Ty->getNumElements(); unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2; unsigned NumLaneElts = NumElts / NumLanes; @@ -11702,7 +11714,8 @@ case X86::BI__builtin_ia32_vperm2f128_si256: case X86::BI__builtin_ia32_permti256: { unsigned Imm = cast(Ops[2])->getZExtValue(); - unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); + unsigned NumElts = + cast(Ops[0]->getType())->getNumElements(); // This takes a very simple approach since there are two lanes and a // shuffle can have 2 inputs. So we reserve the first input for the first @@ -11740,9 +11753,9 @@ case X86::BI__builtin_ia32_pslldqi256_byteshift: case X86::BI__builtin_ia32_pslldqi512_byteshift: { unsigned ShiftVal = cast(Ops[1])->getZExtValue() & 0xff; - llvm::Type *ResultType = Ops[0]->getType(); + auto *ResultType = cast(Ops[0]->getType()); // Builtin type is vXi64 so multiply by 8 to get bytes. - unsigned NumElts = ResultType->getVectorNumElements() * 8; + unsigned NumElts = ResultType->getNumElements() * 8; // If pslldq is shifting the vector more than 15 bytes, emit zero. 
if (ShiftVal >= 16) @@ -11770,9 +11783,9 @@ case X86::BI__builtin_ia32_psrldqi256_byteshift: case X86::BI__builtin_ia32_psrldqi512_byteshift: { unsigned ShiftVal = cast(Ops[1])->getZExtValue() & 0xff; - llvm::Type *ResultType = Ops[0]->getType(); + auto *ResultType = cast(Ops[0]->getType()); // Builtin type is vXi64 so multiply by 8 to get bytes. - unsigned NumElts = ResultType->getVectorNumElements() * 8; + unsigned NumElts = ResultType->getNumElements() * 8; // If psrldq is shifting the vector more than 15 bytes, emit zero. if (ShiftVal >= 16) @@ -12416,7 +12429,8 @@ case X86::BI__builtin_ia32_fpclasspd128_mask: case X86::BI__builtin_ia32_fpclasspd256_mask: case X86::BI__builtin_ia32_fpclasspd512_mask: { - unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); + unsigned NumElts = + cast(Ops[0]->getType())->getNumElements(); Value *MaskIn = Ops[2]; Ops.erase(&Ops[2]); @@ -12453,7 +12467,8 @@ case X86::BI__builtin_ia32_vp2intersect_d_512: case X86::BI__builtin_ia32_vp2intersect_d_256: case X86::BI__builtin_ia32_vp2intersect_d_128: { - unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); + unsigned NumElts = + cast(Ops[0]->getType())->getNumElements(); Intrinsic::ID ID; switch (BuiltinID) { @@ -12511,7 +12526,8 @@ case X86::BI__builtin_ia32_vpshufbitqmb128_mask: case X86::BI__builtin_ia32_vpshufbitqmb256_mask: case X86::BI__builtin_ia32_vpshufbitqmb512_mask: { - unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); + unsigned NumElts = + cast(Ops[0]->getType())->getNumElements(); Value *MaskIn = Ops[2]; Ops.erase(&Ops[2]); @@ -12651,8 +12667,11 @@ } Function *Intr = CGM.getIntrinsic(IID); - if (Intr->getReturnType()->getVectorElementType()->isIntegerTy(1)) { - unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); + if (cast(Intr->getReturnType()) + ->getElementType() + ->isIntegerTy(1)) { + unsigned NumElts = + cast(Ops[0]->getType())->getNumElements(); Value *MaskIn = Ops[3]; Ops.erase(&Ops[3]); @@ -12673,7 +12692,8 @@ case X86::BI__builtin_ia32_cmppd128_mask: case X86::BI__builtin_ia32_cmppd256_mask: { // FIXME: Support SAE. 
- unsigned NumElts = Ops[0]->getType()->getVectorNumElements(); + unsigned NumElts = + cast(Ops[0]->getType())->getNumElements(); Value *Cmp; if (IsSignaling) Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]); @@ -12730,8 +12750,9 @@ // AVX512 bf16 intrinsics case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: { - Ops[2] = getMaskVecValue(*this, Ops[2], - Ops[0]->getType()->getVectorNumElements()); + Ops[2] = getMaskVecValue( + *this, Ops[2], + cast(Ops[0]->getType())->getNumElements()); Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128; return Builder.CreateCall(CGM.getIntrinsic(IID), Ops); } @@ -15047,7 +15068,8 @@ switch (BuiltinID) { case WebAssembly::BI__builtin_wasm_replace_lane_i8x16: case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: { - llvm::Type *ElemType = ConvertType(E->getType())->getVectorElementType(); + llvm::Type *ElemType = + cast(ConvertType(E->getType()))->getElementType(); Value *Trunc = Builder.CreateTrunc(Val, ElemType); return Builder.CreateInsertElement(Vec, Trunc, Lane); } @@ -15510,8 +15532,9 @@ if (ID == Intrinsic::not_intrinsic) return nullptr; - auto IsVectorPredTy = [] (llvm::Type *T) { - return T->isVectorTy() && T->getVectorElementType()->isIntegerTy(1); + auto IsVectorPredTy = [](llvm::Type *T) { + return T->isVectorTy() && + cast(T)->getElementType()->isIntegerTy(1); }; llvm::Function *IntrFn = CGM.getIntrinsic(ID); diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -2127,7 +2127,8 @@ if (const VectorType *VTy = Dst.getType()->getAs()) { unsigned NumSrcElts = VTy->getNumElements(); - unsigned NumDstElts = Vec->getType()->getVectorNumElements(); + unsigned NumDstElts = + cast(Vec->getType())->getNumElements(); if (NumDstElts == NumSrcElts) { // Use shuffle vector is the src and destination are the same number of // elements and restore the vector mask since it is on the side it will be diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp --- a/clang/lib/CodeGen/CGExprScalar.cpp +++ b/clang/lib/CodeGen/CGExprScalar.cpp @@ -1306,7 +1306,7 @@ "Splatted expr doesn't match with vector element type?"); // Splat the element across to all elements - unsigned NumElements = DstTy->getVectorNumElements(); + unsigned NumElements = cast(DstTy)->getNumElements(); return Builder.CreateVectorSplat(NumElements, Src, "splat"); } @@ -1324,8 +1324,8 @@ // short or half vector. // Source and destination are both expected to be vectors. 
- llvm::Type *SrcElementTy = SrcTy->getVectorElementType(); - llvm::Type *DstElementTy = DstTy->getVectorElementType(); + llvm::Type *SrcElementTy = cast(SrcTy)->getElementType(); + llvm::Type *DstElementTy = cast(DstTy)->getElementType(); (void)DstElementTy; assert(((SrcElementTy->isIntegerTy() && @@ -1691,8 +1691,8 @@ assert(DstTy->isVectorTy() && "ConvertVector destination IR type must be a vector"); - llvm::Type *SrcEltTy = SrcTy->getVectorElementType(), - *DstEltTy = DstTy->getVectorElementType(); + llvm::Type *SrcEltTy = cast(SrcTy)->getElementType(), + *DstEltTy = cast(DstTy)->getElementType(); if (DstEltType->isBooleanType()) { assert((SrcEltTy->isFloatingPointTy() || @@ -2219,7 +2219,7 @@ llvm::Type *DstTy = ConvertType(DestTy); Value *Elt = Visit(const_cast(E)); // Splat the element across to all elements - unsigned NumElements = DstTy->getVectorNumElements(); + unsigned NumElements = cast(DstTy)->getNumElements(); return Builder.CreateVectorSplat(NumElements, Elt, "splat"); } @@ -4545,7 +4545,8 @@ // get a vec3. if (NumElementsSrc != 3 && NumElementsDst == 3) { if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) { - auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4); + auto Vec4Ty = llvm::VectorType::get( + cast(DstTy)->getElementType(), 4); Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src, Vec4Ty); } diff --git a/clang/lib/CodeGen/PatternInit.cpp b/clang/lib/CodeGen/PatternInit.cpp --- a/clang/lib/CodeGen/PatternInit.cpp +++ b/clang/lib/CodeGen/PatternInit.cpp @@ -34,9 +34,11 @@ constexpr bool NegativeNaN = true; constexpr uint64_t NaNPayload = 0xFFFFFFFFFFFFFFFFull; if (Ty->isIntOrIntVectorTy()) { - unsigned BitWidth = cast( - Ty->isVectorTy() ? Ty->getVectorElementType() : Ty) - ->getBitWidth(); + unsigned BitWidth = + cast( + Ty->isVectorTy() ? cast(Ty)->getElementType() + : Ty) + ->getBitWidth(); if (BitWidth <= 64) return llvm::ConstantInt::get(Ty, IntValue); return llvm::ConstantInt::get( @@ -44,7 +46,7 @@ } if (Ty->isPtrOrPtrVectorTy()) { auto *PtrTy = cast( - Ty->isVectorTy() ? Ty->getVectorElementType() : Ty); + Ty->isVectorTy() ? cast(Ty)->getElementType() : Ty); unsigned PtrWidth = CGM.getContext().getTargetInfo().getPointerWidth( PtrTy->getAddressSpace()); if (PtrWidth > 64) @@ -55,7 +57,7 @@ } if (Ty->isFPOrFPVectorTy()) { unsigned BitWidth = llvm::APFloat::semanticsSizeInBits( - (Ty->isVectorTy() ? Ty->getVectorElementType() : Ty) + (Ty->isVectorTy() ? cast(Ty)->getElementType() : Ty) ->getFltSemantics()); llvm::APInt Payload(64, NaNPayload); if (BitWidth >= 64) diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp --- a/clang/lib/CodeGen/TargetInfo.cpp +++ b/clang/lib/CodeGen/TargetInfo.cpp @@ -3054,7 +3054,7 @@ // Don't pass vXi128 vectors in their native type, the backend can't // legalize them. if (passInt128VectorsInMem() && - IRType->getVectorElementType()->isIntegerTy(128)) { + cast(IRType)->getElementType()->isIntegerTy(128)) { // Use a vXi64 vector. uint64_t Size = getContext().getTypeSize(Ty); return llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()), diff --git a/llvm/include/llvm/Analysis/Utils/Local.h b/llvm/include/llvm/Analysis/Utils/Local.h --- a/llvm/include/llvm/Analysis/Utils/Local.h +++ b/llvm/include/llvm/Analysis/Utils/Local.h @@ -63,7 +63,8 @@ // Splat the constant if needed. 
if (IntIdxTy->isVectorTy() && !OpC->getType()->isVectorTy()) - OpC = ConstantVector::getSplat(IntIdxTy->getVectorElementCount(), OpC); + OpC = ConstantVector::getSplat( + cast(IntIdxTy)->getElementCount(), OpC); Constant *Scale = ConstantInt::get(IntIdxTy, Size); Constant *OC = ConstantExpr::getIntegerCast(OpC, IntIdxTy, true /*SExt*/); @@ -76,7 +77,8 @@ // Splat the index if needed. if (IntIdxTy->isVectorTy() && !Op->getType()->isVectorTy()) - Op = Builder->CreateVectorSplat(IntIdxTy->getVectorNumElements(), Op); + Op = Builder->CreateVectorSplat( + cast(IntIdxTy)->getNumElements(), Op); // Convert to correct type. if (Op->getType() != IntIdxTy) diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h --- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h +++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h @@ -82,16 +82,17 @@ /// Estimate a cost of Broadcast as an extract and sequence of insert /// operations. unsigned getBroadcastShuffleOverhead(Type *Ty) { - assert(Ty->isVectorTy() && "Can only shuffle vectors"); + auto *VTy = dyn_cast(Ty); + assert(VTy && "Can only shuffle vectors"); unsigned Cost = 0; // Broadcast cost is equal to the cost of extracting the zero'th element // plus the cost of inserting it into every element of the result vector. Cost += static_cast(this)->getVectorInstrCost( - Instruction::ExtractElement, Ty, 0); + Instruction::ExtractElement, VTy, 0); - for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) { + for (int i = 0, e = VTy->getNumElements(); i < e; ++i) { Cost += static_cast(this)->getVectorInstrCost( - Instruction::InsertElement, Ty, i); + Instruction::InsertElement, VTy, i); } return Cost; } @@ -99,7 +100,8 @@ /// Estimate a cost of shuffle as a sequence of extract and insert /// operations. unsigned getPermuteShuffleOverhead(Type *Ty) { - assert(Ty->isVectorTy() && "Can only shuffle vectors"); + auto *VTy = dyn_cast(Ty); + assert(VTy && "Can only shuffle vectors"); unsigned Cost = 0; // Shuffle cost is equal to the cost of extracting element from its argument // plus the cost of inserting them onto the result vector. @@ -108,11 +110,11 @@ // index 0 of first vector, index 1 of second vector,index 2 of first // vector and finally index 3 of second vector and insert them at index // <0,1,2,3> of result vector. - for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) { - Cost += static_cast(this) - ->getVectorInstrCost(Instruction::InsertElement, Ty, i); - Cost += static_cast(this) - ->getVectorInstrCost(Instruction::ExtractElement, Ty, i); + for (int i = 0, e = VTy->getNumElements(); i < e; ++i) { + Cost += static_cast(this)->getVectorInstrCost( + Instruction::InsertElement, VTy, i); + Cost += static_cast(this)->getVectorInstrCost( + Instruction::ExtractElement, VTy, i); } return Cost; } @@ -122,8 +124,10 @@ unsigned getExtractSubvectorOverhead(Type *Ty, int Index, Type *SubTy) { assert(Ty && Ty->isVectorTy() && SubTy && SubTy->isVectorTy() && "Can only extract subvectors from vectors"); - int NumSubElts = SubTy->getVectorNumElements(); - assert((Index + NumSubElts) <= (int)Ty->getVectorNumElements() && + auto *VTy = cast(Ty); + auto *SubVTy = cast(SubTy); + int NumSubElts = SubVTy->getNumElements(); + assert((Index + NumSubElts) <= (int)VTy->getNumElements() && "SK_ExtractSubvector index out of range"); unsigned Cost = 0; @@ -132,9 +136,9 @@ // type. 
for (int i = 0; i != NumSubElts; ++i) { Cost += static_cast(this)->getVectorInstrCost( - Instruction::ExtractElement, Ty, i + Index); + Instruction::ExtractElement, VTy, i + Index); Cost += static_cast(this)->getVectorInstrCost( - Instruction::InsertElement, SubTy, i); + Instruction::InsertElement, SubVTy, i); } return Cost; } @@ -144,8 +148,10 @@ unsigned getInsertSubvectorOverhead(Type *Ty, int Index, Type *SubTy) { assert(Ty && Ty->isVectorTy() && SubTy && SubTy->isVectorTy() && "Can only insert subvectors into vectors"); - int NumSubElts = SubTy->getVectorNumElements(); - assert((Index + NumSubElts) <= (int)Ty->getVectorNumElements() && + auto *VTy = cast(Ty); + auto *SubVTy = cast(SubTy); + int NumSubElts = SubVTy->getNumElements(); + assert((Index + NumSubElts) <= (int)VTy->getNumElements() && "SK_InsertSubvector index out of range"); unsigned Cost = 0; @@ -154,9 +160,9 @@ // type. for (int i = 0; i != NumSubElts; ++i) { Cost += static_cast(this)->getVectorInstrCost( - Instruction::ExtractElement, SubTy, i); + Instruction::ExtractElement, SubVTy, i); Cost += static_cast(this)->getVectorInstrCost( - Instruction::InsertElement, Ty, i + Index); + Instruction::InsertElement, VTy, i + Index); } return Cost; } @@ -569,16 +575,17 @@ /// Estimate the overhead of scalarizing an instruction. Insert and Extract /// are set if the result needs to be inserted and/or extracted from vectors. unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) { - assert(Ty->isVectorTy() && "Can only scalarize vectors"); + auto *VTy = dyn_cast(Ty); + assert(VTy && "Can only scalarize vectors"); unsigned Cost = 0; - for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) { + for (int i = 0, e = VTy->getNumElements(); i < e; ++i) { if (Insert) - Cost += static_cast(this) - ->getVectorInstrCost(Instruction::InsertElement, Ty, i); + Cost += static_cast(this)->getVectorInstrCost( + Instruction::InsertElement, VTy, i); if (Extract) - Cost += static_cast(this) - ->getVectorInstrCost(Instruction::ExtractElement, Ty, i); + Cost += static_cast(this)->getVectorInstrCost( + Instruction::ExtractElement, VTy, i); } return Cost; @@ -597,7 +604,7 @@ if (A->getType()->isVectorTy()) { VecTy = A->getType(); // If A is a vector operand, VF should be 1 or correspond to A. - assert((VF == 1 || VF == VecTy->getVectorNumElements()) && + assert((VF == 1 || VF == cast(VecTy)->getNumElements()) && "Vector argument does not match VF"); } else @@ -611,18 +618,17 @@ } unsigned getScalarizationOverhead(Type *VecTy, ArrayRef Args) { - assert(VecTy->isVectorTy()); + auto *VecVTy = cast(VecTy); unsigned Cost = 0; - Cost += getScalarizationOverhead(VecTy, true, false); + Cost += getScalarizationOverhead(VecVTy, true, false); if (!Args.empty()) - Cost += getOperandsScalarizationOverhead(Args, - VecTy->getVectorNumElements()); + Cost += getOperandsScalarizationOverhead(Args, VecVTy->getNumElements()); else // When no information on arguments is provided, we add the cost // associated with one argument as a heuristic. - Cost += getScalarizationOverhead(VecTy, false, true); + Cost += getScalarizationOverhead(VecVTy, false, true); return Cost; } @@ -664,13 +670,13 @@ // Else, assume that we need to scalarize this op. // TODO: If one of the types get legalized by splitting, handle this // similarly to what getCastInstrCost() does. 
- if (Ty->isVectorTy()) { - unsigned Num = Ty->getVectorNumElements(); - unsigned Cost = static_cast(this) - ->getArithmeticInstrCost(Opcode, Ty->getScalarType()); + if (auto VTy = dyn_cast(Ty)) { + unsigned Num = VTy->getNumElements(); + unsigned Cost = static_cast(this)->getArithmeticInstrCost( + Opcode, VTy->getScalarType()); // Return the cost of multiple scalar invocation plus the cost of // inserting and extracting the values. - return getScalarizationOverhead(Ty, Args) + Num * Cost; + return getScalarizationOverhead(VTy, Args) + Num * Cost; } // We don't know anything about this scalar instruction. @@ -765,6 +771,8 @@ // Check vector-to-vector casts. if (Dst->isVectorTy() && Src->isVectorTy()) { + auto *SrcVTy = cast(Src); + auto *DstVTy = cast(Dst); // If the cast is between same-sized registers, then the check is simple. if (SrcLT.first == DstLT.first && SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) { @@ -792,11 +800,11 @@ TargetLowering::TypeSplitVector || TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) == TargetLowering::TypeSplitVector) && - Src->getVectorNumElements() > 1 && Dst->getVectorNumElements() > 1) { - Type *SplitDst = VectorType::get(Dst->getVectorElementType(), - Dst->getVectorNumElements() / 2); - Type *SplitSrc = VectorType::get(Src->getVectorElementType(), - Src->getVectorNumElements() / 2); + SrcVTy->getNumElements() > 1 && DstVTy->getNumElements() > 1) { + Type *SplitDst = VectorType::get(DstVTy->getElementType(), + DstVTy->getNumElements() / 2); + Type *SplitSrc = VectorType::get(SrcVTy->getElementType(), + SrcVTy->getNumElements() / 2); T *TTI = static_cast(this); return TTI->getVectorSplitCost() + (2 * TTI->getCastInstrCost(Opcode, SplitDst, SplitSrc, I)); @@ -804,7 +812,7 @@ // In other cases where the source or destination are illegal, assume // the operation will get scalarized. - unsigned Num = Dst->getVectorNumElements(); + unsigned Num = DstVTy->getNumElements(); unsigned Cost = static_cast(this)->getCastInstrCost( Opcode, Dst->getScalarType(), Src->getScalarType(), I); @@ -864,16 +872,16 @@ // Otherwise, assume that the cast is scalarized. // TODO: If one of the types get legalized by splitting, handle this // similarly to what getCastInstrCost() does. - if (ValTy->isVectorTy()) { - unsigned Num = ValTy->getVectorNumElements(); + if (auto *ValVTy = dyn_cast(ValTy)) { + unsigned Num = ValVTy->getNumElements(); if (CondTy) CondTy = CondTy->getScalarType(); unsigned Cost = static_cast(this)->getCmpSelInstrCost( - Opcode, ValTy->getScalarType(), CondTy, I); + Opcode, ValVTy->getScalarType(), CondTy, I); // Return the cost of multiple scalar invocation plus the cost of // inserting and extracting the values. - return getScalarizationOverhead(ValTy, true, false) + Num * Cost; + return getScalarizationOverhead(ValVTy, true, false) + Num * Cost; } // Unknown scalar opcode. @@ -1079,7 +1087,8 @@ ArrayRef Args, FastMathFlags FMF, unsigned VF = 1, const Instruction *I = nullptr) { - unsigned RetVF = (RetTy->isVectorTy() ? RetTy->getVectorNumElements() : 1); + unsigned RetVF = + (RetTy->isVectorTy() ? 
        cast<VectorType>(RetTy)->getNumElements() : 1);
     assert((RetVF == 1 || VF == 1) && "VF > 1 and RetVF is a vector type");
     auto *ConcreteTTI = static_cast<T *>(this);
@@ -1202,7 +1211,8 @@
     if (RetTy->isVectorTy()) {
       if (ScalarizationCostPassed == std::numeric_limits<unsigned>::max())
         ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
-      ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
+      ScalarCalls = std::max(
+          ScalarCalls, (unsigned)cast<VectorType>(RetTy)->getNumElements());
       ScalarRetTy = RetTy->getScalarType();
     }
     SmallVector<Type *, 4> ScalarTys;
@@ -1211,7 +1221,8 @@
       if (Ty->isVectorTy()) {
         if (ScalarizationCostPassed == std::numeric_limits<unsigned>::max())
           ScalarizationCost += getScalarizationOverhead(Ty, false, true);
-        ScalarCalls = std::max(ScalarCalls, Ty->getVectorNumElements());
+        ScalarCalls = std::max(
+            ScalarCalls, (unsigned)cast<VectorType>(Ty)->getNumElements());
         Ty = Ty->getScalarType();
       }
       ScalarTys.push_back(Ty);
@@ -1543,7 +1554,7 @@
           ((ScalarizationCostPassed != std::numeric_limits<unsigned>::max())
                ? ScalarizationCostPassed
                : getScalarizationOverhead(RetTy, true, false));
-      unsigned ScalarCalls = RetTy->getVectorNumElements();
+      unsigned ScalarCalls = cast<VectorType>(RetTy)->getNumElements();
       SmallVector<Type *, 4> ScalarTys;
       for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
         Type *Ty = Tys[i];
@@ -1557,7 +1568,9 @@
         if (Tys[i]->isVectorTy()) {
           if (ScalarizationCostPassed == std::numeric_limits<unsigned>::max())
             ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
-          ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
+          ScalarCalls =
+              std::max(ScalarCalls,
+                       (unsigned)cast<VectorType>(Tys[i])->getNumElements());
         }
       }
@@ -1631,8 +1644,8 @@
   unsigned getArithmeticReductionCost(unsigned Opcode, Type *Ty,
                                       bool IsPairwise) {
     assert(Ty->isVectorTy() && "Expect a vector type");
-    Type *ScalarTy = Ty->getVectorElementType();
-    unsigned NumVecElts = Ty->getVectorNumElements();
+    Type *ScalarTy = cast<VectorType>(Ty)->getElementType();
+    unsigned NumVecElts = cast<VectorType>(Ty)->getNumElements();
     unsigned NumReduxLevels = Log2_32(NumVecElts);
     unsigned ArithCost = 0;
     unsigned ShuffleCost = 0;
@@ -1681,9 +1694,9 @@
   unsigned getMinMaxReductionCost(Type *Ty, Type *CondTy, bool IsPairwise,
                                   bool) {
     assert(Ty->isVectorTy() && "Expect a vector type");
-    Type *ScalarTy = Ty->getVectorElementType();
-    Type *ScalarCondTy = CondTy->getVectorElementType();
-    unsigned NumVecElts = Ty->getVectorNumElements();
+    Type *ScalarTy = cast<VectorType>(Ty)->getElementType();
+    Type *ScalarCondTy = cast<VectorType>(CondTy)->getElementType();
+    unsigned NumVecElts = cast<VectorType>(Ty)->getNumElements();
     unsigned NumReduxLevels = Log2_32(NumVecElts);
     unsigned CmpOpcode;
     if (Ty->isFPOrFPVectorTy()) {
diff --git a/llvm/include/llvm/IR/DerivedTypes.h b/llvm/include/llvm/IR/DerivedTypes.h
--- a/llvm/include/llvm/IR/DerivedTypes.h
+++ b/llvm/include/llvm/IR/DerivedTypes.h
@@ -546,18 +546,6 @@
   }
 };
 
-unsigned Type::getVectorNumElements() const {
-  return cast<VectorType>(this)->getNumElements();
-}
-
-bool Type::getVectorIsScalable() const {
-  return cast<VectorType>(this)->isScalable();
-}
-
-ElementCount Type::getVectorElementCount() const {
-  return cast<VectorType>(this)->getElementCount();
-}
-
 /// Class to represent pointers.
 class PointerType : public Type {
   explicit PointerType(Type *ElType, unsigned AddrSpace);
@@ -610,8 +598,8 @@
       isIntOrIntVectorTy() &&
       "Original type expected to be a vector of integers or a scalar integer.");
   Type *NewType = getIntNTy(getContext(), NewBitWidth);
-  if (isVectorTy())
-    NewType = VectorType::get(NewType, getVectorElementCount());
+  if (auto *VTy = dyn_cast<VectorType>(this))
+    NewType = VectorType::get(NewType, VTy->getElementCount());
   return NewType;
 }
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -1066,13 +1066,13 @@
     Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
                                    Ptr->getType()->getPointerAddressSpace());
     // Vector GEP
-    if (Ptr->getType()->isVectorTy()) {
-      ElementCount EltCount = Ptr->getType()->getVectorElementCount();
+    if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
+      ElementCount EltCount = PtrVTy->getElementCount();
       return VectorType::get(PtrTy, EltCount);
     }
     for (Value *Index : IdxList)
-      if (Index->getType()->isVectorTy()) {
-        ElementCount EltCount = Index->getType()->getVectorElementCount();
+      if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
+        ElementCount EltCount = IndexVTy->getElementCount();
         return VectorType::get(PtrTy, EltCount);
       }
     // Scalar GEP
@@ -2077,7 +2077,8 @@
   /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
   ///           shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
   bool changesLength() const {
-    unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements();
+    unsigned NumSourceElts =
+        cast<VectorType>(Op<0>()->getType())->getNumElements();
     unsigned NumMaskElts = ShuffleMask.size();
     return NumSourceElts != NumMaskElts;
   }
@@ -2086,7 +2087,8 @@
   /// elements than its source vectors.
   /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
   bool increasesLength() const {
-    unsigned NumSourceElts = Op<0>()->getType()->getVectorNumElements();
+    unsigned NumSourceElts =
+        cast<VectorType>(Op<0>()->getType())->getNumElements();
     unsigned NumMaskElts = ShuffleMask.size();
     return NumSourceElts < NumMaskElts;
   }
@@ -2279,7 +2281,7 @@
   /// Return true if this shuffle mask is an extract subvector mask.
   bool isExtractSubvectorMask(int &Index) const {
-    int NumSrcElts = Op<0>()->getType()->getVectorNumElements();
+    int NumSrcElts = cast<VectorType>(Op<0>()->getType())->getNumElements();
     return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
   }
 
diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h
--- a/llvm/include/llvm/IR/PatternMatch.h
+++ b/llvm/include/llvm/IR/PatternMatch.h
@@ -275,7 +275,7 @@
       return this->isValue(CI->getValue());
 
     // Non-splat vector constant: check each element for a match.
-    unsigned NumElts = V->getType()->getVectorNumElements();
+    unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
     assert(NumElts != 0 && "Constant vector with no elements?");
     bool HasNonUndefElements = false;
     for (unsigned i = 0; i != NumElts; ++i) {
@@ -334,7 +334,7 @@
       return this->isValue(CF->getValueAPF());
 
     // Non-splat vector constant: check each element for a match.
-    unsigned NumElts = V->getType()->getVectorNumElements();
+    unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
     assert(NumElts != 0 && "Constant vector with no elements?");
     bool HasNonUndefElements = false;
     for (unsigned i = 0; i != NumElts; ++i) {
@@ -2173,8 +2173,8 @@
     if (m_PtrToInt(m_OffsetGep(m_Zero(), m_SpecificInt(1))).match(V)) {
       Type *PtrTy = cast<Operator>(V)->getOperand(0)->getType();
-      Type *DerefTy = PtrTy->getPointerElementType();
-      if (DerefTy->isVectorTy() && DerefTy->getVectorIsScalable() &&
+      auto *DerefTy = dyn_cast<VectorType>(PtrTy->getPointerElementType());
+      if (DerefTy && DerefTy->isScalable() &&
           DL.getTypeAllocSizeInBits(DerefTy).getKnownMinSize() == 8)
         return true;
     }
diff --git a/llvm/include/llvm/IR/Type.h b/llvm/include/llvm/IR/Type.h
--- a/llvm/include/llvm/IR/Type.h
+++ b/llvm/include/llvm/IR/Type.h
@@ -304,11 +304,7 @@
 
   /// If this is a vector type, return the element type, otherwise return
   /// 'this'.
-  Type *getScalarType() const {
-    if (isVectorTy())
-      return getVectorElementType();
-    return const_cast<Type *>(this);
-  }
+  Type *getScalarType() const;
 
   //===--------------------------------------------------------------------===//
   // Type Iteration support.
@@ -343,8 +339,8 @@
   //===--------------------------------------------------------------------===//
   // Helper methods corresponding to subclass methods. This forces a cast to
-  // the specified subclass and calls its accessor. "getVectorNumElements" (for
-  // example) is shorthand for cast<VectorType>(Ty)->getNumElements(). This is
+  // the specified subclass and calls its accessor. "getArrayNumElements" (for
+  // example) is shorthand for cast<ArrayType>(Ty)->getNumElements(). This is
   // only intended to cover the core methods that are frequently used, helper
   // methods should not be added here.
 
@@ -370,14 +366,6 @@
     return ContainedTys[0];
   }
 
-  inline bool getVectorIsScalable() const;
-  inline unsigned getVectorNumElements() const;
-  inline ElementCount getVectorElementCount() const;
-  Type *getVectorElementType() const {
-    assert(getTypeID() == VectorTyID);
-    return ContainedTys[0];
-  }
-
   Type *getPointerElementType() const {
     assert(getTypeID() == PointerTyID);
     return ContainedTys[0];
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -153,13 +153,15 @@
   if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
     return ConstantExpr::getBitCast(C, DestTy);
 
+  auto *CVTy = cast<VectorType>(C->getType());
+
   // If the element types match, IR can fold it.
   unsigned NumDstElt = DestVTy->getNumElements();
-  unsigned NumSrcElt = C->getType()->getVectorNumElements();
+  unsigned NumSrcElt = CVTy->getNumElements();
   if (NumDstElt == NumSrcElt)
     return ConstantExpr::getBitCast(C, DestTy);
 
-  Type *SrcEltTy = C->getType()->getVectorElementType();
+  Type *SrcEltTy = CVTy->getElementType();
   Type *DstEltTy = DestVTy->getElementType();
 
   // Otherwise, we're changing the number of elements in a vector, which
@@ -218,7 +220,7 @@
     for (unsigned j = 0; j != Ratio; ++j) {
       Constant *Src = C->getAggregateElement(SrcElt++);
       if (Src && isa<UndefValue>(Src))
-        Src = Constant::getNullValue(C->getType()->getVectorElementType());
+        Src = Constant::getNullValue(CVTy->getElementType());
       else
         Src = dyn_cast_or_null<ConstantInt>(Src);
       if (!Src)  // Reject constantexpr elements.
@@ -471,7 +473,7 @@ if (auto *AT = dyn_cast(C->getType())) NumElts = AT->getNumElements(); else - NumElts = C->getType()->getVectorNumElements(); + NumElts = cast(C->getType())->getNumElements(); for (; Index != NumElts; ++Index) { if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr, @@ -505,7 +507,7 @@ Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy, const DataLayout &DL) { // Bail out early. Not expect to load from scalable global variable. - if (LoadTy->isVectorTy() && LoadTy->getVectorIsScalable()) + if (LoadTy->isVectorTy() && cast(LoadTy)->isScalable()) return nullptr; auto *PTy = cast(C->getType()); @@ -834,7 +836,7 @@ Type *ResElemTy = GEP->getResultElementType(); Type *ResTy = GEP->getType(); if (!SrcElemTy->isSized() || - (SrcElemTy->isVectorTy() && SrcElemTy->getVectorIsScalable())) + (SrcElemTy->isVectorTy() && cast(SrcElemTy)->isScalable())) return nullptr; if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy, @@ -2571,7 +2573,7 @@ // Do not iterate on scalable vector. The number of elements is unknown at // compile-time. - if (VTy->getVectorIsScalable()) + if (VTy->isScalable()) return nullptr; if (IntrinsicID == Intrinsic::masked_load) { diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp --- a/llvm/lib/Analysis/InstructionSimplify.cpp +++ b/llvm/lib/Analysis/InstructionSimplify.cpp @@ -945,8 +945,9 @@ // If any element of a constant divisor vector is zero or undef, the whole op // is undef. auto *Op1C = dyn_cast(Op1); - if (Op1C && Ty->isVectorTy()) { - unsigned NumElts = Ty->getVectorNumElements(); + auto *VTy = dyn_cast(Ty); + if (Op1C && VTy) { + unsigned NumElts = VTy->getNumElements(); for (unsigned i = 0; i != NumElts; ++i) { Constant *Elt = Op1C->getAggregateElement(i); if (Elt && (Elt->isNullValue() || isa(Elt))) @@ -1221,7 +1222,8 @@ // If all lanes of a vector shift are undefined the whole shift is. if (isa(C) || isa(C)) { - for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I) + for (unsigned I = 0, E = cast(C->getType())->getNumElements(); + I != E; ++I) if (!isUndefShift(C->getAggregateElement(I))) return false; return true; @@ -4011,7 +4013,7 @@ Constant *TrueC, *FalseC; if (TrueVal->getType()->isVectorTy() && match(TrueVal, m_Constant(TrueC)) && match(FalseVal, m_Constant(FalseC))) { - unsigned NumElts = TrueC->getType()->getVectorNumElements(); + unsigned NumElts = cast(TrueC->getType())->getNumElements(); SmallVector NewC; for (unsigned i = 0; i != NumElts; ++i) { // Bail out on incomplete vector constants. @@ -4081,7 +4083,7 @@ return UndefValue::get(GEPTy); bool IsScalableVec = - SrcTy->isVectorTy() ? SrcTy->getVectorIsScalable() : false; + isa(SrcTy) && cast(SrcTy)->isScalable(); if (Ops.size() == 2) { // getelementptr P, 0 -> P. @@ -4223,8 +4225,8 @@ // For fixed-length vector, fold into undef if index is out of bounds. if (auto *CI = dyn_cast(Idx)) { - if (!Vec->getType()->getVectorIsScalable() && - CI->uge(Vec->getType()->getVectorNumElements())) + if (!cast(Vec->getType())->isScalable() && + CI->uge(cast(Vec->getType())->getNumElements())) return UndefValue::get(Vec->getType()); } @@ -4280,6 +4282,7 @@ /// If not, this returns null. 
static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &, unsigned) { + auto *VecVTy = cast(Vec->getType()); if (auto *CVec = dyn_cast(Vec)) { if (auto *CIdx = dyn_cast(Idx)) return ConstantFoldExtractElementInstruction(CVec, CIdx); @@ -4289,16 +4292,15 @@ return Splat; if (isa(Vec)) - return UndefValue::get(Vec->getType()->getVectorElementType()); + return UndefValue::get(VecVTy->getElementType()); } // If extracting a specified index from the vector, see if we can recursively // find a previously computed scalar that was inserted into the vector. if (auto *IdxC = dyn_cast(Idx)) { // For fixed-length vector, fold into undef if index is out of bounds. - if (!Vec->getType()->getVectorIsScalable() && - IdxC->getValue().uge(Vec->getType()->getVectorNumElements())) - return UndefValue::get(Vec->getType()->getVectorElementType()); + if (!VecVTy->isScalable() && IdxC->getValue().uge(VecVTy->getNumElements())) + return UndefValue::get(VecVTy->getElementType()); if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue())) return Elt; } @@ -4306,7 +4308,7 @@ // An undef extract index can be arbitrarily chosen to be an out-of-range // index value, which would result in the instruction being undef. if (isa(Idx)) - return UndefValue::get(Vec->getType()->getVectorElementType()); + return UndefValue::get(VecVTy->getElementType()); return nullptr; } @@ -4403,7 +4405,7 @@ return nullptr; // The mask value chooses which source operand we need to look at next. - int InVecNumElts = Op0->getType()->getVectorNumElements(); + int InVecNumElts = cast(Op0->getType())->getNumElements(); int RootElt = MaskVal; Value *SourceOp = Op0; if (MaskVal >= InVecNumElts) { @@ -4446,9 +4448,9 @@ if (all_of(Mask, [](int Elem) { return Elem == UndefMaskElem; })) return UndefValue::get(RetTy); - Type *InVecTy = Op0->getType(); + auto *InVecTy = cast(Op0->getType()); unsigned MaskNumElts = Mask.size(); - ElementCount InVecEltCount = InVecTy->getVectorElementCount(); + ElementCount InVecEltCount = InVecTy->getElementCount(); bool Scalable = InVecEltCount.Scalable; diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp --- a/llvm/lib/Analysis/Loads.cpp +++ b/llvm/lib/Analysis/Loads.cpp @@ -148,7 +148,8 @@ const DominatorTree *DT) { // For unsized types or scalable vectors we don't know exactly how many bytes // are dereferenced, so bail out. - if (!Ty->isSized() || (Ty->isVectorTy() && Ty->getVectorIsScalable())) + if (!Ty->isSized() || + (Ty->isVectorTy() && cast(Ty)->isScalable())) return false; // When dereferenceability information is provided by a dereferenceable diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp --- a/llvm/lib/Analysis/MemoryBuiltins.cpp +++ b/llvm/lib/Analysis/MemoryBuiltins.cpp @@ -650,7 +650,7 @@ return unknown(); if (I.getAllocatedType()->isVectorTy() && - I.getAllocatedType()->getVectorIsScalable()) + cast(I.getAllocatedType())->isScalable()) return unknown(); APInt Size(IntTyBits, DL.getTypeAllocSize(I.getAllocatedType())); diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp --- a/llvm/lib/Analysis/TargetTransformInfo.cpp +++ b/llvm/lib/Analysis/TargetTransformInfo.cpp @@ -866,7 +866,8 @@ else if (!SI) return false; - SmallVector Mask(SI->getType()->getVectorNumElements(), -1); + SmallVector Mask(cast(SI->getType())->getNumElements(), + -1); // Build a mask of 0, 2, ... (left) or 1, 3, ... (right) depending on whether // we look at the left or right side. 
@@ -1028,8 +1029,8 @@ if (!RD) return RK_None; - Type *VecTy = RdxStart->getType(); - unsigned NumVecElems = VecTy->getVectorNumElements(); + auto *VecTy = cast(RdxStart->getType()); + unsigned NumVecElems = VecTy->getNumElements(); if (!isPowerOf2_32(NumVecElems)) return RK_None; @@ -1093,8 +1094,8 @@ if (!RD) return RK_None; - Type *VecTy = ReduxRoot->getOperand(0)->getType(); - unsigned NumVecElems = VecTy->getVectorNumElements(); + auto *VecTy = cast(ReduxRoot->getOperand(0)->getType()); + unsigned NumVecElems = VecTy->getNumElements(); if (!isPowerOf2_32(NumVecElems)) return RK_None; diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -168,11 +168,12 @@ APInt &DemandedLHS, APInt &DemandedRHS) { // The length of scalable vectors is unknown at compile time, thus we // cannot check their values - if (Shuf->getType()->getVectorElementCount().Scalable) + if (Shuf->getType()->isScalable()) return false; - int NumElts = Shuf->getOperand(0)->getType()->getVectorNumElements(); - int NumMaskElts = Shuf->getType()->getVectorNumElements(); + int NumElts = + cast(Shuf->getOperand(0)->getType())->getNumElements(); + int NumMaskElts = Shuf->getType()->getNumElements(); DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts); if (DemandedElts.isNullValue()) return true; @@ -206,9 +207,10 @@ static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Query &Q) { Type *Ty = V->getType(); - APInt DemandedElts = Ty->isVectorTy() - ? APInt::getAllOnesValue(Ty->getVectorNumElements()) - : APInt(1, 1); + APInt DemandedElts = + Ty->isVectorTy() + ? APInt::getAllOnesValue(cast(Ty)->getNumElements()) + : APInt(1, 1); computeKnownBits(V, DemandedElts, Known, Depth, Q); } @@ -373,9 +375,10 @@ static unsigned ComputeNumSignBits(const Value *V, unsigned Depth, const Query &Q) { Type *Ty = V->getType(); - APInt DemandedElts = Ty->isVectorTy() - ? APInt::getAllOnesValue(Ty->getVectorNumElements()) - : APInt(1, 1); + APInt DemandedElts = + Ty->isVectorTy() + ? 
APInt::getAllOnesValue(cast(Ty)->getNumElements()) + : APInt(1, 1); return ComputeNumSignBits(V, DemandedElts, Depth, Q); } @@ -1809,7 +1812,7 @@ const Value *Vec = I->getOperand(0); const Value *Idx = I->getOperand(1); auto *CIdx = dyn_cast(Idx); - unsigned NumElts = Vec->getType()->getVectorNumElements(); + unsigned NumElts = cast(Vec->getType())->getNumElements(); APInt DemandedVecElts = APInt::getAllOnesValue(NumElts); if (CIdx && CIdx->getValue().ult(NumElts)) DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); @@ -1888,8 +1891,8 @@ Type *Ty = V->getType(); assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) && "Not integer or pointer type!"); - assert(((Ty->isVectorTy() && - Ty->getVectorNumElements() == DemandedElts.getBitWidth()) || + assert(((Ty->isVectorTy() && cast(Ty)->getNumElements() == + DemandedElts.getBitWidth()) || (!Ty->isVectorTy() && DemandedElts == APInt(1, 1))) && "Unexpected vector size"); @@ -2528,7 +2531,7 @@ const Value *Vec = EEI->getVectorOperand(); const Value *Idx = EEI->getIndexOperand(); auto *CIdx = dyn_cast(Idx); - unsigned NumElts = Vec->getType()->getVectorNumElements(); + unsigned NumElts = cast(Vec->getType())->getNumElements(); APInt DemandedVecElts = APInt::getAllOnesValue(NumElts); if (CIdx && CIdx->getValue().ult(NumElts)) DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); @@ -2542,9 +2545,10 @@ bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) { Type *Ty = V->getType(); - APInt DemandedElts = Ty->isVectorTy() - ? APInt::getAllOnesValue(Ty->getVectorNumElements()) - : APInt(1, 1); + APInt DemandedElts = + Ty->isVectorTy() + ? APInt::getAllOnesValue(cast(Ty)->getNumElements()) + : APInt(1, 1); return isKnownNonZero(V, DemandedElts, Depth, Q); } @@ -2645,7 +2649,7 @@ return 0; unsigned MinSignBits = TyBits; - unsigned NumElts = CV->getType()->getVectorNumElements(); + unsigned NumElts = cast(CV->getType())->getNumElements(); for (unsigned i = 0; i != NumElts; ++i) { if (!DemandedElts[i]) continue; @@ -2688,8 +2692,8 @@ // same behavior for poison though -- that's a FIXME today. Type *Ty = V->getType(); - assert(((Ty->isVectorTy() && - Ty->getVectorNumElements() == DemandedElts.getBitWidth()) || + assert(((Ty->isVectorTy() && cast(Ty)->getNumElements() == + DemandedElts.getBitWidth()) || (!Ty->isVectorTy() && DemandedElts == APInt(1, 1))) && "Unexpected vector size"); @@ -3264,8 +3268,8 @@ // Handle vector of constants. if (auto *CV = dyn_cast(V)) { - if (CV->getType()->isVectorTy()) { - unsigned NumElts = CV->getType()->getVectorNumElements(); + if (auto *CVVTy = dyn_cast(CV->getType())) { + unsigned NumElts = CVVTy->getNumElements(); for (unsigned i = 0; i != NumElts; ++i) { auto *CFP = dyn_cast_or_null(CV->getAggregateElement(i)); if (!CFP) @@ -3441,7 +3445,7 @@ return false; // For vectors, verify that each element is not infinity. - unsigned NumElts = V->getType()->getVectorNumElements(); + unsigned NumElts = cast(V->getType())->getNumElements(); for (unsigned i = 0; i != NumElts; ++i) { Constant *Elt = cast(V)->getAggregateElement(i); if (!Elt) @@ -3542,7 +3546,7 @@ return false; // For vectors, verify that each element is not NaN. 
- unsigned NumElts = V->getType()->getVectorNumElements(); + unsigned NumElts = cast(V->getType())->getNumElements(); for (unsigned i = 0; i != NumElts; ++i) { Constant *Elt = cast(V)->getAggregateElement(i); if (!Elt) diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp --- a/llvm/lib/Analysis/VectorUtils.cpp +++ b/llvm/lib/Analysis/VectorUtils.cpp @@ -263,7 +263,7 @@ assert(V->getType()->isVectorTy() && "Not looking at a vector?"); VectorType *VTy = cast(V->getType()); // For fixed-length vector, return undef for out of range access. - if (!V->getType()->getVectorIsScalable()) { + if (!VTy->isScalable()) { unsigned Width = VTy->getNumElements(); if (EltNo >= Width) return UndefValue::get(VTy->getElementType()); @@ -289,7 +289,8 @@ } if (ShuffleVectorInst *SVI = dyn_cast(V)) { - unsigned LHSWidth = SVI->getOperand(0)->getType()->getVectorNumElements(); + unsigned LHSWidth = + cast(SVI->getOperand(0)->getType())->getNumElements(); int InEl = SVI->getMaskValue(EltNo); if (InEl < 0) return UndefValue::get(VTy->getElementType()); @@ -805,8 +806,9 @@ return false; if (ConstMask->isNullValue() || isa(ConstMask)) return true; - for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E; - ++I) { + for (unsigned I = 0, + E = cast(ConstMask->getType())->getNumElements(); + I != E; ++I) { if (auto *MaskElt = ConstMask->getAggregateElement(I)) if (MaskElt->isNullValue() || isa(MaskElt)) continue; @@ -822,8 +824,9 @@ return false; if (ConstMask->isAllOnesValue() || isa(ConstMask)) return true; - for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E; - ++I) { + for (unsigned I = 0, + E = cast(ConstMask->getType())->getNumElements(); + I != E; ++I) { if (auto *MaskElt = ConstMask->getAggregateElement(I)) if (MaskElt->isAllOnesValue() || isa(MaskElt)) continue; diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp --- a/llvm/lib/AsmParser/LLParser.cpp +++ b/llvm/lib/AsmParser/LLParser.cpp @@ -3588,16 +3588,17 @@ ExplicitTypeLoc, "explicit pointee type doesn't match operand's pointee type"); - unsigned GEPWidth = - BaseType->isVectorTy() ? BaseType->getVectorNumElements() : 0; + unsigned GEPWidth = BaseType->isVectorTy() + ? cast(BaseType)->getNumElements() + : 0; ArrayRef Indices(Elts.begin() + 1, Elts.end()); for (Constant *Val : Indices) { Type *ValTy = Val->getType(); if (!ValTy->isIntOrIntVectorTy()) return Error(ID.Loc, "getelementptr index must be an integer"); - if (ValTy->isVectorTy()) { - unsigned ValNumEl = ValTy->getVectorNumElements(); + if (auto *ValVTy = dyn_cast(ValTy)) { + unsigned ValNumEl = ValVTy->getNumElements(); if (GEPWidth && (ValNumEl != GEPWidth)) return Error( ID.Loc, @@ -7233,8 +7234,9 @@ bool AteExtraComma = false; // GEP returns a vector of pointers if at least one of parameters is a vector. // All vector parameters should have the same vector width. - ElementCount GEPWidth = BaseType->isVectorTy() ? - BaseType->getVectorElementCount() : ElementCount(0, false); + ElementCount GEPWidth = BaseType->isVectorTy() + ? 
cast(BaseType)->getElementCount() + : ElementCount(0, false); while (EatIfPresent(lltok::comma)) { if (Lex.getKind() == lltok::MetadataVar) { @@ -7245,8 +7247,8 @@ if (!Val->getType()->isIntOrIntVectorTy()) return Error(EltLoc, "getelementptr index must be an integer"); - if (Val->getType()->isVectorTy()) { - ElementCount ValNumEl = Val->getType()->getVectorElementCount(); + if (auto *ValVTy = dyn_cast(Val->getType())) { + ElementCount ValNumEl = ValVTy->getElementCount(); if (GEPWidth != ElementCount(0, false) && GEPWidth != ValNumEl) return Error(EltLoc, "getelementptr vector index has a wrong number of elements"); diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp --- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp @@ -4164,7 +4164,7 @@ if (!Vec->getType()->isVectorTy()) return error("Invalid type for value"); I = ExtractElementInst::Create(Vec, Idx); - FullTy = FullTy->getVectorElementType(); + FullTy = cast(FullTy)->getElementType(); InstructionList.push_back(I); break; } @@ -4198,8 +4198,9 @@ return error("Invalid type for value"); I = new ShuffleVectorInst(Vec1, Vec2, Mask); - FullTy = VectorType::get(FullTy->getVectorElementType(), - Mask->getType()->getVectorElementCount()); + FullTy = + VectorType::get(cast(FullTy)->getElementType(), + cast(Mask->getType())->getElementCount()); InstructionList.push_back(I); break; } @@ -5191,8 +5192,8 @@ !FullTy->isPointerTy() && !isa(FullTy) && !isa(FullTy) && (!isa(FullTy) || - FullTy->getVectorElementType()->isFloatingPointTy() || - FullTy->getVectorElementType()->isIntegerTy()) && + cast(FullTy)->getElementType()->isFloatingPointTy() || + cast(FullTy)->getElementType()->isIntegerTy()) && "Structured types must be assigned with corresponding non-opaque " "pointer type"); } diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -6577,7 +6577,7 @@ UseSplat = true; } - ElementCount EC = getTransitionType()->getVectorElementCount(); + ElementCount EC = cast(getTransitionType())->getElementCount(); if (UseSplat) return ConstantVector::getSplat(EC, Val); @@ -6840,7 +6840,7 @@ // whereas scalable vectors would have to be shifted by // <2log(vscale) + number of bits> in order to store the // low/high parts. Bailing out for now. 
- if (StoreType->isVectorTy() && StoreType->getVectorIsScalable()) + if (StoreType->isVectorTy() && cast<VectorType>(StoreType)->isScalable()) return false; if (!DL.typeSizeEqualsStoreSize(StoreType) || diff --git a/llvm/lib/CodeGen/ExpandReductions.cpp b/llvm/lib/CodeGen/ExpandReductions.cpp --- a/llvm/lib/CodeGen/ExpandReductions.cpp +++ b/llvm/lib/CodeGen/ExpandReductions.cpp @@ -125,7 +125,7 @@ if (!FMF.allowReassoc()) Rdx = getOrderedReduction(Builder, Acc, Vec, getOpcode(ID), MRK); else { - if (!isPowerOf2_32(Vec->getType()->getVectorNumElements())) + if (!isPowerOf2_32(cast<VectorType>(Vec->getType())->getNumElements())) continue; Rdx = getShuffleReduction(Builder, Vec, getOpcode(ID), MRK); @@ -146,7 +146,7 @@ case Intrinsic::experimental_vector_reduce_fmax: case Intrinsic::experimental_vector_reduce_fmin: { Value *Vec = II->getArgOperand(0); - if (!isPowerOf2_32(Vec->getType()->getVectorNumElements())) + if (!isPowerOf2_32(cast<VectorType>(Vec->getType())->getNumElements())) continue; Rdx = getShuffleReduction(Builder, Vec, getOpcode(ID), MRK); diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -1914,7 +1914,7 @@ MachineIRBuilder &MIRBuilder) { // If it is a <1 x Ty> vector, use the scalar as it is // not a legal vector type in LLT. - if (U.getType()->getVectorNumElements() == 1) { + if (cast<VectorType>(U.getType())->getNumElements() == 1) { Register Elt = getOrCreateVReg(*U.getOperand(1)); auto &Regs = *VMap.getVRegs(U); if (Regs.empty()) { @@ -1938,7 +1938,7 @@ MachineIRBuilder &MIRBuilder) { // If it is a <1 x Ty> vector, use the scalar as it is // not a legal vector type in LLT. - if (U.getOperand(0)->getType()->getVectorNumElements() == 1) { + if (cast<VectorType>(U.getOperand(0)->getType())->getNumElements() == 1) { Register Elt = getOrCreateVReg(*U.getOperand(0)); auto &Regs = *VMap.getVRegs(U); if (Regs.empty()) { diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp --- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp +++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp @@ -308,7 +308,7 @@ unsigned Factor, Index; - unsigned NumLoadElements = LI->getType()->getVectorNumElements(); + unsigned NumLoadElements = cast<VectorType>(LI->getType())->getNumElements(); // Check if the first shufflevector is DE-interleave shuffle. if (!isDeInterleaveMask(Shuffles[0]->getShuffleMask(), Factor, Index, MaxFactor, NumLoadElements)) @@ -426,7 +426,8 @@ // Check if the shufflevector is RE-interleave shuffle.
unsigned Factor; - unsigned OpNumElts = SVI->getOperand(0)->getType()->getVectorNumElements(); + unsigned OpNumElts = + cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements(); if (!isReInterleaveMask(SVI->getShuffleMask(), Factor, MaxFactor, OpNumElts)) return false; diff --git a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp --- a/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp +++ b/llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp @@ -82,7 +82,7 @@ if (!C) return false; - unsigned NumElts = Mask->getType()->getVectorNumElements(); + unsigned NumElts = cast<VectorType>(Mask->getType())->getNumElements(); for (unsigned i = 0; i != NumElts; ++i) { Constant *CElt = C->getAggregateElement(i); if (!CElt || !isa<ConstantInt>(CElt)) @@ -521,9 +521,10 @@ assert(isa<VectorType>(Src->getType()) && "Unexpected data type in masked scatter intrinsic"); - assert(isa<VectorType>(Ptrs->getType()) && - isa<PointerType>(Ptrs->getType()->getVectorElementType()) && - "Vector of pointers is expected in masked scatter intrinsic"); + assert( + isa<VectorType>(Ptrs->getType()) && + isa<PointerType>(cast<VectorType>(Ptrs->getType())->getElementType()) && + "Vector of pointers is expected in masked scatter intrinsic"); IRBuilder<> Builder(CI->getContext()); Instruction *InsertPt = CI; @@ -532,7 +533,7 @@ Builder.SetCurrentDebugLocation(CI->getDebugLoc()); MaybeAlign AlignVal(cast<ConstantInt>(Alignment)->getZExtValue()); - unsigned VectorWidth = Src->getType()->getVectorNumElements(); + unsigned VectorWidth = cast<VectorType>(Src->getType())->getNumElements(); // Shorten the way if the mask is a vector of constants. if (isConstantIntVector(Mask)) { @@ -725,7 +726,7 @@ Builder.SetInsertPoint(InsertPt); Builder.SetCurrentDebugLocation(CI->getDebugLoc()); - Type *EltTy = VecType->getVectorElementType(); + Type *EltTy = VecType->getElementType(); unsigned VectorWidth = VecType->getNumElements(); diff --git a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp --- a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp @@ -161,7 +161,7 @@ // Scalable vectors may need a special StackID to distinguish // them from other (fixed size) stack objects. - if (Ty->isVectorTy() && Ty->getVectorIsScalable()) + if (Ty->isVectorTy() && cast<VectorType>(Ty)->isScalable()) MF->getFrameInfo().setStackID(FrameIndex, TFI->getStackIDForScalableVectors()); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -3741,8 +3741,9 @@ // Normalize Vector GEP - all scalar operands should be converted to the // splat vector. bool IsVectorGEP = I.getType()->isVectorTy(); - ElementCount VectorElementCount = IsVectorGEP ? - I.getType()->getVectorElementCount() : ElementCount(0, false); + ElementCount VectorElementCount = + IsVectorGEP ?
cast(I.getType())->getElementCount() + : ElementCount(0, false); if (IsVectorGEP && !N.getValueType().isVector()) { LLVMContext &Context = *DAG.getContext(); @@ -4302,7 +4303,7 @@ IndexType = ISD::SIGNED_SCALED; if (STy || !Index.getValueType().isVector()) { - unsigned GEPWidth = GEP->getType()->getVectorNumElements(); + unsigned GEPWidth = cast(GEP->getType())->getNumElements(); EVT VT = EVT::getVectorVT(Context, Index.getValueType(), GEPWidth); Index = DAG.getSplatBuildVector(VT, SDLoc(Index), Index); } diff --git a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp --- a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp +++ b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp @@ -1681,8 +1681,8 @@ return APIntToHexString(CI->getValue()); } else { unsigned NumElements; - if (isa(Ty)) - NumElements = Ty->getVectorNumElements(); + if (auto *VTy = dyn_cast(Ty)) + NumElements = VTy->getNumElements(); else NumElements = Ty->getArrayNumElements(); std::string HexString; diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp --- a/llvm/lib/IR/AsmWriter.cpp +++ b/llvm/lib/IR/AsmWriter.cpp @@ -464,7 +464,7 @@ static void PrintShuffleMask(raw_ostream &Out, Type *Ty, ArrayRef Mask) { Out << ", <"; - if (Ty->getVectorIsScalable()) + if (cast(Ty)->isScalable()) Out << "vscale x "; Out << Mask.size() << " x i32> "; bool FirstElt = true; @@ -1504,13 +1504,14 @@ } if (isa(CV) || isa(CV)) { - Type *ETy = CV->getType()->getVectorElementType(); + auto *CVVTy = cast(CV->getType()); + Type *ETy = CVVTy->getElementType(); Out << '<'; TypePrinter.print(ETy, Out); Out << ' '; WriteAsOperandInternal(Out, CV->getAggregateElement(0U), &TypePrinter, Machine, Context); - for (unsigned i = 1, e = CV->getType()->getVectorNumElements(); i != e;++i){ + for (unsigned i = 1, e = CVVTy->getNumElements(); i != e; ++i) { Out << ", "; TypePrinter.print(ETy, Out); Out << ' '; diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp --- a/llvm/lib/IR/AutoUpgrade.cpp +++ b/llvm/lib/IR/AutoUpgrade.cpp @@ -899,8 +899,8 @@ // to byte shuffles. static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, Value *Op, unsigned Shift) { - Type *ResultTy = Op->getType(); - unsigned NumElts = ResultTy->getVectorNumElements() * 8; + auto *ResultTy = cast(Op->getType()); + unsigned NumElts = ResultTy->getNumElements() * 8; // Bitcast from a 64-bit element type to a byte element type. Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts); @@ -933,8 +933,8 @@ // to byte shuffles. static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op, unsigned Shift) { - Type *ResultTy = Op->getType(); - unsigned NumElts = ResultTy->getVectorNumElements() * 8; + auto *ResultTy = cast(Op->getType()); + unsigned NumElts = ResultTy->getNumElements() * 8; // Bitcast from a 64-bit element type to a byte element type. 
Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts); @@ -990,7 +990,8 @@ if (C->isAllOnesValue()) return Op0; - Mask = getX86MaskVec(Builder, Mask, Op0->getType()->getVectorNumElements()); + Mask = getX86MaskVec(Builder, Mask, + cast(Op0->getType())->getNumElements()); return Builder.CreateSelect(Mask, Op0, Op1); } @@ -1018,7 +1019,7 @@ bool IsVALIGN) { unsigned ShiftVal = cast(Shift)->getZExtValue(); - unsigned NumElts = Op0->getType()->getVectorNumElements(); + unsigned NumElts = cast(Op0->getType())->getNumElements(); assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!"); assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!"); assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!"); @@ -1149,7 +1150,7 @@ // Funnel shifts amounts are treated as modulo and types are all power-of-2 so // we only care about the lowest log2 bits anyway. if (Amt->getType() != Ty) { - unsigned NumElts = Ty->getVectorNumElements(); + unsigned NumElts = cast(Ty)->getNumElements(); Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false); Amt = Builder.CreateVectorSplat(NumElts, Amt); } @@ -1219,7 +1220,7 @@ // Funnel shifts amounts are treated as modulo and types are all power-of-2 so // we only care about the lowest log2 bits anyway. if (Amt->getType() != Ty) { - unsigned NumElts = Ty->getVectorNumElements(); + unsigned NumElts = cast(Ty)->getNumElements(); Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false); Amt = Builder.CreateVectorSplat(NumElts, Amt); } @@ -1255,7 +1256,7 @@ return Builder.CreateAlignedStore(Data, Ptr, Alignment); // Convert the mask from an integer type to a vector of i1. - unsigned NumElts = Data->getType()->getVectorNumElements(); + unsigned NumElts = cast(Data->getType())->getNumElements(); Mask = getX86MaskVec(Builder, Mask, NumElts); return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask); } @@ -1276,7 +1277,7 @@ return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment); // Convert the mask from an integer type to a vector of i1. - unsigned NumElts = Passthru->getType()->getVectorNumElements(); + unsigned NumElts = cast(Passthru->getType())->getNumElements(); Mask = getX86MaskVec(Builder, Mask, NumElts); return Builder.CreateMaskedLoad(Ptr, Alignment, Mask, Passthru); } @@ -1340,7 +1341,7 @@ // Applying mask on vector of i1's and make sure result is at least 8 bits wide. 
static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec, Value *Mask) { - unsigned NumElts = Vec->getType()->getVectorNumElements(); + unsigned NumElts = cast(Vec->getType())->getNumElements(); if (Mask) { const auto *C = dyn_cast(Mask); if (!C || !C->isAllOnesValue()) @@ -1363,7 +1364,7 @@ static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI, unsigned CC, bool Signed) { Value *Op0 = CI.getArgOperand(0); - unsigned NumElts = Op0->getType()->getVectorNumElements(); + unsigned NumElts = cast(Op0->getType())->getNumElements(); Value *Cmp; if (CC == 3) { @@ -1416,7 +1417,7 @@ static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallInst &CI) { Value* Op = CI.getArgOperand(0); Type* ReturnOp = CI.getType(); - unsigned NumElts = CI.getType()->getVectorNumElements(); + unsigned NumElts = cast(CI.getType())->getNumElements(); Value *Mask = getX86MaskVec(Builder, Op, NumElts); return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2"); } @@ -1866,7 +1867,7 @@ Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask); } else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))){ unsigned NumElts = - CI->getArgOperand(1)->getType()->getVectorNumElements(); + cast(CI->getArgOperand(1)->getType())->getNumElements(); Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0)); Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, CI->getArgOperand(1)); @@ -2084,16 +2085,19 @@ Name == "sse2.cvtsi2sd" || Name == "sse.cvtsi642ss" || Name == "sse2.cvtsi642sd")) { - Rep = Builder.CreateSIToFP(CI->getArgOperand(1), - CI->getType()->getVectorElementType()); + Rep = Builder.CreateSIToFP( + CI->getArgOperand(1), + cast(CI->getType())->getElementType()); Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); } else if (IsX86 && Name == "avx512.cvtusi2sd") { - Rep = Builder.CreateUIToFP(CI->getArgOperand(1), - CI->getType()->getVectorElementType()); + Rep = Builder.CreateUIToFP( + CI->getArgOperand(1), + cast(CI->getType())->getElementType()); Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); } else if (IsX86 && Name == "sse2.cvtss2sd") { Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0); - Rep = Builder.CreateFPExt(Rep, CI->getType()->getVectorElementType()); + Rep = Builder.CreateFPExt( + Rep, cast(CI->getType())->getElementType()); Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); } else if (IsX86 && (Name == "sse2.cvtdq2pd" || Name == "sse2.cvtdq2ps" || @@ -2113,18 +2117,18 @@ Name == "avx.cvt.ps2.pd.256" || Name == "avx512.mask.cvtps2pd.128" || Name == "avx512.mask.cvtps2pd.256")) { - Type *DstTy = CI->getType(); + auto *DstTy = cast(CI->getType()); Rep = CI->getArgOperand(0); - Type *SrcTy = Rep->getType(); + auto *SrcTy = cast(Rep->getType()); - unsigned NumDstElts = DstTy->getVectorNumElements(); - if (NumDstElts < SrcTy->getVectorNumElements()) { + unsigned NumDstElts = DstTy->getNumElements(); + if (NumDstElts < SrcTy->getNumElements()) { assert(NumDstElts == 2 && "Unexpected vector size"); uint32_t ShuffleMask[2] = { 0, 1 }; Rep = Builder.CreateShuffleVector(Rep, Rep, ShuffleMask); } - bool IsPS2PD = SrcTy->getVectorElementType()->isFloatTy(); + bool IsPS2PD = SrcTy->getElementType()->isFloatTy(); bool IsUnsigned = (StringRef::npos != Name.find("cvtu")); if (IsPS2PD) Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd"); @@ -2146,11 +2150,11 @@ CI->getArgOperand(1)); } else if (IsX86 && (Name.startswith("avx512.mask.vcvtph2ps.") || Name.startswith("vcvtph2ps."))) { - Type *DstTy = 
CI->getType(); + auto *DstTy = cast(CI->getType()); Rep = CI->getArgOperand(0); - Type *SrcTy = Rep->getType(); - unsigned NumDstElts = DstTy->getVectorNumElements(); - if (NumDstElts != SrcTy->getVectorNumElements()) { + auto *SrcTy = cast(Rep->getType()); + unsigned NumDstElts = DstTy->getNumElements(); + if (NumDstElts != SrcTy->getNumElements()) { assert(NumDstElts == 4 && "Unexpected vector size"); uint32_t ShuffleMask[4] = {0, 1, 2, 3}; Rep = Builder.CreateShuffleVector(Rep, Rep, ShuffleMask); @@ -2170,30 +2174,30 @@ CI->getArgOperand(1),CI->getArgOperand(2), /*Aligned*/true); } else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) { - Type *ResultTy = CI->getType(); - Type *PtrTy = ResultTy->getVectorElementType(); + auto *ResultTy = cast(CI->getType()); + Type *PtrTy = ResultTy->getElementType(); // Cast the pointer to element type. Value *Ptr = Builder.CreateBitCast(CI->getOperand(0), llvm::PointerType::getUnqual(PtrTy)); Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), - ResultTy->getVectorNumElements()); + ResultTy->getNumElements()); Function *ELd = Intrinsic::getDeclaration(F->getParent(), Intrinsic::masked_expandload, ResultTy); Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) }); } else if (IsX86 && Name.startswith("avx512.mask.compress.store.")) { - Type *ResultTy = CI->getArgOperand(1)->getType(); - Type *PtrTy = ResultTy->getVectorElementType(); + auto *ResultTy = cast(CI->getArgOperand(1)->getType()); + Type *PtrTy = ResultTy->getElementType(); // Cast the pointer to element type. Value *Ptr = Builder.CreateBitCast(CI->getOperand(0), llvm::PointerType::getUnqual(PtrTy)); Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), - ResultTy->getVectorNumElements()); + ResultTy->getNumElements()); Function *CSt = Intrinsic::getDeclaration(F->getParent(), Intrinsic::masked_compressstore, @@ -2201,10 +2205,10 @@ Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec }); } else if (IsX86 && (Name.startswith("avx512.mask.compress.") || Name.startswith("avx512.mask.expand."))) { - Type *ResultTy = CI->getType(); + auto *ResultTy = cast(CI->getType()); Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), - ResultTy->getVectorNumElements()); + ResultTy->getNumElements()); bool IsCompress = Name[12] == 'c'; Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress @@ -2281,9 +2285,9 @@ } else if (IsX86 && (Name.startswith("avx.vbroadcast.s") || Name.startswith("avx512.vbroadcast.s"))) { // Replace broadcasts with a series of insertelements. - Type *VecTy = CI->getType(); - Type *EltTy = VecTy->getVectorElementType(); - unsigned EltNum = VecTy->getVectorNumElements(); + auto *VecTy = cast(CI->getType()); + Type *EltTy = VecTy->getElementType(); + unsigned EltNum = VecTy->getNumElements(); Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0), EltTy->getPointerTo()); Value *Load = Builder.CreateLoad(EltTy, Cast); @@ -2328,7 +2332,7 @@ } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") || Name == "avx2.vbroadcasti128")) { // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle. 
- Type *EltTy = CI->getType()->getVectorElementType(); + Type *EltTy = cast(CI->getType())->getElementType(); unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits(); Type *VT = VectorType::get(EltTy, NumSrcElts); Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0), @@ -2366,8 +2370,8 @@ }else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") || Name.startswith("avx512.mask.broadcasti"))) { unsigned NumSrcElts = - CI->getArgOperand(0)->getType()->getVectorNumElements(); - unsigned NumDstElts = CI->getType()->getVectorNumElements(); + cast(CI->getArgOperand(0)->getType())->getNumElements(); + unsigned NumDstElts = cast(CI->getType())->getNumElements(); SmallVector ShuffleMask(NumDstElts); for (unsigned i = 0; i != NumDstElts; ++i) @@ -2384,8 +2388,8 @@ Name.startswith("avx512.mask.broadcast.s"))) { // Replace vp?broadcasts with a vector shuffle. Value *Op = CI->getArgOperand(0); - unsigned NumElts = CI->getType()->getVectorNumElements(); - Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts); + ElementCount EC = cast(CI->getType())->getElementCount(); + Type *MaskTy = VectorType::get(Type::getInt32Ty(C), EC); Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()), Constant::getNullValue(MaskTy)); @@ -2470,8 +2474,8 @@ Value *Op0 = CI->getArgOperand(0); Value *Op1 = CI->getArgOperand(1); unsigned Imm = cast(CI->getArgOperand(2))->getZExtValue(); - unsigned DstNumElts = CI->getType()->getVectorNumElements(); - unsigned SrcNumElts = Op1->getType()->getVectorNumElements(); + unsigned DstNumElts = cast(CI->getType())->getNumElements(); + unsigned SrcNumElts = cast(Op1->getType())->getNumElements(); unsigned Scale = DstNumElts / SrcNumElts; // Mask off the high bits of the immediate value; hardware ignores those. @@ -2514,8 +2518,8 @@ Name.startswith("avx512.mask.vextract"))) { Value *Op0 = CI->getArgOperand(0); unsigned Imm = cast(CI->getArgOperand(1))->getZExtValue(); - unsigned DstNumElts = CI->getType()->getVectorNumElements(); - unsigned SrcNumElts = Op0->getType()->getVectorNumElements(); + unsigned DstNumElts = cast(CI->getType())->getNumElements(); + unsigned SrcNumElts = cast(Op0->getType())->getNumElements(); unsigned Scale = SrcNumElts / DstNumElts; // Mask off the high bits of the immediate value; hardware ignores those. 
@@ -2562,7 +2566,7 @@ uint8_t Imm = cast(CI->getArgOperand(2))->getZExtValue(); - unsigned NumElts = CI->getType()->getVectorNumElements(); + unsigned NumElts = cast(CI->getType())->getNumElements(); unsigned HalfSize = NumElts / 2; SmallVector ShuffleMask(NumElts); @@ -2614,7 +2618,7 @@ Name.startswith("avx512.mask.pshufl.w."))) { Value *Op0 = CI->getArgOperand(0); unsigned Imm = cast(CI->getArgOperand(1))->getZExtValue(); - unsigned NumElts = CI->getType()->getVectorNumElements(); + unsigned NumElts = cast(CI->getType())->getNumElements(); SmallVector Idxs(NumElts); for (unsigned l = 0; l != NumElts; l += 8) { @@ -2633,7 +2637,7 @@ Name.startswith("avx512.mask.pshufh.w."))) { Value *Op0 = CI->getArgOperand(0); unsigned Imm = cast(CI->getArgOperand(1))->getZExtValue(); - unsigned NumElts = CI->getType()->getVectorNumElements(); + unsigned NumElts = cast(CI->getType())->getNumElements(); SmallVector Idxs(NumElts); for (unsigned l = 0; l != NumElts; l += 8) { @@ -2652,7 +2656,7 @@ Value *Op0 = CI->getArgOperand(0); Value *Op1 = CI->getArgOperand(1); unsigned Imm = cast(CI->getArgOperand(2))->getZExtValue(); - unsigned NumElts = CI->getType()->getVectorNumElements(); + unsigned NumElts = cast(CI->getType())->getNumElements(); unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); unsigned HalfLaneElts = NumLaneElts / 2; @@ -2677,7 +2681,7 @@ Name.startswith("avx512.mask.movshdup") || Name.startswith("avx512.mask.movsldup"))) { Value *Op0 = CI->getArgOperand(0); - unsigned NumElts = CI->getType()->getVectorNumElements(); + unsigned NumElts = cast(CI->getType())->getNumElements(); unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); unsigned Offset = 0; @@ -2699,7 +2703,7 @@ Name.startswith("avx512.mask.unpckl."))) { Value *Op0 = CI->getArgOperand(0); Value *Op1 = CI->getArgOperand(1); - int NumElts = CI->getType()->getVectorNumElements(); + int NumElts = cast(CI->getType())->getNumElements(); int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); SmallVector Idxs(NumElts); @@ -2715,7 +2719,7 @@ Name.startswith("avx512.mask.unpckh."))) { Value *Op0 = CI->getArgOperand(0); Value *Op1 = CI->getArgOperand(1); - int NumElts = CI->getType()->getVectorNumElements(); + int NumElts = cast(CI->getType())->getNumElements(); int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); SmallVector Idxs(NumElts); @@ -3283,7 +3287,7 @@ Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), Ops); } else { - int NumElts = CI->getType()->getVectorNumElements(); + int NumElts = cast(CI->getType())->getNumElements(); Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), CI->getArgOperand(2) }; diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp --- a/llvm/lib/IR/ConstantFold.cpp +++ b/llvm/lib/IR/ConstantFold.cpp @@ -56,13 +56,13 @@ // doing so requires endianness information. This should be handled by // Analysis/ConstantFolding.cpp unsigned NumElts = DstTy->getNumElements(); - if (NumElts != CV->getType()->getVectorNumElements()) + if (NumElts != cast(CV->getType())->getNumElements()) return nullptr; Type *DstEltTy = DstTy->getElementType(); // Fast path for splatted constants. if (Constant *Splat = CV->getSplatValue()) { - return ConstantVector::getSplat(DstTy->getVectorElementCount(), + return ConstantVector::getSplat(DstTy->getElementCount(), ConstantExpr::getBitCast(Splat, DstEltTy)); } @@ -581,18 +581,20 @@ // count may be mismatched; don't attempt to handle that here. 
if ((isa(V) || isa(V)) && DestTy->isVectorTy() && - DestTy->getVectorNumElements() == V->getType()->getVectorNumElements()) { + cast(DestTy)->getNumElements() == + cast(V->getType())->getNumElements()) { VectorType *DestVecTy = cast(DestTy); Type *DstEltTy = DestVecTy->getElementType(); // Fast path for splatted constants. if (Constant *Splat = V->getSplatValue()) { return ConstantVector::getSplat( - DestTy->getVectorElementCount(), + cast(DestTy)->getElementCount(), ConstantExpr::getCast(opc, Splat, DstEltTy)); } SmallVector res; Type *Ty = IntegerType::get(V->getContext(), 32); - for (unsigned i = 0, e = V->getType()->getVectorNumElements(); i != e; ++i) { + for (unsigned i = 0, e = cast(V->getType())->getNumElements(); + i != e; ++i) { Constant *C = ConstantExpr::getExtractElement(V, ConstantInt::get(Ty, i)); res.push_back(ConstantExpr::getCast(opc, C, DstEltTy)); @@ -752,11 +754,13 @@ if (Cond->isNullValue()) return V2; if (Cond->isAllOnesValue()) return V1; + auto *V1VTy = cast(V1->getType()); + // If the condition is a vector constant, fold the result elementwise. if (ConstantVector *CondV = dyn_cast(Cond)) { SmallVector Result; Type *Ty = IntegerType::get(CondV->getContext(), 32); - for (unsigned i = 0, e = V1->getType()->getVectorNumElements(); i != e;++i){ + for (unsigned i = 0, e = V1VTy->getNumElements(); i != e; ++i) { Constant *V; Constant *V1Element = ConstantExpr::getExtractElement(V1, ConstantInt::get(Ty, i)); @@ -775,7 +779,7 @@ } // If we were able to build the vector, return it. - if (Result.size() == V1->getType()->getVectorNumElements()) + if (Result.size() == V1VTy->getNumElements()) return ConstantVector::get(Result); } @@ -803,18 +807,20 @@ Constant *llvm::ConstantFoldExtractElementInstruction(Constant *Val, Constant *Idx) { + auto *ValVTy = cast(Val->getType()); + // extractelt undef, C -> undef // extractelt C, undef -> undef if (isa(Val) || isa(Idx)) - return UndefValue::get(Val->getType()->getVectorElementType()); + return UndefValue::get(ValVTy->getElementType()); auto *CIdx = dyn_cast(Idx); if (!CIdx) return nullptr; // ee({w,x,y,z}, wrong_value) -> undef - if (CIdx->uge(Val->getType()->getVectorNumElements())) - return UndefValue::get(Val->getType()->getVectorElementType()); + if (CIdx->uge(ValVTy->getNumElements())) + return UndefValue::get(ValVTy->getElementType()); // ee (gep (ptr, idx0, ...), idx) -> gep (ee (ptr, idx), ee (idx0, idx), ...) if (auto *CE = dyn_cast(Val)) { @@ -831,8 +837,7 @@ } else Ops.push_back(Op); } - return CE->getWithOperands(Ops, CE->getType()->getVectorElementType(), - false, + return CE->getWithOperands(Ops, ValVTy->getElementType(), false, Ops[0]->getType()->getPointerElementType()); } } @@ -855,7 +860,7 @@ if (ValTy->isScalable()) return nullptr; - unsigned NumElts = Val->getType()->getVectorNumElements(); + unsigned NumElts = cast(Val->getType())->getNumElements(); if (CIdx->uge(NumElts)) return UndefValue::get(Val->getType()); @@ -878,10 +883,10 @@ Constant *llvm::ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2, ArrayRef Mask) { + auto *V1VTy = cast(V1->getType()); unsigned MaskNumElts = Mask.size(); - ElementCount MaskEltCount = {MaskNumElts, - V1->getType()->getVectorIsScalable()}; - Type *EltTy = V1->getType()->getVectorElementType(); + ElementCount MaskEltCount = {MaskNumElts, V1VTy->isScalable()}; + Type *EltTy = V1VTy->getElementType(); // Undefined shuffle mask -> undefined value. 
if (all_of(Mask, [](int Elt) { return Elt == UndefMaskElem; })) { @@ -899,11 +904,10 @@ } // Do not iterate on scalable vector. The num of elements is unknown at // compile-time. - VectorType *ValTy = cast(V1->getType()); - if (ValTy->isScalable()) + if (V1VTy->isScalable()) return nullptr; - unsigned SrcNumElts = V1->getType()->getVectorNumElements(); + unsigned SrcNumElts = V1VTy->getNumElements(); // Loop over the shuffle mask, evaluating each element. SmallVector Result; @@ -979,8 +983,8 @@ // Handle scalar UndefValue and scalable vector UndefValue. Fixed-length // vectors are always evaluated per element. - bool IsScalableVector = - C->getType()->isVectorTy() && C->getType()->getVectorIsScalable(); + bool IsScalableVector = isa(C->getType()) && + cast(C->getType())->isScalable(); bool HasScalarUndefOrScalableVectorUndef = (!C->getType()->isVectorTy() || IsScalableVector) && isa(C); @@ -1053,8 +1057,8 @@ // Handle scalar UndefValue and scalable vector UndefValue. Fixed-length // vectors are always evaluated per element. - bool IsScalableVector = - C1->getType()->isVectorTy() && C1->getType()->getVectorIsScalable(); + bool IsScalableVector = isa(C1->getType()) && + cast(C1->getType())->isScalable(); bool HasScalarUndefOrScalableVectorUndef = (!C1->getType()->isVectorTy() || IsScalableVector) && (isa(C1) || isa(C2)); @@ -1386,7 +1390,7 @@ return UndefValue::get(VTy); if (Constant *C1Splat = C1->getSplatValue()) { return ConstantVector::getSplat( - VTy->getVectorElementCount(), + VTy->getElementCount(), ConstantExpr::get(Opcode, C1Splat, C2Splat)); } } @@ -2003,24 +2007,26 @@ return ConstantInt::get(ResultTy, R==APFloat::cmpGreaterThan || R==APFloat::cmpEqual); } - } else if (C1->getType()->isVectorTy()) { - // Do not iterate on scalable vector. The number of elements is unknown at - // compile-time. - if (C1->getType()->getVectorIsScalable()) - return nullptr; + } else if (auto *C1VTy = dyn_cast(C1->getType())) { + // Fast path for splatted constants. if (Constant *C1Splat = C1->getSplatValue()) if (Constant *C2Splat = C2->getSplatValue()) return ConstantVector::getSplat( - C1->getType()->getVectorElementCount(), + C1VTy->getElementCount(), ConstantExpr::getCompare(pred, C1Splat, C2Splat)); + // Do not iterate on scalable vector. The number of elements is unknown at + // compile-time. + if (C1VTy->isScalable()) + return nullptr; + // If we can constant fold the comparison of each element, constant fold // the whole vector comparison. SmallVector ResElts; Type *Ty = IntegerType::get(C1->getContext(), 32); // Compare the elements, producing an i1 result or constant expr. - for (unsigned i = 0, e = C1->getType()->getVectorNumElements(); i != e;++i){ + for (unsigned i = 0, e = C1VTy->getNumElements(); i != e; ++i) { Constant *C1E = ConstantExpr::getExtractElement(C1, ConstantInt::get(Ty, i)); Constant *C2E = @@ -2273,7 +2279,8 @@ Constant *Idx0 = cast(Idxs[0]); if (Idxs.size() == 1 && (Idx0->isNullValue() || isa(Idx0))) return GEPTy->isVectorTy() && !C->getType()->isVectorTy() - ? ConstantVector::getSplat(GEPTy->getVectorElementCount(), C) + ? 
ConstantVector::getSplat( + cast(GEPTy)->getElementCount(), C) : C; if (C->isNullValue()) { @@ -2505,18 +2512,19 @@ if (!IsCurrIdxVector && IsPrevIdxVector) CurrIdx = ConstantDataVector::getSplat( - PrevIdx->getType()->getVectorNumElements(), CurrIdx); + cast(PrevIdx->getType())->getNumElements(), CurrIdx); if (!IsPrevIdxVector && IsCurrIdxVector) PrevIdx = ConstantDataVector::getSplat( - CurrIdx->getType()->getVectorNumElements(), PrevIdx); + cast(CurrIdx->getType())->getNumElements(), PrevIdx); Constant *Factor = ConstantInt::get(CurrIdx->getType()->getScalarType(), NumElements); if (UseVector) Factor = ConstantDataVector::getSplat( - IsPrevIdxVector ? PrevIdx->getType()->getVectorNumElements() - : CurrIdx->getType()->getVectorNumElements(), + IsPrevIdxVector + ? cast(PrevIdx->getType())->getNumElements() + : cast(CurrIdx->getType())->getNumElements(), Factor); NewIdxs[i] = ConstantExpr::getSRem(CurrIdx, Factor); @@ -2533,9 +2541,10 @@ Type *ExtendedTy = Type::getIntNTy(Div->getContext(), CommonExtendedWidth); if (UseVector) ExtendedTy = VectorType::get( - ExtendedTy, IsPrevIdxVector - ? PrevIdx->getType()->getVectorNumElements() - : CurrIdx->getType()->getVectorNumElements()); + ExtendedTy, + IsPrevIdxVector + ? cast(PrevIdx->getType())->getNumElements() + : cast(CurrIdx->getType())->getNumElements()); if (!PrevIdx->getType()->isIntOrIntVectorTy(CommonExtendedWidth)) PrevIdx = ConstantExpr::getSExt(PrevIdx, ExtendedTy); diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp --- a/llvm/lib/IR/Constants.cpp +++ b/llvm/lib/IR/Constants.cpp @@ -160,8 +160,8 @@ return !CFP->getValueAPF().bitcastToAPInt().isOneValue(); // Check that vectors don't contain 1 - if (this->getType()->isVectorTy()) { - unsigned NumElts = this->getType()->getVectorNumElements(); + if (auto *VTy = dyn_cast(this->getType())) { + unsigned NumElts = VTy->getNumElements(); for (unsigned i = 0; i != NumElts; ++i) { Constant *Elt = this->getAggregateElement(i); if (!Elt || !Elt->isNotOneValue()) @@ -210,8 +210,8 @@ return !CFP->getValueAPF().bitcastToAPInt().isMinSignedValue(); // Check that vectors don't contain INT_MIN - if (this->getType()->isVectorTy()) { - unsigned NumElts = this->getType()->getVectorNumElements(); + if (auto *VTy = dyn_cast(this->getType())) { + unsigned NumElts = VTy->getNumElements(); for (unsigned i = 0; i != NumElts; ++i) { Constant *Elt = this->getAggregateElement(i); if (!Elt || !Elt->isNotMinSignedValue()) @@ -227,9 +227,10 @@ bool Constant::isFiniteNonZeroFP() const { if (auto *CFP = dyn_cast(this)) return CFP->getValueAPF().isFiniteNonZero(); - if (!getType()->isVectorTy()) + auto *VTy = dyn_cast(getType()); + if (!VTy || VTy->isScalable()) return false; - for (unsigned i = 0, e = getType()->getVectorNumElements(); i != e; ++i) { + for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) { auto *CFP = dyn_cast_or_null(this->getAggregateElement(i)); if (!CFP || !CFP->getValueAPF().isFiniteNonZero()) return false; @@ -240,9 +241,10 @@ bool Constant::isNormalFP() const { if (auto *CFP = dyn_cast(this)) return CFP->getValueAPF().isNormal(); - if (!getType()->isVectorTy()) + auto *VTy = dyn_cast(getType()); + if (!VTy || VTy->isScalable()) return false; - for (unsigned i = 0, e = getType()->getVectorNumElements(); i != e; ++i) { + for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) { auto *CFP = dyn_cast_or_null(this->getAggregateElement(i)); if (!CFP || !CFP->getValueAPF().isNormal()) return false; @@ -253,9 +255,10 @@ bool Constant::hasExactInverseFP() const { if (auto 
*CFP = dyn_cast(this)) return CFP->getValueAPF().getExactInverse(nullptr); - if (!getType()->isVectorTy()) + auto *VTy = dyn_cast(getType()); + if (!VTy || VTy->isScalable()) return false; - for (unsigned i = 0, e = getType()->getVectorNumElements(); i != e; ++i) { + for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) { auto *CFP = dyn_cast_or_null(this->getAggregateElement(i)); if (!CFP || !CFP->getValueAPF().getExactInverse(nullptr)) return false; @@ -266,9 +269,10 @@ bool Constant::isNaN() const { if (auto *CFP = dyn_cast(this)) return CFP->isNaN(); - if (!getType()->isVectorTy()) + auto *VTy = dyn_cast(getType()); + if (!VTy || VTy->isScalable()) return false; - for (unsigned i = 0, e = getType()->getVectorNumElements(); i != e; ++i) { + for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) { auto *CFP = dyn_cast_or_null(this->getAggregateElement(i)); if (!CFP || !CFP->isNaN()) return false; @@ -282,18 +286,18 @@ return true; // The input value must be a vector constant with the same type. - Type *Ty = getType(); - if (!isa(Y) || !Ty->isVectorTy() || Ty != Y->getType()) + auto *VTy = dyn_cast(getType()); + if (!isa(Y) || !VTy || VTy != Y->getType()) return false; // TODO: Compare pointer constants? - if (!(Ty->getVectorElementType()->isIntegerTy() || - Ty->getVectorElementType()->isFloatingPointTy())) + if (!(VTy->getElementType()->isIntegerTy() || + VTy->getElementType()->isFloatingPointTy())) return false; // They may still be identical element-wise (if they have `undef`s). // Bitcast to integer to allow exact bitwise comparison for all types. - Type *IntTy = VectorType::getInteger(cast(Ty)); + Type *IntTy = VectorType::getInteger(VTy); Constant *C0 = ConstantExpr::getBitCast(const_cast(this), IntTy); Constant *C1 = ConstantExpr::getBitCast(cast(Y), IntTy); Constant *CmpEq = ConstantExpr::getICmp(ICmpInst::ICMP_EQ, C0, C1); @@ -301,21 +305,25 @@ } bool Constant::containsUndefElement() const { - if (!getType()->isVectorTy()) - return false; - for (unsigned i = 0, e = getType()->getVectorNumElements(); i != e; ++i) - if (isa(getAggregateElement(i))) - return true; + if (auto *VTy = dyn_cast(getType())) { + if (VTy->isScalable()) + return false; // FIXME: the vector may contain undefined + for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) + if (isa(getAggregateElement(i))) + return true; + } return false; } bool Constant::containsConstantExpression() const { - if (!getType()->isVectorTy()) - return false; - for (unsigned i = 0, e = getType()->getVectorNumElements(); i != e; ++i) - if (isa(getAggregateElement(i))) - return true; + if (auto *VTy = dyn_cast(getType())) { + if (VTy->isScalable()) + return false; // FIXME: the vector may contain CE + for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) + if (isa(getAggregateElement(i))) + return true; + } return false; } @@ -639,10 +647,11 @@ } // Don't know how to deal with this constant. 
- if (!Ty->isVectorTy()) + auto *VTy = dyn_cast(Ty); + if (!VTy || VTy->isScalable()) return C; - unsigned NumElts = Ty->getVectorNumElements(); + unsigned NumElts = VTy->getNumElements(); SmallVector NewC(NumElts); for (unsigned i = 0; i != NumElts; ++i) { Constant *EltC = C->getAggregateElement(i); @@ -1484,7 +1493,7 @@ Constant *Constant::getSplatValue(bool AllowUndefs) const { assert(this->getType()->isVectorTy() && "Only valid for vectors!"); if (isa(this)) - return getNullValue(this->getType()->getVectorElementType()); + return getNullValue(cast(getType())->getElementType()); if (const ConstantDataVector *CV = dyn_cast(this)) return CV->getSplatValue(); if (const ConstantVector *CV = dyn_cast(this)) @@ -1884,8 +1893,9 @@ assert(DstTy->isIntOrIntVectorTy() && "PtrToInt destination must be integer or integer vector"); assert(isa(C->getType()) == isa(DstTy)); - if (isa(C->getType())) - assert(C->getType()->getVectorNumElements()==DstTy->getVectorNumElements()&& + if (auto *CVTy = dyn_cast(C->getType())) + assert(CVTy->getElementCount() == + cast(DstTy)->getElementCount() && "Invalid cast between a different number of vector elements"); return getFoldedCast(Instruction::PtrToInt, C, DstTy, OnlyIfReduced); } @@ -1897,8 +1907,9 @@ assert(DstTy->isPtrOrPtrVectorTy() && "IntToPtr destination must be a pointer or pointer vector"); assert(isa(C->getType()) == isa(DstTy)); - if (isa(C->getType())) - assert(C->getType()->getVectorNumElements()==DstTy->getVectorNumElements()&& + if (auto *CVTy = dyn_cast(C->getType())) + assert(CVTy->getElementCount() == + cast(DstTy)->getElementCount() && "Invalid cast between a different number of vector elements"); return getFoldedCast(Instruction::IntToPtr, C, DstTy, OnlyIfReduced); } @@ -2145,9 +2156,10 @@ ArgVec.reserve(1 + Idxs.size()); ArgVec.push_back(C); for (unsigned i = 0, e = Idxs.size(); i != e; ++i) { - assert((!Idxs[i]->getType()->isVectorTy() || - Idxs[i]->getType()->getVectorElementCount() == EltCount) && - "getelementptr index type missmatch"); + assert( + (!isa(Idxs[i]->getType()) || + cast(Idxs[i]->getType())->getElementCount() == EltCount) && + "getelementptr index type missmatch"); Constant *Idx = cast(Idxs[i]); if (EltCount.Min != 0 && !Idxs[i]->getType()->isVectorTy()) @@ -2225,7 +2237,7 @@ if (Constant *FC = ConstantFoldExtractElementInstruction(Val, Idx)) return FC; // Fold a few common cases. - Type *ReqTy = Val->getType()->getVectorElementType(); + Type *ReqTy = cast(Val->getType())->getElementType(); if (OnlyIfReducedTy == ReqTy) return nullptr; @@ -2241,7 +2253,7 @@ Constant *Idx, Type *OnlyIfReducedTy) { assert(Val->getType()->isVectorTy() && "Tried to create insertelement operation on non-vector type!"); - assert(Elt->getType() == Val->getType()->getVectorElementType() && + assert(Elt->getType() == cast(Val->getType())->getElementType() && "Insertelement types must match!"); assert(Idx->getType()->isIntegerTy() && "Insertelement index must be i32 type!"); @@ -2270,8 +2282,9 @@ return FC; // Fold a few common cases. 
unsigned NElts = Mask.size(); - Type *EltTy = V1->getType()->getVectorElementType(); - bool TypeIsScalable = V1->getType()->getVectorIsScalable(); + auto V1VTy = cast(V1->getType()); + Type *EltTy = V1VTy->getElementType(); + bool TypeIsScalable = V1VTy->isScalable(); Type *ShufTy = VectorType::get(EltTy, NElts, TypeIsScalable); if (OnlyIfReducedTy == ShufTy) @@ -2561,7 +2574,7 @@ unsigned ConstantDataSequential::getNumElements() const { if (ArrayType *AT = dyn_cast(getType())) return AT->getNumElements(); - return getType()->getVectorNumElements(); + return cast(getType())->getNumElements(); } diff --git a/llvm/lib/IR/ConstantsContext.h b/llvm/lib/IR/ConstantsContext.h --- a/llvm/lib/IR/ConstantsContext.h +++ b/llvm/lib/IR/ConstantsContext.h @@ -150,7 +150,8 @@ ShuffleVectorConstantExpr(Constant *C1, Constant *C2, ArrayRef Mask) : ConstantExpr( VectorType::get(cast(C1->getType())->getElementType(), - Mask.size(), C1->getType()->getVectorIsScalable()), + Mask.size(), + cast(C1->getType())->isScalable()), Instruction::ShuffleVector, &Op<0>(), 2) { assert(ShuffleVectorInst::isValidOperands(C1, C2, Mask) && "Invalid shuffle vector instruction operands!"); diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp --- a/llvm/lib/IR/Function.cpp +++ b/llvm/lib/IR/Function.cpp @@ -634,8 +634,8 @@ } else if (VectorType* VTy = dyn_cast(Ty)) { if (VTy->isScalable()) Result += "nx"; - Result += "v" + utostr(VTy->getVectorNumElements()) + - getMangledTypeStr(VTy->getVectorElementType()); + Result += "v" + utostr(VTy->getNumElements()) + + getMangledTypeStr(VTy->getElementType()); } else if (Ty) { switch (Ty->getTypeID()) { default: llvm_unreachable("Unhandled type"); @@ -1043,7 +1043,7 @@ VectorType *VTy = dyn_cast(Ty); if (!VTy) llvm_unreachable("Expected an argument of Vector Type"); - Type *EltTy = VTy->getVectorElementType(); + Type *EltTy = VTy->getElementType(); return PointerType::getUnqual(EltTy); } case IITDescriptor::VecElementArgument: { @@ -1062,9 +1062,9 @@ // Return the overloaded type (which determines the pointers address space) return Tys[D.getOverloadArgNumber()]; case IITDescriptor::ScalableVecArgument: { - Type *Ty = DecodeFixedType(Infos, Tys, Context); - return VectorType::get(Ty->getVectorElementType(), - { Ty->getVectorNumElements(), true }); + auto *Ty = cast(DecodeFixedType(Infos, Tys, Context)); + return VectorType::get(Ty->getElementType(), + {(unsigned)Ty->getNumElements(), true}); } } llvm_unreachable("unhandled"); @@ -1269,7 +1269,7 @@ if (ReferenceType->getElementCount() != ThisArgType->getElementCount()) return true; - EltTy = ThisArgType->getVectorElementType(); + EltTy = ThisArgType->getElementType(); } return matchIntrinsicType(EltTy, Infos, ArgTys, DeferredChecks, IsDeferredCheck); @@ -1314,15 +1314,13 @@ VectorType *ReferenceType = dyn_cast(ArgTys[RefArgNumber]); VectorType *ThisArgVecTy = dyn_cast(Ty); if (!ThisArgVecTy || !ReferenceType || - (ReferenceType->getVectorNumElements() != - ThisArgVecTy->getVectorNumElements())) + (ReferenceType->getNumElements() != ThisArgVecTy->getNumElements())) return true; PointerType *ThisArgEltTy = - dyn_cast(ThisArgVecTy->getVectorElementType()); + dyn_cast(ThisArgVecTy->getElementType()); if (!ThisArgEltTy) return true; - return ThisArgEltTy->getElementType() != - ReferenceType->getVectorElementType(); + return ThisArgEltTy->getElementType() != ReferenceType->getElementType(); } case IITDescriptor::VecElementArgument: { if (D.getArgumentNumber() >= ArgTys.size()) diff --git a/llvm/lib/IR/IRBuilder.cpp 
b/llvm/lib/IR/IRBuilder.cpp --- a/llvm/lib/IR/IRBuilder.cpp +++ b/llvm/lib/IR/IRBuilder.cpp @@ -534,7 +534,7 @@ const Twine &Name) { auto PtrsTy = cast(Ptrs->getType()); auto PtrTy = cast(PtrsTy->getElementType()); - unsigned NumElts = PtrsTy->getVectorNumElements(); + unsigned NumElts = PtrsTy->getNumElements(); Type *DataTy = VectorType::get(PtrTy->getElementType(), NumElts); if (!Mask) @@ -564,11 +564,11 @@ Align Alignment, Value *Mask) { auto PtrsTy = cast(Ptrs->getType()); auto DataTy = cast(Data->getType()); - unsigned NumElts = PtrsTy->getVectorNumElements(); + unsigned NumElts = PtrsTy->getNumElements(); #ifndef NDEBUG auto PtrTy = cast(PtrsTy->getElementType()); - assert(NumElts == DataTy->getVectorNumElements() && + assert(NumElts == DataTy->getNumElements() && PtrTy->getElementType() == DataTy->getElementType() && "Incompatible pointer and data types"); #endif diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp --- a/llvm/lib/IR/Instructions.cpp +++ b/llvm/lib/IR/Instructions.cpp @@ -1882,7 +1882,8 @@ Instruction *InsertBefore) : Instruction( VectorType::get(cast(V1->getType())->getElementType(), - Mask.size(), V1->getType()->getVectorIsScalable()), + Mask.size(), + cast(V1->getType())->isScalable()), ShuffleVector, OperandTraits::op_begin(this), OperandTraits::operands(this), InsertBefore) { assert(isValidOperands(V1, V2, Mask) && @@ -1897,7 +1898,8 @@ const Twine &Name, BasicBlock *InsertAtEnd) : Instruction( VectorType::get(cast(V1->getType())->getElementType(), - Mask.size(), V1->getType()->getVectorIsScalable()), + Mask.size(), + cast(V1->getType())->isScalable()), ShuffleVector, OperandTraits::op_begin(this), OperandTraits::operands(this), InsertAtEnd) { assert(isValidOperands(V1, V2, Mask) && @@ -1910,7 +1912,7 @@ } void ShuffleVectorInst::commute() { - int NumOpElts = Op<0>()->getType()->getVectorNumElements(); + int NumOpElts = cast(Op<0>()->getType())->getNumElements(); int NumMaskElts = ShuffleMask.size(); SmallVector NewMask(NumMaskElts); for (int i = 0; i != NumMaskElts; ++i) { @@ -1939,7 +1941,7 @@ if (Elem != UndefMaskElem && Elem >= V1Size * 2) return false; - if (V1->getType()->getVectorIsScalable()) + if (cast(V1->getType())->isScalable()) if ((Mask[0] != 0 && Mask[0] != UndefMaskElem) || !is_splat(Mask)) return false; @@ -1955,7 +1957,7 @@ // Mask must be vector of i32. auto *MaskTy = dyn_cast(Mask->getType()); if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) || - MaskTy->isScalable() != V1->getType()->getVectorIsScalable()) + MaskTy->isScalable() != cast(V1->getType())->isScalable()) return false; // Check to see if Mask is valid. 
@@ -1988,7 +1990,7 @@ void ShuffleVectorInst::getShuffleMask(const Constant *Mask, SmallVectorImpl &Result) { - unsigned NumElts = Mask->getType()->getVectorElementCount().Min; + unsigned NumElts = cast(Mask->getType())->getElementCount().Min; if (isa(Mask)) { Result.resize(NumElts, 0); return; @@ -2013,7 +2015,7 @@ Constant *ShuffleVectorInst::convertShuffleMaskForBitcode(ArrayRef Mask, Type *ResultTy) { Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext()); - if (ResultTy->getVectorIsScalable()) { + if (cast(ResultTy)->isScalable()) { assert(is_splat(Mask) && "Unexpected shuffle"); Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true); if (Mask[0] == 0) @@ -2173,8 +2175,8 @@ } bool ShuffleVectorInst::isIdentityWithPadding() const { - int NumOpElts = Op<0>()->getType()->getVectorNumElements(); - int NumMaskElts = getType()->getVectorNumElements(); + int NumOpElts = cast(Op<0>()->getType())->getNumElements(); + int NumMaskElts = cast(getType())->getNumElements(); if (NumMaskElts <= NumOpElts) return false; @@ -2192,8 +2194,8 @@ } bool ShuffleVectorInst::isIdentityWithExtract() const { - int NumOpElts = Op<0>()->getType()->getVectorNumElements(); - int NumMaskElts = getType()->getVectorNumElements(); + int NumOpElts = cast(Op<0>()->getType())->getNumElements(); + int NumMaskElts = getType()->getNumElements(); if (NumMaskElts >= NumOpElts) return false; @@ -2205,8 +2207,8 @@ if (isa(Op<0>()) || isa(Op<1>())) return false; - int NumOpElts = Op<0>()->getType()->getVectorNumElements(); - int NumMaskElts = getType()->getVectorNumElements(); + int NumOpElts = cast(Op<0>()->getType())->getNumElements(); + int NumMaskElts = getType()->getNumElements(); if (NumMaskElts != NumOpElts * 2) return false; @@ -2947,7 +2949,8 @@ "Invalid cast"); assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); assert((!Ty->isVectorTy() || - Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && + cast(Ty)->getElementCount() == + cast(S->getType())->getElementCount()) && "Invalid cast"); if (Ty->isIntOrIntVectorTy()) @@ -2965,7 +2968,8 @@ "Invalid cast"); assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast"); assert((!Ty->isVectorTy() || - Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) && + cast(Ty)->getElementCount() == + cast(S->getType())->getElementCount()) && "Invalid cast"); if (Ty->isIntOrIntVectorTy()) diff --git a/llvm/lib/IR/Type.cpp b/llvm/lib/IR/Type.cpp --- a/llvm/lib/IR/Type.cpp +++ b/llvm/lib/IR/Type.cpp @@ -149,6 +149,12 @@ return -1; } +Type *Type::getScalarType() const { + if (isVectorTy()) + return cast(this)->getElementType(); + return const_cast(this); +} + bool Type::isSizedDerivedType(SmallPtrSetImpl *Visited) const { if (auto *ATy = dyn_cast(this)) return ATy->getElementType()->isSized(Visited); diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp --- a/llvm/lib/IR/Verifier.cpp +++ b/llvm/lib/IR/Verifier.cpp @@ -2825,8 +2825,9 @@ &I); Assert(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(), "AddrSpaceCast must be between different address spaces", &I); - if (SrcTy->isVectorTy()) - Assert(SrcTy->getVectorNumElements() == DestTy->getVectorNumElements(), + if (auto *SrcVTy = dyn_cast(SrcTy)) + Assert(SrcVTy->getElementCount() == + cast(DestTy)->getElementCount(), "AddrSpaceCast vector pointer number of elements mismatch", &I); visitInstruction(I); } @@ -3334,16 +3335,18 @@ GEP.getResultElementType() == ElTy, "GEP is not of right type for indices!", &GEP, ElTy); - if 
(GEP.getType()->isVectorTy()) { + if (auto *GEPVTy = dyn_cast(GEP.getType())) { // Additional checks for vector GEPs. - unsigned GEPWidth = GEP.getType()->getVectorNumElements(); + ElementCount GEPWidth = GEPVTy->getElementCount(); if (GEP.getPointerOperandType()->isVectorTy()) - Assert(GEPWidth == GEP.getPointerOperandType()->getVectorNumElements(), - "Vector GEP result width doesn't match operand's", &GEP); + Assert( + GEPWidth == + cast(GEP.getPointerOperandType())->getElementCount(), + "Vector GEP result width doesn't match operand's", &GEP); for (Value *Idx : Idxs) { Type *IndexTy = Idx->getType(); - if (IndexTy->isVectorTy()) { - unsigned IndexWidth = IndexTy->getVectorNumElements(); + if (auto *IndexVTy = dyn_cast(IndexTy)) { + ElementCount IndexWidth = IndexVTy->getElementCount(); Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP); } Assert(IndexTy->isIntOrIntVectorTy(), @@ -4657,8 +4660,8 @@ "masked_load: return must match pointer type", Call); Assert(PassThru->getType() == DataTy, "masked_load: pass through and data type must match", Call); - Assert(Mask->getType()->getVectorNumElements() == - DataTy->getVectorNumElements(), + Assert(cast(Mask->getType())->getElementCount() == + cast(DataTy)->getElementCount(), "masked_load: vector mask must be same length as data", Call); break; } @@ -4676,8 +4679,8 @@ Type *DataTy = cast(Ptr->getType())->getElementType(); Assert(DataTy == Val->getType(), "masked_store: storee must match pointer type", Call); - Assert(Mask->getType()->getVectorNumElements() == - DataTy->getVectorNumElements(), + Assert(cast(Mask->getType())->getElementCount() == + cast(DataTy)->getElementCount(), "masked_store: vector mask must be same length as data", Call); break; } diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -9376,10 +9376,9 @@ // A pointer vector can not be the return type of the ldN intrinsics. Need to // load integer vectors first and then convert to pointer vectors. - Type *EltTy = VecTy->getVectorElementType(); + Type *EltTy = VecTy->getElementType(); if (EltTy->isPointerTy()) - VecTy = - VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements()); + VecTy = VectorType::get(DL.getIntPtrType(EltTy), VecTy->getNumElements()); IRBuilder<> Builder(LI); @@ -9389,15 +9388,15 @@ if (NumLoads > 1) { // If we're going to generate more than one load, reset the sub-vector type // to something legal. - VecTy = VectorType::get(VecTy->getVectorElementType(), - VecTy->getVectorNumElements() / NumLoads); + VecTy = VectorType::get(VecTy->getElementType(), + VecTy->getNumElements() / NumLoads); // We will compute the pointer operand of each load from the original base // address using GEPs. Cast the base address to a pointer to the scalar // element type. BaseAddr = Builder.CreateBitCast( - BaseAddr, VecTy->getVectorElementType()->getPointerTo( - LI->getPointerAddressSpace())); + BaseAddr, + VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace())); } Type *PtrTy = VecTy->getPointerTo(LI->getPointerAddressSpace()); @@ -9418,9 +9417,8 @@ // If we're generating more than one load, compute the base address of // subsequent loads as an offset from the previous. 
if (LoadCount > 0) - BaseAddr = - Builder.CreateConstGEP1_32(VecTy->getVectorElementType(), BaseAddr, - VecTy->getVectorNumElements() * Factor); + BaseAddr = Builder.CreateConstGEP1_32(VecTy->getElementType(), BaseAddr, + VecTy->getNumElements() * Factor); CallInst *LdN = Builder.CreateCall( LdNFunc, Builder.CreateBitCast(BaseAddr, PtrTy), "ldN"); @@ -9435,8 +9433,8 @@ // Convert the integer vector to pointer vector if the element is pointer. if (EltTy->isPointerTy()) SubVec = Builder.CreateIntToPtr( - SubVec, VectorType::get(SVI->getType()->getVectorElementType(), - VecTy->getVectorNumElements())); + SubVec, VectorType::get(SVI->getType()->getElementType(), + VecTy->getNumElements())); SubVecs[SVI].push_back(SubVec); } } @@ -9488,11 +9486,10 @@ "Invalid interleave factor"); VectorType *VecTy = SVI->getType(); - assert(VecTy->getVectorNumElements() % Factor == 0 && - "Invalid interleaved store"); + assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store"); - unsigned LaneLen = VecTy->getVectorNumElements() / Factor; - Type *EltTy = VecTy->getVectorElementType(); + unsigned LaneLen = VecTy->getNumElements() / Factor; + Type *EltTy = VecTy->getElementType(); VectorType *SubVecTy = VectorType::get(EltTy, LaneLen); const DataLayout &DL = SI->getModule()->getDataLayout(); @@ -9513,7 +9510,7 @@ // vectors to integer vectors. if (EltTy->isPointerTy()) { Type *IntTy = DL.getIntPtrType(EltTy); - unsigned NumOpElts = Op0->getType()->getVectorNumElements(); + unsigned NumOpElts = cast(Op0->getType())->getNumElements(); // Convert to the corresponding integer vector. Type *IntVecTy = VectorType::get(IntTy, NumOpElts); @@ -9530,14 +9527,14 @@ // If we're going to generate more than one store, reset the lane length // and sub-vector type to something legal. LaneLen /= NumStores; - SubVecTy = VectorType::get(SubVecTy->getVectorElementType(), LaneLen); + SubVecTy = VectorType::get(SubVecTy->getElementType(), LaneLen); // We will compute the pointer operand of each store from the original base // address using GEPs. Cast the base address to a pointer to the scalar // element type. BaseAddr = Builder.CreateBitCast( - BaseAddr, SubVecTy->getVectorElementType()->getPointerTo( - SI->getPointerAddressSpace())); + BaseAddr, + SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace())); } auto Mask = SVI->getShuffleMask(); @@ -9582,7 +9579,7 @@ // If we generating more than one store, we compute the base address of // subsequent stores as an offset from the previous. if (StoreCount > 0) - BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getVectorElementType(), + BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(), BaseAddr, LaneLen * Factor); Ops.push_back(Builder.CreateBitCast(BaseAddr, PtrTy)); @@ -9697,7 +9694,7 @@ return false; // FIXME: Update this method to support scalable addressing modes. 
- if (Ty->isVectorTy() && Ty->getVectorIsScalable()) + if (Ty->isVectorTy() && cast(Ty)->isScalable()) return AM.HasBaseReg && !AM.BaseOffs && !AM.Scale; // check reg + imm case: diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h --- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h +++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h @@ -153,7 +153,7 @@ if (!isa(DataType) || !ST->hasSVE()) return false; - Type *Ty = DataType->getVectorElementType(); + Type *Ty = cast(DataType)->getElementType(); if (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy()) return true; @@ -181,9 +181,9 @@ // the element type fits into a register and the number of elements is a // power of 2 > 1. if (isa(DataType)) { - unsigned NumElements = DataType->getVectorNumElements(); + unsigned NumElements = cast(DataType)->getNumElements(); unsigned EltSize = - DataType->getVectorElementType()->getScalarSizeInBits(); + cast(DataType)->getElementType()->getScalarSizeInBits(); return NumElements > 1 && isPowerOf2_64(NumElements) && EltSize >= 8 && EltSize <= 128 && isPowerOf2_64(EltSize); } diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp --- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp @@ -209,7 +209,7 @@ // elements in type Ty determine the vector width. auto toVectorTy = [&](Type *ArgTy) { return VectorType::get(ArgTy->getScalarType(), - DstTy->getVectorNumElements()); + cast(DstTy)->getElementCount()); }; // Exit early if DstTy is not a vector type whose elements are at least @@ -656,7 +656,8 @@ return LT.first * 2 * AmortizationCost; } - if (Ty->isVectorTy() && Ty->getVectorElementType()->isIntegerTy(8)) { + if (Ty->isVectorTy() && + cast(Ty)->getElementType()->isIntegerTy(8)) { unsigned ProfitableNumElements; if (Opcode == Instruction::Store) // We use a custom trunc store lowering so v.4b should be profitable. @@ -666,8 +667,8 @@ // have to promote the elements to v.2. ProfitableNumElements = 8; - if (Ty->getVectorNumElements() < ProfitableNumElements) { - unsigned NumVecElts = Ty->getVectorNumElements(); + if (cast(Ty)->getNumElements() < ProfitableNumElements) { + unsigned NumVecElts = cast(Ty)->getNumElements(); unsigned NumVectorizableInstsToAmortize = NumVecElts * 2; // We generate 2 instructions per vector element. return NumVectorizableInstsToAmortize * NumVecElts * 2; @@ -685,11 +686,12 @@ bool UseMaskForCond, bool UseMaskForGaps) { assert(Factor >= 2 && "Invalid interleave factor"); - assert(isa(VecTy) && "Expect a vector type"); + auto *VecVTy = dyn_cast(VecTy); + assert(VecVTy && "Expect a vector type"); if (!UseMaskForCond && !UseMaskForGaps && Factor <= TLI->getMaxSupportedInterleaveFactor()) { - unsigned NumElts = VecTy->getVectorNumElements(); + unsigned NumElts = VecVTy->getNumElements(); auto *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor); // ldN/stN only support legal vector types of size 64 or 128 in bits. 
@@ -710,7 +712,7 @@ for (auto *I : Tys) { if (!I->isVectorTy()) continue; - if (I->getScalarSizeInBits() * I->getVectorNumElements() == 128) + if (I->getScalarSizeInBits() * cast(I)->getNumElements() == 128) Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0) + getMemoryOpCost(Instruction::Load, I, Align(128), 0); } @@ -902,7 +904,8 @@ bool AArch64TTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const { - assert(isa(Ty) && "Expected Ty to be a vector type"); + auto *VTy = dyn_cast(Ty); + assert(VTy && "Expected Ty to be a vector type"); unsigned ScalarBits = Ty->getScalarSizeInBits(); switch (Opcode) { case Instruction::FAdd: @@ -913,10 +916,9 @@ case Instruction::Mul: return false; case Instruction::Add: - return ScalarBits * Ty->getVectorNumElements() >= 128; + return ScalarBits * VTy->getNumElements() >= 128; case Instruction::ICmp: - return (ScalarBits < 64) && - (ScalarBits * Ty->getVectorNumElements() >= 128); + return (ScalarBits < 64) && (ScalarBits * VTy->getNumElements() >= 128); case Instruction::FCmp: return Flags.NoNaN; default: diff --git a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp @@ -153,7 +153,7 @@ case Type::PointerTyID: return getValueType(Ty->getPointerElementType(), TypeName); case Type::VectorTyID: - return getValueType(Ty->getVectorElementType(), TypeName); + return getValueType(cast(Ty)->getElementType(), TypeName); default: return ValueType::Struct; } @@ -188,7 +188,7 @@ case Type::VectorTyID: { auto VecTy = cast(Ty); auto ElTy = VecTy->getElementType(); - auto NumElements = VecTy->getVectorNumElements(); + auto NumElements = VecTy->getNumElements(); return (Twine(getTypeName(ElTy, Signed)) + Twine(NumElements)).str(); } default: @@ -600,7 +600,7 @@ case Type::PointerTyID: return getValueType(Ty->getPointerElementType(), TypeName); case Type::VectorTyID: - return getValueType(Ty->getVectorElementType(), TypeName); + return getValueType(cast(Ty)->getElementType(), TypeName); default: return "struct"; } @@ -635,7 +635,7 @@ case Type::VectorTyID: { auto VecTy = cast(Ty); auto ElTy = VecTy->getElementType(); - auto NumElements = VecTy->getVectorNumElements(); + auto NumElements = VecTy->getNumElements(); return (Twine(getTypeName(ElTy, Signed)) + Twine(NumElements)).str(); } default: diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp @@ -152,7 +152,7 @@ } if (IsV3 && Size >= 32) { - V4Ty = VectorType::get(VT->getVectorElementType(), 4); + V4Ty = VectorType::get(VT->getElementType(), 4); // Use the hack that clang uses to avoid SelectionDAG ruining v3 loads AdjustedArgTy = V4Ty; } diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -17656,7 +17656,7 @@ "Unmatched number of shufflevectors and indices"); VectorType *VecTy = Shuffles[0]->getType(); - Type *EltTy = VecTy->getVectorElementType(); + Type *EltTy = VecTy->getElementType(); const DataLayout &DL = LI->getModule()->getDataLayout(); @@ -17671,8 +17671,7 @@ // A pointer vector can not be the return type of the ldN intrinsics. 
Need to // load integer vectors first and then convert to pointer vectors. if (EltTy->isPointerTy()) - VecTy = - VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements()); + VecTy = VectorType::get(DL.getIntPtrType(EltTy), VecTy->getNumElements()); IRBuilder<> Builder(LI); @@ -17682,15 +17681,15 @@ if (NumLoads > 1) { // If we're going to generate more than one load, reset the sub-vector type // to something legal. - VecTy = VectorType::get(VecTy->getVectorElementType(), - VecTy->getVectorNumElements() / NumLoads); + VecTy = VectorType::get(VecTy->getElementType(), + VecTy->getNumElements() / NumLoads); // We will compute the pointer operand of each load from the original base // address using GEPs. Cast the base address to a pointer to the scalar // element type. BaseAddr = Builder.CreateBitCast( - BaseAddr, VecTy->getVectorElementType()->getPointerTo( - LI->getPointerAddressSpace())); + BaseAddr, + VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace())); } assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!"); @@ -17715,8 +17714,8 @@ "expected interleave factor of 2 or 4 for MVE"); Intrinsic::ID LoadInts = Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q; - Type *VecEltTy = VecTy->getVectorElementType()->getPointerTo( - LI->getPointerAddressSpace()); + Type *VecEltTy = + VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()); Type *Tys[] = {VecTy, VecEltTy}; Function *VldnFunc = Intrinsic::getDeclaration(LI->getModule(), LoadInts, Tys); @@ -17736,9 +17735,8 @@ // If we're generating more than one load, compute the base address of // subsequent loads as an offset from the previous. if (LoadCount > 0) - BaseAddr = - Builder.CreateConstGEP1_32(VecTy->getVectorElementType(), BaseAddr, - VecTy->getVectorNumElements() * Factor); + BaseAddr = Builder.CreateConstGEP1_32(VecTy->getElementType(), BaseAddr, + VecTy->getNumElements() * Factor); CallInst *VldN = createLoadIntrinsic(BaseAddr); @@ -17753,8 +17751,8 @@ // Convert the integer vector to pointer vector if the element is pointer. if (EltTy->isPointerTy()) SubVec = Builder.CreateIntToPtr( - SubVec, VectorType::get(SV->getType()->getVectorElementType(), - VecTy->getVectorNumElements())); + SubVec, VectorType::get(SV->getType()->getElementType(), + VecTy->getNumElements())); SubVecs[SV].push_back(SubVec); } @@ -17807,11 +17805,10 @@ "Invalid interleave factor"); VectorType *VecTy = SVI->getType(); - assert(VecTy->getVectorNumElements() % Factor == 0 && - "Invalid interleaved store"); + assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store"); - unsigned LaneLen = VecTy->getVectorNumElements() / Factor; - Type *EltTy = VecTy->getVectorElementType(); + unsigned LaneLen = VecTy->getNumElements() / Factor; + Type *EltTy = VecTy->getElementType(); VectorType *SubVecTy = VectorType::get(EltTy, LaneLen); const DataLayout &DL = SI->getModule()->getDataLayout(); @@ -17834,8 +17831,8 @@ Type *IntTy = DL.getIntPtrType(EltTy); // Convert to the corresponding integer vector. - Type *IntVecTy = - VectorType::get(IntTy, Op0->getType()->getVectorNumElements()); + Type *IntVecTy = VectorType::get( + IntTy, cast(Op0->getType())->getNumElements()); Op0 = Builder.CreatePtrToInt(Op0, IntVecTy); Op1 = Builder.CreatePtrToInt(Op1, IntVecTy); @@ -17849,14 +17846,14 @@ // If we're going to generate more than one store, reset the lane length // and sub-vector type to something legal. 
LaneLen /= NumStores; - SubVecTy = VectorType::get(SubVecTy->getVectorElementType(), LaneLen); + SubVecTy = VectorType::get(SubVecTy->getElementType(), LaneLen); // We will compute the pointer operand of each store from the original base // address using GEPs. Cast the base address to a pointer to the scalar // element type. BaseAddr = Builder.CreateBitCast( - BaseAddr, SubVecTy->getVectorElementType()->getPointerTo( - SI->getPointerAddressSpace())); + BaseAddr, + SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace())); } assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!"); @@ -17886,7 +17883,7 @@ "expected interleave factor of 2 or 4 for MVE"); Intrinsic::ID StoreInts = Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q; - Type *EltPtrTy = SubVecTy->getVectorElementType()->getPointerTo( + Type *EltPtrTy = SubVecTy->getElementType()->getPointerTo( SI->getPointerAddressSpace()); Type *Tys[] = {EltPtrTy, SubVecTy}; Function *VstNFunc = @@ -17908,7 +17905,7 @@ // If we generating more than one store, we compute the base address of // subsequent stores as an offset from the previous. if (StoreCount > 0) - BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getVectorElementType(), + BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(), BaseAddr, LaneLen * Factor); SmallVector Shuffles; diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp --- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp +++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp @@ -434,7 +434,7 @@ Opcode == Instruction::ExtractElement)) { // Cross-class copies are expensive on many microarchitectures, // so assume they are expensive by default. - if (ValTy->getVectorElementType()->isIntegerTy()) + if (cast(ValTy)->getElementType()->isIntegerTy()) return 3; // Even if it's not a cross class copy, this likely leads to mixing @@ -452,7 +452,7 @@ // result anyway. return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), ST->getMVEVectorCostFactor()) * - ValTy->getVectorNumElements() / 2; + cast(ValTy)->getNumElements() / 2; } return BaseT::getVectorInstrCost(Opcode, ValTy, Index); @@ -794,8 +794,8 @@ return LT.first * BaseCost; // Else this is expand, assume that we need to scalarize this op. - if (Ty->isVectorTy()) { - unsigned Num = Ty->getVectorNumElements(); + if (auto *VTy = dyn_cast(Ty)) { + unsigned Num = VTy->getNumElements(); unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType()); // Return the cost of multiple scalar invocation plus the cost of // inserting and extracting the values. @@ -812,7 +812,7 @@ if (ST->hasNEON() && Src->isVectorTy() && (Alignment && *Alignment != Align(16)) && - Src->getVectorElementType()->isDoubleTy()) { + cast(Src)->getElementType()->isDoubleTy()) { // Unaligned loads/stores are extremely inefficient. // We need 4 uops for vst.1/vld.1 vs 1uop for vldr/vstr. return LT.first * 4; @@ -835,7 +835,7 @@ if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits && !UseMaskForCond && !UseMaskForGaps) { - unsigned NumElts = VecTy->getVectorNumElements(); + unsigned NumElts = cast(VecTy)->getNumElements(); auto *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor); // vldN/vstN only support legal vector types of size 64 or 128 in bits. 
@@ -1403,7 +1403,7 @@ case Instruction::ICmp: case Instruction::Add: return ScalarBits < 64 && - (ScalarBits * Ty->getVectorNumElements()) % 128 == 0; + (ScalarBits * cast(Ty)->getNumElements()) % 128 == 0; default: llvm_unreachable("Unhandled reduction opcode"); } diff --git a/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp --- a/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp +++ b/llvm/lib/Target/ARM/MVEGatherScatterLowering.cpp @@ -146,8 +146,8 @@ } Offsets = GEP->getOperand(1); // Paranoid check whether the number of parallel lanes is the same - assert(Ty->getVectorNumElements() == - Offsets->getType()->getVectorNumElements()); + assert(cast(Ty)->getNumElements() == + cast(Offsets->getType())->getNumElements()); // Only offsets can be integrated into an arm gather, any smaller // type would have to be sign extended by the gep - and arm gathers can only // zero extend. Additionally, the offsets do have to originate from a zext of @@ -157,7 +157,7 @@ return nullptr; if (ZExtInst *ZextOffs = dyn_cast(Offsets)) Offsets = ZextOffs->getOperand(0); - else if (!(Offsets->getType()->getVectorNumElements() == 4 && + else if (!(cast(Offsets->getType())->getNumElements() == 4 && Offsets->getType()->getScalarSizeInBits() == 32)) return nullptr; @@ -180,9 +180,9 @@ void MVEGatherScatterLowering::lookThroughBitcast(Value *&Ptr) { // Look through bitcast instruction if #elements is the same if (auto *BitCast = dyn_cast(Ptr)) { - Type *BCTy = BitCast->getType(); - Type *BCSrcTy = BitCast->getOperand(0)->getType(); - if (BCTy->getVectorNumElements() == BCSrcTy->getVectorNumElements()) { + auto *BCTy = cast(BitCast->getType()); + auto *BCSrcTy = cast(BitCast->getOperand(0)->getType()); + if (BCTy->getNumElements() == BCSrcTy->getNumElements()) { LLVM_DEBUG( dbgs() << "masked gathers/scatters: looking through bitcast\n"); Ptr = BitCast->getOperand(0); @@ -212,14 +212,14 @@ // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0) // Attempt to turn the masked gather in I into a MVE intrinsic // Potentially optimising the addressing modes as we do so. 
- Type *Ty = I->getType(); + auto *Ty = cast(I->getType()); Value *Ptr = I->getArgOperand(0); unsigned Alignment = cast(I->getArgOperand(1))->getZExtValue(); Value *Mask = I->getArgOperand(2); Value *PassThru = I->getArgOperand(3); - if (!isLegalTypeAndAlignment(Ty->getVectorNumElements(), - Ty->getScalarSizeInBits(), Alignment)) + if (!isLegalTypeAndAlignment(Ty->getNumElements(), Ty->getScalarSizeInBits(), + Alignment)) return false; lookThroughBitcast(Ptr); assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type"); @@ -254,9 +254,9 @@ Value *MVEGatherScatterLowering::tryCreateMaskedGatherBase( IntrinsicInst *I, Value *Ptr, IRBuilder<> &Builder) { using namespace PatternMatch; - Type *Ty = I->getType(); + auto *Ty = cast(I->getType()); LLVM_DEBUG(dbgs() << "masked gathers: loading from vector of pointers\n"); - if (Ty->getVectorNumElements() != 4 || Ty->getScalarSizeInBits() != 32) + if (Ty->getNumElements() != 4 || Ty->getScalarSizeInBits() != 32) // Can't build an intrinsic for this return nullptr; Value *Mask = I->getArgOperand(2); @@ -344,10 +344,10 @@ Value *Input = I->getArgOperand(0); Value *Ptr = I->getArgOperand(1); unsigned Alignment = cast(I->getArgOperand(2))->getZExtValue(); - Type *Ty = Input->getType(); + auto *Ty = cast(Input->getType()); - if (!isLegalTypeAndAlignment(Ty->getVectorNumElements(), - Ty->getScalarSizeInBits(), Alignment)) + if (!isLegalTypeAndAlignment(Ty->getNumElements(), Ty->getScalarSizeInBits(), + Alignment)) return false; lookThroughBitcast(Ptr); assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type"); @@ -373,9 +373,9 @@ using namespace PatternMatch; Value *Input = I->getArgOperand(0); Value *Mask = I->getArgOperand(3); - Type *Ty = Input->getType(); + auto *Ty = cast(Input->getType()); // Only QR variants allow truncating - if (!(Ty->getVectorNumElements() == 4 && Ty->getScalarSizeInBits() == 32)) { + if (!(Ty->getNumElements() == 4 && Ty->getScalarSizeInBits() == 32)) { // Can't build an intrinsic for this return nullptr; } diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp --- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -1087,7 +1087,7 @@ Constant *CVal = nullptr; bool isVTi1Type = false; if (auto *CV = dyn_cast(CPN->getConstVal())) { - if (CV->getType()->getVectorElementType()->isIntegerTy(1)) { + if (cast(CV->getType())->getElementType()->isIntegerTy(1)) { IRBuilder<> IRB(CV->getContext()); SmallVector NewConst; unsigned VecLen = CV->getNumOperands(); diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp --- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp +++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp @@ -60,8 +60,8 @@ } unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const { - if (Ty->isVectorTy()) - return Ty->getVectorNumElements(); + if (auto *VTy = dyn_cast(Ty)) + return VTy->getNumElements(); assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) && "Expecting scalar type"); return 1; diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp --- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp +++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp @@ -901,7 +901,7 @@ // stores, loads are expanded using the vector-load + permutation sequence, // which is much less expensive). 
if (Src->isVectorTy() && Opcode == Instruction::Store) - for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i) + for (int i = 0, e = cast(Src)->getNumElements(); i < e; ++i) Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i); return Cost; diff --git a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp --- a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp +++ b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp @@ -342,7 +342,8 @@ // 3. static unsigned getNumVectorRegs(Type *Ty) { assert(Ty->isVectorTy() && "Expected vector type"); - unsigned WideBits = getScalarSizeInBits(Ty) * Ty->getVectorNumElements(); + unsigned WideBits = + getScalarSizeInBits(Ty) * cast(Ty)->getNumElements(); assert(WideBits > 0 && "Could not compute size of vector"); return ((WideBits % 128U) ? ((WideBits / 128U) + 1) : (WideBits / 128U)); } @@ -442,7 +443,7 @@ return DivInstrCost; } else if (ST->hasVector()) { - unsigned VF = Ty->getVectorNumElements(); + unsigned VF = cast(Ty)->getNumElements(); unsigned NumVectors = getNumVectorRegs(Ty); // These vector operations are custom handled, but are still supported @@ -563,8 +564,9 @@ assert (SrcTy->isVectorTy() && DstTy->isVectorTy()); assert (SrcTy->getPrimitiveSizeInBits() > DstTy->getPrimitiveSizeInBits() && "Packing must reduce size of vector type."); - assert (SrcTy->getVectorNumElements() == DstTy->getVectorNumElements() && - "Packing should not change number of elements."); + assert(cast(SrcTy)->getNumElements() == + cast(DstTy)->getNumElements() && + "Packing should not change number of elements."); // TODO: Since fp32 is expanded, the extract cost should always be 0. @@ -579,7 +581,7 @@ unsigned Cost = 0; unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy); - unsigned VF = SrcTy->getVectorNumElements(); + unsigned VF = cast(SrcTy)->getNumElements(); for (unsigned P = 0; P < Log2Diff; ++P) { if (NumParts > 1) NumParts /= 2; @@ -653,7 +655,7 @@ getBoolVecToIntConversionCost(unsigned Opcode, Type *Dst, const Instruction *I) { assert (Dst->isVectorTy()); - unsigned VF = Dst->getVectorNumElements(); + unsigned VF = cast(Dst)->getNumElements(); unsigned Cost = 0; // If we know what the widths of the compared operands, get any cost of // converting it to match Dst. Otherwise assume same widths. @@ -702,7 +704,7 @@ } else if (ST->hasVector()) { assert (Dst->isVectorTy()); - unsigned VF = Src->getVectorNumElements(); + unsigned VF = cast(Src)->getNumElements(); unsigned NumDstVectors = getNumVectorRegs(Dst); unsigned NumSrcVectors = getNumVectorRegs(Src); @@ -829,7 +831,7 @@ } } else if (ST->hasVector()) { - unsigned VF = ValTy->getVectorNumElements(); + unsigned VF = cast(ValTy)->getNumElements(); // Called with a compare instruction. if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) { @@ -1072,7 +1074,7 @@ // Return the ceiling of dividing A by B. 
auto ceil = [](unsigned A, unsigned B) { return (A + B - 1) / B; }; - unsigned NumElts = VecTy->getVectorNumElements(); + unsigned NumElts = cast(VecTy)->getNumElements(); assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor"); unsigned VF = NumElts / Factor; unsigned NumEltsPerVecReg = (128U / getScalarSizeInBits(VecTy)); diff --git a/llvm/lib/Target/X86/X86InterleavedAccess.cpp b/llvm/lib/Target/X86/X86InterleavedAccess.cpp --- a/llvm/lib/Target/X86/X86InterleavedAccess.cpp +++ b/llvm/lib/Target/X86/X86InterleavedAccess.cpp @@ -127,7 +127,7 @@ bool X86InterleavedAccessGroup::isSupported() const { VectorType *ShuffleVecTy = Shuffles[0]->getType(); - Type *ShuffleEltTy = ShuffleVecTy->getVectorElementType(); + Type *ShuffleEltTy = ShuffleVecTy->getElementType(); unsigned ShuffleElemSize = DL.getTypeSizeInBits(ShuffleEltTy); unsigned WideInstSize; @@ -187,7 +187,7 @@ cast(Builder.CreateShuffleVector( Op0, Op1, createSequentialMask(Builder, Indices[i], - SubVecTy->getVectorNumElements(), 0)))); + SubVecTy->getNumElements(), 0)))); return; } @@ -727,8 +727,8 @@ // Try to generate target-sized register(/instruction). decompose(Inst, Factor, ShuffleTy, DecomposedVectors); - Type *ShuffleEltTy = Inst->getType(); - unsigned NumSubVecElems = ShuffleEltTy->getVectorNumElements() / Factor; + auto *ShuffleEltTy = cast(Inst->getType()); + unsigned NumSubVecElems = ShuffleEltTy->getNumElements() / Factor; // Perform matrix-transposition in order to compute interleaved // results by generating some sort of (optimized) target-specific // instructions. @@ -756,8 +756,8 @@ return true; } - Type *ShuffleEltTy = ShuffleTy->getVectorElementType(); - unsigned NumSubVecElems = ShuffleTy->getVectorNumElements() / Factor; + Type *ShuffleEltTy = ShuffleTy->getElementType(); + unsigned NumSubVecElems = ShuffleTy->getNumElements() / Factor; // Lower the interleaved stores: // 1. Decompose the interleaved wide shuffle into individual shuffle @@ -825,7 +825,7 @@ assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && "Invalid interleave factor"); - assert(SVI->getType()->getVectorNumElements() % Factor == 0 && + assert(SVI->getType()->getNumElements() % Factor == 0 && "Invalid interleaved store"); // Holds the indices of SVI that correspond to the starting index of each diff --git a/llvm/lib/Target/X86/X86PartialReduction.cpp b/llvm/lib/Target/X86/X86PartialReduction.cpp --- a/llvm/lib/Target/X86/X86PartialReduction.cpp +++ b/llvm/lib/Target/X86/X86PartialReduction.cpp @@ -89,7 +89,7 @@ return false; } - unsigned ElemNum = BO.getType()->getVectorNumElements(); + unsigned ElemNum = cast(BO.getType())->getNumElements(); // Ensure the reduction size is a power of 2. if (!isPowerOf2_32(ElemNum)) return false; @@ -141,7 +141,7 @@ // ElemNumToReduce / 2 elements, and store the result in // ElemNumToReduce / 2 elements in another vector. - unsigned ResultElements = ShufInst->getType()->getVectorNumElements(); + unsigned ResultElements = ShufInst->getType()->getNumElements(); if (ResultElements < ElemNum) return false; @@ -236,8 +236,8 @@ IRBuilder<> Builder(Add); - Type *MulTy = Op->getType(); - unsigned NumElts = MulTy->getVectorNumElements(); + auto *MulTy = cast(Op->getType()); + unsigned NumElts = MulTy->getNumElements(); // Extract even elements and odd elements and add them together. This will // be pattern matched by SelectionDAG to pmaddwd. This instruction will be @@ -272,11 +272,11 @@ return false; // Need at least 8 elements. 
- if (BO->getType()->getVectorNumElements() < 8) + if (cast(BO->getType())->getNumElements() < 8) return false; // Element type should be i32. - if (!BO->getType()->getVectorElementType()->isIntegerTy(32)) + if (!cast(BO->getType())->getElementType()->isIntegerTy(32)) return false; bool Changed = false; @@ -305,7 +305,9 @@ // Look for zero extend from i8. auto getZeroExtendedVal = [](Value *Op) -> Value * { if (auto *ZExt = dyn_cast(Op)) - if (ZExt->getOperand(0)->getType()->getVectorElementType()->isIntegerTy(8)) + if (cast(ZExt->getOperand(0)->getType()) + ->getElementType() + ->isIntegerTy(8)) return ZExt->getOperand(0); return nullptr; @@ -319,8 +321,8 @@ IRBuilder<> Builder(Add); - Type *OpTy = Op->getType(); - unsigned NumElts = OpTy->getVectorNumElements(); + auto *OpTy = cast(Op->getType()); + unsigned NumElts = OpTy->getNumElements(); unsigned IntrinsicNumElts; Intrinsic::ID IID; @@ -371,7 +373,8 @@ assert(isPowerOf2_32(NumSplits) && "Expected power of 2 splits"); unsigned Stages = Log2_32(NumSplits); for (unsigned s = Stages; s > 0; --s) { - unsigned NumConcatElts = Ops[0]->getType()->getVectorNumElements() * 2; + unsigned NumConcatElts = + cast(Ops[0]->getType())->getNumElements() * 2; for (unsigned i = 0; i != 1U << (s - 1); ++i) { SmallVector ConcatMask(NumConcatElts); std::iota(ConcatMask.begin(), ConcatMask.end(), 0); @@ -381,13 +384,13 @@ // At this point the final value should be in Ops[0]. Now we need to adjust // it to the final original type. - NumElts = OpTy->getVectorNumElements(); + NumElts = cast(OpTy)->getNumElements(); if (NumElts == 2) { // Extract down to 2 elements. Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef{0, 1}); } else if (NumElts >= 8) { SmallVector ConcatMask(NumElts); - unsigned SubElts = Ops[0]->getType()->getVectorNumElements(); + unsigned SubElts = cast(Ops[0]->getType())->getNumElements(); for (unsigned i = 0; i != SubElts; ++i) ConcatMask[i] = i; for (unsigned i = SubElts; i != NumElts; ++i) @@ -411,7 +414,7 @@ // TODO: There's nothing special about i32, any integer type above i16 should // work just as well. - if (!BO->getType()->getVectorElementType()->isIntegerTy(32)) + if (!cast(BO->getType())->getElementType()->isIntegerTy(32)) return false; bool Changed = false; diff --git a/llvm/lib/Target/X86/X86ShuffleDecodeConstantPool.cpp b/llvm/lib/Target/X86/X86ShuffleDecodeConstantPool.cpp --- a/llvm/lib/Target/X86/X86ShuffleDecodeConstantPool.cpp +++ b/llvm/lib/Target/X86/X86ShuffleDecodeConstantPool.cpp @@ -36,17 +36,17 @@ // // <4 x i32> - Type *CstTy = C->getType(); - if (!CstTy->isVectorTy()) + auto *CstTy = dyn_cast(C->getType()); + if (!CstTy) return false; - Type *CstEltTy = CstTy->getVectorElementType(); + Type *CstEltTy = CstTy->getElementType(); if (!CstEltTy->isIntegerTy()) return false; unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits(); unsigned CstEltSizeInBits = CstTy->getScalarSizeInBits(); - unsigned NumCstElts = CstTy->getVectorNumElements(); + unsigned NumCstElts = CstTy->getNumElements(); assert((CstSizeInBits % MaskEltSizeInBits) == 0 && "Unaligned shuffle mask size"); diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp --- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp +++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp @@ -919,18 +919,20 @@ // FIXME: Remove some of the alignment restrictions. // FIXME: We can use permq for 64-bit or larger extracts from 256-bit // vectors. 
- int OrigSubElts = SubTp->getVectorNumElements(); - if (NumSubElts > OrigSubElts && - (Index % OrigSubElts) == 0 && (NumSubElts % OrigSubElts) == 0 && + int OrigSubElts = cast(SubTp)->getNumElements(); + if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 && + (NumSubElts % OrigSubElts) == 0 && LT.second.getVectorElementType() == - SubLT.second.getVectorElementType() && + SubLT.second.getVectorElementType() && LT.second.getVectorElementType().getSizeInBits() == - Tp->getVectorElementType()->getPrimitiveSizeInBits()) { + cast(Tp) + ->getElementType() + ->getPrimitiveSizeInBits()) { assert(NumElts >= NumSubElts && NumElts > OrigSubElts && "Unexpected number of elements!"); - Type *VecTy = VectorType::get(Tp->getVectorElementType(), + Type *VecTy = VectorType::get(cast(Tp)->getElementType(), LT.second.getVectorNumElements()); - Type *SubTy = VectorType::get(Tp->getVectorElementType(), + Type *SubTy = VectorType::get(cast(Tp)->getElementType(), SubLT.second.getVectorNumElements()); int ExtractIndex = alignDown((Index % NumElts), NumSubElts); int ExtractCost = getShuffleCost(TTI::SK_ExtractSubvector, VecTy, @@ -956,8 +958,9 @@ MVT LegalVT = LT.second; if (LegalVT.isVector() && LegalVT.getVectorElementType().getSizeInBits() == - Tp->getVectorElementType()->getPrimitiveSizeInBits() && - LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) { + cast(Tp)->getElementType()->getPrimitiveSizeInBits() && + LegalVT.getVectorNumElements() < + cast(Tp)->getNumElements()) { unsigned VecTySize = DL.getTypeStoreSize(Tp); unsigned LegalVTSize = LegalVT.getStoreSize(); @@ -966,7 +969,7 @@ // Number of destination vectors after legalization: unsigned NumOfDests = LT.first; - Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(), + Type *SingleOpTy = VectorType::get(cast(Tp)->getElementType(), LegalVT.getVectorNumElements()); unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests; @@ -2506,7 +2509,8 @@ // TODO: Under what circumstances should we shuffle using the full width? int ShuffleCost = 1; if (Opcode == Instruction::InsertElement) { - Type *SubTy = VectorType::get(Val->getVectorElementType(), SubNumElts); + Type *SubTy = + VectorType::get(cast(Val)->getElementType(), SubNumElts); ShuffleCost = getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, 0, SubTy); } int IntOrFpCost = ScalarType->isFloatingPointTy() ? 
0 : 1; @@ -2526,7 +2530,7 @@ const Instruction *I) { // Handle non-power-of-two vectors such as <3 x float> if (VectorType *VTy = dyn_cast(Src)) { - unsigned NumElem = VTy->getVectorNumElements(); + unsigned NumElem = VTy->getNumElements(); // Handle a few common cases: // <3 x float> @@ -2576,7 +2580,7 @@ // To calculate scalar take the regular cost, without mask return getMemoryOpCost(Opcode, SrcTy, MaybeAlign(Alignment), AddressSpace); - unsigned NumElem = SrcVTy->getVectorNumElements(); + unsigned NumElem = SrcVTy->getNumElements(); VectorType *MaskTy = VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem); if ((IsLoad && !isLegalMaskedLoad(SrcVTy, MaybeAlign(Alignment))) || @@ -2607,7 +2611,7 @@ getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, 0, nullptr); else if (LT.second.getVectorNumElements() > NumElem) { - VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(), + VectorType *NewMaskTy = VectorType::get(MaskTy->getElementType(), LT.second.getVectorNumElements()); // Expanding requires fill mask with zeroes Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy); @@ -2714,10 +2718,10 @@ unsigned ArithmeticCost = 0; if (LT.first != 1 && MTy.isVector() && - MTy.getVectorNumElements() < ValTy->getVectorNumElements()) { + MTy.getVectorNumElements() < cast(ValTy)->getNumElements()) { // Type needs to be split. We need LT.first - 1 arithmetic ops. - Type *SingleOpTy = VectorType::get(ValTy->getVectorElementType(), - MTy.getVectorNumElements()); + Type *SingleOpTy = VectorType::get( + cast(ValTy)->getElementType(), MTy.getVectorNumElements()); ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy); ArithmeticCost *= LT.first - 1; } @@ -2781,13 +2785,15 @@ }; // Handle bool allof/anyof patterns. - if (ValTy->getVectorElementType()->isIntegerTy(1)) { + if (cast(ValTy)->getElementType()->isIntegerTy(1)) { unsigned ArithmeticCost = 0; if (LT.first != 1 && MTy.isVector() && - MTy.getVectorNumElements() < ValTy->getVectorNumElements()) { + MTy.getVectorNumElements() < + cast(ValTy)->getNumElements()) { // Type needs to be split. We need LT.first - 1 arithmetic ops. - Type *SingleOpTy = VectorType::get(ValTy->getVectorElementType(), - MTy.getVectorNumElements()); + Type *SingleOpTy = + VectorType::get(cast(ValTy)->getElementType(), + MTy.getVectorNumElements()); ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy); ArithmeticCost *= LT.first - 1; } @@ -2808,7 +2814,7 @@ return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise); } - unsigned NumVecElts = ValTy->getVectorNumElements(); + unsigned NumVecElts = cast(ValTy)->getNumElements(); unsigned ScalarSize = ValTy->getScalarSizeInBits(); // Special case power of 2 reductions where the scalar type isn't changed @@ -2820,9 +2826,9 @@ Type *Ty = ValTy; if (LT.first != 1 && MTy.isVector() && - MTy.getVectorNumElements() < ValTy->getVectorNumElements()) { + MTy.getVectorNumElements() < cast(ValTy)->getNumElements()) { // Type needs to be split. We need LT.first - 1 arithmetic ops. - Ty = VectorType::get(ValTy->getVectorElementType(), + Ty = VectorType::get(cast(ValTy)->getElementType(), MTy.getVectorNumElements()); ReductionCost = getArithmeticInstrCost(Opcode, Ty); ReductionCost *= LT.first - 1; @@ -2837,7 +2843,8 @@ NumVecElts /= 2; // If we're reducing from 256/512 bits, use an extract_subvector. 
if (Size > 128) { - Type *SubTy = VectorType::get(ValTy->getVectorElementType(), NumVecElts); + Type *SubTy = VectorType::get(cast(ValTy)->getElementType(), + NumVecElts); ReductionCost += getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts, SubTy); Ty = SubTy; @@ -3306,7 +3313,7 @@ unsigned Alignment, unsigned AddressSpace) { assert(isa(SrcVTy) && "Unexpected type in getGSVectorCost"); - unsigned VF = SrcVTy->getVectorNumElements(); + unsigned VF = cast(SrcVTy)->getNumElements(); // Try to reduce index size from 64 bit (default for GEP) // to 32. It is essential for VF 16. If the index can't be reduced to 32, the @@ -3327,8 +3334,8 @@ if (isa(GEP->getOperand(i))) continue; Type *IndxTy = GEP->getOperand(i)->getType(); - if (IndxTy->isVectorTy()) - IndxTy = IndxTy->getVectorElementType(); + if (auto *IndexVTy = dyn_cast(IndxTy)) + IndxTy = IndexVTy->getElementType(); if ((IndxTy->getPrimitiveSizeInBits() == 64 && !isa(GEP->getOperand(i))) || ++NumOfVarIndices > 1) @@ -3376,7 +3383,7 @@ int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy, bool VariableMask, unsigned Alignment, unsigned AddressSpace) { - unsigned VF = SrcVTy->getVectorNumElements(); + unsigned VF = cast(SrcVTy)->getNumElements(); int MaskUnpackCost = 0; if (VariableMask) { @@ -3415,10 +3422,11 @@ unsigned Alignment, const Instruction *I = nullptr) { assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter"); - unsigned VF = SrcVTy->getVectorNumElements(); + unsigned VF = cast(SrcVTy)->getNumElements(); PointerType *PtrTy = dyn_cast(Ptr->getType()); if (!PtrTy && Ptr->getType()->isVectorTy()) - PtrTy = dyn_cast(Ptr->getType()->getVectorElementType()); + PtrTy = dyn_cast( + cast(Ptr->getType())->getElementType()); assert(PtrTy && "Unexpected type for Ptr argument"); unsigned AddressSpace = PtrTy->getAddressSpace(); @@ -3464,7 +3472,8 @@ return false; // The backend can't handle a single element vector. - if (isa(DataTy) && DataTy->getVectorNumElements() == 1) + if (isa(DataTy) && + cast(DataTy)->getNumElements() == 1) return false; Type *ScalarTy = DataTy->getScalarType(); @@ -3529,10 +3538,10 @@ return false; // The backend can't handle a single element vector. - if (DataTy->getVectorNumElements() == 1) + if (cast(DataTy)->getNumElements() == 1) return false; - Type *ScalarTy = DataTy->getVectorElementType(); + Type *ScalarTy = cast(DataTy)->getElementType(); if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy()) return true; @@ -3568,8 +3577,8 @@ // In this case we can reject non-power-of-2 vectors. // We also reject single element vectors as the type legalizer can't // scalarize it. - if (isa(DataTy)) { - unsigned NumElts = DataTy->getVectorNumElements(); + if (auto *DataVTy = dyn_cast(DataTy)) { + unsigned NumElts = DataVTy->getNumElements(); if (NumElts == 1 || !isPowerOf2_32(NumElts)) return false; } @@ -3708,8 +3717,8 @@ return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, Alignment, AddressSpace); - unsigned VF = VecTy->getVectorNumElements() / Factor; - Type *ScalarTy = VecTy->getVectorElementType(); + unsigned VF = cast(VecTy)->getNumElements() / Factor; + Type *ScalarTy = cast(VecTy)->getElementType(); // Calculate the number of memory operations (NumOfMemOps), required // for load/store the VecTy. @@ -3718,8 +3727,9 @@ unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize; // Get the cost of one memory operation. 
- Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(), - LegalVT.getVectorNumElements()); + Type *SingleMemOpTy = + VectorType::get(cast(VecTy)->getElementType(), + LegalVT.getVectorNumElements()); unsigned MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy, MaybeAlign(Alignment), AddressSpace); @@ -3818,12 +3828,13 @@ unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize; // Get the cost of one memory operation. - Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(), - LegalVT.getVectorNumElements()); + Type *SingleMemOpTy = + VectorType::get(cast(VecTy)->getElementType(), + LegalVT.getVectorNumElements()); unsigned MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy, MaybeAlign(Alignment), AddressSpace); - unsigned VF = VecTy->getVectorNumElements() / Factor; + unsigned VF = cast(VecTy)->getNumElements() / Factor; MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF); if (Opcode == Instruction::Load) { @@ -3855,8 +3866,9 @@ unsigned NumOfLoadsInInterleaveGrp = Indices.size() ? Indices.size() : Factor; - Type *ResultTy = VectorType::get(VecTy->getVectorElementType(), - VecTy->getVectorNumElements() / Factor); + Type *ResultTy = + VectorType::get(cast(VecTy)->getElementType(), + cast(VecTy)->getNumElements() / Factor); unsigned NumOfResults = getTLI()->getTypeLegalizationCost(DL, ResultTy).first * NumOfLoadsInInterleaveGrp; @@ -3926,7 +3938,7 @@ bool UseMaskForCond, bool UseMaskForGaps) { auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) { - Type *EltTy = VecTy->getVectorElementType(); + Type *EltTy = cast(VecTy)->getElementType(); if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) || EltTy->isIntegerTy(32) || EltTy->isPointerTy()) return true; diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp @@ -1652,7 +1652,7 @@ if (C->getType()->isVectorTy()) { // Check each element of a constant vector. - unsigned NumElts = C->getType()->getVectorNumElements(); + unsigned NumElts = cast(C->getType())->getNumElements(); for (unsigned i = 0; i != NumElts; ++i) { Constant *Elt = C->getAggregateElement(i); if (!Elt) @@ -2082,7 +2082,7 @@ /// If all elements of two constant vectors are 0/-1 and inverses, return true. 
static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) { - unsigned NumElts = C1->getType()->getVectorNumElements(); + unsigned NumElts = cast(C1->getType())->getNumElements(); for (unsigned i = 0; i != NumElts; ++i) { Constant *EltC1 = C1->getAggregateElement(i); Constant *EltC2 = C2->getAggregateElement(i); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -523,7 +523,7 @@ auto Vec = II.getArgOperand(0); auto Amt = II.getArgOperand(1); auto VT = cast(II.getType()); - auto SVT = VT->getVectorElementType(); + auto SVT = VT->getElementType(); int NumElts = VT->getNumElements(); int BitWidth = SVT->getIntegerBitWidth(); @@ -620,10 +620,10 @@ if (isa(Arg0) && isa(Arg1)) return UndefValue::get(ResTy); - Type *ArgTy = Arg0->getType(); + auto *ArgTy = cast(Arg0->getType()); unsigned NumLanes = ResTy->getPrimitiveSizeInBits() / 128; - unsigned NumSrcElts = ArgTy->getVectorNumElements(); - assert(ResTy->getVectorNumElements() == (2 * NumSrcElts) && + unsigned NumSrcElts = ArgTy->getNumElements(); + assert(cast(ResTy)->getNumElements() == (2 * NumSrcElts) && "Unexpected packing types"); unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes; @@ -680,14 +680,14 @@ InstCombiner::BuilderTy &Builder) { Value *Arg = II.getArgOperand(0); Type *ResTy = II.getType(); - Type *ArgTy = Arg->getType(); + auto *ArgTy = dyn_cast(Arg->getType()); // movmsk(undef) -> zero as we must ensure the upper bits are zero. if (isa(Arg)) return Constant::getNullValue(ResTy); // We can't easily peek through x86_mmx types. - if (!ArgTy->isVectorTy()) + if (!ArgTy) return nullptr; // Expand MOVMSK to compare/bitcast/zext: @@ -695,8 +695,8 @@ // %cmp = icmp slt <16 x i8> %x, zeroinitializer // %int = bitcast <16 x i1> %cmp to i16 // %res = zext i16 %int to i32 - unsigned NumElts = ArgTy->getVectorNumElements(); - Type *IntegerVecTy = VectorType::getInteger(cast(ArgTy)); + unsigned NumElts = ArgTy->getNumElements(); + Type *IntegerVecTy = VectorType::getInteger(ArgTy); Type *IntegerTy = Builder.getIntNTy(NumElts); Value *Res = Builder.CreateBitCast(Arg, IntegerVecTy); @@ -1036,7 +1036,7 @@ auto *VecTy = cast(II.getType()); auto *MaskEltTy = Type::getInt32Ty(II.getContext()); - unsigned NumElts = VecTy->getVectorNumElements(); + unsigned NumElts = VecTy->getNumElements(); bool IsPD = VecTy->getScalarType()->isDoubleTy(); unsigned NumLaneElts = IsPD ? 2 : 4; assert(NumElts == 16 || NumElts == 8 || NumElts == 4 || NumElts == 2); @@ -1955,8 +1955,8 @@ } // For vector result intrinsics, use the generic demanded vector support. - if (II->getType()->isVectorTy()) { - auto VWidth = II->getType()->getVectorNumElements(); + if (auto *IIVTy = dyn_cast(II->getType())) { + auto VWidth = IIVTy->getNumElements(); APInt UndefElts(VWidth, 0); APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth)); if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) { @@ -2489,8 +2489,9 @@ // Turn PPC QPX qvlfs -> load if the pointer is known aligned. 
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC, &DT) >= 16) { - Type *VTy = VectorType::get(Builder.getFloatTy(), - II->getType()->getVectorNumElements()); + Type *VTy = + VectorType::get(Builder.getFloatTy(), + cast(II->getType())->getElementCount()); Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0), PointerType::getUnqual(VTy)); Value *Load = Builder.CreateLoad(VTy, Ptr); @@ -2510,8 +2511,9 @@ // Turn PPC QPX qvstfs -> store if the pointer is known aligned. if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC, &DT) >= 16) { - Type *VTy = VectorType::get(Builder.getFloatTy(), - II->getArgOperand(0)->getType()->getVectorNumElements()); + Type *VTy = VectorType::get( + Builder.getFloatTy(), + cast(II->getArgOperand(0)->getType())->getElementCount()); Value *TOp = Builder.CreateFPTrunc(II->getArgOperand(0), VTy); Type *OpPtrTy = PointerType::getUnqual(VTy); Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy); @@ -2660,7 +2662,7 @@ // These intrinsics only demand the 0th element of their input vectors. If // we can simplify the input based on that, do so now. Value *Arg = II->getArgOperand(0); - unsigned VWidth = Arg->getType()->getVectorNumElements(); + unsigned VWidth = cast(Arg->getType())->getNumElements(); if (Value *V = SimplifyDemandedVectorEltsLow(Arg, VWidth, 1)) return replaceOperand(*II, 0, V); break; @@ -2710,7 +2712,7 @@ bool MadeChange = false; Value *Arg0 = II->getArgOperand(0); Value *Arg1 = II->getArgOperand(1); - unsigned VWidth = Arg0->getType()->getVectorNumElements(); + unsigned VWidth = cast(Arg0->getType())->getNumElements(); if (Value *V = SimplifyDemandedVectorEltsLow(Arg0, VWidth, 1)) { replaceOperand(*II, 0, V); MadeChange = true; @@ -2928,7 +2930,7 @@ Value *Arg1 = II->getArgOperand(1); assert(Arg1->getType()->getPrimitiveSizeInBits() == 128 && "Unexpected packed shift size"); - unsigned VWidth = Arg1->getType()->getVectorNumElements(); + unsigned VWidth = cast(Arg1->getType())->getNumElements(); if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, VWidth / 2)) return replaceOperand(*II, 1, V); @@ -2995,7 +2997,7 @@ bool MadeChange = false; Value *Arg0 = II->getArgOperand(0); Value *Arg1 = II->getArgOperand(1); - unsigned VWidth = Arg0->getType()->getVectorNumElements(); + unsigned VWidth = cast(Arg0->getType())->getNumElements(); APInt UndefElts1(VWidth, 0); APInt DemandedElts1 = APInt::getSplat(VWidth, @@ -3035,8 +3037,8 @@ case Intrinsic::x86_sse4a_extrq: { Value *Op0 = II->getArgOperand(0); Value *Op1 = II->getArgOperand(1); - unsigned VWidth0 = Op0->getType()->getVectorNumElements(); - unsigned VWidth1 = Op1->getType()->getVectorNumElements(); + unsigned VWidth0 = cast(Op0->getType())->getNumElements(); + unsigned VWidth1 = cast(Op1->getType())->getNumElements(); assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 && VWidth1 == 16 && "Unexpected operand sizes"); @@ -3074,7 +3076,7 @@ // EXTRQI: Extract Length bits starting from Index. Zero pad the remaining // bits of the lower 64-bits. The upper 64-bits are undefined. 
Value *Op0 = II->getArgOperand(0); - unsigned VWidth = Op0->getType()->getVectorNumElements(); + unsigned VWidth = cast(Op0->getType())->getNumElements(); assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 && "Unexpected operand size"); @@ -3096,10 +3098,10 @@ case Intrinsic::x86_sse4a_insertq: { Value *Op0 = II->getArgOperand(0); Value *Op1 = II->getArgOperand(1); - unsigned VWidth = Op0->getType()->getVectorNumElements(); + unsigned VWidth = cast(Op0->getType())->getNumElements(); assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 && - Op1->getType()->getVectorNumElements() == 2 && + cast(Op1->getType())->getNumElements() == 2 && "Unexpected operand size"); // See if we're dealing with constant values. @@ -3130,8 +3132,8 @@ // undefined. Value *Op0 = II->getArgOperand(0); Value *Op1 = II->getArgOperand(1); - unsigned VWidth0 = Op0->getType()->getVectorNumElements(); - unsigned VWidth1 = Op1->getType()->getVectorNumElements(); + unsigned VWidth0 = cast(Op0->getType())->getNumElements(); + unsigned VWidth1 = cast(Op1->getType())->getNumElements(); assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 && VWidth1 == 2 && "Unexpected operand sizes"); @@ -3198,8 +3200,10 @@ II->getType()->getPrimitiveSizeInBits() && "Not expecting mask and operands with different sizes"); - unsigned NumMaskElts = Mask->getType()->getVectorNumElements(); - unsigned NumOperandElts = II->getType()->getVectorNumElements(); + unsigned NumMaskElts = + cast(Mask->getType())->getNumElements(); + unsigned NumOperandElts = + cast(II->getType())->getNumElements(); if (NumMaskElts == NumOperandElts) return SelectInst::Create(BoolVec, Op1, Op0); @@ -3290,7 +3294,7 @@ // the permutation mask with respect to 31 and reverse the order of // V1 and V2. if (Constant *Mask = dyn_cast(II->getArgOperand(2))) { - assert(Mask->getType()->getVectorNumElements() == 16 && + assert(cast(Mask->getType())->getNumElements() == 16 && "Bad type for intrinsic!"); // Check that all of the elements are integer constants or undefs. @@ -3448,7 +3452,8 @@ if (auto *CI = dyn_cast(XorMask)) { if (CI->getValue().trunc(16).isAllOnesValue()) { auto TrueVector = Builder.CreateVectorSplat( - II->getType()->getVectorNumElements(), Builder.getTrue()); + cast(II->getType())->getNumElements(), + Builder.getTrue()); return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector); } } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp @@ -856,10 +856,10 @@ Value *VecOp; if (match(Src, m_OneUse(m_ExtractElement(m_Value(VecOp), m_ConstantInt(Cst))))) { - Type *VecOpTy = VecOp->getType(); + auto *VecOpTy = cast(VecOp->getType()); unsigned DestScalarSize = DestTy->getScalarSizeInBits(); unsigned VecOpScalarSize = VecOpTy->getScalarSizeInBits(); - unsigned VecNumElts = VecOpTy->getVectorNumElements(); + unsigned VecNumElts = VecOpTy->getNumElements(); // A badly fit destination size would result in an invalid cast. if (VecOpScalarSize % DestScalarSize == 0) { @@ -1514,12 +1514,13 @@ // TODO: Make these support undef elements. 
static Type *shrinkFPConstantVector(Value *V) { auto *CV = dyn_cast(V); - if (!CV || !CV->getType()->isVectorTy()) + auto *CVVTy = dyn_cast(V->getType()); + if (!CV || !CVVTy) return nullptr; Type *MinType = nullptr; - unsigned NumElts = CV->getType()->getVectorNumElements(); + unsigned NumElts = CVVTy->getNumElements(); for (unsigned i = 0; i != NumElts; ++i) { auto *CFP = dyn_cast_or_null(CV->getAggregateElement(i)); if (!CFP) @@ -1820,8 +1821,9 @@ if (CI.getOperand(0)->getType()->getScalarSizeInBits() != DL.getPointerSizeInBits(AS)) { Type *Ty = DL.getIntPtrType(CI.getContext(), AS); - if (CI.getType()->isVectorTy()) // Handle vectors of pointers. - Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements()); + if (auto *CIVTy = + dyn_cast(CI.getType())) // Handle vectors of pointers. + Ty = VectorType::get(Ty, CIVTy->getElementCount()); Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty); return new IntToPtrInst(P, CI.getType()); @@ -1868,8 +1870,8 @@ return commonPointerCastTransforms(CI); Type *PtrTy = DL.getIntPtrType(CI.getContext(), AS); - if (Ty->isVectorTy()) // Handle vectors of pointers. - PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements()); + if (auto *VTy = dyn_cast(Ty)) // Handle vectors of pointers. + PtrTy = VectorType::get(PtrTy, VTy->getNumElements()); Value *P = Builder.CreatePtrToInt(CI.getOperand(0), PtrTy); return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false); @@ -2199,10 +2201,10 @@ // A vector select must maintain the same number of elements in its operands. Type *CondTy = Cond->getType(); Type *DestTy = BitCast.getType(); - if (CondTy->isVectorTy()) { + if (auto *CondVTy = dyn_cast(CondTy)) { if (!DestTy->isVectorTy()) return nullptr; - if (DestTy->getVectorNumElements() != CondTy->getVectorNumElements()) + if (cast(DestTy)->getNumElements() != CondVTy->getNumElements()) return nullptr; } @@ -2536,10 +2538,11 @@ // a bitcast to a vector with the same # elts. Value *ShufOp0 = Shuf->getOperand(0); Value *ShufOp1 = Shuf->getOperand(1); - unsigned NumShufElts = Shuf->getType()->getVectorNumElements(); - unsigned NumSrcVecElts = ShufOp0->getType()->getVectorNumElements(); + unsigned NumShufElts = Shuf->getType()->getNumElements(); + unsigned NumSrcVecElts = + cast(ShufOp0->getType())->getNumElements(); if (Shuf->hasOneUse() && DestTy->isVectorTy() && - DestTy->getVectorNumElements() == NumShufElts && + cast(DestTy)->getNumElements() == NumShufElts && NumShufElts == NumSrcVecElts) { BitCastInst *Tmp; // If either of the operands is a cast from CI.getType(), then diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -897,7 +897,7 @@ // For vectors, we apply the same reasoning on a per-lane basis. 
auto *Base = GEPLHS->getPointerOperand(); if (GEPLHS->getType()->isVectorTy() && Base->getType()->isPointerTy()) { - int NumElts = GEPLHS->getType()->getVectorNumElements(); + int NumElts = cast(GEPLHS->getType())->getNumElements(); Base = Builder.CreateVectorSplat(NumElts, Base); } return new ICmpInst(Cond, Base, @@ -1861,8 +1861,8 @@ int32_t ExactLogBase2 = C2->exactLogBase2(); if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) { Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1); - if (And->getType()->isVectorTy()) - NTy = VectorType::get(NTy, And->getType()->getVectorNumElements()); + if (auto *AndVTy = dyn_cast(And->getType())) + NTy = VectorType::get(NTy, AndVTy->getNumElements()); Value *Trunc = Builder.CreateTrunc(X, NTy); auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE : CmpInst::ICMP_SLT; @@ -2147,8 +2147,8 @@ if (Shl->hasOneUse() && Amt != 0 && C.countTrailingZeros() >= Amt && DL.isLegalInteger(TypeBits - Amt)) { Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt); - if (ShType->isVectorTy()) - TruncTy = VectorType::get(TruncTy, ShType->getVectorNumElements()); + if (auto *ShVTy = dyn_cast(ShType)) + TruncTy = VectorType::get(TruncTy, ShVTy->getNumElements()); Constant *NewC = ConstantInt::get(TruncTy, C.ashr(*ShiftAmt).trunc(TypeBits - Amt)); return new ICmpInst(Pred, Builder.CreateTrunc(X, TruncTy), NewC); @@ -2774,8 +2774,8 @@ // (bitcast (fpext/fptrunc X)) to iX) > -1 --> (bitcast X to iY) > -1 Type *XType = X->getType(); Type *NewType = Builder.getIntNTy(XType->getScalarSizeInBits()); - if (XType->isVectorTy()) - NewType = VectorType::get(NewType, XType->getVectorNumElements()); + if (auto *XVTy = dyn_cast(XType)) + NewType = VectorType::get(NewType, XVTy->getNumElements()); Value *NewBitcast = Builder.CreateBitCast(X, NewType); if (TrueIfSigned) return new ICmpInst(ICmpInst::ICMP_SLT, NewBitcast, @@ -3352,8 +3352,9 @@ Type *OpTy = M->getType(); auto *VecC = dyn_cast(M); if (OpTy->isVectorTy() && VecC && VecC->containsUndefElement()) { + auto *OpVTy = cast(OpTy); Constant *SafeReplacementConstant = nullptr; - for (unsigned i = 0, e = OpTy->getVectorNumElements(); i != e; ++i) { + for (unsigned i = 0, e = OpVTy->getNumElements(); i != e; ++i) { if (!isa(VecC->getAggregateElement(i))) { SafeReplacementConstant = VecC->getAggregateElement(i); break; @@ -5187,8 +5188,8 @@ // Bail out if the constant can't be safely incremented/decremented. if (!ConstantIsOk(CI)) return llvm::None; - } else if (Type->isVectorTy()) { - unsigned NumElts = Type->getVectorNumElements(); + } else if (auto *VTy = dyn_cast(Type)) { + unsigned NumElts = VTy->getNumElements(); for (unsigned i = 0; i != NumElts; ++i) { Constant *Elt = C->getAggregateElement(i); if (!Elt) @@ -5409,7 +5410,8 @@ if (ScalarC && match(M, m_SplatOrUndefMask(MaskSplatIndex))) { // We allow undefs in matching, but this transform removes those for safety. // Demanded elements analysis should be able to recover some/all of that. - C = ConstantVector::getSplat(V1Ty->getVectorElementCount(), ScalarC); + C = ConstantVector::getSplat(cast(V1Ty)->getElementCount(), + ScalarC); SmallVector NewM(M.size(), MaskSplatIndex); Value *NewCmp = IsFP ? 
Builder.CreateFCmp(Pred, V1, C) : Builder.CreateICmp(Pred, V1, C); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h --- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h +++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h @@ -244,9 +244,10 @@ /// If no identity constant exists, replace undef with some other safe constant. static inline Constant *getSafeVectorConstantForBinop( BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant) { - assert(In->getType()->isVectorTy() && "Not expecting scalars here"); + auto *InVTy = dyn_cast(In->getType()); + assert(InVTy && "Not expecting scalars here"); - Type *EltTy = In->getType()->getVectorElementType(); + Type *EltTy = InVTy->getElementType(); auto *SafeC = ConstantExpr::getBinOpIdentity(Opcode, EltTy, IsRHSConstant); if (!SafeC) { // TODO: Should this be available as a constant utility function? It is @@ -284,7 +285,7 @@ } } assert(SafeC && "Must have safe constant for binop"); - unsigned NumElts = In->getType()->getVectorNumElements(); + unsigned NumElts = InVTy->getNumElements(); SmallVector Out(NumElts); for (unsigned i = 0; i != NumElts; ++i) { Constant *C = In->getAggregateElement(i); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp @@ -591,10 +591,9 @@ // infinite loop). Type *Dummy; if (!Ty->isIntegerTy() && Ty->isSized() && - !(Ty->isVectorTy() && Ty->getVectorIsScalable()) && + !(Ty->isVectorTy() && cast(Ty)->isScalable()) && DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) && - DL.typeSizeEqualsStoreSize(Ty) && - !DL.isNonIntegralPointerType(Ty) && + DL.typeSizeEqualsStoreSize(Ty) && !DL.isNonIntegralPointerType(Ty) && !isMinMaxWithLoads( peekThroughBitcast(LI.getPointerOperand(), /*OneUseOnly=*/true), Dummy)) { diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp @@ -108,7 +108,8 @@ return nullptr; SmallVector Elts; - for (unsigned I = 0, E = Ty->getVectorNumElements(); I != E; ++I) { + for (unsigned I = 0, E = cast(Ty)->getNumElements(); I != E; + ++I) { Constant *Elt = C->getAggregateElement(I); if (!Elt) return nullptr; @@ -1433,7 +1434,7 @@ // If it's a constant vector, flip any negative values positive. if (isa(Op1) || isa(Op1)) { Constant *C = cast(Op1); - unsigned VWidth = C->getType()->getVectorNumElements(); + unsigned VWidth = cast(C->getType())->getNumElements(); bool hasNegative = false; bool hasMissing = false; diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp @@ -301,10 +301,11 @@ // The select condition may be a vector. We may only change the operand // type if the vector width remains the same (and matches the condition). 
-  if (CondTy->isVectorTy()) {
+  if (auto *CondVTy = dyn_cast(CondTy)) {
     if (!FIOpndTy->isVectorTy())
       return nullptr;
 
-    if (CondTy->getVectorNumElements() != FIOpndTy->getVectorNumElements())
+    if (CondVTy->getNumElements() !=
+        cast(FIOpndTy)->getNumElements())
       return nullptr;
 
     // TODO: If the backend knew how to deal with casts better, we could
@@ -1935,7 +1936,7 @@
   if (!CondVal->getType()->isVectorTy() || !match(CondVal, m_Constant(CondC)))
     return nullptr;
 
-  unsigned NumElts = CondVal->getType()->getVectorNumElements();
+  unsigned NumElts = cast(CondVal->getType())->getNumElements();
   SmallVector Mask;
   Mask.reserve(NumElts);
   Type *Int32Ty = Type::getInt32Ty(CondVal->getContext());
@@ -1971,8 +1972,8 @@
 /// is likely better for vector codegen.
 static Instruction *canonicalizeScalarSelectOfVecs(SelectInst &Sel,
                                                    InstCombiner &IC) {
-  Type *Ty = Sel.getType();
-  if (!Ty->isVectorTy())
+  auto *Ty = dyn_cast(Sel.getType());
+  if (!Ty)
     return nullptr;
 
   // We can replace a single-use extract with constant index.
@@ -1983,7 +1984,7 @@
   // select (extelt V, Index), T, F --> select (splat V, Index), T, F
   // Splatting the extracted condition reduces code (we could directly create a
   // splat shuffle of the source vector to eliminate the intermediate step).
-  unsigned NumElts = Ty->getVectorNumElements();
+  unsigned NumElts = Ty->getNumElements();
   return IC.replaceOperand(Sel, 0, IC.Builder.CreateVectorSplat(NumElts, Cond));
 }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -1074,7 +1074,8 @@
       DemandedElts.getActiveBits() == 3)
     return nullptr;
 
-  unsigned VWidth = II->getType()->getVectorNumElements();
+  auto *IIVTy = cast(II->getType());
+  unsigned VWidth = IIVTy->getNumElements();
   if (VWidth == 1)
     return nullptr;
 
@@ -1180,7 +1181,7 @@
       Intrinsic::matchIntrinsicSignature(FTy, TableRef, OverloadTys);
 
   Module *M = II->getParent()->getParent()->getParent();
-  Type *EltTy = II->getType()->getVectorElementType();
+  Type *EltTy = IIVTy->getElementType();
   Type *NewTy = (NewNumElts == 1) ? EltTy : VectorType::get(EltTy, NewNumElts);
 
   OverloadTys[0] = NewTy;
@@ -1227,7 +1228,7 @@
                                                 APInt &UndefElts,
                                                 unsigned Depth,
                                                 bool AllowMultipleUsers) {
-  unsigned VWidth = V->getType()->getVectorNumElements();
+  unsigned VWidth = cast(V->getType())->getNumElements();
   APInt EltMask(APInt::getAllOnesValue(VWidth));
   assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");
 
@@ -1386,7 +1387,7 @@
                Shuffle->getOperand(1)->getType() &&
            "Expected shuffle operands to have same type");
     unsigned OpWidth =
-        Shuffle->getOperand(0)->getType()->getVectorNumElements();
+        cast(Shuffle->getOperand(0)->getType())->getNumElements();
     // Handle trivial case of a splat. Only check the first element of LHS
     // operand.
     if (all_of(Shuffle->getShuffleMask(), [](int Elt) { return Elt == 0; }) &&
@@ -1799,7 +1800,7 @@
   case Intrinsic::x86_avx512_packusdw_512:
   case Intrinsic::x86_avx512_packuswb_512: {
     auto *Ty0 = II->getArgOperand(0)->getType();
-    unsigned InnerVWidth = Ty0->getVectorNumElements();
+    unsigned InnerVWidth = cast(Ty0)->getNumElements();
     assert(VWidth == (InnerVWidth * 2) && "Unexpected input size");
 
     unsigned NumLanes = Ty0->getPrimitiveSizeInBits() / 128;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -175,9 +175,9 @@
   // If this extractelement is using a bitcast from a vector of the same number
   // of elements, see if we can find the source element from the source vector:
   // extelt (bitcast VecX), IndexC --> bitcast X[IndexC]
-  Type *SrcTy = X->getType();
+  auto *SrcTy = cast(X->getType());
   Type *DestTy = Ext.getType();
-  unsigned NumSrcElts = SrcTy->getVectorNumElements();
+  unsigned NumSrcElts = SrcTy->getNumElements();
   unsigned NumElts = Ext.getVectorOperandType()->getNumElements();
   if (NumSrcElts == NumElts)
     if (Value *Elt = findScalarElement(X, ExtIndexC))
@@ -258,7 +258,7 @@
 /// Find elements of V demanded by UserInstr.
 static APInt findDemandedEltsBySingleUser(Value *V, Instruction *UserInstr) {
-  unsigned VWidth = V->getType()->getVectorNumElements();
+  unsigned VWidth = cast(V->getType())->getNumElements();
 
   // Conservatively assume that all elements are needed.
   APInt UsedElts(APInt::getAllOnesValue(VWidth));
@@ -275,7 +275,8 @@
   }
   case Instruction::ShuffleVector: {
     ShuffleVectorInst *Shuffle = cast(UserInstr);
-    unsigned MaskNumElts = UserInstr->getType()->getVectorNumElements();
+    unsigned MaskNumElts =
+        cast(UserInstr->getType())->getNumElements();
 
     UsedElts = APInt(VWidth, 0);
     for (unsigned i = 0; i < MaskNumElts; i++) {
@@ -301,7 +302,7 @@
 /// no user demands an element of V, then the corresponding bit
 /// remains unset in the returned value.
 static APInt findDemandedEltsByAllUsers(Value *V) {
-  unsigned VWidth = V->getType()->getVectorNumElements();
+  unsigned VWidth = cast(V->getType())->getNumElements();
   APInt UnionUsedElts(VWidth, 0);
 
   for (const Use &U : V->uses()) {
@@ -419,7 +420,7 @@
       int SrcIdx = SVI->getMaskValue(Elt->getZExtValue());
       Value *Src;
       unsigned LHSWidth =
-          SVI->getOperand(0)->getType()->getVectorNumElements();
+          cast(SVI->getOperand(0)->getType())->getNumElements();
 
       if (SrcIdx < 0)
         return replaceInstUsesWith(EI, UndefValue::get(EI.getType()));
@@ -453,7 +454,7 @@
                                          SmallVectorImpl &Mask) {
   assert(LHS->getType() == RHS->getType() &&
          "Invalid CollectSingleShuffleElements");
-  unsigned NumElts = V->getType()->getVectorNumElements();
+  unsigned NumElts = cast(V->getType())->getNumElements();
 
   if (isa(V)) {
     Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(V->getContext())));
@@ -495,7 +496,8 @@
     if (isa(EI->getOperand(1))) {
       unsigned ExtractedIdx =
           cast(EI->getOperand(1))->getZExtValue();
-      unsigned NumLHSElts = LHS->getType()->getVectorNumElements();
+      unsigned NumLHSElts =
+          cast(LHS->getType())->getNumElements();
 
       // This must be extracting from either LHS or RHS.
       if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
@@ -531,8 +533,8 @@
                                           InstCombiner &IC) {
   VectorType *InsVecType = InsElt->getType();
   VectorType *ExtVecType = ExtElt->getVectorOperandType();
-  unsigned NumInsElts = InsVecType->getVectorNumElements();
-  unsigned NumExtElts = ExtVecType->getVectorNumElements();
+  unsigned NumInsElts = InsVecType->getNumElements();
+  unsigned NumExtElts = ExtVecType->getNumElements();
 
   // The inserted-to vector must be wider than the extracted-from vector.
   if (InsVecType->getElementType() != ExtVecType->getElementType() ||
@@ -615,7 +617,7 @@
                                                 Value *PermittedRHS,
                                                 InstCombiner &IC) {
   assert(V->getType()->isVectorTy() && "Invalid shuffle!");
-  unsigned NumElts = V->getType()->getVectorNumElements();
+  unsigned NumElts = cast(V->getType())->getNumElements();
 
   if (isa(V)) {
     Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(V->getContext())));
@@ -659,7 +661,8 @@
           return std::make_pair(V, nullptr);
         }
 
-        unsigned NumLHSElts = RHS->getType()->getVectorNumElements();
+        unsigned NumLHSElts =
+            cast(RHS->getType())->getNumElements();
         Mask[InsertedIdx % NumElts] =
             ConstantInt::get(Type::getInt32Ty(V->getContext()),
                              NumLHSElts+ExtractedIdx);
@@ -670,7 +673,7 @@
         // We've gone as far as we can: anything on the other side of the
         // extractelement will already have been converted into a shuffle.
         unsigned NumLHSElts =
-            EI->getOperand(0)->getType()->getVectorNumElements();
+            cast(EI->getOperand(0)->getType())->getNumElements();
         for (unsigned i = 0; i != NumElts; ++i)
           Mask.push_back(ConstantInt::get(
               Type::getInt32Ty(V->getContext()),
@@ -731,7 +734,8 @@
 static bool isShuffleEquivalentToSelect(ShuffleVectorInst &Shuf) {
   int MaskSize = Shuf.getShuffleMask().size();
-  int VecSize = Shuf.getOperand(0)->getType()->getVectorNumElements();
+  int VecSize =
+      cast(Shuf.getOperand(0)->getType())->getNumElements();
 
   // A vector select does not change the size of the operands.
   if (MaskSize != VecSize)
@@ -841,7 +845,7 @@
   // For example:
   // inselt (shuf (inselt undef, X, 0), undef, <0,undef,0,undef>), X, 1
   // --> shuf (inselt undef, X, 0), undef, <0,0,0,undef>
-  unsigned NumMaskElts = Shuf->getType()->getVectorNumElements();
+  unsigned NumMaskElts = Shuf->getType()->getNumElements();
   SmallVector NewMask(NumMaskElts);
   for (unsigned i = 0; i != NumMaskElts; ++i)
     NewMask[i] = i == IdxC ? 0 : Shuf->getMaskValue(i);
@@ -874,7 +878,7 @@
   // that same index value.
   // For example:
   // inselt (shuf X, IdMask), (extelt X, IdxC), IdxC --> shuf X, IdMask'
-  unsigned NumMaskElts = Shuf->getType()->getVectorNumElements();
+  unsigned NumMaskElts = Shuf->getType()->getNumElements();
   SmallVector NewMask(NumMaskElts);
   ArrayRef OldMask = Shuf->getShuffleMask();
   for (unsigned i = 0; i != NumMaskElts; ++i) {
@@ -1038,7 +1042,8 @@
       match(ScalarOp, m_BitCast(m_Value(ScalarSrc))) &&
       (VecOp->hasOneUse() || ScalarOp->hasOneUse()) &&
       VecSrc->getType()->isVectorTy() && !ScalarSrc->getType()->isVectorTy() &&
-      VecSrc->getType()->getVectorElementType() == ScalarSrc->getType()) {
+      cast(VecSrc->getType())->getElementType() ==
+          ScalarSrc->getType()) {
     // inselt (bitcast VecSrc), (bitcast ScalarSrc), IdxOp -->
     // bitcast (inselt VecSrc, ScalarSrc, IdxOp)
     Value *NewInsElt = Builder.CreateInsertElement(VecSrc, ScalarSrc, IdxOp);
@@ -1050,9 +1055,9 @@
   uint64_t InsertedIdx, ExtractedIdx;
   Value *ExtVecOp;
   if (match(IdxOp, m_ConstantInt(InsertedIdx)) &&
-      match(ScalarOp, m_ExtractElement(m_Value(ExtVecOp),
-                                       m_ConstantInt(ExtractedIdx))) &&
-      ExtractedIdx < ExtVecOp->getType()->getVectorNumElements()) {
+      match(ScalarOp,
+            m_ExtractElement(m_Value(ExtVecOp), m_ConstantInt(ExtractedIdx))) &&
+      ExtractedIdx < cast(ExtVecOp->getType())->getNumElements()) {
     // TODO: Looking at the user(s) to determine if this insert is a
     // fold-to-shuffle opportunity does not match the usual instcombine
     // constraints. We should decide if the transform is worthy based only
@@ -1093,7 +1098,7 @@
     }
   }
 
-  unsigned VWidth = VecOp->getType()->getVectorNumElements();
+  unsigned VWidth = cast(VecOp->getType())->getNumElements();
   APInt UndefElts(VWidth, 0);
   APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
   if (Value *V = SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts)) {
@@ -1178,7 +1183,8 @@
   // Bail out if we would create longer vector ops. We could allow creating
   // longer vector ops, but that may result in more expensive codegen.
   Type *ITy = I->getType();
-  if (ITy->isVectorTy() && Mask.size() > ITy->getVectorNumElements())
+  if (ITy->isVectorTy() &&
+      Mask.size() > cast(ITy)->getNumElements())
     return false;
   for (Value *Operand : I->operands()) {
     if (!canEvaluateShuffled(Operand, Mask, Depth - 1))
@@ -1266,9 +1272,9 @@
     case Instruction::FPExt: {
       // It's possible that the mask has a different number of elements from
      // the original cast. We recompute the destination type to match the mask.
-      Type *DestTy =
-          VectorType::get(I->getType()->getScalarType(),
-                          NewOps[0]->getType()->getVectorNumElements());
+      Type *DestTy = VectorType::get(
+          I->getType()->getScalarType(),
+          cast(NewOps[0]->getType())->getElementCount());
       assert(NewOps.size() == 1 && "cast with #ops != 1");
       return CastInst::Create(cast(I)->getOpcode(), NewOps[0], DestTy,
                               "", I);
@@ -1335,7 +1341,8 @@
     case Instruction::Select:
     case Instruction::GetElementPtr: {
       SmallVector NewOps;
-      bool NeedsRebuild = (Mask.size() != I->getType()->getVectorNumElements());
+      bool NeedsRebuild =
+          (Mask.size() != cast(I->getType())->getNumElements());
       for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
         Value *V;
         // Recursively call evaluateInDifferentElementOrder on vector arguments
@@ -1389,7 +1396,8 @@
 //   +--+--+--+--+
 static bool isShuffleExtractingFromLHS(ShuffleVectorInst &SVI,
                                        ArrayRef Mask) {
-  unsigned LHSElems = SVI.getOperand(0)->getType()->getVectorNumElements();
+  unsigned LHSElems =
+      cast(SVI.getOperand(0)->getType())->getNumElements();
   unsigned MaskElems = Mask.size();
   unsigned BegIdx = Mask.front();
   unsigned EndIdx = Mask.back();
@@ -1521,7 +1529,7 @@
   // For example:
   // shuf (inselt undef, X, 2), undef, <2,2,undef>
   // --> shuf (inselt undef, X, 0), undef, <0,0,undef>
-  unsigned NumMaskElts = Shuf.getType()->getVectorNumElements();
+  unsigned NumMaskElts = Shuf.getType()->getNumElements();
   SmallVector NewMask(NumMaskElts, 0);
   for (unsigned i = 0; i != NumMaskElts; ++i)
     if (Mask[i] == UndefMaskElem)
@@ -1539,7 +1547,7 @@
   // Canonicalize to choose from operand 0 first unless operand 1 is undefined.
   // Commuting undef to operand 0 conflicts with another canonicalization.
-  unsigned NumElts = Shuf.getType()->getVectorNumElements();
+  unsigned NumElts = Shuf.getType()->getNumElements();
   if (!isa(Shuf.getOperand(1)) &&
       Shuf.getMaskValue(0) >= (int)NumElts) {
     // TODO: Can we assert that both operands of a shuffle-select are not undef
@@ -1676,10 +1684,11 @@
   // We need a narrow condition value. It must be extended with undef elements
   // and have the same number of elements as this shuffle.
-  unsigned NarrowNumElts = Shuf.getType()->getVectorNumElements();
+  unsigned NarrowNumElts = Shuf.getType()->getNumElements();
   Value *NarrowCond;
   if (!match(Cond, m_OneUse(m_ShuffleVector(m_Value(NarrowCond), m_Undef()))) ||
-      NarrowCond->getType()->getVectorNumElements() != NarrowNumElts ||
+      cast(NarrowCond->getType())->getNumElements() !=
+          NarrowNumElts ||
       !cast(Cond)->isIdentityWithPadding())
     return nullptr;
 
@@ -1718,7 +1727,7 @@
   // new shuffle mask. Otherwise, copy the original mask element. Example:
   // shuf (shuf X, Y, ), undef, <0, undef, 2, 3> -->
   // shuf X, Y,
-  unsigned NumElts = Shuf.getType()->getVectorNumElements();
+  unsigned NumElts = Shuf.getType()->getNumElements();
   SmallVector NewMask(NumElts);
   assert(NumElts < Mask.size() &&
          "Identity with extract must have less elements than its inputs");
@@ -1743,7 +1752,7 @@
   // TODO: This restriction could be removed if the insert has only one use
   // (because the transform would require a new length-changing shuffle).
   int NumElts = Mask.size();
-  if (NumElts != (int)(V0->getType()->getVectorNumElements()))
+  if (NumElts != (int)(cast(V0->getType())->getNumElements()))
     return nullptr;
 
   // This is a specialization of a fold in SimplifyDemandedVectorElts. We may
@@ -1838,9 +1847,9 @@
   Value *X = Shuffle0->getOperand(0);
   Value *Y = Shuffle1->getOperand(0);
   if (X->getType() != Y->getType() ||
-      !isPowerOf2_32(Shuf.getType()->getVectorNumElements()) ||
-      !isPowerOf2_32(Shuffle0->getType()->getVectorNumElements()) ||
-      !isPowerOf2_32(X->getType()->getVectorNumElements()) ||
+      !isPowerOf2_32(Shuf.getType()->getNumElements()) ||
+      !isPowerOf2_32(Shuffle0->getType()->getNumElements()) ||
+      !isPowerOf2_32(cast(X->getType())->getNumElements()) ||
       isa(X) || isa(Y))
     return nullptr;
   assert(isa(Shuffle0->getOperand(1)) &&
@@ -1851,8 +1860,8 @@
   // operands directly by adjusting the shuffle mask to account for the narrower
   // types:
   // shuf (widen X), (widen Y), Mask --> shuf X, Y, Mask'
-  int NarrowElts = X->getType()->getVectorNumElements();
-  int WideElts = Shuffle0->getType()->getVectorNumElements();
+  int NarrowElts = cast(X->getType())->getNumElements();
+  int WideElts = Shuffle0->getType()->getNumElements();
   assert(WideElts > NarrowElts && "Unexpected types for identity with padding");
 
   Type *I32Ty = IntegerType::getInt32Ty(Shuf.getContext());
@@ -1895,8 +1904,8 @@
     return replaceInstUsesWith(SVI, V);
 
   // shuffle x, x, mask --> shuffle x, undef, mask'
-  unsigned VWidth = SVI.getType()->getVectorNumElements();
-  unsigned LHSWidth = LHS->getType()->getVectorNumElements();
+  unsigned VWidth = SVI.getType()->getNumElements();
+  unsigned LHSWidth = cast(LHS->getType())->getNumElements();
   ArrayRef Mask = SVI.getShuffleMask();
   Type *Int32Ty = Type::getInt32Ty(SVI.getContext());
   if (LHS == RHS) {
@@ -2103,11 +2112,11 @@
   if (LHSShuffle) {
     LHSOp0 = LHSShuffle->getOperand(0);
     LHSOp1 = LHSShuffle->getOperand(1);
-    LHSOp0Width = LHSOp0->getType()->getVectorNumElements();
+    LHSOp0Width = cast(LHSOp0->getType())->getNumElements();
   }
   if (RHSShuffle) {
     RHSOp0 = RHSShuffle->getOperand(0);
-    RHSOp0Width = RHSOp0->getType()->getVectorNumElements();
+    RHSOp0Width = cast(RHSOp0->getType())->getNumElements();
   }
   Value* newLHS = LHS;
   Value* newRHS = RHS;
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1627,7 +1627,7 @@
   if (match(&Inst,
             m_c_BinOp(m_OneUse(m_ShuffleVector(m_Value(V1), m_Undef(),
                                                m_Mask(Mask))),
                       m_Constant(C))) &&
-      V1->getType()->getVectorNumElements() <= NumElts) {
+      cast(V1->getType())->getNumElements() <= NumElts) {
     assert(Inst.getType()->getScalarType() == V1->getType()->getScalarType() &&
            "Shuffle should not change scalar type");
 
@@ -1638,7 +1638,7 @@
     // ShMask = <1,1,2,2> and C = <5,5,6,6> --> NewC =
     bool ConstOp1 = isa(RHS);
     ArrayRef ShMask = Mask;
-    unsigned SrcVecNumElts = V1->getType()->getVectorNumElements();
+    unsigned SrcVecNumElts = cast(V1->getType())->getNumElements();
     UndefValue *UndefScalar = UndefValue::get(C->getType()->getScalarType());
     SmallVector NewVecC(SrcVecNumElts, UndefScalar);
     bool MayChange = true;
@@ -1849,8 +1849,8 @@
     return replaceInstUsesWith(GEP, V);
 
   // For vector geps, use the generic demanded vector support.
-  if (GEP.getType()->isVectorTy()) {
-    auto VWidth = GEP.getType()->getVectorNumElements();
+  if (auto *GEPVTy = dyn_cast(GEP.getType())) {
+    auto VWidth = GEPVTy->getNumElements();
     APInt UndefElts(VWidth, 0);
     APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
     if (Value *V = SimplifyDemandedVectorElts(&GEP, AllOnesEltMask,
@@ -1862,7 +1862,7 @@
 
     // TODO: 1) Scalarize splat operands, 2) scalarize entire instruction if
     // possible (decide on canonical form for pointer broadcast), 3) exploit
-    // undef elements to decrease demanded bits
+    // undef elements to decrease demanded bits
   }
 
   Value *PtrOp = GEP.getOperand(0);
@@ -1886,7 +1886,8 @@
     Type *IndexTy = (*I)->getType();
     Type *NewIndexType =
         IndexTy->isVectorTy()
-            ? VectorType::get(NewScalarIndexTy, IndexTy->getVectorNumElements())
+            ? VectorType::get(NewScalarIndexTy,
+                              cast(IndexTy)->getNumElements())
             : NewScalarIndexTy;
 
     // If the element type has zero size then any index over it is equivalent
@@ -2381,8 +2382,9 @@
       // gep (bitcast [c x ty]* X to *), Y, Z --> gep X, Y, Z
       auto areMatchingArrayAndVecTypes = [](Type *ArrTy, Type *VecTy,
                                             const DataLayout &DL) {
-        return ArrTy->getArrayElementType() == VecTy->getVectorElementType() &&
-               ArrTy->getArrayNumElements() == VecTy->getVectorNumElements() &&
+        auto *VecVTy = cast(VecTy);
+        return ArrTy->getArrayElementType() == VecVTy->getElementType() &&
+               ArrTy->getArrayNumElements() == VecVTy->getNumElements() &&
               DL.getTypeAllocSize(ArrTy) == DL.getTypeAllocSize(VecTy);
      };
      if (GEP.getNumOperands() == 3 &&
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1513,9 +1513,10 @@
                                         unsigned Granularity, uint32_t TypeSize,
                                         bool IsWrite, Value *SizeArgument,
                                         bool UseCalls, uint32_t Exp) {
-  auto *VTy = cast(Addr->getType())->getElementType();
+  auto *VTy =
+      cast(cast(Addr->getType())->getElementType());
   uint64_t ElemTypeSize = DL.getTypeStoreSizeInBits(VTy->getScalarType());
-  unsigned Num = VTy->getVectorNumElements();
+  unsigned Num = VTy->getNumElements();
   auto Zero = ConstantInt::get(IntptrTy, 0);
   for (unsigned Idx = 0; Idx < Num; ++Idx) {
     Value *InstrumentedAddress = nullptr;
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2068,9 +2068,9 @@
   size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
     assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
            "Vector of pointers is not a valid shadow type");
-    return Ty->isVectorTy() ?
-      Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
-      Ty->getPrimitiveSizeInBits();
+    return Ty->isVectorTy() ?
+               cast(Ty)->getNumElements() *
+                   Ty->getScalarSizeInBits()
+               : Ty->getPrimitiveSizeInBits();
   }
 
   /// Cast between two shadow types, extending or truncating as
@@ -2086,7 +2086,8 @@
     if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
       return IRB.CreateIntCast(V, dstTy, Signed);
     if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
-        dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
+        cast(dstTy)->getNumElements() ==
+            cast(srcTy)->getNumElements())
       return IRB.CreateIntCast(V, dstTy, Signed);
     Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
     Value *V2 =
@@ -2130,9 +2131,9 @@
                                 Value *OtherArg) {
     Constant *ShadowMul;
     Type *Ty = ConstArg->getType();
-    if (Ty->isVectorTy()) {
-      unsigned NumElements = Ty->getVectorNumElements();
-      Type *EltTy = Ty->getSequentialElementType();
+    if (auto *VTy = dyn_cast(Ty)) {
+      unsigned NumElements = VTy->getNumElements();
+      Type *EltTy = VTy->getElementType();
       SmallVector Elements;
       for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
         if (ConstantInt *Elt =
@@ -2657,7 +2658,7 @@
     assert(CopyOp->getType() == I.getType());
     assert(CopyOp->getType()->isVectorTy());
     Value *ResultShadow = getShadow(CopyOp);
-    Type *EltTy = ResultShadow->getType()->getVectorElementType();
+    Type *EltTy = cast(ResultShadow->getType())->getElementType();
     for (int i = 0; i < NumUsedElements; ++i) {
       ResultShadow = IRB.CreateInsertElement(
           ResultShadow, ConstantInt::getNullValue(EltTy),
@@ -2959,8 +2960,9 @@
     Value *Acc = IRB.CreateExtractElement(
         MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
-    for (int i = 1, N = PassThru->getType()->getVectorNumElements(); i < N;
-         ++i) {
+    for (int i = 1,
+             N = cast(PassThru->getType())->getNumElements();
+         i < N; ++i) {
       Value *More = IRB.CreateExtractElement(
           MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), i));
       Acc = IRB.CreateOr(Acc, More);
@@ -3020,7 +3022,8 @@
   void handlePclmulIntrinsic(IntrinsicInst &I) {
     IRBuilder<> IRB(&I);
     Type *ShadowTy = getShadowTy(&I);
-    unsigned Width = I.getArgOperand(0)->getType()->getVectorNumElements();
+    unsigned Width =
+        cast(I.getArgOperand(0)->getType())->getNumElements();
     assert(isa(I.getArgOperand(2)) &&
            "pclmul 3rd operand must be a constant");
     unsigned Imm = cast(I.getArgOperand(2))->getZExtValue();
diff --git a/llvm/lib/Transforms/Instrumentation/PoisonChecking.cpp b/llvm/lib/Transforms/Instrumentation/PoisonChecking.cpp
--- a/llvm/lib/Transforms/Instrumentation/PoisonChecking.cpp
+++ b/llvm/lib/Transforms/Instrumentation/PoisonChecking.cpp
@@ -195,10 +195,11 @@
     break;
   case Instruction::ExtractElement: {
     Value *Vec = I.getOperand(0);
-    if (Vec->getType()->getVectorIsScalable())
+    auto *VecVTy = cast(Vec->getType());
+    if (VecVTy->isScalable())
       break;
     Value *Idx = I.getOperand(1);
-    unsigned NumElts = Vec->getType()->getVectorNumElements();
+    unsigned NumElts = VecVTy->getNumElements();
     Value *Check =
       B.CreateICmp(ICmpInst::ICMP_UGE, Idx,
                    ConstantInt::get(Idx->getType(), NumElts));
@@ -207,10 +208,11 @@
   }
   case Instruction::InsertElement: {
     Value *Vec = I.getOperand(0);
-    if (Vec->getType()->getVectorIsScalable())
+    auto *VecVTy = cast(Vec->getType());
+    if (VecVTy->isScalable())
       break;
     Value *Idx = I.getOperand(2);
-    unsigned NumElts = Vec->getType()->getVectorNumElements();
+    unsigned NumElts = VecVTy->getNumElements();
     Value *Check =
       B.CreateICmp(ICmpInst::ICMP_UGE, Idx,
                    ConstantInt::get(Idx->getType(), NumElts));
diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
--- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -2631,9 +2631,11 @@
   unsigned VF = 0;
   for (unsigned i = 0; i < I.getNumOperands(); i++)
     if (I.getOperand(i)->getType()->isVectorTy()) {
-      assert(VF == 0 ||
-             VF == I.getOperand(i)->getType()->getVectorNumElements());
-      VF = I.getOperand(i)->getType()->getVectorNumElements();
+      assert(
+          VF == 0 ||
+          VF ==
+              cast(I.getOperand(i)->getType())->getNumElements());
+      VF = cast(I.getOperand(i)->getType())->getNumElements();
     }
 
   // It's the vector to scalar traversal through the pointer operand which
diff --git a/llvm/lib/Transforms/Scalar/Scalarizer.cpp b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
--- a/llvm/lib/Transforms/Scalar/Scalarizer.cpp
+++ b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
@@ -252,7 +252,7 @@
   PtrTy = dyn_cast(Ty);
   if (PtrTy)
     Ty = PtrTy->getElementType();
-  Size = Ty->getVectorNumElements();
+  Size = cast(Ty)->getNumElements();
   if (!CachePtr)
     Tmp.resize(Size, nullptr);
   else if (CachePtr->empty())
@@ -269,7 +269,7 @@
     return CV[I];
   IRBuilder<> Builder(BB, BBI);
   if (PtrTy) {
-    Type *ElTy = PtrTy->getElementType()->getVectorElementType();
+    Type *ElTy = cast(PtrTy->getElementType())->getElementType();
     if (!CV[0]) {
       Type *NewPtrTy = PointerType::get(ElTy, PtrTy->getAddressSpace());
       CV[0] = Builder.CreateBitCast(V, NewPtrTy, V->getName() + ".i0");
@@ -852,10 +852,10 @@
     if (!Op->use_empty()) {
       // The value is still needed, so recreate it using a series of
       // InsertElements.
-      Type *Ty = Op->getType();
+      auto *Ty = cast(Op->getType());
       Value *Res = UndefValue::get(Ty);
       BasicBlock *BB = Op->getParent();
-      unsigned Count = Ty->getVectorNumElements();
+      unsigned Count = Ty->getNumElements();
       IRBuilder<> Builder(Op);
       if (isa(Op))
         Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
diff --git a/llvm/lib/Transforms/Utils/LoopUtils.cpp b/llvm/lib/Transforms/Utils/LoopUtils.cpp
--- a/llvm/lib/Transforms/Utils/LoopUtils.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUtils.cpp
@@ -880,7 +880,7 @@
     unsigned Op,
     RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind,
     ArrayRef RedOps) {
-  unsigned VF = Src->getType()->getVectorNumElements();
+  unsigned VF = cast(Src->getType())->getNumElements();
   // Extract and apply reduction ops in ascending order:
   // e.g. ((((Acc + Scl[0]) + Scl[1]) + Scl[2]) + ) ... + Scl[VF-1]
@@ -910,7 +910,7 @@
 llvm::getShuffleReduction(IRBuilderBase &Builder, Value *Src, unsigned Op,
                           RecurrenceDescriptor::MinMaxRecurrenceKind MinMaxKind,
                           ArrayRef RedOps) {
-  unsigned VF = Src->getType()->getVectorNumElements();
+  unsigned VF = cast(Src->getType())->getNumElements();
   // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles
   // and vector ops, reducing the set of values being computed by half each
   // round.
@@ -983,13 +983,15 @@
   case Instruction::FAdd:
     BuildFunc = [&]() {
       auto Rdx = Builder.CreateFAddReduce(
-          Constant::getNullValue(Src->getType()->getVectorElementType()), Src);
+          Constant::getNullValue(
+              cast(Src->getType())->getElementType()),
+          Src);
       return Rdx;
     };
     break;
   case Instruction::FMul:
     BuildFunc = [&]() {
-      Type *Ty = Src->getType()->getVectorElementType();
+      Type *Ty = cast(Src->getType())->getElementType();
       auto Rdx = Builder.CreateFMulReduce(ConstantFP::get(Ty, 1.0), Src);
       return Rdx;
     };
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1905,8 +1905,8 @@
 Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx, Value *Step,
                                           Instruction::BinaryOps BinOp) {
   // Create and check the types.
-  assert(Val->getType()->isVectorTy() && "Must be a vector");
-  int VLen = Val->getType()->getVectorNumElements();
+  auto *ValVTy = cast(Val->getType());
+  int VLen = ValVTy->getNumElements();
 
   Type *STy = Val->getType()->getScalarType();
   assert((STy->isIntegerTy() || STy->isFloatingPointTy()) &&
@@ -3318,13 +3318,14 @@
 }
 
 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
-  auto *I1 = cast(T1->getVectorElementType());
-  auto *I2 = cast(T2->getVectorElementType());
+  auto *I1 = cast(cast(T1)->getElementType());
+  auto *I2 = cast(cast(T2)->getElementType());
   return I1->getBitWidth() < I2->getBitWidth() ? T1 : T2;
 }
+
 static Type *largestIntegerVectorType(Type *T1, Type *T2) {
-  auto *I1 = cast(T1->getVectorElementType());
-  auto *I2 = cast(T2->getVectorElementType());
+  auto *I1 = cast(cast(T1)->getElementType());
+  auto *I2 = cast(cast(T2)->getElementType());
   return I1->getBitWidth() > I2->getBitWidth() ? T1 : T2;
 }
 
@@ -3347,8 +3348,8 @@
       Type *OriginalTy = I->getType();
       Type *ScalarTruncatedTy =
          IntegerType::get(OriginalTy->getContext(), KV.second);
-      Type *TruncatedTy = VectorType::get(ScalarTruncatedTy,
-                                          OriginalTy->getVectorNumElements());
+      Type *TruncatedTy = VectorType::get(
+          ScalarTruncatedTy, cast(OriginalTy)->getNumElements());
       if (TruncatedTy == OriginalTy)
         continue;
 
@@ -3398,10 +3399,12 @@
           break;
         }
       } else if (auto *SI = dyn_cast(I)) {
-        auto Elements0 = SI->getOperand(0)->getType()->getVectorNumElements();
+        auto Elements0 =
+            cast(SI->getOperand(0)->getType())->getNumElements();
         auto *O0 = B.CreateZExtOrTrunc(
             SI->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements0));
-        auto Elements1 = SI->getOperand(1)->getType()->getVectorNumElements();
+        auto Elements1 =
+            cast(SI->getOperand(1)->getType())->getNumElements();
         auto *O1 = B.CreateZExtOrTrunc(
             SI->getOperand(1), VectorType::get(ScalarTruncatedTy, Elements1));
 
@@ -3410,13 +3413,15 @@
         // Don't do anything with the operands, just extend the result.
         continue;
       } else if (auto *IE = dyn_cast(I)) {
-        auto Elements = IE->getOperand(0)->getType()->getVectorNumElements();
+        auto Elements =
+            cast(IE->getOperand(0)->getType())->getNumElements();
         auto *O0 = B.CreateZExtOrTrunc(
             IE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
         auto *O1 = B.CreateZExtOrTrunc(IE->getOperand(1), ScalarTruncatedTy);
         NewI = B.CreateInsertElement(O0, O1, IE->getOperand(2));
       } else if (auto *EE = dyn_cast(I)) {
-        auto Elements = EE->getOperand(0)->getType()->getVectorNumElements();
+        auto Elements =
+            cast(EE->getOperand(0)->getType())->getNumElements();
         auto *O0 = B.CreateZExtOrTrunc(
             EE->getOperand(0), VectorType::get(ScalarTruncatedTy, Elements));
         NewI = B.CreateExtractElement(O0, EE->getOperand(2));
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -285,7 +285,7 @@
 static Optional isShuffle(ArrayRef VL) {
   auto *EI0 = cast(VL[0]);
-  unsigned Size = EI0->getVectorOperandType()->getVectorNumElements();
+  unsigned Size = EI0->getVectorOperandType()->getNumElements();
   Value *Vec1 = nullptr;
   Value *Vec2 = nullptr;
   enum ShuffleMode { Unknown, Select, Permute };
@@ -294,7 +294,7 @@
     auto *EI = cast(VL[I]);
     auto *Vec = EI->getVectorOperand();
     // All vector operands must have the same number of vector elements.
-    if (Vec->getType()->getVectorNumElements() != Size)
+    if (cast(Vec->getType())->getNumElements() != Size)
       return None;
     auto *Idx = dyn_cast(EI->getIndexOperand());
     if (!Idx)
@@ -3178,7 +3178,7 @@
       if (!LI || !LI->isSimple() || !LI->hasNUses(VL.size()))
         return false;
     } else {
-      NElts = Vec->getType()->getVectorNumElements();
+      NElts = cast(Vec->getType())->getNumElements();
     }
 
     if (NElts != VL.size())
diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
--- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -218,10 +218,10 @@
   // ShufMask = { 2, undef, undef, undef }
   uint64_t SplatIndex = ConvertToShuffle == Ext0 ? C0 : C1;
   uint64_t CheapExtIndex = ConvertToShuffle == Ext0 ? C1 : C0;
-  Type *VecTy = V0->getType();
+  auto *VecTy = cast(V0->getType());
   Type *I32Ty = IntegerType::getInt32Ty(I.getContext());
   UndefValue *Undef = UndefValue::get(I32Ty);
-  SmallVector ShufMask(VecTy->getVectorNumElements(), Undef);
+  SmallVector ShufMask(VecTy->getNumElements(), Undef);
   ShufMask[CheapExtIndex] = ConstantInt::get(I32Ty, SplatIndex);
   IRBuilder<> Builder(ConvertToShuffle);
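
The same mechanical rewrite recurs at every call site in this patch: a vector query that used to live on llvm::Type (getVectorNumElements, getVectorElementType, getVectorElementCount, getVectorIsScalable) is replaced by first casting the type to llvm::VectorType and then using that class's accessors. The sketch below is not part of the diff; it is a minimal illustration of the pattern, assuming the LLVM C++ API of this development cycle (VectorType still provides getNumElements() and isScalable()), and the helper names getLaneCount and isWideEnoughFixedVector are invented for the example.

// Illustrative only -- not part of the patch above.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

// Old style (asserting getter on Type, removed by this cleanup):
//   unsigned N = Ty->getVectorNumElements();
// New style: make the vector assumption explicit with cast<>, which asserts
// that Ty really is a vector type before exposing the element count.
static unsigned getLaneCount(Type *Ty) {
  return cast<VectorType>(Ty)->getNumElements();
}

// Where the code previously paired isVectorTy() with getVectorNumElements(),
// dyn_cast<> performs both the test and the access in one step, and also
// exposes isScalable() for the scalable-vector bail-outs seen in the patch.
static bool isWideEnoughFixedVector(Type *Ty, unsigned MinLanes) {
  if (auto *VTy = dyn_cast<VectorType>(Ty))
    return !VTy->isScalable() && VTy->getNumElements() >= MinLanes;
  return false; // Scalars and non-vector types never qualify.
}

The design point the patch enforces is that vector-specific queries only compile once the caller has named a VectorType, so call sites that silently assumed "this Type is a vector" now either assert via cast<> or handle the non-vector case via dyn_cast<>.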