diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -119,8 +119,9 @@
       ValueTy = lvalue.getType();
       ValueSizeInBits = C.getTypeSize(ValueTy);
       AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
-          lvalue.getType(), lvalue.getExtVectorAddress()
-                                .getElementType()->getVectorNumElements());
+          lvalue.getType(), cast<llvm::VectorType>(
+                                lvalue.getExtVectorAddress().getElementType())
+                                ->getNumElements());
       AtomicSizeInBits = C.getTypeSize(AtomicTy);
       AtomicAlign = ValueAlign = lvalue.getAlignment();
       LVal = lvalue;
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -4502,7 +4502,7 @@
 }
 
 Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
-  ElementCount EC = V->getType()->getVectorElementCount();
+  ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount();
   return EmitNeonSplat(V, C, EC);
 }
 
@@ -5426,8 +5426,8 @@
     assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
     // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
     // it before inserting.
-    Ops[j] =
-        CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType());
+    Ops[j] = CGF.Builder.CreateTruncOrBitCast(
+        Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
     Ops[j] =
         CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
   }
@@ -5715,7 +5715,7 @@
   case NEON::BI__builtin_neon_vld1q_x3_v:
   case NEON::BI__builtin_neon_vld1_x4_v:
   case NEON::BI__builtin_neon_vld1q_x4_v: {
-    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
+    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
     Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
     llvm::Type *Tys[2] = { VTy, PTy };
     Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
@@ -5824,11 +5824,11 @@
   case NEON::BI__builtin_neon_vqdmulh_lane_v:
   case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
   case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
-    llvm::Type *RTy = Ty;
+    auto *RTy = cast<llvm::VectorType>(Ty);
    if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
        BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
-      RTy = llvm::VectorType::get(Ty->getVectorElementType(),
-                                  Ty->getVectorNumElements() * 2);
+      RTy = llvm::VectorType::get(RTy->getElementType(),
+                                  RTy->getNumElements() * 2);
     llvm::Type *Tys[2] = {
         RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
                                              /*isQuad*/ false))};
@@ -5917,7 +5917,7 @@
   case NEON::BI__builtin_neon_vst1q_x3_v:
   case NEON::BI__builtin_neon_vst1_x4_v:
   case NEON::BI__builtin_neon_vst1q_x4_v: {
-    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
+    llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
     // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
     // in AArch64 it comes last. We may want to stick to one or another.
     if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
@@ -7063,8 +7063,9 @@
   // equal to the lane size. In LLVM IR, an LShr with that parameter would be
   // undefined behavior, but in MVE it's legal, so we must convert it to code
   // that is not undefined in IR.
-  unsigned LaneBits =
-      V->getType()->getVectorElementType()->getPrimitiveSizeInBits();
+  unsigned LaneBits = cast<llvm::VectorType>(V->getType())
+                          ->getElementType()
+                          ->getPrimitiveSizeInBits();
   if (Shift == LaneBits) {
     // An unsigned shift of the full lane size always generates zero, so we can
     // simply emit a zero vector. A signed shift of the full lane size does the
@@ -7115,7 +7116,8 @@
   // Make a shufflevector that extracts every other element of a vector (evens
   // or odds, as desired).
   SmallVector<uint32_t, 16> Indices;
-  unsigned InputElements = V->getType()->getVectorNumElements();
+  unsigned InputElements =
+      cast<llvm::VectorType>(V->getType())->getNumElements();
   for (unsigned i = 0; i < InputElements; i += 2)
     Indices.push_back(i + Odd);
   return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
@@ -7127,7 +7129,8 @@
   // Make a shufflevector that interleaves two vectors element by element.
   assert(V0->getType() == V1->getType() && "Can't zip different vector types");
   SmallVector<uint32_t, 16> Indices;
-  unsigned InputElements = V0->getType()->getVectorNumElements();
+  unsigned InputElements =
+      cast<llvm::VectorType>(V0->getType())->getNumElements();
   for (unsigned i = 0; i < InputElements; i++) {
     Indices.push_back(i);
     Indices.push_back(i + InputElements);
@@ -7139,7 +7142,7 @@
 static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {
   // MVE-specific helper function to make a vector splat of a constant such as
   // UINT_MAX or INT_MIN, in which all bits below the highest one are equal.
-  llvm::Type *T = VT->getVectorElementType();
+  llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType();
   unsigned LaneBits = T->getPrimitiveSizeInBits();
   uint32_t Value = HighBit << (LaneBits - 1);
   if (OtherBits)
@@ -7472,8 +7475,7 @@
   // The vector type that is returned may be different from the
   // eventual type loaded from memory.
   auto VectorTy = cast<llvm::VectorType>(ReturnTy);
-  auto MemoryTy =
-      llvm::VectorType::get(MemEltTy, VectorTy->getVectorElementCount());
+  auto MemoryTy = llvm::VectorType::get(MemEltTy, VectorTy->getElementCount());
 
   Value *Offset = Builder.getInt32(0);
   Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
@@ -9984,8 +9986,8 @@
   Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                               llvm::PointerType::getUnqual(Ops[1]->getType()));
 
-  Value *MaskVec = getMaskVecValue(CGF, Ops[2],
-                                   Ops[1]->getType()->getVectorNumElements());
+  Value *MaskVec = getMaskVecValue(
+      CGF, Ops[2], cast<llvm::VectorType>(Ops[1]->getType())->getNumElements());
 
   return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
 }
@@ -9996,23 +9998,22 @@
   Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                               llvm::PointerType::getUnqual(Ops[1]->getType()));
 
-  Value *MaskVec = getMaskVecValue(CGF, Ops[2],
-                                   Ops[1]->getType()->getVectorNumElements());
+  Value *MaskVec = getMaskVecValue(
+      CGF, Ops[2], cast<llvm::VectorType>(Ops[1]->getType())->getNumElements());
 
   return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
 }
 
 static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
                                 ArrayRef<Value *> Ops) {
-  llvm::Type *ResultTy = Ops[1]->getType();
-  llvm::Type *PtrTy = ResultTy->getVectorElementType();
+  auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
+  llvm::Type *PtrTy = ResultTy->getElementType();
 
   // Cast the pointer to element type.
   Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                                          llvm::PointerType::getUnqual(PtrTy));
 
-  Value *MaskVec = getMaskVecValue(CGF, Ops[2],
-                                   ResultTy->getVectorNumElements());
+  Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
 
   llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
                                            ResultTy);
@@ -10022,10 +10023,9 @@
 static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
                                     ArrayRef<Value *> Ops,
                                     bool IsCompress) {
-  llvm::Type *ResultTy = Ops[1]->getType();
+  auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
 
-  Value *MaskVec = getMaskVecValue(CGF, Ops[2],
-                                   ResultTy->getVectorNumElements());
+  Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
 
   Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
                                  : Intrinsic::x86_avx512_mask_expand;
@@ -10035,15 +10035,14 @@
 
 static Value *EmitX86CompressStore(CodeGenFunction &CGF,
                                    ArrayRef<Value *> Ops) {
-  llvm::Type *ResultTy = Ops[1]->getType();
-  llvm::Type *PtrTy = ResultTy->getVectorElementType();
+  auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
+  llvm::Type *PtrTy = ResultTy->getElementType();
 
   // Cast the pointer to element type.
   Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                                          llvm::PointerType::getUnqual(PtrTy));
 
-  Value *MaskVec = getMaskVecValue(CGF, Ops[2],
-                                   ResultTy->getVectorNumElements());
+  Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
 
   llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
                                            ResultTy);
@@ -10072,7 +10071,7 @@
   // Funnel shifts amounts are treated as modulo and types are all power-of-2 so
   // we only care about the lowest log2 bits anyway.
   if (Amt->getType() != Ty) {
-    unsigned NumElts = Ty->getVectorNumElements();
+    unsigned NumElts = cast<llvm::VectorType>(Ty)->getNumElements();
     Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
     Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
   }
@@ -10130,7 +10129,8 @@
     if (C->isAllOnesValue())
       return Op0;
 
-  Mask = getMaskVecValue(CGF, Mask, Op0->getType()->getVectorNumElements());
+  Mask = getMaskVecValue(
+      CGF, Mask, cast<llvm::VectorType>(Op0->getType())->getNumElements());
 
   return CGF.Builder.CreateSelect(Mask, Op0, Op1);
 }
@@ -10177,7 +10177,8 @@
                                    bool Signed, ArrayRef<Value *> Ops) {
   assert((Ops.size() == 2 || Ops.size() == 4) &&
          "Unexpected number of arguments");
-  unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+  unsigned NumElts =
+      cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
   Value *Cmp;
 
   if (CC == 3) {
@@ -10454,7 +10455,7 @@
 
 static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
                               llvm::Type *DstTy) {
-  unsigned NumberOfElements = DstTy->getVectorNumElements();
+  unsigned NumberOfElements = cast<llvm::VectorType>(DstTy)->getNumElements();
   Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
   return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
 }
@@ -10492,11 +10493,11 @@
     return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
   }
 
-  unsigned NumDstElts = DstTy->getVectorNumElements();
+  unsigned NumDstElts = cast<llvm::VectorType>(DstTy)->getNumElements();
   Value *Src = Ops[0];
 
   // Extract the subvector.
-  if (NumDstElts != Src->getType()->getVectorNumElements()) {
+  if (NumDstElts != cast<llvm::VectorType>(Src->getType())->getNumElements()) {
     assert(NumDstElts == 4 && "Unexpected vector size");
     uint32_t ShuffleMask[4] = {0, 1, 2, 3};
     Src = CGF.Builder.CreateShuffleVector(Src, UndefValue::get(Src->getType()),
@@ -10796,7 +10797,8 @@
   case X86::BI__builtin_ia32_vec_ext_v16hi:
   case X86::BI__builtin_ia32_vec_ext_v8si:
   case X86::BI__builtin_ia32_vec_ext_v4di: {
-    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+    unsigned NumElts =
+        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
     uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
     Index &= NumElts - 1;
     // These builtins exist so we can ensure the index is an ICE and in range.
@@ -10811,7 +10813,8 @@
   case X86::BI__builtin_ia32_vec_set_v16hi:
   case X86::BI__builtin_ia32_vec_set_v8si:
   case X86::BI__builtin_ia32_vec_set_v4di: {
-    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+    unsigned NumElts =
+        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
     unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
     Index &= NumElts - 1;
     // These builtins exist so we can ensure the index is an ICE and in range.
@@ -11237,8 +11240,9 @@
       break;
     }
 
-    unsigned MinElts = std::min(Ops[0]->getType()->getVectorNumElements(),
-                                Ops[2]->getType()->getVectorNumElements());
+    unsigned MinElts =
+        std::min(cast<llvm::VectorType>(Ops[0]->getType())->getNumElements(),
+                 cast<llvm::VectorType>(Ops[2]->getType())->getNumElements());
     Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
     Function *Intr = CGM.getIntrinsic(IID);
     return Builder.CreateCall(Intr, Ops);
@@ -11345,8 +11349,9 @@
       break;
     }
 
-    unsigned MinElts = std::min(Ops[2]->getType()->getVectorNumElements(),
-                                Ops[3]->getType()->getVectorNumElements());
+    unsigned MinElts =
+        std::min(cast<llvm::VectorType>(Ops[2]->getType())->getNumElements(),
+                 cast<llvm::VectorType>(Ops[3]->getType())->getNumElements());
     Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
     Function *Intr = CGM.getIntrinsic(IID);
     return Builder.CreateCall(Intr, Ops);
@@ -11368,9 +11373,10 @@
   case X86::BI__builtin_ia32_extracti64x2_256_mask:
   case X86::BI__builtin_ia32_extractf64x2_512_mask:
   case X86::BI__builtin_ia32_extracti64x2_512_mask: {
-    llvm::Type *DstTy = ConvertType(E->getType());
-    unsigned NumElts = DstTy->getVectorNumElements();
-    unsigned SrcNumElts = Ops[0]->getType()->getVectorNumElements();
+    auto *DstTy = cast<llvm::VectorType>(ConvertType(E->getType()));
+    unsigned NumElts = DstTy->getNumElements();
+    unsigned SrcNumElts =
+        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
     unsigned SubVectors = SrcNumElts / NumElts;
     unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
     assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
@@ -11407,8 +11413,10 @@
   case X86::BI__builtin_ia32_inserti64x2_256:
   case X86::BI__builtin_ia32_insertf64x2_512:
   case X86::BI__builtin_ia32_inserti64x2_512: {
-    unsigned DstNumElts = Ops[0]->getType()->getVectorNumElements();
-    unsigned SrcNumElts = Ops[1]->getType()->getVectorNumElements();
+    unsigned DstNumElts =
+        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+    unsigned SrcNumElts =
+        cast<llvm::VectorType>(Ops[1]->getType())->getNumElements();
     unsigned SubVectors = DstNumElts / SrcNumElts;
     unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
     assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
@@ -11472,7 +11480,8 @@
   case X86::BI__builtin_ia32_pblendw256:
   case X86::BI__builtin_ia32_pblendd128:
   case X86::BI__builtin_ia32_pblendd256: {
-    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+    unsigned NumElts =
+        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
     unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
 
     uint32_t Indices[16];
@@ -11489,8 +11498,8 @@
   case X86::BI__builtin_ia32_pshuflw256:
   case X86::BI__builtin_ia32_pshuflw512: {
     uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
-    llvm::Type *Ty = Ops[0]->getType();
-    unsigned NumElts = Ty->getVectorNumElements();
+    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    unsigned NumElts = Ty->getNumElements();
 
     // Splat the 8-bits of immediate 4 times to help the loop wrap around.
     Imm = (Imm & 0xff) * 0x01010101;
@@ -11513,8 +11522,8 @@
   case X86::BI__builtin_ia32_pshufhw256:
   case X86::BI__builtin_ia32_pshufhw512: {
     uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
-    llvm::Type *Ty = Ops[0]->getType();
-    unsigned NumElts = Ty->getVectorNumElements();
+    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    unsigned NumElts = Ty->getNumElements();
 
     // Splat the 8-bits of immediate 4 times to help the loop wrap around.
     Imm = (Imm & 0xff) * 0x01010101;
@@ -11543,8 +11552,8 @@
   case X86::BI__builtin_ia32_vpermilpd512:
   case X86::BI__builtin_ia32_vpermilps512: {
     uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
-    llvm::Type *Ty = Ops[0]->getType();
-    unsigned NumElts = Ty->getVectorNumElements();
+    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    unsigned NumElts = Ty->getNumElements();
     unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
     unsigned NumLaneElts = NumElts / NumLanes;
 
@@ -11570,8 +11579,8 @@
   case X86::BI__builtin_ia32_shufps256:
   case X86::BI__builtin_ia32_shufps512: {
     uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
-    llvm::Type *Ty = Ops[0]->getType();
-    unsigned NumElts = Ty->getVectorNumElements();
+    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    unsigned NumElts = Ty->getNumElements();
     unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
     unsigned NumLaneElts = NumElts / NumLanes;
 
@@ -11598,8 +11607,8 @@
   case X86::BI__builtin_ia32_permdi512:
   case X86::BI__builtin_ia32_permdf512: {
     unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
-    llvm::Type *Ty = Ops[0]->getType();
-    unsigned NumElts = Ty->getVectorNumElements();
+    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    unsigned NumElts = Ty->getNumElements();
 
     // These intrinsics operate on 256-bit lanes of four 64-bit elements.
     uint32_t Indices[8];
@@ -11616,7 +11625,8 @@
   case X86::BI__builtin_ia32_palignr512: {
     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
 
-    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+    unsigned NumElts =
+        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
     assert(NumElts % 16 == 0);
 
     // If palignr is shifting the pair of vectors more than the size of two
@@ -11653,7 +11663,8 @@
   case X86::BI__builtin_ia32_alignq128:
   case X86::BI__builtin_ia32_alignq256:
   case X86::BI__builtin_ia32_alignq512: {
-    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+    unsigned NumElts =
+        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
 
     // Mask the shift amount to width of two vectors.
@@ -11676,8 +11687,8 @@
   case X86::BI__builtin_ia32_shuf_i32x4:
   case X86::BI__builtin_ia32_shuf_i64x2: {
     unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
-    llvm::Type *Ty = Ops[0]->getType();
-    unsigned NumElts = Ty->getVectorNumElements();
+    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    unsigned NumElts = Ty->getNumElements();
     unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
     unsigned NumLaneElts = NumElts / NumLanes;
 
@@ -11702,7 +11713,8 @@
   case X86::BI__builtin_ia32_vperm2f128_si256:
   case X86::BI__builtin_ia32_permti256: {
     unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
-    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+    unsigned NumElts =
+        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
 
     // This takes a very simple approach since there are two lanes and a
     // shuffle can have 2 inputs. So we reserve the first input for the first
@@ -11740,9 +11752,9 @@
   case X86::BI__builtin_ia32_pslldqi256_byteshift:
   case X86::BI__builtin_ia32_pslldqi512_byteshift: {
     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
-    llvm::Type *ResultType = Ops[0]->getType();
+    auto *ResultType = cast<llvm::VectorType>(Ops[0]->getType());
     // Builtin type is vXi64 so multiply by 8 to get bytes.
-    unsigned NumElts = ResultType->getVectorNumElements() * 8;
+    unsigned NumElts = ResultType->getNumElements() * 8;
 
     // If pslldq is shifting the vector more than 15 bytes, emit zero.
     if (ShiftVal >= 16)
@@ -11770,9 +11782,9 @@
   case X86::BI__builtin_ia32_psrldqi256_byteshift:
   case X86::BI__builtin_ia32_psrldqi512_byteshift: {
     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
-    llvm::Type *ResultType = Ops[0]->getType();
+    auto *ResultType = cast<llvm::VectorType>(Ops[0]->getType());
     // Builtin type is vXi64 so multiply by 8 to get bytes.
-    unsigned NumElts = ResultType->getVectorNumElements() * 8;
+    unsigned NumElts = ResultType->getNumElements() * 8;
 
     // If psrldq is shifting the vector more than 15 bytes, emit zero.
     if (ShiftVal >= 16)
@@ -12416,7 +12428,8 @@
   case X86::BI__builtin_ia32_fpclasspd128_mask:
   case X86::BI__builtin_ia32_fpclasspd256_mask:
   case X86::BI__builtin_ia32_fpclasspd512_mask: {
-    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+    unsigned NumElts =
+        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
     Value *MaskIn = Ops[2];
     Ops.erase(&Ops[2]);
 
@@ -12453,7 +12466,8 @@
   case X86::BI__builtin_ia32_vp2intersect_d_512:
   case X86::BI__builtin_ia32_vp2intersect_d_256:
   case X86::BI__builtin_ia32_vp2intersect_d_128: {
-    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+    unsigned NumElts =
+        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
     Intrinsic::ID ID;
 
     switch (BuiltinID) {
@@ -12511,7 +12525,8 @@
   case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
   case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
   case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
-    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+    unsigned NumElts =
+        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
     Value *MaskIn = Ops[2];
     Ops.erase(&Ops[2]);
 
@@ -12651,8 +12666,11 @@
     }
 
     Function *Intr = CGM.getIntrinsic(IID);
-    if (Intr->getReturnType()->getVectorElementType()->isIntegerTy(1)) {
-      unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+    if (cast<llvm::VectorType>(Intr->getReturnType())
+            ->getElementType()
+            ->isIntegerTy(1)) {
+      unsigned NumElts =
+          cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
       Value *MaskIn = Ops[3];
       Ops.erase(&Ops[3]);
 
@@ -12673,7 +12691,8 @@
   case X86::BI__builtin_ia32_cmppd128_mask:
   case X86::BI__builtin_ia32_cmppd256_mask: {
     // FIXME: Support SAE.
-    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+    unsigned NumElts =
+        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
     Value *Cmp;
     if (IsSignaling)
       Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
@@ -12730,8 +12749,9 @@
 
   // AVX512 bf16 intrinsics
   case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
-    Ops[2] = getMaskVecValue(*this, Ops[2],
-                             Ops[0]->getType()->getVectorNumElements());
+    Ops[2] = getMaskVecValue(
+        *this, Ops[2],
+        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements());
     Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
     return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
   }
@@ -15047,7 +15067,8 @@
     switch (BuiltinID) {
     case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
     case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: {
-      llvm::Type *ElemType = ConvertType(E->getType())->getVectorElementType();
+      llvm::Type *ElemType =
+          cast<llvm::VectorType>(ConvertType(E->getType()))->getElementType();
       Value *Trunc = Builder.CreateTrunc(Val, ElemType);
       return Builder.CreateInsertElement(Vec, Trunc, Lane);
     }
@@ -15510,8 +15531,9 @@
   if (ID == Intrinsic::not_intrinsic)
     return nullptr;
 
-  auto IsVectorPredTy = [] (llvm::Type *T) {
-    return T->isVectorTy() && T->getVectorElementType()->isIntegerTy(1);
+  auto IsVectorPredTy = [](llvm::Type *T) {
+    return T->isVectorTy() &&
+           cast<llvm::VectorType>(T)->getElementType()->isIntegerTy(1);
   };
 
   llvm::Function *IntrFn = CGM.getIntrinsic(ID);
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -2127,7 +2127,8 @@
 
   if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
     unsigned NumSrcElts = VTy->getNumElements();
-    unsigned NumDstElts = Vec->getType()->getVectorNumElements();
+    unsigned NumDstElts =
+        cast<llvm::VectorType>(Vec->getType())->getNumElements();
     if (NumDstElts == NumSrcElts) {
       // Use shuffle vector is the src and destination are the same number of
       // elements and restore the vector mask since it is on the side it will be
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -1306,7 +1306,7 @@
            "Splatted expr doesn't match with vector element type?");
 
     // Splat the element across to all elements
-    unsigned NumElements = DstTy->getVectorNumElements();
+    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
     return Builder.CreateVectorSplat(NumElements, Src, "splat");
   }
 
@@ -1324,8 +1324,8 @@
   // short or half vector.
 
   // Source and destination are both expected to be vectors.
-  llvm::Type *SrcElementTy = SrcTy->getVectorElementType();
-  llvm::Type *DstElementTy = DstTy->getVectorElementType();
+  llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
+  llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
   (void)DstElementTy;
 
   assert(((SrcElementTy->isIntegerTy() &&
@@ -1691,8 +1691,8 @@
 
   assert(DstTy->isVectorTy() &&
          "ConvertVector destination IR type must be a vector");
-  llvm::Type *SrcEltTy = SrcTy->getVectorElementType(),
-             *DstEltTy = DstTy->getVectorElementType();
+  llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
+             *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
 
   if (DstEltType->isBooleanType()) {
     assert((SrcEltTy->isFloatingPointTy() ||
@@ -2219,7 +2219,7 @@
     llvm::Type *DstTy = ConvertType(DestTy);
     Value *Elt = Visit(const_cast<Expr*>(E));
     // Splat the element across to all elements
-    unsigned NumElements = DstTy->getVectorNumElements();
+    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
     return Builder.CreateVectorSplat(NumElements, Elt, "splat");
   }
 
@@ -4545,7 +4545,8 @@
   // get a vec3.
   if (NumElementsSrc != 3 && NumElementsDst == 3) {
     if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
-      auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4);
+      auto Vec4Ty = llvm::VectorType::get(
+          cast<llvm::VectorType>(DstTy)->getElementType(), 4);
       Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
                                          Vec4Ty);
     }
diff --git a/clang/lib/CodeGen/PatternInit.cpp b/clang/lib/CodeGen/PatternInit.cpp
--- a/clang/lib/CodeGen/PatternInit.cpp
+++ b/clang/lib/CodeGen/PatternInit.cpp
@@ -34,9 +34,11 @@
   constexpr bool NegativeNaN = true;
   constexpr uint64_t NaNPayload = 0xFFFFFFFFFFFFFFFFull;
   if (Ty->isIntOrIntVectorTy()) {
-    unsigned BitWidth = cast<llvm::IntegerType>(
-                            Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
-                            ->getBitWidth();
+    unsigned BitWidth =
+        cast<llvm::IntegerType>(
+            Ty->isVectorTy() ? cast<llvm::VectorType>(Ty)->getElementType()
+                             : Ty)
+            ->getBitWidth();
     if (BitWidth <= 64)
       return llvm::ConstantInt::get(Ty, IntValue);
     return llvm::ConstantInt::get(
@@ -44,7 +46,7 @@
   }
   if (Ty->isPtrOrPtrVectorTy()) {
     auto *PtrTy = cast<llvm::PointerType>(
-        Ty->isVectorTy() ? Ty->getVectorElementType() : Ty);
+        Ty->isVectorTy() ? cast<llvm::VectorType>(Ty)->getElementType() : Ty);
     unsigned PtrWidth = CGM.getContext().getTargetInfo().getPointerWidth(
         PtrTy->getAddressSpace());
     if (PtrWidth > 64)
@@ -55,7 +57,7 @@
   }
   if (Ty->isFPOrFPVectorTy()) {
     unsigned BitWidth = llvm::APFloat::semanticsSizeInBits(
-        (Ty->isVectorTy() ? Ty->getVectorElementType() : Ty)
+        (Ty->isVectorTy() ? cast<llvm::VectorType>(Ty)->getElementType() : Ty)
             ->getFltSemantics());
     llvm::APInt Payload(64, NaNPayload);
     if (BitWidth >= 64)
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -3054,7 +3054,7 @@
     // Don't pass vXi128 vectors in their native type, the backend can't
     // legalize them.
     if (passInt128VectorsInMem() &&
-        IRType->getVectorElementType()->isIntegerTy(128)) {
+        cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
       // Use a vXi64 vector.
       uint64_t Size = getContext().getTypeSize(Ty);
       return llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()),
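
For reference, the mechanical pattern applied throughout the patch is sketched below. This is an illustrative example only, not code from the diff, and it assumes the LLVM C++ API contemporary with this change, where llvm::VectorType still exposes getNumElements() directly (the helper names laneCount/laneType are hypothetical):

    // Illustrative sketch of the idiom the patch applies: instead of the
    // asserting convenience helpers on llvm::Type (getVectorNumElements,
    // getVectorElementType), make the vector-ness explicit with a cast and
    // query the derived vector type directly.
    #include "llvm/IR/DerivedTypes.h"  // llvm::Type, llvm::VectorType
    #include "llvm/Support/Casting.h"  // llvm::cast

    static unsigned laneCount(llvm::Type *Ty) {
      // Old style (removed by the patch): Ty->getVectorNumElements();
      return llvm::cast<llvm::VectorType>(Ty)->getNumElements();
    }

    static llvm::Type *laneType(llvm::Type *Ty) {
      // Old style (removed by the patch): Ty->getVectorElementType();
      return llvm::cast<llvm::VectorType>(Ty)->getElementType();
    }

In assertion-enabled builds the cast checks that Ty really is a vector type, so the assumption that was previously hidden inside the Type helper becomes visible at each call site.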