diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -119,7 +119,7 @@
       ValueTy = lvalue.getType();
       ValueSizeInBits = C.getTypeSize(ValueTy);
       AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
-          lvalue.getType(), cast<llvm::VectorType>(
+          lvalue.getType(), cast<llvm::FixedVectorType>(
                                 lvalue.getExtVectorAddress().getElementType())
                                 ->getNumElements());
       AtomicSizeInBits = C.getTypeSize(AtomicTy);
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -4561,11 +4561,11 @@
                                    getTarget().getTriple().getArch());
 }
 
-static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
-                                     NeonTypeFlags TypeFlags,
-                                     bool HasLegalHalfType = true,
-                                     bool V1Ty = false,
-                                     bool AllowBFloatArgsAndRet = true) {
+static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
+                                          NeonTypeFlags TypeFlags,
+                                          bool HasLegalHalfType = true,
+                                          bool V1Ty = false,
+                                          bool AllowBFloatArgsAndRet = true) {
   int IsQuad = TypeFlags.isQuad();
   switch (TypeFlags.getEltType()) {
   case NeonTypeFlags::Int8:
@@ -5621,8 +5621,8 @@
   const bool AllowBFloatArgsAndRet =
       getTargetHooks().getABIInfo().allowBFloatArgsAndRet();
 
-  llvm::VectorType *VTy = GetNeonType(this, Type, HasLegalHalfType, false,
-                                      AllowBFloatArgsAndRet);
+  llvm::FixedVectorType *VTy =
+      GetNeonType(this, Type, HasLegalHalfType, false, AllowBFloatArgsAndRet);
   llvm::Type *Ty = VTy;
   if (!Ty)
     return nullptr;
@@ -5663,8 +5663,8 @@
       return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
     return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
   case NEON::BI__builtin_neon_vaddhn_v: {
-    llvm::VectorType *SrcTy =
-        llvm::VectorType::getExtendedElementVectorType(VTy);
+    llvm::FixedVectorType *SrcTy =
+        llvm::FixedVectorType::getExtendedElementVectorType(VTy);
 
     // %sum = add <4 x i32> %lhs, %rhs
     Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
@@ -5936,14 +5936,16 @@
     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
   }
   case NEON::BI__builtin_neon_vmovl_v: {
-    llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
+    llvm::FixedVectorType *DTy =
+        llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
     Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
     if (Usgn)
       return Builder.CreateZExt(Ops[0], Ty, "vmovl");
     return Builder.CreateSExt(Ops[0], Ty, "vmovl");
   }
   case NEON::BI__builtin_neon_vmovn_v: {
-    llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
+    llvm::FixedVectorType *QTy =
+        llvm::FixedVectorType::getExtendedElementVectorType(VTy);
     Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
     return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
   }
@@ -5989,7 +5991,7 @@
   case NEON::BI__builtin_neon_vqdmulh_lane_v:
   case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
   case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
-    auto *RTy = cast<llvm::VectorType>(Ty);
+    auto *RTy = cast<llvm::FixedVectorType>(Ty);
     if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
         BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
       RTy = llvm::FixedVectorType::get(RTy->getElementType(),
@@ -6038,7 +6040,8 @@
     return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
                              "vshl_n");
   case NEON::BI__builtin_neon_vshll_n_v: {
-    llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
+    llvm::FixedVectorType *SrcTy =
+        llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
     Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
     if (Usgn)
       Ops[0] = Builder.CreateZExt(Ops[0], VTy);
@@ -6048,7 +6051,8 @@
     return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
"vshll_n"); } case NEON::BI__builtin_neon_vshrn_n_v: { - llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy); + llvm::FixedVectorType *SrcTy = + llvm::FixedVectorType::getExtendedElementVectorType(VTy); Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false); if (Usgn) @@ -6097,8 +6101,8 @@ return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, ""); } case NEON::BI__builtin_neon_vsubhn_v: { - llvm::VectorType *SrcTy = - llvm::VectorType::getExtendedElementVectorType(VTy); + llvm::FixedVectorType *SrcTy = + llvm::FixedVectorType::getExtendedElementVectorType(VTy); // %sum = add <4 x i32> %lhs, %rhs Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy); @@ -6310,7 +6314,7 @@ // Build a vector containing sequential number like (0, 1, 2, ..., 15) SmallVector Indices; - llvm::VectorType *TblTy = cast(Ops[0]->getType()); + auto *TblTy = cast(Ops[0]->getType()); for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) { Indices.push_back(2*i); Indices.push_back(2*i+1); @@ -7151,10 +7155,9 @@ bool usgn = Type.isUnsigned(); bool rightShift = false; - llvm::VectorType *VTy = GetNeonType(this, Type, - getTarget().hasLegalHalfType(), - false, - getTarget().hasBFloat16Type()); + llvm::FixedVectorType *VTy = + GetNeonType(this, Type, getTarget().hasLegalHalfType(), false, + getTarget().hasBFloat16Type()); llvm::Type *Ty = VTy; if (!Ty) return nullptr; @@ -7362,7 +7365,7 @@ // or odds, as desired). SmallVector Indices; unsigned InputElements = - cast(V->getType())->getNumElements(); + cast(V->getType())->getNumElements(); for (unsigned i = 0; i < InputElements; i += 2) Indices.push_back(i + Odd); return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()), @@ -7375,7 +7378,7 @@ assert(V0->getType() == V1->getType() && "Can't zip different vector types"); SmallVector Indices; unsigned InputElements = - cast(V0->getType())->getNumElements(); + cast(V0->getType())->getNumElements(); for (unsigned i = 0; i < InputElements; i++) { Indices.push_back(i); Indices.push_back(i + InputElements); @@ -7571,7 +7574,7 @@ // Determine the type of this overloaded NEON intrinsic. 
   NeonTypeFlags Type = Result->getZExtValue();
-  llvm::VectorType *Ty = GetNeonType(&CGF, Type);
+  llvm::FixedVectorType *Ty = GetNeonType(&CGF, Type);
   if (!Ty)
     return nullptr;
 
@@ -9773,7 +9776,7 @@
     }
   }
 
-  llvm::VectorType *VTy = GetNeonType(this, Type);
+  llvm::FixedVectorType *VTy = GetNeonType(this, Type);
   llvm::Type *Ty = VTy;
   if (!Ty)
     return nullptr;
@@ -9834,13 +9837,13 @@
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
   }
   case NEON::BI__builtin_neon_vfma_laneq_v: {
-    llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+    auto *VTy = cast<llvm::FixedVectorType>(Ty);
     // v1f64 fma should be mapped to Neon scalar f64 fma
     if (VTy && VTy->getElementType() == DoubleTy) {
       Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
       Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
-      llvm::Type *VTy = GetNeonType(this,
-        NeonTypeFlags(NeonTypeFlags::Float64, false, true));
+      llvm::FixedVectorType *VTy =
+          GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
       Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
       Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
       Value *Result;
@@ -10208,8 +10211,8 @@
     if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
       Quad = true;
     Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
-    llvm::Type *VTy = GetNeonType(this,
-      NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
+    llvm::FixedVectorType *VTy =
+        GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
     Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
     Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
     Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
@@ -11081,7 +11084,8 @@
                                llvm::PointerType::getUnqual(Ops[1]->getType()));
 
   Value *MaskVec = getMaskVecValue(
-      CGF, Ops[2], cast<llvm::VectorType>(Ops[1]->getType())->getNumElements());
+      CGF, Ops[2],
+      cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
 
   return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
 }
@@ -11093,7 +11097,8 @@
                                llvm::PointerType::getUnqual(Ops[1]->getType()));
 
   Value *MaskVec = getMaskVecValue(
-      CGF, Ops[2], cast<llvm::VectorType>(Ops[1]->getType())->getNumElements());
+      CGF, Ops[2],
+      cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
 
   return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
 }
@@ -11107,7 +11112,8 @@
   Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                                          llvm::PointerType::getUnqual(PtrTy));
 
-  Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
+  Value *MaskVec = getMaskVecValue(
+      CGF, Ops[2], cast<llvm::FixedVectorType>(ResultTy)->getNumElements());
 
   llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
                                            ResultTy);
@@ -11117,7 +11123,7 @@
 static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
                                     ArrayRef<Value *> Ops,
                                     bool IsCompress) {
-  auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
+  auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
 
   Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
 
@@ -11129,7 +11135,7 @@
 
 static Value *EmitX86CompressStore(CodeGenFunction &CGF,
                                    ArrayRef<Value *> Ops) {
-  auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
+  auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
   llvm::Type *PtrTy = ResultTy->getElementType();
 
   // Cast the pointer to element type.
@@ -11165,7 +11171,7 @@
   // Funnel shifts amounts are treated as modulo and types are all power-of-2 so
   // we only care about the lowest log2 bits anyway.
   if (Amt->getType() != Ty) {
-    unsigned NumElts = cast<llvm::VectorType>(Ty)->getNumElements();
+    unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements();
     Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
     Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
   }
@@ -11224,7 +11230,7 @@
       return Op0;
 
   Mask = getMaskVecValue(
-      CGF, Mask, cast<llvm::VectorType>(Op0->getType())->getNumElements());
+      CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements());
 
   return CGF.Builder.CreateSelect(Mask, Op0, Op1);
 }
@@ -11271,7 +11277,7 @@
   assert((Ops.size() == 2 || Ops.size() == 4) &&
          "Unexpected number of arguments");
   unsigned NumElts =
-      cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+      cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
   Value *Cmp;
 
   if (CC == 3) {
@@ -11548,7 +11554,8 @@
 
 static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
                               llvm::Type *DstTy) {
-  unsigned NumberOfElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+  unsigned NumberOfElements =
+      cast<llvm::FixedVectorType>(DstTy)->getNumElements();
   Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
   return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
 }
@@ -11584,11 +11591,12 @@
     return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
   }
 
-  unsigned NumDstElts = cast<llvm::VectorType>(DstTy)->getNumElements();
+  unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
   Value *Src = Ops[0];
 
   // Extract the subvector.
-  if (NumDstElts != cast<llvm::VectorType>(Src->getType())->getNumElements()) {
+  if (NumDstElts !=
+      cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) {
     assert(NumDstElts == 4 && "Unexpected vector size");
     Src = CGF.Builder.CreateShuffleVector(Src, UndefValue::get(Src->getType()),
                                           ArrayRef<int>{0, 1, 2, 3});
@@ -11887,7 +11895,7 @@
   case X86::BI__builtin_ia32_vec_ext_v8si:
   case X86::BI__builtin_ia32_vec_ext_v4di: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
     Index &= NumElts - 1;
     // These builtins exist so we can ensure the index is an ICE and in range.
@@ -11903,7 +11911,7 @@
   case X86::BI__builtin_ia32_vec_set_v8si:
   case X86::BI__builtin_ia32_vec_set_v4di: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
     Index &= NumElts - 1;
     // These builtins exist so we can ensure the index is an ICE and in range.
@@ -12329,9 +12337,9 @@
       break;
     }
 
-    unsigned MinElts =
-        std::min(cast<llvm::VectorType>(Ops[0]->getType())->getNumElements(),
-                 cast<llvm::VectorType>(Ops[2]->getType())->getNumElements());
+    unsigned MinElts = std::min(
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(),
+        cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements());
     Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
     Function *Intr = CGM.getIntrinsic(IID);
     return Builder.CreateCall(Intr, Ops);
@@ -12438,9 +12446,9 @@
       break;
     }
 
-    unsigned MinElts =
-        std::min(cast<llvm::VectorType>(Ops[2]->getType())->getNumElements(),
-                 cast<llvm::VectorType>(Ops[3]->getType())->getNumElements());
+    unsigned MinElts = std::min(
+        cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(),
+        cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements());
     Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
     Function *Intr = CGM.getIntrinsic(IID);
     return Builder.CreateCall(Intr, Ops);
@@ -12462,10 +12470,10 @@
   case X86::BI__builtin_ia32_extracti64x2_256_mask:
   case X86::BI__builtin_ia32_extractf64x2_512_mask:
   case X86::BI__builtin_ia32_extracti64x2_512_mask: {
-    auto *DstTy = cast<llvm::VectorType>(ConvertType(E->getType()));
+    auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType()));
     unsigned NumElts = DstTy->getNumElements();
     unsigned SrcNumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     unsigned SubVectors = SrcNumElts / NumElts;
     unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
     assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
@@ -12503,9 +12511,9 @@
   case X86::BI__builtin_ia32_insertf64x2_512:
   case X86::BI__builtin_ia32_inserti64x2_512: {
     unsigned DstNumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     unsigned SrcNumElts =
-        cast<llvm::VectorType>(Ops[1]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements();
     unsigned SubVectors = DstNumElts / SrcNumElts;
     unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
     assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
@@ -12570,7 +12578,7 @@
   case X86::BI__builtin_ia32_pblendd128:
   case X86::BI__builtin_ia32_pblendd256: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
 
     int Indices[16];
@@ -12587,7 +12595,7 @@
   case X86::BI__builtin_ia32_pshuflw256:
   case X86::BI__builtin_ia32_pshuflw512: {
     uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();
 
     // Splat the 8-bits of immediate 4 times to help the loop wrap around.
@@ -12611,7 +12619,7 @@
   case X86::BI__builtin_ia32_pshufhw256:
   case X86::BI__builtin_ia32_pshufhw512: {
     uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();
 
     // Splat the 8-bits of immediate 4 times to help the loop wrap around.
@@ -12641,7 +12649,7 @@
   case X86::BI__builtin_ia32_vpermilpd512:
   case X86::BI__builtin_ia32_vpermilps512: {
     uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();
     unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
     unsigned NumLaneElts = NumElts / NumLanes;
@@ -12668,7 +12676,7 @@
   case X86::BI__builtin_ia32_shufps256:
   case X86::BI__builtin_ia32_shufps512: {
     uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();
     unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
     unsigned NumLaneElts = NumElts / NumLanes;
@@ -12696,7 +12704,7 @@
   case X86::BI__builtin_ia32_permdi512:
   case X86::BI__builtin_ia32_permdf512: {
     unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();
 
     // These intrinsics operate on 256-bit lanes of four 64-bit elements.
@@ -12715,7 +12723,7 @@
     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
 
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     assert(NumElts % 16 == 0);
 
     // If palignr is shifting the pair of vectors more than the size of two
@@ -12753,7 +12761,7 @@
   case X86::BI__builtin_ia32_alignq256:
   case X86::BI__builtin_ia32_alignq512: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
 
     // Mask the shift amount to width of two vectors.
@@ -12776,7 +12784,7 @@
   case X86::BI__builtin_ia32_shuf_i32x4:
   case X86::BI__builtin_ia32_shuf_i64x2: {
     unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();
     unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
     unsigned NumLaneElts = NumElts / NumLanes;
@@ -12803,7 +12811,7 @@
   case X86::BI__builtin_ia32_permti256: {
     unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
 
     // This takes a very simple approach since there are two lanes and a
     // shuffle can have 2 inputs. So we reserve the first input for the first
@@ -12841,7 +12849,7 @@
   case X86::BI__builtin_ia32_pslldqi256_byteshift:
   case X86::BI__builtin_ia32_pslldqi512_byteshift: {
     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
-    auto *ResultType = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
     // Builtin type is vXi64 so multiply by 8 to get bytes.
     unsigned NumElts = ResultType->getNumElements() * 8;
 
@@ -12871,7 +12879,7 @@
   case X86::BI__builtin_ia32_psrldqi256_byteshift:
   case X86::BI__builtin_ia32_psrldqi512_byteshift: {
     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
-    auto *ResultType = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
     // Builtin type is vXi64 so multiply by 8 to get bytes.
     unsigned NumElts = ResultType->getNumElements() * 8;
 
@@ -13518,7 +13526,7 @@
   case X86::BI__builtin_ia32_fpclasspd256_mask:
   case X86::BI__builtin_ia32_fpclasspd512_mask: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     Value *MaskIn = Ops[2];
     Ops.erase(&Ops[2]);
 
@@ -13556,7 +13564,7 @@
   case X86::BI__builtin_ia32_vp2intersect_d_256:
   case X86::BI__builtin_ia32_vp2intersect_d_128: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     Intrinsic::ID ID;
 
     switch (BuiltinID) {
@@ -13615,7 +13623,7 @@
   case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
   case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     Value *MaskIn = Ops[2];
     Ops.erase(&Ops[2]);
 
@@ -13762,7 +13770,7 @@
       Function *Intr = CGM.getIntrinsic(IID);
       if (IsMaskFCmp) {
         unsigned NumElts =
-            cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+            cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
         Ops[3] = getMaskVecValue(*this, Ops[3], NumElts);
         Value *Cmp = Builder.CreateCall(Intr, Ops);
         return EmitX86MaskedCompareResult(*this, Cmp, NumElts, nullptr);
@@ -13777,7 +13785,7 @@
     // We ignore SAE if strict FP is disabled. We only keep precise
     // exception behavior under strict FP.
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     Value *Cmp;
     if (IsSignaling)
       Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
@@ -13835,7 +13843,7 @@
   case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
     Ops[2] = getMaskVecValue(
         *this, Ops[2],
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements());
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements());
     Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
     return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
   }
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -1685,7 +1685,7 @@
   if (Ty->isVectorType()) {
     const llvm::Type *EltTy = Addr.getElementType();
 
-    const auto *VTy = cast<llvm::VectorType>(EltTy);
+    const auto *VTy = cast<llvm::FixedVectorType>(EltTy);
 
     // Handle vectors of size 3 like size 4 for better performance.
     if (VTy->getNumElements() == 3) {
@@ -1770,8 +1770,9 @@
   auto *VectorTy = dyn_cast<llvm::VectorType>(
       cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
   if (VectorTy && !IsVector) {
-    auto *ArrayTy = llvm::ArrayType::get(VectorTy->getElementType(),
-                                         VectorTy->getNumElements());
+    auto *ArrayTy = llvm::ArrayType::get(
+        VectorTy->getElementType(),
+        cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
 
     return Address(CGF.Builder.CreateElementBitCast(Addr, ArrayTy));
   }
@@ -1802,7 +1803,7 @@
     llvm::Type *SrcTy = Value->getType();
     auto *VecTy = dyn_cast<llvm::VectorType>(SrcTy);
     // Handle vec3 special.
-    if (VecTy && VecTy->getNumElements() == 3) {
+    if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
       // Our source is a vec3, do a shuffle vector to make it a vec4.
       Value = Builder.CreateShuffleVector(Value, llvm::UndefValue::get(VecTy),
                                           ArrayRef<int>{0, 1, 2, -1},
@@ -2217,7 +2218,7 @@
   if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
     unsigned NumSrcElts = VTy->getNumElements();
     unsigned NumDstElts =
-        cast<llvm::VectorType>(Vec->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
     if (NumDstElts == NumSrcElts) {
       // Use shuffle vector is the src and destination are the same number of
       // elements and restore the vector mask since it is on the side it will be
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -1320,7 +1320,7 @@
            "Splatted expr doesn't match with vector element type?");
 
     // Splat the element across to all elements
-    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+    unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
     return Builder.CreateVectorSplat(NumElements, Src, "splat");
   }
 
@@ -1553,12 +1553,12 @@
     Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
     Value *Mask;
 
-    llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
+    auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
     unsigned LHSElts = LTy->getNumElements();
 
     Mask = RHS;
 
-    llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
+    auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
 
     // Mask off the high bits of each shuffle index.
     Value *MaskBits =
@@ -1763,7 +1763,7 @@
     return Visit(E->getInit(0));
   }
 
-  unsigned ResElts = VType->getNumElements();
+  unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
 
   // Loop over initializers collecting the Value for each, and remembering
   // whether the source was swizzle (ExtVectorElementExpr). This will allow
@@ -1787,7 +1787,8 @@
     if (isa<ExtVectorElementExpr>(IE)) {
       llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
 
-      if (EI->getVectorOperandType()->getNumElements() == ResElts) {
+      if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
+              ->getNumElements() == ResElts) {
         llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
         Value *LHS = nullptr, *RHS = nullptr;
         if (CurIdx == 0) {
@@ -1825,7 +1826,7 @@
       continue;
     }
 
-    unsigned InitElts = VVT->getNumElements();
+    unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
 
     // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
     // input is the same width as the vector being constructed, generate an
@@ -1834,7 +1835,7 @@
     if (isa<ExtVectorElementExpr>(IE)) {
      llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
      Value *SVOp = SVI->getOperand(0);
-      llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
+      auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
 
       if (OpTy->getNumElements() == ResElts) {
         for (unsigned j = 0; j != CurIdx; ++j) {
@@ -2170,7 +2171,7 @@
     llvm::Type *DstTy = ConvertType(DestTy);
     Value *Elt = Visit(const_cast<Expr*>(E));
     // Splat the element across to all elements
-    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+    unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
     return Builder.CreateVectorSplat(NumElements, Elt, "splat");
   }
 
@@ -4331,7 +4332,7 @@
     llvm::Value *RHS = Visit(rhsExpr);
 
     llvm::Type *condType = ConvertType(condExpr->getType());
-    llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
+    auto *vecTy = cast<llvm::FixedVectorType>(condType);
 
     unsigned numElem = vecTy->getNumElements();
     llvm::Type *elemType = vecTy->getElementType();
@@ -4534,10 +4535,14 @@
 
   llvm::Type *DstTy = ConvertType(E->getType());
   llvm::Type *SrcTy = Src->getType();
-  unsigned NumElementsSrc = isa<llvm::VectorType>(SrcTy) ?
-    cast<llvm::VectorType>(SrcTy)->getNumElements() : 0;
-  unsigned NumElementsDst = isa<llvm::VectorType>(DstTy) ?
-    cast<llvm::VectorType>(DstTy)->getNumElements() : 0;
+  unsigned NumElementsSrc =
+      isa<llvm::VectorType>(SrcTy)
+          ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
+          : 0;
+  unsigned NumElementsDst =
+      isa<llvm::VectorType>(DstTy)
+          ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
+          : 0;
 
   // Going from vec3 to non-vec3 is a special case and requires a shuffle
   // vector to get a vec4, then a bitcast if the target type is different.
diff --git a/clang/lib/CodeGen/SwiftCallingConv.cpp b/clang/lib/CodeGen/SwiftCallingConv.cpp
--- a/clang/lib/CodeGen/SwiftCallingConv.cpp
+++ b/clang/lib/CodeGen/SwiftCallingConv.cpp
@@ -320,9 +320,12 @@
   // If we have a vector type, split it.
   if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
     auto eltTy = vecTy->getElementType();
-    CharUnits eltSize = (end - begin) / vecTy->getNumElements();
+    CharUnits eltSize =
+        (end - begin) / cast<llvm::FixedVectorType>(vecTy)->getNumElements();
     assert(eltSize == getTypeStoreSize(CGM, eltTy));
-    for (unsigned i = 0, e = vecTy->getNumElements(); i != e; ++i) {
+    for (unsigned i = 0,
+                  e = cast<llvm::FixedVectorType>(vecTy)->getNumElements();
+         i != e; ++i) {
       addEntry(eltTy, begin, begin + eltSize);
       begin += eltSize;
     }
@@ -674,8 +677,9 @@
 
 bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                   llvm::VectorType *vectorTy) {
-  return isLegalVectorType(CGM, vectorSize, vectorTy->getElementType(),
-                           vectorTy->getNumElements());
+  return isLegalVectorType(
+      CGM, vectorSize, vectorTy->getElementType(),
+      cast<llvm::FixedVectorType>(vectorTy)->getNumElements());
 }
 
 bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
@@ -688,7 +692,7 @@
 std::pair<llvm::Type*, unsigned>
 swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                 llvm::VectorType *vectorTy) {
-  auto numElts = vectorTy->getNumElements();
+  auto numElts = cast<llvm::FixedVectorType>(vectorTy)->getNumElements();
   auto eltTy = vectorTy->getElementType();
 
   // Try to split the vector type in half.
@@ -710,7 +714,7 @@
   }
 
   // Try to split the vector into legal subvectors.
-  auto numElts = origVectorTy->getNumElements();
+  auto numElts = cast<llvm::FixedVectorType>(origVectorTy)->getNumElements();
   auto eltTy = origVectorTy->getElementType();
   assert(numElts != 1);
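Note (not part of the patch): a minimal standalone sketch of the pattern the hunks above apply. With the scalable-vector work in LLVM, the fixed element count is queried through llvm::FixedVectorType rather than the llvm::VectorType base class, so call sites that need getNumElements() first cast to the fixed-width subclass. The helper name below is hypothetical and only illustrates the before/after shape of the change.

// sketch.cpp - illustrative only, not taken from the patch
#include "llvm/IR/DerivedTypes.h"

// Element count of a fixed-width vector type.
// Old pattern: cast<llvm::VectorType>(Ty)->getNumElements()
// New pattern: cast<llvm::FixedVectorType>(Ty)->getNumElements()
static unsigned getFixedElementCount(llvm::Type *Ty) {
  // Aborts (in asserts builds) if Ty is not a fixed-width vector type,
  // which is the same contract the cast<> call sites in the patch rely on.
  return llvm::cast<llvm::FixedVectorType>(Ty)->getNumElements();
}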