diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -119,7 +119,7 @@
       ValueTy = lvalue.getType();
       ValueSizeInBits = C.getTypeSize(ValueTy);
       AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
-          lvalue.getType(), cast<llvm::VectorType>(
+          lvalue.getType(), cast<llvm::FixedVectorType>(
                                 lvalue.getExtVectorAddress().getElementType())
                                 ->getNumElements());
       AtomicSizeInBits = C.getTypeSize(AtomicTy);
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -5689,7 +5689,8 @@
       Ty = HalfTy;
       break;
     }
-    auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
+    auto *VecFlt = llvm::FixedVectorType::get(
+        Ty, cast<llvm::FixedVectorType>(VTy)->getNumElements());
     llvm::Type *Tys[] = { VTy, VecFlt };
     Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
     return EmitNeonCall(F, Ops, NameHint);
@@ -5837,7 +5838,8 @@
   case NEON::BI__builtin_neon_vextq_v: {
     int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
     SmallVector<int, 16> Indices;
-    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+    for (unsigned i = 0, e = cast<llvm::FixedVectorType>(VTy)->getNumElements();
+         i != e; ++i)
       Indices.push_back(i+CV);

     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -5949,8 +5951,8 @@
     unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
     llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
-    auto *NarrowTy =
-        llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
+    auto *NarrowTy = llvm::FixedVectorType::get(
+        EltTy, cast<llvm::FixedVectorType>(VTy)->getNumElements() * 2);
     llvm::Type *Tys[2] = { Ty, NarrowTy };
     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
   }
@@ -5959,8 +5961,8 @@
     // The source operand type has twice as many elements of half the size.
     unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
     llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
-    auto *NarrowTy =
-        llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
+    auto *NarrowTy = llvm::FixedVectorType::get(
+        EltTy, cast<llvm::FixedVectorType>(VTy)->getNumElements() * 2);
     llvm::Type *Tys[2] = { Ty, NarrowTy };
     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
   }
@@ -5979,8 +5981,9 @@
     auto *RTy = cast<llvm::VectorType>(Ty);
     if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
         BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
-      RTy = llvm::FixedVectorType::get(RTy->getElementType(),
-                                       RTy->getNumElements() * 2);
+      RTy = llvm::FixedVectorType::get(
+          RTy->getElementType(),
+          cast<llvm::FixedVectorType>(RTy)->getNumElements() * 2);
     llvm::Type *Tys[2] = {
         RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
                                              /*isQuad*/ false))};
@@ -6109,7 +6112,8 @@

     for (unsigned vi = 0; vi != 2; ++vi) {
       SmallVector<int, 16> Indices;
-      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+      for (unsigned i = 0, e = cast<llvm::FixedVectorType>(VTy)->getNumElements();
+           i != e; i += 2) {
         Indices.push_back(i+vi);
         Indices.push_back(i+e+vi);
       }
@@ -6137,7 +6141,8 @@

     for (unsigned vi = 0; vi != 2; ++vi) {
       SmallVector<int, 16> Indices;
-      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+      for (unsigned i = 0, e = cast<llvm::FixedVectorType>(VTy)->getNumElements();
+           i != e; ++i)
         Indices.push_back(2*i+vi);

       Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
@@ -6155,7 +6160,8 @@

     for (unsigned vi = 0; vi != 2; ++vi) {
       SmallVector<int, 16> Indices;
-      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+      for (unsigned i = 0, e = cast<llvm::FixedVectorType>(VTy)->getNumElements();
+           i != e; i += 2) {
         Indices.push_back((i + vi*e) >> 1);
         Indices.push_back(((i + vi*e) >> 1)+e);
       }
@@ -6297,7 +6303,7 @@
   // Build a vector containing sequential number like (0, 1, 2, ..., 15)
   SmallVector<int, 16> Indices;
-  llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
+  auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType());
   for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
     Indices.push_back(2*i);
     Indices.push_back(2*i+1);
   }
@@ -7341,7 +7347,7 @@
   // or odds, as desired).
   SmallVector<int, 16> Indices;
   unsigned InputElements =
-      cast<llvm::VectorType>(V->getType())->getNumElements();
+      cast<llvm::FixedVectorType>(V->getType())->getNumElements();
   for (unsigned i = 0; i < InputElements; i += 2)
     Indices.push_back(i + Odd);
   return Builder.CreateShuffleVector(V, llvm::UndefValue::get(V->getType()),
@@ -7354,7 +7360,7 @@
   assert(V0->getType() == V1->getType() && "Can't zip different vector types");
   SmallVector<int, 16> Indices;
   unsigned InputElements =
-      cast<llvm::VectorType>(V0->getType())->getNumElements();
+      cast<llvm::FixedVectorType>(V0->getType())->getNumElements();
   for (unsigned i = 0; i < InputElements; i++) {
     Indices.push_back(i);
     Indices.push_back(i + InputElements);
   }
@@ -9836,8 +9842,9 @@

     // Now adjust things to handle the lane access.
     auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v
-                         ? llvm::FixedVectorType::get(VTy->getElementType(),
-                                                      VTy->getNumElements() / 2)
+                         ? llvm::FixedVectorType::get(
+                               VTy->getElementType(),
+                               cast<llvm::FixedVectorType>(VTy)->getNumElements() / 2)
                          : VTy;
     llvm::Constant *cst = cast<Constant>(Ops[3]);
     Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst);
@@ -9868,8 +9875,9 @@

     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
-    auto *STy = llvm::FixedVectorType::get(VTy->getElementType(),
-                                           VTy->getNumElements() * 2);
+    auto *STy = llvm::FixedVectorType::get(
+        VTy->getElementType(),
+        cast<llvm::FixedVectorType>(VTy)->getNumElements() * 2);
     Ops[2] = Builder.CreateBitCast(Ops[2], STy);
     Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(),
                                                cast<Constant>(Ops[3]));
@@ -9937,7 +9945,7 @@
     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
   case NEON::BI__builtin_neon_vpadal_v:
   case NEON::BI__builtin_neon_vpadalq_v: {
-    unsigned ArgElts = VTy->getNumElements();
+    unsigned ArgElts = cast<llvm::FixedVectorType>(VTy)->getNumElements();
     llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
     unsigned BitWidth = EltTy->getBitWidth();
     auto *ArgTy = llvm::FixedVectorType::get(
@@ -10842,7 +10850,8 @@

     for (unsigned vi = 0; vi != 2; ++vi) {
       SmallVector<int, 16> Indices;
-      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+      for (unsigned i = 0, e = cast<llvm::FixedVectorType>(VTy)->getNumElements();
+           i != e; i += 2) {
         Indices.push_back(i+vi);
         Indices.push_back(i+e+vi);
       }
@@ -10861,7 +10870,8 @@

     for (unsigned vi = 0; vi != 2; ++vi) {
       SmallVector<int, 16> Indices;
-      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+      for (unsigned i = 0, e = cast<llvm::FixedVectorType>(VTy)->getNumElements();
+           i != e; ++i)
         Indices.push_back(2*i+vi);

       Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
@@ -10879,7 +10889,8 @@

     for (unsigned vi = 0; vi != 2; ++vi) {
       SmallVector<int, 16> Indices;
-      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+      for (unsigned i = 0, e = cast<llvm::FixedVectorType>(VTy)->getNumElements();
+           i != e; i += 2) {
         Indices.push_back((i + vi*e) >> 1);
         Indices.push_back(((i + vi*e) >> 1)+e);
       }
@@ -11090,7 +11101,8 @@
                                llvm::PointerType::getUnqual(Ops[1]->getType()));

   Value *MaskVec = getMaskVecValue(
-      CGF, Ops[2], cast<llvm::VectorType>(Ops[1]->getType())->getNumElements());
+      CGF, Ops[2],
+      cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());

   return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
 }
@@ -11102,7 +11114,8 @@
                                llvm::PointerType::getUnqual(Ops[1]->getType()));

   Value *MaskVec = getMaskVecValue(
-      CGF, Ops[2], cast<llvm::VectorType>(Ops[1]->getType())->getNumElements());
+      CGF, Ops[2],
+      cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());

   return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
 }
@@ -11116,7 +11129,8 @@
   Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                                          llvm::PointerType::getUnqual(PtrTy));

-  Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
+  Value *MaskVec = getMaskVecValue(
+      CGF, Ops[2], cast<llvm::FixedVectorType>(ResultTy)->getNumElements());

   llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
                                            ResultTy);
@@ -11126,7 +11140,7 @@

 static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
                                     ArrayRef<Value *> Ops, bool IsCompress) {
-  auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
+  auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());

   Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());

@@ -11138,7 +11152,7 @@

 static Value *EmitX86CompressStore(CodeGenFunction &CGF,
                                    ArrayRef<Value *> Ops) {
-  auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
+  auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
   llvm::Type *PtrTy = ResultTy->getElementType();

   // Cast the pointer to element type.
@@ -11174,7 +11188,7 @@
   // Funnel shifts amounts are treated as modulo and types are all power-of-2 so
   // we only care about the lowest log2 bits anyway.
   if (Amt->getType() != Ty) {
-    unsigned NumElts = cast<llvm::VectorType>(Ty)->getNumElements();
+    unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements();
     Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
     Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
   }
@@ -11233,7 +11247,7 @@
     return Op0;

   Mask = getMaskVecValue(
-      CGF, Mask, cast<llvm::VectorType>(Op0->getType())->getNumElements());
+      CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements());

   return CGF.Builder.CreateSelect(Mask, Op0, Op1);
 }
@@ -11280,7 +11294,7 @@
   assert((Ops.size() == 2 || Ops.size() == 4) &&
          "Unexpected number of arguments");
   unsigned NumElts =
-      cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+      cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
   Value *Cmp;

   if (CC == 3) {
@@ -11557,7 +11571,8 @@

 static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
                               llvm::Type *DstTy) {
-  unsigned NumberOfElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+  unsigned NumberOfElements =
+      cast<llvm::FixedVectorType>(DstTy)->getNumElements();
   Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
   return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
 }
@@ -11593,11 +11608,12 @@
     return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
   }

-  unsigned NumDstElts = cast<llvm::VectorType>(DstTy)->getNumElements();
+  unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
   Value *Src = Ops[0];

   // Extract the subvector.
-  if (NumDstElts != cast<llvm::VectorType>(Src->getType())->getNumElements()) {
+  if (NumDstElts !=
+      cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) {
     assert(NumDstElts == 4 && "Unexpected vector size");
     Src = CGF.Builder.CreateShuffleVector(Src, UndefValue::get(Src->getType()),
                                           ArrayRef<int>{0, 1, 2, 3});
@@ -11897,7 +11913,7 @@
   case X86::BI__builtin_ia32_vec_ext_v8si:
   case X86::BI__builtin_ia32_vec_ext_v4di: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
     Index &= NumElts - 1;
     // These builtins exist so we can ensure the index is an ICE and in range.
@@ -11913,7 +11929,7 @@
   case X86::BI__builtin_ia32_vec_set_v8si:
   case X86::BI__builtin_ia32_vec_set_v4di: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
     Index &= NumElts - 1;
     // These builtins exist so we can ensure the index is an ICE and in range.
@@ -12339,9 +12355,9 @@
       break;
     }
-    unsigned MinElts =
-        std::min(cast<llvm::VectorType>(Ops[0]->getType())->getNumElements(),
-                 cast<llvm::VectorType>(Ops[2]->getType())->getNumElements());
+    unsigned MinElts = std::min(
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(),
+        cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements());
     Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);

     Function *Intr = CGM.getIntrinsic(IID);
     return Builder.CreateCall(Intr, Ops);
@@ -12448,9 +12464,9 @@
       break;
     }
-    unsigned MinElts =
-        std::min(cast<llvm::VectorType>(Ops[2]->getType())->getNumElements(),
-                 cast<llvm::VectorType>(Ops[3]->getType())->getNumElements());
+    unsigned MinElts = std::min(
+        cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(),
+        cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements());
     Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);

     Function *Intr = CGM.getIntrinsic(IID);
     return Builder.CreateCall(Intr, Ops);
@@ -12472,10 +12488,10 @@
   case X86::BI__builtin_ia32_extracti64x2_256_mask:
   case X86::BI__builtin_ia32_extractf64x2_512_mask:
   case X86::BI__builtin_ia32_extracti64x2_512_mask: {
-    auto *DstTy = cast<llvm::VectorType>(ConvertType(E->getType()));
+    auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType()));
     unsigned NumElts = DstTy->getNumElements();
     unsigned SrcNumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     unsigned SubVectors = SrcNumElts / NumElts;
     unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
     assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
@@ -12513,9 +12529,9 @@
   case X86::BI__builtin_ia32_insertf64x2_512:
   case X86::BI__builtin_ia32_inserti64x2_512: {
     unsigned DstNumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     unsigned SrcNumElts =
-        cast<llvm::VectorType>(Ops[1]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements();
     unsigned SubVectors = DstNumElts / SrcNumElts;
     unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
     assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
@@ -12580,7 +12596,7 @@
   case X86::BI__builtin_ia32_pblendd128:
   case X86::BI__builtin_ia32_pblendd256: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     unsigned Imm = cast<ConstantInt>(Ops[2])->getZExtValue();
     int Indices[16];
@@ -12597,7 +12613,7 @@
   case X86::BI__builtin_ia32_pshuflw256:
   case X86::BI__builtin_ia32_pshuflw512: {
     uint32_t Imm = cast<ConstantInt>(Ops[1])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();

     // Splat the 8-bits of immediate 4 times to help the loop wrap around.
@@ -12621,7 +12637,7 @@
   case X86::BI__builtin_ia32_pshufhw256:
   case X86::BI__builtin_ia32_pshufhw512: {
     uint32_t Imm = cast<ConstantInt>(Ops[1])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();

     // Splat the 8-bits of immediate 4 times to help the loop wrap around.
@@ -12651,7 +12667,7 @@
   case X86::BI__builtin_ia32_vpermilpd512:
   case X86::BI__builtin_ia32_vpermilps512: {
     uint32_t Imm = cast<ConstantInt>(Ops[1])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();
     unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
     unsigned NumLaneElts = NumElts / NumLanes;
@@ -12678,7 +12694,7 @@
   case X86::BI__builtin_ia32_shufps256:
   case X86::BI__builtin_ia32_shufps512: {
     uint32_t Imm = cast<ConstantInt>(Ops[2])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();
     unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
     unsigned NumLaneElts = NumElts / NumLanes;
@@ -12706,7 +12722,7 @@
   case X86::BI__builtin_ia32_permdi512:
   case X86::BI__builtin_ia32_permdf512: {
     unsigned Imm = cast<ConstantInt>(Ops[1])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();

     // These intrinsics operate on 256-bit lanes of four 64-bit elements.
@@ -12725,7 +12741,7 @@
     unsigned ShiftVal = cast<ConstantInt>(Ops[2])->getZExtValue() & 0xff;
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     assert(NumElts % 16 == 0);

     // If palignr is shifting the pair of vectors more than the size of two
@@ -12763,7 +12779,7 @@
   case X86::BI__builtin_ia32_alignq256:
   case X86::BI__builtin_ia32_alignq512: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     unsigned ShiftVal = cast<ConstantInt>(Ops[2])->getZExtValue() & 0xff;

     // Mask the shift amount to width of two vectors.
@@ -12786,7 +12802,7 @@
   case X86::BI__builtin_ia32_shuf_i32x4:
   case X86::BI__builtin_ia32_shuf_i64x2: {
     unsigned Imm = cast<ConstantInt>(Ops[2])->getZExtValue();
-    auto *Ty = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
     unsigned NumElts = Ty->getNumElements();
     unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
     unsigned NumLaneElts = NumElts / NumLanes;
@@ -12813,7 +12829,7 @@
   case X86::BI__builtin_ia32_permti256: {
     unsigned Imm = cast<ConstantInt>(Ops[2])->getZExtValue();
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();

     // This takes a very simple approach since there are two lanes and a
     // shuffle can have 2 inputs. So we reserve the first input for the first
@@ -12851,7 +12867,7 @@
   case X86::BI__builtin_ia32_pslldqi256_byteshift:
   case X86::BI__builtin_ia32_pslldqi512_byteshift: {
     unsigned ShiftVal = cast<ConstantInt>(Ops[1])->getZExtValue() & 0xff;
-    auto *ResultType = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
     // Builtin type is vXi64 so multiply by 8 to get bytes.
     unsigned NumElts = ResultType->getNumElements() * 8;
@@ -12881,7 +12897,7 @@
   case X86::BI__builtin_ia32_psrldqi256_byteshift:
   case X86::BI__builtin_ia32_psrldqi512_byteshift: {
     unsigned ShiftVal = cast<ConstantInt>(Ops[1])->getZExtValue() & 0xff;
-    auto *ResultType = cast<llvm::VectorType>(Ops[0]->getType());
+    auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
     // Builtin type is vXi64 so multiply by 8 to get bytes.
     unsigned NumElts = ResultType->getNumElements() * 8;
@@ -13528,7 +13544,7 @@
   case X86::BI__builtin_ia32_fpclasspd256_mask:
   case X86::BI__builtin_ia32_fpclasspd512_mask: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     Value *MaskIn = Ops[2];
     Ops.erase(&Ops[2]);
@@ -13566,7 +13582,7 @@
   case X86::BI__builtin_ia32_vp2intersect_d_256:
   case X86::BI__builtin_ia32_vp2intersect_d_128: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     Intrinsic::ID ID;

     switch (BuiltinID) {
@@ -13625,7 +13641,7 @@
   case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
   case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     Value *MaskIn = Ops[2];
     Ops.erase(&Ops[2]);
@@ -13769,7 +13785,7 @@
             ->getElementType()
             ->isIntegerTy(1)) {
       unsigned NumElts =
-          cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+          cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
       Value *MaskIn = Ops[3];
       Ops.erase(&Ops[3]);
@@ -13791,7 +13807,7 @@
   case X86::BI__builtin_ia32_cmppd256_mask: {
     // FIXME: Support SAE.
     unsigned NumElts =
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
     Value *Cmp;
     if (IsSignaling)
       Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
@@ -13850,7 +13866,7 @@
   case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
     Ops[2] = getMaskVecValue(
         *this, Ops[2],
-        cast<llvm::VectorType>(Ops[0]->getType())->getNumElements());
+        cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements());
     Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
     return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
   }
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -1680,7 +1680,7 @@
     if (Ty->isVectorType()) {
       const llvm::Type *EltTy = Addr.getElementType();

-      const auto *VTy = cast<llvm::VectorType>(EltTy);
+      const auto *VTy = cast<llvm::FixedVectorType>(EltTy);

       // Handle vectors of size 3 like size 4 for better performance.
       if (VTy->getNumElements() == 3) {
@@ -1765,8 +1765,9 @@
   auto *VectorTy = dyn_cast<llvm::VectorType>(
       cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
   if (VectorTy && !IsVector) {
-    auto *ArrayTy = llvm::ArrayType::get(VectorTy->getElementType(),
-                                         VectorTy->getNumElements());
+    auto *ArrayTy = llvm::ArrayType::get(
+        VectorTy->getElementType(),
+        cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
     return Address(CGF.Builder.CreateElementBitCast(Addr, ArrayTy));
   }
@@ -1797,7 +1798,7 @@
       llvm::Type *SrcTy = Value->getType();
       auto *VecTy = dyn_cast<llvm::VectorType>(SrcTy);
       // Handle vec3 special.
-      if (VecTy && VecTy->getNumElements() == 3) {
+      if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
         // Our source is a vec3, do a shuffle vector to make it a vec4.
         Value = Builder.CreateShuffleVector(Value, llvm::UndefValue::get(VecTy),
                                             ArrayRef<int>{0, 1, 2, -1},
@@ -2212,7 +2213,7 @@
   if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
     unsigned NumSrcElts = VTy->getNumElements();
     unsigned NumDstElts =
-        cast<llvm::VectorType>(Vec->getType())->getNumElements();
+        cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
     if (NumDstElts == NumSrcElts) {
       // Use shuffle vector is the src and destination are the same number of
       // elements and restore the vector mask since it is on the side it will be
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -1323,7 +1323,7 @@
            "Splatted expr doesn't match with vector element type?");

     // Splat the element across to all elements
-    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+    unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
     return Builder.CreateVectorSplat(NumElements, Src, "splat");
   }
@@ -1630,7 +1630,7 @@
     Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
     Value *Mask;

-    llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
+    auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
     unsigned LHSElts = LTy->getNumElements();

     Mask = RHS;
@@ -1648,10 +1648,12 @@
     //   n = extract mask i
     //   x = extract val n
     //   newv = insert newv, x, i
-    auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
-                                           MTy->getNumElements());
+    auto *RTy = llvm::FixedVectorType::get(
+        LTy->getElementType(),
+        cast<llvm::FixedVectorType>(MTy)->getNumElements());
     Value* NewV = llvm::UndefValue::get(RTy);
-    for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
+    for (unsigned i = 0, e = cast<llvm::FixedVectorType>(MTy)->getNumElements();
+         i != e; ++i) {
       Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
       Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
@@ -1840,7 +1842,7 @@
     return Visit(E->getInit(0));
   }

-  unsigned ResElts = VType->getNumElements();
+  unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();

   // Loop over initializers collecting the Value for each, and remembering
   // whether the source was swizzle (ExtVectorElementExpr).  This will allow
@@ -1864,7 +1866,8 @@

     if (isa<ExtVectorElementExpr>(IE)) {
       llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
-      if (EI->getVectorOperandType()->getNumElements() == ResElts) {
+      if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
+              ->getNumElements() == ResElts) {
         llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
         Value *LHS = nullptr, *RHS = nullptr;
         if (CurIdx == 0) {
@@ -1902,7 +1905,7 @@
       continue;
     }

-    unsigned InitElts = VVT->getNumElements();
+    unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();

     // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
     // input is the same width as the vector being constructed, generate an
@@ -1911,7 +1914,7 @@
     if (isa<ExtVectorElementExpr>(IE)) {
       llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
       Value *SVOp = SVI->getOperand(0);
-      llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
+      auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());

       if (OpTy->getNumElements() == ResElts) {
         for (unsigned j = 0; j != CurIdx; ++j) {
@@ -2247,7 +2250,7 @@
     llvm::Type *DstTy = ConvertType(DestTy);
     Value *Elt = Visit(const_cast<Expr*>(E));
     // Splat the element across to all elements
-    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+    unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
     return Builder.CreateVectorSplat(NumElements, Elt, "splat");
   }
@@ -4443,7 +4446,7 @@
     llvm::Value *RHS = Visit(rhsExpr);

     llvm::Type *condType = ConvertType(condExpr->getType());
-    llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
+    auto *vecTy = cast<llvm::FixedVectorType>(condType);

     unsigned numElem = vecTy->getNumElements();
     llvm::Type *elemType = vecTy->getElementType();
@@ -4646,10 +4649,14 @@
   llvm::Type *DstTy = ConvertType(E->getType());

   llvm::Type *SrcTy = Src->getType();
-  unsigned NumElementsSrc = isa<llvm::VectorType>(SrcTy) ?
-    cast<llvm::VectorType>(SrcTy)->getNumElements() : 0;
-  unsigned NumElementsDst = isa<llvm::VectorType>(DstTy) ?
-    cast<llvm::VectorType>(DstTy)->getNumElements() : 0;
+  unsigned NumElementsSrc =
+      isa<llvm::VectorType>(SrcTy)
+          ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
+          : 0;
+  unsigned NumElementsDst =
+      isa<llvm::VectorType>(DstTy)
+          ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
+          : 0;

   // Going from vec3 to non-vec3 is a special case and requires a shuffle
   // vector to get a vec4, then a bitcast if the target type is different.
diff --git a/clang/lib/CodeGen/SwiftCallingConv.cpp b/clang/lib/CodeGen/SwiftCallingConv.cpp
--- a/clang/lib/CodeGen/SwiftCallingConv.cpp
+++ b/clang/lib/CodeGen/SwiftCallingConv.cpp
@@ -320,9 +320,12 @@
   // If we have a vector type, split it.
   if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
     auto eltTy = vecTy->getElementType();
-    CharUnits eltSize = (end - begin) / vecTy->getNumElements();
+    CharUnits eltSize =
+        (end - begin) / cast<llvm::FixedVectorType>(vecTy)->getNumElements();
     assert(eltSize == getTypeStoreSize(CGM, eltTy));
-    for (unsigned i = 0, e = vecTy->getNumElements(); i != e; ++i) {
+    for (unsigned i = 0,
+                  e = cast<llvm::FixedVectorType>(vecTy)->getNumElements();
+         i != e; ++i) {
       addEntry(eltTy, begin, begin + eltSize);
       begin += eltSize;
     }
@@ -674,8 +677,9 @@

 bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                   llvm::VectorType *vectorTy) {
-  return isLegalVectorType(CGM, vectorSize, vectorTy->getElementType(),
-                           vectorTy->getNumElements());
+  return isLegalVectorType(
+      CGM, vectorSize, vectorTy->getElementType(),
+      cast<llvm::FixedVectorType>(vectorTy)->getNumElements());
 }

 bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
@@ -688,7 +692,7 @@
 std::pair<llvm::Type*, llvm::Type*>
 swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                 llvm::VectorType *vectorTy) {
-  auto numElts = vectorTy->getNumElements();
+  auto numElts = cast<llvm::FixedVectorType>(vectorTy)->getNumElements();
   auto eltTy = vectorTy->getElementType();

   // Try to split the vector type in half.
@@ -710,7 +714,7 @@
   }

   // Try to split the vector into legal subvectors.
-  auto numElts = origVectorTy->getNumElements();
+  auto numElts = cast<llvm::FixedVectorType>(origVectorTy)->getNumElements();
   auto eltTy = origVectorTy->getElementType();

   assert(numElts != 1);