Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -5643,9 +5643,9 @@
   case NEON::BI__builtin_neon_splatq_laneq_v: {
     auto NumElements = VTy->getElementCount();
     if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
-      NumElements = NumElements * 2;
+      NumElements = NumElements.mul(2);
     if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
-      NumElements = NumElements / 2;
+      NumElements = NumElements.div(2);
     Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
     return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
@@ -7994,9 +7994,9 @@
     llvm_unreachable("unknown intrinsic!");
   }
   auto RetTy = llvm::VectorType::get(VTy->getElementType(),
-                                     VTy->getElementCount() * N);
+                                     VTy->getElementCount().mul(N));
 
-  Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
+  Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
   Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
   Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
   BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
@@ -8027,8 +8027,8 @@
   default:
     llvm_unreachable("unknown intrinsic!");
   }
-  auto TupleTy =
-      llvm::VectorType::get(VTy->getElementType(), VTy->getElementCount() * N);
+  auto TupleTy = llvm::VectorType::get(VTy->getElementType(),
+                                       VTy->getElementCount().mul(N));
 
   Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
   Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
@@ -8478,8 +8478,7 @@
   case SVE::BI__builtin_sve_svtbl2_f64: {
     SVETypeFlags TF(Builtin->TypeModifier);
     auto VTy = cast<llvm::ScalableVectorType>(getSVEType(TF));
-    auto TupleTy = llvm::VectorType::get(VTy->getElementType(),
-                                         VTy->getElementCount() * 2);
+    auto TupleTy = llvm::VectorType::getDoubleElementsVectorType(VTy);
     Function *FExtr =
         CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
     Value *V0 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(0)});
Index: llvm/include/llvm/CodeGen/ValueTypes.h
===================================================================
--- llvm/include/llvm/CodeGen/ValueTypes.h
+++ llvm/include/llvm/CodeGen/ValueTypes.h
@@ -339,9 +339,7 @@
   /// If the value type is a scalable vector type, the scalable property will
   /// be set and the runtime size will be a positive integer multiple of the
   /// base size.
-  TypeSize getStoreSizeInBits() const {
-    return getStoreSize() * 8;
-  }
+  TypeSize getStoreSizeInBits() const { return getStoreSize().mul(8); }
 
   /// Rounds the bit-width of the given integer EVT up to the nearest power of
   /// two (and at least to eight), and returns the integer EVT with that
@@ -384,7 +382,16 @@
     EVT EltVT = getVectorElementType();
     auto EltCnt = getVectorElementCount();
     assert(EltCnt.isKnownEven() && "Splitting vector, but not in half!");
-    return EVT::getVectorVT(Context, EltVT, EltCnt / 2);
+    return EVT::getVectorVT(Context, EltVT, EltCnt.div(2));
+  }
+
+  /// Return a VT for a vector type with the same element type but
+  /// double the number of elements. The type returned may be an
+  /// extended type.
+  EVT getDoubleNumVectorElementsVT(LLVMContext &Context) const {
+    EVT EltVT = getVectorElementType();
+    auto EltCnt = getVectorElementCount();
+    return EVT::getVectorVT(Context, EltVT, EltCnt.mul(2));
   }
 
   /// Returns true if the given vector is a power of 2.
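Note for reviewers, not part of the patch: the named methods keep call sites explicit about scaling the known-minimum element count while preserving the scalable flag. A minimal sketch of the intended usage, assuming only the post-patch ElementCount API from llvm/Support/TypeSize.h:

```cpp
// Sketch only; exercises the post-patch ElementCount API.
#include "llvm/Support/TypeSize.h"
#include <cassert>
using namespace llvm;

void elementCountExample() {
  // <vscale x 4 x ...>: known minimum of 4 elements, scaled at runtime.
  ElementCount EC = ElementCount::getScalable(4);

  // mul/div scale the known minimum count and preserve the scalable
  // flag, matching the behaviour of the removed operator* and operator/.
  ElementCount Doubled = EC.mul(2); // <vscale x 8 x ...>
  ElementCount Halved = EC.div(2);  // <vscale x 2 x ...>

  assert(Doubled.getKnownMinValue() == 8 && Doubled.isScalable());
  assert(Halved.getKnownMinValue() == 2 && Halved.isScalable());
}
```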
Index: llvm/include/llvm/IR/DataLayout.h
===================================================================
--- llvm/include/llvm/IR/DataLayout.h
+++ llvm/include/llvm/IR/DataLayout.h
@@ -478,7 +478,7 @@
   ///
   /// For example, returns 40 for i36 and 80 for x86_fp80.
   TypeSize getTypeStoreSizeInBits(Type *Ty) const {
-    return 8 * getTypeStoreSize(Ty);
+    return getTypeStoreSize(Ty).mul(8);
   }
 
   /// Returns true if no extra padding bits are needed when storing the
@@ -511,7 +511,7 @@
   /// This is the amount that alloca reserves for this type. For example,
   /// returns 96 or 128 for x86_fp80, depending on alignment.
   TypeSize getTypeAllocSizeInBits(Type *Ty) const {
-    return 8 * getTypeAllocSize(Ty);
+    return getTypeAllocSize(Ty).mul(8);
   }
 
   /// Returns the minimum ABI-required alignment for the specified type.
@@ -668,8 +668,8 @@
     return TypeSize::Fixed(getPointerSizeInBits(Ty->getPointerAddressSpace()));
   case Type::ArrayTyID: {
     ArrayType *ATy = cast<ArrayType>(Ty);
-    return ATy->getNumElements() *
-           getTypeAllocSizeInBits(ATy->getElementType());
+    return getTypeAllocSizeInBits(ATy->getElementType())
+        .mul(ATy->getNumElements());
   }
   case Type::StructTyID:
     // Get the layout annotation... which is lazily created on demand.
Index: llvm/include/llvm/IR/DerivedTypes.h
===================================================================
--- llvm/include/llvm/IR/DerivedTypes.h
+++ llvm/include/llvm/IR/DerivedTypes.h
@@ -504,7 +504,7 @@
     auto EltCnt = VTy->getElementCount();
     assert(EltCnt.isKnownEven() &&
            "Cannot halve vector with odd number of elements.");
-    return VectorType::get(VTy->getElementType(), EltCnt/2);
+    return VectorType::get(VTy->getElementType(), EltCnt.div(2));
   }
 
   /// This static method returns a VectorType with twice as many elements as the
@@ -513,7 +513,7 @@
     auto EltCnt = VTy->getElementCount();
     assert((EltCnt.getKnownMinValue() * 2ull) <= UINT_MAX &&
            "Too many elements in vector");
-    return VectorType::get(VTy->getElementType(), EltCnt * 2);
+    return VectorType::get(VTy->getElementType(), EltCnt.mul(2));
   }
 
   /// Return true if the specified type is valid as a element type.
Index: llvm/include/llvm/Support/MachineValueType.h
===================================================================
--- llvm/include/llvm/Support/MachineValueType.h
+++ llvm/include/llvm/Support/MachineValueType.h
@@ -425,7 +425,7 @@
       MVT EltVT = getVectorElementType();
       auto EltCnt = getVectorElementCount();
       assert(EltCnt.isKnownEven() && "Splitting vector, but not in half!");
-      return getVectorVT(EltVT, EltCnt / 2);
+      return getVectorVT(EltVT, EltCnt.div(2));
     }
 
     /// Returns true if the given vector is a power of 2.
@@ -944,9 +944,7 @@
     /// If the value type is a scalable vector type, the scalable property will
     /// be set and the runtime size will be a positive integer multiple of the
     /// base size.
-    TypeSize getStoreSizeInBits() const {
-      return getStoreSize() * 8;
-    }
+    TypeSize getStoreSizeInBits() const { return getStoreSize().mul(8); }
 
     /// Returns true if the number of bits for the type is a multiple of an
     /// 8-bit byte.
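The same pattern applies to TypeSize; a sketch (not part of the patch) of the mul/div/isKnownMultipleOf calls that the header changes above rely on:

```cpp
// Sketch only; exercises the post-patch TypeSize API.
#include "llvm/Support/TypeSize.h"
#include <cassert>
using namespace llvm;

void typeSizeExample() {
  // A scalable 16-byte size, e.g. the minimum size of one SVE register.
  TypeSize Bytes = TypeSize::Scalable(16);

  // mul(8) converts bytes to bits while keeping the scalable flag,
  // mirroring the patched getTypeStoreSizeInBits/getTypeAllocSizeInBits.
  TypeSize Bits = Bytes.mul(8);
  assert(Bits.getKnownMinSize() == 128 && Bits.isScalable());

  // isKnownMultipleOf replaces the removed operator% for divisibility
  // checks; div replaces the removed operator/.
  assert(Bits.isKnownMultipleOf(64));
  assert(Bits.div(2).getKnownMinSize() == 64);
}
```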
Index: llvm/include/llvm/Support/TypeSize.h
===================================================================
--- llvm/include/llvm/Support/TypeSize.h
+++ llvm/include/llvm/Support/TypeSize.h
@@ -41,14 +41,6 @@
 public:
   ElementCount() = default;
 
-  ElementCount operator*(unsigned RHS) {
-    return { Min * RHS, Scalable };
-  }
-  ElementCount operator/(unsigned RHS) {
-    assert(Min % RHS == 0 && "Min is not a multiple of RHS.");
-    return { Min / RHS, Scalable };
-  }
-
   friend ElementCount operator-(const ElementCount &LHS,
                                 const ElementCount &RHS) {
     assert(LHS.Scalable == RHS.Scalable &&
@@ -65,14 +57,12 @@
   bool operator==(unsigned RHS) const { return Min == RHS && !Scalable; }
   bool operator!=(unsigned RHS) const { return !(*this == RHS); }
 
-  ElementCount &operator*=(unsigned RHS) {
-    Min *= RHS;
-    return *this;
+  ElementCount mul(unsigned RHS) const {
+    return ElementCount(Min * RHS, Scalable);
   }
 
-  ElementCount &operator/=(unsigned RHS) {
-    Min /= RHS;
-    return *this;
+  ElementCount div(unsigned RHS) const {
+    return ElementCount(Min / RHS, Scalable);
   }
 
   ElementCount NextPowerOf2() const {
@@ -193,19 +183,9 @@
     return !(LHS < RHS);
   }
 
-  // Convenience operators to obtain relative sizes independently of
-  // the scalable flag.
-  TypeSize operator*(unsigned RHS) const {
-    return { MinSize * RHS, IsScalable };
-  }
-
-  friend TypeSize operator*(const unsigned LHS, const TypeSize &RHS) {
-    return { LHS * RHS.MinSize, RHS.IsScalable };
-  }
+  TypeSize mul(uint64_t RHS) const { return {MinSize * RHS, IsScalable}; }
 
-  TypeSize operator/(unsigned RHS) const {
-    return { MinSize / RHS, IsScalable };
-  }
+  TypeSize div(uint64_t RHS) const { return {MinSize / RHS, IsScalable}; }
 
   TypeSize &operator-=(TypeSize RHS) {
     assert(IsScalable == RHS.IsScalable &&
@@ -227,18 +207,6 @@
     return {LHS.MinSize - RHS.MinSize, LHS.IsScalable};
   }
 
-  friend TypeSize operator/(const TypeSize &LHS, const TypeSize &RHS) {
-    assert(LHS.IsScalable == RHS.IsScalable &&
-           "Arithmetic using mixed scalable and fixed types");
-    return {LHS.MinSize / RHS.MinSize, LHS.IsScalable};
-  }
-
-  friend TypeSize operator%(const TypeSize &LHS, const TypeSize &RHS) {
-    assert(LHS.IsScalable == RHS.IsScalable &&
-           "Arithmetic using mixed scalable and fixed types");
-    return {LHS.MinSize % RHS.MinSize, LHS.IsScalable};
-  }
-
   // Return the minimum size with the assumption that the size is exact.
   // Use in places where a scalable size doesn't make sense (e.g. non-vector
   // types, or vectors in backends which don't support scalable vectors).
@@ -270,6 +238,8 @@
   // Returns true if the type size is zero.
   bool isZero() const { return MinSize == 0; }
 
+  bool isKnownMultipleOf(uint64_t RHS) const { return MinSize % RHS == 0; }
+
   // Casts to a uint64_t if this is a fixed-width size.
   //
   // This interface is deprecated and will be removed in a future version
@@ -300,44 +270,6 @@
 #endif
   }
 
-  // Additional convenience operators needed to avoid ambiguous parses.
-  // TODO: Make uint64_t the default operator?
-  TypeSize operator*(uint64_t RHS) const {
-    return { MinSize * RHS, IsScalable };
-  }
-
-  TypeSize operator*(int RHS) const {
-    return { MinSize * RHS, IsScalable };
-  }
-
-  TypeSize operator*(int64_t RHS) const {
-    return { MinSize * RHS, IsScalable };
-  }
-
-  friend TypeSize operator*(const uint64_t LHS, const TypeSize &RHS) {
-    return { LHS * RHS.MinSize, RHS.IsScalable };
-  }
-
-  friend TypeSize operator*(const int LHS, const TypeSize &RHS) {
-    return { LHS * RHS.MinSize, RHS.IsScalable };
-  }
-
-  friend TypeSize operator*(const int64_t LHS, const TypeSize &RHS) {
-    return { LHS * RHS.MinSize, RHS.IsScalable };
-  }
-
-  TypeSize operator/(uint64_t RHS) const {
-    return { MinSize / RHS, IsScalable };
-  }
-
-  TypeSize operator/(int RHS) const {
-    return { MinSize / RHS, IsScalable };
-  }
-
-  TypeSize operator/(int64_t RHS) const {
-    return { MinSize / RHS, IsScalable };
-  }
-
   TypeSize NextPowerOf2() const {
     return TypeSize(llvm::NextPowerOf2(MinSize), IsScalable);
   }
Index: llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -11317,7 +11317,7 @@
   auto EltCnt = VecTy.getVectorElementCount();
   unsigned SizeRatio = ExTy.getSizeInBits()/TrTy.getSizeInBits();
-  auto NewEltCnt = EltCnt * SizeRatio;
+  auto NewEltCnt = EltCnt.mul(SizeRatio);
 
   EVT NVT = EVT::getVectorVT(*DAG.getContext(), TrTy, NewEltCnt);
   assert(NVT.getSizeInBits() == VecTy.getSizeInBits() && "Invalid Size");
@@ -19007,7 +19007,7 @@
   // check the other type in the cast to make sure this is really legal.
   EVT VT = N->getValueType(0);
   EVT SrcEltVT = SrcVT.getVectorElementType();
-  ElementCount NumElts = SrcVT.getVectorElementCount() * N->getNumOperands();
+  ElementCount NumElts = SrcVT.getVectorElementCount().mul(N->getNumOperands());
   EVT ConcatSrcVT = EVT::getVectorVT(*DAG.getContext(), SrcEltVT, NumElts);
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   switch (CastOpcode) {
@@ -19421,7 +19421,7 @@
          "multiple of the result's element count");
 
   // It's fine to use TypeSize here as we know the offset will not be negative.
-  TypeSize Offset = VT.getStoreSize() * (Index / NumElts);
+  TypeSize Offset = VT.getStoreSize().mul(Index / NumElts);
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   if (!TLI.shouldReduceLoadWidth(Ld, Ld->getExtensionType(), VT))
     return SDValue();
@@ -19484,7 +19484,7 @@
     unsigned DestNumElts = V.getValueType().getVectorMinNumElements();
     if ((SrcNumElts % DestNumElts) == 0) {
       unsigned SrcDestRatio = SrcNumElts / DestNumElts;
-      ElementCount NewExtEC = NVT.getVectorElementCount() * SrcDestRatio;
+      ElementCount NewExtEC = NVT.getVectorElementCount().mul(SrcDestRatio);
       EVT NewExtVT =
           EVT::getVectorVT(*DAG.getContext(), SrcVT.getScalarType(), NewExtEC);
       if (TLI.isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, NewExtVT)) {
@@ -19498,7 +19498,7 @@
     if ((DestNumElts % SrcNumElts) == 0) {
       unsigned DestSrcRatio = DestNumElts / SrcNumElts;
       if ((NVT.getVectorMinNumElements() % DestSrcRatio) == 0) {
-        ElementCount NewExtEC = NVT.getVectorElementCount() / DestSrcRatio;
+        ElementCount NewExtEC = NVT.getVectorElementCount().div(DestSrcRatio);
         EVT ScalarVT = SrcVT.getScalarType();
         if ((ExtIdx % DestSrcRatio) == 0) {
           SDLoc DL(N);
@@ -20660,12 +20660,12 @@
       unsigned EltSizeInBits = VT.getScalarSizeInBits();
       if ((EltSizeInBits % N1SrcSVT.getSizeInBits()) == 0) {
         unsigned Scale = EltSizeInBits / N1SrcSVT.getSizeInBits();
-        NewVT = EVT::getVectorVT(Ctx, N1SrcSVT, NumElts * Scale);
+        NewVT = EVT::getVectorVT(Ctx, N1SrcSVT, NumElts.mul(Scale));
         NewIdx = DAG.getVectorIdxConstant(InsIdx * Scale, DL);
       } else if ((N1SrcSVT.getSizeInBits() % EltSizeInBits) == 0) {
         unsigned Scale = N1SrcSVT.getSizeInBits() / EltSizeInBits;
         if (NumElts.isKnownMultipleOf(Scale) && (InsIdx % Scale) == 0) {
-          NewVT = EVT::getVectorVT(Ctx, N1SrcSVT, NumElts / Scale);
+          NewVT = EVT::getVectorVT(Ctx, N1SrcSVT, NumElts.div(Scale));
           NewIdx = DAG.getVectorIdxConstant(InsIdx / Scale, DL);
         }
       }
Index: llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -836,7 +836,7 @@
                                       SDValue Hi) {
   assert(Lo.getValueType().getVectorElementType() ==
              Op.getValueType().getVectorElementType() &&
-         Lo.getValueType().getVectorElementCount() * 2 ==
+         Lo.getValueType().getVectorElementCount().mul(2) ==
              Op.getValueType().getVectorElementCount() &&
          Hi.getValueType() == Lo.getValueType() &&
          "Invalid type for split vector");
Index: llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1822,7 +1822,7 @@
   // to scalarization in many cases due to the input vector being split too
   // far.
   if ((SrcVT.getVectorMinNumElements() & 1) == 0 &&
-      SrcVT.getSizeInBits() * 2 < DestVT.getSizeInBits()) {
+      SrcVT.getSizeInBits().mul(2) < DestVT.getSizeInBits()) {
     LLVMContext &Ctx = *DAG.getContext();
     EVT NewSrcVT = SrcVT.widenIntegerVectorElementType(Ctx);
     EVT SplitSrcVT = SrcVT.getHalfNumVectorElementsVT(Ctx);
@@ -2635,8 +2635,8 @@
   EVT HalfElementVT = IsFloat ?
     EVT::getFloatingPointVT(InElementSize/2) :
     EVT::getIntegerVT(*DAG.getContext(), InElementSize/2);
-  EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), HalfElementVT,
-                                NumElements/2);
+  EVT HalfVT =
+      EVT::getVectorVT(*DAG.getContext(), HalfElementVT, NumElements.div(2));
 
   SDValue HalfLo;
   SDValue HalfHi;
@@ -2693,7 +2693,7 @@
   LLVMContext &Context = *DAG.getContext();
   EVT PartResVT = EVT::getVectorVT(Context, MVT::i1, PartEltCnt);
-  EVT WideResVT = EVT::getVectorVT(Context, MVT::i1, PartEltCnt*2);
+  EVT WideResVT = EVT::getVectorVT(Context, MVT::i1, PartEltCnt.mul(2));
 
   LoRes = DAG.getNode(ISD::SETCC, DL, PartResVT, Lo0, Lo1, N->getOperand(2));
   HiRes = DAG.getNode(ISD::SETCC, DL, PartResVT, Hi0, Hi1, N->getOperand(2));
@@ -5062,11 +5062,12 @@
     EVT NewLdTy = LdOps[i].getValueType();
     if (NewLdTy != LdTy) {
       // Create a larger vector.
+      TypeSize LdTySize = LdTy.getSizeInBits();
+      TypeSize NewLdTySize = NewLdTy.getSizeInBits();
+      assert(NewLdTySize.isScalable() == LdTySize.isScalable() &&
+             NewLdTySize.isKnownMultipleOf(LdTySize.getKnownMinSize()));
       unsigned NumOps =
-          (NewLdTy.getSizeInBits() / LdTy.getSizeInBits()).getKnownMinSize();
-      assert(
-          (NewLdTy.getSizeInBits() % LdTy.getSizeInBits()).getKnownMinSize() ==
-          0);
+          NewLdTySize.getKnownMinSize() / LdTySize.getKnownMinSize();
       SmallVector<SDValue, 16> WidenOps(NumOps);
       unsigned j = 0;
       for (; j != End-Idx; ++j)
@@ -5082,12 +5083,13 @@
       ConcatOps[--Idx] = LdOps[i];
   }
 
-  if (WidenWidth == LdTy.getSizeInBits() * (End - Idx))
+  TypeSize LdTySize = LdTy.getSizeInBits();
+  if (WidenWidth == LdTySize.mul(End - Idx))
     return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT,
                        makeArrayRef(&ConcatOps[Idx], End - Idx));
 
   // We need to fill the rest with undefs to build the vector.
-  unsigned NumOps = (WidenWidth / LdTy.getSizeInBits()).getKnownMinSize();
+  unsigned NumOps = WidenWidth.getKnownMinSize() / LdTySize.getKnownMinSize();
   SmallVector<SDValue, 16> WidenOps(NumOps);
   SDValue UndefVal = DAG.getUNDEF(LdTy);
   {
Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -4310,7 +4310,7 @@
                       return Ops[0].getValueType() == Op.getValueType();
                     }) &&
            "Concatenation of vectors with inconsistent value types!");
-    assert((Ops[0].getValueType().getVectorElementCount() * Ops.size()) ==
+    assert((Ops[0].getValueType().getVectorElementCount().mul(Ops.size())) ==
                VT.getVectorElementCount() &&
            "Incorrect element count in vector concatenation!");
Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -407,7 +407,7 @@
       IntermediateVT.isVector() ?
           EVT::getVectorVT(
               *DAG.getContext(), IntermediateVT.getScalarType(),
-              IntermediateVT.getVectorElementCount() * NumParts)
+              IntermediateVT.getVectorElementCount().mul(NumParts))
           : EVT::getVectorVT(*DAG.getContext(),
                              IntermediateVT.getScalarType(),
                              NumIntermediates);
@@ -732,7 +732,7 @@
   Optional<ElementCount> DestEltCnt;
 
   if (IntermediateVT.isVector())
-    DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
+    DestEltCnt = IntermediateVT.getVectorElementCount().mul(NumIntermediates);
   else
     DestEltCnt = ElementCount::getFixed(NumIntermediates);
 
Index: llvm/lib/CodeGen/TargetLoweringBase.cpp
===================================================================
--- llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -833,7 +833,7 @@
     if (LA == TypeSplitVector)
       return LegalizeKind(LA,
                           EVT::getVectorVT(Context, SVT.getVectorElementType(),
-                                           SVT.getVectorElementCount() / 2));
+                                           SVT.getVectorElementCount().div(2)));
     if (LA == TypeScalarizeVector)
       return LegalizeKind(LA, SVT.getVectorElementType());
     return LegalizeKind(LA, NVT);
@@ -889,7 +889,7 @@
     // <4 x i140> -> <2 x i140>
     if (LK.first == TypeExpandInteger)
       return LegalizeKind(TypeSplitVector,
-                          EVT::getVectorVT(Context, EltVT, NumElts / 2));
+                          VT.getHalfNumVectorElementsVT(Context));
 
     // Promote the integer element types until a legal vector type is found
     // or until the element integer type is too big. If a legal type was not
@@ -949,7 +949,7 @@
   }
 
   // Vectors with illegal element types are expanded.
-  EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorElementCount() / 2);
+  EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorElementCount().div(2));
   return LegalizeKind(TypeSplitVector, NVT);
 }
 
@@ -982,7 +982,7 @@
   // scalar.
   while (EC.getKnownMinValue() > 1 &&
          !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
-    EC /= 2;
+    EC = EC.div(2);
     NumVectorRegs <<= 1;
   }
@@ -1464,7 +1464,7 @@
   // breakdowns, like nxv7i64 -> nxv8i64 -> 4 x nxv2i64. Currently the only
   // supported cases are vectors that are broken down into equal parts
   // such as nxv6i64 -> 3 x nxv2i64.
-  assert((PartVT.getVectorElementCount() * NumIntermediates) ==
+  assert((PartVT.getVectorElementCount().mul(NumIntermediates)) ==
              VT.getVectorElementCount() &&
          "Expected an integer multiple of PartVT");
   IntermediateVT = PartVT;
@@ -1483,7 +1483,7 @@
   // end with a scalar if the target doesn't support vectors.
   while (EltCnt.getKnownMinValue() > 1 &&
          !isTypeLegal(EVT::getVectorVT(Context, EltTy, EltCnt))) {
-    EltCnt /= 2;
+    EltCnt = EltCnt.div(2);
     NumVectorRegs <<= 1;
   }
Index: llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -4829,7 +4829,7 @@
   ElementCount EC = PredVT.getVectorElementCount();
   EVT ScalarVT =
       EVT::getIntegerVT(Ctx, AArch64::SVEBitsPerBlock / EC.getKnownMinValue());
-  EVT MemVT = EVT::getVectorVT(Ctx, ScalarVT, EC * NumVec);
+  EVT MemVT = EVT::getVectorVT(Ctx, ScalarVT, EC.mul(NumVec));
 
   return MemVT;
 }
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -9111,7 +9111,7 @@
   SDValue Vec1 = Op.getOperand(1);
 
   // Ensure the subvector is half the size of the main vector.
-  if (VT.getVectorElementCount() != (InVT.getVectorElementCount() * 2))
+  if (VT.getVectorElementCount() != (InVT.getVectorElementCount().mul(2)))
     return SDValue();
 
   // Extend elements of smaller vector...
@@ -9680,7 +9680,7 @@
 #endif
     // memVT is `NumVecs * VT`.
     Info.memVT = EVT::getVectorVT(CI.getType()->getContext(), VT.getScalarType(),
-                                  EC * NumVecs);
+                                  EC.mul(NumVecs));
     Info.ptrVal = CI.getArgOperand(CI.getNumArgOperands() - 1);
     Info.offset = 0;
     Info.align.reset();
@@ -10447,7 +10447,7 @@
          "invalid tuple vector type!");
 
   EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
-                                 VT.getVectorElementCount() / N);
+                                 VT.getVectorElementCount().div(N));
   assert(isTypeLegal(SplitVT));
 
   SmallVector<EVT, 4> VTs(N, SplitVT);
@@ -14188,9 +14188,7 @@
   assert((EltTy == MVT::i8 || EltTy == MVT::i16 || EltTy == MVT::i32) &&
          "Sign extending from an invalid type");
 
-  EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
-                               VT.getVectorElementType(),
-                               VT.getVectorElementCount() * 2);
+  EVT ExtVT = VT.getDoubleNumVectorElementsVT(*DAG.getContext());
 
   SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ExtOp.getValueType(),
                             ExtOp, DAG.getValueType(ExtVT));
@@ -14594,9 +14592,9 @@
 
   EVT VT = Opnds[0].getValueType();
   EVT EltVT = VT.getVectorElementType();
-  EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
-                                VT.getVectorElementCount() *
-                                    (N->getNumOperands() - 2));
+  EVT DestVT = EVT::getVectorVT(
+      *DAG.getContext(), EltVT,
+      VT.getVectorElementCount().mul((N->getNumOperands() - 2)));
   SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, DestVT, Opnds);
   return DAG.getMergeValues({Concat, Chain}, DL);
 }
@@ -14797,7 +14795,7 @@
 
   ElementCount ResEC = VT.getVectorElementCount();
 
-  if (InVT.getVectorElementCount() != (ResEC * 2))
+  if (InVT.getVectorElementCount() != (ResEC.mul(2)))
     return;
 
   auto *CIndex = dyn_cast<ConstantSDNode>(N->getOperand(1));
Index: llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -413,7 +413,7 @@
   /// Return the estimated number of vector ops required for an operation on
   /// \p VT * N.
   unsigned getNumOps(Type *ST, unsigned N) {
-    return std::ceil((ST->getPrimitiveSizeInBits() * N).getFixedSize() /
+    return std::ceil((ST->getPrimitiveSizeInBits().mul(N)).getFixedSize() /
                      double(TTI.getRegisterBitWidth(true)));
  }
Index: llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -353,7 +353,9 @@
   // with a vector.
   if (VF.isVector()) {
     auto *VectorTy = VectorType::get(Ty, VF);
-    return VF * DL.getTypeAllocSize(Ty) != DL.getTypeStoreSize(VectorTy);
+    uint64_t TySize = DL.getTypeAllocSize(Ty).getFixedSize();
+    TypeSize VFTy = TypeSize(TySize * VF.getKnownMinValue(), VF.isScalable());
+    return VFTy != DL.getTypeStoreSize(VectorTy);
   }
 
   // If the vectorization factor is one, we just check if an array of type Ty
@@ -2325,7 +2327,7 @@
   Type *ScalarTy = getMemInstValueType(Instr);
   unsigned InterleaveFactor = Group->getFactor();
   assert(!VF.isScalable() && "scalable vectors not yet supported.");
-  auto *VecTy = VectorType::get(ScalarTy, VF * InterleaveFactor);
+  auto *VecTy = VectorType::get(ScalarTy, VF.mul(InterleaveFactor));
 
   // Prepare for the new pointers.
   SmallVector<Value *, 2> AddrParts;
@@ -6265,7 +6267,7 @@
   unsigned InterleaveFactor = Group->getFactor();
   assert(!VF.isScalable() && "scalable vectors not yet supported.");
-  auto *WideVecTy = VectorType::get(ValTy, VF * InterleaveFactor);
+  auto *WideVecTy = VectorType::get(ValTy, VF.mul(InterleaveFactor));
 
   // Holds the indices of existing members in an interleaved load group.
   // An interleaved store group doesn't need this as it doesn't allow gaps.
@@ -7825,7 +7827,7 @@
   ElementCount VF = ElementCount::getFixed(Range.Start);
   Plan->addVF(VF);
   RSO << "Initial VPlan for VF={" << VF;
-  for (VF *= 2; VF.getKnownMinValue() < Range.End; VF *= 2) {
+  for (VF = VF.mul(2); VF.getKnownMinValue() < Range.End; VF = VF.mul(2)) {
     Plan->addVF(VF);
     RSO << "," << VF;
   }
Index: llvm/unittests/CodeGen/ScalableVectorMVTsTest.cpp
===================================================================
--- llvm/unittests/CodeGen/ScalableVectorMVTsTest.cpp
+++ llvm/unittests/CodeGen/ScalableVectorMVTsTest.cpp
@@ -62,8 +62,8 @@
   EXPECT_EQ(Vnx4i32.getHalfNumVectorElementsVT(Ctx), Vnx2i32);
 
-  // Check that overloaded '*' and '/' operators work
-  EXPECT_EQ(EVT::getVectorVT(Ctx, MVT::i64, EltCnt * 2), MVT::nxv4i64);
-  EXPECT_EQ(EVT::getVectorVT(Ctx, MVT::i64, EltCnt / 2), MVT::nxv1i64);
+  // Check that the explicit mul and div methods work
+  EXPECT_EQ(EVT::getVectorVT(Ctx, MVT::i64, EltCnt.mul(2)), MVT::nxv4i64);
+  EXPECT_EQ(EVT::getVectorVT(Ctx, MVT::i64, EltCnt.div(2)), MVT::nxv1i64);
 
   // Check that float->int conversion works
   EVT Vnx2f64 = EVT::getVectorVT(Ctx, MVT::f64, ElementCount::getScalable(2));
Index: llvm/unittests/IR/VectorTypesTest.cpp
===================================================================
--- llvm/unittests/IR/VectorTypesTest.cpp
+++ llvm/unittests/IR/VectorTypesTest.cpp
@@ -72,13 +72,13 @@
   EXPECT_EQ(V4Int64Ty->getElementType()->getScalarSizeInBits(), 64U);
 
   auto *V2Int64Ty =
-      dyn_cast<FixedVectorType>(VectorType::get(Int64Ty, EltCnt / 2));
+      dyn_cast<FixedVectorType>(VectorType::get(Int64Ty, EltCnt.div(2)));
   ASSERT_NE(nullptr, V2Int64Ty);
   EXPECT_EQ(V2Int64Ty->getNumElements(), 2U);
   EXPECT_EQ(V2Int64Ty->getElementType()->getScalarSizeInBits(), 64U);
 
   auto *V8Int64Ty =
-      dyn_cast<FixedVectorType>(VectorType::get(Int64Ty, EltCnt * 2));
+      dyn_cast<FixedVectorType>(VectorType::get(Int64Ty, EltCnt.mul(2)));
   ASSERT_NE(nullptr, V8Int64Ty);
   EXPECT_EQ(V8Int64Ty->getNumElements(), 8U);
   EXPECT_EQ(V8Int64Ty->getElementType()->getScalarSizeInBits(), 64U);
@@ -167,13 +167,13 @@
   EXPECT_EQ(ScV4Int64Ty->getElementType()->getScalarSizeInBits(), 64U);
 
   auto *ScV2Int64Ty =
-      dyn_cast<ScalableVectorType>(VectorType::get(Int64Ty, EltCnt / 2));
+      dyn_cast<ScalableVectorType>(VectorType::get(Int64Ty, EltCnt.div(2)));
   ASSERT_NE(nullptr, ScV2Int64Ty);
   EXPECT_EQ(ScV2Int64Ty->getMinNumElements(), 2U);
   EXPECT_EQ(ScV2Int64Ty->getElementType()->getScalarSizeInBits(), 64U);
 
   auto *ScV8Int64Ty =
-      dyn_cast<ScalableVectorType>(VectorType::get(Int64Ty, EltCnt * 2));
+      dyn_cast<ScalableVectorType>(VectorType::get(Int64Ty, EltCnt.mul(2)));
   ASSERT_NE(nullptr, ScV8Int64Ty);
   EXPECT_EQ(ScV8Int64Ty->getMinNumElements(), 8U);
   EXPECT_EQ(ScV8Int64Ty->getElementType()->getScalarSizeInBits(), 64U);
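A follow-up test in the style of the updated unit tests could pin down the fixed/scalable behaviour of the new TypeSize methods as well; this is a sketch with a hypothetical test name, not something the patch adds:

```cpp
// Sketch only; the test name is hypothetical and not part of the patch.
#include "llvm/Support/TypeSize.h"
#include "gtest/gtest.h"
using namespace llvm;

TEST(TypeSizeSketch, MulDivPreserveScalableFlag) {
  TypeSize FixedTS = TypeSize::Fixed(128);
  TypeSize ScalableTS = TypeSize::Scalable(128);

  // mul/div scale only the known minimum size; the scalable flag is kept.
  EXPECT_EQ(FixedTS.mul(2).getKnownMinSize(), 256U);
  EXPECT_FALSE(FixedTS.mul(2).isScalable());
  EXPECT_EQ(ScalableTS.div(2).getKnownMinSize(), 64U);
  EXPECT_TRUE(ScalableTS.div(2).isScalable());

  // The divisibility query that replaces the removed operator%.
  EXPECT_TRUE(ScalableTS.isKnownMultipleOf(64));
  EXPECT_FALSE(ScalableTS.isKnownMultipleOf(48));
}
```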