diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -3197,7 +3197,7 @@
   if (Vec.getOpcode() == ISD::BUILD_VECTOR) {
     SDValue Elt0 = Vec.getOperand(0);
     EVT EltVT = Elt0.getValueType();
-    if (VT.getSizeInBits() <= EltVT.getSizeInBits()) {
+    if (VT.getFixedSizeInBits() <= EltVT.getFixedSizeInBits()) {
       if (EltVT.isFloatingPoint()) {
         Elt0 = DAG.getNode(ISD::BITCAST, SL,
                            EltVT.changeTypeToInteger(), Elt0);
diff --git a/llvm/lib/Target/ARM/ARMCallLowering.cpp b/llvm/lib/Target/ARM/ARMCallLowering.cpp
--- a/llvm/lib/Target/ARM/ARMCallLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMCallLowering.cpp
@@ -335,8 +335,8 @@
     assert(VA.isRegLoc() && "Value shouldn't be assigned to reg");
     assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");

-    auto ValSize = VA.getValVT().getSizeInBits();
-    auto LocSize = VA.getLocVT().getSizeInBits();
+    uint64_t ValSize = VA.getValVT().getFixedSizeInBits();
+    uint64_t LocSize = VA.getLocVT().getFixedSizeInBits();

     assert(ValSize <= 64 && "Unsupported value size");
     assert(LocSize <= 64 && "Unsupported location size");
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -7763,17 +7763,19 @@
   for (auto &Src : Sources) {
     EVT SrcVT = Src.ShuffleVec.getValueType();

-    if (SrcVT.getSizeInBits() == VT.getSizeInBits())
+    uint64_t SrcVTSize = SrcVT.getFixedSizeInBits();
+    uint64_t VTSize = VT.getFixedSizeInBits();
+    if (SrcVTSize == VTSize)
       continue;

     // This stage of the search produces a source with the same element type as
     // the original, but with a total width matching the BUILD_VECTOR output.
     EVT EltVT = SrcVT.getVectorElementType();
-    unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits();
+    unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits();
     EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);

-    if (SrcVT.getSizeInBits() < VT.getSizeInBits()) {
-      if (2 * SrcVT.getSizeInBits() != VT.getSizeInBits())
+    if (SrcVTSize < VTSize) {
+      if (2 * SrcVTSize != VTSize)
         return SDValue();
       // We can pad out the smaller vector for free, so if it's part of a
       // shuffle...
@@ -7783,7 +7785,7 @@
       continue;
     }

-    if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits())
+    if (SrcVTSize != 2 * VTSize)
       return SDValue();

     if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1300,7 +1300,8 @@
     // promoted differently). The cost of 2 here is then a load and vrev or
     // vmovn.
     if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 &&
-        VecTy->isIntOrIntVectorTy() && DL.getTypeSizeInBits(SubVecTy) <= 64)
+        VecTy->isIntOrIntVectorTy() &&
+        DL.getTypeSizeInBits(SubVecTy).getFixedSize() <= 64)
       return 2 * BaseCost;
   }

diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
--- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -1391,14 +1391,15 @@
   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
     return false;

-  return (Ty1->getPrimitiveSizeInBits() > Ty2->getPrimitiveSizeInBits());
+  return (Ty1->getPrimitiveSizeInBits().getFixedSize() >
+          Ty2->getPrimitiveSizeInBits().getFixedSize());
 }

 bool MSP430TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
   if (!VT1.isInteger() || !VT2.isInteger())
     return false;

-  return (VT1.getSizeInBits() > VT2.getSizeInBits());
+  return (VT1.getFixedSizeInBits() > VT2.getFixedSizeInBits());
 }

 bool MSP430TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp
--- a/llvm/lib/Target/Mips/MipsCallLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp
@@ -347,7 +347,7 @@
                                       const ISD::ArgFlagsTy &Flags) {
   // > does not mean loss of information as type RegisterVT can't hold type VT,
   // it means that type VT is split into multiple registers of type RegisterVT
-  if (VT.getSizeInBits() >= RegisterVT.getSizeInBits())
+  if (VT.getFixedSizeInBits() >= RegisterVT.getFixedSizeInBits())
     return CCValAssign::LocInfo::Full;
   if (Flags.isSExt())
     return CCValAssign::LocInfo::SExt;
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -134,7 +134,7 @@
   // Break down vector types to either 2 i64s or 4 i32s.
   RegisterVT = getRegisterTypeForCallingConv(Context, CC, VT);
   IntermediateVT = RegisterVT;
-  NumIntermediates = VT.getSizeInBits() < RegisterVT.getSizeInBits()
+  NumIntermediates = VT.getFixedSizeInBits() < RegisterVT.getFixedSizeInBits()
                          ? VT.getVectorNumElements()
                          : VT.getSizeInBits() / RegisterVT.getSizeInBits();
   return NumIntermediates;
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -2590,7 +2590,8 @@
           // Extend the element if necessary (e.g. an i8 is loaded
           // into an i16 register)
           if (Ins[InsIdx].VT.isInteger() &&
-              Ins[InsIdx].VT.getSizeInBits() > LoadVT.getSizeInBits()) {
+              Ins[InsIdx].VT.getFixedSizeInBits() >
+                  LoadVT.getFixedSizeInBits()) {
             unsigned Extend = Ins[InsIdx].Flags.isSExt() ?
                               ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
             Elt = DAG.getNode(Extend, dl, Ins[InsIdx].VT, Elt);
@@ -4564,13 +4565,13 @@
   if (Op.getOpcode() == ISD::SIGN_EXTEND ||
       Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
     EVT OrigVT = Op.getOperand(0).getValueType();
-    if (OrigVT.getSizeInBits() <= OptSize) {
+    if (OrigVT.getFixedSizeInBits() <= OptSize) {
       S = Signed;
       return true;
     }
   } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {
     EVT OrigVT = Op.getOperand(0).getValueType();
-    if (OrigVT.getSizeInBits() <= OptSize) {
+    if (OrigVT.getFixedSizeInBits() <= OptSize) {
       S = Unsigned;
       return true;
     }
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -6923,7 +6923,7 @@
   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;

   assert((!ValVT.isInteger() ||
-          (ValVT.getSizeInBits() <= RegVT.getSizeInBits())) &&
+          (ValVT.getFixedSizeInBits() <= RegVT.getFixedSizeInBits())) &&
          "Integer argument exceeds register size: should have been legalized");

   if (ValVT == MVT::f128)
@@ -6986,7 +6986,7 @@
   case MVT::i32: {
     const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
     // AIX integer arguments are always passed in register width.
-    if (ValVT.getSizeInBits() < RegVT.getSizeInBits())
+    if (ValVT.getFixedSizeInBits() < RegVT.getFixedSizeInBits())
       LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
                                   : CCValAssign::LocInfo::ZExt;
     if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
@@ -7064,7 +7064,7 @@
                                        SelectionDAG &DAG, SDValue ArgValue,
                                        MVT LocVT, const SDLoc &dl) {
   assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
-  assert(ValVT.getSizeInBits() < LocVT.getSizeInBits());
+  assert(ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits());

   if (Flags.isSExt())
     ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
@@ -7267,7 +7267,7 @@
         MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
     SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
     if (ValVT.isScalarInteger() &&
-        (ValVT.getSizeInBits() < LocVT.getSizeInBits())) {
+        (ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits())) {
       ArgValue =
           truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
     }
@@ -7558,7 +7558,8 @@
       // f32 in 32-bit GPR
       // f64 in 64-bit GPR
       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
-    else if (Arg.getValueType().getSizeInBits() < LocVT.getSizeInBits())
+    else if (Arg.getValueType().getFixedSizeInBits() <
+             LocVT.getFixedSizeInBits())
       // f32 in 64-bit GPR.
       RegsToPass.push_back(std::make_pair(
           VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -2541,8 +2541,9 @@
                                  MachinePointerInfo(SV));
   // Load the actual argument out of the pointer VAList.
   // We can't count on greater alignment than the word size.
-  return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(),
-                     std::min(PtrVT.getSizeInBits(), VT.getSizeInBits()) / 8);
+  return DAG.getLoad(
+      VT, DL, InChain, VAList, MachinePointerInfo(),
+      std::min(PtrVT.getFixedSizeInBits(), VT.getFixedSizeInBits()) / 8);
 }

 static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -982,16 +982,16 @@
 bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
   if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
     return false;
-  unsigned FromBits = FromType->getPrimitiveSizeInBits();
-  unsigned ToBits = ToType->getPrimitiveSizeInBits();
+  unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedSize();
+  unsigned ToBits = ToType->getPrimitiveSizeInBits().getFixedSize();
   return FromBits > ToBits;
 }

 bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
   if (!FromVT.isInteger() || !ToVT.isInteger())
     return false;
-  unsigned FromBits = FromVT.getSizeInBits();
-  unsigned ToBits = ToVT.getSizeInBits();
+  unsigned FromBits = FromVT.getFixedSizeInBits();
+  unsigned ToBits = ToVT.getFixedSizeInBits();
   return FromBits > ToBits;
 }

@@ -2286,7 +2286,8 @@
       C.Op1.getOpcode() == ISD::Constant &&
       cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
     auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
-    if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {
+    if (L->getMemoryVT().getStoreSizeInBits().getFixedSize() <=
+        C.Op0.getValueSizeInBits().getFixedSize()) {
       unsigned Type = L->getExtensionType();
       if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
           (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
diff --git a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
--- a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp
@@ -593,8 +593,9 @@
 unsigned SystemZTTIImpl::
 getVectorTruncCost(Type *SrcTy, Type *DstTy) {
   assert (SrcTy->isVectorTy() && DstTy->isVectorTy());
-  assert (SrcTy->getPrimitiveSizeInBits() > DstTy->getPrimitiveSizeInBits() &&
-          "Packing must reduce size of vector type.");
+  assert(SrcTy->getPrimitiveSizeInBits().getFixedSize() >
+             DstTy->getPrimitiveSizeInBits().getFixedSize() &&
+         "Packing must reduce size of vector type.");
   assert(cast<FixedVectorType>(SrcTy)->getNumElements() ==
              cast<FixedVectorType>(DstTy)->getNumElements() &&
          "Packing should not change number of elements.");
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -4517,7 +4517,8 @@
   if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
     return false;

-  if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) {
+  if (VA.getLocVT().getFixedSizeInBits() >
+      Arg.getValueSizeInBits().getFixedSize()) {
     // If the argument location is wider than the argument type, check that any
     // extension flags match.
     if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
@@ -5865,7 +5866,7 @@
 static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
                               const X86Subtarget &Subtarget, SelectionDAG &DAG,
                               const SDLoc &dl) {
-  assert(Vec.getValueSizeInBits() < VT.getSizeInBits() &&
+  assert(Vec.getValueSizeInBits().getFixedSize() < VT.getFixedSizeInBits() &&
          Vec.getValueType().getScalarType() == VT.getScalarType() &&
          "Unsupported vector widening type");
   SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
@@ -7499,7 +7500,8 @@

   // Subvector shuffle inputs must not be larger than the subvector.
   if (llvm::any_of(SubInputs, [SubVT](SDValue SubInput) {
-        return SubVT.getSizeInBits() < SubInput.getValueSizeInBits();
+        return SubVT.getFixedSizeInBits() <
+               SubInput.getValueSizeInBits().getFixedSize();
       }))
     return false;

@@ -22674,7 +22676,7 @@
           Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
     }

-    if (VT.getSizeInBits() > Op.getSimpleValueType().getSizeInBits()) {
+    if (VT.getFixedSizeInBits() >
+        Op.getSimpleValueType().getFixedSizeInBits()) {
       // We emitted a compare with an XMM/YMM result. Finish converting to a
       // mask register using a vptestm.
       EVT CastVT = EVT(VT).changeVectorElementTypeToInteger();
@@ -23647,7 +23650,7 @@
   MVT SVT = VT.getVectorElementType();
   MVT InSVT = InVT.getVectorElementType();
-  assert(SVT.getSizeInBits() > InSVT.getSizeInBits());
+  assert(SVT.getFixedSizeInBits() > InSVT.getFixedSizeInBits());

   if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
     return SDValue();

@@ -36695,7 +36698,8 @@
     // Share broadcast with the longest vector and extract low subvector (free).
     for (SDNode *User : Src->uses())
       if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
-          User->getValueSizeInBits(0) > VT.getSizeInBits()) {
+          User->getValueSizeInBits(0).getFixedSize() >
+              VT.getFixedSizeInBits()) {
         return extractSubVector(SDValue(User, 0), 0, DAG, DL,
                                 VT.getSizeInBits());
       }
@@ -44448,7 +44452,7 @@
   // InScalarVT is the intermediate type in AVG pattern and it should be greater
   // than the original input type (i8/i16).
   EVT InScalarVT = InVT.getVectorElementType();
-  if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
+  if (InScalarVT.getFixedSizeInBits() <= ScalarVT.getFixedSizeInBits())
     return SDValue();

   if (!Subtarget.hasSSE2())
@@ -49084,7 +49088,8 @@
     SDValue Ins = SubVec.getOperand(0);
     if (isNullConstant(Ins.getOperand(2)) &&
         ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
-        Ins.getOperand(1).getValueSizeInBits() <= SubVecVT.getSizeInBits())
+        Ins.getOperand(1).getValueSizeInBits().getFixedSize() <=
+            SubVecVT.getFixedSizeInBits())
       return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
                          getZeroVector(OpVT, Subtarget, DAG, dl),
                          Ins.getOperand(1), N->getOperand(2));
@@ -49710,7 +49715,7 @@
         cast<MemIntrinsicSDNode>(User)->getMemoryVT().getSizeInBits() ==
             MemVT.getSizeInBits() &&
         !User->hasAnyUseOfValue(1) &&
-        User->getValueSizeInBits(0) > VT.getSizeInBits()) {
+        User->getValueSizeInBits(0).getFixedSize() > VT.getFixedSizeInBits()) {
       SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N),
                                          VT.getSizeInBits());
       Extract = DAG.getBitcast(VT, Extract);
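Note for reviewers (not part of the patch): every hunk above applies the same pattern. `EVT::getSizeInBits()`, `Type::getPrimitiveSizeInBits()`, and friends return a `TypeSize` rather than a plain integer so that scalable (SVE-style) vectors can report a size that is only a known minimum at compile time. Relying on the implicit `TypeSize` to `uint64_t` conversion hides the assumption that the type is fixed-width; calling `getFixedSizeInBits()` (or `.getFixedSize()` on the `TypeSize`, as in the MSP430 and SystemZ `isTruncateFree` hunks) states that assumption explicitly and asserts if a scalable type reaches the code. Below is a minimal sketch of the distinction, assuming an LLVM tree of roughly this patch's vintage to build against; the helper names `isNarrowerThan` and `sketch` are made up for illustration, and note that later LLVM releases rename `TypeSize::getFixedSize()` to `getFixedValue()`.

    #include "llvm/CodeGen/ValueTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/Support/TypeSize.h"

    using namespace llvm;

    // Made-up helper mirroring the pattern applied at each call site above:
    // compare two value types as plain integers only after asserting that
    // both are fixed-width. getFixedSizeInBits() asserts the type is not a
    // scalable vector, so a scalable type reaching fixed-size-only code
    // fails loudly instead of being compared by its known-minimum size.
    static bool isNarrowerThan(EVT VT, EVT OtherVT) {
      return VT.getFixedSizeInBits() < OtherVT.getFixedSizeInBits();
    }

    static void sketch(LLVMContext &Ctx) {
      EVT V4I32 = EVT::getVectorVT(Ctx, MVT::i32, 4); // v4i32, 128 bits
      EVT NXV4I32 =
          EVT::getVectorVT(Ctx, MVT::i32, 4, /*IsScalable=*/true); // nxv4i32

      TypeSize TS = NXV4I32.getSizeInBits();
      (void)TS.getKnownMinSize(); // 128: the only size known at compile time.
      // TS.getFixedSize() would assert here because TS.isScalable() is true.
      // The implicit TypeSize -> uint64_t conversion in the old code has the
      // same failure mode; it just hides the assumption at the call site.

      (void)isNarrowerThan(V4I32, EVT::getVectorVT(Ctx, MVT::i32, 8)); // ok
    }

The mechanical rewrite is therefore behavior-preserving for every fixed-width type these backends handle today, while turning any future scalable-vector leak into an immediate assertion failure rather than a silent miscompare.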