diff --git a/llvm/include/llvm/Support/TypeSize.h b/llvm/include/llvm/Support/TypeSize.h
--- a/llvm/include/llvm/Support/TypeSize.h
+++ b/llvm/include/llvm/Support/TypeSize.h
@@ -311,9 +311,16 @@
 // the exact size. If the type is a scalable vector, it will represent the known
 // minimum size.
 class TypeSize : public details::FixedOrScalableQuantity<TypeSize, uint64_t> {
+  using UP = details::FixedOrScalableQuantity<TypeSize, uint64_t>;
+  TypeSize(const FixedOrScalableQuantity &V) : FixedOrScalableQuantity(V) {}
+  // Make 'getFixedValue' private, it is exposed as 'getFixedSize' below.
+  using UP::getFixedValue;
+  // Make 'getKnownMinValue' private, it is exposed as 'getKnownMinSize' below.
+  using UP::getKnownMinValue;
+
 public:
   constexpr TypeSize(ScalarTy Quantity, bool Scalable)
       : FixedOrScalableQuantity(Quantity, Scalable) {}
@@ -399,7 +406,7 @@
 /// Similar to the alignTo functions in MathExtras.h
 inline constexpr TypeSize alignTo(TypeSize Size, uint64_t Align) {
   assert(Align != 0u && "Align must be non-zero");
-  return {(Size.getKnownMinValue() + Align - 1) / Align * Align,
+  return {(Size.getKnownMinSize() + Align - 1) / Align * Align,
           Size.isScalable()};
 }
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -599,7 +599,7 @@
     return nullptr;
 
   // If we're not accessing anything in this constant, the result is undefined.
-  if (Offset >= (int64_t)InitializerSize.getFixedValue())
+  if (Offset >= (int64_t)InitializerSize.getFixedSize())
     return PoisonValue::get(IntType);
 
   unsigned char RawBytes[32] = {0};
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -408,7 +408,7 @@
   TypeSize TySize = DL.getTypeStoreSize(Ty);
   if (TySize.isScalable())
     return false;
-  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
+  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedSize());
   return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                      TLI);
 }
diff --git a/llvm/lib/CodeGen/Analysis.cpp b/llvm/lib/CodeGen/Analysis.cpp
--- a/llvm/lib/CodeGen/Analysis.cpp
+++ b/llvm/lib/CodeGen/Analysis.cpp
@@ -101,7 +101,7 @@
   // Given an array type, recursively traverse the elements.
   if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
     Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
+    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedSize();
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
       ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                       StartingOffset + i * EltSize);
@@ -146,7 +146,7 @@
   // Given an array type, recursively traverse the elements.
   if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
     Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
+    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedSize();
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
       computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                        StartingOffset + i * EltSize);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4062,11 +4062,11 @@
     AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
                             DAG.getVScale(dl, IntPtr,
                                           APInt(IntPtr.getScalarSizeInBits(),
-                                                TySize.getKnownMinValue())));
+                                                TySize.getKnownMinSize())));
   else
     AllocSize =
         DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
-                    DAG.getConstant(TySize.getFixedValue(), dl, IntPtr));
+                    DAG.getConstant(TySize.getFixedSize(), dl, IntPtr));
 
   // Handle alignment. If the requested alignment is less than or equal to
   // the stack alignment, ignore it. If the size is greater than or equal to
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -218,7 +218,7 @@
       // We can't subtract a fixed size from a scalable one, so in that case
       // assume the scalable value is of minimum size.
       TypeSize NewAllocSize =
-          TypeSize::Fixed(AllocSize.getKnownMinValue()) - OffsetSize;
+          TypeSize::Fixed(AllocSize.getKnownMinSize()) - OffsetSize;
       if (HasAddressTaken(I, NewAllocSize))
         return true;
       break;
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -67,7 +67,7 @@
     getMemberOffsets()[i] = StructSize;
 
     // Consume space for this data item
-    StructSize += DL.getTypeAllocSize(Ty).getFixedValue();
+    StructSize += DL.getTypeAllocSize(Ty).getFixedSize();
   }
 
   // Add padding to the end of the struct so that it could be put in an array
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -791,7 +791,7 @@
     TypeSize TS = VT.getSizeInBits();
     // TODO: We should be able to use bic/bif too for SVE.
-    return !TS.isScalable() && TS.getFixedValue() >= 64; // vector 'bic'
+    return !TS.isScalable() && TS.getFixedSize() >= 64; // vector 'bic'
   }
 
   bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -10502,7 +10502,7 @@
     unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits();
     EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
 
-    if (SrcVTSize.getFixedValue() < VTSize) {
+    if (SrcVTSize.getFixedSize() < VTSize) {
       assert(2 * SrcVTSize == VTSize);
       // We can pad out the smaller vector for free, so if it's part of a
       // shuffle...
@@ -10512,7 +10512,7 @@
       continue;
     }
 
-    if (SrcVTSize.getFixedValue() != 2 * VTSize) {
+    if (SrcVTSize.getFixedSize() != 2 * VTSize) {
       LLVM_DEBUG(
           dbgs() << "Reshuffle failed: result vector too small to extract\n");
       return SDValue();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -650,7 +650,7 @@
       continue;
    }
     CandidateTy Candidate(GV, K.second.size(),
-                          DL.getTypeAllocSize(GV->getValueType()).getFixedValue());
+                          DL.getTypeAllocSize(GV->getValueType()).getFixedSize());
     if (MostUsed < Candidate)
       MostUsed = Candidate;
   }
diff --git a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
--- a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
@@ -1816,9 +1816,9 @@
   auto *NcTy = const_cast<Type *>(Ty);
   switch (Kind) {
   case Store:
-    return DL.getTypeStoreSize(NcTy).getFixedValue();
+    return DL.getTypeStoreSize(NcTy).getFixedSize();
   case Alloc:
-    return DL.getTypeAllocSize(NcTy).getFixedValue();
+    return DL.getTypeAllocSize(NcTy).getFixedSize();
   }
   llvm_unreachable("Unhandled SizeKind enum");
 }
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -138,7 +138,7 @@
   if (VT.getVectorMinNumElements() < MinElts)
     return;
 
-  unsigned Size = VT.getSizeInBits().getKnownMinValue();
+  unsigned Size = VT.getSizeInBits().getKnownMinSize();
   const TargetRegisterClass *RC;
   if (Size <= RISCV::RVVBitsPerBlock)
     RC = &RISCV::VRRegClass;
@@ -1589,7 +1589,7 @@
 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
   assert(VT.isScalableVector() && "Expecting a scalable vector type");
-  unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
+  unsigned KnownSize = VT.getSizeInBits().getKnownMinSize();
   if (VT.getVectorElementType() == MVT::i1)
     KnownSize *= 8;
 
@@ -5443,7 +5443,7 @@
   // Optimize for constant AVL
   if (isa<ConstantSDNode>(AVL)) {
     unsigned EltSize = VT.getScalarSizeInBits();
-    unsigned MinSize = VT.getSizeInBits().getKnownMinValue();
+    unsigned MinSize = VT.getSizeInBits().getKnownMinSize();
 
     unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
     unsigned MaxVLMAX =
@@ -6419,7 +6419,7 @@
       return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Op2);
     }
     unsigned EltSize = VecVT.getScalarSizeInBits();
-    unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
+    unsigned MinSize = VecVT.getSizeInBits().getKnownMinSize();
     unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
     unsigned MaxVLMAX =
         RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -990,7 +990,7 @@
 unsigned RISCVTTIImpl::getEstimatedVLFor(VectorType *Ty) {
   if (isa<ScalableVectorType>(Ty)) {
     const unsigned EltSize = DL.getTypeSizeInBits(Ty->getElementType());
-    const unsigned MinSize = DL.getTypeSizeInBits(Ty).getKnownMinValue();
+    const unsigned MinSize = DL.getTypeSizeInBits(Ty).getKnownMinSize();
     const unsigned VectorBits = *getVScaleForTuning() * RISCV::RVVBitsPerBlock;
     return RISCVTargetLowering::computeVLMAX(VectorBits, EltSize, MinSize);
   }
@@ -1455,7 +1455,7 @@
   TypeSize Size = DL.getTypeSizeInBits(Ty);
   if (Ty->isVectorTy()) {
     if (Size.isScalable() && ST->hasVInstructions())
-      return divideCeil(Size.getKnownMinValue(), RISCV::RVVBitsPerBlock);
+      return divideCeil(Size.getKnownMinSize(), RISCV::RVVBitsPerBlock);
 
     if (ST->useRVVForFixedLengthVectors())
       return divideCeil(Size, ST->getRealMinVLen());
diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
--- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -543,7 +543,7 @@
     if (!isAligned(I->getAlign(), Off))
       return false;
 
-    NeededDerefBytes = std::max(NeededDerefBytes, Off + Size.getFixedValue());
+    NeededDerefBytes = std::max(NeededDerefBytes, Off + Size.getFixedSize());
     NeededAlign = std::max(NeededAlign, I->getAlign());
   }
diff --git a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
--- a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
+++ b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
@@ -764,8 +764,8 @@
 namespace {
 struct TypeSizeComparator {
   bool operator()(const TypeSize &LHS, const TypeSize &RHS) const {
-    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinValue()) <
-           std::make_tuple(RHS.isScalable(), RHS.getKnownMinValue());
+    return std::make_tuple(LHS.isScalable(), LHS.getKnownMinSize()) <
+           std::make_tuple(RHS.isScalable(), RHS.getKnownMinSize());
   }
 };
 } // end anonymous namespace
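
Note (not part of the patch): every hunk after the TypeSize.h change is a mechanical rename of getFixedValue()/getKnownMinValue() call sites to getFixedSize()/getKnownMinSize(). The header hunk makes the old spellings private through the UP using-declarations, so any call site the rename missed fails to compile instead of silently mixing the two names. A minimal sketch of how the renamed accessors behave, assuming a tree with this patch applied:

    #include "llvm/Support/TypeSize.h"
    #include <cassert>

    using llvm::TypeSize;

    int main() {
      // Fixed-width type, e.g. <4 x i32>: the size is exact.
      TypeSize Fixed = TypeSize::Fixed(128);
      assert(!Fixed.isScalable() && Fixed.getFixedSize() == 128);

      // Scalable type, e.g. <vscale x 4 x i32>: only the minimum is known;
      // the runtime size is vscale times that minimum.
      TypeSize Scalable = TypeSize::Scalable(128);
      assert(Scalable.isScalable() && Scalable.getKnownMinSize() == 128);
      // Scalable.getFixedSize() would assert here: no exact size exists.
      return 0;
    }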