diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -6784,8 +6784,8 @@
   Value *Result = CGF.EmitNeonCall(F, Ops, s);
   llvm::Type *ResultType = CGF.ConvertType(E->getType());
-  if (ResultType->getPrimitiveSizeInBits().getFixedSize() <
-      Result->getType()->getPrimitiveSizeInBits().getFixedSize())
+  if (ResultType->getPrimitiveSizeInBits().getFixedValue() <
+      Result->getType()->getPrimitiveSizeInBits().getFixedValue())
     return CGF.Builder.CreateExtractElement(Result, C0);
   return CGF.Builder.CreateBitCast(Result, ResultType, s);
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -1261,7 +1261,7 @@
   if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
     Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
-                                             DstSize.getFixedSize(), CGF);
+                                             DstSize.getFixedValue(), CGF);
     SrcTy = Src.getElementType();
   }
@@ -1277,7 +1277,7 @@
   // If load is legal, just bitcast the src pointer.
   if (!SrcSize.isScalable() && !DstSize.isScalable() &&
-      SrcSize.getFixedSize() >= DstSize.getFixedSize()) {
+      SrcSize.getFixedValue() >= DstSize.getFixedValue()) {
     // Generally SrcSize is never greater than DstSize, since this means we are
     // losing bits. However, this can happen in cases where the structure has
     // additional padding, for example due to a user specified alignment.
@@ -1323,7 +1323,7 @@
   CGF.Builder.CreateMemCpy(
       Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
       Src.getPointer(), Src.getAlignment().getAsAlign(),
-      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
+      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue()));
   return CGF.Builder.CreateLoad(Tmp);
 }
@@ -1366,7 +1366,7 @@
   if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
     Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
-                                             SrcSize.getFixedSize(), CGF);
+                                             SrcSize.getFixedValue(), CGF);
     DstTy = Dst.getElementType();
   }
@@ -1393,7 +1393,7 @@
   // If store is legal, just bitcast the src pointer.
   if (isa<llvm::ScalableVectorType>(SrcTy) ||
       isa<llvm::ScalableVectorType>(DstTy) ||
-      SrcSize.getFixedSize() <= DstSize.getFixedSize()) {
+      SrcSize.getFixedValue() <= DstSize.getFixedValue()) {
     Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
     CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
   } else {
@@ -1411,7 +1411,7 @@
     CGF.Builder.CreateMemCpy(
         Dst.getPointer(), Dst.getAlignment().getAsAlign(),
         Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
-        llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
+        llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedValue()));
   }
 }
@@ -4725,7 +4725,7 @@
 static unsigned getMaxVectorWidth(const llvm::Type *Ty) {
   if (auto *VT = dyn_cast<llvm::VectorType>(Ty))
-    return VT->getPrimitiveSizeInBits().getKnownMinSize();
+    return VT->getPrimitiveSizeInBits().getKnownMinValue();
   if (auto *AT = dyn_cast<llvm::ArrayType>(Ty))
     return getMaxVectorWidth(AT->getElementType());
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -3089,7 +3089,7 @@
   // Floating-point types which fit into intptr_t are bitcast to integers
   // and then passed directly (after zero-extension, if necessary).
   if (V->getType()->isFloatingPointTy()) {
-    unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedSize();
+    unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
     if (Bits <= TargetTy->getIntegerBitWidth())
       V = Builder.CreateBitCast(V,
                                 llvm::Type::getIntNTy(getLLVMContext(), Bits));
diff --git a/clang/lib/CodeGen/CGGPUBuiltin.cpp b/clang/lib/CodeGen/CGGPUBuiltin.cpp
--- a/clang/lib/CodeGen/CGGPUBuiltin.cpp
+++ b/clang/lib/CodeGen/CGGPUBuiltin.cpp
@@ -162,7 +162,7 @@
     // amdgpu
     llvm::Constant *Size =
         llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGM.getLLVMContext()),
-                               static_cast(r.second.getFixedSize()));
+                               static_cast(r.second.getFixedValue()));
     Vec.push_back(Size);
   }
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -2477,7 +2477,7 @@
       if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
         LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
-                                      VT->getPrimitiveSizeInBits().getKnownMinSize());
+                                      VT->getPrimitiveSizeInBits().getKnownMinValue());
     } else {
       Address DestAddr = Dest.getAddress(*this);
       // Matrix types in memory are represented by arrays, but accessed through
@@ -2516,7 +2516,7 @@
     if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
       LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
-                                    VT->getPrimitiveSizeInBits().getKnownMinSize());
+                                    VT->getPrimitiveSizeInBits().getKnownMinValue());
     // Only tie earlyclobber physregs.
     if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
       InOutConstraints += llvm::utostr(i);
@@ -2606,7 +2606,7 @@
     if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
       LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
-                                    VT->getPrimitiveSizeInBits().getKnownMinSize());
+                                    VT->getPrimitiveSizeInBits().getKnownMinValue());
     ArgTypes.push_back(Arg->getType());
     ArgElemTypes.push_back(ArgElemType);
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -481,13 +481,13 @@
     if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
       LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
-                                    VT->getPrimitiveSizeInBits().getKnownMinSize());
+                                    VT->getPrimitiveSizeInBits().getKnownMinValue());
   // Update vector width based on return type.
   if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
     LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
-                                  VT->getPrimitiveSizeInBits().getKnownMinSize());
+                                  VT->getPrimitiveSizeInBits().getKnownMinValue());
   if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
     LargestVectorWidth = CurFnInfo->getMaxVectorWidth();
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -1078,7 +1078,7 @@
                         .Cases("y", "&y", "^Ym", true)
                         .Default(false);
   if (IsMMXCons && Ty->isVectorTy()) {
-    if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() !=
+    if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedValue() !=
         64) {
       // Invalid MMX constraint
       return nullptr;
@@ -2417,7 +2417,7 @@
   if (info.isDirect()) {
     llvm::Type *ty = info.getCoerceToType();
     if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
-      return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128;
+      return vectorTy->getPrimitiveSizeInBits().getFixedValue() > 128;
   }
   return false;
 }
diff --git a/llvm/include/llvm/Analysis/MemoryLocation.h b/llvm/include/llvm/Analysis/MemoryLocation.h
--- a/llvm/include/llvm/Analysis/MemoryLocation.h
+++ b/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -103,7 +103,7 @@
   static LocationSize precise(TypeSize Value) {
     if (Value.isScalable())
       return afterPointer();
-    return precise(Value.getFixedSize());
+    return precise(Value.getFixedValue());
   }
   static LocationSize upperBound(uint64_t Value) {
@@ -117,7 +117,7 @@
   static LocationSize upperBound(TypeSize Value) {
     if (Value.isScalable())
       return afterPointer();
-    return upperBound(Value.getFixedSize());
+    return upperBound(Value.getFixedValue());
   }
   /// Any location after the base pointer (but still within the underlying
@@ -283,7 +283,7 @@
   // Return the exact size if the exact size is known at compiletime,
   // otherwise return MemoryLocation::UnknownSize.
   static uint64_t getSizeOrUnknown(const TypeSize &T) {
-    return T.isScalable() ? UnknownSize : T.getFixedSize();
+    return T.isScalable() ? UnknownSize : T.getFixedValue();
   }
   MemoryLocation() : Ptr(nullptr), Size(LocationSize::beforeOrAfterPointer()) {}
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -555,7 +555,7 @@
     // trunc to a native type is free (assuming the target has compare and
     // shift-right of the same width).
     TypeSize DstSize = DL.getTypeSizeInBits(Dst);
-    if (!DstSize.isScalable() && DL.isLegalInteger(DstSize.getFixedSize()))
+    if (!DstSize.isScalable() && DL.isLegalInteger(DstSize.getFixedValue()))
       return 0;
     break;
   }
@@ -872,7 +872,7 @@
     // The max required size is the size of the vector element type
     unsigned MaxRequiredSize =
-        VT->getElementType()->getPrimitiveSizeInBits().getFixedSize();
+        VT->getElementType()->getPrimitiveSizeInBits().getFixedValue();
     unsigned MinRequiredSize = 0;
     for (unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
@@ -992,7 +992,7 @@
       if (isa<ScalableVectorType>(TargetType))
         return TTI::TCC_Basic;
       int64_t ElementSize =
-          DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize();
+          DL.getTypeAllocSize(GTI.getIndexedType()).getFixedValue();
       if (ConstIdx) {
         BaseOffset += ConstIdx->getValue().sextOrTrunc(PtrSizeBits) * ElementSize;
diff --git a/llvm/include/llvm/CodeGen/ValueTypes.h b/llvm/include/llvm/CodeGen/ValueTypes.h
--- a/llvm/include/llvm/CodeGen/ValueTypes.h
+++ b/llvm/include/llvm/CodeGen/ValueTypes.h
@@ -346,11 +346,11 @@
   /// Return the size of the specified fixed width value type in bits. The
   /// function will assert if the type is scalable.
   uint64_t getFixedSizeInBits() const {
-    return getSizeInBits().getFixedSize();
+    return getSizeInBits().getFixedValue();
   }
   uint64_t getScalarSizeInBits() const {
-    return getScalarType().getSizeInBits().getFixedSize();
+    return getScalarType().getSizeInBits().getFixedValue();
   }
   /// Return the number of bytes overwritten by a store of the specified value
@@ -361,13 +361,13 @@
   /// base size.
   TypeSize getStoreSize() const {
     TypeSize BaseSize = getSizeInBits();
-    return {(BaseSize.getKnownMinSize() + 7) / 8, BaseSize.isScalable()};
+    return {(BaseSize.getKnownMinValue() + 7) / 8, BaseSize.isScalable()};
   }
   // Return the number of bytes overwritten by a store of this value type or
   // this value type's element type in the case of a vector.
   uint64_t getScalarStoreSize() const {
-    return getScalarType().getStoreSize().getFixedSize();
+    return getScalarType().getStoreSize().getFixedValue();
   }
   /// Return the number of bits overwritten by a store of the specified value
diff --git a/llvm/include/llvm/IR/DataLayout.h b/llvm/include/llvm/IR/DataLayout.h
--- a/llvm/include/llvm/IR/DataLayout.h
+++ b/llvm/include/llvm/IR/DataLayout.h
@@ -473,7 +473,7 @@
   /// For example, returns 5 for i36 and 10 for x86_fp80.
   TypeSize getTypeStoreSize(Type *Ty) const {
     TypeSize BaseSize = getTypeSizeInBits(Ty);
-    return {divideCeil(BaseSize.getKnownMinSize(), 8), BaseSize.isScalable()};
+    return {divideCeil(BaseSize.getKnownMinValue(), 8), BaseSize.isScalable()};
   }
   /// Returns the maximum number of bits that may be overwritten by
@@ -710,7 +710,7 @@
     VectorType *VTy = cast<VectorType>(Ty);
     auto EltCnt = VTy->getElementCount();
     uint64_t MinBits = EltCnt.getKnownMinValue() *
-                       getTypeSizeInBits(VTy->getElementType()).getFixedSize();
+                       getTypeSizeInBits(VTy->getElementType()).getFixedValue();
     return TypeSize(MinBits, EltCnt.isScalable());
   }
   case Type::TargetExtTyID: {
diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h
--- a/llvm/include/llvm/IR/PatternMatch.h
+++ b/llvm/include/llvm/IR/PatternMatch.h
@@ -2490,7 +2490,7 @@
       if (GEP->getNumIndices() == 1 && isa<ScalableVectorType>(DerefTy) &&
           m_Zero().match(GEP->getPointerOperand()) &&
           m_SpecificInt(1).match(GEP->idx_begin()->get()) &&
-          DL.getTypeAllocSizeInBits(DerefTy).getKnownMinSize() == 8)
+          DL.getTypeAllocSizeInBits(DerefTy).getKnownMinValue() == 8)
         return true;
     }
   }
diff --git a/llvm/include/llvm/Support/LowLevelTypeImpl.h b/llvm/include/llvm/Support/LowLevelTypeImpl.h
--- a/llvm/include/llvm/Support/LowLevelTypeImpl.h
+++ b/llvm/include/llvm/Support/LowLevelTypeImpl.h
@@ -65,7 +65,7 @@
     assert(!ScalarTy.isVector() && "invalid vector element type");
     return LLT{ScalarTy.isPointer(), /*isVector=*/true, /*isScalar=*/false, EC,
-               ScalarTy.getSizeInBits().getFixedSize(),
+               ScalarTy.getSizeInBits().getFixedValue(),
               ScalarTy.isPointer() ? ScalarTy.getAddressSpace() : 0};
   }
@@ -166,7 +166,7 @@
   /// needed to represent the size in bits. Must only be called on sized types.
   constexpr TypeSize getSizeInBytes() const {
     TypeSize BaseSize = getSizeInBits();
-    return {(BaseSize.getKnownMinSize() + 7) / 8, BaseSize.isScalable()};
+    return {(BaseSize.getKnownMinValue() + 7) / 8, BaseSize.isScalable()};
   }
   constexpr LLT getScalarType() const {
diff --git a/llvm/include/llvm/Support/MachineValueType.h b/llvm/include/llvm/Support/MachineValueType.h
--- a/llvm/include/llvm/Support/MachineValueType.h
+++ b/llvm/include/llvm/Support/MachineValueType.h
@@ -1132,11 +1132,11 @@
   /// Return the size of the specified fixed width value type in bits. The
   /// function will assert if the type is scalable.
   uint64_t getFixedSizeInBits() const {
-    return getSizeInBits().getFixedSize();
+    return getSizeInBits().getFixedValue();
   }
   uint64_t getScalarSizeInBits() const {
-    return getScalarType().getSizeInBits().getFixedSize();
+    return getScalarType().getSizeInBits().getFixedValue();
   }
   /// Return the number of bytes overwritten by a store of the specified value
@@ -1147,13 +1147,13 @@
   /// base size.
   TypeSize getStoreSize() const {
     TypeSize BaseSize = getSizeInBits();
-    return {(BaseSize.getKnownMinSize() + 7) / 8, BaseSize.isScalable()};
+    return {(BaseSize.getKnownMinValue() + 7) / 8, BaseSize.isScalable()};
   }
   // Return the number of bytes overwritten by a store of this value type or
   // this value type's element type in the case of a vector.
   uint64_t getScalarStoreSize() const {
-    return getScalarType().getStoreSize().getFixedSize();
+    return getScalarType().getStoreSize().getFixedValue();
   }
   /// Return the number of bits overwritten by a store of the specified value
diff --git a/llvm/include/llvm/Support/TypeSize.h b/llvm/include/llvm/Support/TypeSize.h
--- a/llvm/include/llvm/Support/TypeSize.h
+++ b/llvm/include/llvm/Support/TypeSize.h
@@ -334,9 +334,6 @@
     return TypeSize(MinimumSize, true);
   }
-  constexpr ScalarTy getFixedSize() const { return getFixedValue(); }
-  constexpr ScalarTy getKnownMinSize() const { return getKnownMinValue(); }
-
   // All code for this class below this point is needed because of the
   // temporary implicit conversion to uint64_t. The operator overloads are
   // needed because otherwise the conversion of the parent class
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -614,7 +614,7 @@
         return Decomposed;
       }
-      Decomposed.Offset += AllocTypeSize.getFixedSize() *
+      Decomposed.Offset += AllocTypeSize.getFixedValue() *
                            CIdx->getValue().sextOrTrunc(MaxIndexSize);
       continue;
     }
@@ -636,7 +636,7 @@
         CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);
     // Scale by the type size.
-    unsigned TypeSize = AllocTypeSize.getFixedSize();
+    unsigned TypeSize = AllocTypeSize.getFixedValue();
     LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds());
     Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
     APInt Scale = LE.Scale.sext(MaxIndexSize);
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -562,7 +562,7 @@
     return nullptr;
   Type *MapTy = Type::getIntNTy(
-      C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedSize());
+      C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedValue());
   if (Constant *Res = FoldReinterpretLoadFromConst(C, MapTy, Offset, DL)) {
     if (Res->isNullValue() && !LoadTy->isX86_MMXTy() && !LoadTy->isX86_AMXTy())
@@ -705,7 +705,7 @@
   // Explicitly check for out-of-bounds access, so we return poison even if the
   // constant is a uniform value.
   TypeSize Size = DL.getTypeAllocSize(C->getType());
-  if (!Size.isScalable() && Offset.sge(Size.getFixedSize()))
+  if (!Size.isScalable() && Offset.sge(Size.getFixedValue()))
     return PoisonValue::get(Ty);
   // Try an offset-independent fold of a uniform value.
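For context (an illustration only, not part of the patch): the hunk in llvm/include/llvm/Support/TypeSize.h above deletes the deprecated getFixedSize()/getKnownMinSize() aliases, which is why every caller in the remaining hunks moves to getFixedValue()/getKnownMinValue(). A minimal sketch of the replacement pattern, assuming current LLVM headers; the helper name knownSizeInBits is invented for illustration:

    // Sketch only -- demonstrates the renamed TypeSize accessors.
    #include "llvm/Support/TypeSize.h"
    #include <cstdint>

    static uint64_t knownSizeInBits(llvm::TypeSize TS) {
      if (TS.isScalable())
        return TS.getKnownMinValue(); // runtime size is vscale times this minimum
      return TS.getFixedValue();      // exact size; asserts !TS.isScalable()
    }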
diff --git a/llvm/lib/Analysis/IVDescriptors.cpp b/llvm/lib/Analysis/IVDescriptors.cpp
--- a/llvm/lib/Analysis/IVDescriptors.cpp
+++ b/llvm/lib/Analysis/IVDescriptors.cpp
@@ -1585,7 +1585,7 @@
   if (TySize.isZero() || TySize.isScalable())
     return false;
-  int64_t Size = static_cast<int64_t>(TySize.getFixedSize());
+  int64_t Size = static_cast<int64_t>(TySize.getFixedValue());
   int64_t CVSize = CV->getSExtValue();
   if (CVSize % Size)
     return false;
diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -1394,7 +1394,7 @@
       Type *Ty = I.getAllocatedType();
       AllocatedSize = SaturatingMultiplyAdd(
           AllocSize->getLimitedValue(),
-          DL.getTypeAllocSize(Ty).getKnownMinSize(), AllocatedSize);
+          DL.getTypeAllocSize(Ty).getKnownMinValue(), AllocatedSize);
       if (AllocatedSize > InlineConstants::MaxSimplifiedDynamicAllocaToInline)
         HasDynamicAlloca = true;
       return false;
@@ -1405,7 +1405,7 @@
   if (I.isStaticAlloca()) {
     Type *Ty = I.getAllocatedType();
     AllocatedSize =
-        SaturatingAdd(DL.getTypeAllocSize(Ty).getKnownMinSize(), AllocatedSize);
+        SaturatingAdd(DL.getTypeAllocSize(Ty).getKnownMinValue(), AllocatedSize);
   }
   // FIXME: This is overly conservative. Dynamic allocas are inefficient for
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -267,7 +267,7 @@
   Value *Ptr = LI->getPointerOperand();
   APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
-                DL.getTypeStoreSize(LI->getType()).getFixedSize());
+                DL.getTypeStoreSize(LI->getType()).getFixedValue());
   const Align Alignment = LI->getAlign();
   Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();
@@ -539,7 +539,7 @@
     return nullptr;
   // Make sure the read bytes are contained in the memset.
-  uint64_t LoadSize = LoadTypeSize.getFixedSize();
+  uint64_t LoadSize = LoadTypeSize.getFixedValue();
   if ((Len->getValue() * 8).ult(LoadSize))
     return nullptr;
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -1441,7 +1441,7 @@
   auto &DL = Lp->getHeader()->getModule()->getDataLayout();
   TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
-  int64_t Size = AllocSize.getFixedSize();
+  int64_t Size = AllocSize.getFixedValue();
   const APInt &APStepVal = C->getAPInt();
   // Huge step value - give up.
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -750,7 +750,7 @@
   TypeSize ElemSize = DL.getTypeAllocSize(I.getAllocatedType());
   if (ElemSize.isScalable() && Options.EvalMode != ObjectSizeOpts::Mode::Min)
     return unknown();
-  APInt Size(IntTyBits, ElemSize.getKnownMinSize());
+  APInt Size(IntTyBits, ElemSize.getKnownMinValue());
   if (!I.isArrayAllocation())
     return std::make_pair(align(Size, I.getAlign()), Zero);
diff --git a/llvm/lib/Analysis/StackSafetyAnalysis.cpp b/llvm/lib/Analysis/StackSafetyAnalysis.cpp
--- a/llvm/lib/Analysis/StackSafetyAnalysis.cpp
+++ b/llvm/lib/Analysis/StackSafetyAnalysis.cpp
@@ -159,7 +159,7 @@
   ConstantRange R = ConstantRange::getEmpty(PointerSize);
   if (TS.isScalable())
     return R;
-  APInt APSize(PointerSize, TS.getFixedSize(), true);
+  APInt APSize(PointerSize, TS.getFixedValue(), true);
   if (APSize.isNonPositive())
     return R;
   if (AI.isArrayAllocation()) {
@@ -307,7 +307,7 @@
                                              TypeSize Size) {
   if (Size.isScalable())
     return UnknownRange;
-  APInt APSize(PointerSize, Size.getFixedSize(), true);
+  APInt APSize(PointerSize, Size.getFixedValue(), true);
   if (APSize.isNegative())
     return UnknownRange;
   return getAccessRange(Addr, Base,
@@ -348,7 +348,7 @@
   if (TS.isScalable())
     return false;
   auto *CalculationTy = IntegerType::getIntNTy(SE.getContext(), PointerSize);
-  const SCEV *SV = SE.getConstant(CalculationTy, TS.getFixedSize());
+  const SCEV *SV = SE.getConstant(CalculationTy, TS.getFixedValue());
   return isSafeAccess(U, AI, SV);
 }
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1367,7 +1367,7 @@
     KnownBits IndexBits(IndexBitWidth);
     computeKnownBits(Index, IndexBits, Depth + 1, Q);
     TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
-    uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
+    uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinValue();
     KnownBits ScalingFactor(IndexBitWidth);
     // Multiply by current sizeof type.
     // &A[i] == A + i * sizeof(*A[i]).
@@ -2565,16 +2565,16 @@
     // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
    // as casts that can alter the value, e.g., AddrSpaceCasts.
    if (!isa<ScalableVectorType>(I->getType()) &&
-        Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedSize() <=
-            Q.DL.getTypeSizeInBits(I->getType()).getFixedSize())
+        Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <=
+            Q.DL.getTypeSizeInBits(I->getType()).getFixedValue())
      return isKnownNonZero(I->getOperand(0), Depth, Q);
    break;
  case Instruction::PtrToInt:
    // Similar to int2ptr above, we can look through ptr2int here if the cast
    // is a no-op or an extend and not a truncate.
    if (!isa<ScalableVectorType>(I->getType()) &&
-        Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedSize() <=
-            Q.DL.getTypeSizeInBits(I->getType()).getFixedSize())
+        Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <=
+            Q.DL.getTypeSizeInBits(I->getType()).getFixedValue())
      return isKnownNonZero(I->getOperand(0), Depth, Q);
    break;
  case Instruction::Or:
@@ -4323,7 +4323,7 @@
   if (GV->getInitializer()->isNullValue()) {
     Type *GVTy = GV->getValueType();
-    uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
+    uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedValue();
     uint64_t Length = SizeInBytes / ElementSizeInBytes;
     Slice.Array = nullptr;
@@ -7479,7 +7479,7 @@
     TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
     if (Size.isScalable())
       return std::nullopt;
-    Offset += Size.getFixedSize() * OpC->getSExtValue();
+    Offset += Size.getFixedValue() * OpC->getSExtValue();
   }
   return Offset;
diff --git a/llvm/lib/CodeGen/Analysis.cpp b/llvm/lib/CodeGen/Analysis.cpp
--- a/llvm/lib/CodeGen/Analysis.cpp
+++ b/llvm/lib/CodeGen/Analysis.cpp
@@ -320,7 +320,7 @@
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min((uint64_t)DataBits,
-                         I->getType()->getPrimitiveSizeInBits().getFixedSize());
+                         I->getType()->getPrimitiveSizeInBits().getFixedValue());
      NoopInput = Op;
    } else if (auto *CB = dyn_cast<CallBase>(I)) {
      const Value *ReturnedOp = CB->getReturnedArgOperand();
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2927,8 +2927,8 @@
    //
    // If the pointer is larger than the resultant integer, then
    // as with Trunc just depend on the assembler to truncate it.
-    if (DL.getTypeAllocSize(Ty).getFixedSize() <=
-        DL.getTypeAllocSize(Op->getType()).getFixedSize())
+    if (DL.getTypeAllocSize(Ty).getFixedValue() <=
+        DL.getTypeAllocSize(Op->getType()).getFixedValue())
      return OpExpr;
    break; // Error
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2355,7 +2355,7 @@
      // to benefit from cheap constant propagation.
      Type *ScalableVectorTy =
          VectorType::get(Type::getInt8Ty(II->getContext()), 1, true);
-      if (DL->getTypeAllocSize(ScalableVectorTy).getKnownMinSize() == 8) {
+      if (DL->getTypeAllocSize(ScalableVectorTy).getKnownMinValue() == 8) {
        auto *Null = Constant::getNullValue(ScalableVectorTy->getPointerTo());
        auto *One = ConstantInt::getSigned(II->getType(), 1);
        auto *CGep =
@@ -4694,7 +4694,7 @@
      // The optimisations below currently only work for fixed offsets.
      if (TS.isScalable())
        return false;
-      int64_t TypeSize = TS.getFixedSize();
+      int64_t TypeSize = TS.getFixedValue();
      if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
        const APInt &CVal = CI->getValue();
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
--- a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -386,7 +386,7 @@
    assert(OrigRegs.size() == 1);
    LLT OrigTy = MRI.getType(OrigRegs[0]);
-    unsigned SrcSize = PartLLT.getSizeInBits().getFixedSize() * Regs.size();
+    unsigned SrcSize = PartLLT.getSizeInBits().getFixedValue() * Regs.size();
    if (SrcSize == OrigTy.getSizeInBits())
      B.buildMerge(OrigRegs[0], Regs);
    else {
diff --git a/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp b/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
--- a/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp
@@ -306,7 +306,7 @@
  bool AnyMerged = false;
  do {
    unsigned NumPow2 = PowerOf2Floor(StoresToMerge.size());
-    unsigned MaxSizeBits = NumPow2 * OrigTy.getSizeInBits().getFixedSize();
+    unsigned MaxSizeBits = NumPow2 * OrigTy.getSizeInBits().getFixedValue();
    // Compute the biggest store we can generate to handle the number of stores.
    unsigned MergeSizeBits;
    for (MergeSizeBits = MaxSizeBits; MergeSizeBits > 1; MergeSizeBits /= 2) {
@@ -352,7 +352,7 @@
  const unsigned NumStores = Stores.size();
  LLT SmallTy = MRI->getType(FirstStore->getValueReg());
  LLT WideValueTy =
-      LLT::scalar(NumStores * SmallTy.getSizeInBits().getFixedSize());
+      LLT::scalar(NumStores * SmallTy.getSizeInBits().getFixedValue());
  // For each store, compute pairwise merged debug locs.
  DebugLoc MergedLoc = Stores.front()->getDebugLoc();
diff --git a/llvm/lib/CodeGen/GlobalMerge.cpp b/llvm/lib/CodeGen/GlobalMerge.cpp
--- a/llvm/lib/CodeGen/GlobalMerge.cpp
+++ b/llvm/lib/CodeGen/GlobalMerge.cpp
@@ -225,8 +225,8 @@
  llvm::stable_sort(
      Globals, [&DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
        // We don't support scalable global variables.
-        return DL.getTypeAllocSize(GV1->getValueType()).getFixedSize() <
-               DL.getTypeAllocSize(GV2->getValueType()).getFixedSize();
+        return DL.getTypeAllocSize(GV1->getValueType()).getFixedValue() <
+               DL.getTypeAllocSize(GV2->getValueType()).getFixedValue();
      });
  // If we want to just blindly group all globals together, do so.
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -251,8 +251,8 @@
    for (MVT VT : MVT::all_valuetypes())
      if (EVT(VT).isSimple() && VT != MVT::Other &&
          TLI.isTypeLegal(EVT(VT)) &&
-          VT.getSizeInBits().getKnownMinSize() >= MaximumLegalStoreInBits)
-        MaximumLegalStoreInBits = VT.getSizeInBits().getKnownMinSize();
+          VT.getSizeInBits().getKnownMinValue() >= MaximumLegalStoreInBits)
+        MaximumLegalStoreInBits = VT.getSizeInBits().getKnownMinValue();
  }
  void ConsiderForPruning(SDNode *N) {
@@ -13280,8 +13280,8 @@
  auto AdjustBigEndianShift = [&](unsigned ShAmt) {
    unsigned LVTStoreBits =
-        LN0->getMemoryVT().getStoreSizeInBits().getFixedSize();
-    unsigned EVTStoreBits = ExtVT.getStoreSizeInBits().getFixedSize();
+        LN0->getMemoryVT().getStoreSizeInBits().getFixedValue();
+    unsigned EVTStoreBits = ExtVT.getStoreSizeInBits().getFixedValue();
    return LVTStoreBits - EVTStoreBits - ShAmt;
  };
@@ -17196,8 +17196,8 @@
  // n:th least significant byte of the stored value.
  int64_t OrigOffset = Offset;
  if (DAG.getDataLayout().isBigEndian())
-    Offset = ((int64_t)STMemType.getStoreSizeInBits().getFixedSize() -
-              (int64_t)LDMemType.getStoreSizeInBits().getFixedSize()) /
+    Offset = ((int64_t)STMemType.getStoreSizeInBits().getFixedValue() -
+              (int64_t)LDMemType.getStoreSizeInBits().getFixedValue()) /
                 8 -
             Offset;
@@ -17209,8 +17209,8 @@
  if (LdStScalable)
    STCoversLD = (Offset == 0) && LdMemSize == StMemSize;
  else
-    STCoversLD = (Offset >= 0) && (Offset * 8 + LdMemSize.getFixedSize() <=
-                                   StMemSize.getFixedSize());
+    STCoversLD = (Offset >= 0) && (Offset * 8 + LdMemSize.getFixedValue() <=
+                                   StMemSize.getFixedValue());
  auto ReplaceLd = [&](LoadSDNode *LD, SDValue Val, SDValue Chain) -> SDValue {
    if (LD->isIndexed()) {
@@ -17239,7 +17239,7 @@
    // Mask to size of LDMemType
    auto Mask =
        DAG.getConstant(APInt::getLowBitsSet(STType.getFixedSizeInBits(),
-                                             StMemSize.getFixedSize()),
+                                             StMemSize.getFixedValue()),
                        SDLoc(ST), STType);
    auto Val = DAG.getNode(ISD::AND, SDLoc(LD), LDType, ST->getValue(), Mask);
    return ReplaceLd(LD, Val, Chain);
@@ -18265,7 +18265,7 @@
    return SDValue();
  unsigned FastLD = 0, FastST = 0;
-  EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VTSize.getFixedSize());
+  EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VTSize.getFixedValue());
  if (!TLI.isOperationLegal(ISD::LOAD, IntVT) ||
      !TLI.isOperationLegal(ISD::STORE, IntVT) ||
      !TLI.isDesirableToTransformToIntegerOp(ISD::LOAD, VT) ||
@@ -19769,7 +19769,7 @@
      // If we store purely within object bounds just before its lifetime ends,
      // we can remove the store.
      if (LifetimeEndBase.contains(DAG, LifetimeEnd->getSize() * 8, StoreBase,
-                                   StoreSize.getFixedSize() * 8)) {
+                                   StoreSize.getFixedValue() * 8)) {
        LLVM_DEBUG(dbgs() << "\nRemoving store:"; StoreBase.dump();
                   dbgs() << "\nwithin LIFETIME_END of : ";
                   LifetimeEndBase.dump(); dbgs() << "\n");
@@ -21551,7 +21551,7 @@
    if (Op.getOpcode() != ISD::ZERO_EXTEND)
      return SDValue();
    unsigned CurrActiveBits =
-        Op.getOperand(0).getValueSizeInBits().getFixedSize();
+        Op.getOperand(0).getValueSizeInBits().getFixedValue();
    assert(!ActiveBits && "Already encountered non-constant-zero operand?");
    ActiveBits = CurrActiveBits;
    // We want to at least halve the element size.
@@ -22464,7 +22464,7 @@
        MachinePointerInfo(Ld->getPointerInfo().getAddrSpace());
    MMO = MF.getMachineMemOperand(Ld->getMemOperand(), MPI, StoreSize);
  } else
-    MMO = MF.getMachineMemOperand(Ld->getMemOperand(), Offset.getFixedSize(),
+    MMO = MF.getMachineMemOperand(Ld->getMemOperand(), Offset.getFixedValue(),
                                  StoreSize);
  SDValue NewLd = DAG.getLoad(VT, DL, Ld->getChain(), NewAddr, MMO);
diff --git a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -150,7 +150,7 @@
        (TFI->isStackRealignable() || (Alignment <= StackAlign))) {
      const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
      uint64_t TySize =
-          MF->getDataLayout().getTypeAllocSize(Ty).getKnownMinSize();
+          MF->getDataLayout().getTypeAllocSize(Ty).getKnownMinValue();
      TySize *= CUI->getZExtValue();   // Get total allocated size.
      if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
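A second illustration (again not part of the patch, assuming LLVM's IR headers and an arbitrary <vscale x 4 x i32> type) of why the callers above keep their isScalable() guards: on a scalable type the two accessors answer different questions.

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"

    // For <vscale x 4 x i32>, getTypeAllocSize() yields a scalable TypeSize:
    // getKnownMinValue() is 16 bytes and the real allocation is 16 * vscale,
    // while getFixedValue() would assert because the size is not fixed.
    static uint64_t minAllocBytes(const llvm::DataLayout &DL,
                                  llvm::LLVMContext &Ctx) {
      auto *VTy = llvm::ScalableVectorType::get(llvm::Type::getInt32Ty(Ctx), 4);
      return DL.getTypeAllocSize(VTy).getKnownMinValue(); // == 16
    }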
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -552,16 +552,16 @@
    // Promote to a byte-sized store with upper bits zero if not
    // storing an integral number of bytes.  For example, promote
    // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
-    EVT NVT = EVT::getIntegerVT(*DAG.getContext(), StSize.getFixedSize());
+    EVT NVT = EVT::getIntegerVT(*DAG.getContext(), StSize.getFixedValue());
    Value = DAG.getZeroExtendInReg(Value, dl, StVT);
    SDValue Result =
        DAG.getTruncStore(Chain, dl, Value, Ptr, ST->getPointerInfo(), NVT,
                          ST->getOriginalAlign(), MMOFlags, AAInfo);
    ReplaceNode(SDValue(Node, 0), Result);
-  } else if (!StVT.isVector() && !isPowerOf2_64(StWidth.getFixedSize())) {
+  } else if (!StVT.isVector() && !isPowerOf2_64(StWidth.getFixedValue())) {
    // If not storing a power-of-2 number of bits, expand as two stores.
    assert(!StVT.isVector() && "Unsupported truncstore!");
-    unsigned StWidthBits = StWidth.getFixedSize();
+    unsigned StWidthBits = StWidth.getFixedValue();
    unsigned LogStWidth = Log2_32(StWidthBits);
    assert(LogStWidth < 32);
    unsigned RoundWidth = 1 << LogStWidth;
@@ -769,10 +769,10 @@
    Value = Result;
    Chain = Ch;
-  } else if (!isPowerOf2_64(SrcWidth.getKnownMinSize())) {
+  } else if (!isPowerOf2_64(SrcWidth.getKnownMinValue())) {
    // If not loading a power-of-2 number of bits, expand as two loads.
    assert(!SrcVT.isVector() && "Unsupported extload!");
-    unsigned SrcWidthBits = SrcWidth.getFixedSize();
+    unsigned SrcWidthBits = SrcWidth.getFixedValue();
    unsigned LogSrcWidth = Log2_32(SrcWidthBits);
    assert(LogSrcWidth < 32);
    unsigned RoundWidth = 1 << LogSrcWidth;
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -763,7 +763,7 @@
  // a constant i8 operand.
  // We don't currently support the scalarization of scalable vector types.
-  assert(Result.getValueSizeInBits().getFixedSize() >=
+  assert(Result.getValueSizeInBits().getFixedValue() >=
             Op.getScalarValueSizeInBits() &&
         "Invalid type for scalarized vector");
  AnalyzeNewValue(Result);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1166,13 +1166,13 @@
                             MachinePointerInfo &MPI, SDValue &Ptr,
                             uint64_t *ScaledOffset) {
  SDLoc DL(N);
-  unsigned IncrementSize = MemVT.getSizeInBits().getKnownMinSize() / 8;
+  unsigned IncrementSize = MemVT.getSizeInBits().getKnownMinValue() / 8;
  if (MemVT.isScalableVector()) {
    SDNodeFlags Flags;
    SDValue BytesIncrement = DAG.getVScale(
        DL, Ptr.getValueType(),
-        APInt(Ptr.getValueSizeInBits().getFixedSize(), IncrementSize));
+        APInt(Ptr.getValueSizeInBits().getFixedValue(), IncrementSize));
    MPI = MachinePointerInfo(N->getPointerInfo().getAddrSpace());
    Flags.setNoUnsignedWrap(true);
    if (ScaledOffset)
@@ -1927,7 +1927,7 @@
    MPI = MachinePointerInfo(LD->getPointerInfo().getAddrSpace());
  else
    MPI = LD->getPointerInfo().getWithOffset(
-        LoMemVT.getStoreSize().getFixedSize());
+        LoMemVT.getStoreSize().getFixedValue());
  MMO = DAG.getMachineFunction().getMachineMemOperand(
      MPI, MachineMemOperand::MOLoad, MemoryLocation::UnknownSize, Alignment,
@@ -2006,7 +2006,7 @@
  Align Alignment = SLD->getOriginalAlign();
  if (LoMemVT.isScalableVector())
    Alignment = commonAlignment(
-        Alignment, LoMemVT.getSizeInBits().getKnownMinSize() / 8);
+        Alignment, LoMemVT.getSizeInBits().getKnownMinValue() / 8);
  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
      MachinePointerInfo(SLD->getPointerInfo().getAddrSpace()),
@@ -2091,7 +2091,7 @@
    MPI = MachinePointerInfo(MLD->getPointerInfo().getAddrSpace());
  else
    MPI = MLD->getPointerInfo().getWithOffset(
-        LoMemVT.getStoreSize().getFixedSize());
+        LoMemVT.getStoreSize().getFixedValue());
  MMO = DAG.getMachineFunction().getMachineMemOperand(
      MPI, MachineMemOperand::MOLoad, MemoryLocation::UnknownSize, Alignment,
@@ -3310,11 +3310,11 @@
  MachinePointerInfo MPI;
  if (LoMemVT.isScalableVector()) {
    Alignment = commonAlignment(Alignment,
-                                LoMemVT.getSizeInBits().getKnownMinSize() / 8);
+                                LoMemVT.getSizeInBits().getKnownMinValue() / 8);
    MPI = MachinePointerInfo(N->getPointerInfo().getAddrSpace());
  } else
    MPI = N->getPointerInfo().getWithOffset(
-        LoMemVT.getStoreSize().getFixedSize());
+        LoMemVT.getStoreSize().getFixedValue());
  MMO = DAG.getMachineFunction().getMachineMemOperand(
      MPI, MachineMemOperand::MOStore, MemoryLocation::UnknownSize, Alignment,
@@ -3386,7 +3386,7 @@
  Align Alignment = N->getOriginalAlign();
  if (LoMemVT.isScalableVector())
    Alignment = commonAlignment(Alignment,
-                                LoMemVT.getSizeInBits().getKnownMinSize() / 8);
+                                LoMemVT.getSizeInBits().getKnownMinValue() / 8);
  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
      MachinePointerInfo(N->getPointerInfo().getAddrSpace()),
@@ -3460,11 +3460,11 @@
  MachinePointerInfo MPI;
  if (LoMemVT.isScalableVector()) {
    Alignment = commonAlignment(
-        Alignment, LoMemVT.getSizeInBits().getKnownMinSize() / 8);
+        Alignment, LoMemVT.getSizeInBits().getKnownMinValue() / 8);
    MPI = MachinePointerInfo(N->getPointerInfo().getAddrSpace());
  } else
    MPI = N->getPointerInfo().getWithOffset(
-        LoMemVT.getStoreSize().getFixedSize());
+        LoMemVT.getStoreSize().getFixedValue());
  MMO = DAG.getMachineFunction().getMachineMemOperand(
      MPI, MachineMemOperand::MOStore,
      MemoryLocation::UnknownSize, Alignment,
@@ -6611,7 +6611,7 @@
                       unsigned WidenEx = 0) {
  EVT WidenEltVT = WidenVT.getVectorElementType();
  const bool Scalable = WidenVT.isScalableVector();
-  unsigned WidenWidth = WidenVT.getSizeInBits().getKnownMinSize();
+  unsigned WidenWidth = WidenVT.getSizeInBits().getKnownMinValue();
  unsigned WidenEltWidth = WidenEltVT.getSizeInBits();
  unsigned AlignInBits = Align*8;
@@ -6649,7 +6649,7 @@
      // Skip vector MVTs which don't match the scalable property of WidenVT.
      if (Scalable != MemVT.isScalableVector())
        continue;
-      unsigned MemVTWidth = MemVT.getSizeInBits().getKnownMinSize();
+      unsigned MemVTWidth = MemVT.getSizeInBits().getKnownMinValue();
      auto Action = TLI.getTypeAction(*DAG.getContext(), MemVT);
      if ((Action == TargetLowering::TypeLegal ||
           Action == TargetLowering::TypePromoteInteger) &&
@@ -6732,8 +6732,8 @@
  // Find the vector type that can load from.
  std::optional<EVT> FirstVT =
-      findMemType(DAG, TLI, LdWidth.getKnownMinSize(), WidenVT, LdAlign,
-                  WidthDiff.getKnownMinSize());
+      findMemType(DAG, TLI, LdWidth.getKnownMinValue(), WidenVT, LdAlign,
+                  WidthDiff.getKnownMinValue());
  if (!FirstVT)
    return SDValue();
@@ -6751,8 +6751,8 @@
      RemainingWidth -= NewVTWidth;
      if (TypeSize::isKnownLT(RemainingWidth, NewVTWidth)) {
        // The current type we are using is too large. Find a better size.
-        NewVT = findMemType(DAG, TLI, RemainingWidth.getKnownMinSize(), WidenVT,
-                            LdAlign, WidthDiff.getKnownMinSize());
+        NewVT = findMemType(DAG, TLI, RemainingWidth.getKnownMinValue(), WidenVT,
+                            LdAlign, WidthDiff.getKnownMinValue());
        if (!NewVT)
          return SDValue();
        NewVTWidth = NewVT->getSizeInBits();
@@ -6770,7 +6770,7 @@
    assert(TypeSize::isKnownLE(LdWidth, FirstVTWidth));
    if (!FirstVT->isVector()) {
      unsigned NumElts =
-          WidenWidth.getFixedSize() / FirstVTWidth.getFixedSize();
+          WidenWidth.getFixedValue() / FirstVTWidth.getFixedValue();
      EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), *FirstVT, NumElts);
      SDValue VecOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewVecVT, LdOp);
      return DAG.getNode(ISD::BITCAST, dl, WidenVT, VecOp);
@@ -6779,9 +6779,9 @@
      return LdOp;
    // TODO: We don't currently have any tests that exercise this code path.
-    assert(WidenWidth.getFixedSize() % FirstVTWidth.getFixedSize() == 0);
+    assert(WidenWidth.getFixedValue() % FirstVTWidth.getFixedValue() == 0);
    unsigned NumConcat =
-        WidenWidth.getFixedSize() / FirstVTWidth.getFixedSize();
+        WidenWidth.getFixedValue() / FirstVTWidth.getFixedValue();
    SmallVector ConcatOps(NumConcat);
    SDValue UndefVal = DAG.getUNDEF(*FirstVT);
    ConcatOps[0] = LdOp;
@@ -6844,9 +6844,9 @@
      TypeSize LdTySize = LdTy.getSizeInBits();
      TypeSize NewLdTySize = NewLdTy.getSizeInBits();
      assert(NewLdTySize.isScalable() == LdTySize.isScalable() &&
-             NewLdTySize.isKnownMultipleOf(LdTySize.getKnownMinSize()));
+             NewLdTySize.isKnownMultipleOf(LdTySize.getKnownMinValue()));
      unsigned NumOps =
-          NewLdTySize.getKnownMinSize() / LdTySize.getKnownMinSize();
+          NewLdTySize.getKnownMinValue() / LdTySize.getKnownMinValue();
      SmallVector WidenOps(NumOps);
      unsigned j = 0;
      for (; j != End-Idx; ++j)
@@ -6868,7 +6868,7 @@
  // We need to fill the rest with undefs to build the vector.
  unsigned NumOps =
-      WidenWidth.getKnownMinSize() / LdTy.getSizeInBits().getKnownMinSize();
+      WidenWidth.getKnownMinValue() / LdTy.getSizeInBits().getKnownMinValue();
  SmallVector WidenOps(NumOps);
  SDValue UndefVal = DAG.getUNDEF(LdTy);
  {
@@ -6968,7 +6968,7 @@
  while (StWidth.isNonZero()) {
    // Find the largest vector type we can store with.
    std::optional<EVT> NewVT =
-        findMemType(DAG, TLI, StWidth.getKnownMinSize(), ValVT);
+        findMemType(DAG, TLI, StWidth.getKnownMinValue(), ValVT);
    if (!NewVT)
      return false;
    MemVTs.push_back({*NewVT, 0});
@@ -7003,11 +7003,11 @@
      } while (--Count);
    } else {
      // Cast the vector to the scalar type we can store.
-      unsigned NumElts = ValWidth.getFixedSize() / NewVTWidth.getFixedSize();
+      unsigned NumElts = ValWidth.getFixedValue() / NewVTWidth.getFixedValue();
      EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewVT, NumElts);
      SDValue VecOp = DAG.getNode(ISD::BITCAST, dl, NewVecVT, ValOp);
      // Readjust index position based on new vector type.
-      Idx = Idx * ValEltWidth / NewVTWidth.getFixedSize();
+      Idx = Idx * ValEltWidth / NewVTWidth.getFixedValue();
      do {
        SDValue EOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NewVT, VecOp,
                                  DAG.getVectorIdxConstant(Idx++, dl));
@@ -7019,7 +7019,7 @@
        IncrementPointer(cast<StoreSDNode>(PartStore), NewVT, MPI, BasePtr);
      } while (--Count);
      // Restore index back to be relative to the original widen element type.
-      Idx = Idx * NewVTWidth.getFixedSize() / ValEltWidth;
+      Idx = Idx * NewVTWidth.getFixedValue() / ValEltWidth;
    }
  }
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2348,7 +2348,7 @@
    StackID = TFI->getStackIDForScalableVectors();
  // The stack id gives an indication of whether the object is scalable or
  // not, so it's safe to pass in the minimum size here.
-  int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinSize(), Alignment,
+  int FrameIdx = MFI.CreateStackObject(Bytes.getKnownMinValue(), Alignment,
                                       false, nullptr, StackID);
  return getFrameIndex(FrameIdx, TLI->getFrameIndexTy(getDataLayout()));
 }
@@ -2367,7 +2367,7 @@
         "Don't know how to choose the maximum size when creating a stack "
         "temporary");
  TypeSize Bytes =
-      VT1Size.getKnownMinSize() > VT2Size.getKnownMinSize() ? VT1Size : VT2Size;
+      VT1Size.getKnownMinValue() > VT2Size.getKnownMinValue() ? VT1Size : VT2Size;
  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
@@ -6893,10 +6893,10 @@
  if (Offset.isScalable())
    Index = getVScale(DL, Base.getValueType(),
-                      APInt(Base.getValueSizeInBits().getFixedSize(),
-                            Offset.getKnownMinSize()));
+                      APInt(Base.getValueSizeInBits().getFixedValue(),
+                            Offset.getKnownMinValue()));
  else
-    Index = getConstant(Offset.getFixedSize(), DL, VT);
+    Index = getConstant(Offset.getFixedValue(), DL, VT);
  return getMemBasePlusOffset(Base, Index, DL, Flags);
 }
@@ -11099,7 +11099,7 @@
    // the MMO. This is because the MMO might indicate only a possible address
    // range instead of specifying the affected memory addresses precisely.
    // TODO: Make MachineMemOperands aware of scalable vectors.
-    assert(memvt.getStoreSize().getKnownMinSize() <= MMO->getSize() &&
+    assert(memvt.getStoreSize().getKnownMinValue() <= MMO->getSize() &&
           "Size mismatch!");
  }
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3948,7 +3948,7 @@
          DAG.getDataLayout().getTypeAllocSize(GTI.getIndexedType());
      // We intentionally mask away the high bits here; ElementSize may not
      // fit in IdxTy.
-      APInt ElementMul(IdxSize, ElementSize.getKnownMinSize());
+      APInt ElementMul(IdxSize, ElementSize.getKnownMinValue());
      bool ElementScalable = ElementSize.isScalable();
      // If this is a scalar constant or a splat vector of constants,
@@ -10145,7 +10145,7 @@
      ISD::OutputArg MyFlags(
          Flags, Parts[j].getValueType().getSimpleVT(), VT,
          i < CLI.NumFixedArgs, i,
-          j * Parts[j].getValueType().getStoreSize().getKnownMinSize());
+          j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
      if (NumParts > 1 && j == 0)
        MyFlags.Flags.setSplit();
      else if (j != 0) {
@@ -10673,7 +10673,7 @@
      // are responsible for handling scalable vector arguments and
      // return values.
      ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
-                            ArgNo, PartBase+i*RegisterVT.getStoreSize().getKnownMinSize());
+                            ArgNo, PartBase+i*RegisterVT.getStoreSize().getKnownMinValue());
      if (NumRegs > 1 && i == 0)
        MyFlags.Flags.setSplit();
      // if it isn't first piece, alignment must be 1
@@ -10686,7 +10686,7 @@
    }
    if (NeedsRegBlock && Value == NumValues - 1)
      Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
-    PartBase += VT.getStoreSize().getKnownMinSize();
+    PartBase += VT.getStoreSize().getKnownMinValue();
  }
 }
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -9343,7 +9343,7 @@
  } else if (DataVT.isScalableVector()) {
    Increment = DAG.getVScale(DL, AddrVT,
                              APInt(AddrVT.getFixedSizeInBits(),
-                                    DataVT.getStoreSize().getKnownMinSize()));
+                                    DataVT.getStoreSize().getKnownMinValue()));
  } else
    Increment = DAG.getConstant(DataVT.getStoreSize(), DL, AddrVT);
@@ -10341,7 +10341,7 @@
  // Store the hi part of CONCAT_VECTORS(V1, V2)
  SDValue OffsetToV2 = DAG.getVScale(
      DL, PtrVT,
-      APInt(PtrVT.getFixedSizeInBits(), VT.getStoreSize().getKnownMinSize()));
+      APInt(PtrVT.getFixedSizeInBits(), VT.getStoreSize().getKnownMinValue()));
  SDValue StackPtr2 = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, OffsetToV2);
  SDValue StoreV2 = DAG.getStore(StoreV1, DL, V2, StackPtr2, PtrInfo);
@@ -10364,7 +10364,7 @@
  if (TrailingElts > VT.getVectorMinNumElements()) {
    SDValue VLBytes = DAG.getVScale(
        DL, PtrVT,
-        APInt(PtrVT.getFixedSizeInBits(), VT.getStoreSize().getKnownMinSize()));
+        APInt(PtrVT.getFixedSizeInBits(), VT.getStoreSize().getKnownMinValue()));
    TrailingBytes = DAG.getNode(ISD::UMIN, DL, PtrVT, TrailingBytes, VLBytes);
  }
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -1627,7 +1627,7 @@
  if (EVT(DestVT).bitsLT(NewVT)) {  // Value is expanded, e.g. i64 -> i16.
    TypeSize NewVTSize = NewVT.getSizeInBits();
    // Convert sizes such as i33 to i64.
-    if (!isPowerOf2_32(NewVTSize.getKnownMinSize()))
+    if (!isPowerOf2_32(NewVTSize.getKnownMinValue()))
      NewVTSize = NewVTSize.coefficientNextPowerOf2();
    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
  }
diff --git a/llvm/lib/CodeGen/TypePromotion.cpp b/llvm/lib/CodeGen/TypePromotion.cpp
--- a/llvm/lib/CodeGen/TypePromotion.cpp
+++ b/llvm/lib/CodeGen/TypePromotion.cpp
@@ -774,7 +774,7 @@
 bool TypePromotionImpl::TryToPromote(Value *V, unsigned PromotedWidth,
                                     const LoopInfo &LI) {
  Type *OrigTy = V->getType();
-  TypeSize = OrigTy->getPrimitiveSizeInBits().getFixedSize();
+  TypeSize = OrigTy->getPrimitiveSizeInBits().getFixedValue();
  SafeToPromote.clear();
  SafeWrap.clear();
@@ -919,7 +919,7 @@
  const TargetSubtargetInfo *SubtargetInfo = TM->getSubtargetImpl(F);
  const TargetLowering *TLI = SubtargetInfo->getTargetLowering();
  RegisterBitWidth =
-      TTI.getRegisterBitWidth(TargetTransformInfo::RGK_Scalar).getFixedSize();
+      TTI.getRegisterBitWidth(TargetTransformInfo::RGK_Scalar).getFixedValue();
  Ctx = &F.getParent()->getContext();
  // Return the preferred integer width of the instruction, or zero if we
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1559,7 +1559,7 @@
                            llvm::PointerType::getUnqual(Data->getType()));
  const Align Alignment =
      Aligned
-          ? Align(Data->getType()->getPrimitiveSizeInBits().getFixedSize() / 8)
+          ? Align(Data->getType()->getPrimitiveSizeInBits().getFixedValue() / 8)
          : Align(1);
  // If the mask is all ones just emit a regular store.
@@ -1581,7 +1581,7 @@
  Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
  const Align Alignment =
      Aligned
-          ? Align(Passthru->getType()->getPrimitiveSizeInBits().getFixedSize() /
+          ? Align(Passthru->getType()->getPrimitiveSizeInBits().getFixedValue() /
                  8)
          : Align(1);
@@ -2135,7 +2135,7 @@
                                       "cast");
    StoreInst *SI = Builder.CreateAlignedStore(
        Arg1, BC,
-        Align(Arg1->getType()->getPrimitiveSizeInBits().getFixedSize() / 8));
+        Align(Arg1->getType()->getPrimitiveSizeInBits().getFixedValue() / 8));
    SI->setMetadata(M->getMDKindID("nontemporal"), Node);
    // Remove intrinsic.
@@ -3475,7 +3475,7 @@
        Ptr, PointerType::getUnqual(CI->getType()), "cast");
    LoadInst *LI = Builder.CreateAlignedLoad(
        CI->getType(), BC,
-        Align(CI->getType()->getPrimitiveSizeInBits().getFixedSize() / 8));
+        Align(CI->getType()->getPrimitiveSizeInBits().getFixedValue() / 8));
    LI->setMetadata(M->getMDKindID("nontemporal"), Node);
    Rep = LI;
  } else if (IsX86 && (Name.startswith("fma.vfmadd.") ||
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -783,7 +783,7 @@
  case Type::PPC_FP128TyID:
  case Type::FP128TyID:
  case Type::X86_FP80TyID: {
-    unsigned BitWidth = getTypeSizeInBits(Ty).getFixedSize();
+    unsigned BitWidth = getTypeSizeInBits(Ty).getFixedValue();
    auto I = findAlignmentLowerBound(FLOAT_ALIGN, BitWidth);
    if (I != Alignments.end() && I->AlignType == FLOAT_ALIGN &&
        I->TypeBitWidth == BitWidth)
@@ -800,7 +800,7 @@
  case Type::X86_MMXTyID:
  case Type::FixedVectorTyID:
  case Type::ScalableVectorTyID: {
-    unsigned BitWidth = getTypeSizeInBits(Ty).getKnownMinSize();
+    unsigned BitWidth = getTypeSizeInBits(Ty).getKnownMinValue();
    auto I = findAlignmentLowerBound(VECTOR_ALIGN, BitWidth);
    if (I != Alignments.end() && I->AlignType == VECTOR_ALIGN &&
        I->TypeBitWidth == BitWidth)
@@ -812,7 +812,7 @@
    // We're only calculating a natural alignment, so it doesn't have to be
    // based on the full size for scalable vectors. Using the minimum element
    // count should be enough here.
-    return Align(PowerOf2Ceil(getTypeStoreSize(Ty).getKnownMinSize()));
+    return Align(PowerOf2Ceil(getTypeStoreSize(Ty).getKnownMinValue()));
  }
  case Type::X86_AMXTyID:
    return Align(64);
@@ -938,7 +938,7 @@
  if (auto *VecTy = dyn_cast<VectorType>(ElemTy)) {
    ElemTy = VecTy->getElementType();
-    unsigned ElemSizeInBits = getTypeSizeInBits(ElemTy).getFixedSize();
+    unsigned ElemSizeInBits = getTypeSizeInBits(ElemTy).getFixedValue();
    // GEPs over non-multiple of 8 size vector elements are invalid.
    if (ElemSizeInBits % 8 != 0)
      return std::nullopt;
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -3594,7 +3594,7 @@
  // Could still have vectors of pointers if the number of elements doesn't
  // match
-  if (SrcBits.getKnownMinSize() == 0 || DestBits.getKnownMinSize() == 0)
+  if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
    return false;
  if (SrcBits != DestBits)
diff --git a/llvm/lib/IR/Type.cpp b/llvm/lib/IR/Type.cpp
--- a/llvm/lib/IR/Type.cpp
+++ b/llvm/lib/IR/Type.cpp
@@ -117,18 +117,18 @@
  // 64-bit fixed width vector types can be losslessly converted to x86mmx.
  if (((isa<FixedVectorType>(this)) && Ty->isX86_MMXTy()) &&
-      getPrimitiveSizeInBits().getFixedSize() == 64)
+      getPrimitiveSizeInBits().getFixedValue() == 64)
    return true;
  if ((isX86_MMXTy() && isa<FixedVectorType>(Ty)) &&
-      Ty->getPrimitiveSizeInBits().getFixedSize() == 64)
+      Ty->getPrimitiveSizeInBits().getFixedValue() == 64)
    return true;
  // 8192-bit fixed width vector types can be losslessly converted to x86amx.
  if (((isa<FixedVectorType>(this)) && Ty->isX86_AMXTy()) &&
-      getPrimitiveSizeInBits().getFixedSize() == 8192)
+      getPrimitiveSizeInBits().getFixedValue() == 8192)
    return true;
  if ((isX86_AMXTy() && isa<FixedVectorType>(Ty)) &&
-      Ty->getPrimitiveSizeInBits().getFixedSize() == 8192)
+      Ty->getPrimitiveSizeInBits().getFixedValue() == 8192)
    return true;
  // At this point we have only various mismatches of the first class types
@@ -179,7 +179,7 @@
    ElementCount EC = VTy->getElementCount();
    TypeSize ETS = VTy->getElementType()->getPrimitiveSizeInBits();
    assert(!ETS.isScalable() && "Vector type should have fixed-width elements");
-    return {ETS.getFixedSize() * EC.getKnownMinValue(), EC.isScalable()};
+    return {ETS.getFixedValue() * EC.getKnownMinValue(), EC.isScalable()};
  }
  default: return TypeSize::Fixed(0);
  }
@@ -187,7 +187,7 @@
 unsigned Type::getScalarSizeInBits() const {
  // It is safe to assume that the scalar types have a fixed size.
-  return getScalarType()->getPrimitiveSizeInBits().getFixedSize();
+  return getScalarType()->getPrimitiveSizeInBits().getFixedValue();
 }
 int Type::getFPMantissaWidth() const {
diff --git a/llvm/lib/IR/Value.cpp b/llvm/lib/IR/Value.cpp
--- a/llvm/lib/IR/Value.cpp
+++ b/llvm/lib/IR/Value.cpp
@@ -855,7 +855,7 @@
    if (Type *ArgMemTy = A->getPointeeInMemoryValueType()) {
      if (ArgMemTy->isSized()) {
        // FIXME: Why isn't this the type alloc size?
-        DerefBytes = DL.getTypeStoreSize(ArgMemTy).getKnownMinSize();
+        DerefBytes = DL.getTypeStoreSize(ArgMemTy).getKnownMinValue();
      }
    }
  }
@@ -899,7 +899,7 @@
  } else if (auto *AI = dyn_cast<AllocaInst>(this)) {
    if (!AI->isArrayAllocation()) {
      DerefBytes =
-          DL.getTypeStoreSize(AI->getAllocatedType()).getKnownMinSize();
+          DL.getTypeStoreSize(AI->getAllocatedType()).getKnownMinValue();
      CanBeNull = false;
      CanBeFreed = false;
    }
@@ -907,7 +907,7 @@
    if (GV->getValueType()->isSized() && !GV->hasExternalWeakLinkage()) {
      // TODO: Don't outright reject hasExternalWeakLinkage but set the
      // CanBeNull flag.
-      DerefBytes = DL.getTypeStoreSize(GV->getValueType()).getFixedSize();
+      DerefBytes = DL.getTypeStoreSize(GV->getValueType()).getFixedValue();
      CanBeNull = false;
      CanBeFreed = false;
    }
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -3945,7 +3945,7 @@
 // vector types larger than NEON don't have a matching SubRegIndex.
 static SDNode *extractSubReg(SelectionDAG *DAG, EVT VT, SDValue V) {
  assert(V.getValueType().isScalableVector() &&
-         V.getValueType().getSizeInBits().getKnownMinSize() ==
+         V.getValueType().getSizeInBits().getKnownMinValue() ==
             AArch64::SVEBitsPerBlock &&
         "Expected to extract from a packed scalable vector!");
  assert(VT.isFixedLengthVector() &&
@@ -3972,7 +3972,7 @@
 // vector types larger than NEON don't have a matching SubRegIndex.
 static SDNode *insertSubReg(SelectionDAG *DAG, EVT VT, SDValue V) {
  assert(VT.isScalableVector() &&
-         VT.getSizeInBits().getKnownMinSize() == AArch64::SVEBitsPerBlock &&
+         VT.getSizeInBits().getKnownMinValue() == AArch64::SVEBitsPerBlock &&
         "Expected to insert into a packed scalable vector!");
  assert(V.getValueType().isFixedLengthVector() &&
         "Expected to insert a fixed length vector!");
@@ -5697,7 +5697,7 @@
    return false;
  TypeSize TS = MemVT.getSizeInBits();
-  int64_t MemWidthBytes = static_cast<int64_t>(TS.getKnownMinSize()) / 8;
+  int64_t MemWidthBytes = static_cast<int64_t>(TS.getKnownMinValue()) / 8;
  int64_t MulImm = cast<ConstantSDNode>(VScale.getOperand(0))->getSExtValue();
  if ((MulImm % MemWidthBytes) != 0)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -205,7 +205,7 @@
  assert(VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
         "Expected legal vector type!");
  return VT.isFixedLengthVector() ||
-         VT.getSizeInBits().getKnownMinSize() == AArch64::SVEBitsPerBlock;
+         VT.getSizeInBits().getKnownMinValue() == AArch64::SVEBitsPerBlock;
 }
 // Returns true for ####_MERGE_PASSTHRU opcodes, whose operands have a leading
@@ -6548,7 +6548,7 @@
            (VA.getValVT().isScalableVector() ||
             Subtarget->isWindowsArm64EC()) &&
            "Indirect arguments should be scalable on most subtargets");
-        uint64_t PartSize = VA.getValVT().getStoreSize().getKnownMinSize();
+        uint64_t PartSize = VA.getValVT().getStoreSize().getKnownMinValue();
        unsigned NumParts = 1;
        if (Ins[i].Flags.isInConsecutiveRegs()) {
          assert(!Ins[i].Flags.isInConsecutiveRegsLast());
@@ -6570,10 +6570,10 @@
          if (PartLoad.isScalableVector()) {
            BytesIncrement = DAG.getVScale(
                DL, Ptr.getValueType(),
-                APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize));
+                APInt(Ptr.getValueSizeInBits().getFixedValue(), PartSize));
          } else {
            BytesIncrement = DAG.getConstant(
-                APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize), DL,
+                APInt(Ptr.getValueSizeInBits().getFixedValue(), PartSize), DL,
                Ptr.getValueType());
          }
          SDNodeFlags Flags;
@@ -7405,7 +7405,7 @@
        assert((isScalable || Subtarget->isWindowsArm64EC()) &&
               "Indirect arguments should be scalable on most subtargets");
-        uint64_t StoreSize = VA.getValVT().getStoreSize().getKnownMinSize();
+        uint64_t StoreSize = VA.getValVT().getStoreSize().getKnownMinValue();
        uint64_t PartSize = StoreSize;
        unsigned NumParts = 1;
        if (Outs[i].Flags.isInConsecutiveRegs()) {
@@ -7436,10 +7436,10 @@
          if (isScalable) {
            BytesIncrement = DAG.getVScale(
                DL, Ptr.getValueType(),
-                APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize));
+                APInt(Ptr.getValueSizeInBits().getFixedValue(), PartSize));
          } else {
            BytesIncrement = DAG.getConstant(
-                APInt(Ptr.getValueSizeInBits().getFixedSize(), PartSize), DL,
+                APInt(Ptr.getValueSizeInBits().getFixedValue(), PartSize), DL,
                Ptr.getValueType());
          }
          SDNodeFlags Flags;
@@ -11718,7 +11718,7 @@
    return SDValue();
  // Current lowering only supports the SVE-ACLE types.
-  if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock)
+  if (VT.getSizeInBits().getKnownMinValue() != AArch64::SVEBitsPerBlock)
    return SDValue();
  // The DUPQ operation is indepedent of element type so normalise to i64s.
@@ -13650,8 +13650,8 @@ bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) return false; - uint64_t NumBits1 = Ty1->getPrimitiveSizeInBits().getFixedSize(); - uint64_t NumBits2 = Ty2->getPrimitiveSizeInBits().getFixedSize(); + uint64_t NumBits1 = Ty1->getPrimitiveSizeInBits().getFixedValue(); + uint64_t NumBits2 = Ty2->getPrimitiveSizeInBits().getFixedValue(); return NumBits1 > NumBits2; } bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { @@ -13752,7 +13752,7 @@ // Get the shift amount based on the scaling factor: // log2(sizeof(IdxTy)) - log2(8). uint64_t ShiftAmt = - countTrailingZeros(DL.getTypeStoreSizeInBits(IdxTy).getFixedSize()) - 3; + countTrailingZeros(DL.getTypeStoreSizeInBits(IdxTy).getFixedValue()) - 3; // Is the constant foldable in the shift of the addressing mode? // I.e., shift amount is between 1 and 4 inclusive. if (ShiftAmt == 0 || ShiftAmt > 4) @@ -13788,8 +13788,8 @@ auto areTypesHalfed = [](Value *FullV, Value *HalfV) { auto *FullTy = FullV->getType(); auto *HalfTy = HalfV->getType(); - return FullTy->getPrimitiveSizeInBits().getFixedSize() == - 2 * HalfTy->getPrimitiveSizeInBits().getFixedSize(); + return FullTy->getPrimitiveSizeInBits().getFixedValue() == + 2 * HalfTy->getPrimitiveSizeInBits().getFixedValue(); }; auto extractHalf = [](Value *FullV, Value *HalfV) { @@ -17751,11 +17751,11 @@ assert(VT.isScalableVector() && "Expected a scalable vector."); // Current lowering only supports the SVE-ACLE types. - if (VT.getSizeInBits().getKnownMinSize() != AArch64::SVEBitsPerBlock) + if (VT.getSizeInBits().getKnownMinValue() != AArch64::SVEBitsPerBlock) return SDValue(); unsigned ElemSize = VT.getVectorElementType().getSizeInBits() / 8; - unsigned ByteSize = VT.getSizeInBits().getKnownMinSize() / 8; + unsigned ByteSize = VT.getSizeInBits().getKnownMinValue() / 8; EVT ByteVT = EVT::getVectorVT(Ctx, MVT::i8, ElementCount::getScalable(ByteSize)); @@ -18439,7 +18439,7 @@ SDLoc DL(N); EVT VT = N->getValueType(0); - if (VT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock) + if (VT.getSizeInBits().getKnownMinValue() > AArch64::SVEBitsPerBlock) return SDValue(); EVT ContainerVT = VT; @@ -20717,7 +20717,7 @@ MVT SrcElVT = SrcVT.getVectorElementType().getSimpleVT(); // Make sure that source data will fit into an SVE register - if (SrcVT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock) + if (SrcVT.getSizeInBits().getKnownMinValue() > AArch64::SVEBitsPerBlock) return SDValue(); // For FPs, ACLE only supports _packed_ single and double precision types. @@ -20819,7 +20819,7 @@ SDLoc DL(N); // Make sure that the loaded data will fit into an SVE register - if (RetVT.getSizeInBits().getKnownMinSize() > AArch64::SVEBitsPerBlock) + if (RetVT.getSizeInBits().getKnownMinValue() > AArch64::SVEBitsPerBlock) return SDValue(); // Depending on the addressing mode, this is either a pointer or a vector of @@ -22365,7 +22365,7 @@ const DataLayout &DL) const { if (!Ty->isArrayTy()) { const TypeSize &TySize = Ty->getPrimitiveSizeInBits(); - return TySize.isScalable() && TySize.getKnownMinSize() > 128; + return TySize.isScalable() && TySize.getKnownMinValue() > 128; } // All non aggregate members of the type must have the same type diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -2647,11 +2647,11 @@ // set to 1. 
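// ---- Editor's sketch (illustrative only, not part of the patch) ----
// The AArch64 addressing-mode hunk above derives a shift amount from an index
// type's store size; an equivalent standalone form (hypothetical helper,
// Log2_64 in place of the countTrailingZeros-minus-3 idiom) is:
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

static uint64_t indexShiftAmount(const DataLayout &DL, Type *IdxTy) {
  uint64_t StoreBytes = DL.getTypeStoreSizeInBits(IdxTy).getFixedValue() / 8;
  return Log2_64(StoreBytes); // e.g. an i64 index: 8 bytes -> shift of 3
}
// --------------------------------------------------------------------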
if (LdSt.getNumExplicitOperands() == 3) { BaseOp = &LdSt.getOperand(1); - Offset = LdSt.getOperand(2).getImm() * Scale.getKnownMinSize(); + Offset = LdSt.getOperand(2).getImm() * Scale.getKnownMinValue(); } else { assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands"); BaseOp = &LdSt.getOperand(2); - Offset = LdSt.getOperand(3).getImm() * Scale.getKnownMinSize(); + Offset = LdSt.getOperand(3).getImm() * Scale.getKnownMinValue(); } OffsetIsScalable = Scale.isScalable(); @@ -4694,7 +4694,7 @@ // Construct the complete offset. bool IsMulVL = ScaleValue.isScalable(); - unsigned Scale = ScaleValue.getKnownMinSize(); + unsigned Scale = ScaleValue.getKnownMinValue(); int64_t Offset = IsMulVL ? SOffset.getScalable() : SOffset.getFixed(); const MachineOperand &ImmOpnd = @@ -4712,7 +4712,7 @@ MaxOff)) llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal"); - Scale = ScaleValue.getKnownMinSize(); + Scale = ScaleValue.getKnownMinValue(); assert(IsMulVL == ScaleValue.isScalable() && "Unscaled opcode has different value for scalable"); @@ -7362,8 +7362,8 @@ getMemOpInfo(MI.getOpcode(), Scale, DummyWidth, MinOffset, MaxOffset); Offset += 16; // Update the offset to what it would be if we outlined. - if (Offset < MinOffset * (int64_t)Scale.getFixedSize() || - Offset > MaxOffset * (int64_t)Scale.getFixedSize()) + if (Offset < MinOffset * (int64_t)Scale.getFixedValue() || + Offset > MaxOffset * (int64_t)Scale.getFixedValue()) return false; // It's in range, so we can outline it. @@ -7839,7 +7839,7 @@ // We've pushed the return address to the stack, so add 16 to the offset. // This is safe, since we already checked if it would overflow when we // checked if this instruction was legal to outline. - int64_t NewImm = (Offset + 16) / (int64_t)Scale.getFixedSize(); + int64_t NewImm = (Offset + 16) / (int64_t)Scale.getFixedValue(); StackOffsetOperand.setImm(NewImm); } } diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp --- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp @@ -2202,7 +2202,7 @@ // SDIV/UDIV operations are lowered using SVE, then we can have less // costs. if (isa(Ty) && - cast(Ty)->getPrimitiveSizeInBits().getFixedSize() < + cast(Ty)->getPrimitiveSizeInBits().getFixedValue() < 128) { EVT VT = TLI->getValueType(DL, Ty); static const CostTblEntry DivTbl[]{ diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -21200,7 +21200,7 @@ return false; assert(VectorTy->isVectorTy() && "VectorTy is not a vector type"); - unsigned BitWidth = VectorTy->getPrimitiveSizeInBits().getFixedSize(); + unsigned BitWidth = VectorTy->getPrimitiveSizeInBits().getFixedValue(); // We can do a store + vector extract on any vector that fits perfectly in a D // or Q register. 
if (BitWidth == 64 || BitWidth == 128) { @@ -21720,11 +21720,11 @@ case HA_DOUBLE: return false; case HA_VECT64: - return VT->getPrimitiveSizeInBits().getFixedSize() == 64; + return VT->getPrimitiveSizeInBits().getFixedValue() == 64; case HA_VECT128: - return VT->getPrimitiveSizeInBits().getFixedSize() == 128; + return VT->getPrimitiveSizeInBits().getFixedValue() == 128; case HA_UNKNOWN: - switch (VT->getPrimitiveSizeInBits().getFixedSize()) { + switch (VT->getPrimitiveSizeInBits().getFixedValue()) { case 64: Base = HA_VECT64; return true; diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp --- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp +++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp @@ -1545,7 +1545,7 @@ // vmovn. if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 && VecTy->isIntOrIntVectorTy() && - DL.getTypeSizeInBits(SubVecTy).getFixedSize() <= 64) + DL.getTypeSizeInBits(SubVecTy).getFixedValue() <= 64) return 2 * BaseCost; } diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp --- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp +++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp @@ -189,11 +189,11 @@ if (Src->isVectorTy()) { VectorType *VecTy = cast(Src); - unsigned VecWidth = VecTy->getPrimitiveSizeInBits().getFixedSize(); + unsigned VecWidth = VecTy->getPrimitiveSizeInBits().getFixedValue(); if (isHVXVectorType(VecTy)) { unsigned RegWidth = getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) - .getFixedSize(); + .getFixedValue(); assert(RegWidth && "Non-zero vector register width expected"); // Cost of HVX loads. if (VecWidth % RegWidth == 0) diff --git a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp --- a/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/llvm/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -1392,8 +1392,8 @@ if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) return false; - return (Ty1->getPrimitiveSizeInBits().getFixedSize() > - Ty2->getPrimitiveSizeInBits().getFixedSize()); + return (Ty1->getPrimitiveSizeInBits().getFixedValue() > + Ty2->getPrimitiveSizeInBits().getFixedValue()); } bool MSP430TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -3983,7 +3983,7 @@ break; case 'f': // FPU or MSA register if (Subtarget.hasMSA() && type->isVectorTy() && - type->getPrimitiveSizeInBits().getFixedSize() == 128) + type->getPrimitiveSizeInBits().getFixedValue() == 128) weight = CW_Register; else if (type->isFloatTy()) weight = CW_Register; diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -1567,9 +1567,9 @@ return; if (VectorType *VTy = dyn_cast(Ty)) { if (MaxMaxAlign >= 32 && - VTy->getPrimitiveSizeInBits().getFixedSize() >= 256) + VTy->getPrimitiveSizeInBits().getFixedValue() >= 256) MaxAlign = Align(32); - else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 && + else if (VTy->getPrimitiveSizeInBits().getFixedValue() >= 128 && MaxAlign < 16) MaxAlign = Align(16); } else if (ArrayType *ATy = dyn_cast(Ty)) { diff --git a/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp 
b/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp @@ -94,7 +94,7 @@ return false; MaybeAlign MA = cast(AlignOp)->getMaybeAlignValue(); - if (MA && MA->value() < DL->getTypeStoreSize(ScalarType).getFixedSize()) + if (MA && MA->value() < DL->getTypeStoreSize(ScalarType).getFixedValue()) return false; // FIXME: Let the backend type legalize by splitting/widening? @@ -365,7 +365,7 @@ if (TS.isScalable()) return std::make_pair(nullptr, nullptr); - TypeScale = TS.getFixedSize(); + TypeScale = TS.getFixedValue(); } // We need to find a vector index to simplify. diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -13820,8 +13820,8 @@ LLVMContext &Context = *DAG.getContext(); EVT ValueEltVT = ValueVT.getVectorElementType(); EVT PartEltVT = PartVT.getVectorElementType(); - unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize(); - unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize(); + unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinValue(); + unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinValue(); if (PartVTBitSize % ValueVTBitSize == 0) { assert(PartVTBitSize >= ValueVTBitSize); // If the element types are different, bitcast to the same element type of @@ -13872,8 +13872,8 @@ SDValue Val = Parts[0]; EVT ValueEltVT = ValueVT.getVectorElementType(); EVT PartEltVT = PartVT.getVectorElementType(); - unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize(); - unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize(); + unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinValue(); + unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinValue(); if (PartVTBitSize % ValueVTBitSize == 0) { assert(PartVTBitSize >= ValueVTBitSize); EVT SameEltTypeVT = ValueVT; diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h @@ -186,7 +186,7 @@ return false; if (Alignment < - DL.getTypeStoreSize(DataType->getScalarType()).getFixedSize()) + DL.getTypeStoreSize(DataType->getScalarType()).getFixedValue()) return false; return TLI->isLegalElementTypeForRVV(DataType->getScalarType()); @@ -214,7 +214,7 @@ return false; if (Alignment < - DL.getTypeStoreSize(DataType->getScalarType()).getFixedSize()) + DL.getTypeStoreSize(DataType->getScalarType()).getFixedValue()) return false; return TLI->isLegalElementTypeForRVV(DataType->getScalarType()); diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp --- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -1022,8 +1022,8 @@ bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const { if (!FromType->isIntegerTy() || !ToType->isIntegerTy()) return false; - unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedSize(); - unsigned ToBits = ToType->getPrimitiveSizeInBits().getFixedSize(); + unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue(); + unsigned ToBits = ToType->getPrimitiveSizeInBits().getFixedValue(); return FromBits > ToBits; } @@ -2489,8 +2489,8 @@ C.Op1.getOpcode() == ISD::Constant && 
cast(C.Op1)->getZExtValue() == 0) { auto *L = cast(C.Op0.getOperand(0)); - if (L->getMemoryVT().getStoreSizeInBits().getFixedSize() <= - C.Op0.getValueSizeInBits().getFixedSize()) { + if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <= + C.Op0.getValueSizeInBits().getFixedValue()) { unsigned Type = L->getExtensionType(); if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) || (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) { diff --git a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp --- a/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp +++ b/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp @@ -647,8 +647,8 @@ unsigned SystemZTTIImpl:: getVectorTruncCost(Type *SrcTy, Type *DstTy) { assert (SrcTy->isVectorTy() && DstTy->isVectorTy()); - assert(SrcTy->getPrimitiveSizeInBits().getFixedSize() > - DstTy->getPrimitiveSizeInBits().getFixedSize() && + assert(SrcTy->getPrimitiveSizeInBits().getFixedValue() > + DstTy->getPrimitiveSizeInBits().getFixedValue() && "Packing must reduce size of vector type."); assert(cast(SrcTy)->getNumElements() == cast(DstTy)->getNumElements() && diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -2633,7 +2633,7 @@ if (MaxAlign == 16) return; if (VectorType *VTy = dyn_cast(Ty)) { - if (VTy->getPrimitiveSizeInBits().getFixedSize() == 128) + if (VTy->getPrimitiveSizeInBits().getFixedValue() == 128) MaxAlign = Align(16); } else if (ArrayType *ATy = dyn_cast(Ty)) { Align EltAlign; @@ -5060,7 +5060,7 @@ return false; if (VA.getLocVT().getFixedSizeInBits() > - Arg.getValueSizeInBits().getFixedSize()) { + Arg.getValueSizeInBits().getFixedValue()) { // If the argument location is wider than the argument type, check that any // extension flags match. if (Flags.isZExt() != MFI.isObjectZExt(FI) || @@ -6520,7 +6520,7 @@ static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements, const X86Subtarget &Subtarget, SelectionDAG &DAG, const SDLoc &dl) { - assert(Vec.getValueSizeInBits().getFixedSize() < VT.getFixedSizeInBits() && + assert(Vec.getValueSizeInBits().getFixedValue() < VT.getFixedSizeInBits() && Vec.getValueType().getScalarType() == VT.getScalarType() && "Unsupported vector widening type"); SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl) @@ -8352,7 +8352,7 @@ // Subvector shuffle inputs must not be larger than the subvector. 
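// ---- Editor's sketch (illustrative only, not part of the patch) ----
// The AArch64, MSP430 and SystemZ isTruncateFree hunks above all reduce to the
// same comparison of fixed primitive sizes:
#include "llvm/IR/Type.h"
using namespace llvm;

static bool isIntTruncFree(Type *FromTy, Type *ToTy) {
  if (!FromTy->isIntegerTy() || !ToTy->isIntegerTy())
    return false;
  // Free exactly when the destination is strictly narrower than the source.
  return FromTy->getPrimitiveSizeInBits().getFixedValue() >
         ToTy->getPrimitiveSizeInBits().getFixedValue();
}
// --------------------------------------------------------------------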
if (llvm::any_of(SubInputs, [SubVT](SDValue SubInput) { return SubVT.getFixedSizeInBits() < - SubInput.getValueSizeInBits().getFixedSize(); + SubInput.getValueSizeInBits().getFixedValue(); })) return false; @@ -40684,7 +40684,7 @@ for (SDNode *User : Src->uses()) if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST && Src == User->getOperand(0) && - User->getValueSizeInBits(0).getFixedSize() > + User->getValueSizeInBits(0).getFixedValue() > VT.getFixedSizeInBits()) { return extractSubVector(SDValue(User, 0), 0, DAG, DL, VT.getSizeInBits()); @@ -50001,7 +50001,7 @@ cast(User)->getMemoryVT().getSizeInBits() == MemVT.getSizeInBits() && !User->hasAnyUseOfValue(1) && - User->getValueSizeInBits(0).getFixedSize() > + User->getValueSizeInBits(0).getFixedValue() > RegVT.getFixedSizeInBits()) { SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N), RegVT.getSizeInBits()); @@ -55232,7 +55232,7 @@ SDValue Ins = SubVec.getOperand(0); if (isNullConstant(Ins.getOperand(2)) && ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) && - Ins.getOperand(1).getValueSizeInBits().getFixedSize() <= + Ins.getOperand(1).getValueSizeInBits().getFixedValue() <= SubVecVT.getFixedSizeInBits()) return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, getZeroVector(OpVT, Subtarget, DAG, dl), @@ -55647,7 +55647,7 @@ Src == User->getOperand(0)) { unsigned SizeInBits = VT.getFixedSizeInBits(); unsigned BroadcastSizeInBits = - User->getValueSizeInBits(0).getFixedSize(); + User->getValueSizeInBits(0).getFixedValue(); if (BroadcastSizeInBits == SizeInBits) return SDValue(User, 0); if (BroadcastSizeInBits > SizeInBits) @@ -55944,7 +55944,7 @@ cast(User)->getMemoryVT().getSizeInBits() == MemVT.getSizeInBits() && !User->hasAnyUseOfValue(1) && - User->getValueSizeInBits(0).getFixedSize() > VT.getFixedSizeInBits()) { + User->getValueSizeInBits(0).getFixedValue() > VT.getFixedSizeInBits()) { SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N), VT.getSizeInBits()); Extract = DAG.getBitcast(VT, Extract); diff --git a/llvm/lib/Target/X86/X86InterleavedAccess.cpp b/llvm/lib/Target/X86/X86InterleavedAccess.cpp --- a/llvm/lib/Target/X86/X86InterleavedAccess.cpp +++ b/llvm/lib/Target/X86/X86InterleavedAccess.cpp @@ -215,7 +215,7 @@ "VecBaseTy's size must be a multiple of 8"); const Align FirstAlignment = LI->getAlign(); const Align SubsequentAlignment = commonAlignment( - FirstAlignment, VecBaseTy->getPrimitiveSizeInBits().getFixedSize() / 8); + FirstAlignment, VecBaseTy->getPrimitiveSizeInBits().getFixedValue() / 8); Align Alignment = FirstAlignment; for (unsigned i = 0; i < NumLoads; i++) { // TODO: Support inbounds GEP. 
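// ---- Editor's sketch (illustrative only, not part of the patch) ----
// The X86 interleaved-access hunk above computes the alignment of each
// subsequent sub-load from the first load's alignment and the element stride
// in bytes; in isolation (hypothetical helper):
#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;

static Align subLoadAlign(Align FirstAlign, FixedVectorType *VecBaseTy,
                          unsigned Index) {
  uint64_t StrideBytes =
      VecBaseTy->getPrimitiveSizeInBits().getFixedValue() / 8;
  // Alignment provable at FirstAlign plus Index * StrideBytes bytes.
  return commonAlignment(FirstAlign, Index * StrideBytes);
}
// --------------------------------------------------------------------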
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp --- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp +++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp @@ -196,7 +196,7 @@ unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const { return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) - .getFixedSize(); + .getFixedValue(); } unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) { diff --git a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp --- a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp +++ b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp @@ -678,7 +678,7 @@ std::optional RetSize = A.Alloca->getAllocationSize(DL); assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n"); assert(!RetSize->isScalable() && "Scalable vectors are not yet supported"); - return RetSize->getFixedSize(); + return RetSize->getFixedValue(); }; // Put larger allocas in the front. So the larger allocas have higher // priority to merge, which can save more space potentially. Also each @@ -1086,7 +1086,7 @@ Type *Ty = FrameTy->getElementType(Index); assert(Ty->isSized() && "We can't handle type which is not sized.\n"); - SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedSize(); + SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedValue(); AlignInBits = OffsetCache[Index].first * 8; OffsetInBits = OffsetCache[Index].second * 8; diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp --- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp +++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp @@ -204,7 +204,7 @@ for (auto *I : Params) if (auto *VT = dyn_cast(I)) LargestVectorWidth = std::max( - LargestVectorWidth, VT->getPrimitiveSizeInBits().getKnownMinSize()); + LargestVectorWidth, VT->getPrimitiveSizeInBits().getKnownMinValue()); // Recompute the parameter attributes list based on the new arguments for // the function. diff --git a/llvm/lib/Transforms/IPO/Attributor.cpp b/llvm/lib/Transforms/IPO/Attributor.cpp --- a/llvm/lib/Transforms/IPO/Attributor.cpp +++ b/llvm/lib/Transforms/IPO/Attributor.cpp @@ -2602,7 +2602,7 @@ for (auto *I : NewArgumentTypes) if (auto *VT = dyn_cast(I)) LargestVectorWidth = std::max( - LargestVectorWidth, VT->getPrimitiveSizeInBits().getKnownMinSize()); + LargestVectorWidth, VT->getPrimitiveSizeInBits().getKnownMinValue()); FunctionType *OldFnTy = OldFn->getFunctionType(); Type *RetTy = OldFnTy->getReturnType(); diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp --- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp +++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp @@ -1305,7 +1305,7 @@ const DataLayout &DL = A.getDataLayout(); TypeSize AccessSize = DL.getTypeStoreSize(&Ty); if (!AccessSize.isScalable()) - Size = AccessSize.getFixedSize(); + Size = AccessSize.getFixedValue(); // Make a strictly ascending list of offsets as required by addAccess() llvm::sort(Offsets); diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp --- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp +++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp @@ -1382,8 +1382,8 @@ // and the number of bits loaded in L is less than or equal to // the number of bits stored in S. 
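// ---- Editor's sketch (illustrative only, not part of the patch) ----
// The CoroFrame hunk above unwraps AllocaInst::getAllocationSize(), which
// returns std::optional<TypeSize> (empty for variable-length arrays). A
// guarded use of the renamed accessor (hypothetical helper):
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include <cstdint>
#include <optional>
using namespace llvm;

static std::optional<uint64_t> fixedAllocaBytes(const AllocaInst &AI,
                                                const DataLayout &DL) {
  std::optional<TypeSize> Sz = AI.getAllocationSize(DL);
  if (!Sz || Sz->isScalable())
    return std::nullopt; // VLA or scalable-typed alloca: no fixed byte count
  return Sz->getFixedValue();
}
// --------------------------------------------------------------------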
return DT.dominates(S, L) && - DL.getTypeStoreSize(LTy).getFixedSize() <= - DL.getTypeStoreSize(STy).getFixedSize(); + DL.getTypeStoreSize(LTy).getFixedValue() <= + DL.getTypeStoreSize(STy).getFixedValue(); })) return false; } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp @@ -2703,7 +2703,7 @@ unsigned Elts = VecTy->getElementCount().getKnownMinValue(); // For a fixed or scalable vector, get the size in bits of N x iM; for a // scalar this is just M. - unsigned SelEltSize = SelTy->getPrimitiveSizeInBits().getKnownMinSize(); + unsigned SelEltSize = SelTy->getPrimitiveSizeInBits().getKnownMinValue(); Type *EltTy = Builder.getIntNTy(SelEltSize / Elts); SelTy = VectorType::get(EltTy, VecTy->getElementCount()); } diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp @@ -121,14 +121,14 @@ if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr; // The alloc and cast types should be either both fixed or both scalable. - uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy).getKnownMinSize(); - uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy).getKnownMinSize(); + uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy).getKnownMinValue(); + uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy).getKnownMinValue(); if (CastElTySize == 0 || AllocElTySize == 0) return nullptr; // If the allocation has multiple uses, only promote it if we're not // shrinking the amount of memory being allocated. 
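// ---- Editor's sketch (illustrative only, not part of the patch) ----
// The InstCombine select hunk above leans on getKnownMinValue() reading the
// same way for fixed and scalable vectors: for <N x iM> it yields N*M, for
// <vscale x N x iM> the per-chunk N*M. Dividing by the known-minimum lane
// count recovers the element width (hypothetical helper):
#include "llvm/IR/DerivedTypes.h"
using namespace llvm;

static Type *intElementForVector(VectorType *VecTy) {
  unsigned MinBits = VecTy->getPrimitiveSizeInBits().getKnownMinValue();
  unsigned MinLanes = VecTy->getElementCount().getKnownMinValue();
  return IntegerType::get(VecTy->getContext(), MinBits / MinLanes);
}
// --------------------------------------------------------------------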
- uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy).getKnownMinSize(); - uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy).getKnownMinSize(); + uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy).getKnownMinValue(); + uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy).getKnownMinValue(); if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr; // See if we can satisfy the modulus by pulling a scale out of the array diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -282,7 +282,7 @@ if (!GEP->isInBounds()) { Type *IntPtrTy = DL.getIntPtrType(GEP->getType()); unsigned PtrSize = IntPtrTy->getIntegerBitWidth(); - if (Idx->getType()->getPrimitiveSizeInBits().getFixedSize() > PtrSize) + if (Idx->getType()->getPrimitiveSizeInBits().getFixedValue() > PtrSize) Idx = Builder.CreateTrunc(Idx, IntPtrTy); } @@ -816,8 +816,8 @@ Type *LHSIndexTy = LOffset->getType(); Type *RHSIndexTy = ROffset->getType(); if (LHSIndexTy != RHSIndexTy) { - if (LHSIndexTy->getPrimitiveSizeInBits().getFixedSize() < - RHSIndexTy->getPrimitiveSizeInBits().getFixedSize()) { + if (LHSIndexTy->getPrimitiveSizeInBits().getFixedValue() < + RHSIndexTy->getPrimitiveSizeInBits().getFixedValue()) { ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy); } else LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp @@ -364,7 +364,7 @@ // Move all alloca's of zero byte objects to the entry block and merge them // together. Note that we only do this for alloca's, because malloc should // allocate and return a unique pointer, even for a zero byte allocation. - if (DL.getTypeAllocSize(AI.getAllocatedType()).getKnownMinSize() == 0) { + if (DL.getTypeAllocSize(AI.getAllocatedType()).getKnownMinValue() == 0) { // For a zero sized alloca there is no point in doing an array allocation. // This is helpful if the array size is a complicated expression not used // elsewhere. @@ -382,7 +382,7 @@ AllocaInst *EntryAI = dyn_cast(FirstInst); if (!EntryAI || !EntryAI->getAllocatedType()->isSized() || DL.getTypeAllocSize(EntryAI->getAllocatedType()) - .getKnownMinSize() != 0) { + .getKnownMinValue() != 0) { AI.moveBefore(FirstInst); return &AI; } @@ -780,7 +780,7 @@ return false; // Make sure that, even if the multiplication below would wrap as an // uint64_t, we still do the right thing. - if ((CS->getValue().zext(128) * APInt(128, TS.getFixedSize())) + if ((CS->getValue().zext(128) * APInt(128, TS.getFixedValue())) .ugt(MaxSize)) return false; continue; @@ -858,7 +858,7 @@ if (!AllocTy || !AllocTy->isSized()) return false; const DataLayout &DL = IC.getDataLayout(); - uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy).getFixedSize(); + uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy).getFixedValue(); // If there are more indices after the one we might replace with a zero, make // sure they're all non-negative. 
If any of them are negative, the overall diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp @@ -2878,7 +2878,7 @@ Value *V = LHS; unsigned MaskElems = Mask.size(); auto *SrcTy = cast(V->getType()); - unsigned VecBitWidth = SrcTy->getPrimitiveSizeInBits().getFixedSize(); + unsigned VecBitWidth = SrcTy->getPrimitiveSizeInBits().getFixedValue(); unsigned SrcElemBitWidth = DL.getTypeSizeInBits(SrcTy->getElementType()); assert(SrcElemBitWidth && "vector elements must have a bitwidth"); unsigned SrcNumElems = SrcTy->getNumElements(); diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp --- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp +++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -2463,7 +2463,7 @@ unsigned AS = GEP.getPointerAddressSpace(); if (GEP.getOperand(1)->getType()->getScalarSizeInBits() == DL.getIndexSizeInBits(AS)) { - uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedSize(); + uint64_t TyAllocSize = DL.getTypeAllocSize(GEPEltType).getFixedValue(); bool Matched = false; uint64_t C; @@ -2593,8 +2593,8 @@ if (GEPEltType->isSized() && StrippedPtrEltTy->isSized()) { // Check that changing the type amounts to dividing the index by a scale // factor. - uint64_t ResSize = DL.getTypeAllocSize(GEPEltType).getFixedSize(); - uint64_t SrcSize = DL.getTypeAllocSize(StrippedPtrEltTy).getFixedSize(); + uint64_t ResSize = DL.getTypeAllocSize(GEPEltType).getFixedValue(); + uint64_t SrcSize = DL.getTypeAllocSize(StrippedPtrEltTy).getFixedValue(); if (ResSize && SrcSize % ResSize == 0) { Value *Idx = GEP.getOperand(1); unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits(); @@ -2630,10 +2630,10 @@ StrippedPtrEltTy->isArrayTy()) { // Check that changing to the array element type amounts to dividing the // index by a scale factor. 
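// ---- Editor's sketch (illustrative only, not part of the patch) ----
// The GEP rewrites above, and the array-element case that follows, both come
// down to one divisibility test on fixed alloc sizes (hypothetical helper):
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
using namespace llvm;

static bool isWholeIndexScale(const DataLayout &DL, Type *NewEltTy,
                              Type *OldEltTy, uint64_t &Scale) {
  uint64_t NewSize = DL.getTypeAllocSize(NewEltTy).getFixedValue();
  uint64_t OldSize = DL.getTypeAllocSize(OldEltTy).getFixedValue();
  if (NewSize == 0 || OldSize % NewSize != 0)
    return false;
  Scale = OldSize / NewSize; // one OldEltTy step == Scale NewEltTy steps
  return true;
}
// --------------------------------------------------------------------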
- uint64_t ResSize = DL.getTypeAllocSize(GEPEltType).getFixedSize(); + uint64_t ResSize = DL.getTypeAllocSize(GEPEltType).getFixedValue(); uint64_t ArrayEltSize = DL.getTypeAllocSize(StrippedPtrEltTy->getArrayElementType()) - .getFixedSize(); + .getFixedValue(); if (ResSize && ArrayEltSize % ResSize == 0) { Value *Idx = GEP.getOperand(1); unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits(); @@ -2694,7 +2694,7 @@ BasePtrOffset.isNonNegative()) { APInt AllocSize( IdxWidth, - DL.getTypeAllocSize(AI->getAllocatedType()).getKnownMinSize()); + DL.getTypeAllocSize(AI->getAllocatedType()).getKnownMinValue()); if (BasePtrOffset.ule(AllocSize)) { return GetElementPtrInst::CreateInBounds( GEP.getSourceElementType(), PtrOp, Indices, GEP.getName()); diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp --- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -1533,7 +1533,7 @@ Type *getShadowTyNoVec(Type *ty) { if (VectorType *vt = dyn_cast(ty)) return IntegerType::get(*MS.C, - vt->getPrimitiveSizeInBits().getFixedSize()); + vt->getPrimitiveSizeInBits().getFixedValue()); return ty; } diff --git a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp --- a/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp +++ b/llvm/lib/Transforms/Instrumentation/SanitizerCoverage.cpp @@ -722,7 +722,7 @@ if (auto Comdat = getOrCreateFunctionComdat(F, TargetTriple)) Array->setComdat(Comdat); Array->setSection(getSectionName(Section)); - Array->setAlignment(Align(DL->getTypeStoreSize(Ty).getFixedSize())); + Array->setAlignment(Align(DL->getTypeStoreSize(Ty).getFixedValue())); // sancov_pcs parallels the other metadata section(s). Optimizers (e.g. // GlobalOpt/ConstantMerge) may not discard sancov_pcs and the other diff --git a/llvm/lib/Transforms/Scalar/LICM.cpp b/llvm/lib/Transforms/Scalar/LICM.cpp --- a/llvm/lib/Transforms/Scalar/LICM.cpp +++ b/llvm/lib/Transforms/Scalar/LICM.cpp @@ -1097,7 +1097,7 @@ // in bits. Also, the invariant.start should dominate the load, and we // should not hoist the load out of a loop that contains this dominating // invariant.start. - if (LocSizeInBits.getFixedSize() <= InvariantSizeInBits && + if (LocSizeInBits.getFixedValue() <= InvariantSizeInBits && DT->properlyDominates(II->getParent(), CurLoop->getHeader())) return true; } diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp --- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp +++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp @@ -496,8 +496,8 @@ // When storing out scalable vectors we bail out for now, since the code // below currently only works for constant strides. TypeSize SizeInBits = DL->getTypeSizeInBits(StoredVal->getType()); - if (SizeInBits.isScalable() || (SizeInBits.getFixedSize() & 7) || - (SizeInBits.getFixedSize() >> 32) != 0) + if (SizeInBits.isScalable() || (SizeInBits.getFixedValue() & 7) || + (SizeInBits.getFixedValue() >> 32) != 0) return LegalStoreKind::None; // See if the pointer expression is an AddRec like {base,+,1} on the current @@ -1296,7 +1296,7 @@ // Ensure that LoadBasePtr is after StoreBasePtr or before StoreBasePtr // for negative stride. LoadBasePtr shouldn't overlap with StoreBasePtr. 
int64_t LoadSize = - DL.getTypeSizeInBits(TheLoad.getType()).getFixedSize() / 8; + DL.getTypeSizeInBits(TheLoad.getType()).getFixedValue() / 8; if (BP1 != BP2 || LoadSize != int64_t(StoreSize)) return false; if ((!IsNegStride && LoadOff < StoreOff + int64_t(StoreSize)) || diff --git a/llvm/lib/Transforms/Scalar/LoopPredication.cpp b/llvm/lib/Transforms/Scalar/LoopPredication.cpp --- a/llvm/lib/Transforms/Scalar/LoopPredication.cpp +++ b/llvm/lib/Transforms/Scalar/LoopPredication.cpp @@ -454,8 +454,8 @@ Type *RangeCheckType) { if (!EnableIVTruncation) return false; - assert(DL.getTypeSizeInBits(LatchCheck.IV->getType()).getFixedSize() > - DL.getTypeSizeInBits(RangeCheckType).getFixedSize() && + assert(DL.getTypeSizeInBits(LatchCheck.IV->getType()).getFixedValue() > + DL.getTypeSizeInBits(RangeCheckType).getFixedValue() && "Expected latch check IV type to be larger than range check operand " "type!"); // The start and end values of the IV should be known. This is to guarantee @@ -475,7 +475,7 @@ // guarantees that truncating the latch check to RangeCheckType is a safe // operation. auto RangeCheckTypeBitSize = - DL.getTypeSizeInBits(RangeCheckType).getFixedSize(); + DL.getTypeSizeInBits(RangeCheckType).getFixedValue(); return Start->getAPInt().getActiveBits() < RangeCheckTypeBitSize && Limit->getAPInt().getActiveBits() < RangeCheckTypeBitSize; } @@ -492,8 +492,8 @@ if (RangeCheckType == LatchType) return LatchCheck; // For now, bail out if latch type is narrower than range type. - if (DL.getTypeSizeInBits(LatchType).getFixedSize() < - DL.getTypeSizeInBits(RangeCheckType).getFixedSize()) + if (DL.getTypeSizeInBits(LatchType).getFixedValue() < + DL.getTypeSizeInBits(RangeCheckType).getFixedValue()) return std::nullopt; if (!isSafeToTruncateWideIVType(DL, SE, LatchCheck, RangeCheckType)) return std::nullopt; diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp --- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp +++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp @@ -459,10 +459,10 @@ /// Return the estimated number of vector ops required for an operation on /// \p VT * N. unsigned getNumOps(Type *ST, unsigned N) { - return std::ceil((ST->getPrimitiveSizeInBits() * N).getFixedSize() / + return std::ceil((ST->getPrimitiveSizeInBits() * N).getFixedValue() / double(TTI.getRegisterBitWidth( TargetTransformInfo::RGK_FixedWidthVector) - .getFixedSize())); + .getFixedValue())); } /// Return the set of vectors that a matrix value is lowered to. 
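// ---- Editor's sketch (illustrative only, not part of the patch) ----
// getNumOps() above estimates how many fixed-width vector registers are needed
// for N elements of a given type; the arithmetic, with explicit parameters
// (hypothetical helper), is:
#include "llvm/Support/TypeSize.h"
#include <cmath>
using namespace llvm;

static unsigned estimateVectorOps(TypeSize EltBits, unsigned NumElts,
                                  TypeSize FixedRegBits) {
  // Fixed-width register model only; scalable registers would need their own
  // cost function.
  return static_cast<unsigned>(
      std::ceil(double(EltBits.getFixedValue() * NumElts) /
                double(FixedRegBits.getFixedValue())));
}
// --------------------------------------------------------------------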
@@ -1260,8 +1260,8 @@ bool IsScalarMatrixTransposed, FastMathFlags FMF) { const unsigned VF = std::max( TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) - .getFixedSize() / - Result.getElementType()->getPrimitiveSizeInBits().getFixedSize(), + .getFixedValue() / + Result.getElementType()->getPrimitiveSizeInBits().getFixedValue(), 1U); unsigned R = Result.getNumRows(); unsigned C = Result.getNumColumns(); @@ -1439,8 +1439,8 @@ const unsigned VF = std::max( TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) - .getFixedSize() / - EltType->getPrimitiveSizeInBits().getFixedSize(), + .getFixedValue() / + EltType->getPrimitiveSizeInBits().getFixedValue(), 1U); // Cost model for tiling diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp --- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp +++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp @@ -176,7 +176,7 @@ void addStore(int64_t OffsetFromFirst, StoreInst *SI) { TypeSize StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType()); assert(!StoreSize.isScalable() && "Can't track scalable-typed stores"); - addRange(OffsetFromFirst, StoreSize.getFixedSize(), SI->getPointerOperand(), + addRange(OffsetFromFirst, StoreSize.getFixedValue(), SI->getPointerOperand(), SI->getAlign(), SI); } diff --git a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp --- a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp +++ b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp @@ -403,8 +403,8 @@ // Replace the I-th index with LHS. IndexExprs[I] = SE->getSCEV(LHS); if (isKnownNonNegative(LHS, *DL, 0, AC, GEP, DT) && - DL->getTypeSizeInBits(LHS->getType()).getFixedSize() < - DL->getTypeSizeInBits(GEP->getOperand(I)->getType()).getFixedSize()) { + DL->getTypeSizeInBits(LHS->getType()).getFixedValue() < + DL->getTypeSizeInBits(GEP->getOperand(I)->getType()).getFixedValue()) { // Zero-extend LHS if it is non-negative. InstCombine canonicalizes sext to // zext if the source operand is proved non-negative. We should do that // consistently so that CandidateExpr more likely appears before. 
See diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp --- a/llvm/lib/Transforms/Scalar/SROA.cpp +++ b/llvm/lib/Transforms/Scalar/SROA.cpp @@ -763,7 +763,7 @@ public: SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &AS) : PtrUseVisitor(DL), - AllocSize(DL.getTypeAllocSize(AI.getAllocatedType()).getFixedSize()), + AllocSize(DL.getTypeAllocSize(AI.getAllocatedType()).getFixedValue()), AS(AS) {} private: @@ -856,7 +856,7 @@ GEPOffset += Index * APInt(Offset.getBitWidth(), - DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize()); + DL.getTypeAllocSize(GTI.getIndexedType()).getFixedValue()); } // If this index has computed an intermediate pointer which is not @@ -891,7 +891,7 @@ if (isa(LI.getType())) return PI.setAborted(&LI); - uint64_t Size = DL.getTypeStoreSize(LI.getType()).getFixedSize(); + uint64_t Size = DL.getTypeStoreSize(LI.getType()).getFixedValue(); return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile()); } @@ -905,7 +905,7 @@ if (isa(ValOp->getType())) return PI.setAborted(&SI); - uint64_t Size = DL.getTypeStoreSize(ValOp->getType()).getFixedSize(); + uint64_t Size = DL.getTypeStoreSize(ValOp->getType()).getFixedValue(); // If this memory access can be shown to *statically* extend outside the // bounds of the allocation, it's behavior is undefined, so simply @@ -1062,7 +1062,7 @@ if (LoadInst *LI = dyn_cast(I)) { Size = std::max(Size, - DL.getTypeStoreSize(LI->getType()).getFixedSize()); + DL.getTypeStoreSize(LI->getType()).getFixedValue()); continue; } if (StoreInst *SI = dyn_cast(I)) { @@ -1070,7 +1070,7 @@ if (Op == UsedI) return SI; Size = std::max(Size, - DL.getTypeStoreSize(Op->getType()).getFixedSize()); + DL.getTypeStoreSize(Op->getType()).getFixedValue()); continue; } @@ -1331,7 +1331,7 @@ if (!LoadType) return false; - APInt LoadSize = APInt(APWidth, DL.getTypeStoreSize(LoadType).getFixedSize()); + APInt LoadSize = APInt(APWidth, DL.getTypeStoreSize(LoadType).getFixedValue()); // We can only transform this if it is safe to push the loads into the // predecessor blocks. The only thing to watch out for is that we can't put @@ -1912,8 +1912,8 @@ return false; } - if (DL.getTypeSizeInBits(NewTy).getFixedSize() != - DL.getTypeSizeInBits(OldTy).getFixedSize()) + if (DL.getTypeSizeInBits(NewTy).getFixedValue() != + DL.getTypeSizeInBits(OldTy).getFixedValue()) return false; if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType()) return false; @@ -2091,13 +2091,13 @@ static bool checkVectorTypeForPromotion(Partition &P, VectorType *VTy, const DataLayout &DL) { uint64_t ElementSize = - DL.getTypeSizeInBits(VTy->getElementType()).getFixedSize(); + DL.getTypeSizeInBits(VTy->getElementType()).getFixedValue(); // While the definition of LLVM vectors is bitpacked, we don't support sizes // that aren't byte sized. if (ElementSize % 8) return false; - assert((DL.getTypeSizeInBits(VTy).getFixedSize() % 8) == 0 && + assert((DL.getTypeSizeInBits(VTy).getFixedValue() % 8) == 0 && "vector size not a multiple of element size?"); ElementSize /= 8; @@ -2135,8 +2135,8 @@ // Return if bitcast to vectors is different for total size in bits. if (!CandidateTys.empty()) { VectorType *V = CandidateTys[0]; - if (DL.getTypeSizeInBits(VTy).getFixedSize() != - DL.getTypeSizeInBits(V).getFixedSize()) { + if (DL.getTypeSizeInBits(VTy).getFixedValue() != + DL.getTypeSizeInBits(V).getFixedValue()) { CandidateTys.clear(); return; } @@ -2196,8 +2196,8 @@ // they're all integer vectors. We sort by ascending number of elements. 
auto RankVectorTypes = [&DL](VectorType *RHSTy, VectorType *LHSTy) { (void)DL; - assert(DL.getTypeSizeInBits(RHSTy).getFixedSize() == - DL.getTypeSizeInBits(LHSTy).getFixedSize() && + assert(DL.getTypeSizeInBits(RHSTy).getFixedValue() == + DL.getTypeSizeInBits(LHSTy).getFixedValue() && "Cannot have vector types of different sizes!"); assert(RHSTy->getElementType()->isIntegerTy() && "All non-integer types eliminated!"); @@ -2247,7 +2247,7 @@ Type *AllocaTy, const DataLayout &DL, bool &WholeAllocaOp) { - uint64_t Size = DL.getTypeStoreSize(AllocaTy).getFixedSize(); + uint64_t Size = DL.getTypeStoreSize(AllocaTy).getFixedValue(); uint64_t RelBegin = S.beginOffset() - AllocBeginOffset; uint64_t RelEnd = S.endOffset() - AllocBeginOffset; @@ -2272,7 +2272,7 @@ if (LI->isVolatile()) return false; // We can't handle loads that extend past the allocated memory. - if (DL.getTypeStoreSize(LI->getType()).getFixedSize() > Size) + if (DL.getTypeStoreSize(LI->getType()).getFixedValue() > Size) return false; // So far, AllocaSliceRewriter does not support widening split slice tails // in rewriteIntegerLoad. @@ -2284,7 +2284,7 @@ if (!isa(LI->getType()) && RelBegin == 0 && RelEnd == Size) WholeAllocaOp = true; if (IntegerType *ITy = dyn_cast(LI->getType())) { - if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy).getFixedSize()) + if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy).getFixedValue()) return false; } else if (RelBegin != 0 || RelEnd != Size || !canConvertValue(DL, AllocaTy, LI->getType())) { @@ -2297,7 +2297,7 @@ if (SI->isVolatile()) return false; // We can't handle stores that extend past the allocated memory. - if (DL.getTypeStoreSize(ValueTy).getFixedSize() > Size) + if (DL.getTypeStoreSize(ValueTy).getFixedValue() > Size) return false; // So far, AllocaSliceRewriter does not support widening split slice tails // in rewriteIntegerStore. @@ -2309,7 +2309,7 @@ if (!isa(ValueTy) && RelBegin == 0 && RelEnd == Size) WholeAllocaOp = true; if (IntegerType *ITy = dyn_cast(ValueTy)) { - if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy).getFixedSize()) + if (ITy->getBitWidth() < DL.getTypeStoreSizeInBits(ITy).getFixedValue()) return false; } else if (RelBegin != 0 || RelEnd != Size || !canConvertValue(DL, ValueTy, AllocaTy)) { @@ -2337,13 +2337,13 @@ /// promote the resulting alloca. static bool isIntegerWideningViable(Partition &P, Type *AllocaTy, const DataLayout &DL) { - uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy).getFixedSize(); + uint64_t SizeInBits = DL.getTypeSizeInBits(AllocaTy).getFixedValue(); // Don't create integer types larger than the maximum bitwidth. if (SizeInBits > IntegerType::MAX_INT_BITS) return false; // Don't try to handle allocas with bit-padding. 
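// ---- Editor's sketch (illustrative only, not part of the patch) ----
// "Bit-padding" in the SROA check right below means the type's declared bit
// size is smaller than the bits its in-memory store covers (e.g. i17 is 17
// bits but stores as 24). The rejected condition, standalone:
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
using namespace llvm;

static bool hasBitPadding(const DataLayout &DL, Type *Ty) {
  return DL.getTypeSizeInBits(Ty).getFixedValue() !=
         DL.getTypeStoreSizeInBits(Ty).getFixedValue();
}
// --------------------------------------------------------------------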
- if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy).getFixedSize()) + if (SizeInBits != DL.getTypeStoreSizeInBits(AllocaTy).getFixedValue()) return false; // We need to ensure that an integer type with the appropriate bitwidth can @@ -2381,13 +2381,13 @@ const Twine &Name) { LLVM_DEBUG(dbgs() << " start: " << *V << "\n"); IntegerType *IntTy = cast(V->getType()); - assert(DL.getTypeStoreSize(Ty).getFixedSize() + Offset <= - DL.getTypeStoreSize(IntTy).getFixedSize() && + assert(DL.getTypeStoreSize(Ty).getFixedValue() + Offset <= + DL.getTypeStoreSize(IntTy).getFixedValue() && "Element extends past full value"); uint64_t ShAmt = 8 * Offset; if (DL.isBigEndian()) - ShAmt = 8 * (DL.getTypeStoreSize(IntTy).getFixedSize() - - DL.getTypeStoreSize(Ty).getFixedSize() - Offset); + ShAmt = 8 * (DL.getTypeStoreSize(IntTy).getFixedValue() - + DL.getTypeStoreSize(Ty).getFixedValue() - Offset); if (ShAmt) { V = IRB.CreateLShr(V, ShAmt, Name + ".shift"); LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n"); @@ -2412,13 +2412,13 @@ V = IRB.CreateZExt(V, IntTy, Name + ".ext"); LLVM_DEBUG(dbgs() << " extended: " << *V << "\n"); } - assert(DL.getTypeStoreSize(Ty).getFixedSize() + Offset <= - DL.getTypeStoreSize(IntTy).getFixedSize() && + assert(DL.getTypeStoreSize(Ty).getFixedValue() + Offset <= + DL.getTypeStoreSize(IntTy).getFixedValue() && "Element store outside of alloca store"); uint64_t ShAmt = 8 * Offset; if (DL.isBigEndian()) - ShAmt = 8 * (DL.getTypeStoreSize(IntTy).getFixedSize() - - DL.getTypeStoreSize(Ty).getFixedSize() - Offset); + ShAmt = 8 * (DL.getTypeStoreSize(IntTy).getFixedValue() - + DL.getTypeStoreSize(Ty).getFixedValue() - Offset); if (ShAmt) { V = IRB.CreateShl(V, ShAmt, Name + ".shift"); LLVM_DEBUG(dbgs() << " shifted: " << *V << "\n"); @@ -2594,16 +2594,16 @@ IsIntegerPromotable ? Type::getIntNTy(NewAI.getContext(), DL.getTypeSizeInBits(NewAI.getAllocatedType()) - .getFixedSize()) + .getFixedValue()) : nullptr), VecTy(PromotableVecTy), ElementTy(VecTy ? VecTy->getElementType() : nullptr), - ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy).getFixedSize() / 8 + ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy).getFixedValue() / 8 : 0), PHIUsers(PHIUsers), SelectUsers(SelectUsers), IRB(NewAI.getContext(), ConstantFolder()) { if (VecTy) { - assert((DL.getTypeSizeInBits(ElementTy).getFixedSize() % 8) == 0 && + assert((DL.getTypeSizeInBits(ElementTy).getFixedValue() % 8) == 0 && "Only multiple-of-8 sized vector elements are viable"); ++NumVectorized; } @@ -2772,7 +2772,7 @@ Type *TargetTy = IsSplit ? 
Type::getIntNTy(LI.getContext(), SliceSize * 8) : LI.getType(); const bool IsLoadPastEnd = - DL.getTypeStoreSize(TargetTy).getFixedSize() > SliceSize; + DL.getTypeStoreSize(TargetTy).getFixedValue() > SliceSize; bool IsPtrAdjusted = false; Value *V; if (VecTy) { @@ -2844,7 +2844,7 @@ assert(!LI.isVolatile()); assert(LI.getType()->isIntegerTy() && "Only integer type loads and stores are split"); - assert(SliceSize < DL.getTypeStoreSize(LI.getType()).getFixedSize() && + assert(SliceSize < DL.getTypeStoreSize(LI.getType()).getFixedValue() && "Split load isn't smaller than original load"); assert(DL.typeSizeEqualsStoreSize(LI.getType()) && "Non-byte-multiple bit width"); @@ -2912,7 +2912,7 @@ bool rewriteIntegerStore(Value *V, StoreInst &SI, AAMDNodes AATags) { assert(IntTy && "We cannot extract an integer from the alloca"); assert(!SI.isVolatile()); - if (DL.getTypeSizeInBits(V->getType()).getFixedSize() != + if (DL.getTypeSizeInBits(V->getType()).getFixedValue() != IntTy->getBitWidth()) { Value *Old = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), &NewAI, NewAI.getAlign(), "oldload"); @@ -2950,7 +2950,7 @@ if (AllocaInst *AI = dyn_cast(V->stripInBoundsOffsets())) Pass.PostPromotionWorklist.insert(AI); - if (SliceSize < DL.getTypeStoreSize(V->getType()).getFixedSize()) { + if (SliceSize < DL.getTypeStoreSize(V->getType()).getFixedValue()) { assert(!SI.isVolatile()); assert(V->getType()->isIntegerTy() && "Only integer type loads and stores are split"); @@ -2967,7 +2967,7 @@ return rewriteIntegerStore(V, SI, AATags); const bool IsStorePastEnd = - DL.getTypeStoreSize(V->getType()).getFixedSize() > SliceSize; + DL.getTypeStoreSize(V->getType()).getFixedValue() > SliceSize; StoreInst *NewSI; if (NewBeginOffset == NewAllocaBeginOffset && NewEndOffset == NewAllocaEndOffset && @@ -3094,7 +3094,7 @@ auto *Int8Ty = IntegerType::getInt8Ty(NewAI.getContext()); auto *SrcTy = FixedVectorType::get(Int8Ty, Len); return canConvertValue(DL, SrcTy, AllocaTy) && - DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy).getFixedSize()); + DL.isLegalInteger(DL.getTypeSizeInBits(ScalarTy).getFixedValue()); }(); // If this doesn't map cleanly onto the alloca type, and that type isn't @@ -3134,7 +3134,7 @@ "Too many elements!"); Value *Splat = getIntegerSplat( - II.getValue(), DL.getTypeSizeInBits(ElementTy).getFixedSize() / 8); + II.getValue(), DL.getTypeSizeInBits(ElementTy).getFixedValue() / 8); Splat = convertValue(DL, IRB, Splat, ElementTy); if (NumElements > 1) Splat = getVectorSplat(Splat, NumElements); @@ -3168,7 +3168,7 @@ assert(NewEndOffset == NewAllocaEndOffset); V = getIntegerSplat(II.getValue(), - DL.getTypeSizeInBits(ScalarTy).getFixedSize() / 8); + DL.getTypeSizeInBits(ScalarTy).getFixedValue() / 8); if (VectorType *AllocaVecTy = dyn_cast(AllocaTy)) V = getVectorSplat( V, cast(AllocaVecTy)->getNumElements()); @@ -3244,7 +3244,7 @@ !VecTy && !IntTy && (BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset || SliceSize != - DL.getTypeStoreSize(NewAI.getAllocatedType()).getFixedSize() || + DL.getTypeStoreSize(NewAI.getAllocatedType()).getFixedValue() || !NewAI.getAllocatedType()->isSingleValueType()); // If we're just going to emit a memcpy, the alloca hasn't changed, and the @@ -3949,8 +3949,8 @@ if (Ty->isSingleValueType()) return Ty; - uint64_t AllocSize = DL.getTypeAllocSize(Ty).getFixedSize(); - uint64_t TypeSize = DL.getTypeSizeInBits(Ty).getFixedSize(); + uint64_t AllocSize = DL.getTypeAllocSize(Ty).getFixedValue(); + uint64_t TypeSize = DL.getTypeSizeInBits(Ty).getFixedValue(); Type 
*InnerTy; if (ArrayType *ArrTy = dyn_cast(Ty)) { @@ -3963,8 +3963,8 @@ return Ty; } - if (AllocSize > DL.getTypeAllocSize(InnerTy).getFixedSize() || - TypeSize > DL.getTypeSizeInBits(InnerTy).getFixedSize()) + if (AllocSize > DL.getTypeAllocSize(InnerTy).getFixedValue() || + TypeSize > DL.getTypeSizeInBits(InnerTy).getFixedValue()) return Ty; return stripAggregateTypeWrapping(DL, InnerTy); @@ -3985,10 +3985,10 @@ /// return a type if necessary. static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset, uint64_t Size) { - if (Offset == 0 && DL.getTypeAllocSize(Ty).getFixedSize() == Size) + if (Offset == 0 && DL.getTypeAllocSize(Ty).getFixedValue() == Size) return stripAggregateTypeWrapping(DL, Ty); - if (Offset > DL.getTypeAllocSize(Ty).getFixedSize() || - (DL.getTypeAllocSize(Ty).getFixedSize() - Offset) < Size) + if (Offset > DL.getTypeAllocSize(Ty).getFixedValue() || + (DL.getTypeAllocSize(Ty).getFixedValue() - Offset) < Size) return nullptr; if (isa(Ty) || isa(Ty)) { @@ -4004,7 +4004,7 @@ ElementTy = VT->getElementType(); TyNumElements = VT->getNumElements(); } - uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedSize(); + uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedValue(); uint64_t NumSkippedElements = Offset / ElementSize; if (NumSkippedElements >= TyNumElements) return nullptr; @@ -4044,7 +4044,7 @@ Offset -= SL->getElementOffset(Index); Type *ElementTy = STy->getElementType(Index); - uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedSize(); + uint64_t ElementSize = DL.getTypeAllocSize(ElementTy).getFixedValue(); if (Offset >= ElementSize) return nullptr; // The offset points into alignment padding. @@ -4604,7 +4604,7 @@ findCommonType(P.begin(), P.end(), P.endOffset()); // Do all uses operate on the same type? if (CommonUseTy.first) - if (DL.getTypeAllocSize(CommonUseTy.first).getFixedSize() >= P.size()) { + if (DL.getTypeAllocSize(CommonUseTy.first).getFixedValue() >= P.size()) { SliceTy = CommonUseTy.first; SliceVecTy = dyn_cast(SliceTy); } @@ -4616,7 +4616,7 @@ // If still not, can we use the largest bitwidth integer type used? if (!SliceTy && CommonUseTy.second) - if (DL.getTypeAllocSize(CommonUseTy.second).getFixedSize() >= P.size()) { + if (DL.getTypeAllocSize(CommonUseTy.second).getFixedValue() >= P.size()) { SliceTy = CommonUseTy.second; SliceVecTy = dyn_cast(SliceTy); } @@ -4639,7 +4639,7 @@ if (!SliceTy) SliceTy = ArrayType::get(Type::getInt8Ty(*C), P.size()); - assert(DL.getTypeAllocSize(SliceTy).getFixedSize() >= P.size()); + assert(DL.getTypeAllocSize(SliceTy).getFixedValue() >= P.size()); bool IsIntegerPromotable = isIntegerWideningViable(P, SliceTy, DL); @@ -4795,7 +4795,7 @@ bool IsSorted = true; uint64_t AllocaSize = - DL.getTypeAllocSize(AI.getAllocatedType()).getFixedSize(); + DL.getTypeAllocSize(AI.getAllocatedType()).getFixedValue(); const uint64_t MaxBitVectorSize = 1024; if (AllocaSize <= MaxBitVectorSize) { // If a byte boundary is included in any load or store, a slice starting or @@ -4860,7 +4860,7 @@ if (NewAI != &AI) { uint64_t SizeOfByte = 8; uint64_t AllocaSize = - DL.getTypeSizeInBits(NewAI->getAllocatedType()).getFixedSize(); + DL.getTypeSizeInBits(NewAI->getAllocatedType()).getFixedValue(); // Don't include any padding. 
uint64_t Size = std::min(AllocaSize, P.size() * SizeOfByte); Fragments.push_back(Fragment(NewAI, P.beginOffset() * SizeOfByte, Size)); @@ -4881,7 +4881,7 @@ auto *Expr = DbgDeclare->getExpression(); DIBuilder DIB(*AI.getModule(), /*AllowUnresolved*/ false); uint64_t AllocaSize = - DL.getTypeSizeInBits(AI.getAllocatedType()).getFixedSize(); + DL.getTypeSizeInBits(AI.getAllocatedType()).getFixedValue(); for (auto Fragment : Fragments) { // Create a fragment expression describing the new partition or reuse AI's // expression if there is only one partition. @@ -5002,7 +5002,7 @@ // Skip alloca forms that this analysis can't handle. auto *AT = AI.getAllocatedType(); if (AI.isArrayAllocation() || !AT->isSized() || isa(AT) || - DL.getTypeAllocSize(AT).getFixedSize() == 0) + DL.getTypeAllocSize(AT).getFixedValue() == 0) return {Changed, CFGChanged}; // First, split any FCA loads and stores touching this alloca to promote diff --git a/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp b/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp --- a/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp +++ b/llvm/lib/Transforms/Utils/AssumeBundleBuilder.cpp @@ -254,7 +254,7 @@ unsigned DerefSize = MemInst->getModule() ->getDataLayout() .getTypeStoreSize(AccType) - .getKnownMinSize(); + .getKnownMinValue(); if (DerefSize != 0) { addKnowledge({Attribute::Dereferenceable, DerefSize, Pointer}); if (!NullPointerIsDefined(MemInst->getFunction(), diff --git a/llvm/lib/Transforms/Utils/FunctionComparator.cpp b/llvm/lib/Transforms/Utils/FunctionComparator.cpp --- a/llvm/lib/Transforms/Utils/FunctionComparator.cpp +++ b/llvm/lib/Transforms/Utils/FunctionComparator.cpp @@ -241,9 +241,9 @@ unsigned TyRWidth = 0; if (auto *VecTyL = dyn_cast(TyL)) - TyLWidth = VecTyL->getPrimitiveSizeInBits().getFixedSize(); + TyLWidth = VecTyL->getPrimitiveSizeInBits().getFixedValue(); if (auto *VecTyR = dyn_cast(TyR)) - TyRWidth = VecTyR->getPrimitiveSizeInBits().getFixedSize(); + TyRWidth = VecTyR->getPrimitiveSizeInBits().getFixedValue(); if (TyLWidth != TyRWidth) return cmpNumbers(TyLWidth, TyRWidth); diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp --- a/llvm/lib/Transforms/Utils/InlineFunction.cpp +++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp @@ -2504,7 +2504,7 @@ if (!AllocaTypeSize.isScalable() && AllocaArraySize != std::numeric_limits::max() && std::numeric_limits::max() / AllocaArraySize >= - AllocaTypeSize.getFixedSize()) { + AllocaTypeSize.getFixedValue()) { AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()), AllocaArraySize * AllocaTypeSize); } diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp --- a/llvm/lib/Transforms/Utils/Local.cpp +++ b/llvm/lib/Transforms/Utils/Local.cpp @@ -1496,7 +1496,7 @@ if (std::optional FragmentSize = DII->getFragmentSizeInBits()) { assert(!ValueSize.isScalable() && "Fragments don't work on scalable types."); - return ValueSize.getFixedSize() >= *FragmentSize; + return ValueSize.getFixedValue() >= *FragmentSize; } // We can't always calculate the size of the DI variable (e.g. if it is a // VLA). 
Try to use the size of the alloca that the dbg intrinsic describes diff --git a/llvm/lib/Transforms/Utils/MemoryOpRemark.cpp b/llvm/lib/Transforms/Utils/MemoryOpRemark.cpp --- a/llvm/lib/Transforms/Utils/MemoryOpRemark.cpp +++ b/llvm/lib/Transforms/Utils/MemoryOpRemark.cpp @@ -309,7 +309,7 @@ SmallVectorImpl &Result) { if (auto *GV = dyn_cast(V)) { auto *Ty = GV->getValueType(); - uint64_t Size = DL.getTypeSizeInBits(Ty).getFixedSize(); + uint64_t Size = DL.getTypeSizeInBits(Ty).getFixedValue(); VariableInfo Var{nameOrNone(GV), Size}; if (!Var.isEmpty()) Result.push_back(std::move(Var)); @@ -343,7 +343,7 @@ // If not, get it from the alloca. std::optional TySize = AI->getAllocationSize(DL); std::optional Size = - TySize ? std::optional(TySize->getFixedSize()) : std::nullopt; + TySize ? std::optional(TySize->getFixedValue()) : std::nullopt; VariableInfo Var{nameOrNone(AI), Size}; if (!Var.isEmpty()) Result.push_back(std::move(Var)); diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp --- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp +++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp @@ -1900,8 +1900,8 @@ // Put pointers at the back and make sure pointer < pointer = false. if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy()) return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy(); - return RHS->getType()->getPrimitiveSizeInBits().getFixedSize() < - LHS->getType()->getPrimitiveSizeInBits().getFixedSize(); + return RHS->getType()->getPrimitiveSizeInBits().getFixedValue() < + LHS->getType()->getPrimitiveSizeInBits().getFixedValue(); }); unsigned NumElim = 0; diff --git a/llvm/lib/Transforms/Utils/VNCoercion.cpp b/llvm/lib/Transforms/Utils/VNCoercion.cpp --- a/llvm/lib/Transforms/Utils/VNCoercion.cpp +++ b/llvm/lib/Transforms/Utils/VNCoercion.cpp @@ -28,14 +28,14 @@ isFirstClassAggregateOrScalableType(StoredTy)) return false; - uint64_t StoreSize = DL.getTypeSizeInBits(StoredTy).getFixedSize(); + uint64_t StoreSize = DL.getTypeSizeInBits(StoredTy).getFixedValue(); // The store size must be byte-aligned to support future type casts. if (llvm::alignTo(StoreSize, 8) != StoreSize) return false; // The store has to be at least as big as the load. - if (StoreSize < DL.getTypeSizeInBits(LoadTy).getFixedSize()) + if (StoreSize < DL.getTypeSizeInBits(LoadTy).getFixedValue()) return false; bool StoredNI = DL.isNonIntegralPointerType(StoredTy->getScalarType()); @@ -58,7 +58,7 @@ // The implementation below uses inttoptr for vectors of unequal size; we // can't allow this for non integral pointers. We could teach it to extract // exact subvectors if desired. - if (StoredNI && StoreSize != DL.getTypeSizeInBits(LoadTy).getFixedSize()) + if (StoredNI && StoreSize != DL.getTypeSizeInBits(LoadTy).getFixedValue()) return false; if (StoredTy->isTargetExtTy() || LoadTy->isTargetExtTy()) @@ -84,8 +84,8 @@ // If this is already the right type, just return it. Type *StoredValTy = StoredVal->getType(); - uint64_t StoredValSize = DL.getTypeSizeInBits(StoredValTy).getFixedSize(); - uint64_t LoadedValSize = DL.getTypeSizeInBits(LoadedTy).getFixedSize(); + uint64_t StoredValSize = DL.getTypeSizeInBits(StoredValTy).getFixedValue(); + uint64_t LoadedValSize = DL.getTypeSizeInBits(LoadedTy).getFixedValue(); // If the store and reload are the same size, we can always reuse it. 
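// ---- Editor's sketch (illustrative only, not part of the patch) ----
// VNCoercion's legality test above boils down to: the stored bits form whole
// bytes and cover at least the loaded bits (hypothetical helper):
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

static bool storeCoversLoad(const DataLayout &DL, Type *StoredTy, Type *LoadTy) {
  uint64_t StoreBits = DL.getTypeSizeInBits(StoredTy).getFixedValue();
  uint64_t LoadBits = DL.getTypeSizeInBits(LoadTy).getFixedValue();
  return alignTo(StoreBits, 8) == StoreBits && // byte-aligned store size
         StoreBits >= LoadBits;                // store at least as wide
}
// --------------------------------------------------------------------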
   if (StoredValSize == LoadedValSize) {
@@ -137,8 +137,8 @@
   // If this is a big-endian system, we need to shift the value down to the low
   // bits so that a truncate will work.
   if (DL.isBigEndian()) {
-    uint64_t ShiftAmt = DL.getTypeStoreSizeInBits(StoredValTy).getFixedSize() -
-                        DL.getTypeStoreSizeInBits(LoadedTy).getFixedSize();
+    uint64_t ShiftAmt = DL.getTypeStoreSizeInBits(StoredValTy).getFixedValue() -
+                        DL.getTypeStoreSizeInBits(LoadedTy).getFixedValue();
     StoredVal = Helper.CreateLShr(
         StoredVal, ConstantInt::get(StoredVal->getType(), ShiftAmt));
   }
@@ -186,7 +186,7 @@
   if (StoreBase != LoadBase)
     return -1;

-  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy).getFixedSize();
+  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy).getFixedValue();

   if ((WriteSizeInBits & 7) | (LoadSize & 7))
     return -1;
@@ -221,7 +221,7 @@
   Value *StorePtr = DepSI->getPointerOperand();
   uint64_t StoreSize =
-      DL.getTypeSizeInBits(DepSI->getValueOperand()->getType()).getFixedSize();
+      DL.getTypeSizeInBits(DepSI->getValueOperand()->getType()).getFixedValue();
   return analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, StorePtr, StoreSize,
                                         DL);
 }
@@ -324,7 +324,7 @@
     return -1;

   Value *DepPtr = DepLI->getPointerOperand();
-  uint64_t DepSize = DL.getTypeSizeInBits(DepLI->getType()).getFixedSize();
+  uint64_t DepSize = DL.getTypeSizeInBits(DepLI->getType()).getFixedValue();
   int R = analyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, DL);
   if (R != -1)
     return R;
@@ -334,7 +334,7 @@
   int64_t LoadOffs = 0;
   const Value *LoadBase =
       GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, DL);
-  unsigned LoadSize = DL.getTypeStoreSize(LoadTy).getFixedSize();
+  unsigned LoadSize = DL.getTypeStoreSize(LoadTy).getFixedValue();

   unsigned Size =
       getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI);
@@ -411,8 +411,8 @@
   }

   uint64_t StoreSize =
-      (DL.getTypeSizeInBits(SrcVal->getType()).getFixedSize() + 7) / 8;
-  uint64_t LoadSize = (DL.getTypeSizeInBits(LoadTy).getFixedSize() + 7) / 8;
+      (DL.getTypeSizeInBits(SrcVal->getType()).getFixedValue() + 7) / 8;
+  uint64_t LoadSize = (DL.getTypeSizeInBits(LoadTy).getFixedValue() + 7) / 8;
   // Compute which bits of the stored value are being used by the load. Convert
   // to an integer type to start with.
   if (SrcVal->getType()->isPtrOrPtrVectorTy())
@@ -465,8 +465,8 @@
   // If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to
   // widen SrcVal out to a larger load.
   unsigned SrcValStoreSize =
-      DL.getTypeStoreSize(SrcVal->getType()).getFixedSize();
-  unsigned LoadSize = DL.getTypeStoreSize(LoadTy).getFixedSize();
+      DL.getTypeStoreSize(SrcVal->getType()).getFixedValue();
+  unsigned LoadSize = DL.getTypeStoreSize(LoadTy).getFixedValue();
   if (Offset + LoadSize > SrcValStoreSize) {
     assert(SrcVal->isSimple() && "Cannot widen volatile/atomic load!");
     assert(SrcVal->getType()->isIntegerTy() && "Can't widen non-integer load");
@@ -510,8 +510,8 @@
 Constant *getConstantLoadValueForLoad(Constant *SrcVal, unsigned Offset,
                                       Type *LoadTy, const DataLayout &DL) {
   unsigned SrcValStoreSize =
-      DL.getTypeStoreSize(SrcVal->getType()).getFixedSize();
-  unsigned LoadSize = DL.getTypeStoreSize(LoadTy).getFixedSize();
+      DL.getTypeStoreSize(SrcVal->getType()).getFixedValue();
+  unsigned LoadSize = DL.getTypeStoreSize(LoadTy).getFixedValue();
   if (Offset + LoadSize > SrcValStoreSize)
     return nullptr;
   return getConstantStoreValueForLoad(SrcVal, Offset, LoadTy, DL);
@@ -523,7 +523,7 @@
                                 Type *LoadTy, Instruction *InsertPt,
                                 const DataLayout &DL) {
   LLVMContext &Ctx = LoadTy->getContext();
-  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy).getFixedSize() / 8;
+  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy).getFixedValue() / 8;
   IRBuilder<> Builder(InsertPt);

   // We know that this method is only called when the mem transfer fully
@@ -569,7 +569,7 @@
 Constant *getConstantMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                          Type *LoadTy, const DataLayout &DL) {
   LLVMContext &Ctx = LoadTy->getContext();
-  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy).getFixedSize() / 8;
+  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy).getFixedValue() / 8;

   // We know that this method is only called when the mem transfer fully
   // provides the bits for the load.
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -5187,7 +5187,7 @@
     // Ensure MaxVF is a power of 2; the dependence distance bound may not be.
     // Note that both WidestRegister and WidestType may not be a powers of 2.
     auto MaxVectorElementCount = ElementCount::get(
-        PowerOf2Floor(WidestRegister.getKnownMinSize() / WidestType),
+        PowerOf2Floor(WidestRegister.getKnownMinValue() / WidestType),
         ComputeScalableMaxVF);
     MaxVectorElementCount = MinVF(MaxVectorElementCount, MaxSafeVF);
     LLVM_DEBUG(dbgs() << "LV: The Widest register safe to use is: "
@@ -5228,7 +5228,7 @@
   if (MaximizeBandwidth || (MaximizeBandwidth.getNumOccurrences() == 0 &&
                             TTI.shouldMaximizeVectorBandwidth(RegKind))) {
     auto MaxVectorElementCountMaxBW = ElementCount::get(
-        PowerOf2Floor(WidestRegister.getKnownMinSize() / SmallestType),
+        PowerOf2Floor(WidestRegister.getKnownMinValue() / SmallestType),
         ComputeScalableMaxVF);
     MaxVectorElementCountMaxBW = MinVF(MaxVectorElementCountMaxBW, MaxSafeVF);
@@ -5616,9 +5616,9 @@
   } else {
     for (Type *T : ElementTypesInLoop) {
       MinWidth = std::min(
-          MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
+          MinWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
       MaxWidth = std::max(
-          MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedSize());
+          MaxWidth, DL.getTypeSizeInBits(T->getScalarType()).getFixedValue());
     }
   }
   return {MinWidth, MaxWidth};
@@ -7459,7 +7459,7 @@
   if (UserVF.isZero()) {
     VF = ElementCount::getFixed(determineVPlanVF(
         TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
-            .getFixedSize(),
+            .getFixedValue(),
         CM));
     LLVM_DEBUG(dbgs() << "LV: VPlan computed VF " << VF << ".\n");
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -991,7 +991,7 @@
   else
     MaxVecRegSize =
         TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
-            .getFixedSize();
+            .getFixedValue();

   if (MinVectorRegSizeOption.getNumOccurrences())
     MinVecRegSize = MinVectorRegSizeOption;
diff --git a/llvm/unittests/AsmParser/AsmParserTest.cpp b/llvm/unittests/AsmParser/AsmParserTest.cpp
--- a/llvm/unittests/AsmParser/AsmParserTest.cpp
+++ b/llvm/unittests/AsmParser/AsmParserTest.cpp
@@ -232,7 +232,7 @@
   // Check the details of the vector.
   auto *VT = cast(Ty);
   ASSERT_TRUE(VT->getNumElements() == 5);
-  ASSERT_TRUE(VT->getPrimitiveSizeInBits().getFixedSize() == 160);
+  ASSERT_TRUE(VT->getPrimitiveSizeInBits().getFixedValue() == 160);
   Ty = VT->getElementType();
   ASSERT_TRUE(Ty->isIntegerTy());
   ASSERT_TRUE(Ty->getPrimitiveSizeInBits() == 32);
@@ -347,7 +347,7 @@
   // Check the details of the vector.
   auto *VT = cast(Ty);
   ASSERT_TRUE(VT->getNumElements() == 5);
-  ASSERT_TRUE(VT->getPrimitiveSizeInBits().getFixedSize() == 160);
+  ASSERT_TRUE(VT->getPrimitiveSizeInBits().getFixedValue() == 160);
   Ty = VT->getElementType();
   ASSERT_TRUE(Ty->isIntegerTy());
   ASSERT_TRUE(Ty->getPrimitiveSizeInBits() == 32);
diff --git a/llvm/unittests/CodeGen/ScalableVectorMVTsTest.cpp b/llvm/unittests/CodeGen/ScalableVectorMVTsTest.cpp
--- a/llvm/unittests/CodeGen/ScalableVectorMVTsTest.cpp
+++ b/llvm/unittests/CodeGen/ScalableVectorMVTsTest.cpp
@@ -139,14 +139,14 @@
   EXPECT_EQ(nxv4i32.getSizeInBits(), nxv2i64.getSizeInBits());
   EXPECT_EQ(nxv2f64.getSizeInBits(), nxv2i64.getSizeInBits());
   EXPECT_NE(nxv2i32.getSizeInBits(), nxv4i32.getSizeInBits());
-  EXPECT_LT(nxv2i32.getSizeInBits().getKnownMinSize(),
-            nxv2i64.getSizeInBits().getKnownMinSize());
-  EXPECT_LE(nxv4i32.getSizeInBits().getKnownMinSize(),
-            nxv2i64.getSizeInBits().getKnownMinSize());
-  EXPECT_GT(nxv4i32.getSizeInBits().getKnownMinSize(),
-            nxv2i32.getSizeInBits().getKnownMinSize());
-  EXPECT_GE(nxv2i64.getSizeInBits().getKnownMinSize(),
-            nxv4i32.getSizeInBits().getKnownMinSize());
+  EXPECT_LT(nxv2i32.getSizeInBits().getKnownMinValue(),
+            nxv2i64.getSizeInBits().getKnownMinValue());
+  EXPECT_LE(nxv4i32.getSizeInBits().getKnownMinValue(),
+            nxv2i64.getSizeInBits().getKnownMinValue());
+  EXPECT_GT(nxv4i32.getSizeInBits().getKnownMinValue(),
+            nxv2i32.getSizeInBits().getKnownMinValue());
+  EXPECT_GE(nxv2i64.getSizeInBits().getKnownMinValue(),
+            nxv4i32.getSizeInBits().getKnownMinValue());

   // Check equivalence and ordering on fixed types.
   EXPECT_EQ(v4i32.getSizeInBits(), v2i64.getSizeInBits());
@@ -168,10 +168,10 @@
   // Check that we can query the known minimum size for both scalable and
   // fixed length types.
-  EXPECT_EQ(nxv2i32.getSizeInBits().getKnownMinSize(), 64U);
-  EXPECT_EQ(nxv2f64.getSizeInBits().getKnownMinSize(), 128U);
-  EXPECT_EQ(v2i32.getSizeInBits().getKnownMinSize(),
-            nxv2i32.getSizeInBits().getKnownMinSize());
+  EXPECT_EQ(nxv2i32.getSizeInBits().getKnownMinValue(), 64U);
+  EXPECT_EQ(nxv2f64.getSizeInBits().getKnownMinValue(), 128U);
+  EXPECT_EQ(v2i32.getSizeInBits().getKnownMinValue(),
+            nxv2i32.getSizeInBits().getKnownMinValue());

   // Check scalable property.
   ASSERT_FALSE(v4i32.getSizeInBits().isScalable());
diff --git a/llvm/unittests/IR/VectorTypesTest.cpp b/llvm/unittests/IR/VectorTypesTest.cpp
--- a/llvm/unittests/IR/VectorTypesTest.cpp
+++ b/llvm/unittests/IR/VectorTypesTest.cpp
@@ -287,21 +287,21 @@
   auto *V2Int64Ty = FixedVectorType::get(Int64Ty, 2);

   TypeSize V2I32Len = V2Int32Ty->getPrimitiveSizeInBits();
-  EXPECT_EQ(V2I32Len.getKnownMinSize(), 64U);
+  EXPECT_EQ(V2I32Len.getKnownMinValue(), 64U);
   EXPECT_FALSE(V2I32Len.isScalable());

-  EXPECT_LT(V2Int32Ty->getPrimitiveSizeInBits().getFixedSize(),
-            V4Int32Ty->getPrimitiveSizeInBits().getFixedSize());
-  EXPECT_GT(V2Int64Ty->getPrimitiveSizeInBits().getFixedSize(),
-            V2Int32Ty->getPrimitiveSizeInBits().getFixedSize());
+  EXPECT_LT(V2Int32Ty->getPrimitiveSizeInBits().getFixedValue(),
+            V4Int32Ty->getPrimitiveSizeInBits().getFixedValue());
+  EXPECT_GT(V2Int64Ty->getPrimitiveSizeInBits().getFixedValue(),
+            V2Int32Ty->getPrimitiveSizeInBits().getFixedValue());
   EXPECT_EQ(V4Int32Ty->getPrimitiveSizeInBits(),
             V2Int64Ty->getPrimitiveSizeInBits());
   EXPECT_NE(V2Int32Ty->getPrimitiveSizeInBits(),
             V2Int64Ty->getPrimitiveSizeInBits());

   // Check that a fixed-only comparison works for fixed size vectors.
-  EXPECT_EQ(V2Int64Ty->getPrimitiveSizeInBits().getFixedSize(),
-            V4Int32Ty->getPrimitiveSizeInBits().getFixedSize());
+  EXPECT_EQ(V2Int64Ty->getPrimitiveSizeInBits().getFixedValue(),
+            V4Int32Ty->getPrimitiveSizeInBits().getFixedValue());

   // Check the DataLayout interfaces.
   EXPECT_EQ(DL.getTypeSizeInBits(V2Int64Ty), DL.getTypeSizeInBits(V4Int32Ty));
@@ -333,32 +333,32 @@
   auto *ScV2Int64Ty = ScalableVectorType::get(Int64Ty, 2);

   TypeSize ScV2I32Len = ScV2Int32Ty->getPrimitiveSizeInBits();
-  EXPECT_EQ(ScV2I32Len.getKnownMinSize(), 64U);
+  EXPECT_EQ(ScV2I32Len.getKnownMinValue(), 64U);
   EXPECT_TRUE(ScV2I32Len.isScalable());

-  EXPECT_LT(ScV2Int32Ty->getPrimitiveSizeInBits().getKnownMinSize(),
-            ScV4Int32Ty->getPrimitiveSizeInBits().getKnownMinSize());
-  EXPECT_GT(ScV2Int64Ty->getPrimitiveSizeInBits().getKnownMinSize(),
-            ScV2Int32Ty->getPrimitiveSizeInBits().getKnownMinSize());
-  EXPECT_EQ(ScV4Int32Ty->getPrimitiveSizeInBits().getKnownMinSize(),
-            ScV2Int64Ty->getPrimitiveSizeInBits().getKnownMinSize());
-  EXPECT_NE(ScV2Int32Ty->getPrimitiveSizeInBits().getKnownMinSize(),
-            ScV2Int64Ty->getPrimitiveSizeInBits().getKnownMinSize());
+  EXPECT_LT(ScV2Int32Ty->getPrimitiveSizeInBits().getKnownMinValue(),
+            ScV4Int32Ty->getPrimitiveSizeInBits().getKnownMinValue());
+  EXPECT_GT(ScV2Int64Ty->getPrimitiveSizeInBits().getKnownMinValue(),
+            ScV2Int32Ty->getPrimitiveSizeInBits().getKnownMinValue());
+  EXPECT_EQ(ScV4Int32Ty->getPrimitiveSizeInBits().getKnownMinValue(),
+            ScV2Int64Ty->getPrimitiveSizeInBits().getKnownMinValue());
+  EXPECT_NE(ScV2Int32Ty->getPrimitiveSizeInBits().getKnownMinValue(),
+            ScV2Int64Ty->getPrimitiveSizeInBits().getKnownMinValue());

   // Check the DataLayout interfaces.
   EXPECT_EQ(DL.getTypeSizeInBits(ScV2Int64Ty),
             DL.getTypeSizeInBits(ScV4Int32Ty));
-  EXPECT_EQ(DL.getTypeSizeInBits(ScV2Int32Ty).getKnownMinSize(), 64U);
+  EXPECT_EQ(DL.getTypeSizeInBits(ScV2Int32Ty).getKnownMinValue(), 64U);
   EXPECT_EQ(DL.getTypeStoreSize(ScV2Int64Ty),
             DL.getTypeStoreSize(ScV4Int32Ty));
   EXPECT_NE(DL.getTypeStoreSizeInBits(ScV2Int32Ty),
             DL.getTypeStoreSizeInBits(ScV2Int64Ty));
-  EXPECT_EQ(DL.getTypeStoreSizeInBits(ScV2Int32Ty).getKnownMinSize(), 64U);
-  EXPECT_EQ(DL.getTypeStoreSize(ScV2Int64Ty).getKnownMinSize(), 16U);
+  EXPECT_EQ(DL.getTypeStoreSizeInBits(ScV2Int32Ty).getKnownMinValue(), 64U);
+  EXPECT_EQ(DL.getTypeStoreSize(ScV2Int64Ty).getKnownMinValue(), 16U);
   EXPECT_EQ(DL.getTypeAllocSize(ScV4Int32Ty),
             DL.getTypeAllocSize(ScV2Int64Ty));
   EXPECT_NE(DL.getTypeAllocSizeInBits(ScV2Int32Ty),
             DL.getTypeAllocSizeInBits(ScV2Int64Ty));
-  EXPECT_EQ(DL.getTypeAllocSizeInBits(ScV4Int32Ty).getKnownMinSize(), 128U);
-  EXPECT_EQ(DL.getTypeAllocSize(ScV2Int32Ty).getKnownMinSize(), 8U);
+  EXPECT_EQ(DL.getTypeAllocSizeInBits(ScV4Int32Ty).getKnownMinValue(), 128U);
+  EXPECT_EQ(DL.getTypeAllocSize(ScV2Int32Ty).getKnownMinValue(), 8U);
   ASSERT_TRUE(DL.typeSizeEqualsStoreSize(ScV4Int32Ty));
 }
@@ -375,8 +375,8 @@
   EXPECT_NE(V4Int32Ty->getPrimitiveSizeInBits(),
             ScV4Int32Ty->getPrimitiveSizeInBits());
   // If we are only checking the minimum, then they are the same size.
-  EXPECT_EQ(V4Int32Ty->getPrimitiveSizeInBits().getKnownMinSize(),
-            ScV4Int32Ty->getPrimitiveSizeInBits().getKnownMinSize());
+  EXPECT_EQ(V4Int32Ty->getPrimitiveSizeInBits().getKnownMinValue(),
+            ScV4Int32Ty->getPrimitiveSizeInBits().getKnownMinValue());

   // We can't use ordering comparisons (<,<=,>,>=) between scalable and
   // non-scalable vector sizes.
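[Editor's note - not part of the patch.] The unit-test hunks above are the clearest picture of the renamed API: getFixedSize()/getKnownMinSize() become getFixedValue()/getKnownMinValue(), with unchanged semantics. A minimal standalone sketch of the intended usage after the rename, assuming LLVM headers are on the include path; the helper name bitsToBytesIfFixed is made up for illustration:

  // Editorial sketch only, not part of the patch.
  #include "llvm/Support/TypeSize.h"
  #include <cstdint>
  #include <optional>

  // getFixedValue() is only valid for non-scalable sizes, so check
  // isScalable() first (getKnownMinValue() is valid for both kinds).
  static std::optional<uint64_t> bitsToBytesIfFixed(llvm::TypeSize Bits) {
    if (Bits.isScalable())
      return std::nullopt; // only the multiple of vscale is known statically
    return (Bits.getFixedValue() + 7) / 8;
  }

  // TypeSize::Fixed(128).getFixedValue() == 128
  // TypeSize::Scalable(128).getKnownMinValue() == 128 (i.e. 128 x vscale bits)

For scalable sizes the tests above deliberately compare only getKnownMinValue(), since, as the comment in the hunk notes, ordering comparisons between scalable and non-scalable sizes are not allowed.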
diff --git a/llvm/unittests/Support/TypeSizeTest.cpp b/llvm/unittests/Support/TypeSizeTest.cpp
--- a/llvm/unittests/Support/TypeSizeTest.cpp
+++ b/llvm/unittests/Support/TypeSizeTest.cpp
@@ -63,12 +63,12 @@
 constexpr TypeSize TSFixed1 = TypeSize::Fixed(1);
 constexpr TypeSize TSFixed32 = TypeSize::Fixed(32);

-static_assert(TSFixed0.getFixedSize() == 0);
-static_assert(TSFixed1.getFixedSize() == 1);
-static_assert(TSFixed32.getFixedSize() == 32);
-static_assert(TSFixed32.getKnownMinSize() == 32);
+static_assert(TSFixed0.getFixedValue() == 0);
+static_assert(TSFixed1.getFixedValue() == 1);
+static_assert(TSFixed32.getFixedValue() == 32);
+static_assert(TSFixed32.getKnownMinValue() == 32);

-static_assert(TypeSize::Scalable(32).getKnownMinSize() == 32);
+static_assert(TypeSize::Scalable(32).getKnownMinValue() == 32);

 static_assert(TSFixed32 * 2 == TypeSize::Fixed(64));
 static_assert(TSFixed32 * 2u == TypeSize::Fixed(64));
diff --git a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
--- a/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
+++ b/llvm/utils/TableGen/CodeGenDAGPatterns.cpp
@@ -544,9 +544,9 @@
     // Always treat non-scalable MVTs as smaller than scalable MVTs for the
     // purposes of ordering.
     auto ASize = std::make_tuple(A.isScalableVector(), A.getScalarSizeInBits(),
-                                 A.getSizeInBits().getKnownMinSize());
+                                 A.getSizeInBits().getKnownMinValue());
     auto BSize = std::make_tuple(B.isScalableVector(), B.getScalarSizeInBits(),
-                                 B.getSizeInBits().getKnownMinSize());
+                                 B.getSizeInBits().getKnownMinValue());
     return ASize < BSize;
   };
   auto SameKindLE = [](MVT A, MVT B) -> bool {
@@ -558,9 +558,9 @@
       return false;

     return std::make_tuple(A.getScalarSizeInBits(),
-                           A.getSizeInBits().getKnownMinSize()) <=
+                           A.getSizeInBits().getKnownMinValue()) <=
            std::make_tuple(B.getScalarSizeInBits(),
-                           B.getSizeInBits().getKnownMinSize());
+                           B.getSizeInBits().getKnownMinValue());
   };

   for (unsigned M : Modes) {
diff --git a/llvm/utils/TableGen/GlobalISelEmitter.cpp b/llvm/utils/TableGen/GlobalISelEmitter.cpp
--- a/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -183,11 +183,11 @@
            "Unexpected mismatch of scalable property");
     return Ty.isVector() ?
                std::make_tuple(Ty.isScalable(),
-                               Ty.getSizeInBits().getKnownMinSize()) <
+                               Ty.getSizeInBits().getKnownMinValue()) <
                std::make_tuple(Other.Ty.isScalable(),
-                               Other.Ty.getSizeInBits().getKnownMinSize())
-             : Ty.getSizeInBits().getFixedSize() <
-                   Other.Ty.getSizeInBits().getFixedSize();
+                               Other.Ty.getSizeInBits().getKnownMinValue())
+             : Ty.getSizeInBits().getFixedValue() <
+                   Other.Ty.getSizeInBits().getFixedValue();
   }

   bool operator==(const LLTCodeGen &B) const { return Ty == B.Ty; }
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
@@ -973,7 +973,7 @@
       .Case([](LLVMFixedVectorType t) {
         llvm::TypeSize elementSize =
             getPrimitiveTypeSizeInBits(t.getElementType());
-        return llvm::TypeSize(elementSize.getFixedSize() * t.getNumElements(),
+        return llvm::TypeSize(elementSize.getFixedValue() * t.getNumElements(),
                               elementSize.isScalable());
       })
      .Case([](VectorType t) {
@@ -981,7 +981,7 @@
               "unexpected incompatible with LLVM vector type");
        llvm::TypeSize elementSize =
            getPrimitiveTypeSizeInBits(t.getElementType());
-        return llvm::TypeSize(elementSize.getFixedSize() * t.getNumElements(),
+        return llvm::TypeSize(elementSize.getFixedValue() * t.getNumElements(),
                              elementSize.isScalable());
       })
       .Default([](Type ty) {
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -1326,7 +1326,7 @@
   llvm::TypeSize typeSize =
       builder.GetInsertBlock()->getModule()->getDataLayout().getTypeStoreSize(
          type);
-  llvm::ConstantInt *size = builder.getInt64(typeSize.getFixedSize());
+  llvm::ConstantInt *size = builder.getInt64(typeSize.getFixedValue());
   llvm::StringRef suffix = llvm::StringRef(".cache", 6);
   std::string cacheName = (Twine(global.getSymName()).concat(suffix)).str();
   // Emit runtime function and bitcast its type (i8*) to real data type.
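[Editor's note - not part of the patch.] Across all of these hunks the change is purely mechanical: TypeSize callers move from getFixedSize()/getKnownMinSize() to getFixedValue()/getKnownMinValue(), and the returned values are unchanged. A hedged sketch of the typical call pattern the rename touches, mirroring the DataLayout store-size query in the OpenMP hunk directly above; the helper name emitTypeStoreSize is hypothetical and the sketch assumes a non-scalable type:

  // Editorial sketch only, not part of the patch.
  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Type.h"
  #include <cassert>

  static llvm::ConstantInt *emitTypeStoreSize(llvm::IRBuilder<> &Builder,
                                              const llvm::DataLayout &DL,
                                              llvm::Type *Ty) {
    llvm::TypeSize Size = DL.getTypeStoreSize(Ty); // a TypeSize, not a raw int
    // getFixedValue() asserts on scalable sizes; code that may see scalable
    // vectors should branch on isScalable() or use getKnownMinValue() instead.
    assert(!Size.isScalable() && "sketch handles fixed-size types only");
    return Builder.getInt64(Size.getFixedValue());
  }

Call sites that genuinely handle scalable vectors (as in the LoopVectorize and TableGen hunks) keep using getKnownMinValue(), which reports the statically known minimum, i.e. the coefficient of vscale.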