diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3014,7 +3014,9 @@
         DAG.getNode(ISD::FP_EXTEND, dl, NewVT, Op.getOperand(0)));
   }
 
-  if (VT.getSizeInBits() < InVT.getSizeInBits()) {
+  uint64_t VTSize = VT.getFixedSizeInBits();
+  uint64_t InVTSize = InVT.getFixedSizeInBits();
+  if (VTSize < InVTSize) {
     SDLoc dl(Op);
     SDValue Cv =
         DAG.getNode(Op.getOpcode(), dl, InVT.changeVectorElementTypeToInteger(),
@@ -3022,7 +3024,7 @@
     return DAG.getNode(ISD::TRUNCATE, dl, VT, Cv);
   }
 
-  if (VT.getSizeInBits() > InVT.getSizeInBits()) {
+  if (VTSize > InVTSize) {
     SDLoc dl(Op);
     MVT ExtVT =
         MVT::getVectorVT(MVT::getFloatingPointVT(VT.getScalarSizeInBits()),
@@ -3084,7 +3086,9 @@
     return LowerToPredicatedOp(Op, DAG, Opcode);
   }
 
-  if (VT.getSizeInBits() < InVT.getSizeInBits()) {
+  uint64_t VTSize = VT.getFixedSizeInBits();
+  uint64_t InVTSize = InVT.getFixedSizeInBits();
+  if (VTSize < InVTSize) {
     MVT CastVT =
         MVT::getVectorVT(MVT::getFloatingPointVT(InVT.getScalarSizeInBits()),
                          InVT.getVectorNumElements());
@@ -3092,7 +3096,7 @@
     return DAG.getNode(ISD::FP_ROUND, dl, VT, In, DAG.getIntPtrConstant(0, dl));
   }
 
-  if (VT.getSizeInBits() > InVT.getSizeInBits()) {
+  if (VTSize > InVTSize) {
     unsigned CastOpc =
         Op.getOpcode() == ISD::SINT_TO_FP ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
     EVT CastVT = VT.changeVectorElementTypeToInteger();
@@ -4051,11 +4055,11 @@
     return true;
 
   // Ensure NEON MVTs only belong to a single register class.
-  if (VT.getSizeInBits() <= 128)
+  if (VT.getFixedSizeInBits() <= 128)
     return false;
 
   // Don't use SVE for types that don't fit.
-  if (VT.getSizeInBits() > Subtarget->getMinSVEVectorSizeInBits())
+  if (VT.getFixedSizeInBits() > Subtarget->getMinSVEVectorSizeInBits())
     return false;
 
   // TODO: Perhaps an artificial restriction, but worth having whilst getting
@@ -7410,6 +7414,8 @@
   LLVM_DEBUG(dbgs() << "AArch64TargetLowering::ReconstructShuffle\n");
   SDLoc dl(Op);
   EVT VT = Op.getValueType();
+  assert(!VT.isScalableVector() &&
+         "Scalable vectors cannot be used with ISD::BUILD_VECTOR");
   unsigned NumElts = VT.getVectorNumElements();
 
   struct ShuffleSourceInfo {
@@ -7480,8 +7486,9 @@
     }
   }
   unsigned ResMultiplier =
-      VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
-  NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
+      VT.getScalarSizeInBits() / SmallestEltTy.getFixedSizeInBits();
+  uint64_t VTSize = VT.getFixedSizeInBits();
+  NumElts = VTSize / SmallestEltTy.getFixedSizeInBits();
   EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
 
   // If the source vector is too wide or too narrow, we may nevertheless be able
@@ -7490,17 +7497,18 @@
   for (auto &Src : Sources) {
     EVT SrcVT = Src.ShuffleVec.getValueType();
 
-    if (SrcVT.getSizeInBits() == VT.getSizeInBits())
+    uint64_t SrcVTSize = SrcVT.getFixedSizeInBits();
+    if (SrcVTSize == VTSize)
       continue;
 
     // This stage of the search produces a source with the same element type as
     // the original, but with a total width matching the BUILD_VECTOR output.
     EVT EltVT = SrcVT.getVectorElementType();
-    unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits();
+    unsigned NumSrcElts = VTSize / EltVT.getFixedSizeInBits();
     EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
 
-    if (SrcVT.getSizeInBits() < VT.getSizeInBits()) {
-      assert(2 * SrcVT.getSizeInBits() == VT.getSizeInBits());
+    if (SrcVTSize < VTSize) {
+      assert(2 * SrcVTSize == VTSize);
       // We can pad out the smaller vector for free, so if it's part of a
       // shuffle...
       Src.ShuffleVec =
@@ -7509,7 +7517,7 @@
       continue;
     }
 
-    if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits()) {
+    if (SrcVTSize != 2 * VTSize) {
       LLVM_DEBUG(
           dbgs() << "Reshuffle failed: result vector too small to extract\n");
       return SDValue();
@@ -7565,7 +7573,8 @@
       continue;
     assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
     Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec);
-    Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits();
+    Src.WindowScale =
+        SrcEltTy.getFixedSizeInBits() / SmallestEltTy.getFixedSizeInBits();
     Src.WindowBase *= Src.WindowScale;
   }
 
@@ -8339,7 +8348,7 @@
 
   EVT ScalarVT = VT.getVectorElementType();
 
-  if (ScalarVT.getSizeInBits() < 32 && ScalarVT.isInteger())
+  if (ScalarVT.getFixedSizeInBits() < 32 && ScalarVT.isInteger())
     ScalarVT = MVT::i32;
 
   return DAG.getNode(
@@ -10131,15 +10140,15 @@
 bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
     return false;
-  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
-  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
+  uint64_t NumBits1 = Ty1->getPrimitiveSizeInBits().getFixedSize();
+  uint64_t NumBits2 = Ty2->getPrimitiveSizeInBits().getFixedSize();
   return NumBits1 > NumBits2;
 }
 
 bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
   if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger())
     return false;
-  unsigned NumBits1 = VT1.getSizeInBits();
-  unsigned NumBits2 = VT2.getSizeInBits();
+  uint64_t NumBits1 = VT1.getFixedSizeInBits();
+  uint64_t NumBits2 = VT2.getFixedSizeInBits();
   return NumBits1 > NumBits2;
 }
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -455,7 +455,7 @@
 
   // The destination type should be larger than the element type. If not, get
   // the default cost for the extend.
-  if (DstVT.getSizeInBits() < SrcVT.getSizeInBits())
+  if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits())
     return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
                                    CostKind);
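
Note (reviewer illustration, not part of the patch): EVT::getSizeInBits() returns a TypeSize, whose ordered comparisons are only meaningful when both operands agree on being fixed-width or scalable. EVT::getFixedSizeInBits() instead yields a plain uint64_t and asserts the type is not a scalable vector, so each converted call site states its fixed-width assumption explicitly rather than relying on an implicit TypeSize conversion. A minimal sketch of the pattern, using a hypothetical helper resultIsNarrower that is not from the patch:

  // Illustration only; resultIsNarrower is a hypothetical helper.
  #include "llvm/CodeGen/ValueTypes.h"
  using namespace llvm;

  // Compares the widths of two value types the caller knows are fixed-width.
  // getFixedSizeInBits() asserts !isScalableVector(), so a scalable vector
  // reaching this code fails loudly instead of being silently mis-sized.
  static bool resultIsNarrower(EVT VT, EVT InVT) {
    uint64_t VTSize = VT.getFixedSizeInBits();
    uint64_t InVTSize = InVT.getFixedSizeInBits();
    return VTSize < InVTSize;
  }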