diff --git a/llvm/include/llvm/Support/MachineValueType.h b/llvm/include/llvm/Support/MachineValueType.h
--- a/llvm/include/llvm/Support/MachineValueType.h
+++ b/llvm/include/llvm/Support/MachineValueType.h
@@ -151,6 +151,9 @@
     FIRST_INTEGER_VECTOR_VALUETYPE = v1i1,
     LAST_INTEGER_VECTOR_VALUETYPE = nxv32i64,
 
+    FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE = v1i1,
+    LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE = v1i128,
+
     FIRST_INTEGER_SCALABLE_VALUETYPE = nxv1i1,
     LAST_INTEGER_SCALABLE_VALUETYPE = nxv32i64,
@@ -193,6 +196,9 @@
     FIRST_FP_VECTOR_VALUETYPE = v2f16,
     LAST_FP_VECTOR_VALUETYPE = nxv8f64,
 
+    FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE = v2f16,
+    LAST_FP_FIXEDLEN_VECTOR_VALUETYPE = v8f64,
+
     FIRST_FP_SCALABLE_VALUETYPE = nxv2f16,
     LAST_FP_SCALABLE_VALUETYPE = nxv8f64,
@@ -1096,6 +1102,18 @@
                      (MVT::SimpleValueType)(MVT::LAST_FP_VECTOR_VALUETYPE + 1));
   }
 
+  static mvt_range integer_fixedlen_vector_valuetypes() {
+    return mvt_range(
+        MVT::FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE,
+        (MVT::SimpleValueType)(MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE + 1));
+  }
+
+  static mvt_range fp_fixedlen_vector_valuetypes() {
+    return mvt_range(
+        MVT::FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE,
+        (MVT::SimpleValueType)(MVT::LAST_FP_FIXEDLEN_VECTOR_VALUETYPE + 1));
+  }
+
   static mvt_range integer_scalable_vector_valuetypes() {
     return mvt_range(MVT::FIRST_INTEGER_SCALABLE_VALUETYPE,
                      (MVT::SimpleValueType)(MVT::LAST_INTEGER_SCALABLE_VALUETYPE + 1));
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -307,6 +307,9 @@
   setOperationAction(ISD::ROTL, MVT::i32, Expand);
   setOperationAction(ISD::ROTL, MVT::i64, Expand);
   for (MVT VT : MVT::vector_valuetypes()) {
+    // SVE types handled later
+    if (VT.isScalableVector())
+      continue;
     setOperationAction(ISD::ROTL, VT, Expand);
     setOperationAction(ISD::ROTR, VT, Expand);
   }
@@ -321,6 +324,9 @@
   setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
   setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
   for (MVT VT : MVT::vector_valuetypes()) {
+    // SVE types handled later
+    if (VT.isScalableVector())
+      continue;
     setOperationAction(ISD::SDIVREM, VT, Expand);
     setOperationAction(ISD::UDIVREM, VT, Expand);
   }
@@ -753,6 +759,9 @@
   // Likewise, narrowing and extending vector loads/stores aren't handled
   // directly.
   for (MVT VT : MVT::vector_valuetypes()) {
+    // SVE types handled later
+    if (VT.isScalableVector())
+      continue;
     setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
 
     if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32) {
@@ -769,6 +778,9 @@
     setOperationAction(ISD::CTTZ, VT, Expand);
 
     for (MVT InnerVT : MVT::vector_valuetypes()) {
+      // Don't mix NEON fixed-length types with SVE scalable types.
+      if (InnerVT.isScalableVector())
+        continue;
       setTruncStoreAction(VT, InnerVT, Expand);
       setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
       setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -134,7 +134,7 @@
     setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
   }
 
-  for (MVT VT : MVT::integer_vector_valuetypes()) {
+  for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -688,7 +688,12 @@
   }
 
   for (MVT VT : MVT::vector_valuetypes()) {
+    // Scalable vectors aren't supported on this backend.
+    if (VT.isScalableVector())
+      continue;
     for (MVT InnerVT : MVT::vector_valuetypes()) {
+      if (InnerVT.isScalableVector())
+        continue;
       setTruncStoreAction(VT, InnerVT, Expand);
       addAllExtLoads(VT, InnerVT, Expand);
     }
@@ -893,7 +898,7 @@
     // It is legal to extload from v4i8 to v4i16 or v4i32.
     for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
                    MVT::v2i32}) {
-      for (MVT VT : MVT::integer_vector_valuetypes()) {
+      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
         setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
         setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
         setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
@@ -1037,6 +1042,9 @@
   // ARM does not have ROTL.
   setOperationAction(ISD::ROTL, MVT::i32, Expand);
   for (MVT VT : MVT::vector_valuetypes()) {
+    // Scalable vectors aren't supported on this backend.
+    if (VT.isScalableVector())
+      continue;
     setOperationAction(ISD::ROTL, VT, Expand);
     setOperationAction(ISD::ROTR, VT, Expand);
   }
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -1440,11 +1440,17 @@
   };
 
   for (MVT VT : MVT::vector_valuetypes()) {
+    // Scalable vectors aren't supported on this backend.
+    if (VT.isScalableVector())
+      continue;
+
     for (unsigned VectExpOp : VectExpOps)
       setOperationAction(VectExpOp, VT, Expand);
 
     // Expand all extending loads and truncating stores:
     for (MVT TargetVT : MVT::vector_valuetypes()) {
+      if (TargetVT.isScalableVector())
+        continue;
       if (TargetVT == VT)
         continue;
       setLoadExtAction(ISD::EXTLOAD, TargetVT, VT, Expand);
@@ -1856,7 +1862,7 @@
 TargetLoweringBase::LegalizeTypeAction
 HexagonTargetLowering::getPreferredVectorAction(MVT VT) const {
-  if (VT.getVectorNumElements() == 1)
+  if (VT.getVectorNumElements() == 1 || VT.isScalableVector())
     return TargetLoweringBase::TypeScalarizeVector;
 
   // Always widen vectors of i1.
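Note on the range substitutions: the AMDGPU and ARM hunks above replace integer_vector_valuetypes() with the new dedicated range instead of adding an explicit guard, which is only valid if the two formulations visit exactly the same types. Below is a minimal standalone sketch of that equivalence, assuming this revision's headers; checkRangesAgree is a hypothetical helper for illustration, not part of the patch.

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/MachineValueType.h"
#include <cassert>

using namespace llvm;

// The dedicated fixed-length range should visit the same types, in the
// same order, as the full integer-vector range with scalable types
// filtered out, since the fixed-length MVTs precede the scalable ones
// in the SimpleValueType enum.
static void checkRangesAgree() {
  SmallVector<MVT, 64> Dedicated;
  for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
    Dedicated.push_back(VT);

  SmallVector<MVT, 64> Filtered;
  for (MVT VT : MVT::integer_vector_valuetypes())
    if (!VT.isScalableVector())
      Filtered.push_back(VT);

  assert(Dedicated.size() == Filtered.size() &&
         "fixed-length range should match the filtered full range");
  for (unsigned I = 0, E = Dedicated.size(); I != E; ++I)
    assert(Dedicated[I] == Filtered[I] && "ranges must visit the same types");
}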
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.h b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
@@ -228,7 +228,7 @@
   }
 
   bool isHVXVectorType(MVT VecTy, bool IncludeBool = false) const {
-    if (!VecTy.isVector() || !useHVXOps())
+    if (!VecTy.isVector() || !useHVXOps() || VecTy.isScalableVector())
       return false;
     MVT ElemTy = VecTy.getVectorElementType();
     if (!IncludeBool && ElemTy == MVT::i1)
diff --git a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
--- a/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonTargetTransformInfo.cpp
@@ -45,6 +45,8 @@
 bool HexagonTTIImpl::isTypeForHVX(Type *VecTy) const {
   assert(VecTy->isVectorTy());
+  if (cast<VectorType>(VecTy)->isScalable())
+    return false;
   // Avoid types like <2 x i32*>.
   if (!cast<VectorType>(VecTy)->getElementType()->isIntegerTy())
     return false;
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -330,7 +330,7 @@
   }
 
   // Set LoadExtAction for f16 vectors to Expand
-  for (MVT VT : MVT::fp_vector_valuetypes()) {
+  for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
     MVT F16VT = MVT::getVectorVT(MVT::f16, VT.getVectorNumElements());
     if (F16VT.isValid())
       setLoadExtAction(ISD::EXTLOAD, VT, F16VT, Expand);
diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
--- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -72,7 +72,12 @@
   if (Subtarget.hasDSP() || Subtarget.hasMSA()) {
     // Expand all truncating stores and extending loads.
     for (MVT VT0 : MVT::vector_valuetypes()) {
+      // Scalable vectors aren't supported on this backend.
+      if (VT0.isScalableVector())
+        continue;
      for (MVT VT1 : MVT::vector_valuetypes()) {
+        if (VT1.isScalableVector())
+          continue;
        setTruncStoreAction(VT0, VT1, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT0, VT1, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT0, VT1, Expand);
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -554,6 +554,10 @@
   // First set operation action for all vector types to expand. Then we
   // will selectively turn on ones that can be effectively codegen'd.
   for (MVT VT : MVT::vector_valuetypes()) {
+    // Scalable vectors aren't supported on this backend.
+    if (VT.isScalableVector())
+      continue;
+
     // add/sub are legal for all supported vector VT's.
     setOperationAction(ISD::ADD, VT, Legal);
     setOperationAction(ISD::SUB, VT, Legal);
@@ -647,6 +651,9 @@
     setOperationAction(ISD::ROTR, VT, Expand);
 
     for (MVT InnerVT : MVT::vector_valuetypes()) {
+      // Scalable vectors aren't supported on this backend.
+      if (InnerVT.isScalableVector())
+        continue;
       setTruncStoreAction(VT, InnerVT, Expand);
       setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
       setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1438,7 +1438,7 @@
       setOperationAction(Op, MVT::v2i32, Expand);
     }
     // Truncating/extending stores/loads are also not supported.
-    for (MVT VT : MVT::integer_vector_valuetypes()) {
+    for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
       setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
       setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
       setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -295,6 +295,10 @@
   setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
 
   for (MVT VT : MVT::vector_valuetypes()) {
+    // Scalable vectors aren't supported on this backend.
+    if (VT.isScalableVector())
+      continue;
+
     // Assume by default that all vector operations need to be expanded.
     for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
       if (getOperationAction(Opcode, VT) == Legal)
@@ -302,6 +306,8 @@
 
     // Likewise all truncating stores and extending loads.
     for (MVT InnerVT : MVT::vector_valuetypes()) {
+      if (InnerVT.isScalableVector())
+        continue;
       setTruncStoreAction(VT, InnerVT, Expand);
       setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
       setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -205,7 +205,7 @@
     for (auto T : {MVT::i8, MVT::i16, MVT::i32})
       setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
   }
-  for (auto T : MVT::integer_vector_valuetypes())
+  for (auto T : MVT::integer_fixedlen_vector_valuetypes())
     setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);
 
   // Dynamic stack allocation: use the default expansion.
@@ -238,6 +238,9 @@
   for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
                  MVT::v2f64}) {
     for (auto MemT : MVT::vector_valuetypes()) {
+      // Scalable vectors aren't supported on this backend.
+      if (MemT.isScalableVector())
+        continue;
      if (MVT(T) != MemT) {
        setTruncStoreAction(T, MemT, Expand);
        for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -711,6 +711,10 @@
   // (for widening) or expand (for scalarization). Then we will selectively
   // turn on ones that can be effectively codegen'd.
   for (MVT VT : MVT::vector_valuetypes()) {
+    // Scalable vectors aren't supported on this backend.
+    if (VT.isScalableVector())
+      continue;
+
     setOperationAction(ISD::SDIV, VT, Expand);
     setOperationAction(ISD::UDIV, VT, Expand);
     setOperationAction(ISD::SREM, VT, Expand);
@@ -749,6 +753,9 @@
     setOperationAction(ISD::ANY_EXTEND, VT, Expand);
     setOperationAction(ISD::SELECT_CC, VT, Expand);
     for (MVT InnerVT : MVT::vector_valuetypes()) {
+      if (InnerVT.isScalableVector())
+        continue;
+
       setTruncStoreAction(InnerVT, VT, Expand);
       setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
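The common thread in the backend hunks: either skip scalable types inside a vector_valuetypes() loop, or switch the loop to one of the new *_fixedlen_* ranges. Below is a standalone sketch, assuming this revision's headers and not part of the patch, of the invariant the dedicated ranges are intended to provide.

#include "llvm/Support/MachineValueType.h"
#include <cassert>

int main() {
  using namespace llvm;

  // Every type in the new integer range is a fixed-length integer vector.
  for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
    assert(VT.isVector() && VT.isInteger() && !VT.isScalableVector());

  // Likewise, the new FP range contains only fixed-length FP vectors.
  for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
    assert(VT.isVector() && VT.isFloatingPoint() && !VT.isScalableVector());

  return 0;
}

Backends with no scalable-vector support can iterate these ranges directly and drop the explicit isScalableVector() guards.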