Index: clang/include/clang/Basic/AArch64SVEACLETypes.def
===================================================================
--- clang/include/clang/Basic/AArch64SVEACLETypes.def
+++ clang/include/clang/Basic/AArch64SVEACLETypes.def
@@ -63,6 +63,57 @@
 SVE_VECTOR_TYPE("__SVFloat32_t", SveFloat32, SveFloat32Ty, 4, 32, true, true)
 SVE_VECTOR_TYPE("__SVFloat64_t", SveFloat64, SveFloat64Ty, 2, 64, true, true)
 
+//
+// x2
+//
+SVE_VECTOR_TYPE("__SVInt8x2_t", SveInt8x2, SveInt8x2Ty, 32, 8, true, false)
+SVE_VECTOR_TYPE("__SVInt16x2_t", SveInt16x2, SveInt16x2Ty, 16, 16, true, false)
+SVE_VECTOR_TYPE("__SVInt32x2_t", SveInt32x2, SveInt32x2Ty, 8, 32, true, false)
+SVE_VECTOR_TYPE("__SVInt64x2_t", SveInt64x2, SveInt64x2Ty, 4, 64, true, false)
+
+SVE_VECTOR_TYPE("__SVUint8x2_t", SveUint8x2, SveUint8x2Ty, 32, 8, false, false)
+SVE_VECTOR_TYPE("__SVUint16x2_t", SveUint16x2, SveUint16x2Ty, 16, 16, false, false)
+SVE_VECTOR_TYPE("__SVUint32x2_t", SveUint32x2, SveUint32x2Ty, 8, 32, false, false)
+SVE_VECTOR_TYPE("__SVUint64x2_t", SveUint64x2, SveUint64x2Ty, 4, 64, false, false)
+
+SVE_VECTOR_TYPE("__SVFloat16x2_t", SveFloat16x2, SveFloat16x2Ty, 16, 16, true, true)
+SVE_VECTOR_TYPE("__SVFloat32x2_t", SveFloat32x2, SveFloat32x2Ty, 8, 32, true, true)
+SVE_VECTOR_TYPE("__SVFloat64x2_t", SveFloat64x2, SveFloat64x2Ty, 4, 64, true, true)
+
+//
+// x3
+//
+SVE_VECTOR_TYPE("__SVInt8x3_t", SveInt8x3, SveInt8x3Ty, 48, 8, true, false)
+SVE_VECTOR_TYPE("__SVInt16x3_t", SveInt16x3, SveInt16x3Ty, 24, 16, true, false)
+SVE_VECTOR_TYPE("__SVInt32x3_t", SveInt32x3, SveInt32x3Ty, 12, 32, true, false)
+SVE_VECTOR_TYPE("__SVInt64x3_t", SveInt64x3, SveInt64x3Ty, 6, 64, true, false)
+
+SVE_VECTOR_TYPE("__SVUint8x3_t", SveUint8x3, SveUint8x3Ty, 48, 8, false, false)
+SVE_VECTOR_TYPE("__SVUint16x3_t", SveUint16x3, SveUint16x3Ty, 24, 16, false, false)
+SVE_VECTOR_TYPE("__SVUint32x3_t", SveUint32x3, SveUint32x3Ty, 12, 32, false, false)
+SVE_VECTOR_TYPE("__SVUint64x3_t", SveUint64x3, SveUint64x3Ty, 6, 64, false, false)
+
+SVE_VECTOR_TYPE("__SVFloat16x3_t", SveFloat16x3, SveFloat16x3Ty, 24, 16, true, true)
+SVE_VECTOR_TYPE("__SVFloat32x3_t", SveFloat32x3, SveFloat32x3Ty, 12, 32, true, true)
+SVE_VECTOR_TYPE("__SVFloat64x3_t", SveFloat64x3, SveFloat64x3Ty, 6, 64, true, true)
+
+//
+// x4
+//
+SVE_VECTOR_TYPE("__SVInt8x4_t", SveInt8x4, SveInt8x4Ty, 64, 8, true, false)
+SVE_VECTOR_TYPE("__SVInt16x4_t", SveInt16x4, SveInt16x4Ty, 32, 16, true, false)
+SVE_VECTOR_TYPE("__SVInt32x4_t", SveInt32x4, SveInt32x4Ty, 16, 32, true, false)
+SVE_VECTOR_TYPE("__SVInt64x4_t", SveInt64x4, SveInt64x4Ty, 8, 64, true, false)
+
+SVE_VECTOR_TYPE("__SVUint8x4_t", SveUint8x4, SveUint8x4Ty, 64, 8, false, false)
+SVE_VECTOR_TYPE("__SVUint16x4_t", SveUint16x4, SveUint16x4Ty, 32, 16, false, false)
+SVE_VECTOR_TYPE("__SVUint32x4_t", SveUint32x4, SveUint32x4Ty, 16, 32, false, false)
+SVE_VECTOR_TYPE("__SVUint64x4_t", SveUint64x4, SveUint64x4Ty, 8, 64, false, false)
+
+SVE_VECTOR_TYPE("__SVFloat16x4_t", SveFloat16x4, SveFloat16x4Ty, 32, 16, true, true)
+SVE_VECTOR_TYPE("__SVFloat32x4_t", SveFloat32x4, SveFloat32x4Ty, 16, 32, true, true)
+SVE_VECTOR_TYPE("__SVFloat64x4_t", SveFloat64x4, SveFloat64x4Ty, 8, 64, true, true)
+
 SVE_PREDICATE_TYPE("__SVBool_t", SveBool, SveBoolTy, 16)
 
 #undef SVE_VECTOR_TYPE
Index: clang/include/clang/Basic/TargetBuiltins.h
===================================================================
--- clang/include/clang/Basic/TargetBuiltins.h
+++ clang/include/clang/Basic/TargetBuiltins.h
@@ -246,6 +246,7 @@
   bool isInsertOp1SVALL() const { return Flags & IsInsertOp1SVALL; }
   bool isGatherPrefetch() const { return Flags & IsGatherPrefetch; }
   bool isReverseUSDOT() const { return Flags & ReverseUSDOT; }
+  bool isUndef() const { return Flags & IsUndef; }
 
   uint64_t getBits() const { return Flags; }
   bool isFlagSet(uint64_t Flag) const { return Flags & Flag; }
Index: clang/include/clang/Basic/arm_sve.td
===================================================================
--- clang/include/clang/Basic/arm_sve.td
+++ clang/include/clang/Basic/arm_sve.td
@@ -58,6 +58,7 @@
 // -------------------
 // prototype: return (arg, arg, ...)
 //
+// 2,3,4: array of default vectors
 // v: void
 // x: vector of signed integers
 // u: vector of unsigned integers
@@ -198,6 +199,7 @@
 def IsGatherPrefetch : FlagType<0x10000000>;
 def ReverseCompare : FlagType<0x20000000>; // Compare operands must be swapped.
 def ReverseUSDOT : FlagType<0x40000000>; // Unsigned/signed operands must be swapped.
+def IsUndef : FlagType<0x80000000>; // Codegen `undef` of given type.
 
 // These must be kept in sync with the flags in include/clang/Basic/TargetBuiltins.h
 class ImmCheckType<int val> {
@@ -1264,6 +1266,12 @@
 def SVMLLA_F64 : SInst<"svmmla[_f64]", "dddd","d", MergeNone, "aarch64_sve_fmmla">;
 }
 
+////////////////////////////////////////////////////////////////////////////////
+// Vector creation
+def SVUNDEF_1 : SInst<"svundef_{d}", "d", "csilUcUsUiUlhfd", MergeNone, "", [IsUndef]>;
+def SVUNDEF_2 : SInst<"svundef2_{d}", "2", "csilUcUsUiUlhfd", MergeNone, "", [IsUndef]>;
+def SVUNDEF_3 : SInst<"svundef3_{d}", "3", "csilUcUsUiUlhfd", MergeNone, "", [IsUndef]>;
+def SVUNDEF_4 : SInst<"svundef4_{d}", "4", "csilUcUsUiUlhfd", MergeNone, "", [IsUndef]>;
 
 ////////////////////////////////////////////////////////////////////////////////
 // SVE2 WhileGE/GT
Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -8036,6 +8036,8 @@
     return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
   else if (TypeFlags.isGatherPrefetch())
     return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+  else if (TypeFlags.isUndef())
+    return UndefValue::get(Ty);
   else if (Builtin->LLVMIntrinsic != 0) {
     if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
       InsertExplicitZeroOperand(Builder, Ty, Ops);
Index: clang/lib/CodeGen/CodeGenTypes.cpp
===================================================================
--- clang/lib/CodeGen/CodeGenTypes.cpp
+++ clang/lib/CodeGen/CodeGenTypes.cpp
@@ -533,44 +533,91 @@
   case BuiltinType::OCLReserveID:
     ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
     break;
+#define GET_SVE_INT_VEC(BITS, ELTS)                                            \
+  llvm::ScalableVectorType::get(                                               \
+      llvm::IntegerType::get(getLLVMContext(), BITS), ELTS);
   case BuiltinType::SveInt8:
   case BuiltinType::SveUint8:
-    return llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8),
-                                 {16, true});
+    return GET_SVE_INT_VEC(8, 16);
+  case BuiltinType::SveInt8x2:
+  case BuiltinType::SveUint8x2:
+    return GET_SVE_INT_VEC(8, 32);
+  case BuiltinType::SveInt8x3:
+  case BuiltinType::SveUint8x3:
+    return GET_SVE_INT_VEC(8, 48);
+  case BuiltinType::SveInt8x4:
+  case BuiltinType::SveUint8x4:
+    return GET_SVE_INT_VEC(8, 64);
   case BuiltinType::SveInt16:
   case BuiltinType::SveUint16:
-    return llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16),
-                                 {8, true});
+    return GET_SVE_INT_VEC(16, 8);
+  case BuiltinType::SveInt16x2:
+  case BuiltinType::SveUint16x2:
+    return GET_SVE_INT_VEC(16, 16);
+  case BuiltinType::SveInt16x3:
+  case BuiltinType::SveUint16x3:
+    return GET_SVE_INT_VEC(16, 24);
+  case BuiltinType::SveInt16x4:
+  case BuiltinType::SveUint16x4:
+    return GET_SVE_INT_VEC(16, 32);
   case BuiltinType::SveInt32:
   case BuiltinType::SveUint32:
-    return llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 32),
-                                 {4, true});
+    return GET_SVE_INT_VEC(32, 4);
+  case BuiltinType::SveInt32x2:
+  case BuiltinType::SveUint32x2:
+    return GET_SVE_INT_VEC(32, 8);
+  case BuiltinType::SveInt32x3:
+  case BuiltinType::SveUint32x3:
+    return GET_SVE_INT_VEC(32, 12);
+  case BuiltinType::SveInt32x4:
+  case BuiltinType::SveUint32x4:
+    return GET_SVE_INT_VEC(32, 16);
   case BuiltinType::SveInt64:
   case BuiltinType::SveUint64:
-    return llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 64),
-                                 {2, true});
+    return GET_SVE_INT_VEC(64, 2);
+  case BuiltinType::SveInt64x2:
+  case BuiltinType::SveUint64x2:
+    return GET_SVE_INT_VEC(64, 4);
+  case BuiltinType::SveInt64x3:
+  case BuiltinType::SveUint64x3:
+    return GET_SVE_INT_VEC(64, 6);
+  case BuiltinType::SveInt64x4:
+  case BuiltinType::SveUint64x4:
+    return GET_SVE_INT_VEC(64, 8);
+  case BuiltinType::SveBool:
+    return GET_SVE_INT_VEC(1, 16);
+#undef GET_SVE_INT_VEC
+#define GET_SVE_FP_VEC(TY, ISFP16, ELTS)                                       \
+  llvm::ScalableVectorType::get(                                               \
+      getTypeForFormat(getLLVMContext(),                                       \
+                       Context.getFloatTypeSemantics(Context.TY),              \
+                       /* UseNativeHalf = */ ISFP16),                          \
+      ELTS);
   case BuiltinType::SveFloat16:
-    return llvm::VectorType::get(
-        getTypeForFormat(getLLVMContext(),
-                         Context.getFloatTypeSemantics(Context.HalfTy),
-                         /* UseNativeHalf = */ true),
-        {8, true});
+    return GET_SVE_FP_VEC(HalfTy, true, 8);
+  case BuiltinType::SveFloat16x2:
+    return GET_SVE_FP_VEC(HalfTy, true, 16);
+  case BuiltinType::SveFloat16x3:
+    return GET_SVE_FP_VEC(HalfTy, true, 24);
+  case BuiltinType::SveFloat16x4:
+    return GET_SVE_FP_VEC(HalfTy, true, 32);
   case BuiltinType::SveFloat32:
-    return llvm::VectorType::get(
-        getTypeForFormat(getLLVMContext(),
-                         Context.getFloatTypeSemantics(Context.FloatTy),
-                         /* UseNativeHalf = */ false),
-        {4, true});
+    return GET_SVE_FP_VEC(FloatTy, false, 4);
+  case BuiltinType::SveFloat32x2:
+    return GET_SVE_FP_VEC(FloatTy, false, 8);
+  case BuiltinType::SveFloat32x3:
+    return GET_SVE_FP_VEC(FloatTy, false, 12);
+  case BuiltinType::SveFloat32x4:
+    return GET_SVE_FP_VEC(FloatTy, false, 16);
   case BuiltinType::SveFloat64:
-    return llvm::VectorType::get(
-        getTypeForFormat(getLLVMContext(),
-                         Context.getFloatTypeSemantics(Context.DoubleTy),
-                         /* UseNativeHalf = */ false),
-        {2, true});
-  case BuiltinType::SveBool:
-    return llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 1),
-                                 {16, true});
-    break;
+    return GET_SVE_FP_VEC(DoubleTy, false, 2);
+  case BuiltinType::SveFloat64x2:
+    return GET_SVE_FP_VEC(DoubleTy, false, 4);
+  case BuiltinType::SveFloat64x3:
+    return GET_SVE_FP_VEC(DoubleTy, false, 6);
+  case BuiltinType::SveFloat64x4:
+    return GET_SVE_FP_VEC(DoubleTy, false, 8);
+#undef GET_SVE_FP_VEC
   case BuiltinType::Dependent:
 #define BUILTIN_TYPE(Id, SingletonId)
 #define PLACEHOLDER_TYPE(Id, SingletonId) \
   case BuiltinType::Id:
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_undef.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_undef.c
@@ -0,0 +1,80 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+svint8_t test_svundef_s8()
+{
+  // CHECK-LABEL: test_svundef_s8
+  // CHECK: ret <vscale x 16 x i8> undef
+  return svundef_s8();
+}
+
+svint16_t test_svundef_s16()
+{
+  // CHECK-LABEL: test_svundef_s16
+  // CHECK: ret <vscale x 8 x i16> undef
+  return svundef_s16();
+}
+
+svint32_t test_svundef_s32()
+{
+  // CHECK-LABEL: test_svundef_s32
+  // CHECK: ret <vscale x 4 x i32> undef
+  return svundef_s32();
+}
+
+svint64_t test_svundef_s64()
+{
+  // CHECK-LABEL: test_svundef_s64
+  // CHECK: ret <vscale x 2 x i64> undef
+  return svundef_s64();
+}
+
+svuint8_t test_svundef_u8()
+{
+  // CHECK-LABEL: test_svundef_u8
+  // CHECK: ret <vscale x 16 x i8> undef
+  return svundef_u8();
+}
+
+svuint16_t test_svundef_u16()
+{
+  // CHECK-LABEL: test_svundef_u16
+  // CHECK: ret <vscale x 8 x i16> undef
+  return svundef_u16();
+}
+
+svuint32_t test_svundef_u32()
+{
+  // CHECK-LABEL: test_svundef_u32
+  // CHECK: ret <vscale x 4 x i32> undef
+  return svundef_u32();
+}
+
+svuint64_t test_svundef_u64()
+{
+  // CHECK-LABEL: test_svundef_u64
+  // CHECK: ret <vscale x 2 x i64> undef
+  return svundef_u64();
+}
+
+svfloat16_t test_svundef_f16()
+{
+  // CHECK-LABEL: test_svundef_f16
+  // CHECK: ret <vscale x 8 x half> undef
+  return svundef_f16();
+}
+
+svfloat32_t test_svundef_f32()
+{
+  // CHECK-LABEL: test_svundef_f32
+  // CHECK: ret <vscale x 4 x float> undef
+  return svundef_f32();
+}
+
+svfloat64_t test_svundef_f64()
+{
+  // CHECK-LABEL: test_svundef_f64
+  // CHECK: ret <vscale x 2 x double> undef
+  return svundef_f64();
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_undef2.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_undef2.c
@@ -0,0 +1,80 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O2 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+svint8x2_t test_svundef2_s8()
+{
+  // CHECK-LABEL: test_svundef2_s8
+  // CHECK: ret <vscale x 32 x i8> undef
+  return svundef2_s8();
+}
+
+svint16x2_t test_svundef2_s16()
+{
+  // CHECK-LABEL: test_svundef2_s16
+  // CHECK: ret <vscale x 16 x i16> undef
+  return svundef2_s16();
+}
+
+svint32x2_t test_svundef2_s32()
+{
+  // CHECK-LABEL: test_svundef2_s32
+  // CHECK: ret <vscale x 8 x i32> undef
+  return svundef2_s32();
+}
+
+svint64x2_t test_svundef2_s64()
+{
+  // CHECK-LABEL: test_svundef2_s64
+  // CHECK: ret <vscale x 4 x i64> undef
+  return svundef2_s64();
+}
+
+svuint8x2_t test_svundef2_u8()
+{
+  // CHECK-LABEL: test_svundef2_u8
+  // CHECK: ret <vscale x 32 x i8> undef
+  return svundef2_u8();
+}
+
+svuint16x2_t test_svundef2_u16()
+{
+  // CHECK-LABEL: test_svundef2_u16
+  // CHECK: ret <vscale x 16 x i16> undef
+  return svundef2_u16();
+}
+
+svuint32x2_t test_svundef2_u32()
+{
+  // CHECK-LABEL: test_svundef2_u32
+  // CHECK: ret <vscale x 8 x i32> undef
+  return svundef2_u32();
+}
+
+svuint64x2_t test_svundef2_u64()
+{
+  // CHECK-LABEL: test_svundef2_u64
+  // CHECK: ret <vscale x 4 x i64> undef
+  return svundef2_u64();
+}
+
+svfloat16x2_t test_svundef2_f16()
+{
+  // CHECK-LABEL: test_svundef2_f16
+  // CHECK: ret <vscale x 16 x half> undef
+  return svundef2_f16();
+}
+
+svfloat32x2_t test_svundef2_f32()
+{
+  // CHECK-LABEL: test_svundef2_f32
+  // CHECK: ret <vscale x 8 x float> undef
+  return svundef2_f32();
+}
+
+svfloat64x2_t test_svundef2_f64()
+{
+  // CHECK-LABEL: test_svundef2_f64
+  // CHECK: ret <vscale x 4 x double> undef
+  return svundef2_f64();
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_undef3.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_undef3.c
@@ -0,0 +1,80 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O2 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+svint8x3_t test_svundef3_s8()
+{
+  // CHECK-LABEL: test_svundef3_s8
+  // CHECK: ret <vscale x 48 x i8> undef
+  return svundef3_s8();
+}
+
+svint16x3_t test_svundef3_s16()
+{
+  // CHECK-LABEL: test_svundef3_s16
+  // CHECK: ret <vscale x 24 x i16> undef
+  return svundef3_s16();
+}
+
+svint32x3_t test_svundef3_s32()
+{
+  // CHECK-LABEL: test_svundef3_s32
+  // CHECK: ret <vscale x 12 x i32> undef
+  return svundef3_s32();
+}
+
+svint64x3_t test_svundef3_s64()
+{
+  // CHECK-LABEL: test_svundef3_s64
+  // CHECK: ret <vscale x 6 x i64> undef
+  return svundef3_s64();
+}
+
+svuint8x3_t test_svundef3_u8()
+{
+  // CHECK-LABEL: test_svundef3_u8
+  // CHECK: ret <vscale x 48 x i8> undef
+  return svundef3_u8();
+}
+
+svuint16x3_t test_svundef3_u16()
+{
+  // CHECK-LABEL: test_svundef3_u16
+  // CHECK: ret <vscale x 24 x i16> undef
+  return svundef3_u16();
+}
+
+svuint32x3_t test_svundef3_u32()
+{
+  // CHECK-LABEL: test_svundef3_u32
+  // CHECK: ret <vscale x 12 x i32> undef
+  return svundef3_u32();
+}
+
+svuint64x3_t test_svundef3_u64()
+{
+  // CHECK-LABEL: test_svundef3_u64
+  // CHECK: ret <vscale x 6 x i64> undef
+  return svundef3_u64();
+}
+
+svfloat16x3_t test_svundef3_f16()
+{
+  // CHECK-LABEL: test_svundef3_f16
+  // CHECK: ret <vscale x 24 x half> undef
+  return svundef3_f16();
+}
+
+svfloat32x3_t test_svundef3_f32()
+{
+  // CHECK-LABEL: test_svundef3_f32
+  // CHECK: ret <vscale x 12 x float> undef
+  return svundef3_f32();
+}
+
+svfloat64x3_t test_svundef3_f64()
+{
+  // CHECK-LABEL: test_svundef3_f64
+  // CHECK: ret <vscale x 6 x double> undef
+  return svundef3_f64();
+}
Index: clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_undef4.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_undef4.c
@@ -0,0 +1,80 @@
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O2 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+svint8x4_t test_svundef4_s8()
+{
+  // CHECK-LABEL: test_svundef4_s8
+  // CHECK: ret <vscale x 64 x i8> undef
+  return svundef4_s8();
+}
+
+svint16x4_t test_svundef4_s16()
+{
+  // CHECK-LABEL: test_svundef4_s16
+  // CHECK: ret <vscale x 32 x i16> undef
+  return svundef4_s16();
+}
+
+svint32x4_t test_svundef4_s32()
+{
+  // CHECK-LABEL: test_svundef4_s32
+  // CHECK: ret <vscale x 16 x i32> undef
+  return svundef4_s32();
+}
+
+svint64x4_t test_svundef4_s64()
+{
+  // CHECK-LABEL: test_svundef4_s64
+  // CHECK: ret <vscale x 8 x i64> undef
+  return svundef4_s64();
+}
+
+svuint8x4_t test_svundef4_u8()
+{
+  // CHECK-LABEL: test_svundef4_u8
+  // CHECK: ret <vscale x 64 x i8> undef
+  return svundef4_u8();
+}
+
+svuint16x4_t test_svundef4_u16()
+{
+  // CHECK-LABEL: test_svundef4_u16
+  // CHECK: ret <vscale x 32 x i16> undef
+  return svundef4_u16();
+}
+
+svuint32x4_t test_svundef4_u32()
+{
+  // CHECK-LABEL: test_svundef4_u32
+  // CHECK: ret <vscale x 16 x i32> undef
+  return svundef4_u32();
+}
+
+svuint64x4_t test_svundef4_u64()
+{
+  // CHECK-LABEL: test_svundef4_u64
+  // CHECK: ret <vscale x 8 x i64> undef
+  return svundef4_u64();
+}
+
+svfloat16x4_t test_svundef4_f16()
+{
+  // CHECK-LABEL: test_svundef4_f16
+  // CHECK: ret <vscale x 32 x half> undef
+  return svundef4_f16();
+}
+
+svfloat32x4_t test_svundef4_f32()
+{
+  // CHECK-LABEL: test_svundef4_f32
+  // CHECK: ret <vscale x 16 x float> undef
+  return svundef4_f32();
+}
+
+svfloat64x4_t test_svundef4_f64()
+{
+  // CHECK-LABEL: test_svundef4_f64
+  // CHECK: ret <vscale x 8 x double> undef
+  return svundef4_f64();
+}
Index: clang/utils/TableGen/SveEmitter.cpp
===================================================================
--- clang/utils/TableGen/SveEmitter.cpp
+++ clang/utils/TableGen/SveEmitter.cpp
@@ -490,6 +490,15 @@
 
 void SVEType::applyModifier(char Mod) {
   switch (Mod) {
+  case '2':
+    NumVectors = 2;
+    break;
+  case '3':
+    NumVectors = 3;
+    break;
+  case '4':
+    NumVectors = 4;
+    break;
   case 'v':
     Void = true;
     break;
@@ -801,18 +810,7 @@
 }
 
 std::string Intrinsic::getBuiltinTypeStr() {
-  std::string S;
-
-  SVEType RetT = getReturnType();
-  // Since the return value must be one type, return a vector type of the
-  // appropriate width which we will bitcast. An exception is made for
-  // returning structs of 2, 3, or 4 vectors which are returned in a sret-like
-  // fashion, storing them to a pointer arg.
-  if (RetT.getNumVectors() > 1) {
-    S += "vv*"; // void result with void* first argument
-  } else
-    S += RetT.builtin_str();
-
+  std::string S = getReturnType().builtin_str();
   for (unsigned I = 0; I < getNumParams(); ++I)
     S += getParamType(I).builtin_str();
 
@@ -1071,6 +1069,39 @@
   OS << "typedef __SVFloat16_t svfloat16_t;\n";
   OS << "typedef __SVFloat32_t svfloat32_t;\n";
   OS << "typedef __SVFloat64_t svfloat64_t;\n";
+  OS << "typedef __SVInt8x2_t svint8x2_t;\n";
+  OS << "typedef __SVInt16x2_t svint16x2_t;\n";
+  OS << "typedef __SVInt32x2_t svint32x2_t;\n";
+  OS << "typedef __SVInt64x2_t svint64x2_t;\n";
+  OS << "typedef __SVUint8x2_t svuint8x2_t;\n";
+  OS << "typedef __SVUint16x2_t svuint16x2_t;\n";
+  OS << "typedef __SVUint32x2_t svuint32x2_t;\n";
+  OS << "typedef __SVUint64x2_t svuint64x2_t;\n";
+  OS << "typedef __SVFloat16x2_t svfloat16x2_t;\n";
+  OS << "typedef __SVFloat32x2_t svfloat32x2_t;\n";
+  OS << "typedef __SVFloat64x2_t svfloat64x2_t;\n";
+  OS << "typedef __SVInt8x3_t svint8x3_t;\n";
+  OS << "typedef __SVInt16x3_t svint16x3_t;\n";
+  OS << "typedef __SVInt32x3_t svint32x3_t;\n";
+  OS << "typedef __SVInt64x3_t svint64x3_t;\n";
+  OS << "typedef __SVUint8x3_t svuint8x3_t;\n";
+  OS << "typedef __SVUint16x3_t svuint16x3_t;\n";
+  OS << "typedef __SVUint32x3_t svuint32x3_t;\n";
+  OS << "typedef __SVUint64x3_t svuint64x3_t;\n";
+  OS << "typedef __SVFloat16x3_t svfloat16x3_t;\n";
+  OS << "typedef __SVFloat32x3_t svfloat32x3_t;\n";
+  OS << "typedef __SVFloat64x3_t svfloat64x3_t;\n";
+  OS << "typedef __SVInt8x4_t svint8x4_t;\n";
+  OS << "typedef __SVInt16x4_t svint16x4_t;\n";
+  OS << "typedef __SVInt32x4_t svint32x4_t;\n";
+  OS << "typedef __SVInt64x4_t svint64x4_t;\n";
+  OS << "typedef __SVUint8x4_t svuint8x4_t;\n";
+  OS << "typedef __SVUint16x4_t svuint16x4_t;\n";
+  OS << "typedef __SVUint32x4_t svuint32x4_t;\n";
+  OS << "typedef __SVUint64x4_t svuint64x4_t;\n";
+  OS << "typedef __SVFloat16x4_t svfloat16x4_t;\n";
+  OS << "typedef __SVFloat32x4_t svfloat32x4_t;\n";
+  OS << "typedef __SVFloat64x4_t svfloat64x4_t;\n";
 OS << "typedef __SVBool_t svbool_t;\n\n";
 
   OS << "typedef enum\n";