diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td --- a/clang/include/clang/Basic/arm_sve.td +++ b/clang/include/clang/Basic/arm_sve.td @@ -71,6 +71,7 @@ // e: 1/2 width unsigned elements, 2x element count // h: 1/2 width elements, 2x element count // q: 1/4 width elements, 4x element count +// b: 1/4 width elements, 4x element count, integer, reverse sign // o: 4x width elements, 1/4 element count // // w: vector of element type promoted to 64bits, vector maintains @@ -1223,6 +1224,21 @@ def SVQINCP_N_U32 : SInst<"svqincp[_n_u32]_{d}", "mmP", "PcPsPiPl", MergeNone, "aarch64_sve_uqincp_n32">; def SVQINCP_N_U64 : SInst<"svqincp[_n_u64]_{d}", "nnP", "PcPsPiPl", MergeNone, "aarch64_sve_uqincp_n64">; +let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_INT8)" in { +def SVMLLA_S32 : SInst<"svmmla[_s32]", "ddqq","i", MergeNone, "aarch64_sve_smmla">; +def SVMLLA_U32 : SInst<"svmmla[_u32]", "ddqq","Ui", MergeNone, "aarch64_sve_ummla">; +def SVUSMLLA_S32 : SInst<"svusmmla[_s32]", "ddbq","i", MergeNone, "aarch64_sve_usmmla">; +} + +let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_FP32)" in { +def SVMLLA_F32 : SInst<"svmmla[_f32]", "dddd","f", MergeNone, "aarch64_sve_mmla">; +} + +let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_FP64)" in { +def SVMLLA_F64 : SInst<"svmmla[_f64]", "dddd","d", MergeNone, "aarch64_sve_mmla">; +} + + //////////////////////////////////////////////////////////////////////////////// // SVE2 WhileGE/GT let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in { diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -7985,8 +7985,8 @@ } } - auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID, - AArch64SVEIntrinsicsProvenSorted); + const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap( + AArch64SVEIntrinsicMap, BuiltinID, AArch64SVEIntrinsicsProvenSorted); SVETypeFlags 
TypeFlags(Builtin->TypeModifier); if (TypeFlags.isLoad()) return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic, diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_matmul_fp32.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_matmul_fp32.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_matmul_fp32.c @@ -0,0 +1,25 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE_MATMUL_FP32 \ +// RUN: -triple aarch64-none-linux-gnu -target-feature +sve \ +// RUN: -fallow-half-arguments-and-returns -S -O1 -Werror -Wall \ +// RUN: -emit-llvm -o - %s | FileCheck %s + +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE_MATMUL_FP32 \ +// RUN: -triple aarch64-none-linux-gnu -target-feature +sve \ +// RUN: -fallow-half-arguments-and-returns -S -O1 -Werror -Wall \ +// RUN: -emit-llvm -o - %s -DSVE_OVERLOADED_FORMS | FileCheck %s + +#include <arm_sve.h> + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4 +#endif + +svfloat32_t test_svmmla_f32(svfloat32_t x, svfloat32_t y, svfloat32_t z) { + // CHECK-LABEL: test_svmmla_f32 + // CHECK: %[[RET:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.mmla.nxv4f32(<vscale x 4 x float> %x, <vscale x 4 x float> %y, <vscale x 4 x float> %z) + // CHECK: ret <vscale x 4 x float> %[[RET]] + return SVE_ACLE_FUNC(svmmla, _f32, , )(x, y, z); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_matmul_fp64.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_matmul_fp64.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_matmul_fp64.c @@ -0,0 +1,25 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE_MATMUL_FP64 \ +// RUN: -triple aarch64-none-linux-gnu -target-feature +sve \ +// RUN: -fallow-half-arguments-and-returns -S -O1 -Werror -Wall \ +// RUN: -emit-llvm -o - %s | FileCheck %s + +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE_MATMUL_FP64 \ +// RUN: -triple aarch64-none-linux-gnu -target-feature +sve \ +// RUN: -fallow-half-arguments-and-returns -S -O1 -Werror -Wall \ +// RUN: -emit-llvm -o - %s -DSVE_OVERLOADED_FORMS | FileCheck %s + +#include <arm_sve.h> + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4 +#endif + +svfloat64_t test_svmmla_f64(svfloat64_t x, svfloat64_t y, svfloat64_t z) { + // CHECK-LABEL: test_svmmla_f64 + // CHECK: %[[RET:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.mmla.nxv2f64(<vscale x 2 x double> %x, <vscale x 2 x double> %y, <vscale x 2 x double> %z) + // CHECK: ret <vscale x 2 x double> %[[RET]] + return SVE_ACLE_FUNC(svmmla, _f64, , )(x, y, z); +} diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_mmla.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_mmla.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_mmla.c @@ -0,0 +1,39 @@ +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE_MATMUL_INT8 \ +// RUN: -triple aarch64-none-linux-gnu -target-feature +sve \ +// RUN: -fallow-half-arguments-and-returns -S -O1 -Werror -Wall \ +// RUN: -emit-llvm -o - %s | FileCheck %s + +// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE_MATMUL_INT8 \ +// RUN: -triple aarch64-none-linux-gnu -target-feature +sve \ +// RUN: -fallow-half-arguments-and-returns -S -O1 -Werror -Wall \ +// RUN: -emit-llvm -o - %s -DSVE_OVERLOADED_FORMS| FileCheck %s + +#include <arm_sve.h> + +#ifdef SVE_OVERLOADED_FORMS +// A simple used,unused... macro, long enough to represent any SVE builtin. 
+#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3 +#else +#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4 +#endif + +svint32_t test_svmmla_s32(svint32_t x, svint8_t y, svint8_t z) { + // CHECK-LABEL: test_svmmla_s32 + // CHECK: %[[RET:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.smmla.nxv4i32(<vscale x 4 x i32> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %z) + // CHECK: ret <vscale x 4 x i32> %[[RET]] + return SVE_ACLE_FUNC(svmmla, _s32, , )(x, y, z); +} + +svuint32_t test_svmmla_u32(svuint32_t x, svuint8_t y, svuint8_t z) { + // CHECK-LABEL: test_svmmla_u32 + // CHECK: %[[RET:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.ummla.nxv4i32(<vscale x 4 x i32> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %z) + // CHECK: ret <vscale x 4 x i32> %[[RET]] + return SVE_ACLE_FUNC(svmmla, _u32, , )(x, y, z); +} + +svint32_t test_svusmmla_s32(svint32_t x, svuint8_t y, svint8_t z) { + // CHECK-LABEL: test_svusmmla_s32 + // CHECK: %[[RET:.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.usmmla.nxv4i32(<vscale x 4 x i32> %x, <vscale x 16 x i8> %y, <vscale x 16 x i8> %z) + // CHECK: ret <vscale x 4 x i32> %[[RET]] + return SVE_ACLE_FUNC(svusmmla, _s32, , )(x, y, z); +} diff --git a/clang/utils/TableGen/SveEmitter.cpp b/clang/utils/TableGen/SveEmitter.cpp --- a/clang/utils/TableGen/SveEmitter.cpp +++ b/clang/utils/TableGen/SveEmitter.cpp @@ -513,6 +513,11 @@ case 'q': ElementBitwidth /= 4; break; + case 'b': + Float = false; + ElementBitwidth /= 4; + Signed = !Signed; + break; case 'o': ElementBitwidth *= 4; break;