diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -3164,4 +3164,21 @@
   //
   def int_aarch64_sve_add_single_x2 : SME2_VG2_Multi_Single_Intrinsic;
   def int_aarch64_sve_add_single_x4 : SME2_VG4_Multi_Single_Intrinsic;
+
+  // 2-way and 4-way multi-vector signed/unsigned integer dot-product
+  foreach ty = ["s", "u"] in {
+    foreach sz = ["za32", "za64"] in {
+      def int_aarch64_sme_ # ty # dot_single_ # sz # _vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
+      def int_aarch64_sme_ # ty # dot_single_ # sz # _vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;
+    }
+  }
+
+  foreach ty = ["su", "us"] in {
+    def int_aarch64_sme_ # ty # dot_single_za32_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
+    def int_aarch64_sme_ # ty # dot_single_za32_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;
+  }
+
+  // Multi-vector half-precision or bfloat floating-point dot-product
+  def int_aarch64_sme_fdot_single_za32_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
+  def int_aarch64_sme_fdot_single_za32_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;
 }
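For reference, the two helper classes instantiated above are defined earlier in this file; their shape is approximately the following (quoted as context, not part of this patch): an i32 ZA slice index, the two or four vectors of the multi-vector group, and the single multiplier vector, all sharing one overloaded scalable-vector type.

    class SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic
      : DefaultAttrsIntrinsic<[],
                  [llvm_i32_ty,                          // ZA slice index
                   llvm_anyvector_ty, LLVMMatchType<0>,  // two-vector group
                   LLVMMatchType<0>],                    // single multiplier
                  []>;

    class SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic
      : DefaultAttrsIntrinsic<[],
                  [llvm_i32_ty,                          // ZA slice index
                   llvm_anyvector_ty, LLVMMatchType<0>,  // four-vector group
                   LLVMMatchType<0>, LLVMMatchType<0>,
                   LLVMMatchType<0>],                    // single multiplier
                  []>;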
diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
@@ -448,15 +448,15 @@
 defm FDOT_VG2_M2ZZI_HtoS : sme2_multi_vec_array_vg2_index_32b<"fdot", 0b1001, ZZ_h_mul_r, ZPR4b16, nxv8f16, null_frag>;
 defm FDOT_VG4_M4ZZI_HtoS : sme2_multi_vec_array_vg4_index_32b<"fdot", 0b1001, ZZZZ_h_mul_r, ZPR4b16, nxv8f16, null_frag>;
-defm FDOT_VG2_M2ZZ_HtoS : sme2_dot_mla_add_sub_array_vg24_single<"fdot", 0b0010000, MatrixOp32, ZZ_h, ZPR4b16>;
-defm FDOT_VG4_M4ZZ_HtoS : sme2_dot_mla_add_sub_array_vg24_single<"fdot", 0b0110000, MatrixOp32, ZZZZ_h, ZPR4b16>;
+defm FDOT_VG2_M2ZZ_HtoS : sme2_dot_mla_add_sub_array_vg2_single<"fdot", 0b0010000, MatrixOp32, ZZ_h, ZPR4b16, nxv8f16, int_aarch64_sme_fdot_single_za32_vg1x2>;
+defm FDOT_VG4_M4ZZ_HtoS : sme2_dot_mla_add_sub_array_vg4_single<"fdot", 0b0110000, MatrixOp32, ZZZZ_h, ZPR4b16, nxv8f16, int_aarch64_sme_fdot_single_za32_vg1x4>;
 defm FDOT_VG2_M2Z2Z_HtoS : sme2_dot_mla_add_sub_array_vg2_multi<"fdot", 0b010000, MatrixOp32, ZZ_h_mul_r, nxv8f16, null_frag>;
 defm FDOT_VG4_M4Z4Z_HtoS : sme2_dot_mla_add_sub_array_vg4_multi<"fdot", 0b010000, MatrixOp32, ZZZZ_h_mul_r, nxv8f16, null_frag>;
 
 defm BFDOT_VG2_M2ZZI_HtoS : sme2_multi_vec_array_vg2_index_32b<"bfdot", 0b1011, ZZ_h_mul_r, ZPR4b16, nxv8bf16, null_frag>;
 defm BFDOT_VG4_M4ZZI_HtoS : sme2_multi_vec_array_vg4_index_32b<"bfdot", 0b1011, ZZZZ_h_mul_r, ZPR4b16, nxv8bf16, null_frag>;
-defm BFDOT_VG2_M2ZZ_HtoS : sme2_dot_mla_add_sub_array_vg24_single<"bfdot", 0b0010010, MatrixOp32, ZZ_h, ZPR4b16>;
-defm BFDOT_VG4_M4ZZ_HtoS : sme2_dot_mla_add_sub_array_vg24_single<"bfdot", 0b0110010, MatrixOp32, ZZZZ_h, ZPR4b16>;
+defm BFDOT_VG2_M2ZZ_HtoS : sme2_dot_mla_add_sub_array_vg2_single<"bfdot", 0b0010010, MatrixOp32, ZZ_h, ZPR4b16, nxv8bf16, int_aarch64_sme_fdot_single_za32_vg1x2>;
+defm BFDOT_VG4_M4ZZ_HtoS : sme2_dot_mla_add_sub_array_vg4_single<"bfdot", 0b0110010, MatrixOp32, ZZZZ_h, ZPR4b16, nxv8bf16, int_aarch64_sme_fdot_single_za32_vg1x4>;
 defm BFDOT_VG2_M2Z2Z_HtoS : sme2_dot_mla_add_sub_array_vg2_multi<"bfdot", 0b010010, MatrixOp32, ZZ_h_mul_r, nxv8bf16, null_frag>;
 defm BFDOT_VG4_M4Z4Z_HtoS : sme2_dot_mla_add_sub_array_vg4_multi<"bfdot", 0b010010, MatrixOp32, ZZZZ_h_mul_r, nxv8bf16, null_frag>;
@@ -468,19 +468,19 @@
 defm SDOT_VG2_M2ZZI_BToS : sme2_multi_vec_array_vg2_index_32b<"sdot", 0b1100, ZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>;
 defm SDOT_VG4_M4ZZI_HToS : sme2_multi_vec_array_vg4_index_32b<"sdot", 0b1000, ZZZZ_h_mul_r, ZPR4b16, nxv8i16, null_frag>;
 defm SDOT_VG4_M4ZZI_BToS : sme2_multi_vec_array_vg4_index_32b<"sdot", 0b1100, ZZZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>;
-defm SDOT_VG2_M2ZZ_HtoS : sme2_dot_mla_add_sub_array_vg24_single<"sdot", 0b1010101, MatrixOp32, ZZ_h, ZPR4b16>;
-defm SDOT_VG4_M4ZZ_HtoS : sme2_dot_mla_add_sub_array_vg24_single<"sdot", 0b1110101, MatrixOp32, ZZZZ_h, ZPR4b16>;
+defm SDOT_VG2_M2ZZ_HtoS : sme2_dot_mla_add_sub_array_vg2_single<"sdot", 0b1010101, MatrixOp32, ZZ_h, ZPR4b16, nxv8i16, int_aarch64_sme_sdot_single_za32_vg1x2>;
+defm SDOT_VG4_M4ZZ_HtoS : sme2_dot_mla_add_sub_array_vg4_single<"sdot", 0b1110101, MatrixOp32, ZZZZ_h, ZPR4b16, nxv8i16, int_aarch64_sme_sdot_single_za32_vg1x4>;
 defm SDOT_VG2_M2Z2Z_HtoS : sme2_dot_mla_add_sub_array_vg2_multi<"sdot", 0b110101, MatrixOp32, ZZ_h_mul_r, nxv8i16, null_frag>;
 defm SDOT_VG4_M4Z4Z_HtoS : sme2_dot_mla_add_sub_array_vg4_multi<"sdot", 0b110101, MatrixOp32, ZZZZ_h_mul_r, nxv8i16, null_frag>;
-defm SDOT_VG2_M2ZZ_BtoS : sme2_dot_mla_add_sub_array_vg24_single<"sdot", 0b0010100, MatrixOp32, ZZ_b, ZPR4b8>;
-defm SDOT_VG4_M4ZZ_BtoS : sme2_dot_mla_add_sub_array_vg24_single<"sdot", 0b0110100, MatrixOp32, ZZZZ_b, ZPR4b8>;
+defm SDOT_VG2_M2ZZ_BtoS : sme2_dot_mla_add_sub_array_vg2_single<"sdot", 0b0010100, MatrixOp32, ZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_sdot_single_za32_vg1x2>;
+defm SDOT_VG4_M4ZZ_BtoS : sme2_dot_mla_add_sub_array_vg4_single<"sdot", 0b0110100, MatrixOp32, ZZZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_sdot_single_za32_vg1x4>;
 defm SDOT_VG2_M2Z2Z_BtoS : sme2_dot_mla_add_sub_array_vg2_multi<"sdot", 0b010100, MatrixOp32, ZZ_b_mul_r, nxv16i8, null_frag>;
 defm SDOT_VG4_M4Z4Z_BtoS : sme2_dot_mla_add_sub_array_vg4_multi<"sdot", 0b010100, MatrixOp32, ZZZZ_b_mul_r, nxv16i8, null_frag>;
 
 defm SUDOT_VG2_M2ZZI_BToS : sme2_multi_vec_array_vg2_index_32b<"sudot", 0b1111, ZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>;
 defm SUDOT_VG4_M4ZZI_BToS : sme2_multi_vec_array_vg4_index_32b<"sudot", 0b1111, ZZZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>;
-defm SUDOT_VG2_M2ZZ_BToS : sme2_dot_mla_add_sub_array_vg24_single<"sudot", 0b0010111, MatrixOp32, ZZ_b, ZPR4b8>;
-defm SUDOT_VG4_M4ZZ_BToS : sme2_dot_mla_add_sub_array_vg24_single<"sudot", 0b0110111, MatrixOp32, ZZZZ_b, ZPR4b8>;
+defm SUDOT_VG2_M2ZZ_BToS : sme2_dot_mla_add_sub_array_vg2_single<"sudot", 0b0010111, MatrixOp32, ZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_sudot_single_za32_vg1x2>;
+defm SUDOT_VG4_M4ZZ_BToS : sme2_dot_mla_add_sub_array_vg4_single<"sudot", 0b0110111, MatrixOp32, ZZZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_sudot_single_za32_vg1x4>;
 
 defm SVDOT_VG2_M2ZZI_HtoS : sme2_multi_vec_array_vg2_index_32b<"svdot", 0b0100, ZZ_h_mul_r, ZPR4b16, nxv8i16, int_aarch64_sme_svdot_lane_za32_vg1x2>;
 defm SVDOT_VG4_M4ZZI_BtoS : sme2_multi_vec_array_vg4_index_32b<"svdot", 0b0100, ZZZZ_b_mul_r, ZPR4b8, nxv16i8, int_aarch64_sme_svdot_lane_za32_vg1x4>;
@@ -491,19 +491,19 @@
 defm UDOT_VG2_M2ZZI_BToS : sme2_multi_vec_array_vg2_index_32b<"udot", 0b1110, ZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>;
 defm UDOT_VG4_M4ZZI_BtoS : sme2_multi_vec_array_vg4_index_32b<"udot", 0b1110, ZZZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>;
 defm UDOT_VG4_M4ZZI_HToS : sme2_multi_vec_array_vg4_index_32b<"udot", 0b1010, ZZZZ_h_mul_r, ZPR4b16, nxv8i16, null_frag>;
-defm UDOT_VG2_M2ZZ_HtoS : sme2_dot_mla_add_sub_array_vg24_single<"udot", 0b1010111, MatrixOp32, ZZ_h, ZPR4b16>;
-defm UDOT_VG4_M4ZZ_HtoS : sme2_dot_mla_add_sub_array_vg24_single<"udot", 0b1110111, MatrixOp32, ZZZZ_h, ZPR4b16>;
+defm UDOT_VG2_M2ZZ_HtoS : sme2_dot_mla_add_sub_array_vg2_single<"udot", 0b1010111, MatrixOp32, ZZ_h, ZPR4b16, nxv8i16, int_aarch64_sme_udot_single_za32_vg1x2>;
+defm UDOT_VG4_M4ZZ_HtoS : sme2_dot_mla_add_sub_array_vg4_single<"udot", 0b1110111, MatrixOp32, ZZZZ_h, ZPR4b16, nxv8i16, int_aarch64_sme_udot_single_za32_vg1x4>;
 defm UDOT_VG2_M2Z2Z_HtoS : sme2_dot_mla_add_sub_array_vg2_multi<"udot", 0b110111, MatrixOp32, ZZ_h_mul_r, nxv8i16, null_frag>;
 defm UDOT_VG4_M4Z4Z_HtoS : sme2_dot_mla_add_sub_array_vg4_multi<"udot", 0b110111, MatrixOp32, ZZZZ_h_mul_r, nxv8i16, null_frag>;
-defm UDOT_VG2_M2ZZ_BtoS : sme2_dot_mla_add_sub_array_vg24_single<"udot", 0b0010110, MatrixOp32, ZZ_b, ZPR4b8>;
-defm UDOT_VG4_M4ZZ_BtoS : sme2_dot_mla_add_sub_array_vg24_single<"udot", 0b0110110, MatrixOp32, ZZZZ_b, ZPR4b8>;
+defm UDOT_VG2_M2ZZ_BtoS : sme2_dot_mla_add_sub_array_vg2_single<"udot", 0b0010110, MatrixOp32, ZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_udot_single_za32_vg1x2>;
+defm UDOT_VG4_M4ZZ_BtoS : sme2_dot_mla_add_sub_array_vg4_single<"udot", 0b0110110, MatrixOp32, ZZZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_udot_single_za32_vg1x4>;
 defm UDOT_VG2_M2Z2Z_BtoS : sme2_dot_mla_add_sub_array_vg2_multi<"udot", 0b010110, MatrixOp32, ZZ_b_mul_r, nxv16i8, null_frag>;
 defm UDOT_VG4_M4Z4Z_BtoS : sme2_dot_mla_add_sub_array_vg4_multi<"udot", 0b010110, MatrixOp32, ZZZZ_b_mul_r, nxv16i8, null_frag>;
 
 defm USDOT_VG2_M2ZZI_BToS: sme2_multi_vec_array_vg2_index_32b<"usdot", 0b1101, ZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>;
 defm USDOT_VG4_M4ZZI_BToS: sme2_multi_vec_array_vg4_index_32b<"usdot", 0b1101, ZZZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>;
-defm USDOT_VG2_M2ZZ_BToS : sme2_dot_mla_add_sub_array_vg24_single<"usdot", 0b0010101, MatrixOp32, ZZ_b, ZPR4b8>;
-defm USDOT_VG4_M4ZZ_BToS : sme2_dot_mla_add_sub_array_vg24_single<"usdot", 0b0110101, MatrixOp32, ZZZZ_b, ZPR4b8>;
+defm USDOT_VG2_M2ZZ_BToS : sme2_dot_mla_add_sub_array_vg2_single<"usdot", 0b0010101, MatrixOp32, ZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_usdot_single_za32_vg1x2>;
+defm USDOT_VG4_M4ZZ_BToS : sme2_dot_mla_add_sub_array_vg4_single<"usdot", 0b0110101, MatrixOp32, ZZZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_usdot_single_za32_vg1x4>;
 defm USDOT_VG2_M2Z2Z_BToS : sme2_dot_mla_add_sub_array_vg2_multi<"usdot", 0b010101, MatrixOp32, ZZ_b_mul_r, nxv16i8, null_frag>;
 defm USDOT_VG4_M4Z4Z_BToS : sme2_dot_mla_add_sub_array_vg4_multi<"usdot", 0b010101, MatrixOp32, ZZZZ_b_mul_r, nxv16i8, null_frag>;
@@ -723,8 +723,8 @@
 defm SDOT_VG2_M2ZZI_HtoD : sme2_multi_vec_array_vg2_index_64b<"sdot", 0b01, ZZ_h_mul_r, ZPR4b16, nxv8i16, null_frag>;
 defm SDOT_VG4_M4ZZI_HtoD : sme2_multi_vec_array_vg4_index_64b<"sdot", 0b001, ZZZZ_h_mul_r, ZPR4b16, nxv8i16, null_frag>;
-defm SDOT_VG2_M2ZZ_HtoD : sme2_dot_mla_add_sub_array_vg24_single<"sdot", 0b1010100, MatrixOp64, ZZ_h, ZPR4b16>;
-defm SDOT_VG4_M4ZZ_HtoD : sme2_dot_mla_add_sub_array_vg24_single<"sdot", 0b1110100, MatrixOp64, ZZZZ_h, ZPR4b16>;
+defm SDOT_VG2_M2ZZ_HtoD : sme2_dot_mla_add_sub_array_vg2_single<"sdot", 0b1010100, MatrixOp64, ZZ_h, ZPR4b16, nxv8i16, int_aarch64_sme_sdot_single_za64_vg1x2>;
+defm SDOT_VG4_M4ZZ_HtoD : sme2_dot_mla_add_sub_array_vg4_single<"sdot", 0b1110100, MatrixOp64, ZZZZ_h, ZPR4b16, nxv8i16, int_aarch64_sme_sdot_single_za64_vg1x4>;
 defm SDOT_VG2_M2Z2Z_HtoD : sme2_dot_mla_add_sub_array_vg2_multi<"sdot", 0b110100, MatrixOp64, ZZ_h_mul_r, nxv8i16, null_frag>;
 defm SDOT_VG4_M4Z4Z_HtoD : sme2_dot_mla_add_sub_array_vg4_multi<"sdot", 0b110100, MatrixOp64, ZZZZ_h_mul_r, nxv8i16, null_frag>;
@@ -732,8 +732,8 @@
 defm UDOT_VG2_M2ZZI_HtoD : sme2_multi_vec_array_vg2_index_64b<"udot", 0b11, ZZ_h_mul_r, ZPR4b16, nxv8i16, null_frag>;
 defm UDOT_VG4_M4ZZI_HtoD : sme2_multi_vec_array_vg4_index_64b<"udot", 0b011, ZZZZ_h_mul_r, ZPR4b16, nxv8i16, null_frag>;
-defm UDOT_VG2_M2ZZ_HtoD : sme2_dot_mla_add_sub_array_vg24_single<"udot", 0b1010110, MatrixOp64, ZZ_h, ZPR4b16>;
-defm UDOT_VG4_M4ZZ_HtoD : sme2_dot_mla_add_sub_array_vg24_single<"udot", 0b1110110, MatrixOp64, ZZZZ_h, ZPR4b16>;
+defm UDOT_VG2_M2ZZ_HtoD : sme2_dot_mla_add_sub_array_vg2_single<"udot", 0b1010110, MatrixOp64, ZZ_h, ZPR4b16, nxv8i16, int_aarch64_sme_udot_single_za64_vg1x2>;
+defm UDOT_VG4_M4ZZ_HtoD : sme2_dot_mla_add_sub_array_vg4_single<"udot", 0b1110110, MatrixOp64, ZZZZ_h, ZPR4b16, nxv8i16, int_aarch64_sme_udot_single_za64_vg1x4>;
 defm UDOT_VG2_M2Z2Z_HtoD : sme2_dot_mla_add_sub_array_vg2_multi<"udot", 0b110110, MatrixOp64, ZZ_h_mul_r, nxv8i16, null_frag>;
 defm UDOT_VG4_M4Z4Z_HtoD : sme2_dot_mla_add_sub_array_vg4_multi<"udot", 0b110110, MatrixOp64, ZZZZ_h_mul_r, nxv8i16, null_frag>;
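The switch from sme2_dot_mla_add_sub_array_vg24_single to separate _vg2_single/_vg4_single multiclasses is not just a rename: the two extra parameters (element value type and intrinsic) let each defm also instantiate an ISel pattern for the new intrinsic. The actual pattern classes live in SMEInstrFormats.td; a simplified sketch of the VG2 case follows (class and operand names are illustrative, and the real pattern additionally matches the slice operand as a base register plus immediate, which is why the tests below add 7 to %slice and expect it folded into the za[w8, 7] offset).

    // Sketch only: select the VG2 intrinsic to the instruction's pseudo,
    // packing the two group vectors into a consecutive register tuple.
    class SME2_VG2_Multi_Single_Pat_Sketch<string name, SDPatternOperator intrinsic,
                                           ValueType vt, ZPRRegOp zpr_ty>
      : Pat<(intrinsic (i32 MatrixIndexGPR32Op8_11:$slice), vt:$Zn1, vt:$Zn2, vt:$Zm),
            (!cast<Instruction>(name # "_PSEUDO") $slice, 0,
               (REG_SEQUENCE ZPR2Mul2, vt:$Zn1, zsub0, vt:$Zn2, zsub1), zpr_ty:$Zm)>;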
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-fp-dots.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-fp-dots.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-fp-dots.ll
@@ -0,0 +1,88 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs < %s | FileCheck %s
+
+target triple="aarch64-linux-gnu"
+
+
+; == Multi, single (16-bit float) ==
+
+define void @fdot_single_za32_f16_vg1x2(i32 %slice, <vscale x 8 x half> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2) #0 {
+; CHECK-LABEL: fdot_single_za32_f16_vg1x2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    fdot za.s[w8, 0, vgx2], { z1.h, z2.h }, z3.h
+; CHECK-NEXT:    fdot za.s[w8, 7, vgx2], { z1.h, z2.h }, z3.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2)
+  ret void
+}
+
+define void @fdot_single_za32_f16_vg1x4(i32 %slice, <vscale x 8 x half> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4) #0 {
+; CHECK-LABEL: fdot_single_za32_f16_vg1x4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    fdot za.s[w8, 0, vgx4], { z1.h - z4.h }, z5.h
+; CHECK-NEXT:    fdot za.s[w8, 7, vgx4], { z1.h - z4.h }, z5.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4)
+  ret void
+}
+
+
+; == Multi, single (16-bit bfloat) ==
+
+define void @bfdot_single_za32_bf16_vg1x2(i32 %slice, <vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2) #0 {
+; CHECK-LABEL: bfdot_single_za32_bf16_vg1x2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    bfdot za.s[w8, 0, vgx2], { z1.h, z2.h }, z3.h
+; CHECK-NEXT:    bfdot za.s[w8, 7, vgx2], { z1.h, z2.h }, z3.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2)
+  ret void
+}
+
+define void @bfdot_single_za32_bf16_vg1x4(i32 %slice, <vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4) #0 {
+; CHECK-LABEL: bfdot_single_za32_bf16_vg1x4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    bfdot za.s[w8, 0, vgx4], { z1.h - z4.h }, z5.h
+; CHECK-NEXT:    bfdot za.s[w8, 7, vgx4], { z1.h - z4.h }, z5.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4)
+  ret void
+}
+
+
+attributes #0 = { nounwind "target-features"="+sme2" }
+
+
+; == Multi, single (16-bit float)
+
+declare void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
+declare void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
+
+; == Multi, single (16-bit bfloat)
+
+declare void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
+declare void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
@@ -0,0 +1,293 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs < %s | FileCheck %s
+
+target triple="aarch64-linux-gnu"
+
+
+; == Multi, single (unsigned) ==
+
+define void @udot_single_za32_u16_vg1x2(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2) #0 {
+; CHECK-LABEL: udot_single_za32_u16_vg1x2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z1.h, z2.h }, z3.h
+; CHECK-NEXT:    udot za.s[w8, 7, vgx2], { z1.h, z2.h }, z3.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2)
+  ret void
+}
+
+define void @udot_single_za32_u16_vg1x4(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) #0 {
+; CHECK-LABEL: udot_single_za32_u16_vg1x4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z1.h - z4.h }, z5.h
+; CHECK-NEXT:    udot za.s[w8, 7, vgx4], { z1.h - z4.h }, z5.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
+  ret void
+}
+
+define void @udot_single_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2) #0 {
+; CHECK-LABEL: udot_single_za32_u8_vg1x2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    udot za.s[w8, 0, vgx2], { z1.b, z2.b }, z3.b
+; CHECK-NEXT:    udot za.s[w8, 7, vgx2], { z1.b, z2.b }, z3.b
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2)
+  ret void
+}
+
+define void @udot_single_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) #0 {
+; CHECK-LABEL: udot_single_za32_u8_vg1x4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    udot za.s[w8, 0, vgx4], { z1.b - z4.b }, z5.b
+; CHECK-NEXT:    udot za.s[w8, 7, vgx4], { z1.b - z4.b }, z5.b
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
+  ret void
+}
+
+define void @udot_single_za64_u16_vg1x2(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2) #1 {
+; CHECK-LABEL: udot_single_za64_u16_vg1x2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    udot za.d[w8, 0, vgx2], { z1.h, z2.h }, z3.h
+; CHECK-NEXT:    udot za.d[w8, 7, vgx2], { z1.h, z2.h }, z3.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.udot.single.za64.vg1x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.udot.single.za64.vg1x2.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2)
+  ret void
+}
+
+define void @udot_single_za64_u16_vg1x4(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) #1 {
+; CHECK-LABEL: udot_single_za64_u16_vg1x4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    udot za.d[w8, 0, vgx4], { z1.h - z4.h }, z5.h
+; CHECK-NEXT:    udot za.d[w8, 7, vgx4], { z1.h - z4.h }, z5.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.udot.single.za64.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.udot.single.za64.vg1x4.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
+  ret void
+}
+
+define void @usdot_single_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2) #0 {
+; CHECK-LABEL: usdot_single_za32_u8_vg1x2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx2], { z1.b, z2.b }, z3.b
+; CHECK-NEXT:    usdot za.s[w8, 7, vgx2], { z1.b, z2.b }, z3.b
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.usdot.single.za32.vg1x2.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.usdot.single.za32.vg1x2.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2)
+  ret void
+}
+
+define void @usdot_single_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) #0 {
+; CHECK-LABEL: usdot_single_za32_u8_vg1x4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    usdot za.s[w8, 0, vgx4], { z1.b - z4.b }, z5.b
+; CHECK-NEXT:    usdot za.s[w8, 7, vgx4], { z1.b - z4.b }, z5.b
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.usdot.single.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.usdot.single.za32.vg1x4.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
+  ret void
+}
+
+
+; == Multi, single (signed) ==
+
+define void @sdot_single_za32_u16_vg1x2(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2) #0 {
+; CHECK-LABEL: sdot_single_za32_u16_vg1x2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z1.h, z2.h }, z3.h
+; CHECK-NEXT:    sdot za.s[w8, 7, vgx2], { z1.h, z2.h }, z3.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.sdot.single.za32.vg1x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.sdot.single.za32.vg1x2.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2)
+  ret void
+}
+
+define void @sdot_single_za32_u16_vg1x4(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) #0 {
+; CHECK-LABEL: sdot_single_za32_u16_vg1x4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z1.h - z4.h }, z5.h
+; CHECK-NEXT:    sdot za.s[w8, 7, vgx4], { z1.h - z4.h }, z5.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
+  ret void
+}
+
+define void @sdot_single_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2) #0 {
+; CHECK-LABEL: sdot_single_za32_u8_vg1x2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx2], { z1.b, z2.b }, z3.b
+; CHECK-NEXT:    sdot za.s[w8, 7, vgx2], { z1.b, z2.b }, z3.b
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.sdot.single.za32.vg1x2.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.sdot.single.za32.vg1x2.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2)
+  ret void
+}
+
+define void @sdot_single_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) #0 {
+; CHECK-LABEL: sdot_single_za32_u8_vg1x4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    sdot za.s[w8, 0, vgx4], { z1.b - z4.b }, z5.b
+; CHECK-NEXT:    sdot za.s[w8, 7, vgx4], { z1.b - z4.b }, z5.b
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
+  ret void
+}
+
+define void @sdot_single_za64_u16_vg1x2(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2) #1 {
+; CHECK-LABEL: sdot_single_za64_u16_vg1x2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    sdot za.d[w8, 0, vgx2], { z1.h, z2.h }, z3.h
+; CHECK-NEXT:    sdot za.d[w8, 7, vgx2], { z1.h, z2.h }, z3.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.sdot.single.za64.vg1x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.sdot.single.za64.vg1x2.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2)
+  ret void
+}
+
+define void @sdot_single_za64_u16_vg1x4(i32 %slice, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4) #1 {
+; CHECK-LABEL: sdot_single_za64_u16_vg1x4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    sdot za.d[w8, 0, vgx4], { z1.h - z4.h }, z5.h
+; CHECK-NEXT:    sdot za.d[w8, 7, vgx4], { z1.h - z4.h }, z5.h
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.sdot.single.za64.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.sdot.single.za64.vg1x4.nxv8i16(i32 %slice2, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4)
+  ret void
+}
+
+define void @sudot_single_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2) #0 {
+; CHECK-LABEL: sudot_single_za32_u8_vg1x2:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2 def $z1_z2
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx2], { z1.b, z2.b }, z3.b
+; CHECK-NEXT:    sudot za.s[w8, 7, vgx2], { z1.b, z2.b }, z3.b
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.sudot.single.za32.vg1x2.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.sudot.single.za32.vg1x2.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2)
+  ret void
+}
+
+define void @sudot_single_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4) #0 {
+; CHECK-LABEL: sudot_single_za32_u8_vg1x4:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z1_z2_z3_z4 def $z1_z2_z3_z4
+; CHECK-NEXT:    sudot za.s[w8, 0, vgx4], { z1.b - z4.b }, z5.b
+; CHECK-NEXT:    sudot za.s[w8, 7, vgx4], { z1.b - z4.b }, z5.b
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.sudot.single.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
+  %slice2 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.sudot.single.za32.vg1x4.nxv16i8(i32 %slice2, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4)
+  ret void
+}
+
+
+attributes #0 = { nounwind "target-features"="+sme2" }
+attributes #1 = { nounwind "target-features"="+sme2,+sme-i16i64" }
+
+
+; == Multi, single (unsigned)
+
+declare void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare void @llvm.aarch64.sme.udot.single.za32.vg1x2.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare void @llvm.aarch64.sme.udot.single.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare void @llvm.aarch64.sme.udot.single.za64.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare void @llvm.aarch64.sme.udot.single.za64.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare void @llvm.aarch64.sme.usdot.single.za32.vg1x2.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare void @llvm.aarch64.sme.usdot.single.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+
+; == Multi, single (signed)
+
+declare void @llvm.aarch64.sme.sdot.single.za32.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare void @llvm.aarch64.sme.sdot.single.za32.vg1x2.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare void @llvm.aarch64.sme.sdot.single.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare void @llvm.aarch64.sme.sdot.single.za64.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare void @llvm.aarch64.sme.sdot.single.za64.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare void @llvm.aarch64.sme.sudot.single.za32.vg1x2.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare void @llvm.aarch64.sme.sudot.single.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
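For a sense of how these intrinsics surface to users: the clang/ACLE side is not part of this patch, so the builtin names and attributes below follow the SME2 ACLE and are an assumption, not something this diff adds.

    // Hypothetical C usage of the corresponding SME2 ACLE builtins.
    #include <arm_sme.h>

    // Accumulates a two-vector f16 dot-product into za.s[slice] and
    // za.s[slice + 7], mirroring the IR tests above.
    void fdot_example(uint32_t slice, svfloat16x2_t zn, svfloat16_t zm)
        __arm_streaming __arm_inout("za") {
      svdot_single_za32_f16_vg1x2(slice, zn, zm);
      svdot_single_za32_f16_vg1x2(slice + 7, zn, zm);
    }

    // Four-vector u8 dot-product accumulating into 32-bit ZA elements.
    void udot_example(uint32_t slice, svuint8x4_t zn, svuint8_t zm)
        __arm_streaming __arm_inout("za") {
      svdot_single_za32_u8_vg1x4(slice, zn, zm);
    }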