diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -2789,4 +2789,19 @@
       def int_aarch64_sme_ # ty # instr # _lane_vg2x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
     }
   }
+
+  //
+  // Multi-vector vertical dot-products
+  //
+
+  def int_aarch64_sme_fvdot_lane_za32_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
+
+  foreach ty = ["s", "u"] in {
+    def int_aarch64_sme_ # ty # vdot_lane_za32_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
+    def int_aarch64_sme_ # ty # vdot_lane_za32_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
+    def int_aarch64_sme_ # ty # vdot_lane_za64_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
+  }
+
+  def int_aarch64_sme_suvdot_lane_za32_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
+  def int_aarch64_sme_usvdot_lane_za32_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
 }
diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
@@ -460,9 +460,9 @@
 defm BFDOT_VG2_M2Z2Z_HtoS : sme2_dot_mla_add_sub_array_vg2_multi<"bfdot", 0b010010, MatrixOp32, ZZ_h_mul_r, nxv8bf16, null_frag>;
 defm BFDOT_VG4_M4Z4Z_HtoS : sme2_dot_mla_add_sub_array_vg4_multi<"bfdot", 0b010010, MatrixOp32, ZZZZ_h_mul_r, nxv8bf16, null_frag>;
 
-defm BFVDOT_VG2_M2ZZI_HtoS : sme2_multi_vec_array_vg2_index_32b<"bfvdot", 0b0011, ZZ_h_mul_r, ZPR4b16, nxv8bf16, null_frag>;
+defm BFVDOT_VG2_M2ZZI_HtoS : sme2_multi_vec_array_vg2_index_32b<"bfvdot", 0b0011, ZZ_h_mul_r, ZPR4b16, nxv8bf16, int_aarch64_sme_fvdot_lane_za32_vg1x2>;
 
-defm FVDOT_VG2_M2ZZI_HtoS : sme2_multi_vec_array_vg2_index_32b<"fvdot", 0b0001, ZZ_h_mul_r, ZPR4b16, nxv8f16, null_frag>;
+defm FVDOT_VG2_M2ZZI_HtoS : sme2_multi_vec_array_vg2_index_32b<"fvdot", 0b0001, ZZ_h_mul_r, ZPR4b16, nxv8f16, int_aarch64_sme_fvdot_lane_za32_vg1x2>;
 
 defm SDOT_VG2_M2ZZI_HToS : sme2_multi_vec_array_vg2_index_32b<"sdot", 0b1000, ZZ_h_mul_r, ZPR4b16, nxv8i16, null_frag>;
 defm SDOT_VG2_M2ZZI_BToS : sme2_multi_vec_array_vg2_index_32b<"sdot", 0b1100, ZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>;
@@ -482,10 +482,10 @@
 defm SUDOT_VG2_M2ZZ_BToS : sme2_dot_mla_add_sub_array_vg24_single<"sudot", 0b0010111, MatrixOp32, ZZ_b, ZPR4b8>;
 defm SUDOT_VG4_M4ZZ_BToS : sme2_dot_mla_add_sub_array_vg24_single<"sudot", 0b0110111, MatrixOp32, ZZZZ_b, ZPR4b8>;
 
-defm SVDOT_VG2_M2ZZI_HtoS : sme2_multi_vec_array_vg2_index_32b<"svdot", 0b0100, ZZ_h_mul_r, ZPR4b16, nxv8i16, null_frag>;
-defm SVDOT_VG4_M4ZZI_BtoS : sme2_multi_vec_array_vg4_index_32b<"svdot", 0b0100, ZZZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>;
+defm SVDOT_VG2_M2ZZI_HtoS : sme2_multi_vec_array_vg2_index_32b<"svdot", 0b0100, ZZ_h_mul_r, ZPR4b16, nxv8i16, int_aarch64_sme_svdot_lane_za32_vg1x2>;
+defm SVDOT_VG4_M4ZZI_BtoS : sme2_multi_vec_array_vg4_index_32b<"svdot", 0b0100, ZZZZ_b_mul_r, ZPR4b8, nxv16i8, int_aarch64_sme_svdot_lane_za32_vg1x4>;
 
-defm SUVDOT_VG4_M4ZZI_BToS : sme2_multi_vec_array_vg4_index_32b<"suvdot", 0b0111, ZZZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>;
+defm SUVDOT_VG4_M4ZZI_BToS : sme2_multi_vec_array_vg4_index_32b<"suvdot", 0b0111, ZZZZ_b_mul_r, ZPR4b8, nxv16i8, int_aarch64_sme_suvdot_lane_za32_vg1x4>;
 
 defm UDOT_VG2_M2ZZI_HToS : sme2_multi_vec_array_vg2_index_32b<"udot", 0b1010, ZZ_h_mul_r, ZPR4b16, nxv8i16, null_frag>;
 defm UDOT_VG2_M2ZZI_BToS : sme2_multi_vec_array_vg2_index_32b<"udot", 0b1110, ZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>;
@@ -507,10 +507,10 @@
 defm USDOT_VG2_M2Z2Z_BToS : sme2_dot_mla_add_sub_array_vg2_multi<"usdot", 0b010101, MatrixOp32, ZZ_b_mul_r, nxv16i8, null_frag>;
 defm USDOT_VG4_M4Z4Z_BToS : sme2_dot_mla_add_sub_array_vg4_multi<"usdot", 0b010101, MatrixOp32, ZZZZ_b_mul_r, nxv16i8, null_frag>;
 
-defm USVDOT_VG4_M4ZZI_BToS : sme2_multi_vec_array_vg4_index_32b<"usvdot", 0b0101, ZZZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>;
+defm USVDOT_VG4_M4ZZI_BToS : sme2_multi_vec_array_vg4_index_32b<"usvdot", 0b0101, ZZZZ_b_mul_r, ZPR4b8, nxv16i8, int_aarch64_sme_usvdot_lane_za32_vg1x4>;
 
-defm UVDOT_VG2_M2ZZI_HtoS : sme2_multi_vec_array_vg2_index_32b<"uvdot", 0b0110, ZZ_h_mul_r, ZPR4b16, nxv8i16, null_frag>;
-defm UVDOT_VG4_M4ZZI_BtoS : sme2_multi_vec_array_vg4_index_32b<"uvdot", 0b0110, ZZZZ_b_mul_r, ZPR4b8, nxv16i8, null_frag>;
+defm UVDOT_VG2_M2ZZI_HtoS : sme2_multi_vec_array_vg2_index_32b<"uvdot", 0b0110, ZZ_h_mul_r, ZPR4b16, nxv8i16, int_aarch64_sme_uvdot_lane_za32_vg1x2>;
+defm UVDOT_VG4_M4ZZI_BtoS : sme2_multi_vec_array_vg4_index_32b<"uvdot", 0b0110, ZZZZ_b_mul_r, ZPR4b8, nxv16i8, int_aarch64_sme_uvdot_lane_za32_vg1x4>;
 
 def SMLALL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"smlall", 0b000>;
 defm SMLALL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"smlall", 0b000>;
@@ -728,7 +728,7 @@
 defm SDOT_VG2_M2Z2Z_HtoD : sme2_dot_mla_add_sub_array_vg2_multi<"sdot", 0b110100, MatrixOp64, ZZ_h_mul_r, nxv8i16, null_frag>;
 defm SDOT_VG4_M4Z4Z_HtoD : sme2_dot_mla_add_sub_array_vg4_multi<"sdot", 0b110100, MatrixOp64, ZZZZ_h_mul_r, nxv8i16, null_frag>;
 
-defm SVDOT_VG4_M4ZZI_HtoD : sme2_multi_vec_array_vg4_index_64b<"svdot", 0b101, ZZZZ_h_mul_r, ZPR4b16, nxv8i16, null_frag>;
+defm SVDOT_VG4_M4ZZI_HtoD : sme2_multi_vec_array_vg4_index_64b<"svdot", 0b101, ZZZZ_h_mul_r, ZPR4b16, nxv8i16, int_aarch64_sme_svdot_lane_za64_vg1x4>;
 
 defm UDOT_VG2_M2ZZI_HtoD : sme2_multi_vec_array_vg2_index_64b<"udot", 0b11, ZZ_h_mul_r, ZPR4b16, nxv8i16, null_frag>;
 defm UDOT_VG4_M4ZZI_HtoD : sme2_multi_vec_array_vg4_index_64b<"udot", 0b011, ZZZZ_h_mul_r, ZPR4b16, nxv8i16, null_frag>;
@@ -737,7 +737,7 @@
 defm UDOT_VG2_M2Z2Z_HtoD : sme2_dot_mla_add_sub_array_vg2_multi<"udot", 0b110110, MatrixOp64, ZZ_h_mul_r, nxv8i16, null_frag>;
 defm UDOT_VG4_M4Z4Z_HtoD : sme2_dot_mla_add_sub_array_vg4_multi<"udot", 0b110110, MatrixOp64, ZZZZ_h_mul_r, nxv8i16, null_frag>;
 
-defm UVDOT_VG4_M4ZZI_HtoD : sme2_multi_vec_array_vg4_index_64b<"uvdot", 0b111, ZZZZ_h_mul_r, ZPR4b16, nxv8i16, null_frag>;
+defm UVDOT_VG4_M4ZZI_HtoD : sme2_multi_vec_array_vg4_index_64b<"uvdot", 0b111, ZZZZ_h_mul_r, ZPR4b16, nxv8i16, int_aarch64_sme_uvdot_lane_za64_vg1x4>;
 
 def SMLALL_MZZI_HtoD : sme2_mla_ll_array_index_64b<"smlall", 0b00>;
 defm SMLALL_VG2_M2ZZI_HtoD : sme2_mla_ll_array_vg2_index_64b<"smlall", 0b00>;
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-vdot.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-vdot.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-vdot.ll
@@ -0,0 +1,203 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2,+sme-i16i64 -verify-machineinstrs < %s | FileCheck %s
+
+
+; == FVDOT ==
+
+define void @test_fvdot_lane_za32_vg1x2_nxv8f16(i32 %slice, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zm) {
+; CHECK-LABEL: test_fvdot_lane_za32_vg1x2_nxv8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    fvdot za.s[w8, 0, vgx2], { z0.h, z1.h }, z2.h[3]
+; CHECK-NEXT:    fvdot za.s[w8, 7, vgx2], { z0.h, z1.h }, z2.h[3]
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.fvdot.lane.za32.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zm, i32 3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.fvdot.lane.za32.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zm, i32 3)
+  ret void
+}
+
+
+; == BFVDOT ==
+
+define void @test_fvdot_lane_za32_vg1x2_nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zm) {
+; CHECK-LABEL: test_fvdot_lane_za32_vg1x2_nxv8bf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    bfvdot za.s[w8, 0, vgx2], { z0.h, z1.h }, z2.h[3]
+; CHECK-NEXT:    bfvdot za.s[w8, 7, vgx2], { z0.h, z1.h }, z2.h[3]
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.fvdot.lane.za32.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zm, i32 3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.fvdot.lane.za32.vg1x2.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zm, i32 3)
+  ret void
+}
+
+
+; == SVDOT ==
+
+define void @test_svdot_lane_za32_vg1x2_nxv8i16(i32 %slice, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zm) {
+; CHECK-LABEL: test_svdot_lane_za32_vg1x2_nxv8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    svdot za.s[w8, 0, vgx2], { z0.h, z1.h }, z2.h[3]
+; CHECK-NEXT:    svdot za.s[w8, 7, vgx2], { z0.h, z1.h }, z2.h[3]
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.svdot.lane.za32.vg1x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zm, i32 3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.svdot.lane.za32.vg1x2.nxv8i16(i32 %slice.7, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zm, i32 3)
+  ret void
+}
+
+define void @test_svdot_lane_za32_vg1x4_nxv16i8(i32 %slice, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: test_svdot_lane_za32_vg1x4_nxv16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    svdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z4.b[3]
+; CHECK-NEXT:    svdot za.s[w8, 7, vgx4], { z0.b - z3.b }, z4.b[3]
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.svdot.lane.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zm, i32 3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.svdot.lane.za32.vg1x4.nxv16i8(i32 %slice.7, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zm, i32 3)
+  ret void
+}
+
+define void @test_svdot_lane_za64_vg1x4_nxv8i16(i32 %slice, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zm) {
+; CHECK-LABEL: test_svdot_lane_za64_vg1x4_nxv8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    svdot za.d[w8, 0, vgx4], { z0.h - z3.h }, z4.h[1]
+; CHECK-NEXT:    svdot za.d[w8, 7, vgx4], { z0.h - z3.h }, z4.h[1]
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.svdot.lane.za64.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zm, i32 1)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.svdot.lane.za64.vg1x4.nxv8i16(i32 %slice.7, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zm, i32 1)
+  ret void
+}
+
+
+; == UVDOT ==
+
+define void @test_uvdot_lane_za32_vg1x2_nxv8i16(i32 %slice, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zm) {
+; CHECK-LABEL: test_uvdot_lane_za32_vg1x2_nxv8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    uvdot za.s[w8, 0, vgx2], { z0.h, z1.h }, z2.h[3]
+; CHECK-NEXT:    uvdot za.s[w8, 7, vgx2], { z0.h, z1.h }, z2.h[3]
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.uvdot.lane.za32.vg1x2.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zm, i32 3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.uvdot.lane.za32.vg1x2.nxv8i16(i32 %slice.7, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zm, i32 3)
+  ret void
+}
+
+define void @test_uvdot_lane_za32_vg1x4_nxv16i8(i32 %slice, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: test_uvdot_lane_za32_vg1x4_nxv16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    uvdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z4.b[3]
+; CHECK-NEXT:    uvdot za.s[w8, 7, vgx4], { z0.b - z3.b }, z4.b[3]
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.uvdot.lane.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zm, i32 3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.uvdot.lane.za32.vg1x4.nxv16i8(i32 %slice.7, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zm, i32 3)
+  ret void
+}
+
+define void @test_uvdot_lane_za64_vg1x4_nxv8i16(i32 %slice, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zm) {
+; CHECK-LABEL: test_uvdot_lane_za64_vg1x4_nxv8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    uvdot za.d[w8, 0, vgx4], { z0.h - z3.h }, z4.h[1]
+; CHECK-NEXT:    uvdot za.d[w8, 7, vgx4], { z0.h - z3.h }, z4.h[1]
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.uvdot.lane.za64.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zm, i32 1)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.uvdot.lane.za64.vg1x4.nxv8i16(i32 %slice.7, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zm, i32 1)
+  ret void
+}
+
+
+; == SUVDOT ==
+
+define void @test_suvdot_lane_za32_vg1x4_nxv16i8(i32 %slice, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: test_suvdot_lane_za32_vg1x4_nxv16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    suvdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z4.b[3]
+; CHECK-NEXT:    suvdot za.s[w8, 7, vgx4], { z0.b - z3.b }, z4.b[3]
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.suvdot.lane.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zm, i32 3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.suvdot.lane.za32.vg1x4.nxv16i8(i32 %slice.7, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zm, i32 3)
+  ret void
+}
+
+
+; == USVDOT ==
+
+define void @test_usvdot_lane_za32_vg1x4_nxv16i8(i32 %slice, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: test_usvdot_lane_za32_vg1x4_nxv16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    usvdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z4.b[3]
+; CHECK-NEXT:    usvdot za.s[w8, 7, vgx4], { z0.b - z3.b }, z4.b[3]
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.usvdot.lane.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zm, i32 3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.usvdot.lane.za32.vg1x4.nxv16i8(i32 %slice.7, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zm, i32 3)
+  ret void
+}
+
+
+; == FVDOT ==
+declare void @llvm.aarch64.sme.fvdot.lane.za32.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
+declare void @llvm.aarch64.sme.fvdot.lane.za32.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
+
+; == SVDOT ==
+declare void @llvm.aarch64.sme.svdot.lane.za32.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+declare void @llvm.aarch64.sme.svdot.lane.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
+declare void @llvm.aarch64.sme.svdot.lane.za64.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+
+; == UVDOT ==
+declare void @llvm.aarch64.sme.uvdot.lane.za32.vg1x2.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+declare void @llvm.aarch64.sme.uvdot.lane.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
+declare void @llvm.aarch64.sme.uvdot.lane.za64.vg1x4.nxv8i16(i32, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i32)
+
+; == SUVDOT ==
+declare void @llvm.aarch64.sme.suvdot.lane.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
+
+; == USVDOT ==
+declare void @llvm.aarch64.sme.usvdot.lane.za32.vg1x4.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)
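
For readers skimming the patch, the IR-level shape of the new intrinsics can also be seen in isolation below. This is a minimal illustrative sketch, not part of the patch: the function name @fvdot_sketch and the lane index 0 are arbitrary, and it simply mirrors the calls exercised by the test file above.

; Sketch only: accumulate an f16 vertical dot-product into 32-bit ZA slice
; %slice, two ZA vectors at a time (vgx2), using lane 0 of %zm.
define void @fvdot_sketch(i32 %slice, <vscale x 8 x half> %zn1,
                          <vscale x 8 x half> %zn2, <vscale x 8 x half> %zm) {
  call void @llvm.aarch64.sme.fvdot.lane.za32.vg1x2.nxv8f16(i32 %slice,
              <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2,
              <vscale x 8 x half> %zm, i32 0)
  ret void
}

declare void @llvm.aarch64.sme.fvdot.lane.za32.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)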