Index: llvm/include/llvm/IR/IntrinsicsAArch64.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -3067,6 +3067,10 @@
       def int_aarch64_sme_ # ty # instr # _ # za # _vg4x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
       def int_aarch64_sme_ # ty # instr # _ # za # _vg4x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;
+
+      def int_aarch64_sme_ # ty # instr # _ # za # _lane_vg4x1 : SME2_Matrix_ArrayVector_Single_Index_Intrinsic;
+      def int_aarch64_sme_ # ty # instr # _ # za # _lane_vg4x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
+      def int_aarch64_sme_ # ty # instr # _ # za # _lane_vg4x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
     }
   }
 }
@@ -3074,6 +3078,10 @@
   def int_aarch64_sme_sumla_za32_single_vg4x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
   def int_aarch64_sme_sumla_za32_single_vg4x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;
 
+  def int_aarch64_sme_sumla_za32_lane_vg4x1 : SME2_Matrix_ArrayVector_Single_Index_Intrinsic;
+  def int_aarch64_sme_sumla_za32_lane_vg4x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
+  def int_aarch64_sme_sumla_za32_lane_vg4x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
+
   def int_aarch64_sme_usmla_za32_single_vg4x1 : SME2_Matrix_ArrayVector_Single_Single_Intrinsic;
   def int_aarch64_sme_usmla_za32_single_vg4x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
   def int_aarch64_sme_usmla_za32_single_vg4x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;
@@ -3081,6 +3089,10 @@
   def int_aarch64_sme_usmla_za32_vg4x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
   def int_aarch64_sme_usmla_za32_vg4x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;
 
+  def int_aarch64_sme_usmla_za32_lane_vg4x1 : SME2_Matrix_ArrayVector_Single_Index_Intrinsic;
+  def int_aarch64_sme_usmla_za32_lane_vg4x2 : SME2_Matrix_ArrayVector_VG2_Multi_Index_Intrinsic;
+  def int_aarch64_sme_usmla_za32_lane_vg4x4 : SME2_Matrix_ArrayVector_VG4_Multi_Index_Intrinsic;
+
   // Multi-vector signed saturating doubling multiply high
 
   def int_aarch64_sve_sqdmulh_single_vgx2 : SME2_VG2_Multi_Single_Intrinsic;
Index: llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
@@ -512,51 +512,51 @@
 defm UVDOT_VG2_M2ZZI_HtoS : sme2_multi_vec_array_vg2_index_32b<"uvdot", 0b0110, ZZ_h_mul_r, ZPR4b16, nxv8i16, int_aarch64_sme_uvdot_lane_za32_vg1x2>;
 defm UVDOT_VG4_M4ZZI_BtoS : sme2_multi_vec_array_vg4_index_32b<"uvdot", 0b0110, ZZZZ_b_mul_r, ZPR4b8, nxv16i8, int_aarch64_sme_uvdot_lane_za32_vg1x4>;
 
-def SMLALL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"smlall", 0b000>;
-defm SMLALL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"smlall", 0b000>;
-defm SMLALL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"smlall", 0b000>;
+defm SMLALL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"smlall", 0b000, int_aarch64_sme_smla_za32_lane_vg4x1>;
+defm SMLALL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"smlall", 0b000, int_aarch64_sme_smla_za32_lane_vg4x2>;
+defm SMLALL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"smlall", 0b000, int_aarch64_sme_smla_za32_lane_vg4x4>;
 defm SMLALL_MZZ_BtoS : sme2_mla_ll_array_single<"smlall", 0b0000, MatrixOp32, ZPR8, ZPR4b8, nxv16i8, int_aarch64_sme_smla_za32_single_vg4x1>;
 defm SMLALL_VG2_M2ZZ_BtoS : sme2_mla_ll_array_vg2_single<"smlall", 0b00000,
                                                          MatrixOp32, ZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_smla_za32_single_vg4x2>;
 defm SMLALL_VG4_M4ZZ_BtoS : sme2_mla_ll_array_vg4_single<"smlall", 0b01000, MatrixOp32, ZZZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_smla_za32_single_vg4x4>;
 defm SMLALL_VG2_M2Z2Z_BtoS : sme2_mla_ll_array_vg2_multi<"smlall", 0b0000, MatrixOp32, ZZ_b_mul_r, nxv16i8, int_aarch64_sme_smla_za32_vg4x2>;
 defm SMLALL_VG4_M4Z4Z_BtoS : sme2_mla_ll_array_vg4_multi<"smlall", 0b0000, MatrixOp32, ZZZZ_b_mul_r, nxv16i8, int_aarch64_sme_smla_za32_vg4x4>;
 
-def USMLALL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"usmlall", 0b001>;
-defm USMLALL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"usmlall", 0b100>;
-defm USMLALL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"usmlall", 0b100>;
+defm USMLALL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"usmlall", 0b001, int_aarch64_sme_usmla_za32_lane_vg4x1>;
+defm USMLALL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"usmlall", 0b100, int_aarch64_sme_usmla_za32_lane_vg4x2>;
+defm USMLALL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"usmlall", 0b100, int_aarch64_sme_usmla_za32_lane_vg4x4>;
 defm USMLALL_MZZ_BtoS : sme2_mla_ll_array_single<"usmlall", 0b0001, MatrixOp32, ZPR8, ZPR4b8, nxv16i8, int_aarch64_sme_usmla_za32_single_vg4x1>;
 defm USMLALL_VG2_M2ZZ_BtoS : sme2_mla_ll_array_vg2_single<"usmlall", 0b00001, MatrixOp32, ZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_usmla_za32_single_vg4x2>;
 defm USMLALL_VG4_M4ZZ_BtoS : sme2_mla_ll_array_vg4_single<"usmlall", 0b01001, MatrixOp32, ZZZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_usmla_za32_single_vg4x4>;
 defm USMLALL_VG2_M2Z2Z_BtoS : sme2_mla_ll_array_vg2_multi<"usmlall", 0b0001, MatrixOp32, ZZ_b_mul_r, nxv16i8, int_aarch64_sme_usmla_za32_vg4x2>;
 defm USMLALL_VG4_M4Z4Z_BtoS : sme2_mla_ll_array_vg4_multi<"usmlall", 0b0001, MatrixOp32, ZZZZ_b_mul_r, nxv16i8, int_aarch64_sme_usmla_za32_vg4x4>;
 
-def SMLSLL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"smlsll", 0b010>;
-defm SMLSLL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"smlsll", 0b001>;
-defm SMLSLL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"smlsll", 0b001>;
+defm SMLSLL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"smlsll", 0b010, int_aarch64_sme_smls_za32_lane_vg4x1>;
+defm SMLSLL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"smlsll", 0b001, int_aarch64_sme_smls_za32_lane_vg4x2>;
+defm SMLSLL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"smlsll", 0b001, int_aarch64_sme_smls_za32_lane_vg4x4>;
 defm SMLSLL_MZZ_BtoS : sme2_mla_ll_array_single<"smlsll", 0b0010, MatrixOp32, ZPR8, ZPR4b8, nxv16i8, int_aarch64_sme_smls_za32_single_vg4x1>;
 defm SMLSLL_VG2_M2ZZ_BtoS : sme2_mla_ll_array_vg2_single<"smlsll", 0b00010, MatrixOp32, ZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_smls_za32_single_vg4x2>;
 defm SMLSLL_VG4_M4ZZ_BtoS : sme2_mla_ll_array_vg4_single<"smlsll", 0b01010, MatrixOp32, ZZZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_smls_za32_single_vg4x4>;
 defm SMLSLL_VG2_M2Z2Z_BtoS : sme2_mla_ll_array_vg2_multi<"smlsll", 0b0010, MatrixOp32, ZZ_b_mul_r, nxv16i8, int_aarch64_sme_smls_za32_vg4x2>;
 defm SMLSLL_VG4_M4Z4Z_BtoS : sme2_mla_ll_array_vg4_multi<"smlsll", 0b0010, MatrixOp32, ZZZZ_b_mul_r, nxv16i8, int_aarch64_sme_smls_za32_vg4x4>;
 
-def UMLALL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"umlall", 0b100>;
-defm UMLALL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"umlall", 0b010>;
-defm UMLALL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"umlall", 0b010>;
+defm UMLALL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"umlall", 0b100, int_aarch64_sme_umla_za32_lane_vg4x1>;
+defm UMLALL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"umlall", 0b010, int_aarch64_sme_umla_za32_lane_vg4x2>;
+defm UMLALL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"umlall", 0b010, int_aarch64_sme_umla_za32_lane_vg4x4>;
 defm UMLALL_MZZ_BtoS : sme2_mla_ll_array_single<"umlall", 0b0100, MatrixOp32, ZPR8, ZPR4b8, nxv16i8, int_aarch64_sme_umla_za32_single_vg4x1>;
 defm UMLALL_VG2_M2ZZ_BtoS : sme2_mla_ll_array_vg2_single<"umlall", 0b00100, MatrixOp32, ZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_umla_za32_single_vg4x2>;
 defm UMLALL_VG4_M4ZZ_BtoS : sme2_mla_ll_array_vg4_single<"umlall", 0b01100, MatrixOp32, ZZZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_umla_za32_single_vg4x4>;
 defm UMLALL_VG2_M2Z2Z_BtoS : sme2_mla_ll_array_vg2_multi<"umlall", 0b0100, MatrixOp32, ZZ_b_mul_r, nxv16i8, int_aarch64_sme_umla_za32_vg4x2>;
 defm UMLALL_VG4_M4Z4Z_BtoS : sme2_mla_ll_array_vg4_multi<"umlall", 0b0100, MatrixOp32, ZZZZ_b_mul_r, nxv16i8, int_aarch64_sme_umla_za32_vg4x4>;
 
-def SUMLALL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"sumlall", 0b101>;
-defm SUMLALL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"sumlall", 0b110>;
-defm SUMLALL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"sumlall", 0b110>;
+defm SUMLALL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"sumlall", 0b101, int_aarch64_sme_sumla_za32_lane_vg4x1>;
+defm SUMLALL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"sumlall", 0b110, int_aarch64_sme_sumla_za32_lane_vg4x2>;
+defm SUMLALL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"sumlall", 0b110, int_aarch64_sme_sumla_za32_lane_vg4x4>;
 defm SUMLALL_VG2_M2ZZ_BtoS : sme2_mla_ll_array_vg2_single<"sumlall", 0b00101, MatrixOp32, ZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_sumla_za32_single_vg4x2>;
 defm SUMLALL_VG4_M4ZZ_BtoS : sme2_mla_ll_array_vg4_single<"sumlall", 0b01101, MatrixOp32, ZZZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_sumla_za32_single_vg4x4>;
 
-def UMLSLL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"umlsll", 0b110>;
-defm UMLSLL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"umlsll", 0b011>;
-defm UMLSLL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"umlsll", 0b011>;
+defm UMLSLL_MZZI_BtoS : sme2_mla_ll_array_index_32b<"umlsll", 0b110, int_aarch64_sme_umls_za32_lane_vg4x1>;
+defm UMLSLL_VG2_M2ZZI_BtoS : sme2_mla_ll_array_vg2_index_32b<"umlsll", 0b011, int_aarch64_sme_umls_za32_lane_vg4x2>;
+defm UMLSLL_VG4_M4ZZI_BtoS : sme2_mla_ll_array_vg4_index_32b<"umlsll", 0b011, int_aarch64_sme_umls_za32_lane_vg4x4>;
 defm UMLSLL_MZZ_BtoS : sme2_mla_ll_array_single<"umlsll", 0b0110, MatrixOp32, ZPR8, ZPR4b8, nxv16i8, int_aarch64_sme_umls_za32_single_vg4x1>;
 defm UMLSLL_VG2_M2ZZ_BtoS : sme2_mla_ll_array_vg2_single<"umlsll", 0b00110, MatrixOp32, ZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_umls_za32_single_vg4x2>;
 defm UMLSLL_VG4_M4ZZ_BtoS : sme2_mla_ll_array_vg4_single<"umlsll", 0b01110, MatrixOp32, ZZZZ_b, ZPR4b8, nxv16i8, int_aarch64_sme_umls_za32_single_vg4x4>;
@@ -739,36 +739,36 @@
 defm UVDOT_VG4_M4ZZI_HtoD : sme2_multi_vec_array_vg4_index_64b<"uvdot", 0b111, ZZZZ_h_mul_r, ZPR4b16, nxv8i16, int_aarch64_sme_uvdot_lane_za64_vg1x4>;
 
-def SMLALL_MZZI_HtoD : sme2_mla_ll_array_index_64b<"smlall", 0b00>;
-defm SMLALL_VG2_M2ZZI_HtoD : sme2_mla_ll_array_vg2_index_64b<"smlall", 0b00>;
-defm SMLALL_VG4_M4ZZI_HtoD : sme2_mla_ll_array_vg4_index_64b<"smlall", 0b00>;
+defm SMLALL_MZZI_HtoD : sme2_mla_ll_array_index_64b<"smlall", 0b00, int_aarch64_sme_smla_za64_lane_vg4x1>;
+defm SMLALL_VG2_M2ZZI_HtoD : sme2_mla_ll_array_vg2_index_64b<"smlall", 0b00, int_aarch64_sme_smla_za64_lane_vg4x2>;
+defm SMLALL_VG4_M4ZZI_HtoD : sme2_mla_ll_array_vg4_index_64b<"smlall", 0b00, int_aarch64_sme_smla_za64_lane_vg4x4>;
 defm SMLALL_MZZ_HtoD : sme2_mla_ll_array_single<"smlall", 0b1000, MatrixOp64, ZPR16, ZPR4b16, nxv8i16, int_aarch64_sme_smla_za64_single_vg4x1>;
 defm SMLALL_VG2_M2ZZ_HtoD : sme2_mla_ll_array_vg2_single<"smlall", 0b10000, MatrixOp64, ZZ_h, ZPR4b16, nxv8i16, int_aarch64_sme_smla_za64_single_vg4x2>;
 defm SMLALL_VG4_M4ZZ_HtoD : sme2_mla_ll_array_vg4_single<"smlall", 0b11000, MatrixOp64, ZZZZ_h, ZPR4b16, nxv8i16, int_aarch64_sme_smla_za64_single_vg4x4>;
 defm SMLALL_VG2_M2Z2Z_HtoD : sme2_mla_ll_array_vg2_multi<"smlall", 0b1000, MatrixOp64, ZZ_h_mul_r, nxv8i16, int_aarch64_sme_smla_za64_vg4x2>;
 defm SMLALL_VG4_M4Z4Z_HtoD : sme2_mla_ll_array_vg4_multi<"smlall", 0b1000, MatrixOp64, ZZZZ_h_mul_r, nxv8i16, int_aarch64_sme_smla_za64_vg4x4>;
 
-def SMLSLL_MZZI_HtoD : sme2_mla_ll_array_index_64b<"smlsll", 0b01>;
-defm SMLSLL_VG2_M2ZZI_HtoD : sme2_mla_ll_array_vg2_index_64b<"smlsll", 0b01>;
-defm SMLSLL_VG4_M4ZZI_HtoD : sme2_mla_ll_array_vg4_index_64b<"smlsll", 0b01>;
+defm SMLSLL_MZZI_HtoD : sme2_mla_ll_array_index_64b<"smlsll", 0b01, int_aarch64_sme_smls_za64_lane_vg4x1>;
+defm SMLSLL_VG2_M2ZZI_HtoD : sme2_mla_ll_array_vg2_index_64b<"smlsll", 0b01, int_aarch64_sme_smls_za64_lane_vg4x2>;
+defm SMLSLL_VG4_M4ZZI_HtoD : sme2_mla_ll_array_vg4_index_64b<"smlsll", 0b01, int_aarch64_sme_smls_za64_lane_vg4x4>;
 defm SMLSLL_MZZ_HtoD : sme2_mla_ll_array_single<"smlsll", 0b1010, MatrixOp64, ZPR16, ZPR4b16, nxv8i16, int_aarch64_sme_smls_za64_single_vg4x1>;
 defm SMLSLL_VG2_M2ZZ_HtoD : sme2_mla_ll_array_vg2_single<"smlsll", 0b10010, MatrixOp64, ZZ_h, ZPR4b16, nxv8i16, int_aarch64_sme_smls_za64_single_vg4x2>;
 defm SMLSLL_VG4_M4ZZ_HtoD : sme2_mla_ll_array_vg4_single<"smlsll", 0b11010, MatrixOp64, ZZZZ_h, ZPR4b16, nxv8i16, int_aarch64_sme_smls_za64_single_vg4x4>;
 defm SMLSLL_VG2_M2Z2Z_HtoD : sme2_mla_ll_array_vg2_multi<"smlsll", 0b1010, MatrixOp64, ZZ_h_mul_r, nxv8i16, int_aarch64_sme_smls_za64_vg4x2>;
 defm SMLSLL_VG4_M4Z4Z_HtoD : sme2_mla_ll_array_vg4_multi<"smlsll", 0b1010, MatrixOp64, ZZZZ_h_mul_r, nxv8i16, int_aarch64_sme_smls_za64_vg4x4>;
 
-def UMLALL_MZZI_HtoD : sme2_mla_ll_array_index_64b<"umlall", 0b10>;
-defm UMLALL_VG2_M2ZZI_HtoD : sme2_mla_ll_array_vg2_index_64b<"umlall", 0b10>;
-defm UMLALL_VG4_M4ZZI_HtoD : sme2_mla_ll_array_vg4_index_64b<"umlall", 0b10>;
+defm UMLALL_MZZI_HtoD : sme2_mla_ll_array_index_64b<"umlall", 0b10, int_aarch64_sme_umla_za64_lane_vg4x1>;
+defm UMLALL_VG2_M2ZZI_HtoD : sme2_mla_ll_array_vg2_index_64b<"umlall", 0b10, int_aarch64_sme_umla_za64_lane_vg4x2>;
+defm UMLALL_VG4_M4ZZI_HtoD : sme2_mla_ll_array_vg4_index_64b<"umlall", 0b10, int_aarch64_sme_umla_za64_lane_vg4x4>;
 defm UMLALL_MZZ_HtoD : sme2_mla_ll_array_single<"umlall", 0b1100, MatrixOp64, ZPR16, ZPR4b16, nxv8i16, int_aarch64_sme_umla_za64_single_vg4x1>;
 defm UMLALL_VG2_M2ZZ_HtoD : sme2_mla_ll_array_vg2_single<"umlall", 0b10100, MatrixOp64, ZZ_h, ZPR4b16, nxv8i16, int_aarch64_sme_umla_za64_single_vg4x2>;
 defm UMLALL_VG4_M4ZZ_HtoD : sme2_mla_ll_array_vg4_single<"umlall", 0b11100, MatrixOp64, ZZZZ_h, ZPR4b16, nxv8i16, int_aarch64_sme_umla_za64_single_vg4x4>;
 defm UMLALL_VG2_M2Z2Z_HtoD : sme2_mla_ll_array_vg2_multi<"umlall", 0b1100, MatrixOp64, ZZ_h_mul_r, nxv8i16, int_aarch64_sme_umla_za64_vg4x2>;
 defm UMLALL_VG4_M4Z4Z_HtoD : sme2_mla_ll_array_vg4_multi<"umlall", 0b1100, MatrixOp64, ZZZZ_h_mul_r, nxv8i16, int_aarch64_sme_umla_za64_vg4x4>;
 
-def UMLSLL_MZZI_HtoD : sme2_mla_ll_array_index_64b<"umlsll", 0b11>;
-defm UMLSLL_VG2_M2ZZI_HtoD : sme2_mla_ll_array_vg2_index_64b<"umlsll", 0b11>;
-defm UMLSLL_VG4_M4ZZI_HtoD : sme2_mla_ll_array_vg4_index_64b<"umlsll", 0b11>;
+defm UMLSLL_MZZI_HtoD : sme2_mla_ll_array_index_64b<"umlsll", 0b11, int_aarch64_sme_umls_za64_lane_vg4x1>;
+defm UMLSLL_VG2_M2ZZI_HtoD : sme2_mla_ll_array_vg2_index_64b<"umlsll", 0b11, int_aarch64_sme_umls_za64_lane_vg4x2>;
+defm UMLSLL_VG4_M4ZZI_HtoD : sme2_mla_ll_array_vg4_index_64b<"umlsll", 0b11, int_aarch64_sme_umls_za64_lane_vg4x4>;
 defm UMLSLL_MZZ_HtoD : sme2_mla_ll_array_single<"umlsll", 0b1110, MatrixOp64, ZPR16, ZPR4b16, nxv8i16, int_aarch64_sme_umls_za64_single_vg4x1>;
 defm UMLSLL_VG2_M2ZZ_HtoD : sme2_mla_ll_array_vg2_single<"umlsll", 0b10110, MatrixOp64, ZZ_h, ZPR4b16, nxv8i16, int_aarch64_sme_umls_za64_single_vg4x2>;
 defm UMLSLL_VG4_M4ZZ_HtoD : sme2_mla_ll_array_vg4_single<"umlsll", 0b11110, MatrixOp64, ZZZZ_h, ZPR4b16, nxv8i16, int_aarch64_sme_umls_za64_single_vg4x4>;
Index: llvm/lib/Target/AArch64/SMEInstrFormats.td
===================================================================
--- llvm/lib/Target/AArch64/SMEInstrFormats.td
+++ llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -2539,7 +2539,7 @@
 // SME2 multi-vec indexed long long MLA one source 32-bit
 class sme2_mla_ll_array_index_32b<string mnemonic, bits<3> op>
     : I<(outs MatrixOp32:$ZAda),
-        (ins MatrixOp32:$_ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s4range:$imm2, ZPR8:$Zn, ZPR4b8:$Zm, VectorIndexB:$i),
+        (ins MatrixOp32:$_ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s4range:$imm2, ZPR8:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$i),
         mnemonic, "\t$ZAda[$Rv, $imm2], $Zn, $Zm$i",
         "", []>, Sched<[]> {
   bits<4> Zm;
@@ -2559,11 +2559,19 @@
   let Constraints = "$ZAda = $_ZAda";
 }
 
+multiclass sme2_mla_ll_array_index_32b<string mnemonic, bits<3> op, SDPatternOperator intrinsic> {
+  def NAME : sme2_mla_ll_array_index_32b<mnemonic, op>, SMEPseudo2Instr<NAME, 1>;
+
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo;
+
+  def : SME2_ZA_TwoOp_Multi_Index_Pat;
+}
+
 // SME2 multi-vec indexed long long MLA one source 64-bit
 class sme2_mla_ll_array_index_64b<string mnemonic, bits<2> op>
     : I<(outs MatrixOp64:$ZAda),
-        (ins MatrixOp64:$_ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s4range:$imm2, ZPR16:$Zn, ZPR4b16:$Zm, VectorIndexH:$i),
+        (ins MatrixOp64:$_ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm2s4range:$imm2, ZPR16:$Zn, ZPR4b16:$Zm, VectorIndexH32b_timm:$i),
         mnemonic, "\t$ZAda[$Rv, $imm2], $Zn, $Zm$i",
         "", []>, Sched<[]> {
   bits<4> Zm;
@@ -2585,12 +2593,20 @@
   let Constraints = "$ZAda = $_ZAda";
 }
 
+multiclass sme2_mla_ll_array_index_64b<string mnemonic, bits<2> op, SDPatternOperator intrinsic> {
+  def NAME : sme2_mla_ll_array_index_64b<mnemonic, op>, SMEPseudo2Instr<NAME, 1>;
+
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo;
+
+  def : SME2_ZA_TwoOp_Multi_Index_Pat;
+}
+
 class sme2_mla_ll_array_vg24_index_32b<bit vg4, bits<3> op, RegisterOperand vector_ty, string mnemonic>
     : I<(outs MatrixOp32:$ZAda),
         (ins MatrixOp32:$_ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm1s4range:$imm,
-             vector_ty:$Zn, ZPR4b8:$Zm, VectorIndexB:$i),
+             vector_ty:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$i),
         mnemonic, "\t$ZAda[$Rv, $imm, " # !if(vg4, "vgx4", "vgx2") # "], $Zn, $Zm$i",
         "", []>, Sched<[]> {
   bits<4> Zm;
@@ -2612,34 +2628,42 @@
 //SME2 multi-vec indexed long long MLA two sources 32-bit
 
-multiclass sme2_mla_ll_array_vg2_index_32b<string mnemonic, bits<3> op> {
-  def NAME: sme2_mla_ll_array_vg24_index_32b<0b0, op, ZZ_b_mul_r, mnemonic> {
+multiclass sme2_mla_ll_array_vg2_index_32b<string mnemonic, bits<3> op, SDPatternOperator intrinsic> {
+  def NAME: sme2_mla_ll_array_vg24_index_32b<0b0, op, ZZ_b_mul_r, mnemonic>, SMEPseudo2Instr<NAME, 1> {
     bits<4> Zn;
     let Inst{9-6} = Zn;
   }
 
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo;
+
+  def : SME2_ZA_TwoOp_VG2_Multi_Index_Pat;
+
   def : InstAlias(NAME) MatrixOp32:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm1s4range:$imm, ZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB:$i), 0>;
+        (!cast(NAME) MatrixOp32:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm1s4range:$imm, ZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$i), 0>;
 }
 
 // SME2 multi-vec indexed long long MLA four sources 32-bit
 
-multiclass sme2_mla_ll_array_vg4_index_32b<string mnemonic, bits<3> op> {
-  def NAME: sme2_mla_ll_array_vg24_index_32b<0b1, op, ZZZZ_b_mul_r, mnemonic> {
+multiclass sme2_mla_ll_array_vg4_index_32b<string mnemonic, bits<3> op, SDPatternOperator intrinsic> {
+  def NAME: sme2_mla_ll_array_vg24_index_32b<0b1, op, ZZZZ_b_mul_r, mnemonic>, SMEPseudo2Instr<NAME, 1> {
    bits<3> Zn;
    let Inst{9-7} = Zn;
    let Inst{6}   = 0b0;
  }
 
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo;
+
+  def : SME2_ZA_TwoOp_VG4_Multi_Index_Pat;
+
   def : InstAlias(NAME) MatrixOp32:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm1s4range:$imm, ZZZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB:$i), 0>;
+        (!cast(NAME) MatrixOp32:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm1s4range:$imm, ZZZZ_b_mul_r:$Zn, ZPR4b8:$Zm, VectorIndexB32b_timm:$i), 0>;
 }
 
 class sme2_mla_ll_array_vg24_index_64b<bit vg4, bits<2> op, RegisterOperand vector_ty, string mnemonic>
     : I<(outs MatrixOp64:$ZAda),
         (ins MatrixOp64:$_ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm1s4range:$imm,
-             vector_ty:$Zn, ZPR4b16:$Zm, VectorIndexH:$i),
+             vector_ty:$Zn, ZPR4b16:$Zm, VectorIndexH32b_timm:$i),
         mnemonic, "\t$ZAda[$Rv, $imm, " # !if(vg4, "vgx4", "vgx2") # "], $Zn, $Zm$i",
         "", []>, Sched<[]> {
   bits<4> Zm;
@@ -2662,27 +2686,35 @@
 // SME2 multi-vec indexed long long MLA two sources 64-bit
 
-multiclass sme2_mla_ll_array_vg2_index_64b<string mnemonic, bits<2> op> {
-  def NAME: sme2_mla_ll_array_vg24_index_64b<0b0, op, ZZ_h_mul_r, mnemonic>{
+multiclass sme2_mla_ll_array_vg2_index_64b<string mnemonic, bits<2> op, SDPatternOperator intrinsic> {
+  def NAME: sme2_mla_ll_array_vg24_index_64b<0b0, op, ZZ_h_mul_r, mnemonic>, SMEPseudo2Instr<NAME, 1> {
    bits<4> Zn;
    let Inst{9-6} = Zn;
  }
 
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo;
+
+  def : SME2_ZA_TwoOp_VG2_Multi_Index_Pat;
+
   def : InstAlias(NAME) MatrixOp64:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm1s4range:$imm, ZZ_h_mul_r:$Zn, ZPR4b16:$Zm, VectorIndexH:$i), 0>;
+        (!cast(NAME) MatrixOp64:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm1s4range:$imm, ZZ_h_mul_r:$Zn, ZPR4b16:$Zm, VectorIndexH32b_timm:$i), 0>;
 }
 
 // SME2 multi-vec indexed long long MLA four sources 64-bit
 
-multiclass sme2_mla_ll_array_vg4_index_64b<string mnemonic, bits<2> op> {
-  def NAME: sme2_mla_ll_array_vg24_index_64b<0b1, op, ZZZZ_h_mul_r, mnemonic>{
+multiclass sme2_mla_ll_array_vg4_index_64b<string mnemonic, bits<2> op, SDPatternOperator intrinsic> {
+  def NAME: sme2_mla_ll_array_vg24_index_64b<0b1, op, ZZZZ_h_mul_r, mnemonic>, SMEPseudo2Instr<NAME, 1> {
    bits<3> Zn;
    let Inst{9-7} = Zn;
    let Inst{6}   = 0b0;
  }
 
+  def _PSEUDO : sme2_za_array_2op_multi_index_pseudo;
+
+  def : SME2_ZA_TwoOp_VG4_Multi_Index_Pat;
+
   def : InstAlias(NAME) MatrixOp64:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm1s4range:$imm, ZZZZ_h_mul_r:$Zn, ZPR4b16:$Zm, VectorIndexH:$i), 0>;
+        (!cast(NAME) MatrixOp64:$ZAda, MatrixIndexGPR32Op8_11:$Rv, uimm1s4range:$imm, ZZZZ_h_mul_r:$Zn, ZPR4b16:$Zm, VectorIndexH32b_timm:$i), 0>;
 }
Index: llvm/test/CodeGen/AArch64/sme2-intrinsics-mlall.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sme2-intrinsics-mlall.ll
+++ llvm/test/CodeGen/AArch64/sme2-intrinsics-mlall.ll
@@ -183,6 +183,102 @@
   ret void
 }
 
+; Indexed x1
+
+define void @multi_vector_mul_add_lane_long_vg4x1_s8(i32 %slice, %dummy, %zn, %zm) {
+; CHECK-LABEL: multi_vector_mul_add_lane_long_vg4x1_s8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, w0
+; CHECK-NEXT: smlall za.s[w8, 0:3], z1.b, z2.b[0] +; CHECK-NEXT: smlall za.s[w8, 12:15], z1.b, z2.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.smla.za32.lane.vg4x1.nxv16i8(i32 %slice, %zn, %zm, i32 0) + %slice.12 = add i32 %slice, 12 + call void @llvm.aarch64.sme.smla.za32.lane.vg4x1.nxv16i8(i32 %slice.12, %zn, %zm, i32 15) + ret void +} + +define void @multi_vector_mul_add_lane_long_vg4x1_s16(i32 %slice, %dummy, %zn, %zm) { +; CHECK-LABEL: multi_vector_mul_add_lane_long_vg4x1_s16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: smlall za.d[w8, 0:3], z1.h, z2.h[0] +; CHECK-NEXT: smlall za.d[w8, 12:15], z1.h, z2.h[7] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.smla.za64.lane.vg4x1.nxv8i16(i32 %slice, %zn, %zm, i32 0) + %slice.12 = add i32 %slice, 12 + call void @llvm.aarch64.sme.smla.za64.lane.vg4x1.nxv8i16(i32 %slice.12, %zn, %zm, i32 7) + ret void +} + +; Indexed x2 + +define void @multi_vector_mul_add_lane_long_vg4x2_s8(i32 %slice, %dummy, %zn0, %zn1, %zm) { +; CHECK-LABEL: multi_vector_mul_add_lane_long_vg4x2_s8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z5.d, z2.d +; CHECK-NEXT: mov z4.d, z1.d +; CHECK-NEXT: smlall za.s[w8, 0:3, vgx2], { z4.b, z5.b }, z3.b[0] +; CHECK-NEXT: smlall za.s[w8, 4:7, vgx2], { z4.b, z5.b }, z3.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.smla.za32.lane.vg4x2.nxv16i8(i32 %slice, %zn0, %zn1, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.smla.za32.lane.vg4x2.nxv16i8(i32 %slice.4, %zn0, %zn1, %zm, i32 15) + ret void +} + +define void @multi_vector_mul_add_lane_long_vg4x2_s16(i32 %slice, %dummy, %zn0, %zn1, %zm) { +; CHECK-LABEL: multi_vector_mul_add_lane_long_vg4x2_s16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z5.d, z2.d +; CHECK-NEXT: mov z4.d, z1.d +; CHECK-NEXT: smlall za.d[w8, 0:3, vgx2], { z4.h, z5.h }, z3.h[0] +; CHECK-NEXT: smlall za.d[w8, 4:7, vgx2], { z4.h, z5.h }, z3.h[7] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.smla.za64.lane.vg4x2.nxv8i16(i32 %slice, %zn0, %zn1, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.smla.za64.lane.vg4x2.nxv8i16(i32 %slice.4, %zn0, %zn1, %zm, i32 7) + ret void +} + +; Indexed x4 + +define void @multi_vector_mul_add_lane_long_vg4x4_s8(i32 %slice, %dummy, %zn0, %zn1, %zn2, %zn3, %zm) { +; CHECK-LABEL: multi_vector_mul_add_lane_long_vg4x4_s8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z27.d, z4.d +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z26.d, z3.d +; CHECK-NEXT: mov z25.d, z2.d +; CHECK-NEXT: mov z24.d, z1.d +; CHECK-NEXT: smlall za.s[w8, 0:3, vgx4], { z24.b - z27.b }, z5.b[0] +; CHECK-NEXT: smlall za.s[w8, 4:7, vgx4], { z24.b - z27.b }, z5.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.smla.za32.lane.vg4x4.nxv16i8(i32 %slice, %zn0, %zn1, %zn2, %zn3, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.smla.za32.lane.vg4x4.nxv16i8(i32 %slice.4, %zn0, %zn1, %zn2, %zn3, %zm, i32 15) + ret void +} + +define void @multi_vector_mul_add_lane_long_vg4x4_s16(i32 %slice, %dummy, %zn0, %zn1, %zn2, %zn3, %zm) { +; CHECK-LABEL: multi_vector_mul_add_lane_long_vg4x4_s16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z27.d, z4.d +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z26.d, z3.d +; CHECK-NEXT: mov z25.d, z2.d +; CHECK-NEXT: mov z24.d, z1.d +; CHECK-NEXT: smlall za.d[w8, 0:3, vgx4], { z24.h - z27.h }, z5.h[0] +; CHECK-NEXT: smlall za.d[w8, 4:7, vgx4], { z24.h - z27.h }, z5.h[7] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.smla.za64.lane.vg4x4.nxv8i16(i32 %slice, %zn0, 
%zn1, %zn2, %zn3, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.smla.za64.lane.vg4x4.nxv8i16(i32 %slice.4, %zn0, %zn1, %zn2, %zn3, %zm, i32 7) + ret void +} + ; UMLALL ; Single x1 @@ -363,6 +459,102 @@ ret void } +; Indexed x1 + +define void @multi_vector_mul_add_lane_long_vg4x1_u8(i32 %slice, %dummy, %zn, %zm) { +; CHECK-LABEL: multi_vector_mul_add_lane_long_vg4x1_u8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: umlall za.s[w8, 0:3], z1.b, z2.b[0] +; CHECK-NEXT: umlall za.s[w8, 12:15], z1.b, z2.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.umla.za32.lane.vg4x1.nxv16i8(i32 %slice, %zn, %zm, i32 0) + %slice.12 = add i32 %slice, 12 + call void @llvm.aarch64.sme.umla.za32.lane.vg4x1.nxv16i8(i32 %slice.12, %zn, %zm, i32 15) + ret void +} + +define void @multi_vector_mul_add_lane_long_vg4x1_u16(i32 %slice, %dummy, %zn, %zm) { +; CHECK-LABEL: multi_vector_mul_add_lane_long_vg4x1_u16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: umlall za.d[w8, 0:3], z1.h, z2.h[0] +; CHECK-NEXT: umlall za.d[w8, 12:15], z1.h, z2.h[7] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.umla.za64.lane.vg4x1.nxv8i16(i32 %slice, %zn, %zm, i32 0) + %slice.12 = add i32 %slice, 12 + call void @llvm.aarch64.sme.umla.za64.lane.vg4x1.nxv8i16(i32 %slice.12, %zn, %zm, i32 7) + ret void +} + +; Indexed x2 + +define void @multi_vector_mul_add_lane_long_vg4x2_u8(i32 %slice, %dummy, %zn0, %zn1, %zm) { +; CHECK-LABEL: multi_vector_mul_add_lane_long_vg4x2_u8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z5.d, z2.d +; CHECK-NEXT: mov z4.d, z1.d +; CHECK-NEXT: umlall za.s[w8, 0:3, vgx2], { z4.b, z5.b }, z3.b[0] +; CHECK-NEXT: umlall za.s[w8, 4:7, vgx2], { z4.b, z5.b }, z3.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.umla.za32.lane.vg4x2.nxv16i8(i32 %slice, %zn0, %zn1, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.umla.za32.lane.vg4x2.nxv16i8(i32 %slice.4, %zn0, %zn1, %zm, i32 15) + ret void +} + +define void @multi_vector_mul_add_lane_long_vg4x2_u16(i32 %slice, %dummy, %zn0, %zn1, %zm) { +; CHECK-LABEL: multi_vector_mul_add_lane_long_vg4x2_u16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z5.d, z2.d +; CHECK-NEXT: mov z4.d, z1.d +; CHECK-NEXT: umlall za.d[w8, 0:3, vgx2], { z4.h, z5.h }, z3.h[0] +; CHECK-NEXT: umlall za.d[w8, 4:7, vgx2], { z4.h, z5.h }, z3.h[7] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.umla.za64.lane.vg4x2.nxv8i16(i32 %slice, %zn0, %zn1, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.umla.za64.lane.vg4x2.nxv8i16(i32 %slice.4, %zn0, %zn1, %zm, i32 7) + ret void +} + +; Indexed x4 + +define void @multi_vector_mul_add_lane_long_vg4x4_u8(i32 %slice, %dummy, %zn0, %zn1, %zn2, %zn3, %zm) { +; CHECK-LABEL: multi_vector_mul_add_lane_long_vg4x4_u8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z27.d, z4.d +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z26.d, z3.d +; CHECK-NEXT: mov z25.d, z2.d +; CHECK-NEXT: mov z24.d, z1.d +; CHECK-NEXT: umlall za.s[w8, 0:3, vgx4], { z24.b - z27.b }, z5.b[0] +; CHECK-NEXT: umlall za.s[w8, 4:7, vgx4], { z24.b - z27.b }, z5.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.umla.za32.lane.vg4x4.nxv16i8(i32 %slice, %zn0, %zn1, %zn2, %zn3, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.umla.za32.lane.vg4x4.nxv16i8(i32 %slice.4, %zn0, %zn1, %zn2, %zn3, %zm, i32 15) + ret void +} + +define void @multi_vector_mul_add_lane_long_vg4x4_u16(i32 %slice, %dummy, %zn0, %zn1, %zn2, %zn3, %zm) { +; CHECK-LABEL: 
multi_vector_mul_add_lane_long_vg4x4_u16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z27.d, z4.d +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z26.d, z3.d +; CHECK-NEXT: mov z25.d, z2.d +; CHECK-NEXT: mov z24.d, z1.d +; CHECK-NEXT: umlall za.d[w8, 0:3, vgx4], { z24.h - z27.h }, z5.h[0] +; CHECK-NEXT: umlall za.d[w8, 4:7, vgx4], { z24.h - z27.h }, z5.h[7] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.umla.za64.lane.vg4x4.nxv8i16(i32 %slice, %zn0, %zn1, %zn2, %zn3, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.umla.za64.lane.vg4x4.nxv8i16(i32 %slice.4, %zn0, %zn1, %zn2, %zn3, %zm, i32 7) + ret void +} + ; SMLSLL ; Single x1 @@ -543,6 +735,102 @@ ret void } +; Indexed x1 + +define void @multi_vector_mul_sub_lane_long_vg4x1_s8(i32 %slice, %dummy, %zn, %zm) { +; CHECK-LABEL: multi_vector_mul_sub_lane_long_vg4x1_s8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: smlsll za.s[w8, 0:3], z1.b, z2.b[0] +; CHECK-NEXT: smlsll za.s[w8, 12:15], z1.b, z2.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.smls.za32.lane.vg4x1.nxv16i8(i32 %slice, %zn, %zm, i32 0) + %slice.12 = add i32 %slice, 12 + call void @llvm.aarch64.sme.smls.za32.lane.vg4x1.nxv16i8(i32 %slice.12, %zn, %zm, i32 15) + ret void +} + +define void @multi_vector_mul_sub_lane_long_vg4x1_s16(i32 %slice, %dummy, %zn, %zm) { +; CHECK-LABEL: multi_vector_mul_sub_lane_long_vg4x1_s16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: smlsll za.d[w8, 0:3], z1.h, z2.h[0] +; CHECK-NEXT: smlsll za.d[w8, 12:15], z1.h, z2.h[7] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.smls.za64.lane.vg4x1.nxv8i16(i32 %slice, %zn, %zm, i32 0) + %slice.12 = add i32 %slice, 12 + call void @llvm.aarch64.sme.smls.za64.lane.vg4x1.nxv8i16(i32 %slice.12, %zn, %zm, i32 7) + ret void +} + +; Indexed x2 + +define void @multi_vector_mul_sub_lane_long_vg4x2_s8(i32 %slice, %dummy, %zn0, %zn1, %zm) { +; CHECK-LABEL: multi_vector_mul_sub_lane_long_vg4x2_s8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z5.d, z2.d +; CHECK-NEXT: mov z4.d, z1.d +; CHECK-NEXT: smlsll za.s[w8, 0:3, vgx2], { z4.b, z5.b }, z3.b[0] +; CHECK-NEXT: smlsll za.s[w8, 4:7, vgx2], { z4.b, z5.b }, z3.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.smls.za32.lane.vg4x2.nxv16i8(i32 %slice, %zn0, %zn1, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.smls.za32.lane.vg4x2.nxv16i8(i32 %slice.4, %zn0, %zn1, %zm, i32 15) + ret void +} + +define void @multi_vector_mul_sub_lane_long_vg4x2_s16(i32 %slice, %dummy, %zn0, %zn1, %zm) { +; CHECK-LABEL: multi_vector_mul_sub_lane_long_vg4x2_s16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z5.d, z2.d +; CHECK-NEXT: mov z4.d, z1.d +; CHECK-NEXT: smlsll za.d[w8, 0:3, vgx2], { z4.h, z5.h }, z3.h[0] +; CHECK-NEXT: smlsll za.d[w8, 4:7, vgx2], { z4.h, z5.h }, z3.h[7] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.smls.za64.lane.vg4x2.nxv8i16(i32 %slice, %zn0, %zn1, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.smls.za64.lane.vg4x2.nxv8i16(i32 %slice.4, %zn0, %zn1, %zm, i32 7) + ret void +} + +; Indexed x4 + +define void @multi_vector_mul_sub_lane_long_vg4x4_s8(i32 %slice, %dummy, %zn0, %zn1, %zn2, %zn3, %zm) { +; CHECK-LABEL: multi_vector_mul_sub_lane_long_vg4x4_s8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z27.d, z4.d +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z26.d, z3.d +; CHECK-NEXT: mov z25.d, z2.d +; CHECK-NEXT: mov z24.d, z1.d +; CHECK-NEXT: smlsll za.s[w8, 0:3, vgx4], { z24.b - z27.b }, z5.b[0] +; CHECK-NEXT: smlsll za.s[w8, 
4:7, vgx4], { z24.b - z27.b }, z5.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.smls.za32.lane.vg4x4.nxv16i8(i32 %slice, %zn0, %zn1, %zn2, %zn3, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.smls.za32.lane.vg4x4.nxv16i8(i32 %slice.4, %zn0, %zn1, %zn2, %zn3, %zm, i32 15) + ret void +} + +define void @multi_vector_mul_sub_lane_long_vg4x4_s16(i32 %slice, %dummy, %zn0, %zn1, %zn2, %zn3, %zm) { +; CHECK-LABEL: multi_vector_mul_sub_lane_long_vg4x4_s16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z27.d, z4.d +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z26.d, z3.d +; CHECK-NEXT: mov z25.d, z2.d +; CHECK-NEXT: mov z24.d, z1.d +; CHECK-NEXT: smlsll za.d[w8, 0:3, vgx4], { z24.h - z27.h }, z5.h[0] +; CHECK-NEXT: smlsll za.d[w8, 4:7, vgx4], { z24.h - z27.h }, z5.h[7] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.smls.za64.lane.vg4x4.nxv8i16(i32 %slice, %zn0, %zn1, %zn2, %zn3, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.smls.za64.lane.vg4x4.nxv8i16(i32 %slice.4, %zn0, %zn1, %zn2, %zn3, %zm, i32 7) + ret void +} + ; UMLSLL ; Single x1 @@ -723,6 +1011,102 @@ ret void } +; Indexed x1 + +define void @multi_vector_mul_sub_lane_long_vg4x1_u8(i32 %slice, %dummy, %zn, %zm) { +; CHECK-LABEL: multi_vector_mul_sub_lane_long_vg4x1_u8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: umlsll za.s[w8, 0:3], z1.b, z2.b[0] +; CHECK-NEXT: umlsll za.s[w8, 12:15], z1.b, z2.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.umls.za32.lane.vg4x1.nxv16i8(i32 %slice, %zn, %zm, i32 0) + %slice.12 = add i32 %slice, 12 + call void @llvm.aarch64.sme.umls.za32.lane.vg4x1.nxv16i8(i32 %slice.12, %zn, %zm, i32 15) + ret void +} + +define void @multi_vector_mul_sub_lane_long_vg4x1_u16(i32 %slice, %dummy, %zn, %zm) { +; CHECK-LABEL: multi_vector_mul_sub_lane_long_vg4x1_u16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: umlsll za.d[w8, 0:3], z1.h, z2.h[0] +; CHECK-NEXT: umlsll za.d[w8, 12:15], z1.h, z2.h[7] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.umls.za64.lane.vg4x1.nxv8i16(i32 %slice, %zn, %zm, i32 0) + %slice.12 = add i32 %slice, 12 + call void @llvm.aarch64.sme.umls.za64.lane.vg4x1.nxv8i16(i32 %slice.12, %zn, %zm, i32 7) + ret void +} + +; Indexed x2 + +define void @multi_vector_mul_sub_lane_long_vg4x2_u8(i32 %slice, %dummy, %zn0, %zn1, %zm) { +; CHECK-LABEL: multi_vector_mul_sub_lane_long_vg4x2_u8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z5.d, z2.d +; CHECK-NEXT: mov z4.d, z1.d +; CHECK-NEXT: umlsll za.s[w8, 0:3, vgx2], { z4.b, z5.b }, z3.b[0] +; CHECK-NEXT: umlsll za.s[w8, 4:7, vgx2], { z4.b, z5.b }, z3.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.umls.za32.lane.vg4x2.nxv16i8(i32 %slice, %zn0, %zn1, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.umls.za32.lane.vg4x2.nxv16i8(i32 %slice.4, %zn0, %zn1, %zm, i32 15) + ret void +} + +define void @multi_vector_mul_sub_lane_long_vg4x2_u16(i32 %slice, %dummy, %zn0, %zn1, %zm) { +; CHECK-LABEL: multi_vector_mul_sub_lane_long_vg4x2_u16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z5.d, z2.d +; CHECK-NEXT: mov z4.d, z1.d +; CHECK-NEXT: umlsll za.d[w8, 0:3, vgx2], { z4.h, z5.h }, z3.h[0] +; CHECK-NEXT: umlsll za.d[w8, 4:7, vgx2], { z4.h, z5.h }, z3.h[7] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.umls.za64.lane.vg4x2.nxv8i16(i32 %slice, %zn0, %zn1, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.umls.za64.lane.vg4x2.nxv8i16(i32 %slice.4, %zn0, %zn1, %zm, i32 7) + ret void +} + +; 
Indexed x4 + +define void @multi_vector_mul_sub_lane_long_vg4x4_u8(i32 %slice, %dummy, %zn0, %zn1, %zn2, %zn3, %zm) { +; CHECK-LABEL: multi_vector_mul_sub_lane_long_vg4x4_u8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z27.d, z4.d +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z26.d, z3.d +; CHECK-NEXT: mov z25.d, z2.d +; CHECK-NEXT: mov z24.d, z1.d +; CHECK-NEXT: umlsll za.s[w8, 0:3, vgx4], { z24.b - z27.b }, z5.b[0] +; CHECK-NEXT: umlsll za.s[w8, 4:7, vgx4], { z24.b - z27.b }, z5.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.umls.za32.lane.vg4x4.nxv16i8(i32 %slice, %zn0, %zn1, %zn2, %zn3, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.umls.za32.lane.vg4x4.nxv16i8(i32 %slice.4, %zn0, %zn1, %zn2, %zn3, %zm, i32 15) + ret void +} + +define void @multi_vector_mul_sub_lane_long_vg4x4_u16(i32 %slice, %dummy, %zn0, %zn1, %zn2, %zn3, %zm) { +; CHECK-LABEL: multi_vector_mul_sub_lane_long_vg4x4_u16: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z27.d, z4.d +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z26.d, z3.d +; CHECK-NEXT: mov z25.d, z2.d +; CHECK-NEXT: mov z24.d, z1.d +; CHECK-NEXT: umlsll za.d[w8, 0:3, vgx4], { z24.h - z27.h }, z5.h[0] +; CHECK-NEXT: umlsll za.d[w8, 4:7, vgx4], { z24.h - z27.h }, z5.h[7] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.umls.za64.lane.vg4x4.nxv8i16(i32 %slice, %zn0, %zn1, %zn2, %zn3, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.umls.za64.lane.vg4x4.nxv8i16(i32 %slice.4, %zn0, %zn1, %zn2, %zn3, %zm, i32 7) + ret void +} + ; ; SUMLALL ; @@ -763,6 +1147,57 @@ ret void } +; Indexed x1 + +define void @multi_vector_mul_add_lane_signed_long_vg4x1_s8(i32 %slice, %dummy, %zn, %zm) { +; CHECK-LABEL: multi_vector_mul_add_lane_signed_long_vg4x1_s8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: sumlall za.s[w8, 0:3], z1.b, z2.b[0] +; CHECK-NEXT: sumlall za.s[w8, 12:15], z1.b, z2.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.sumla.za32.lane.vg4x1.nxv16i8(i32 %slice, %zn, %zm, i32 0) + %slice.12 = add i32 %slice, 12 + call void @llvm.aarch64.sme.sumla.za32.lane.vg4x1.nxv16i8(i32 %slice.12, %zn, %zm, i32 15) + ret void +} + +; Indexed x2 + +define void @multi_vector_mul_add_lane_signed_long_vg4x2_s8(i32 %slice, %dummy, %zn0, %zn1, %zm) { +; CHECK-LABEL: multi_vector_mul_add_lane_signed_long_vg4x2_s8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z5.d, z2.d +; CHECK-NEXT: mov z4.d, z1.d +; CHECK-NEXT: sumlall za.s[w8, 0:3, vgx2], { z4.b, z5.b }, z3.b[0] +; CHECK-NEXT: sumlall za.s[w8, 4:7, vgx2], { z4.b, z5.b }, z3.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.sumla.za32.lane.vg4x2.nxv16i8(i32 %slice, %zn0, %zn1, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.sumla.za32.lane.vg4x2.nxv16i8(i32 %slice.4, %zn0, %zn1, %zm, i32 15) + ret void +} + +; Indexed x4 + +define void @multi_vector_mul_add_lane_signed_long_vg4x4_s8(i32 %slice, %dummy, %zn0, %zn1, %zn2, %zn3, %zm) { +; CHECK-LABEL: multi_vector_mul_add_lane_signed_long_vg4x4_s8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z27.d, z4.d +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z26.d, z3.d +; CHECK-NEXT: mov z25.d, z2.d +; CHECK-NEXT: mov z24.d, z1.d +; CHECK-NEXT: sumlall za.s[w8, 0:3, vgx4], { z24.b - z27.b }, z5.b[0] +; CHECK-NEXT: sumlall za.s[w8, 4:7, vgx4], { z24.b - z27.b }, z5.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.sumla.za32.lane.vg4x4.nxv16i8(i32 %slice, %zn0, %zn1, %zn2, %zn3, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void 
@llvm.aarch64.sme.sumla.za32.lane.vg4x4.nxv16i8(i32 %slice.4, %zn0, %zn1, %zn2, %zn3, %zm, i32 15) + ret void +} + ; USMLALL ; Single x1 @@ -859,6 +1294,57 @@ ret void } +; Indexed x1 + +define void @multi_vector_mul_add_lane_unsigned_long_vg4x1_s8(i32 %slice, %dummy, %zn, %zm) { +; CHECK-LABEL: multi_vector_mul_add_lane_unsigned_long_vg4x1_s8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: usmlall za.s[w8, 0:3], z1.b, z2.b[0] +; CHECK-NEXT: usmlall za.s[w8, 12:15], z1.b, z2.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.usmla.za32.lane.vg4x1.nxv16i8(i32 %slice, %zn, %zm, i32 0) + %slice.12 = add i32 %slice, 12 + call void @llvm.aarch64.sme.usmla.za32.lane.vg4x1.nxv16i8(i32 %slice.12, %zn, %zm, i32 15) + ret void +} + +; Indexed x2 + +define void @multi_vector_mul_add_lane_unsigned_long_vg4x2_s8(i32 %slice, %dummy, %zn0, %zn1, %zm) { +; CHECK-LABEL: multi_vector_mul_add_lane_unsigned_long_vg4x2_s8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z5.d, z2.d +; CHECK-NEXT: mov z4.d, z1.d +; CHECK-NEXT: usmlall za.s[w8, 0:3, vgx2], { z4.b, z5.b }, z3.b[0] +; CHECK-NEXT: usmlall za.s[w8, 4:7, vgx2], { z4.b, z5.b }, z3.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.usmla.za32.lane.vg4x2.nxv16i8(i32 %slice, %zn0, %zn1, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.usmla.za32.lane.vg4x2.nxv16i8(i32 %slice.4, %zn0, %zn1, %zm, i32 15) + ret void +} + +; Indexed x4 + +define void @multi_vector_mul_add_lane_unsigned_long_vg4x4_s8(i32 %slice, %dummy, %zn0, %zn1, %zn2, %zn3, %zm) { +; CHECK-LABEL: multi_vector_mul_add_lane_unsigned_long_vg4x4_s8: +; CHECK: // %bb.0: +; CHECK-NEXT: mov z27.d, z4.d +; CHECK-NEXT: mov w8, w0 +; CHECK-NEXT: mov z26.d, z3.d +; CHECK-NEXT: mov z25.d, z2.d +; CHECK-NEXT: mov z24.d, z1.d +; CHECK-NEXT: usmlall za.s[w8, 0:3, vgx4], { z24.b - z27.b }, z5.b[0] +; CHECK-NEXT: usmlall za.s[w8, 4:7, vgx4], { z24.b - z27.b }, z5.b[15] +; CHECK-NEXT: ret + call void @llvm.aarch64.sme.usmla.za32.lane.vg4x4.nxv16i8(i32 %slice, %zn0, %zn1, %zn2, %zn3, %zm, i32 0) + %slice.4 = add i32 %slice, 4 + call void @llvm.aarch64.sme.usmla.za32.lane.vg4x4.nxv16i8(i32 %slice.4, %zn0, %zn1, %zn2, %zn3, %zm, i32 15) + ret void +} + declare void @llvm.aarch64.sme.smla.za32.single.vg4x1.nxv16i8(i32, , ) declare void @llvm.aarch64.sme.smla.za32.single.vg4x2.nxv16i8(i32, , , ) declare void @llvm.aarch64.sme.smla.za32.single.vg4x4.nxv16i8(i32, , , , , ) @@ -873,6 +1359,14 @@ declare void @llvm.aarch64.sme.smla.za64.vg4x2.nxv8i16(i32, , , , ) declare void @llvm.aarch64.sme.smla.za64.vg4x4.nxv8i16(i32, , , , , , , , ) +declare void @llvm.aarch64.sme.smla.za32.lane.vg4x1.nxv16i8(i32, , , i32) +declare void @llvm.aarch64.sme.smla.za32.lane.vg4x2.nxv16i8(i32, , , , i32) +declare void @llvm.aarch64.sme.smla.za32.lane.vg4x4.nxv16i8(i32, , , , , , i32) + +declare void @llvm.aarch64.sme.smla.za64.lane.vg4x1.nxv8i16(i32, , , i32) +declare void @llvm.aarch64.sme.smla.za64.lane.vg4x2.nxv8i16(i32, , , , i32) +declare void @llvm.aarch64.sme.smla.za64.lane.vg4x4.nxv8i16(i32, , , , , , i32) + declare void @llvm.aarch64.sme.umla.za32.single.vg4x1.nxv16i8(i32, , ) declare void @llvm.aarch64.sme.umla.za32.single.vg4x2.nxv16i8(i32, , , ) declare void @llvm.aarch64.sme.umla.za32.single.vg4x4.nxv16i8(i32, , , , , ) @@ -887,6 +1381,14 @@ declare void @llvm.aarch64.sme.umla.za64.vg4x2.nxv8i16(i32, , , , ) declare void @llvm.aarch64.sme.umla.za64.vg4x4.nxv8i16(i32, , , , , , , , ) +declare void @llvm.aarch64.sme.umla.za32.lane.vg4x1.nxv16i8(i32, , , i32) 
+declare void @llvm.aarch64.sme.umla.za32.lane.vg4x2.nxv16i8(i32, , , , i32)
+declare void @llvm.aarch64.sme.umla.za32.lane.vg4x4.nxv16i8(i32, , , , , , i32)
+
+declare void @llvm.aarch64.sme.umla.za64.lane.vg4x1.nxv8i16(i32, , , i32)
+declare void @llvm.aarch64.sme.umla.za64.lane.vg4x2.nxv8i16(i32, , , , i32)
+declare void @llvm.aarch64.sme.umla.za64.lane.vg4x4.nxv8i16(i32, , , , , , i32)
+
 declare void @llvm.aarch64.sme.smls.za32.single.vg4x1.nxv16i8(i32, , )
 declare void @llvm.aarch64.sme.smls.za32.single.vg4x2.nxv16i8(i32, , , )
 declare void @llvm.aarch64.sme.smls.za32.single.vg4x4.nxv16i8(i32, , , , , )
@@ -901,6 +1403,14 @@
 declare void @llvm.aarch64.sme.smls.za64.vg4x2.nxv8i16(i32, , , , )
 declare void @llvm.aarch64.sme.smls.za64.vg4x4.nxv8i16(i32, , , , , , , , )
 
+declare void @llvm.aarch64.sme.smls.za32.lane.vg4x1.nxv16i8(i32, , , i32)
+declare void @llvm.aarch64.sme.smls.za32.lane.vg4x2.nxv16i8(i32, , , , i32)
+declare void @llvm.aarch64.sme.smls.za32.lane.vg4x4.nxv16i8(i32, , , , , , i32)
+
+declare void @llvm.aarch64.sme.smls.za64.lane.vg4x1.nxv8i16(i32, , , i32)
+declare void @llvm.aarch64.sme.smls.za64.lane.vg4x2.nxv8i16(i32, , , , i32)
+declare void @llvm.aarch64.sme.smls.za64.lane.vg4x4.nxv8i16(i32, , , , , , i32)
+
 declare void @llvm.aarch64.sme.umls.za32.single.vg4x1.nxv16i8(i32, , )
 declare void @llvm.aarch64.sme.umls.za32.single.vg4x2.nxv16i8(i32, , , )
 declare void @llvm.aarch64.sme.umls.za32.single.vg4x4.nxv16i8(i32, , , , , )
@@ -915,12 +1425,36 @@
 declare void @llvm.aarch64.sme.umls.za64.vg4x2.nxv8i16(i32, , , , )
 declare void @llvm.aarch64.sme.umls.za64.vg4x4.nxv8i16(i32, , , , , , , , )
 
+declare void @llvm.aarch64.sme.umls.za32.lane.vg4x1.nxv16i8(i32, , , i32)
+declare void @llvm.aarch64.sme.umls.za32.lane.vg4x2.nxv16i8(i32, , , , i32)
+declare void @llvm.aarch64.sme.umls.za32.lane.vg4x4.nxv16i8(i32, , , , , , i32)
+
+declare void @llvm.aarch64.sme.umls.za64.lane.vg4x1.nxv8i16(i32, , , i32)
+declare void @llvm.aarch64.sme.umls.za64.lane.vg4x2.nxv8i16(i32, , , , i32)
+declare void @llvm.aarch64.sme.umls.za64.lane.vg4x4.nxv8i16(i32, , , , , , i32)
+
 declare void @llvm.aarch64.sme.sumla.za32.single.vg4x2.nxv16i8(i32, , , )
 declare void @llvm.aarch64.sme.sumla.za32.single.vg4x4.nxv16i8(i32, , , , , )
 
+declare void @llvm.aarch64.sme.sumla.za32.lane.vg4x1.nxv16i8(i32, , , i32)
+declare void @llvm.aarch64.sme.sumla.za32.lane.vg4x2.nxv16i8(i32, , , , i32)
+declare void @llvm.aarch64.sme.sumla.za32.lane.vg4x4.nxv16i8(i32, , , , , , i32)
+
+declare void @llvm.aarch64.sme.sumla.za64.lane.vg4x1.nxv8i16(i32, , , i32)
+declare void @llvm.aarch64.sme.sumls.za64.lane.vg4x2.nxv8i16(i32, , , , i32)
+declare void @llvm.aarch64.sme.sumls.za64.lane.vg4x4.nxv8i16(i32, , , , , , i32)
+
 declare void @llvm.aarch64.sme.usmla.za32.single.vg4x1.nxv16i8(i32, , )
 declare void @llvm.aarch64.sme.usmla.za32.single.vg4x2.nxv16i8(i32, , , )
 declare void @llvm.aarch64.sme.usmla.za32.single.vg4x4.nxv16i8(i32, , , , , )
 
 declare void @llvm.aarch64.sme.usmla.za32.vg4x2.nxv16i8(i32, , , , )
 declare void @llvm.aarch64.sme.usmla.za32.vg4x4.nxv16i8(i32, , , , , , , , )
+
+declare void @llvm.aarch64.sme.usmla.za32.lane.vg4x1.nxv16i8(i32, , , i32)
+declare void @llvm.aarch64.sme.usmla.za32.lane.vg4x2.nxv16i8(i32, , , , i32)
+declare void @llvm.aarch64.sme.usmla.za32.lane.vg4x4.nxv16i8(i32, , , , , , i32)
+
+declare void @llvm.aarch64.sme.usmla.za64.lane.vg4x1.nxv8i16(i32, , , i32)
+declare void @llvm.aarch64.sme.usmls.za64.lane.vg4x2.nxv8i16(i32, , , , i32)
+declare void @llvm.aarch64.sme.usmls.za64.lane.vg4x4.nxv8i16(i32, , , , , , i32)
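For reference only, a minimal usage sketch in LLVM IR that mirrors the tests above (the function name is hypothetical and not part of the patch): the .nxv16i8 suffix corresponds to <vscale x 16 x i8> operands, and the trailing i32 is the immediate lane index (0-15 for byte elements).

define void @example_smlall_lane_vg4x2(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zm) {
  ; Widening multiply-accumulate of the two-vector group {%zn0, %zn1} by lane 3 of %zm
  ; into four consecutive ZA.S slices starting at %slice (selects SMLALL ..., vgx2).
  call void @llvm.aarch64.sme.smla.za32.lane.vg4x2.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zm, i32 3)
  ret void
}

declare void @llvm.aarch64.sme.smla.za32.lane.vg4x2.nxv16i8(i32, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i32)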