diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -2838,6 +2838,20 @@
   def int_aarch64_sve_sqcvtn_x4  : SME2_CVT_VG4_SINGLE_Intrinsic;
   def int_aarch64_sve_uqcvtn_x4  : SME2_CVT_VG4_SINGLE_Intrinsic;
   def int_aarch64_sve_sqcvtun_x4 : SME2_CVT_VG4_SINGLE_Intrinsic;
-}
+  //
+  // Multi-Single add/sub
+  //
+  def int_aarch64_sme_add_write_single_za_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
+  def int_aarch64_sme_sub_write_single_za_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Single_Intrinsic;
+  def int_aarch64_sme_add_write_single_za_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;
+  def int_aarch64_sme_sub_write_single_za_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Single_Intrinsic;
+  //
+  // Multi-Multi add/sub
+  //
+  def int_aarch64_sme_add_write_za_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
+  def int_aarch64_sme_sub_write_za_vg1x2 : SME2_Matrix_ArrayVector_VG2_Multi_Multi_Intrinsic;
+  def int_aarch64_sme_add_write_za_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;
+  def int_aarch64_sme_sub_write_za_vg1x4 : SME2_Matrix_ArrayVector_VG4_Multi_Multi_Intrinsic;
+}
diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
@@ -247,18 +247,18 @@
 // SME2 Instructions
 //===----------------------------------------------------------------------===//
 let Predicates = [HasSME2] in {
-defm ADD_VG2_M2ZZ_S  : sme2_dot_mla_add_sub_array_vg24_single<"add", 0b0011010, MatrixOp32, ZZ_s, ZPR4b32>;
-defm ADD_VG4_M4ZZ_S  : sme2_dot_mla_add_sub_array_vg24_single<"add", 0b0111010, MatrixOp32, ZZZZ_s, ZPR4b32>;
-defm ADD_VG2_M2Z2Z_S : sme2_dot_mla_add_sub_array_vg2_multi<"add", 0b011010, MatrixOp32, ZZ_s_mul_r, nxv4i32, null_frag>;
-defm ADD_VG4_M4Z4Z_S : sme2_dot_mla_add_sub_array_vg4_multi<"add", 0b011010, MatrixOp32, ZZZZ_s_mul_r, nxv4i32, null_frag>;
+defm ADD_VG2_M2ZZ_S  : sme2_dot_mla_add_sub_array_vg2_single<"add", 0b0011010, MatrixOp32, ZZ_s, ZPR4b32, nxv4i32, int_aarch64_sme_add_write_single_za_vg1x2>;
+defm ADD_VG4_M4ZZ_S  : sme2_dot_mla_add_sub_array_vg4_single<"add", 0b0111010, MatrixOp32, ZZZZ_s, ZPR4b32, nxv4i32, int_aarch64_sme_add_write_single_za_vg1x4>;
+defm ADD_VG2_M2Z2Z_S : sme2_dot_mla_add_sub_array_vg2_multi<"add", 0b011010, MatrixOp32, ZZ_s_mul_r, nxv4i32, int_aarch64_sme_add_write_za_vg1x2>;
+defm ADD_VG4_M4Z4Z_S : sme2_dot_mla_add_sub_array_vg4_multi<"add", 0b011010, MatrixOp32, ZZZZ_s_mul_r, nxv4i32, int_aarch64_sme_add_write_za_vg1x4>;
 defm ADD_VG2_2ZZ  : sme2_int_sve_destructive_vector_vg2_single<"add", 0b0110000>;
 defm ADD_VG4_4ZZ  : sme2_int_sve_destructive_vector_vg4_single<"add", 0b0110000>;
 
-defm SUB_VG2_M2ZZ_S  : sme2_dot_mla_add_sub_array_vg24_single<"sub", 0b0011011, MatrixOp32, ZZ_s, ZPR4b32>;
-defm SUB_VG4_M4ZZ_S  : sme2_dot_mla_add_sub_array_vg24_single<"sub", 0b0111011, MatrixOp32, ZZZZ_s, ZPR4b32>;
-defm SUB_VG2_M2Z2Z_S : sme2_dot_mla_add_sub_array_vg2_multi<"sub", 0b011011, MatrixOp32, ZZ_s_mul_r, nxv4i32, null_frag>;
-defm SUB_VG4_M4Z4Z_S : sme2_dot_mla_add_sub_array_vg4_multi<"sub", 0b011011, MatrixOp32, ZZZZ_s_mul_r, nxv4i32, null_frag>;
+defm SUB_VG2_M2ZZ_S  : sme2_dot_mla_add_sub_array_vg2_single<"sub", 0b0011011, MatrixOp32, ZZ_s, ZPR4b32, nxv4i32, int_aarch64_sme_sub_write_single_za_vg1x2>;
+defm SUB_VG4_M4ZZ_S  : sme2_dot_mla_add_sub_array_vg4_single<"sub", 0b0111011, MatrixOp32, ZZZZ_s, ZPR4b32, nxv4i32, int_aarch64_sme_sub_write_single_za_vg1x4>;
sme2_dot_mla_add_sub_array_vg4_single<"sub", 0b0111011, MatrixOp32, ZZZZ_s, ZPR4b32, nxv4i32, int_aarch64_sme_sub_write_single_za_vg1x4>; +defm SUB_VG2_M2Z2Z_S : sme2_dot_mla_add_sub_array_vg2_multi<"sub", 0b011011, MatrixOp32, ZZ_s_mul_r, nxv4i32, int_aarch64_sme_sub_write_za_vg1x2>; +defm SUB_VG4_M4Z4Z_S : sme2_dot_mla_add_sub_array_vg4_multi<"sub", 0b011011, MatrixOp32, ZZZZ_s_mul_r, nxv4i32, int_aarch64_sme_sub_write_za_vg1x4>; defm FMLA_VG2_M2ZZ_S : sme2_dot_mla_add_sub_array_vg2_single<"fmla", 0b0011000, MatrixOp32, ZZ_s, ZPR4b32, nxv4f32, int_aarch64_sme_fmla_single_vg1x2>; defm FMLA_VG4_M4ZZ_S : sme2_dot_mla_add_sub_array_vg4_single<"fmla", 0b0111000, MatrixOp32, ZZZZ_s, ZPR4b32, nxv4f32, int_aarch64_sme_fmla_single_vg1x4>; @@ -705,15 +705,15 @@ } let Predicates = [HasSME2, HasSMEI16I64] in { -defm ADD_VG2_M2ZZ_D : sme2_dot_mla_add_sub_array_vg24_single<"add", 0b1011010, MatrixOp64, ZZ_d, ZPR4b64>; -defm ADD_VG4_M4ZZ_D : sme2_dot_mla_add_sub_array_vg24_single<"add", 0b1111010, MatrixOp64, ZZZZ_d, ZPR4b64>; -defm ADD_VG2_M2Z2Z_D : sme2_dot_mla_add_sub_array_vg2_multi<"add", 0b111010, MatrixOp64, ZZ_d_mul_r, nxv2i64, null_frag>; -defm ADD_VG4_M4Z4Z_D : sme2_dot_mla_add_sub_array_vg4_multi<"add", 0b111010, MatrixOp64, ZZZZ_d_mul_r, nxv2i64, null_frag>; - -defm SUB_VG2_M2ZZ_D : sme2_dot_mla_add_sub_array_vg24_single<"sub", 0b1011011, MatrixOp64, ZZ_d, ZPR4b64>; -defm SUB_VG4_M4ZZ_D : sme2_dot_mla_add_sub_array_vg24_single<"sub", 0b1111011, MatrixOp64, ZZZZ_d, ZPR4b64>; -defm SUB_VG2_M2Z2Z_D : sme2_dot_mla_add_sub_array_vg2_multi<"sub", 0b111011, MatrixOp64, ZZ_d_mul_r, nxv2i64, null_frag>; -defm SUB_VG4_M4Z4Z_D : sme2_dot_mla_add_sub_array_vg4_multi<"sub", 0b111011, MatrixOp64, ZZZZ_d_mul_r, nxv2i64, null_frag>; +defm ADD_VG2_M2ZZ_D : sme2_dot_mla_add_sub_array_vg2_single<"add", 0b1011010, MatrixOp64, ZZ_d, ZPR4b64, nxv2i64, int_aarch64_sme_add_write_single_za_vg1x2>; +defm ADD_VG4_M4ZZ_D : sme2_dot_mla_add_sub_array_vg4_single<"add", 0b1111010, MatrixOp64, ZZZZ_d, ZPR4b64, nxv2i64, int_aarch64_sme_add_write_single_za_vg1x4>; +defm ADD_VG2_M2Z2Z_D : sme2_dot_mla_add_sub_array_vg2_multi<"add", 0b111010, MatrixOp64, ZZ_d_mul_r, nxv2i64, int_aarch64_sme_add_write_za_vg1x2>; +defm ADD_VG4_M4Z4Z_D : sme2_dot_mla_add_sub_array_vg4_multi<"add", 0b111010, MatrixOp64, ZZZZ_d_mul_r, nxv2i64, int_aarch64_sme_add_write_za_vg1x4>; + +defm SUB_VG2_M2ZZ_D : sme2_dot_mla_add_sub_array_vg2_single<"sub", 0b1011011, MatrixOp64, ZZ_d, ZPR4b64, nxv2i64, int_aarch64_sme_sub_write_single_za_vg1x2>; +defm SUB_VG4_M4ZZ_D : sme2_dot_mla_add_sub_array_vg4_single<"sub", 0b1111011, MatrixOp64, ZZZZ_d, ZPR4b64, nxv2i64, int_aarch64_sme_sub_write_single_za_vg1x4>; +defm SUB_VG2_M2Z2Z_D : sme2_dot_mla_add_sub_array_vg2_multi<"sub", 0b111011, MatrixOp64, ZZ_d_mul_r, nxv2i64, int_aarch64_sme_sub_write_za_vg1x2>; +defm SUB_VG4_M4Z4Z_D : sme2_dot_mla_add_sub_array_vg4_multi<"sub", 0b111011, MatrixOp64, ZZZZ_d_mul_r, nxv2i64, int_aarch64_sme_sub_write_za_vg1x4>; defm ADD_VG2_M2Z_D : sme2_multivec_accum_add_sub_vg2<"add", 0b1010, MatrixOp64, ZZ_d_mul_r>; defm ADD_VG4_M4Z_D : sme2_multivec_accum_add_sub_vg4<"add", 0b1010, MatrixOp64, ZZZZ_d_mul_r>; diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll @@ -0,0 +1,226 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -mattr=+sme2 -mattr=+sme-i16i64 
+
+;
+; ADD Multi-Single x2
+;
+
+define void @multi_vector_add_write_single_za_vg1x2_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zm) {
+; CHECK-LABEL: multi_vector_add_write_single_za_vg1x2_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    add za.s[w8, 0, vgx2], { z0.s, z1.s }, z2.s
+; CHECK-NEXT:    add za.s[w8, 7, vgx2], { z0.s, z1.s }, z2.s
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32 %slice,
+                                                                <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+                                                                <vscale x 4 x i32> %zm)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32 %slice.7,
+                                                                <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+                                                                <vscale x 4 x i32> %zm)
+  ret void
+}
+
+define void @multi_vector_add_write_single_za_vg1x2_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zm) {
+; CHECK-LABEL: multi_vector_add_write_single_za_vg1x2_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    add za.d[w8, 0, vgx2], { z0.d, z1.d }, z2.d
+; CHECK-NEXT:    add za.d[w8, 7, vgx2], { z0.d, z1.d }, z2.d
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv2i64(i32 %slice,
+                                                                <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                                <vscale x 2 x i64> %zm)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv2i64(i32 %slice.7,
+                                                                <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                                <vscale x 2 x i64> %zm)
+  ret void
+}
+
+;
+; ADD Multi-Single x4
+;
+
+define void @multi_vector_add_write_single_za_vg1x4_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+; CHECK-LABEL: multi_vector_add_write_single_za_vg1x4_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    add za.s[w8, 0, vgx4], { z0.s - z3.s }, z4.s
+; CHECK-NEXT:    add za.s[w8, 7, vgx4], { z0.s - z3.s }, z4.s
+; CHECK-NEXT:    ret
+                                                        <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+                                                        <vscale x 4 x i32> %zm) {
+  call void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv4i32(i32 %slice,
+                                                                <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+                                                                <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+                                                                <vscale x 4 x i32> %zm)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv4i32(i32 %slice.7,
+                                                                <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+                                                                <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+                                                                <vscale x 4 x i32> %zm)
+  ret void
+}
+
+define void @multi_vector_add_write_single_za_vg1x4_i64(i32 %slice,
+; CHECK-LABEL: multi_vector_add_write_single_za_vg1x4_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    add za.d[w8, 0, vgx4], { z0.d - z3.d }, z4.d
+; CHECK-NEXT:    add za.d[w8, 7, vgx4], { z0.d - z3.d }, z4.d
+; CHECK-NEXT:    ret
+                                                        <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                        <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+                                                        <vscale x 2 x i64> %zm) {
+  call void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv2i64(i32 %slice,
+                                                                <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                                <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+                                                                <vscale x 2 x i64> %zm)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv2i64(i32 %slice.7,
+                                                                <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                                <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+                                                                <vscale x 2 x i64> %zm)
+  ret void
+}
+
+;
+; ADD Multi-Multi x2
+;
+
+define void @multi_vector_add_write_za_vg1x2_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+; CHECK-LABEL: multi_vector_add_write_za_vg1x2_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    add za.s[w8, 0, vgx2], { z0.s, z1.s }, { z2.s, z3.s }
+; CHECK-NEXT:    add za.s[w8, 7, vgx2], { z0.s, z1.s }, { z2.s, z3.s }
+; CHECK-NEXT:    ret
+                                                 <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2) {
+  call void @llvm.aarch64.sme.add.write.za.vg1x2.nxv4i32(i32 %slice,
+                                                         <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+                                                         <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.write.za.vg1x2.nxv4i32(i32 %slice.7,
+                                                         <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+                                                         <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2)
+  ret void
+}
+
+
+define void @multi_vector_add_write_za_vg1x2_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+; CHECK-LABEL: multi_vector_add_write_za_vg1x2_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    add za.d[w8, 0, vgx2], { z0.d, z1.d }, { z2.d, z3.d }
+; CHECK-NEXT:    add za.d[w8, 7, vgx2], { z0.d, z1.d }, { z2.d, z3.d }
+; CHECK-NEXT:    ret
+                                                 <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2) {
+  call void @llvm.aarch64.sme.add.write.za.vg1x2.nxv2i64(i32 %slice,
+                                                         <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                         <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.write.za.vg1x2.nxv2i64(i32 %slice.7,
+                                                         <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                         <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2)
+  ret void
+}
+
+
+;
+; ADD Multi-Multi x4
+;
+
+define void @multi_vector_add_write_za_vg1x4_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+; CHECK-LABEL: multi_vector_add_write_za_vg1x4_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    add za.s[w8, 0, vgx4], { z0.s - z3.s }, { z4.s - z7.s }
+; CHECK-NEXT:    add za.s[w8, 7, vgx4], { z0.s - z3.s }, { z4.s - z7.s }
+; CHECK-NEXT:    ret
+                                                 <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+                                                 <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1,
+                                                 <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3) {
+  call void @llvm.aarch64.sme.add.write.za.vg1x4.nxv4i32(i32 %slice,
+                                                         <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+                                                         <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+                                                         <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1,
+                                                         <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.write.za.vg1x4.nxv4i32(i32 %slice.7,
+                                                         <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+                                                         <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+                                                         <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1,
+                                                         <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3)
+  ret void
+}
+
+define void @multi_vector_add_write_za_vg1x4_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+; CHECK-LABEL: multi_vector_add_write_za_vg1x4_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    add za.d[w8, 0, vgx4], { z0.d - z3.d }, { z4.d - z7.d }
+; CHECK-NEXT:    add za.d[w8, 7, vgx4], { z0.d - z3.d }, { z4.d - z7.d }
+; CHECK-NEXT:    ret
+                                                 <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+                                                 <vscale x 2 x i64> %zm0, <vscale x 2 x i64> %zm1,
+                                                 <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3) {
+  call void @llvm.aarch64.sme.add.write.za.vg1x4.nxv2i64(i32 %slice,
+                                                         <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                         <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+                                                         <vscale x 2 x i64> %zm0, <vscale x 2 x i64> %zm1,
+                                                         <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.write.za.vg1x4.nxv2i64(i32 %slice.7,
+                                                         <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                         <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+                                                         <vscale x 2 x i64> %zm0, <vscale x 2 x i64> %zm1,
+                                                         <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3)
+  ret void
+}
+
+declare void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.add.write.za.vg1x2.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.add.write.za.vg1x2.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.add.write.za.vg1x4.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare void @llvm.aarch64.sme.add.write.za.vg1x4.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-sub.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-sub.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-sub.ll
@@ -0,0 +1,228 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -mattr=+sme2 -mattr=+sme-i16i64 -mattr=+sme-f64f64 -verify-machineinstrs < %s | FileCheck %s
+
+;
+; SUB Multi-Single x2
+;
+
+define void @multi_vector_sub_write_za_single_za_vg1x2_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zm) {
+; CHECK-LABEL: multi_vector_sub_write_za_single_za_vg1x2_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    sub za.s[w8, 0, vgx2], { z0.s, z1.s }, z2.s
+; CHECK-NEXT:    sub za.s[w8, 7, vgx2], { z0.s, z1.s }, z2.s
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.sub.write.single.za.vg1x2.nxv4i32(i32 %slice,
+                                                                <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+                                                                <vscale x 4 x i32> %zm)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.sub.write.single.za.vg1x2.nxv4i32(i32 %slice.7,
+                                                                <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+                                                                <vscale x 4 x i32> %zm)
+  ret void
+}
+
+define void @multi_vector_sub_write_za_single_za_vg1x2_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zm) {
+; CHECK-LABEL: multi_vector_sub_write_za_single_za_vg1x2_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    sub za.d[w8, 0, vgx2], { z0.d, z1.d }, z2.d
+; CHECK-NEXT:    sub za.d[w8, 7, vgx2], { z0.d, z1.d }, z2.d
+; CHECK-NEXT:    ret
+  call void @llvm.aarch64.sme.sub.write.single.za.vg1x2.nxv2i64(i32 %slice,
+                                                                <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                                <vscale x 2 x i64> %zm)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.sub.write.single.za.vg1x2.nxv2i64(i32 %slice.7,
+                                                                <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                                <vscale x 2 x i64> %zm)
+  ret void
+}
+
+;
+; SUB Multi-Single x4
+;
+
+define void @multi_vector_sub_write_za_single_za_vg1x4_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+; CHECK-LABEL: multi_vector_sub_write_za_single_za_vg1x4_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    sub za.s[w8, 0, vgx4], { z0.s - z3.s }, z4.s
+; CHECK-NEXT:    sub za.s[w8, 7, vgx4], { z0.s - z3.s }, z4.s
+; CHECK-NEXT:    ret
+                                                           <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+                                                           <vscale x 4 x i32> %zm) {
+  call void @llvm.aarch64.sme.sub.write.single.za.vg1x4.nxv4i32(i32 %slice,
+                                                                <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+                                                                <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+                                                                <vscale x 4 x i32> %zm)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.sub.write.single.za.vg1x4.nxv4i32(i32 %slice.7,
+                                                                <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+                                                                <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+                                                                <vscale x 4 x i32> %zm)
+  ret void
+}
+
+define void @multi_vector_sub_write_za_single_za_vg1x4_i64(i32 %slice,
+; CHECK-LABEL: multi_vector_sub_write_za_single_za_vg1x4_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    sub za.d[w8, 0, vgx4], { z0.d - z3.d }, z4.d
+; CHECK-NEXT:    sub za.d[w8, 7, vgx4], { z0.d - z3.d }, z4.d
+; CHECK-NEXT:    ret
+                                                           <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                           <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+                                                           <vscale x 2 x i64> %zm) {
+  call void @llvm.aarch64.sme.sub.write.single.za.vg1x4.nxv2i64(i32 %slice,
+                                                                <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                                <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+                                                                <vscale x 2 x i64> %zm)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.sub.write.single.za.vg1x4.nxv2i64(i32 %slice.7,
+                                                                <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                                <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+                                                                <vscale x 2 x i64> %zm)
+  ret void
+}
+
+;
+; SUB Multi-Multi x2
+;
+
+define void @multi_vector_sub_write_za_vg1x2_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+; CHECK-LABEL: multi_vector_sub_write_za_vg1x2_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    sub za.s[w8, 0, vgx2], { z0.s, z1.s }, { z2.s, z3.s }
+; CHECK-NEXT:    sub za.s[w8, 7, vgx2], { z0.s, z1.s }, { z2.s, z3.s }
+; CHECK-NEXT:    ret
+                                                 <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2) {
+  call void @llvm.aarch64.sme.sub.write.za.vg1x2.nxv4i32(i32 %slice,
+                                                         <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+                                                         <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.sub.write.za.vg1x2.nxv4i32(i32 %slice.7,
+                                                         <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+                                                         <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2)
+  ret void
+}
+
+
+define void @multi_vector_sub_write_za_vg1x2_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+; CHECK-LABEL: multi_vector_sub_write_za_vg1x2_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
+; CHECK-NEXT:    sub za.d[w8, 0, vgx2], { z0.d, z1.d }, { z2.d, z3.d }
+; CHECK-NEXT:    sub za.d[w8, 7, vgx2], { z0.d, z1.d }, { z2.d, z3.d }
+; CHECK-NEXT:    ret
+                                                 <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2) {
+  call void @llvm.aarch64.sme.sub.write.za.vg1x2.nxv2i64(i32 %slice,
+                                                         <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                         <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.sub.write.za.vg1x2.nxv2i64(i32 %slice.7,
+                                                         <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                         <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2)
+  ret void
+}
+
+
+;
+; SUB Multi-Multi x4
+;
+
+define void @multi_vector_sub_write_za_vg1x4_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+; CHECK-LABEL: multi_vector_sub_write_za_vg1x4_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    sub za.s[w8, 0, vgx4], { z0.s - z3.s }, { z4.s - z7.s }
+; CHECK-NEXT:    sub za.s[w8, 7, vgx4], { z0.s - z3.s }, { z4.s - z7.s }
+; CHECK-NEXT:    ret
+                                                 <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+                                                 <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1,
+                                                 <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3) {
+  call void @llvm.aarch64.sme.sub.write.za.vg1x4.nxv4i32(i32 %slice,
+                                                         <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+                                                         <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+                                                         <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1,
+                                                         <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.sub.write.za.vg1x4.nxv4i32(i32 %slice.7,
+                                                         <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+                                                         <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+                                                         <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1,
+                                                         <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3)
+  ret void
+}
+
+define void @multi_vector_sub_write_za_vg1x4_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+; CHECK-LABEL: multi_vector_sub_write_za_vg1x4_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
+; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
+; CHECK-NEXT:    sub za.d[w8, 0, vgx4], { z0.d - z3.d }, { z4.d - z7.d }
+; CHECK-NEXT:    sub za.d[w8, 7, vgx4], { z0.d - z3.d }, { z4.d - z7.d }
+; CHECK-NEXT:    ret
+                                                 <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+                                                 <vscale x 2 x i64> %zm0, <vscale x 2 x i64> %zm1,
+                                                 <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3) {
+  call void @llvm.aarch64.sme.sub.write.za.vg1x4.nxv2i64(i32 %slice,
+                                                         <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                         <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+                                                         <vscale x 2 x i64> %zm0, <vscale x 2 x i64> %zm1,
+                                                         <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.sub.write.za.vg1x4.nxv2i64(i32 %slice.7,
+                                                         <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+                                                         <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+                                                         <vscale x 2 x i64> %zm0, <vscale x 2 x i64> %zm1,
+                                                         <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3)
+  ret void
+}
+
+declare void @llvm.aarch64.sme.sub.write.single.za.vg1x2.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.sub.write.single.za.vg1x2.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.sub.write.single.za.vg1x4.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>,
+<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.sub.write.single.za.vg1x4.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>,
+<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.sub.write.za.vg1x2.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.sub.write.za.vg1x2.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.sub.write.za.vg1x4.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.sub.write.za.vg1x4.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
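
Usage note (editor's sketch, not part of the patch): each new intrinsic takes the ZA slice index as an i32, followed by the vectors of the ZA group and, for the "single" variants, one shared vector operand; the nxv4i32/nxv2i64 overload selects the za.s/za.d form. Below is a minimal, self-contained IR example of the x2 multi-single ADD, mirroring the tests above. The function name is illustrative, and the streaming-mode/ZA attributes a real caller would carry are omitted here, as they are in the tests.

; Sketch only: writes { %a0 + %b, %a1 + %b } to the two-vector ZA.S group
; selected by %slice, i.e. it lowers to
;   add za.s[w8, 0, vgx2], { z0.s, z1.s }, z2.s
define void @add_write_pair_sketch(i32 %slice, <vscale x 4 x i32> %a0,
                                   <vscale x 4 x i32> %a1, <vscale x 4 x i32> %b) {
  call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32 %slice,
                                                                <vscale x 4 x i32> %a0,
                                                                <vscale x 4 x i32> %a1,
                                                                <vscale x 4 x i32> %b)
  ret void
}

declare void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)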