diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -3132,4 +3132,10 @@
       def int_aarch64_sme_ # intr # _ # za # _vg1x4 : SME2_ZA_Write_VG4_Intrinsic;
     }
   }
+
+  //
+  // Multi-Single Vector add
+  //
+  def int_aarch64_sve_add_single_x2 : SME2_VG2_Multi_Single_Intrinsic;
+  def int_aarch64_sve_add_single_x4 : SME2_VG4_Multi_Single_Intrinsic;
 }
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -5061,6 +5061,20 @@
                AArch64::FCLAMP_VG4_4Z4Z_D}))
         SelectClamp(Node, 4, Op);
       return;
+    case Intrinsic::aarch64_sve_add_single_x2:
+      if (auto Op = SelectOpcodeFromVT(
+              Node->getValueType(0),
+              {AArch64::ADD_VG2_2ZZ_B, AArch64::ADD_VG2_2ZZ_H,
+               AArch64::ADD_VG2_2ZZ_S, AArch64::ADD_VG2_2ZZ_D}))
+        SelectDestructiveMultiIntrinsic(Node, 2, false, Op);
+      return;
+    case Intrinsic::aarch64_sve_add_single_x4:
+      if (auto Op = SelectOpcodeFromVT(
+              Node->getValueType(0),
+              {AArch64::ADD_VG4_4ZZ_B, AArch64::ADD_VG4_4ZZ_H,
+               AArch64::ADD_VG4_4ZZ_S, AArch64::ADD_VG4_4ZZ_D}))
+        SelectDestructiveMultiIntrinsic(Node, 4, false, Op);
+      return;
     }
     break;
   }
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll
@@ -370,6 +370,153 @@
   ret void
 }
+
+;
+; ADD Vectors Multi-Single x2
+;
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @multi_vec_add_single_x2_s8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: multi_vec_add_single_x2_s8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    add { z4.b, z5.b }, { z4.b, z5.b }, z3.b
+; CHECK-NEXT:    mov z0.d, z4.d
+; CHECK-NEXT:    mov z1.d, z5.d
+; CHECK-NEXT:    ret
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> }
+              @llvm.aarch64.sve.add.single.x2.nxv16i8(<vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2,
+                                                      <vscale x 16 x i8> %zm)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 8 x i16>, <vscale x 8 x i16> } @multi_vec_add_single_x2_s16(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %zdn1, <vscale x 8 x i16> %zdn2, <vscale x 8 x i16> %zm) {
+; CHECK-LABEL: multi_vec_add_single_x2_s16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    add { z4.h, z5.h }, { z4.h, z5.h }, z3.h
+; CHECK-NEXT:    mov z0.d, z4.d
+; CHECK-NEXT:    mov z1.d, z5.d
+; CHECK-NEXT:    ret
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> }
+              @llvm.aarch64.sve.add.single.x2.nxv8i16(<vscale x 8 x i16> %zdn1, <vscale x 8 x i16> %zdn2,
+                                                      <vscale x 8 x i16> %zm)
+  ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 4 x i32>, <vscale x 4 x i32> } @multi_vec_add_single_x2_s32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zm) {
+; CHECK-LABEL: multi_vec_add_single_x2_s32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    add { z4.s, z5.s }, { z4.s, z5.s }, z3.s
+; CHECK-NEXT:    mov z0.d, z4.d
+; CHECK-NEXT:    mov z1.d, z5.d
+; CHECK-NEXT:    ret
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> }
+              @llvm.aarch64.sve.add.single.x2.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2,
+                                                      <vscale x 4 x i32> %zm)
+  ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 2 x i64>, <vscale x 2 x i64> } @multi_vec_add_single_x2_s64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zdn1, <vscale x 2 x i64> %zdn2, <vscale x 2 x i64> %zm) {
+; CHECK-LABEL: multi_vec_add_single_x2_s64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z5.d, z2.d
+; CHECK-NEXT:    mov z4.d, z1.d
+; CHECK-NEXT:    add { z4.d, z5.d }, { z4.d, z5.d }, z3.d
+; CHECK-NEXT:    mov z0.d, z4.d
+; CHECK-NEXT:    mov z1.d, z5.d
+; CHECK-NEXT:    ret
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> }
+              @llvm.aarch64.sve.add.single.x2.nxv2i64(<vscale x 2 x i64> %zdn1, <vscale x 2 x i64> %zdn2,
+                                                      <vscale x 2 x i64> %zm)
+  ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
+;
+; ADD Vectors Multi-Single x4
+;
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @multi_vec_add_single_x4_s8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2, <vscale x 16 x i8> %zdn3, <vscale x 16 x i8> %zdn4, <vscale x 16 x i8> %zm) {
+; CHECK-LABEL: multi_vec_add_single_x4_s8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z27.d, z4.d
+; CHECK-NEXT:    mov z26.d, z3.d
+; CHECK-NEXT:    mov z25.d, z2.d
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    add { z24.b - z27.b }, { z24.b - z27.b }, z5.b
+; CHECK-NEXT:    mov z0.d, z24.d
+; CHECK-NEXT:    mov z1.d, z25.d
+; CHECK-NEXT:    mov z2.d, z26.d
+; CHECK-NEXT:    mov z3.d, z27.d
+; CHECK-NEXT:    ret
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }
+              @llvm.aarch64.sve.add.single.x4.nxv16i8(<vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2,
+                                                      <vscale x 16 x i8> %zdn3, <vscale x 16 x i8> %zdn4,
+                                                      <vscale x 16 x i8> %zm)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @multi_vec_add_x4_single_s16(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %zdn1, <vscale x 8 x i16> %zdn2, <vscale x 8 x i16> %zdn3, <vscale x 8 x i16> %zdn4, <vscale x 8 x i16> %zm) {
+; CHECK-LABEL: multi_vec_add_x4_single_s16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z27.d, z4.d
+; CHECK-NEXT:    mov z26.d, z3.d
+; CHECK-NEXT:    mov z25.d, z2.d
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    add { z24.h - z27.h }, { z24.h - z27.h }, z5.h
+; CHECK-NEXT:    mov z0.d, z24.d
+; CHECK-NEXT:    mov z1.d, z25.d
+; CHECK-NEXT:    mov z2.d, z26.d
+; CHECK-NEXT:    mov z3.d, z27.d
+; CHECK-NEXT:    ret
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> }
+              @llvm.aarch64.sve.add.single.x4.nxv8i16(<vscale x 8 x i16> %zdn1, <vscale x 8 x i16> %zdn2,
+                                                      <vscale x 8 x i16> %zdn3, <vscale x 8 x i16> %zdn4,
+                                                      <vscale x 8 x i16> %zm)
+  ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @multi_vec_add_x4_single_s32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zdn3, <vscale x 4 x i32> %zdn4, <vscale x 4 x i32> %zm) {
+; CHECK-LABEL: multi_vec_add_x4_single_s32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z27.d, z4.d
+; CHECK-NEXT:    mov z26.d, z3.d
+; CHECK-NEXT:    mov z25.d, z2.d
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    add { z24.s - z27.s }, { z24.s - z27.s }, z5.s
+; CHECK-NEXT:    mov z0.d, z24.d
+; CHECK-NEXT:    mov z1.d, z25.d
+; CHECK-NEXT:    mov z2.d, z26.d
+; CHECK-NEXT:    mov z3.d, z27.d
+; CHECK-NEXT:    ret
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }
+              @llvm.aarch64.sve.add.single.x4.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2,
+                                                      <vscale x 4 x i32> %zdn3, <vscale x 4 x i32> %zdn4,
+                                                      <vscale x 4 x i32> %zm)
+  ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @multi_vec_add_x4_single_s64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zdn1, <vscale x 2 x i64> %zdn2, <vscale x 2 x i64> %zdn3, <vscale x 2 x i64> %zdn4, <vscale x 2 x i64> %zm) {
+; CHECK-LABEL: multi_vec_add_x4_single_s64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z27.d, z4.d
+; CHECK-NEXT:    mov z26.d, z3.d
+; CHECK-NEXT:    mov z25.d, z2.d
+; CHECK-NEXT:    mov z24.d, z1.d
+; CHECK-NEXT:    add { z24.d - z27.d }, { z24.d - z27.d }, z5.d
+; CHECK-NEXT:    mov z0.d, z24.d
+; CHECK-NEXT:    mov z1.d, z25.d
+; CHECK-NEXT:    mov z2.d, z26.d
+; CHECK-NEXT:    mov z3.d, z27.d
+; CHECK-NEXT:    ret
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> }
+              @llvm.aarch64.sve.add.single.x4.nxv2i64(<vscale x 2 x i64> %zdn1, <vscale x 2 x i64> %zdn2,
+                                                      <vscale x 2 x i64> %zdn3, <vscale x 2 x i64> %zdn4,
+                                                      <vscale x 2 x i64> %zm)
+  ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
 
 declare void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
 declare void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
 declare void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
@@ -386,3 +533,11 @@
 declare void @llvm.aarch64.sme.add.za64.vg1x2.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>)
 declare void @llvm.aarch64.sme.add.za32.vg1x4.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
 declare void @llvm.aarch64.sme.add.za64.vg1x4.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
+declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.add.single.x2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.add.single.x2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.add.single.x2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.add.single.x2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.add.single.x4.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.add.single.x4.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.add.single.x4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.add.single.x4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)