diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1059,6 +1059,13 @@
                  LLVMVectorOfBitcastsToInt<0>],
                 [IntrNoMem]>;
 
+  class AdvSIMD_SVE2_TBX_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [LLVMMatchType<0>,
+                 LLVMMatchType<0>,
+                 LLVMVectorOfBitcastsToInt<0>],
+                [IntrNoMem]>;
+
   class SVE2_1VectorArg_Long_Intrinsic
     : Intrinsic<[llvm_anyvector_ty],
                 [LLVMSubdivide2VectorType<0>,
@@ -2073,5 +2080,19 @@
   Intrinsic<[llvm_nxv4i32_ty], [llvm_nxv4i32_ty, llvm_nxv4i32_ty], [IntrNoMem]>;
 
+//
+// SVE2 - Extended table lookup/permute
+//
+
+def int_aarch64_sve_tbl2 : AdvSIMD_SVE2_TBX_Intrinsic;
+def int_aarch64_sve_tbx  : AdvSIMD_SVE2_TBX_Intrinsic;
+
+//
+// SVE2 - Optional bit permutation
+//
+
+def int_aarch64_sve_bdep_x : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_bext_x : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_bgrp_x : AdvSIMD_2VectorArg_Intrinsic;
 }
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1921,8 +1921,8 @@
   defm STNT1D_ZZR_D : sve2_mem_sstnt_vs<0b110, "stnt1d", Z_d, ZPR64>;
 
   // SVE2 table lookup (three sources)
-  defm TBL_ZZZZ : sve2_int_perm_tbl<"tbl">;
-  defm TBX_ZZZ  : sve2_int_perm_tbx<"tbx">;
+  defm TBL_ZZZZ : sve2_int_perm_tbl<"tbl", int_aarch64_sve_tbl2>;
+  defm TBX_ZZZ  : sve2_int_perm_tbx<"tbx", int_aarch64_sve_tbx>;
 
   // SVE2 integer compare scalar count and limit
   defm WHILEGE_PWW : sve_int_while4_rr<0b000, "whilege", int_aarch64_sve_whilege>;
@@ -1970,7 +1970,7 @@
 let Predicates = [HasSVE2BitPerm] in {
   // SVE2 bitwise permute
-  defm BEXT_ZZZ : sve2_misc_bitwise<0b1100, "bext">;
-  defm BDEP_ZZZ : sve2_misc_bitwise<0b1101, "bdep">;
-  defm BGRP_ZZZ : sve2_misc_bitwise<0b1110, "bgrp">;
+  defm BEXT_ZZZ : sve2_misc_bitwise<0b1100, "bext", int_aarch64_sve_bext_x>;
+  defm BDEP_ZZZ : sve2_misc_bitwise<0b1101, "bdep", int_aarch64_sve_bdep_x>;
+  defm BGRP_ZZZ : sve2_misc_bitwise<0b1110, "bgrp", int_aarch64_sve_bgrp_x>;
 }
 
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -998,11 +998,46 @@
   def : SVE_2_Op_Pat<nxv2f64, op, nxv2f64, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
-multiclass sve2_int_perm_tbl<string asm> {
+multiclass sve2_int_perm_tbl<string asm, SDPatternOperator op> {
   def _B : sve_int_perm_tbl<0b00, 0b01, asm, ZPR8,  ZZ_b>;
   def _H : sve_int_perm_tbl<0b01, 0b01, asm, ZPR16, ZZ_h>;
   def _S : sve_int_perm_tbl<0b10, 0b01, asm, ZPR32, ZZ_s>;
   def _D : sve_int_perm_tbl<0b11, 0b01, asm, ZPR64, ZZ_d>;
+
+  def : Pat<(nxv16i8 (op nxv16i8:$Op1, nxv16i8:$Op2, nxv16i8:$Op3)),
+            (nxv16i8 (!cast<Instruction>(NAME # _B) (REG_SEQUENCE ZPR2, nxv16i8:$Op1, zsub0,
+                                                                        nxv16i8:$Op2, zsub1),
+                                                     nxv16i8:$Op3))>;
+
+  def : Pat<(nxv8i16 (op nxv8i16:$Op1, nxv8i16:$Op2, nxv8i16:$Op3)),
+            (nxv8i16 (!cast<Instruction>(NAME # _H) (REG_SEQUENCE ZPR2, nxv8i16:$Op1, zsub0,
+                                                                        nxv8i16:$Op2, zsub1),
+                                                     nxv8i16:$Op3))>;
+
+  def : Pat<(nxv4i32 (op nxv4i32:$Op1, nxv4i32:$Op2, nxv4i32:$Op3)),
+            (nxv4i32 (!cast<Instruction>(NAME # _S) (REG_SEQUENCE ZPR2, nxv4i32:$Op1, zsub0,
+                                                                        nxv4i32:$Op2, zsub1),
+                                                     nxv4i32:$Op3))>;
+
+  def : Pat<(nxv2i64 (op nxv2i64:$Op1, nxv2i64:$Op2, nxv2i64:$Op3)),
+            (nxv2i64 (!cast<Instruction>(NAME # _D) (REG_SEQUENCE ZPR2, nxv2i64:$Op1, zsub0,
+                                                                        nxv2i64:$Op2, zsub1),
+                                                     nxv2i64:$Op3))>;
+
+  def : Pat<(nxv8f16 (op nxv8f16:$Op1, nxv8f16:$Op2, nxv8i16:$Op3)),
+            (nxv8f16 (!cast<Instruction>(NAME # _H) (REG_SEQUENCE ZPR2, nxv8f16:$Op1, zsub0,
+                                                                        nxv8f16:$Op2, zsub1),
+                                                     nxv8i16:$Op3))>;
+
+  def : Pat<(nxv4f32 (op nxv4f32:$Op1, nxv4f32:$Op2, nxv4i32:$Op3)),
+            (nxv4f32 (!cast<Instruction>(NAME # _S) (REG_SEQUENCE ZPR2, nxv4f32:$Op1, zsub0,
+                                                                        nxv4f32:$Op2, zsub1),
+                                                     nxv4i32:$Op3))>;
+
+  def : Pat<(nxv2f64 (op nxv2f64:$Op1, nxv2f64:$Op2, nxv2i64:$Op3)),
+            (nxv2f64 (!cast<Instruction>(NAME # _D) (REG_SEQUENCE ZPR2, nxv2f64:$Op1, zsub0,
+                                                                        nxv2f64:$Op2, zsub1),
+                                                     nxv2i64:$Op3))>;
 }
 
 class sve2_int_perm_tbx<bits<2> sz8_64, string asm, ZPRRegOp zprty>
@@ -1024,11 +1059,20 @@
   let Constraints = "$Zd = $_Zd";
 }
 
-multiclass sve2_int_perm_tbx<string asm> {
+multiclass sve2_int_perm_tbx<string asm, SDPatternOperator op> {
   def _B : sve2_int_perm_tbx<0b00, asm, ZPR8>;
   def _H : sve2_int_perm_tbx<0b01, asm, ZPR16>;
   def _S : sve2_int_perm_tbx<0b10, asm, ZPR32>;
   def _D : sve2_int_perm_tbx<0b11, asm, ZPR64>;
+
+  def : SVE_3_Op_Pat<nxv16i8, op, nxv16i8, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _B)>;
+  def : SVE_3_Op_Pat<nxv8i16, op, nxv8i16, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_3_Op_Pat<nxv4i32, op, nxv4i32, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
+
+  def : SVE_3_Op_Pat<nxv8f16, op, nxv8f16, nxv8f16, nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_3_Op_Pat<nxv4f32, op, nxv4f32, nxv4f32, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_3_Op_Pat<nxv2f64, op, nxv2f64, nxv2f64, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
 class sve_int_perm_reverse_z<bits<2> sz8_64, string asm, ZPRRegOp zprty>
@@ -3024,11 +3068,16 @@
   let Inst{4-0} = Zd;
 }
 
-multiclass sve2_misc_bitwise<bits<4> opc, string asm> {
+multiclass sve2_misc_bitwise<bits<4> opc, string asm, SDPatternOperator op> {
   def _B : sve2_misc<0b00, opc, asm, ZPR8, ZPR8>;
   def _H : sve2_misc<0b01, opc, asm, ZPR16, ZPR16>;
   def _S : sve2_misc<0b10, opc, asm, ZPR32, ZPR32>;
   def _D : sve2_misc<0b11, opc, asm, ZPR64, ZPR64>;
+
+  def : SVE_2_Op_Pat<nxv16i8, op, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _B)>;
+  def : SVE_2_Op_Pat<nxv8i16, op, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_2_Op_Pat<nxv4i32, op, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_2_Op_Pat<nxv2i64, op, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
 multiclass sve2_misc_int_addsub_long_interleaved<bits<2> opc, string asm,
diff --git a/llvm/test/CodeGen/AArch64/sve2-intrinsics-bit-permutation.ll b/llvm/test/CodeGen/AArch64/sve2-intrinsics-bit-permutation.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2-intrinsics-bit-permutation.ll
@@ -0,0 +1,124 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2,+sve2-bitperm < %s | FileCheck %s
+
+;
+; BDEP
+;
+
+define <vscale x 16 x i8> @bdep_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: bdep_nxv16i8:
+; CHECK: bdep z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.bdep.x.nx16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @bdep_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: bdep_nxv8i16:
+; CHECK: bdep z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.bdep.x.nx8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @bdep_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: bdep_nxv4i32:
+; CHECK: bdep z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.bdep.x.nx4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @bdep_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: bdep_nxv2i64:
+; CHECK: bdep z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.bdep.x.nx2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+;
+; BEXT
+;
+
+define <vscale x 16 x i8> @bext_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: bext_nxv16i8:
+; CHECK: bext z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.bext.x.nx16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @bext_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: bext_nxv8i16:
+; CHECK: bext z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.bext.x.nx8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @bext_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: bext_nxv4i32:
+; CHECK: bext z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.bext.x.nx4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @bext_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: bext_nxv2i64:
+; CHECK: bext z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.bext.x.nx2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+;
+; BGRP
+;
+
+define <vscale x 16 x i8> @bgrp_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: bgrp_nxv16i8:
+; CHECK: bgrp z0.b, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.bgrp.x.nx16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @bgrp_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: bgrp_nxv8i16:
+; CHECK: bgrp z0.h, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.bgrp.x.nx8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @bgrp_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: bgrp_nxv4i32:
+; CHECK: bgrp z0.s, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.bgrp.x.nx4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @bgrp_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: bgrp_nxv2i64:
+; CHECK: bgrp z0.d, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.bgrp.x.nx2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.bdep.x.nx16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.bdep.x.nx8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.bdep.x.nx4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.bdep.x.nx2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.bext.x.nx16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.bext.x.nx8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.bext.x.nx4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.bext.x.nx2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.bgrp.x.nx16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.bgrp.x.nx8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.bgrp.x.nx4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.bgrp.x.nx2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
diff --git a/llvm/test/CodeGen/AArch64/sve2-intrinsics-perm-tb.ll b/llvm/test/CodeGen/AArch64/sve2-intrinsics-perm-tb.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2-intrinsics-perm-tb.ll
@@ -0,0 +1,181 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
+
+;
+; TBL2
+;
+
+define <vscale x 16 x i8> @tbl2_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %unused,
+                                  <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+; CHECK-LABEL: tbl2_b:
+; CHECK: mov z1.d, z0.d
+; CHECK-NEXT: tbl z0.b, { z1.b, z2.b }, z3.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.tbl2.nxv16i8(<vscale x 16 x i8> %a,
+                                                                <vscale x 16 x i8> %b,
+                                                                <vscale x 16 x i8> %c)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @tbl2_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %unused,
+                                  <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: tbl2_h:
+; CHECK: mov z1.d, z0.d
+; CHECK-NEXT: tbl z0.h, { z1.h, z2.h }, z3.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.tbl2.nxv8i16(<vscale x 8 x i16> %a,
+                                                                <vscale x 8 x i16> %b,
+                                                                <vscale x 8 x i16> %c)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @tbl2_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %unused,
+                                  <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: tbl2_s:
+; CHECK: mov z1.d, z0.d
+; CHECK-NEXT: tbl z0.s, { z1.s, z2.s }, z3.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.tbl2.nxv4i32(<vscale x 4 x i32> %a,
+                                                                <vscale x 4 x i32> %b,
+                                                                <vscale x 4 x i32> %c)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @tbl2_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %unused,
+                                  <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: tbl2_d:
+; CHECK: mov z1.d, z0.d
+; CHECK-NEXT: tbl z0.d, { z1.d, z2.d }, z3.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.tbl2.nxv2i64(<vscale x 2 x i64> %a,
+                                                                <vscale x 2 x i64> %b,
+                                                                <vscale x 2 x i64> %c)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 8 x half> @tbl2_fh(<vscale x 8 x half> %a, <vscale x 8 x half> %unused,
+                                    <vscale x 8 x half> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: tbl2_fh:
+; CHECK: mov z1.d, z0.d
+; CHECK-NEXT: tbl z0.h, { z1.h, z2.h }, z3.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.tbl2.nxv8f16(<vscale x 8 x half> %a,
+                                                                 <vscale x 8 x half> %b,
+                                                                 <vscale x 8 x i16> %c)
+  ret <vscale x 8 x half> %out
+}
+
+define <vscale x 4 x float> @tbl2_fs(<vscale x 4 x float> %a, <vscale x 4 x float> %unused,
+                                     <vscale x 4 x float> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: tbl2_fs:
+; CHECK: mov z1.d, z0.d
+; CHECK-NEXT: tbl z0.s, { z1.s, z2.s }, z3.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.tbl2.nxv4f32(<vscale x 4 x float> %a,
+                                                                  <vscale x 4 x float> %b,
+                                                                  <vscale x 4 x i32> %c)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @tbl2_fd(<vscale x 2 x double> %a, <vscale x 2 x double> %unused,
+                                      <vscale x 2 x double> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: tbl2_fd:
+; CHECK: mov z1.d, z0.d
+; CHECK-NEXT: tbl z0.d, { z1.d, z2.d }, z3.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.tbl2.nxv2f64(<vscale x 2 x double> %a,
+                                                                   <vscale x 2 x double> %b,
+                                                                   <vscale x 2 x i64> %c)
+  ret <vscale x 2 x double> %out
+}
+
+;
+; TBX
+;
+
+define <vscale x 16 x i8> @tbx_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+; CHECK-LABEL: tbx_b:
+; CHECK: tbx z0.b, z1.b, z2.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.tbx.nxv16i8(<vscale x 16 x i8> %a,
+                                                               <vscale x 16 x i8> %b,
+                                                               <vscale x 16 x i8> %c)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @tbx_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: tbx_h:
+; CHECK: tbx z0.h, z1.h, z2.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.tbx.nxv8i16(<vscale x 8 x i16> %a,
+                                                               <vscale x 8 x i16> %b,
+                                                               <vscale x 8 x i16> %c)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 8 x half> @ftbx_h(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: ftbx_h:
+; CHECK: tbx z0.h, z1.h, z2.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.tbx.nxv8f16(<vscale x 8 x half> %a,
+                                                                <vscale x 8 x half> %b,
+                                                                <vscale x 8 x i16> %c)
+  ret <vscale x 8 x half> %out
+}
+
+define <vscale x 4 x i32> @tbx_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: tbx_s:
+; CHECK: tbx z0.s, z1.s, z2.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.tbx.nxv4i32(<vscale x 4 x i32> %a,
+                                                               <vscale x 4 x i32> %b,
+                                                               <vscale x 4 x i32> %c)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 4 x float> @ftbx_s(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: ftbx_s:
+; CHECK: tbx z0.s, z1.s, z2.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.tbx.nxv4f32(<vscale x 4 x float> %a,
+                                                                 <vscale x 4 x float> %b,
+                                                                 <vscale x 4 x i32> %c)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x i64> @tbx_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: tbx_d:
+; CHECK: tbx z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.tbx.nxv2i64(<vscale x 2 x i64> %a,
+                                                               <vscale x 2 x i64> %b,
+                                                               <vscale x 2 x i64> %c)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 2 x double> @ftbx_d(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: ftbx_d:
+; CHECK: tbx z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.tbx.nxv2f64(<vscale x 2 x double> %a,
+                                                                  <vscale x 2 x double> %b,
+                                                                  <vscale x 2 x i64> %c)
+  ret <vscale x 2 x double> %out
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.tbl2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.tbl2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.tbl2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.tbl2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 8 x half> @llvm.aarch64.sve.tbl2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i16>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.tbl2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i32>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.tbl2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.tbx.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.tbx.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.tbx.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.tbx.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 8 x half> @llvm.aarch64.sve.tbx.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i16>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.tbx.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i32>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.tbx.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i64>)
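
Note (not part of the patch): a minimal C sketch of how these intrinsics are expected to be reached from user code, assuming the ACLE SVE2 builtins in arm_sve.h (svtbl2, svtbx, svbdep) are lowered by Clang to the llvm.aarch64.sve.tbl2, llvm.aarch64.sve.tbx and llvm.aarch64.sve.bdep.x intrinsics added above. The function names and compile line are illustrative only.

// Illustrative sketch, assuming the ACLE SVE2 builtins from arm_sve.h and a
// compile line along the lines of:
//   clang -O2 -march=armv8-a+sve2+sve2-bitperm -c sve2_perm_example.c
#include <arm_sve.h>

// Two-vector table lookup: each index selects an element from the
// concatenation of 'lo' and 'hi'; expected to become TBL with a
// { Zn1, Zn2 } register-pair operand (llvm.aarch64.sve.tbl2).
svuint8_t lookup2(svuint8_t lo, svuint8_t hi, svuint8_t idx) {
  return svtbl2_u8(svcreate2_u8(lo, hi), idx);
}

// Table lookup with fallback: out-of-range indices keep the corresponding
// element of 'fallback'; expected to become TBX (llvm.aarch64.sve.tbx).
svuint8_t lookup_or(svuint8_t fallback, svuint8_t data, svuint8_t idx) {
  return svtbx_u8(fallback, data, idx);
}

// Bit deposit: scatters the low-order bits of each element of 'bits' into
// the bit positions set in 'mask'; expected to become BDEP
// (llvm.aarch64.sve.bdep.x, gated on the SVE2 BitPerm extension).
svuint32_t deposit(svuint32_t bits, svuint32_t mask) {
  return svbdep_u32(bits, mask);
}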