diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1998,4 +1998,13 @@
 def int_aarch64_sve_sbclb : AdvSIMD_3VectorArg_Intrinsic;
 def int_aarch64_sve_sbclt : AdvSIMD_3VectorArg_Intrinsic;
 
+//
+// SVE2 - Polynomial arithmetic
+//
+
+def int_aarch64_sve_eorbt : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_eortb : AdvSIMD_3VectorArg_Intrinsic;
+def int_aarch64_sve_pmullb_pair : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_pmullt_pair : AdvSIMD_2VectorArg_Intrinsic;
+
 }
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1609,8 +1609,8 @@
   defm SMULLT_ZZZ : sve2_wide_int_arith_long<0b11101, "smullt", int_aarch64_sve_smullt>;
   defm UMULLB_ZZZ : sve2_wide_int_arith_long<0b11110, "umullb", int_aarch64_sve_umullb>;
   defm UMULLT_ZZZ : sve2_wide_int_arith_long<0b11111, "umullt", int_aarch64_sve_umullt>;
-  defm PMULLB_ZZZ : sve2_pmul_long<0b0, "pmullb">;
-  defm PMULLT_ZZZ : sve2_pmul_long<0b1, "pmullt">;
+  defm PMULLB_ZZZ : sve2_pmul_long<0b0, "pmullb", int_aarch64_sve_pmullb_pair>;
+  defm PMULLT_ZZZ : sve2_pmul_long<0b1, "pmullt", int_aarch64_sve_pmullt_pair>;
 
   // SVE2 bitwise shift and insert
   defm SRI_ZZI : sve2_int_bin_shift_imm_right<0b0, "sri", int_aarch64_sve_sri>;
@@ -1689,8 +1689,8 @@
   defm NMATCH_PPzZZ : sve2_char_match<0b1, "nmatch", int_aarch64_sve_nmatch>;
 
   // SVE2 bitwise exclusive-or interleaved
-  defm EORBT_ZZZ : sve2_bitwise_xor_interleaved<0b0, "eorbt">;
-  defm EORTB_ZZZ : sve2_bitwise_xor_interleaved<0b1, "eortb">;
+  defm EORBT_ZZZ : sve2_bitwise_xor_interleaved<0b0, "eorbt", int_aarch64_sve_eorbt>;
+  defm EORTB_ZZZ : sve2_bitwise_xor_interleaved<0b1, "eortb", int_aarch64_sve_eortb>;
 
   // SVE2 bitwise shift left long
   defm SSHLLB_ZZI : sve2_bitwise_shift_left_long<0b00, "sshllb", int_aarch64_sve_sshllb>;
@@ -1811,10 +1811,8 @@
   // PMULLB and PMULLT instructions which operate with 64-bit source and
   // 128-bit destination elements are enabled with crypto extensions, similar
   // to NEON PMULL2 instruction.
-  def PMULLB_ZZZ_Q : sve2_wide_int_arith<0b00, 0b11010, "pmullb",
-                                         ZPR128, ZPR64, ZPR64>;
-  def PMULLT_ZZZ_Q : sve2_wide_int_arith<0b00, 0b11011, "pmullt",
-                                         ZPR128, ZPR64, ZPR64>;
+  defm PMULLB_ZZZ_Q : sve2_wide_int_arith_pmul<0b00, 0b11010, "pmullb", int_aarch64_sve_pmullb_pair>;
+  defm PMULLT_ZZZ_Q : sve2_wide_int_arith_pmul<0b00, 0b11011, "pmullt", int_aarch64_sve_pmullt_pair>;
 }
 
 let Predicates = [HasSVE2SM4] in {
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -2909,9 +2909,25 @@
   def : SVE_2_Op_Pat<nxv2i64, op, nxv2i64, nxv4i32, !cast<Instruction>(NAME # _D)>;
 }
 
-multiclass sve2_pmul_long<bits<1> opc, string asm> {
+multiclass sve2_wide_int_arith_pmul<bits<2> sz, bits<5> opc, string asm,
+                                    SDPatternOperator op> {
+  def NAME : sve2_wide_int_arith<sz, opc, asm, ZPR128, ZPR64, ZPR64>;
+
+  // To avoid using 128-bit elements in the IR, the pattern below works with
+  // LLVM intrinsics with the _pair suffix, to reflect that
+  // _Q is implemented as a pair of _D.
+  def : SVE_2_Op_Pat<nxv2i64, op, nxv2i64, nxv2i64, !cast<Instruction>(NAME)>;
+}
+
+multiclass sve2_pmul_long<bits<1> opc, string asm, SDPatternOperator op> {
   def _H : sve2_wide_int_arith<0b01, {0b1101, opc}, asm, ZPR16, ZPR8, ZPR8>;
   def _D : sve2_wide_int_arith<0b11, {0b1101, opc}, asm, ZPR64, ZPR32, ZPR32>;
+
+  // To avoid using 128-bit elements in the IR, the patterns below work with
+  // LLVM intrinsics with the _pair suffix, to reflect that
+  // _H is implemented as a pair of _B and _D is implemented as a pair of _S.
+  def : SVE_2_Op_Pat<nxv16i8, op, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _H)>;
+  def : SVE_2_Op_Pat<nxv4i32, op, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _D)>;
 }
 
 //===----------------------------------------------------------------------===//
@@ -2974,11 +2990,17 @@
   let ElementSize = ElementSizeNone;
 }
 
-multiclass sve2_bitwise_xor_interleaved<bit opc, string asm> {
+multiclass sve2_bitwise_xor_interleaved<bit opc, string asm,
+                                        SDPatternOperator op> {
   def _B : sve2_bitwise_xor_interleaved<0b00, opc, asm, ZPR8, ZPR8>;
   def _H : sve2_bitwise_xor_interleaved<0b01, opc, asm, ZPR16, ZPR16>;
   def _S : sve2_bitwise_xor_interleaved<0b10, opc, asm, ZPR32, ZPR32>;
   def _D : sve2_bitwise_xor_interleaved<0b11, opc, asm, ZPR64, ZPR64>;
+
+  def : SVE_3_Op_Pat<nxv16i8, op, nxv16i8, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _B)>;
+  def : SVE_3_Op_Pat<nxv8i16, op, nxv8i16, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_3_Op_Pat<nxv4i32, op, nxv4i32, nxv4i32, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
 class sve2_bitwise_shift_left_long<bits<3> tsz8_64, bits<2> opc, string asm,
diff --git a/llvm/test/CodeGen/AArch64/sve2-intrinsics-polynomial-arithmetic-128.ll b/llvm/test/CodeGen/AArch64/sve2-intrinsics-polynomial-arithmetic-128.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2-intrinsics-polynomial-arithmetic-128.ll
@@ -0,0 +1,31 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2-aes -asm-verbose=0 < %s | FileCheck %s
+
+;
+; PMULLB
+;
+
+define <vscale x 2 x i64> @pmullb_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: pmullb_i64:
+; CHECK: pmullb z0.q, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.pmullb.pair.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+;
+; PMULLT
+;
+
+define <vscale x 2 x i64> @pmullt_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: pmullt_i64:
+; CHECK: pmullt z0.q, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.pmullt.pair.nxv2i64(<vscale x 2 x i64> %a,
+                                                                       <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+declare <vscale x 2 x i64> @llvm.aarch64.sve.pmullb.pair.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 2 x i64> @llvm.aarch64.sve.pmullt.pair.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
diff --git a/llvm/test/CodeGen/AArch64/sve2-intrinsics-polynomial-arithmetic.ll b/llvm/test/CodeGen/AArch64/sve2-intrinsics-polynomial-arithmetic.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2-intrinsics-polynomial-arithmetic.ll
@@ -0,0 +1,149 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 -asm-verbose=0 < %s | FileCheck %s
+
+;
+; EORBT
+;
+
+define <vscale x 16 x i8> @eorbt_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+; CHECK-LABEL: eorbt_i8:
+; CHECK: eorbt z0.b, z1.b, z2.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.eorbt.nxv16i8(<vscale x 16 x i8> %a,
+                                                                 <vscale x 16 x i8> %b,
+                                                                 <vscale x 16 x i8> %c)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @eorbt_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: eorbt_i16:
+; CHECK: eorbt z0.h, z1.h, z2.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.eorbt.nxv8i16(<vscale x 8 x i16> %a,
+                                                                 <vscale x 8 x i16> %b,
+                                                                 <vscale x 8 x i16> %c)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @eorbt_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: eorbt_i32:
+; CHECK: eorbt z0.s, z1.s, z2.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.eorbt.nxv4i32(<vscale x 4 x i32> %a,
+                                                                 <vscale x 4 x i32> %b,
+                                                                 <vscale x 4 x i32> %c)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @eorbt_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: eorbt_i64:
+; CHECK: eorbt z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.eorbt.nxv2i64(<vscale x 2 x i64> %a,
+                                                                 <vscale x 2 x i64> %b,
+                                                                 <vscale x 2 x i64> %c)
+  ret <vscale x 2 x i64> %out
+}
+
+;
+; EORTB
+;
+
+define <vscale x 16 x i8> @eortb_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+; CHECK-LABEL: eortb_i8:
+; CHECK: eortb z0.b, z1.b, z2.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.eortb.nxv16i8(<vscale x 16 x i8> %a,
+                                                                 <vscale x 16 x i8> %b,
+                                                                 <vscale x 16 x i8> %c)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @eortb_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: eortb_i16:
+; CHECK: eortb z0.h, z1.h, z2.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.eortb.nxv8i16(<vscale x 8 x i16> %a,
+                                                                 <vscale x 8 x i16> %b,
+                                                                 <vscale x 8 x i16> %c)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @eortb_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: eortb_i32:
+; CHECK: eortb z0.s, z1.s, z2.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.eortb.nxv4i32(<vscale x 4 x i32> %a,
+                                                                 <vscale x 4 x i32> %b,
+                                                                 <vscale x 4 x i32> %c)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @eortb_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: eortb_i64:
+; CHECK: eortb z0.d, z1.d, z2.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.eortb.nxv2i64(<vscale x 2 x i64> %a,
+                                                                 <vscale x 2 x i64> %b,
+                                                                 <vscale x 2 x i64> %c)
+  ret <vscale x 2 x i64> %out
+}
+
+;
+; PMULLB
+;
+
+define <vscale x 16 x i8> @pmullb_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: pmullb_i8:
+; CHECK: pmullb z0.h, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.pmullb.pair.nxv16i8(<vscale x 16 x i8> %a,
+                                                                       <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 4 x i32> @pmullb_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: pmullb_i32:
+; CHECK: pmullb z0.d, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.pmullb.pair.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+;
+; PMULLT
+;
+
+define <vscale x 16 x i8> @pmullt_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: pmullt_i8:
+; CHECK: pmullt z0.h, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.pmullt.pair.nxv16i8(<vscale x 16 x i8> %a,
+                                                                       <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 4 x i32> @pmullt_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: pmullt_i32:
+; CHECK: pmullt z0.d, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.pmullt.pair.nxv4i32(<vscale x 4 x i32> %a,
+                                                                       <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.eorbt.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.eorbt.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.eorbt.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.eorbt.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.eortb.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.eortb.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.eortb.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.eortb.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.pmullb.pair.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.pmullb.pair.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.pmullt.pair.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.pmullt.pair.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
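
Usage note (illustration only, not part of the patch): at the C level these IR intrinsics are expected to surface through the SVE2 ACLE. The sketch below assumes the arm_sve.h names svpmullb_pair_u8 and sveorbt_u8 and an SVE2-enabled compiler (e.g. -march=armv8-a+sve2); that mapping is an assumption based on the intrinsic names, not something this patch adds.

    // Illustrative sketch only; the ACLE mapping is an assumption.
    #include <arm_sve.h>

    // Carry-less multiply of the even-numbered (bottom) bytes of a and b.
    // Each widened 16-bit product fills the corresponding byte pair of the
    // result, which is the "pair" view that llvm.aarch64.sve.pmullb.pair
    // encodes without needing 128-bit element types in the IR.
    svuint8_t pmull_bottom(svuint8_t a, svuint8_t b) {
      return svpmullb_pair_u8(a, b); // should lower to: pmullb z0.h, z0.b, z1.b
    }

    // Interleaved XOR; should lower to the destructive EORBT, with the
    // first operand supplying the lanes the instruction leaves unchanged.
    svuint8_t xor_bottom_top(svuint8_t acc, svuint8_t a, svuint8_t b) {
      return sveorbt_u8(acc, a, b); // should lower to: eorbt z0.b, z1.b, z2.b
    }

Keeping the IR operands at the narrow element type is what lets the _pair intrinsics reuse AdvSIMD_2VectorArg_Intrinsic, per the comments in the sve2_pmul_long and sve2_wide_int_arith_pmul multiclasses above.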