diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1063,6 +1063,15 @@
 def int_aarch64_sve_cnt : AdvSIMD_SVE_CNT_Intrinsic;
 
 //
+// Reversal
+//
+
+def int_aarch64_sve_rbit : AdvSIMD_Merged1VectorArg_Intrinsic;
+def int_aarch64_sve_revb : AdvSIMD_Merged1VectorArg_Intrinsic;
+def int_aarch64_sve_revh : AdvSIMD_Merged1VectorArg_Intrinsic;
+def int_aarch64_sve_revw : AdvSIMD_Merged1VectorArg_Intrinsic;
+
+//
 // Permutations and selection
 //
 
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -221,10 +221,10 @@
   defm INSR_ZV : sve_int_perm_insrv<"insr", AArch64insr>;
   def EXT_ZZI : sve_int_perm_extract_i<"ext">;
 
-  defm RBIT_ZPmZ : sve_int_perm_rev_rbit<"rbit">;
-  defm REVB_ZPmZ : sve_int_perm_rev_revb<"revb">;
-  defm REVH_ZPmZ : sve_int_perm_rev_revh<"revh">;
-  defm REVW_ZPmZ : sve_int_perm_rev_revw<"revw">;
+  defm RBIT_ZPmZ : sve_int_perm_rev_rbit<"rbit", int_aarch64_sve_rbit>;
+  defm REVB_ZPmZ : sve_int_perm_rev_revb<"revb", int_aarch64_sve_revb, bswap>;
+  defm REVH_ZPmZ : sve_int_perm_rev_revh<"revh", int_aarch64_sve_revh>;
+  defm REVW_ZPmZ : sve_int_perm_rev_revw<"revw", int_aarch64_sve_revw>;
 
   defm REV_PP : sve_int_perm_reverse_p<"rev">;
   defm REV_ZZ : sve_int_perm_reverse_z<"rev">;
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -318,6 +318,15 @@
 def SVEDup0Undef : ComplexPattern<i64, 0, "SelectDupZeroOrUndef", []>;
 
+//
+// Common but less generic patterns.
+//
+
+class SVE_1_Op_AllActive_Pat<ValueType vtd, SDPatternOperator op, ValueType vt1,
+                             Instruction inst, Instruction ptrue>
+: Pat<(vtd (op vt1:$Op1)),
+      (inst (IMPLICIT_DEF), (ptrue 31), $Op1)>;
+
 //===----------------------------------------------------------------------===//
 // SVE Predicate Misc Group
 //===----------------------------------------------------------------------===//
@@ -4837,26 +4846,46 @@
   let ElementSize = zprty.ElementSize;
 }
 
-multiclass sve_int_perm_rev_rbit<string asm> {
+multiclass sve_int_perm_rev_rbit<string asm, SDPatternOperator op> {
   def _B : sve_int_perm_rev<0b00, 0b11, asm, ZPR8>;
   def _H : sve_int_perm_rev<0b01, 0b11, asm, ZPR16>;
   def _S : sve_int_perm_rev<0b10, 0b11, asm, ZPR32>;
   def _D : sve_int_perm_rev<0b11, 0b11, asm, ZPR64>;
+
+  def : SVE_3_Op_Pat<nxv16i8, op, nxv16i8, nxv16i1, nxv16i8, !cast<Instruction>(NAME # _B)>;
+  def : SVE_3_Op_Pat<nxv8i16, op, nxv8i16, nxv8i1, nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_3_Op_Pat<nxv4i32, op, nxv4i32, nxv4i1, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv2i1, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
-multiclass sve_int_perm_rev_revb<string asm> {
+multiclass sve_int_perm_rev_revb<string asm, SDPatternOperator int_op,
+                                 SDPatternOperator ir_op> {
   def _H : sve_int_perm_rev<0b01, 0b00, asm, ZPR16>;
   def _S : sve_int_perm_rev<0b10, 0b00, asm, ZPR32>;
   def _D : sve_int_perm_rev<0b11, 0b00, asm, ZPR64>;
+
+  def : SVE_3_Op_Pat<nxv8i16, int_op, nxv8i16, nxv8i1, nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_3_Op_Pat<nxv4i32, int_op, nxv4i32, nxv4i1, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_3_Op_Pat<nxv2i64, int_op, nxv2i64, nxv2i1, nxv2i64, !cast<Instruction>(NAME # _D)>;
+
+  def : SVE_1_Op_AllActive_Pat<nxv8i16, ir_op, nxv8i16, !cast<Instruction>(NAME # _H), PTRUE_H>;
+  def : SVE_1_Op_AllActive_Pat<nxv4i32, ir_op, nxv4i32, !cast<Instruction>(NAME # _S), PTRUE_S>;
+  def : SVE_1_Op_AllActive_Pat<nxv2i64, ir_op, nxv2i64, !cast<Instruction>(NAME # _D), PTRUE_D>;
 }
 
-multiclass sve_int_perm_rev_revh<string asm> {
+multiclass sve_int_perm_rev_revh<string asm, SDPatternOperator op> {
   def _S : sve_int_perm_rev<0b10, 0b01, asm, ZPR32>;
   def _D : sve_int_perm_rev<0b11, 0b01, asm, ZPR64>;
+
+  def : SVE_3_Op_Pat<nxv4i32, op, nxv4i32, nxv4i1, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv2i1, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
-multiclass sve_int_perm_rev_revw<string asm> {
+multiclass sve_int_perm_rev_revw<string asm, SDPatternOperator op> {
   def _D : sve_int_perm_rev<0b11, 0b10, asm, ZPR64>;
+
+  def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv2i1, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
 class sve_int_perm_cpy_r<bits<2> sz8_64, string asm, ZPRRegOp zprty,
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-reversal.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-reversal.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-reversal.ll
@@ -0,0 +1,166 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+;
+; RBIT
+;
+
+define <vscale x 16 x i8> @rbit_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: rbit_i8:
+; CHECK: rbit z0.b, p0/m, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.rbit.nxv16i8(<vscale x 16 x i8> %a,
+                                                                <vscale x 16 x i1> %pg,
+                                                                <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @rbit_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: rbit_i16:
+; CHECK: rbit z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.rbit.nxv8i16(<vscale x 8 x i16> %a,
+                                                                <vscale x 8 x i1> %pg,
+                                                                <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @rbit_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: rbit_i32:
+; CHECK: rbit z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.rbit.nxv4i32(<vscale x 4 x i32> %a,
+                                                                <vscale x 4 x i1> %pg,
+                                                                <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @rbit_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: rbit_i64:
+; CHECK: rbit z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.rbit.nxv2i64(<vscale x 2 x i64> %a,
+                                                                <vscale x 2 x i1> %pg,
+                                                                <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+;
+; REVB
+;
+
+define <vscale x 8 x i16> @revb_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: revb_i16:
+; CHECK: revb z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.revb.nxv8i16(<vscale x 8 x i16> %a,
+                                                                <vscale x 8 x i1> %pg,
+                                                                <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @revb_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: revb_i32:
+; CHECK: revb z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.revb.nxv4i32(<vscale x 4 x i32> %a,
+                                                                <vscale x 4 x i1> %pg,
+                                                                <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @revb_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: revb_i64:
+; CHECK: revb z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.revb.nxv2i64(<vscale x 2 x i64> %a,
+                                                                <vscale x 2 x i1> %pg,
+                                                                <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+;
+; REVB (bswap)
+;
+
+define <vscale x 8 x i16> @revb_i16_bswap(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: revb_i16_bswap:
+; CHECK: ptrue [[PG:p[0-9]+]].h
+; CHECK-NEXT: revb z0.h, [[PG]]/m, z0.h
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.bswap.nxv8i16(<vscale x 8 x i16> %a)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @revb_i32_bswap(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: revb_i32_bswap:
+; CHECK: ptrue [[PG:p[0-9]+]].s
+; CHECK-NEXT: revb z0.s, [[PG]]/m, z0.s
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32> %a)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @revb_i64_bswap(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: revb_i64_bswap:
+; CHECK: ptrue [[PG:p[0-9]+]].d
+; CHECK-NEXT: revb z0.d, [[PG]]/m, z0.d
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.bswap.nxv2i64(<vscale x 2 x i64> %a)
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; REVH
+;
+
+define <vscale x 4 x i32> @revh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: revh_i32:
+; CHECK: revh z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.revh.nxv4i32(<vscale x 4 x i32> %a,
+                                                                <vscale x 4 x i1> %pg,
+                                                                <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @revh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: revh_i64:
+; CHECK: revh z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.revh.nxv2i64(<vscale x 2 x i64> %a,
+                                                                <vscale x 2 x i1> %pg,
+                                                                <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+;
+; REVW
+;
+
+define <vscale x 2 x i64> @revw_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: revw_i64:
+; CHECK: revw z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.revw.nxv2i64(<vscale x 2 x i64> %a,
+                                                                <vscale x 2 x i1> %pg,
+                                                                <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.rbit.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.rbit.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.rbit.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.rbit.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
+
+declare <vscale x 8 x i16> @llvm.aarch64.sve.revb.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.revb.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.revb.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
+
+declare <vscale x 8 x i16> @llvm.bswap.nxv8i16(<vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.bswap.nxv2i64(<vscale x 2 x i64>)
+
+declare <vscale x 4 x i32> @llvm.aarch64.sve.revh.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.revh.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
+
+declare <vscale x 2 x i64> @llvm.aarch64.sve.revw.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
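
Note (context for reviewers; not part of the patch itself): these merged intrinsics are the codegen half of the corresponding SVE ACLE builtins from arm_sve.h. Below is a minimal C sketch of the intended front-end usage, assuming Clang lowers the _m (merging) builtins onto the new llvm.aarch64.sve.* intrinsics; the builtin names come from the ACLE specification, not from this diff:

  #include <arm_sve.h>

  // Merging bit reversal: lanes where pg is false take the corresponding
  // lane of 'inactive', matching AdvSIMD_Merged1VectorArg_Intrinsic.
  svuint8_t reverse_bits(svuint8_t inactive, svbool_t pg, svuint8_t op) {
    return svrbit_u8_m(inactive, pg, op);  // expected: rbit z0.b, p0/m, z1.b
  }

  // Merging byte swap within each 16-bit element.
  svuint16_t swap_bytes(svuint16_t inactive, svbool_t pg, svuint16_t op) {
    return svrevb_u16_m(inactive, pg, op); // expected: revb z0.h, p0/m, z1.h
  }

The unpredicated llvm.bswap case has no predicate operand at the IR level, so it is matched via SVE_1_Op_AllActive_Pat, which supplies an all-active predicate (ptrue with pattern immediate 31, i.e. SV_ALL) and an IMPLICIT_DEF for the merge operand.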