diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -927,6 +927,16 @@
                  LLVMVectorOfBitcastsToInt<0>],
                 [IntrNoMem]>;
 
+  class AdvSIMD_SVE_CNTB_Intrinsic
+    : Intrinsic<[llvm_i64_ty],
+                [llvm_i32_ty],
+                [IntrNoMem]>;
+
+  class AdvSIMD_SVE_CNTP_Intrinsic
+    : Intrinsic<[llvm_i64_ty],
+                [llvm_anyvector_ty, LLVMMatchType<0>],
+                [IntrNoMem]>;
+
   class AdvSIMD_SVE_DOT_Intrinsic
     : Intrinsic<[llvm_anyvector_ty],
                 [LLVMMatchType<0>,
@@ -1061,6 +1071,17 @@
 def int_aarch64_sve_cnt : AdvSIMD_SVE_CNT_Intrinsic;
 
 //
+// Counting elements
+//
+
+def int_aarch64_sve_cntb : AdvSIMD_SVE_CNTB_Intrinsic;
+def int_aarch64_sve_cnth : AdvSIMD_SVE_CNTB_Intrinsic;
+def int_aarch64_sve_cntw : AdvSIMD_SVE_CNTB_Intrinsic;
+def int_aarch64_sve_cntd : AdvSIMD_SVE_CNTB_Intrinsic;
+
+def int_aarch64_sve_cntp : AdvSIMD_SVE_CNTP_Intrinsic;
+
+//
 // Reversal
 //
 
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -788,11 +788,11 @@
   def ADDVL_XXI : sve_int_arith_vl<0b0, "addvl">;
   def ADDPL_XXI : sve_int_arith_vl<0b1, "addpl">;
 
-  defm CNTB_XPiI : sve_int_count<0b000, "cntb">;
-  defm CNTH_XPiI : sve_int_count<0b010, "cnth">;
-  defm CNTW_XPiI : sve_int_count<0b100, "cntw">;
-  defm CNTD_XPiI : sve_int_count<0b110, "cntd">;
-  defm CNTP_XPP : sve_int_pcount_pred<0b0000, "cntp">;
+  defm CNTB_XPiI : sve_int_count<0b000, "cntb", int_aarch64_sve_cntb>;
+  defm CNTH_XPiI : sve_int_count<0b010, "cnth", int_aarch64_sve_cnth>;
+  defm CNTW_XPiI : sve_int_count<0b100, "cntw", int_aarch64_sve_cntw>;
+  defm CNTD_XPiI : sve_int_count<0b110, "cntd", int_aarch64_sve_cntd>;
+  defm CNTP_XPP : sve_int_pcount_pred<0b0000, "cntp", int_aarch64_sve_cntp>;
 
   defm INCB_XPiI : sve_int_pred_pattern_a<0b000, "incb">;
   defm DECB_XPiI : sve_int_pred_pattern_a<0b001, "decb">;
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -504,11 +504,17 @@
   let Inst{4-0} = Rd;
 }
 
-multiclass sve_int_pcount_pred<bits<4> opc, string asm> {
+multiclass sve_int_pcount_pred<bits<4> opc, string asm,
+                               SDPatternOperator int_op> {
   def _B : sve_int_pcount_pred<0b00, opc, asm, PPR8>;
   def _H : sve_int_pcount_pred<0b01, opc, asm, PPR16>;
   def _S : sve_int_pcount_pred<0b10, opc, asm, PPR32>;
   def _D : sve_int_pcount_pred<0b11, opc, asm, PPR64>;
+
+  def : SVE_2_Op_Pat<i64, int_op, nxv16i1, nxv16i1, !cast<Instruction>(NAME # _B)>;
+  def : SVE_2_Op_Pat<i64, int_op, nxv8i1,  nxv8i1,  !cast<Instruction>(NAME # _H)>;
+  def : SVE_2_Op_Pat<i64, int_op, nxv4i1,  nxv4i1,  !cast<Instruction>(NAME # _S)>;
+  def : SVE_2_Op_Pat<i64, int_op, nxv2i1,  nxv2i1,  !cast<Instruction>(NAME # _D)>;
 }
 
 //===----------------------------------------------------------------------===//
@@ -533,13 +539,16 @@
   let Inst{4-0} = Rd;
 }
 
-multiclass sve_int_count<bits<3> opc, string asm> {
+multiclass sve_int_count<bits<3> opc, string asm, SDPatternOperator op> {
   def NAME : sve_int_count<opc, asm>;
 
   def : InstAlias<asm # "\t$Rd, $pattern",
                   (!cast<Instruction>(NAME) GPR64:$Rd, sve_pred_enum:$pattern, 1), 1>;
   def : InstAlias<asm # "\t$Rd",
                   (!cast<Instruction>(NAME) GPR64:$Rd, 0b11111, 1), 2>;
+
+  def : Pat<(i64 (op sve_pred_enum:$pattern)),
+            (!cast<Instruction>(NAME) sve_pred_enum:$pattern, 1)>;
 }
 
 class sve_int_countvlv<bits<5> opc, string asm, ZPRRegOp zprty>
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-counting-elems.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-counting-elems.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-counting-elems.ll
@@ -0,0 +1,99 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+;
+; CNTB
+;
+
+define i64 @cntb() {
+; CHECK-LABEL: cntb:
+; CHECK: cntb x0, vl2
+; CHECK-NEXT: ret
+  %out = call i64 @llvm.aarch64.sve.cntb(i32 2)
+  ret i64 %out
+}
+
+;
+; CNTH
+;
+
+define i64 @cnth() {
+; CHECK-LABEL: cnth:
+; CHECK: cnth x0, vl3
+; CHECK-NEXT: ret
+  %out = call i64 @llvm.aarch64.sve.cnth(i32 3)
+  ret i64 %out
+}
+
+;
+; CNTW
+;
+
+define i64 @cntw() {
+; CHECK-LABEL: cntw:
+; CHECK: cntw x0, vl4
+; CHECK-NEXT: ret
+  %out = call i64 @llvm.aarch64.sve.cntw(i32 4)
+  ret i64 %out
+}
+
+;
+; CNTD
+;
+
+define i64 @cntd() {
+; CHECK-LABEL: cntd:
+; CHECK: cntd x0, vl5
+; CHECK-NEXT: ret
+  %out = call i64 @llvm.aarch64.sve.cntd(i32 5)
+  ret i64 %out
+}
+
+;
+; CNTP
+;
+
+define i64 @cntp_b8(<vscale x 16 x i1> %pg, <vscale x 16 x i1> %a) {
+; CHECK-LABEL: cntp_b8:
+; CHECK: cntp x0, p0, p1.b
+; CHECK-NEXT: ret
+  %out = call i64 @llvm.aarch64.sve.cntp.nxv16i1(<vscale x 16 x i1> %pg,
+                                                 <vscale x 16 x i1> %a)
+  ret i64 %out
+}
+
+define i64 @cntp_b16(<vscale x 8 x i1> %pg, <vscale x 8 x i1> %a) {
+; CHECK-LABEL: cntp_b16:
+; CHECK: cntp x0, p0, p1.h
+; CHECK-NEXT: ret
+  %out = call i64 @llvm.aarch64.sve.cntp.nxv8i1(<vscale x 8 x i1> %pg,
+                                                <vscale x 8 x i1> %a)
+  ret i64 %out
+}
+
+define i64 @cntp_b32(<vscale x 4 x i1> %pg, <vscale x 4 x i1> %a) {
+; CHECK-LABEL: cntp_b32:
+; CHECK: cntp x0, p0, p1.s
+; CHECK-NEXT: ret
+  %out = call i64 @llvm.aarch64.sve.cntp.nxv4i1(<vscale x 4 x i1> %pg,
+                                                <vscale x 4 x i1> %a)
+  ret i64 %out
+}
+
+define i64 @cntp_b64(<vscale x 2 x i1> %pg, <vscale x 2 x i1> %a) {
+; CHECK-LABEL: cntp_b64:
+; CHECK: cntp x0, p0, p1.d
+; CHECK-NEXT: ret
+  %out = call i64 @llvm.aarch64.sve.cntp.nxv2i1(<vscale x 2 x i1> %pg,
+                                                <vscale x 2 x i1> %a)
+  ret i64 %out
+}
+
+declare i64 @llvm.aarch64.sve.cntb(i32 %pattern)
+declare i64 @llvm.aarch64.sve.cnth(i32 %pattern)
+declare i64 @llvm.aarch64.sve.cntw(i32 %pattern)
+declare i64 @llvm.aarch64.sve.cntd(i32 %pattern)
+
+declare i64 @llvm.aarch64.sve.cntp.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>)
+declare i64 @llvm.aarch64.sve.cntp.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>)
+declare i64 @llvm.aarch64.sve.cntp.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>)
+declare i64 @llvm.aarch64.sve.cntp.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>)