Index: llvm/include/llvm/IR/IntrinsicsAArch64.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -893,6 +893,10 @@
 // SVE

 let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
+  class AdvSIMD_SVE_WHILE_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [llvm_anyint_ty, LLVMMatchType<1>],
+                [IntrNoMem]>;

   class AdvSIMD_Pred2VectorArg_Intrinsic
@@ -986,6 +990,19 @@
 def int_aarch64_sve_uxth : AdvSIMD_Merged1VectorArg_Intrinsic;
 def int_aarch64_sve_uxtw : AdvSIMD_Merged1VectorArg_Intrinsic;

+//
+// While comparisons
+//
+
+def int_aarch64_sve_whilele : AdvSIMD_SVE_WHILE_Intrinsic;
+def int_aarch64_sve_whilelo : AdvSIMD_SVE_WHILE_Intrinsic;
+def int_aarch64_sve_whilels : AdvSIMD_SVE_WHILE_Intrinsic;
+def int_aarch64_sve_whilelt : AdvSIMD_SVE_WHILE_Intrinsic;
+def int_aarch64_sve_whilege : AdvSIMD_SVE_WHILE_Intrinsic;
+def int_aarch64_sve_whilegt : AdvSIMD_SVE_WHILE_Intrinsic;
+def int_aarch64_sve_whilehs : AdvSIMD_SVE_WHILE_Intrinsic;
+def int_aarch64_sve_whilehi : AdvSIMD_SVE_WHILE_Intrinsic;
+
 //
 // Floating-point arithmetic
 //
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -751,15 +751,15 @@
   defm FCMEQ_PPzZ0 : sve_fp_2op_p_pd<0b100, "fcmeq">;
   defm FCMNE_PPzZ0 : sve_fp_2op_p_pd<0b110, "fcmne">;

-  defm WHILELT_PWW : sve_int_while4_rr<0b010, "whilelt">;
-  defm WHILELE_PWW : sve_int_while4_rr<0b011, "whilele">;
-  defm WHILELO_PWW : sve_int_while4_rr<0b110, "whilelo">;
-  defm WHILELS_PWW : sve_int_while4_rr<0b111, "whilels">;
+  defm WHILELT_PWW : sve_int_while4_rr<0b010, "whilelt", int_aarch64_sve_whilelt>;
+  defm WHILELE_PWW : sve_int_while4_rr<0b011, "whilele", int_aarch64_sve_whilele>;
+  defm WHILELO_PWW : sve_int_while4_rr<0b110, "whilelo", int_aarch64_sve_whilelo>;
+  defm WHILELS_PWW : sve_int_while4_rr<0b111, "whilels", int_aarch64_sve_whilels>;

-  defm WHILELT_PXX : sve_int_while8_rr<0b010, "whilelt">;
-  defm WHILELE_PXX : sve_int_while8_rr<0b011, "whilele">;
-  defm WHILELO_PXX : sve_int_while8_rr<0b110, "whilelo">;
-  defm WHILELS_PXX : sve_int_while8_rr<0b111, "whilels">;
+  defm WHILELT_PXX : sve_int_while8_rr<0b010, "whilelt", int_aarch64_sve_whilelt>;
+  defm WHILELE_PXX : sve_int_while8_rr<0b011, "whilele", int_aarch64_sve_whilele>;
+  defm WHILELO_PXX : sve_int_while8_rr<0b110, "whilelo", int_aarch64_sve_whilelo>;
+  defm WHILELS_PXX : sve_int_while8_rr<0b111, "whilels", int_aarch64_sve_whilels>;

   def CTERMEQ_WW : sve_int_cterm<0b0, 0b0, "ctermeq", GPR32>;
   def CTERMNE_WW : sve_int_cterm<0b0, 0b1, "ctermne", GPR32>;
@@ -1493,15 +1493,16 @@
   defm TBX_ZZZ : sve2_int_perm_tbx<"tbx">;

   // SVE2 integer compare scalar count and limit
-  defm WHILEGE_PWW : sve_int_while4_rr<0b000, "whilege">;
-  defm WHILEGT_PWW : sve_int_while4_rr<0b001, "whilegt">;
-  defm WHILEHS_PWW : sve_int_while4_rr<0b100, "whilehs">;
-  defm WHILEHI_PWW : sve_int_while4_rr<0b101, "whilehi">;
-
-  defm WHILEGE_PXX : sve_int_while8_rr<0b000, "whilege">;
-  defm WHILEGT_PXX : sve_int_while8_rr<0b001, "whilegt">;
-  defm WHILEHS_PXX : sve_int_while8_rr<0b100, "whilehs">;
-  defm WHILEHI_PXX : sve_int_while8_rr<0b101, "whilehi">;
+  defm WHILEGE_PWW : sve_int_while4_rr<0b000, "whilege", null_frag>;
+  defm WHILEGT_PWW : sve_int_while4_rr<0b001, "whilegt", null_frag>;
+  defm WHILEHS_PWW : sve_int_while4_rr<0b100, "whilehs", null_frag>;
+  defm WHILEHI_PWW : sve_int_while4_rr<0b101, "whilehi", null_frag>;
+
+  defm WHILEGE_PXX : sve_int_while8_rr<0b000, "whilege", null_frag>;
+  defm WHILEGT_PXX : sve_int_while8_rr<0b001, "whilegt", null_frag>;
+  defm WHILEHS_PXX : sve_int_while8_rr<0b100, "whilehs", null_frag>;
+  defm WHILEHI_PXX : sve_int_while8_rr<0b101, "whilehi", null_frag>;
+
   // SVE2 pointer conflict compare
   defm WHILEWR_PXX : sve2_int_while_rr<0b0, "whilewr">;
Index: llvm/lib/Target/AArch64/SVEInstrFormats.td
===================================================================
--- llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -3560,7 +3560,8 @@
 }

 class sve_int_while_rr<bits<2> sz8_64, bits<4> opc, string asm,
-                       RegisterClass gprty, PPRRegOp pprty>
+                       RegisterClass gprty, PPRRegOp pprty,
+                       ValueType vt, SDPatternOperator op>
 : I<(outs pprty:$Pd), (ins gprty:$Rn, gprty:$Rm),
   asm, "\t$Pd, $Rn, $Rm",
   "", []>, Sched<[]> {
@@ -3580,18 +3581,28 @@
   let Defs = [NZCV];
 }

-multiclass sve_int_while4_rr<bits<3> opc, string asm> {
-  def _B : sve_int_while_rr<0b00, { 0, opc }, asm, GPR32, PPR8>;
-  def _H : sve_int_while_rr<0b01, { 0, opc }, asm, GPR32, PPR16>;
-  def _S : sve_int_while_rr<0b10, { 0, opc }, asm, GPR32, PPR32>;
-  def _D : sve_int_while_rr<0b11, { 0, opc }, asm, GPR32, PPR64>;
+multiclass sve_int_while4_rr<bits<3> opc, string asm, SDPatternOperator op> {
+  def _B : sve_int_while_rr<0b00, { 0, opc }, asm, GPR32, PPR8, nxv16i1, op>;
+  def _H : sve_int_while_rr<0b01, { 0, opc }, asm, GPR32, PPR16, nxv8i1, op>;
+  def _S : sve_int_while_rr<0b10, { 0, opc }, asm, GPR32, PPR32, nxv4i1, op>;
+  def _D : sve_int_while_rr<0b11, { 0, opc }, asm, GPR32, PPR64, nxv2i1, op>;
+
+  def : SVE_2_Op_Pat<nxv16i1, op, i32, i32, !cast<Instruction>(NAME # _B)>;
+  def : SVE_2_Op_Pat<nxv8i1, op, i32, i32, !cast<Instruction>(NAME # _H)>;
+  def : SVE_2_Op_Pat<nxv4i1, op, i32, i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_2_Op_Pat<nxv2i1, op, i32, i32, !cast<Instruction>(NAME # _D)>;
 }

-multiclass sve_int_while8_rr<bits<3> opc, string asm> {
-  def _B : sve_int_while_rr<0b00, { 1, opc }, asm, GPR64, PPR8>;
-  def _H : sve_int_while_rr<0b01, { 1, opc }, asm, GPR64, PPR16>;
-  def _S : sve_int_while_rr<0b10, { 1, opc }, asm, GPR64, PPR32>;
-  def _D : sve_int_while_rr<0b11, { 1, opc }, asm, GPR64, PPR64>;
+multiclass sve_int_while8_rr<bits<3> opc, string asm, SDPatternOperator op> {
+  def _B : sve_int_while_rr<0b00, { 1, opc }, asm, GPR64, PPR8, nxv16i1, op>;
+  def _H : sve_int_while_rr<0b01, { 1, opc }, asm, GPR64, PPR16, nxv8i1, op>;
+  def _S : sve_int_while_rr<0b10, { 1, opc }, asm, GPR64, PPR32, nxv4i1, op>;
+  def _D : sve_int_while_rr<0b11, { 1, opc }, asm, GPR64, PPR64, nxv2i1, op>;
+
+  def : SVE_2_Op_Pat<nxv16i1, op, i64, i64, !cast<Instruction>(NAME # _B)>;
+  def : SVE_2_Op_Pat<nxv8i1, op, i64, i64, !cast<Instruction>(NAME # _H)>;
+  def : SVE_2_Op_Pat<nxv4i1, op, i64, i64, !cast<Instruction>(NAME # _S)>;
+  def : SVE_2_Op_Pat<nxv2i1, op, i64, i64, !cast<Instruction>(NAME # _D)>;
 }

 class sve2_int_while_rr<bits<2> sz8_64, bits<1> rw, string asm,
Index: llvm/test/CodeGen/AArch64/sve-intrinsics-while.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-while.ll
@@ -0,0 +1,309 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+;
+; WHILELE
+;
+
+define <vscale x 16 x i1> @whilele_b_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilele_b_ww:
+; CHECK: whilele p0.b, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i32(i32 %a, i32 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @whilele_b_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilele_b_xx:
+; CHECK: whilele p0.b, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i64(i64 %a, i64 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilele_h_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilele_h_ww:
+; CHECK: whilele p0.h, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i32(i32 %a, i32 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilele_h_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilele_h_xx:
+; CHECK: whilele p0.h, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i64(i64 %a, i64 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilele_s_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilele_s_ww:
+; CHECK: whilele p0.s, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i32(i32 %a, i32 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilele_s_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilele_s_xx:
+; CHECK: whilele p0.s, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i64(i64 %a, i64 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilele_d_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilele_d_ww:
+; CHECK: whilele p0.d, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32 %a, i32 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilele_d_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilele_d_xx:
+; CHECK: whilele p0.d, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i64(i64 %a, i64 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+;
+; WHILELO
+;
+
+define <vscale x 16 x i1> @whilelo_b_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilelo_b_ww:
+; CHECK: whilelo p0.b, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32 %a, i32 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @whilelo_b_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilelo_b_xx:
+; CHECK: whilelo p0.b, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64 %a, i64 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilelo_h_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilelo_h_ww:
+; CHECK: whilelo p0.h, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i32(i32 %a, i32 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilelo_h_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilelo_h_xx:
+; CHECK: whilelo p0.h, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i64(i64 %a, i64 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilelo_s_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilelo_s_ww:
+; CHECK: whilelo p0.s, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i32(i32 %a, i32 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilelo_s_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilelo_s_xx:
+; CHECK: whilelo p0.s, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i64(i64 %a, i64 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilelo_d_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilelo_d_ww:
+; CHECK: whilelo p0.d, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32 %a, i32 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilelo_d_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilelo_d_xx:
+; CHECK: whilelo p0.d, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i64(i64 %a, i64 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+;
+; WHILELS
+;
+
+define <vscale x 16 x i1> @whilels_b_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilels_b_ww:
+; CHECK: whilels p0.b, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i32(i32 %a, i32 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @whilels_b_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilels_b_xx:
+; CHECK: whilels p0.b, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i64(i64 %a, i64 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilels_h_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilels_h_ww:
+; CHECK: whilels p0.h, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i32(i32 %a, i32 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilels_h_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilels_h_xx:
+; CHECK: whilels p0.h, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i64(i64 %a, i64 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilels_s_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilels_s_ww:
+; CHECK: whilels p0.s, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i32(i32 %a, i32 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilels_s_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilels_s_xx:
+; CHECK: whilels p0.s, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i64(i64 %a, i64 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilels_d_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilels_d_ww:
+; CHECK: whilels p0.d, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32 %a, i32 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilels_d_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilels_d_xx:
+; CHECK: whilels p0.d, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i64(i64 %a, i64 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+;
+; WHILELT
+;
+
+define <vscale x 16 x i1> @whilelt_b_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilelt_b_ww:
+; CHECK: whilelt p0.b, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i32(i32 %a, i32 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @whilelt_b_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilelt_b_xx:
+; CHECK: whilelt p0.b, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64 %a, i64 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilelt_h_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilelt_h_ww:
+; CHECK: whilelt p0.h, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i32(i32 %a, i32 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilelt_h_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilelt_h_xx:
+; CHECK: whilelt p0.h, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i64(i64 %a, i64 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilelt_s_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilelt_s_ww:
+; CHECK: whilelt p0.s, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i32(i32 %a, i32 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilelt_s_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilelt_s_xx:
+; CHECK: whilelt p0.s, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i64(i64 %a, i64 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilelt_d_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilelt_d_ww:
+; CHECK: whilelt p0.d, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32 %a, i32 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilelt_d_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilelt_d_xx:
+; CHECK: whilelt p0.d, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i64(i64 %a, i64 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i32(i32, i32)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i64(i64, i64)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i32(i32, i32)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i64(i64, i64)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i32(i32, i32)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i64(i64, i64)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32, i32)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i64(i64, i64)
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32, i32)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64, i64)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i32(i32, i32)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i64(i64, i64)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i32(i32, i32)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i64(i64, i64)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32, i32)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i64(i64, i64)
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i32(i32, i32)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i64(i64, i64)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i32(i32, i32)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i64(i64, i64)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i32(i32, i32)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i64(i64, i64)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32, i32)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i64(i64, i64)
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i32(i32, i32)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64, i64)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i32(i32, i32)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i64(i64, i64)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i32(i32, i32)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i64(i64, i64)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32, i32)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i64(i64, i64)