Index: llvm/include/llvm/IR/IntrinsicsAArch64.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -910,6 +910,11 @@
                  llvm_i32_ty],
                 [IntrNoMem]>;
 
+  class AdvSIMD_SVE_WHILE_Intrinsic
+    : Intrinsic<[llvm_anyvector_ty],
+                [llvm_anyint_ty, LLVMMatchType<1>],
+                [IntrNoMem]>;
+
   // This class of intrinsics are not intended to be useful within LLVM IR but
   // are instead here to support some of the more regid parts of the ACLE.
   class Builtin_SVCVT<string name, LLVMType OUT, LLVMType IN>
@@ -1001,6 +1006,19 @@
 def int_aarch64_sve_cmpne_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
 
 //
+// While comparisons
+//
+
+def int_aarch64_sve_whilele : AdvSIMD_SVE_WHILE_Intrinsic;
+def int_aarch64_sve_whilelo : AdvSIMD_SVE_WHILE_Intrinsic;
+def int_aarch64_sve_whilels : AdvSIMD_SVE_WHILE_Intrinsic;
+def int_aarch64_sve_whilelt : AdvSIMD_SVE_WHILE_Intrinsic;
+def int_aarch64_sve_whilege : AdvSIMD_SVE_WHILE_Intrinsic;
+def int_aarch64_sve_whilegt : AdvSIMD_SVE_WHILE_Intrinsic;
+def int_aarch64_sve_whilehi : AdvSIMD_SVE_WHILE_Intrinsic;
+def int_aarch64_sve_whilehs : AdvSIMD_SVE_WHILE_Intrinsic;
+
+//
 // Counting bits
 //
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -751,15 +751,15 @@
   defm FCMEQ_PPzZ0 : sve_fp_2op_p_pd<0b100, "fcmeq">;
   defm FCMNE_PPzZ0 : sve_fp_2op_p_pd<0b110, "fcmne">;
 
-  defm WHILELT_PWW : sve_int_while4_rr<0b010, "whilelt">;
-  defm WHILELE_PWW : sve_int_while4_rr<0b011, "whilele">;
-  defm WHILELO_PWW : sve_int_while4_rr<0b110, "whilelo">;
-  defm WHILELS_PWW : sve_int_while4_rr<0b111, "whilels">;
+  defm WHILELT_PWW : sve_int_while4_rr<0b010, "whilelt", int_aarch64_sve_whilelt>;
+  defm WHILELE_PWW : sve_int_while4_rr<0b011, "whilele", int_aarch64_sve_whilele>;
+  defm WHILELO_PWW : sve_int_while4_rr<0b110, "whilelo",
int_aarch64_sve_whilelo>;
+  defm WHILELS_PWW : sve_int_while4_rr<0b111, "whilels", int_aarch64_sve_whilels>;
 
-  defm WHILELT_PXX : sve_int_while8_rr<0b010, "whilelt">;
-  defm WHILELE_PXX : sve_int_while8_rr<0b011, "whilele">;
-  defm WHILELO_PXX : sve_int_while8_rr<0b110, "whilelo">;
-  defm WHILELS_PXX : sve_int_while8_rr<0b111, "whilels">;
+  defm WHILELT_PXX : sve_int_while8_rr<0b010, "whilelt", int_aarch64_sve_whilelt>;
+  defm WHILELE_PXX : sve_int_while8_rr<0b011, "whilele", int_aarch64_sve_whilele>;
+  defm WHILELO_PXX : sve_int_while8_rr<0b110, "whilelo", int_aarch64_sve_whilelo>;
+  defm WHILELS_PXX : sve_int_while8_rr<0b111, "whilels", int_aarch64_sve_whilels>;
 
   def CTERMEQ_WW : sve_int_cterm<0b0, 0b0, "ctermeq", GPR32>;
   def CTERMNE_WW : sve_int_cterm<0b0, 0b1, "ctermne", GPR32>;
@@ -1493,15 +1493,15 @@
   defm TBX_ZZZ  : sve2_int_perm_tbx<"tbx">;
 
   // SVE2 integer compare scalar count and limit
-  defm WHILEGE_PWW : sve_int_while4_rr<0b000, "whilege">;
-  defm WHILEGT_PWW : sve_int_while4_rr<0b001, "whilegt">;
-  defm WHILEHS_PWW : sve_int_while4_rr<0b100, "whilehs">;
-  defm WHILEHI_PWW : sve_int_while4_rr<0b101, "whilehi">;
-
-  defm WHILEGE_PXX : sve_int_while8_rr<0b000, "whilege">;
-  defm WHILEGT_PXX : sve_int_while8_rr<0b001, "whilegt">;
-  defm WHILEHS_PXX : sve_int_while8_rr<0b100, "whilehs">;
-  defm WHILEHI_PXX : sve_int_while8_rr<0b101, "whilehi">;
+  defm WHILEGE_PWW : sve_int_while4_rr<0b000, "whilege", int_aarch64_sve_whilege>;
+  defm WHILEGT_PWW : sve_int_while4_rr<0b001, "whilegt", int_aarch64_sve_whilegt>;
+  defm WHILEHS_PWW : sve_int_while4_rr<0b100, "whilehs", int_aarch64_sve_whilehs>;
+  defm WHILEHI_PWW : sve_int_while4_rr<0b101, "whilehi", int_aarch64_sve_whilehi>;
+
+  defm WHILEGE_PXX : sve_int_while8_rr<0b000, "whilege", int_aarch64_sve_whilege>;
+  defm WHILEGT_PXX : sve_int_while8_rr<0b001, "whilegt", int_aarch64_sve_whilegt>;
+  defm WHILEHS_PXX : sve_int_while8_rr<0b100, "whilehs", int_aarch64_sve_whilehs>;
+  defm WHILEHI_PXX : sve_int_while8_rr<0b101,
"whilehi", int_aarch64_sve_whilehi>;
 
   // SVE2 pointer conflict compare
   defm WHILEWR_PXX : sve2_int_while_rr<0b0, "whilewr">;
Index: llvm/lib/Target/AArch64/SVEInstrFormats.td
===================================================================
--- llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -3733,10 +3733,12 @@
 }
 
 class sve_int_while_rr<bits<2> sz8_64, bits<4> opc, string asm,
-                       RegisterClass gprty, PPRRegOp pprty>
+                       RegisterClass gprty, PPRRegOp pprty,
+                       ValueType vt, SDPatternOperator op>
 : I<(outs pprty:$Pd), (ins gprty:$Rn, gprty:$Rm),
   asm, "\t$Pd, $Rn, $Rm",
-  "", []>, Sched<[]> {
+  "",
+  [(set (vt pprty:$Pd), (op gprty:$Rn, gprty:$Rm))]>, Sched<[]> {
   bits<4> Pd;
   bits<5> Rm;
   bits<5> Rn;
@@ -3751,20 +3753,21 @@
   let Inst{3-0} = Pd;
 
   let Defs = [NZCV];
+  let ElementSize = pprty.ElementSize;
 }
 
-multiclass sve_int_while4_rr<bits<3> opc, string asm> {
-  def _B : sve_int_while_rr<0b00, { 0, opc }, asm, GPR32, PPR8>;
-  def _H : sve_int_while_rr<0b01, { 0, opc }, asm, GPR32, PPR16>;
-  def _S : sve_int_while_rr<0b10, { 0, opc }, asm, GPR32, PPR32>;
-  def _D : sve_int_while_rr<0b11, { 0, opc }, asm, GPR32, PPR64>;
+multiclass sve_int_while4_rr<bits<3> opc, string asm, SDPatternOperator op> {
+  def _B : sve_int_while_rr<0b00, { 0, opc }, asm, GPR32, PPR8, nxv16i1, op>;
+  def _H : sve_int_while_rr<0b01, { 0, opc }, asm, GPR32, PPR16, nxv8i1, op>;
+  def _S : sve_int_while_rr<0b10, { 0, opc }, asm, GPR32, PPR32, nxv4i1, op>;
+  def _D : sve_int_while_rr<0b11, { 0, opc }, asm, GPR32, PPR64, nxv2i1, op>;
 }
 
-multiclass sve_int_while8_rr<bits<3> opc, string asm> {
-  def _B : sve_int_while_rr<0b00, { 1, opc }, asm, GPR64, PPR8>;
-  def _H : sve_int_while_rr<0b01, { 1, opc }, asm, GPR64, PPR16>;
-  def _S : sve_int_while_rr<0b10, { 1, opc }, asm, GPR64, PPR32>;
-  def _D : sve_int_while_rr<0b11, { 1, opc }, asm, GPR64, PPR64>;
+multiclass sve_int_while8_rr<bits<3> opc, string asm, SDPatternOperator op> {
+  def _B : sve_int_while_rr<0b00, { 1, opc }, asm, GPR64, PPR8, nxv16i1,
op>;
+  def _H : sve_int_while_rr<0b01, { 1, opc }, asm, GPR64, PPR16, nxv8i1, op>;
+  def _S : sve_int_while_rr<0b10, { 1, opc }, asm, GPR64, PPR32, nxv4i1, op>;
+  def _D : sve_int_while_rr<0b11, { 1, opc }, asm, GPR64, PPR64, nxv2i1, op>;
 }
 
 class sve2_int_while_rr<bits<2> sz8_64, bits<1> rw, string asm,
Index: llvm/test/CodeGen/AArch64/sve-intrinsics-while.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-while.ll
@@ -0,0 +1,309 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+;
+; WHILELE
+;
+
+define <vscale x 16 x i1> @whilele_b_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilele_b_ww:
+; CHECK: whilele p0.b, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i32(i32 %a, i32 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @whilele_b_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilele_b_xx:
+; CHECK: whilele p0.b, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i64(i64 %a, i64 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilele_h_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilele_h_ww:
+; CHECK: whilele p0.h, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i32(i32 %a, i32 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilele_h_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilele_h_xx:
+; CHECK: whilele p0.h, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i64(i64 %a, i64 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilele_s_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilele_s_ww:
+; CHECK: whilele p0.s, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i32(i32 %a, i32 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilele_s_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilele_s_xx:
+; CHECK: whilele p0.s, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i64(i64 %a, i64 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilele_d_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilele_d_ww:
+; CHECK: whilele p0.d, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32 %a,
i32 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilele_d_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilele_d_xx:
+; CHECK: whilele p0.d, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i64(i64 %a, i64 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+;
+; WHILELO
+;
+
+define <vscale x 16 x i1> @whilelo_b_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilelo_b_ww:
+; CHECK: whilelo p0.b, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32 %a, i32 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @whilelo_b_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilelo_b_xx:
+; CHECK: whilelo p0.b, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64 %a, i64 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilelo_h_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilelo_h_ww:
+; CHECK: whilelo p0.h, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i32(i32 %a, i32 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilelo_h_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilelo_h_xx:
+; CHECK: whilelo p0.h, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i64(i64 %a, i64 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilelo_s_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilelo_s_ww:
+; CHECK: whilelo p0.s, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i32(i32 %a, i32 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilelo_s_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilelo_s_xx:
+; CHECK: whilelo p0.s, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i64(i64 %a, i64 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilelo_d_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilelo_d_ww:
+; CHECK: whilelo p0.d, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32 %a, i32 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilelo_d_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilelo_d_xx:
+; CHECK: whilelo p0.d, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i64(i64 %a, i64 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+;
+; WHILELS
+;
+
+define <vscale x 16 x i1> @whilels_b_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilels_b_ww:
+; CHECK: whilels p0.b, w0, w1
+;
CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i32(i32 %a, i32 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @whilels_b_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilels_b_xx:
+; CHECK: whilels p0.b, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i64(i64 %a, i64 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilels_h_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilels_h_ww:
+; CHECK: whilels p0.h, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i32(i32 %a, i32 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilels_h_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilels_h_xx:
+; CHECK: whilels p0.h, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i64(i64 %a, i64 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilels_s_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilels_s_ww:
+; CHECK: whilels p0.s, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i32(i32 %a, i32 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilels_s_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilels_s_xx:
+; CHECK: whilels p0.s, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i64(i64 %a, i64 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilels_d_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilels_d_ww:
+; CHECK: whilels p0.d, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32 %a, i32 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilels_d_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilels_d_xx:
+; CHECK: whilels p0.d, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i64(i64 %a, i64 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+;
+; WHILELT
+;
+
+define <vscale x 16 x i1> @whilelt_b_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilelt_b_ww:
+; CHECK: whilelt p0.b, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i32(i32 %a, i32 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @whilelt_b_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilelt_b_xx:
+; CHECK: whilelt p0.b, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64 %a, i64 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilelt_h_ww(i32 %a, i32 %b) {
+;
CHECK-LABEL: whilelt_h_ww:
+; CHECK: whilelt p0.h, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i32(i32 %a, i32 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilelt_h_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilelt_h_xx:
+; CHECK: whilelt p0.h, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i64(i64 %a, i64 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilelt_s_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilelt_s_ww:
+; CHECK: whilelt p0.s, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i32(i32 %a, i32 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilelt_s_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilelt_s_xx:
+; CHECK: whilelt p0.s, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i64(i64 %a, i64 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilelt_d_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilelt_d_ww:
+; CHECK: whilelt p0.d, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32 %a, i32 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilelt_d_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilelt_d_xx:
+; CHECK: whilelt p0.d, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i64(i64 %a, i64 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i32(i32, i32)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i64(i64, i64)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i32(i32, i32)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i64(i64, i64)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i32(i32, i32)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i64(i64, i64)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32, i32)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i64(i64, i64)
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32, i32)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64, i64)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i32(i32, i32)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i64(i64, i64)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i32(i32, i32)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i64(i64, i64)
+declare
<vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32, i32)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i64(i64, i64)
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i32(i32, i32)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i64(i64, i64)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i32(i32, i32)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i64(i64, i64)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i32(i32, i32)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i64(i64, i64)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32, i32)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i64(i64, i64)
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i32(i32, i32)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64, i64)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i32(i32, i32)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i64(i64, i64)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i32(i32, i32)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i64(i64, i64)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32, i32)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i64(i64, i64)
Index: llvm/test/CodeGen/AArch64/sve2-intrinsics-while.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve2-intrinsics-while.ll
@@ -0,0 +1,309 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
+
+;
+; WHILEGE
+;
+
+define <vscale x 16 x i1> @whilege_b_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilege_b_ww:
+; CHECK: whilege p0.b, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 %a, i32 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @whilege_b_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilege_b_xx:
+; CHECK: whilege p0.b, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64 %a, i64 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilege_h_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilege_h_ww:
+; CHECK: whilege p0.h, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i32(i32 %a, i32 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilege_h_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilege_h_xx:
+;
CHECK: whilege p0.h, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i64(i64 %a, i64 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilege_s_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilege_s_ww:
+; CHECK: whilege p0.s, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i32(i32 %a, i32 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilege_s_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilege_s_xx:
+; CHECK: whilege p0.s, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i64(i64 %a, i64 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilege_d_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilege_d_ww:
+; CHECK: whilege p0.d, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32 %a, i32 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilege_d_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilege_d_xx:
+; CHECK: whilege p0.d, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i64(i64 %a, i64 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+;
+; WHILEHS
+;
+
+define <vscale x 16 x i1> @whilehs_b_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilehs_b_ww:
+; CHECK: whilehs p0.b, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32 %a, i32 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @whilehs_b_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilehs_b_xx:
+; CHECK: whilehs p0.b, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 %a, i64 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilehs_h_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilehs_h_ww:
+; CHECK: whilehs p0.h, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i32(i32 %a, i32 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilehs_h_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilehs_h_xx:
+; CHECK: whilehs p0.h, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i64(i64 %a, i64 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilehs_s_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilehs_s_ww:
+; CHECK: whilehs p0.s, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i32(i32 %a, i32 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define
<vscale x 4 x i1> @whilehs_s_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilehs_s_xx:
+; CHECK: whilehs p0.s, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i64(i64 %a, i64 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilehs_d_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilehs_d_ww:
+; CHECK: whilehs p0.d, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32 %a, i32 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilehs_d_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilehs_d_xx:
+; CHECK: whilehs p0.d, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i64(i64 %a, i64 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+;
+; WHILEGT
+;
+
+define <vscale x 16 x i1> @whilegt_b_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilegt_b_ww:
+; CHECK: whilegt p0.b, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32 %a, i32 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @whilegt_b_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilegt_b_xx:
+; CHECK: whilegt p0.b, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64 %a, i64 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilegt_h_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilegt_h_ww:
+; CHECK: whilegt p0.h, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i32(i32 %a, i32 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilegt_h_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilegt_h_xx:
+; CHECK: whilegt p0.h, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i64(i64 %a, i64 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilegt_s_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilegt_s_ww:
+; CHECK: whilegt p0.s, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i32(i32 %a, i32 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilegt_s_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilegt_s_xx:
+; CHECK: whilegt p0.s, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i64(i64 %a, i64 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilegt_d_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilegt_d_ww:
+; CHECK: whilegt p0.d, w0, w1
+; CHECK-NEXT: ret
+  %out = call
<vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32 %a, i32 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilegt_d_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilegt_d_xx:
+; CHECK: whilegt p0.d, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i64(i64 %a, i64 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+;
+; WHILEHI
+;
+
+define <vscale x 16 x i1> @whilehi_b_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilehi_b_ww:
+; CHECK: whilehi p0.b, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i32(i32 %a, i32 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 16 x i1> @whilehi_b_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilehi_b_xx:
+; CHECK: whilehi p0.b, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 %a, i64 %b)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilehi_h_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilehi_h_ww:
+; CHECK: whilehi p0.h, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i32(i32 %a, i32 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 8 x i1> @whilehi_h_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilehi_h_xx:
+; CHECK: whilehi p0.h, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i64(i64 %a, i64 %b)
+  ret <vscale x 8 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilehi_s_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilehi_s_ww:
+; CHECK: whilehi p0.s, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i32(i32 %a, i32 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 4 x i1> @whilehi_s_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilehi_s_xx:
+; CHECK: whilehi p0.s, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i64(i64 %a, i64 %b)
+  ret <vscale x 4 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilehi_d_ww(i32 %a, i32 %b) {
+; CHECK-LABEL: whilehi_d_ww:
+; CHECK: whilehi p0.d, w0, w1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32 %a, i32 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+define <vscale x 2 x i1> @whilehi_d_xx(i64 %a, i64 %b) {
+; CHECK-LABEL: whilehi_d_xx:
+; CHECK: whilehi p0.d, x0, x1
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64 %a, i64 %b)
+  ret <vscale x 2 x i1> %out
+}
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32, i32)
+declare
<vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64, i64)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i32(i32, i32)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i64(i64, i64)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i32(i32, i32)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i64(i64, i64)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32, i32)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i64(i64, i64)
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32, i32)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64, i64)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i32(i32, i32)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i64(i64, i64)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i32(i32, i32)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i64(i64, i64)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32, i32)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i64(i64, i64)
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32, i32)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64, i64)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i32(i32, i32)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i64(i64, i64)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i32(i32, i32)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i64(i64, i64)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32, i32)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i64(i64, i64)
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i32(i32, i32)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64, i64)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i32(i32, i32)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i64(i64, i64)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i32(i32, i32)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i64(i64, i64)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32, i32)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64, i64)