Index: llvm/include/llvm/CodeGen/TargetLowering.h
===================================================================
--- llvm/include/llvm/CodeGen/TargetLowering.h
+++ llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1252,7 +1252,7 @@
       Elm = PointerTy.getTypeForEVT(Ty->getContext());
     }
     return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
-                            VTy->getNumElements());
+                            VTy->getElementCount());
   }
 
   return getValueType(DL, Ty, AllowUnknown);
Index: llvm/include/llvm/IR/IntrinsicsAArch64.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -793,6 +793,13 @@
                 LLVMMatchType<0>],
                [IntrNoMem]>;
 
+  class AdvSIMD_SVE_CompareWide_Intrinsic
+    : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
+                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                 llvm_anyvector_ty,
+                 llvm_nxv2i64_ty],
+                [IntrNoMem]>;
+
   class AdvSIMD_SVE_CNT_Intrinsic
     : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
                 [LLVMVectorOfBitcastsToInt<0>,
@@ -972,6 +979,28 @@
 def int_aarch64_sve_udot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
 
 //
+// Integer comparisons
+//
+
+def int_aarch64_sve_cmpeq : AdvSIMD_SVE_Compare_Intrinsic;
+def int_aarch64_sve_cmpge : AdvSIMD_SVE_Compare_Intrinsic;
+def int_aarch64_sve_cmpgt : AdvSIMD_SVE_Compare_Intrinsic;
+def int_aarch64_sve_cmphi : AdvSIMD_SVE_Compare_Intrinsic;
+def int_aarch64_sve_cmphs : AdvSIMD_SVE_Compare_Intrinsic;
+def int_aarch64_sve_cmpne : AdvSIMD_SVE_Compare_Intrinsic;
+
+def int_aarch64_sve_cmpeq_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
+def int_aarch64_sve_cmpge_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
+def int_aarch64_sve_cmpgt_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
+def int_aarch64_sve_cmphi_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
+def int_aarch64_sve_cmphs_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
+def int_aarch64_sve_cmple_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
+def int_aarch64_sve_cmplo_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
+def int_aarch64_sve_cmpls_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
+def int_aarch64_sve_cmplt_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
+def int_aarch64_sve_cmpne_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
+
+//
 // Counting bits
 //
Index: llvm/lib/Target/AArch64/AArch64InstrFormats.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -713,6 +713,13 @@
   let PrintMethod = "printImm";
 }
 
+def imm0_127_64b : Operand<i64>, ImmLeaf<i64, [{
+  return ((uint64_t)Imm) < 128;
+}]> {
+  let ParserMatchClass = Imm0_127Operand;
+  let PrintMethod = "printImm";
+}
+
 // NOTE: These imm0_N operands have to be of type i64 because i64 is the size
 // for all shift-amounts.
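A note on the AdvSIMD_SVE_CompareWide_Intrinsic class above: it ties the predicate result and governing predicate to the overloaded vector type, keeps the first data operand overloaded, and fixes the second data operand to an nxv2i64 vector. As a rough sketch of what that signature means at the IR level (illustrative only, not part of the patch; the function name @example_cmpgt_wide_b is made up, the intrinsic itself matches the tests further down):

; Wide compare: <vscale x 16 x i8> elements against <vscale x 2 x i64> elements,
; under a <vscale x 16 x i1> governing predicate, producing a <vscale x 16 x i1> result.
declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)

define <vscale x 16 x i1> @example_cmpgt_wide_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b) {
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 2 x i64> %b)
  ret <vscale x 16 x i1> %out
}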
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -707,34 +707,34 @@
   defm TRN1_PPP : sve_int_perm_bin_perm_pp<0b100, "trn1">;
   defm TRN2_PPP : sve_int_perm_bin_perm_pp<0b101, "trn2">;
 
-  defm CMPHS_PPzZZ : sve_int_cmp_0<0b000, "cmphs">;
-  defm CMPHI_PPzZZ : sve_int_cmp_0<0b001, "cmphi">;
-  defm CMPGE_PPzZZ : sve_int_cmp_0<0b100, "cmpge">;
-  defm CMPGT_PPzZZ : sve_int_cmp_0<0b101, "cmpgt">;
-  defm CMPEQ_PPzZZ : sve_int_cmp_0<0b110, "cmpeq">;
-  defm CMPNE_PPzZZ : sve_int_cmp_0<0b111, "cmpne">;
-
-  defm CMPEQ_WIDE_PPzZZ : sve_int_cmp_0_wide<0b010, "cmpeq">;
-  defm CMPNE_WIDE_PPzZZ : sve_int_cmp_0_wide<0b011, "cmpne">;
-  defm CMPGE_WIDE_PPzZZ : sve_int_cmp_1_wide<0b000, "cmpge">;
-  defm CMPGT_WIDE_PPzZZ : sve_int_cmp_1_wide<0b001, "cmpgt">;
-  defm CMPLT_WIDE_PPzZZ : sve_int_cmp_1_wide<0b010, "cmplt">;
-  defm CMPLE_WIDE_PPzZZ : sve_int_cmp_1_wide<0b011, "cmple">;
-  defm CMPHS_WIDE_PPzZZ : sve_int_cmp_1_wide<0b100, "cmphs">;
-  defm CMPHI_WIDE_PPzZZ : sve_int_cmp_1_wide<0b101, "cmphi">;
-  defm CMPLO_WIDE_PPzZZ : sve_int_cmp_1_wide<0b110, "cmplo">;
-  defm CMPLS_WIDE_PPzZZ : sve_int_cmp_1_wide<0b111, "cmpls">;
-
-  defm CMPGE_PPzZI : sve_int_scmp_vi<0b000, "cmpge">;
-  defm CMPGT_PPzZI : sve_int_scmp_vi<0b001, "cmpgt">;
-  defm CMPLT_PPzZI : sve_int_scmp_vi<0b010, "cmplt">;
-  defm CMPLE_PPzZI : sve_int_scmp_vi<0b011, "cmple">;
-  defm CMPEQ_PPzZI : sve_int_scmp_vi<0b100, "cmpeq">;
-  defm CMPNE_PPzZI : sve_int_scmp_vi<0b101, "cmpne">;
-  defm CMPHS_PPzZI : sve_int_ucmp_vi<0b00, "cmphs">;
-  defm CMPHI_PPzZI : sve_int_ucmp_vi<0b01, "cmphi">;
-  defm CMPLO_PPzZI : sve_int_ucmp_vi<0b10, "cmplo">;
-  defm CMPLS_PPzZI : sve_int_ucmp_vi<0b11, "cmpls">;
+  defm CMPHS_PPzZZ : sve_int_cmp_0<0b000, "cmphs", int_aarch64_sve_cmphs, SETUGE>;
+  defm CMPHI_PPzZZ : sve_int_cmp_0<0b001, "cmphi", int_aarch64_sve_cmphi, SETUGT>;
+  defm CMPGE_PPzZZ : sve_int_cmp_0<0b100, "cmpge", int_aarch64_sve_cmpge, SETGE>;
+  defm CMPGT_PPzZZ : sve_int_cmp_0<0b101, "cmpgt", int_aarch64_sve_cmpgt, SETGT>;
+  defm CMPEQ_PPzZZ : sve_int_cmp_0<0b110, "cmpeq", int_aarch64_sve_cmpeq, SETEQ>;
+  defm CMPNE_PPzZZ : sve_int_cmp_0<0b111, "cmpne", int_aarch64_sve_cmpne, SETNE>;
+
+  defm CMPEQ_WIDE_PPzZZ : sve_int_cmp_0_wide<0b010, "cmpeq", int_aarch64_sve_cmpeq_wide>;
+  defm CMPNE_WIDE_PPzZZ : sve_int_cmp_0_wide<0b011, "cmpne", int_aarch64_sve_cmpne_wide>;
+  defm CMPGE_WIDE_PPzZZ : sve_int_cmp_1_wide<0b000, "cmpge", int_aarch64_sve_cmpge_wide>;
+  defm CMPGT_WIDE_PPzZZ : sve_int_cmp_1_wide<0b001, "cmpgt", int_aarch64_sve_cmpgt_wide>;
+  defm CMPLT_WIDE_PPzZZ : sve_int_cmp_1_wide<0b010, "cmplt", int_aarch64_sve_cmplt_wide>;
+  defm CMPLE_WIDE_PPzZZ : sve_int_cmp_1_wide<0b011, "cmple", int_aarch64_sve_cmple_wide>;
+  defm CMPHS_WIDE_PPzZZ : sve_int_cmp_1_wide<0b100, "cmphs", int_aarch64_sve_cmphs_wide>;
+  defm CMPHI_WIDE_PPzZZ : sve_int_cmp_1_wide<0b101, "cmphi", int_aarch64_sve_cmphi_wide>;
+  defm CMPLO_WIDE_PPzZZ : sve_int_cmp_1_wide<0b110, "cmplo", int_aarch64_sve_cmplo_wide>;
+  defm CMPLS_WIDE_PPzZZ : sve_int_cmp_1_wide<0b111, "cmpls", int_aarch64_sve_cmpls_wide>;
+
+  defm CMPGE_PPzZI : sve_int_scmp_vi<0b000, "cmpge", SETGE, int_aarch64_sve_cmpge>;
+  defm CMPGT_PPzZI : sve_int_scmp_vi<0b001, "cmpgt", SETGT, int_aarch64_sve_cmpgt>;
+  defm CMPLT_PPzZI : sve_int_scmp_vi<0b010, "cmplt", SETLT, null_frag, int_aarch64_sve_cmpgt>;
+  defm CMPLE_PPzZI : sve_int_scmp_vi<0b011, "cmple", SETLE, null_frag, int_aarch64_sve_cmpge>;
+  defm CMPEQ_PPzZI : sve_int_scmp_vi<0b100, "cmpeq", SETEQ, int_aarch64_sve_cmpeq>;
+  defm CMPNE_PPzZI : sve_int_scmp_vi<0b101, "cmpne", SETNE, int_aarch64_sve_cmpne>;
+  defm CMPHS_PPzZI : sve_int_ucmp_vi<0b00, "cmphs", SETUGE, int_aarch64_sve_cmphs>;
+  defm CMPHI_PPzZI : sve_int_ucmp_vi<0b01, "cmphi", SETUGT, int_aarch64_sve_cmphi>;
+  defm CMPLO_PPzZI : sve_int_ucmp_vi<0b10, "cmplo", SETULT, null_frag, int_aarch64_sve_cmphi>;
+  defm CMPLS_PPzZI : sve_int_ucmp_vi<0b11, "cmpls", SETULE, null_frag, int_aarch64_sve_cmphs>;
 
   defm FCMGE_PPzZZ : sve_fp_3op_p_pd<0b000, "fcmge", int_aarch64_sve_fcmpge>;
   defm FCMGT_PPzZZ : sve_fp_3op_p_pd<0b001, "fcmgt", int_aarch64_sve_fcmpgt>;
Index: llvm/lib/Target/AArch64/SVEInstrFormats.td
===================================================================
--- llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -306,6 +306,19 @@
 def SVEDup0Undef : ComplexPattern<i64, 0, "SelectDupZeroOrUndef", []>;
 
+//
+// Instruction specific patterns.
+//
+
+class SVE_Cmp_Pat0<ValueType pt, ValueType vt, CondCode cc, Instruction inst,
+                   Instruction ptrue>
+: Pat<(pt (setcc vt:$Zn, vt:$Zm, cc)),
+      (inst (ptrue 31), $Zn, $Zm)>;
+
+class SVE_Cmp_Pat1<ValueType pt, ValueType vt, CondCode cc, Instruction inst>
+: Pat<(pt (and pt:$Pg, (setcc vt:$Zn, vt:$Zm, cc))),
+      (inst $Pg, $Zn, $Zm)>;
+
 //===----------------------------------------------------------------------===//
 // SVE Predicate Misc Group
 //===----------------------------------------------------------------------===//
@@ -3466,23 +3479,47 @@
   let Defs = [NZCV];
 }
 
-multiclass sve_int_cmp_0<bits<3> opc, string asm> {
+multiclass sve_int_cmp_0<bits<3> opc, string asm, SDPatternOperator op,
+                         CondCode cc> {
   def _B : sve_int_cmp<0b0, 0b00, opc, asm, PPR8, ZPR8, ZPR8>;
   def _H : sve_int_cmp<0b0, 0b01, opc, asm, PPR16, ZPR16, ZPR16>;
   def _S : sve_int_cmp<0b0, 0b10, opc, asm, PPR32, ZPR32, ZPR32>;
   def _D : sve_int_cmp<0b0, 0b11, opc, asm, PPR64, ZPR64, ZPR64>;
+
+  def : SVE_3_Op_Pat<nxv16i1, op, nxv16i1, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _B)>;
+  def : SVE_3_Op_Pat<nxv8i1,  op, nxv8i1,  nxv8i16, nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_3_Op_Pat<nxv4i1,  op, nxv4i1,  nxv4i32, nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_3_Op_Pat<nxv2i1,  op, nxv2i1,  nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
+
+  def : SVE_Cmp_Pat0<nxv16i1, nxv16i8, cc, !cast<Instruction>(NAME # _B), PTRUE_B>;
+  def : SVE_Cmp_Pat0<nxv8i1,  nxv8i16, cc, !cast<Instruction>(NAME # _H), PTRUE_H>;
+  def : SVE_Cmp_Pat0<nxv4i1,  nxv4i32, cc, !cast<Instruction>(NAME # _S), PTRUE_S>;
+  def : SVE_Cmp_Pat0<nxv2i1,  nxv2i64, cc, !cast<Instruction>(NAME # _D), PTRUE_D>;
+
+  def : SVE_Cmp_Pat1<nxv16i1, nxv16i8, cc, !cast<Instruction>(NAME # _B)>;
+  def : SVE_Cmp_Pat1<nxv8i1,  nxv8i16, cc, !cast<Instruction>(NAME # _H)>;
+  def : SVE_Cmp_Pat1<nxv4i1,  nxv4i32, cc, !cast<Instruction>(NAME # _S)>;
+  def : SVE_Cmp_Pat1<nxv2i1,  nxv2i64, cc, !cast<Instruction>(NAME # _D)>;
 }
 
-multiclass sve_int_cmp_0_wide<bits<3> opc, string asm> {
+multiclass sve_int_cmp_0_wide<bits<3> opc, string asm, SDPatternOperator op> {
   def _B : sve_int_cmp<0b0, 0b00, opc, asm, PPR8, ZPR8, ZPR64>;
   def _H : sve_int_cmp<0b0, 0b01, opc, asm, PPR16, ZPR16, ZPR64>;
   def _S : sve_int_cmp<0b0, 0b10, opc, asm, PPR32, ZPR32, ZPR64>;
+
+  def : SVE_3_Op_Pat<nxv16i1, op, nxv16i1, nxv16i8, nxv2i64, !cast<Instruction>(NAME # _B)>;
+  def : SVE_3_Op_Pat<nxv8i1,  op, nxv8i1,  nxv8i16, nxv2i64, !cast<Instruction>(NAME # _H)>;
+  def : SVE_3_Op_Pat<nxv4i1,  op, nxv4i1,  nxv4i32, nxv2i64, !cast<Instruction>(NAME # _S)>;
 }
 
-multiclass sve_int_cmp_1_wide<bits<3> opc, string asm> {
+multiclass sve_int_cmp_1_wide<bits<3> opc, string asm, SDPatternOperator op> {
   def _B : sve_int_cmp<0b1, 0b00, opc, asm, PPR8, ZPR8, ZPR64>;
   def _H : sve_int_cmp<0b1, 0b01, opc, asm, PPR16, ZPR16, ZPR64>;
   def _S : sve_int_cmp<0b1, 0b10, opc, asm, PPR32, ZPR32, ZPR64>;
+
+  def : SVE_3_Op_Pat<nxv16i1, op, nxv16i1, nxv16i8, nxv2i64, !cast<Instruction>(NAME # _B)>;
+  def : SVE_3_Op_Pat<nxv8i1,  op, nxv8i1,  nxv8i16, nxv2i64, !cast<Instruction>(NAME # _H)>;
+  def : SVE_3_Op_Pat<nxv4i1,  op, nxv4i1,  nxv4i32, nxv2i64, !cast<Instruction>(NAME # _S)>;
 }
@@ -3514,13 +3551,70 @@
   let Inst{3-0} = Pd;
   let Defs = [NZCV];
+  let ElementSize = pprty.ElementSize;
 }
 
-multiclass sve_int_scmp_vi<bits<3> opc, string asm> {
+multiclass sve_int_scmp_vi<bits<3> opc, string asm, CondCode cc,
+                           SDPatternOperator op = null_frag,
+                           SDPatternOperator inv_op = null_frag> {
   def _B : sve_int_scmp_vi<0b00, opc, asm, PPR8, ZPR8, simm5_32b>;
   def _H : sve_int_scmp_vi<0b01, opc, asm, PPR16, ZPR16, simm5_32b>;
   def _S : sve_int_scmp_vi<0b10, opc, asm, PPR32, ZPR32, simm5_32b>;
  def _D : sve_int_scmp_vi<0b11, opc, asm, PPR64, ZPR64, simm5_64b>;
+
+  // IR version
+  def : Pat<(nxv16i1 (setcc (nxv16i8 ZPR:$Zs1),
+                            (nxv16i8 (AArch64dup (simm5_32b:$imm))),
+                            cc)),
+            (!cast<Instruction>(NAME # "_B") (PTRUE_B 31), ZPR:$Zs1, simm5_32b:$imm)>;
+  def : Pat<(nxv8i1 (setcc (nxv8i16 ZPR:$Zs1),
+                           (nxv8i16 (AArch64dup (simm5_32b:$imm))),
+                           cc)),
+            (!cast<Instruction>(NAME # "_H") (PTRUE_H 31), ZPR:$Zs1, simm5_32b:$imm)>;
+  def : Pat<(nxv4i1 (setcc (nxv4i32 ZPR:$Zs1),
+                           (nxv4i32 (AArch64dup (simm5_32b:$imm))),
+                           cc)),
+            (!cast<Instruction>(NAME # "_S") (PTRUE_S 31), ZPR:$Zs1, simm5_32b:$imm)>;
+  def : Pat<(nxv2i1 (setcc (nxv2i64 ZPR:$Zs1),
+                           (nxv2i64 (AArch64dup (simm5_64b:$imm))),
+                           cc)),
+            (!cast<Instruction>(NAME # "_D") (PTRUE_D 31), ZPR:$Zs1, simm5_64b:$imm)>;
+
+  // Intrinsic version
+  def : Pat<(nxv16i1 (op (nxv16i1 PPR_3b:$Pg),
+                         (nxv16i8 ZPR:$Zs1),
+                         (nxv16i8 (AArch64dup (simm5_32b:$imm))))),
+            (!cast<Instruction>(NAME # "_B") PPR_3b:$Pg, ZPR:$Zs1, simm5_32b:$imm)>;
+  def : Pat<(nxv8i1 (op (nxv8i1 PPR_3b:$Pg),
+                        (nxv8i16 ZPR:$Zs1),
+                        (nxv8i16 (AArch64dup (simm5_32b:$imm))))),
+            (!cast<Instruction>(NAME # "_H") PPR_3b:$Pg, ZPR:$Zs1, simm5_32b:$imm)>;
+  def : Pat<(nxv4i1 (op (nxv4i1 PPR_3b:$Pg),
+                        (nxv4i32 ZPR:$Zs1),
+                        (nxv4i32 (AArch64dup (simm5_32b:$imm))))),
+            (!cast<Instruction>(NAME # "_S") PPR_3b:$Pg, ZPR:$Zs1, simm5_32b:$imm)>;
+  def : Pat<(nxv2i1 (op (nxv2i1 PPR_3b:$Pg),
+                        (nxv2i64 ZPR:$Zs1),
+                        (nxv2i64 (AArch64dup (simm5_64b:$imm))))),
+            (!cast<Instruction>(NAME # "_D") PPR_3b:$Pg, ZPR:$Zs1, simm5_64b:$imm)>;
+
+  // Inverted intrinsic version
+  def : Pat<(nxv16i1 (inv_op (nxv16i1 PPR_3b:$Pg),
+                             (nxv16i8 (AArch64dup (simm5_32b:$imm))),
+                             (nxv16i8 ZPR:$Zs1))),
+            (!cast<Instruction>(NAME # "_B") PPR_3b:$Pg, ZPR:$Zs1, simm5_32b:$imm)>;
+  def : Pat<(nxv8i1 (inv_op (nxv8i1 PPR_3b:$Pg),
+                            (nxv8i16 (AArch64dup (simm5_32b:$imm))),
+                            (nxv8i16 ZPR:$Zs1))),
+            (!cast<Instruction>(NAME # "_H") PPR_3b:$Pg, ZPR:$Zs1, simm5_32b:$imm)>;
+  def : Pat<(nxv4i1 (inv_op (nxv4i1 PPR_3b:$Pg),
+                            (nxv4i32 (AArch64dup (simm5_32b:$imm))),
+                            (nxv4i32 ZPR:$Zs1))),
+            (!cast<Instruction>(NAME # "_S") PPR_3b:$Pg, ZPR:$Zs1, simm5_32b:$imm)>;
+  def : Pat<(nxv2i1 (inv_op (nxv2i1 PPR_3b:$Pg),
+                            (nxv2i64 (AArch64dup (simm5_64b:$imm))),
+                            (nxv2i64 ZPR:$Zs1))),
+            (!cast<Instruction>(NAME # "_D") PPR_3b:$Pg, ZPR:$Zs1, simm5_64b:$imm)>;
 }
@@ -3551,11 +3645,67 @@
   let Defs = [NZCV];
 }
 
-multiclass sve_int_ucmp_vi<bits<2> opc, string asm> {
+multiclass sve_int_ucmp_vi<bits<2> opc, string asm, CondCode cc,
+                           SDPatternOperator op = null_frag,
+                           SDPatternOperator inv_op = null_frag> {
   def _B : sve_int_ucmp_vi<0b00, opc, asm, PPR8, ZPR8, imm0_127>;
   def _H : sve_int_ucmp_vi<0b01, opc, asm, PPR16, ZPR16, imm0_127>;
   def _S : sve_int_ucmp_vi<0b10, opc, asm, PPR32, ZPR32, imm0_127>;
-  def _D : sve_int_ucmp_vi<0b11, opc, asm, PPR64, ZPR64, imm0_127>;
+  def _D : sve_int_ucmp_vi<0b11, opc, asm, PPR64, ZPR64, imm0_127_64b>;
+
+  // IR version
+  def : Pat<(nxv16i1 (setcc (nxv16i8 ZPR:$Zs1),
+                            (nxv16i8 (AArch64dup (imm0_127:$imm))),
+                            cc)),
+            (!cast<Instruction>(NAME # "_B") (PTRUE_B 31), ZPR:$Zs1, imm0_127:$imm)>;
+  def : Pat<(nxv8i1 (setcc (nxv8i16 ZPR:$Zs1),
+                           (nxv8i16 (AArch64dup (imm0_127:$imm))),
+                           cc)),
+            (!cast<Instruction>(NAME # "_H") (PTRUE_H 31), ZPR:$Zs1, imm0_127:$imm)>;
+  def : Pat<(nxv4i1 (setcc (nxv4i32 ZPR:$Zs1),
+                           (nxv4i32 (AArch64dup (imm0_127:$imm))),
+                           cc)),
+            (!cast<Instruction>(NAME # "_S") (PTRUE_S 31), ZPR:$Zs1, imm0_127:$imm)>;
+  def : Pat<(nxv2i1 (setcc (nxv2i64 ZPR:$Zs1),
+                           (nxv2i64 (AArch64dup (imm0_127_64b:$imm))),
+                           cc)),
+            (!cast<Instruction>(NAME # "_D") (PTRUE_D 31), ZPR:$Zs1, imm0_127_64b:$imm)>;
+
+  // Intrinsic version
+  def : Pat<(nxv16i1 (op (nxv16i1
PPR_3b:$Pg), + (nxv16i8 ZPR:$Zs1), + (nxv16i8 (AArch64dup (imm0_127:$imm))))), + (!cast(NAME # "_B") PPR_3b:$Pg, ZPR:$Zs1, imm0_127:$imm)>; + def : Pat<(nxv8i1 (op (nxv8i1 PPR_3b:$Pg), + (nxv8i16 ZPR:$Zs1), + (nxv8i16 (AArch64dup (imm0_127:$imm))))), + (!cast(NAME # "_H") PPR_3b:$Pg, ZPR:$Zs1, imm0_127:$imm)>; + def : Pat<(nxv4i1 (op (nxv4i1 PPR_3b:$Pg), + (nxv4i32 ZPR:$Zs1), + (nxv4i32 (AArch64dup (imm0_127:$imm))))), + (!cast(NAME # "_S") PPR_3b:$Pg, ZPR:$Zs1, imm0_127:$imm)>; + def : Pat<(nxv2i1 (op (nxv2i1 PPR_3b:$Pg), + (nxv2i64 ZPR:$Zs1), + (nxv2i64 (AArch64dup (imm0_127_64b:$imm))))), + (!cast(NAME # "_D") PPR_3b:$Pg, ZPR:$Zs1, imm0_127_64b:$imm)>; + + // Inverted intrinsic version + def : Pat<(nxv16i1 (inv_op (nxv16i1 PPR_3b:$Pg), + (nxv16i8 (AArch64dup (imm0_127:$imm))), + (nxv16i8 ZPR:$Zs1))), + (!cast(NAME # "_B") PPR_3b:$Pg, ZPR:$Zs1, imm0_127:$imm)>; + def : Pat<(nxv8i1 (inv_op (nxv8i1 PPR_3b:$Pg), + (nxv8i16 (AArch64dup (imm0_127:$imm))), + (nxv8i16 ZPR:$Zs1))), + (!cast(NAME # "_H") PPR_3b:$Pg, ZPR:$Zs1, imm0_127:$imm)>; + def : Pat<(nxv4i1 (inv_op (nxv4i1 PPR_3b:$Pg), + (nxv4i32 (AArch64dup (imm0_127:$imm))), + (nxv4i32 ZPR:$Zs1))), + (!cast(NAME # "_S") PPR_3b:$Pg, ZPR:$Zs1, imm0_127:$imm)>; + def : Pat<(nxv2i1 (inv_op (nxv2i1 PPR_3b:$Pg), + (nxv2i64 (AArch64dup (imm0_127_64b:$imm))), + (nxv2i64 ZPR:$Zs1))), + (!cast(NAME # "_D") PPR_3b:$Pg, ZPR:$Zs1, imm0_127_64b:$imm)>; } Index: llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll @@ -0,0 +1,949 @@ +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Signed Comparisons ;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +; +; CMPEQ +; + +define @ir_cmpeq_b( %a) { +; CHECK-LABEL: ir_cmpeq_b +; CHECK: cmpeq p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp eq %a, %splat + ret %out +} + +define @int_cmpeq_b( %pg, %a) { +; CHECK-LABEL: int_cmpeq_b +; CHECK: cmpeq p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpeq.nxv16i8( %pg, + %a, + %splat) + ret %out +} + +define @ir_cmpeq_h( %a) { +; CHECK-LABEL: ir_cmpeq_h +; CHECK: cmpeq p0.h, p0/z, z0.h, #-16 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp eq %a, %splat + ret %out +} + +define @int_cmpeq_h( %pg, %a) { +; CHECK-LABEL: int_cmpeq_h +; CHECK: cmpeq p0.h, p0/z, z0.h, #-16 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpeq.nxv8i16( %pg, + %a, + %splat) + ret %out +} + +define @ir_cmpeq_s( %a) { +; CHECK-LABEL: ir_cmpeq_s +; CHECK: cmpeq p0.s, p0/z, z0.s, #15 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 15, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp eq %a, %splat + ret %out +} + +define @int_cmpeq_s( %pg, %a) { +; CHECK-LABEL: int_cmpeq_s +; CHECK: cmpeq p0.s, p0/z, z0.s, #15 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 15, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call 
@llvm.aarch64.sve.cmpeq.nxv4i32( %pg, + %a, + %splat) + ret %out +} + +define @ir_cmpeq_d( %a) { +; CHECK-LABEL: ir_cmpeq_d +; CHECK: cmpeq p0.d, p0/z, z0.d, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp eq %a, %splat + ret %out +} + +define @int_cmpeq_d( %pg, %a) { +; CHECK-LABEL: int_cmpeq_d +; CHECK: cmpeq p0.d, p0/z, z0.d, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpeq.nxv2i64( %pg, + %a, + %splat) + ret %out +} + +; +; CMPGE +; + +define @ir_cmpge_b( %a) { +; CHECK-LABEL: ir_cmpge_b +; CHECK: cmpge p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp sge %a, %splat + ret %out +} + +define @int_cmpge_b( %pg, %a) { +; CHECK-LABEL: int_cmpge_b +; CHECK: cmpge p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpge.nxv16i8( %pg, + %a, + %splat) + ret %out +} + +define @ir_cmpge_h( %a) { +; CHECK-LABEL: ir_cmpge_h +; CHECK: cmpge p0.h, p0/z, z0.h, #-16 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp sge %a, %splat + ret %out +} + +define @int_cmpge_h( %pg, %a) { +; CHECK-LABEL: int_cmpge_h +; CHECK: cmpge p0.h, p0/z, z0.h, #-16 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpge.nxv8i16( %pg, + %a, + %splat) + ret %out +} + +define @ir_cmpge_s( %a) { +; CHECK-LABEL: ir_cmpge_s +; CHECK: cmpge p0.s, p0/z, z0.s, #15 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 15, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp sge %a, %splat + ret %out +} + +define @int_cmpge_s( %pg, %a) { +; CHECK-LABEL: int_cmpge_s +; CHECK: cmpge p0.s, p0/z, z0.s, #15 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 15, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpge.nxv4i32( %pg, + %a, + %splat) + ret %out +} + +define @ir_cmpge_d( %a) { +; CHECK-LABEL: ir_cmpge_d +; CHECK: cmpge p0.d, p0/z, z0.d, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp sge %a, %splat + ret %out +} + +define @int_cmpge_d( %pg, %a) { +; CHECK-LABEL: int_cmpge_d +; CHECK: cmpge p0.d, p0/z, z0.d, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpge.nxv2i64( %pg, + %a, + %splat) + ret %out +} + +; +; CMPGT +; + +define @ir_cmpgt_b( %a) { +; CHECK-LABEL: ir_cmpgt_b +; CHECK: cmpgt p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp sgt %a, %splat + ret %out +} + +define @int_cmpgt_b( %pg, %a) { +; CHECK-LABEL: int_cmpgt_b +; CHECK: cmpgt p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpgt.nxv16i8( %pg, + %a, + %splat) + ret %out +} + +define @ir_cmpgt_h( %a) { +; CHECK-LABEL: ir_cmpgt_h +; CHECK: cmpgt p0.h, p0/z, z0.h, #-16 +; CHECK-NEXT: ret + %elt = insertelement undef, 
i16 -16, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp sgt %a, %splat + ret %out +} + +define @int_cmpgt_h( %pg, %a) { +; CHECK-LABEL: int_cmpgt_h +; CHECK: cmpgt p0.h, p0/z, z0.h, #-16 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpgt.nxv8i16( %pg, + %a, + %splat) + ret %out +} + +define @ir_cmpgt_s( %a) { +; CHECK-LABEL: ir_cmpgt_s +; CHECK: cmpgt p0.s, p0/z, z0.s, #15 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 15, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp sgt %a, %splat + ret %out +} + +define @int_cmpgt_s( %pg, %a) { +; CHECK-LABEL: int_cmpgt_s +; CHECK: cmpgt p0.s, p0/z, z0.s, #15 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 15, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpgt.nxv4i32( %pg, + %a, + %splat) + ret %out +} + +define @ir_cmpgt_d( %a) { +; CHECK-LABEL: ir_cmpgt_d +; CHECK: cmpgt p0.d, p0/z, z0.d, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp sgt %a, %splat + ret %out +} + +define @int_cmpgt_d( %pg, %a) { +; CHECK-LABEL: int_cmpgt_d +; CHECK: cmpgt p0.d, p0/z, z0.d, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpgt.nxv2i64( %pg, + %a, + %splat) + ret %out +} + +; +; CMPLE +; + +define @ir_cmple_b( %a) { +; CHECK-LABEL: ir_cmple_b +; CHECK: cmple p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp sle %a, %splat + ret %out +} + +define @int_cmple_b( %pg, %a) { +; CHECK-LABEL: int_cmple_b +; CHECK: cmple p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpge.nxv16i8( %pg, + %splat, + %a) + ret %out +} + +define @ir_cmple_h( %a) { +; CHECK-LABEL: ir_cmple_h +; CHECK: cmple p0.h, p0/z, z0.h, #-16 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp sle %a, %splat + ret %out +} + +define @int_cmple_h( %pg, %a) { +; CHECK-LABEL: int_cmple_h +; CHECK: cmple p0.h, p0/z, z0.h, #-16 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpge.nxv8i16( %pg, + %splat, + %a) + ret %out +} + +define @ir_cmple_s( %a) { +; CHECK-LABEL: ir_cmple_s +; CHECK: cmple p0.s, p0/z, z0.s, #15 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 15, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp sle %a, %splat + ret %out +} + +define @int_cmple_s( %pg, %a) { +; CHECK-LABEL: int_cmple_s +; CHECK: cmple p0.s, p0/z, z0.s, #15 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 15, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpge.nxv4i32( %pg, + %splat, + %a) + ret %out +} + +define @ir_cmple_d( %a) { +; CHECK-LABEL: ir_cmple_d +; CHECK: cmple p0.d, p0/z, z0.d, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp sle %a, %splat + ret %out +} + +define @int_cmple_d( %pg, %a) { +; CHECK-LABEL: int_cmple_d +; CHECK: cmple p0.d, p0/z, z0.d, #0 +; 
CHECK-NEXT: ret + %elt = insertelement undef, i64 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpge.nxv2i64( %pg, + %splat, + %a) + ret %out +} + +; +; CMPLT +; + +define @ir_cmplt_b( %a) { +; CHECK-LABEL: ir_cmplt_b +; CHECK: cmplt p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp slt %a, %splat + ret %out +} + +define @int_cmplt_b( %pg, %a) { +; CHECK-LABEL: int_cmplt_b +; CHECK: cmplt p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpgt.nxv16i8( %pg, + %splat, + %a) + ret %out +} + +define @ir_cmplt_h( %a) { +; CHECK-LABEL: ir_cmplt_h +; CHECK: cmplt p0.h, p0/z, z0.h, #-16 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp slt %a, %splat + ret %out +} + +define @int_cmplt_h( %pg, %a) { +; CHECK-LABEL: int_cmplt_h +; CHECK: cmplt p0.h, p0/z, z0.h, #-16 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpgt.nxv8i16( %pg, + %splat, + %a) + ret %out +} + +define @ir_cmplt_s( %a) { +; CHECK-LABEL: ir_cmplt_s +; CHECK: cmplt p0.s, p0/z, z0.s, #15 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 15, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp slt %a, %splat + ret %out +} + +define @int_cmplt_s( %pg, %a) { +; CHECK-LABEL: int_cmplt_s +; CHECK: cmplt p0.s, p0/z, z0.s, #15 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 15, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpgt.nxv4i32( %pg, + %splat, + %a) + ret %out +} + +define @ir_cmplt_d( %a) { +; CHECK-LABEL: ir_cmplt_d +; CHECK: cmplt p0.d, p0/z, z0.d, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp slt %a, %splat + ret %out +} + +define @int_cmplt_d( %pg, %a) { +; CHECK-LABEL: int_cmplt_d +; CHECK: cmplt p0.d, p0/z, z0.d, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpgt.nxv2i64( %pg, + %splat, + %a) + ret %out +} + +; +; CMPNE +; + +define @ir_cmpne_b( %a) { +; CHECK-LABEL: ir_cmpne_b +; CHECK: cmpne p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp ne %a, %splat + ret %out +} + +define @int_cmpne_b( %pg, %a) { +; CHECK-LABEL: int_cmpne_b +; CHECK: cmpne p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpne.nxv16i8( %pg, + %a, + %splat) + ret %out +} + +define @ir_cmpne_h( %a) { +; CHECK-LABEL: ir_cmpne_h +; CHECK: cmpne p0.h, p0/z, z0.h, #-16 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp ne %a, %splat + ret %out +} + +define @int_cmpne_h( %pg, %a) { +; CHECK-LABEL: int_cmpne_h +; CHECK: cmpne p0.h, p0/z, z0.h, #-16 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpne.nxv8i16( %pg, + %a, + %splat) + ret %out +} + +define 
@ir_cmpne_s( %a) { +; CHECK-LABEL: ir_cmpne_s +; CHECK: cmpne p0.s, p0/z, z0.s, #15 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 15, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp ne %a, %splat + ret %out +} + +define @int_cmpne_s( %pg, %a) { +; CHECK-LABEL: int_cmpne_s +; CHECK: cmpne p0.s, p0/z, z0.s, #15 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 15, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpne.nxv4i32( %pg, + %a, + %splat) + ret %out +} + +define @ir_cmpne_d( %a) { +; CHECK-LABEL: ir_cmpne_d +; CHECK: cmpne p0.d, p0/z, z0.d, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp ne %a, %splat + ret %out +} + +define @int_cmpne_d( %pg, %a) { +; CHECK-LABEL: int_cmpne_d +; CHECK: cmpne p0.d, p0/z, z0.d, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmpne.nxv2i64( %pg, + %a, + %splat) + ret %out +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; Unsigned Comparisons ;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +; +; CMPHI +; + +define @ir_cmphi_b( %a) { +; CHECK-LABEL: ir_cmphi_b +; CHECK: cmphi p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp ugt %a, %splat + ret %out +} + +define @int_cmphi_b( %pg, %a) { +; CHECK-LABEL: int_cmphi_b +; CHECK: cmphi p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmphi.nxv16i8( %pg, + %a, + %splat) + ret %out +} + +define @ir_cmphi_h( %a) { +; CHECK-LABEL: ir_cmphi_h +; CHECK: cmphi p0.h, p0/z, z0.h, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp ugt %a, %splat + ret %out +} + +define @int_cmphi_h( %pg, %a) { +; CHECK-LABEL: int_cmphi_h +; CHECK: cmphi p0.h, p0/z, z0.h, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmphi.nxv8i16( %pg, + %a, + %splat) + ret %out +} + +define @ir_cmphi_s( %a) { +; CHECK-LABEL: ir_cmphi_s +; CHECK: cmphi p0.s, p0/z, z0.s, #68 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 68, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp ugt %a, %splat + ret %out +} + +define @int_cmphi_s( %pg, %a) { +; CHECK-LABEL: int_cmphi_s +; CHECK: cmphi p0.s, p0/z, z0.s, #68 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 68, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmphi.nxv4i32( %pg, + %a, + %splat) + ret %out +} + +define @ir_cmphi_d( %a) { +; CHECK-LABEL: ir_cmphi_d +; CHECK: cmphi p0.d, p0/z, z0.d, #127 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 127, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp ugt %a, %splat + ret %out +} + +define @int_cmphi_d( %pg, %a) { +; CHECK-LABEL: int_cmphi_d +; CHECK: cmphi p0.d, p0/z, z0.d, #127 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 127, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmphi.nxv2i64( %pg, + %a, + %splat) + ret %out +} + +; +; CMPHS +; + +define @ir_cmphs_b( 
%a) { +; CHECK-LABEL: ir_cmphs_b +; CHECK: cmphs p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp uge %a, %splat + ret %out +} + +define @int_cmphs_b( %pg, %a) { +; CHECK-LABEL: int_cmphs_b +; CHECK: cmphs p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmphs.nxv16i8( %pg, + %a, + %splat) + ret %out +} + +define @ir_cmphs_h( %a) { +; CHECK-LABEL: ir_cmphs_h +; CHECK: cmphs p0.h, p0/z, z0.h, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp uge %a, %splat + ret %out +} + +define @int_cmphs_h( %pg, %a) { +; CHECK-LABEL: int_cmphs_h +; CHECK: cmphs p0.h, p0/z, z0.h, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmphs.nxv8i16( %pg, + %a, + %splat) + ret %out +} + +define @ir_cmphs_s( %a) { +; CHECK-LABEL: ir_cmphs_s +; CHECK: cmphs p0.s, p0/z, z0.s, #68 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 68, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp uge %a, %splat + ret %out +} + +define @int_cmphs_s( %pg, %a) { +; CHECK-LABEL: int_cmphs_s +; CHECK: cmphs p0.s, p0/z, z0.s, #68 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 68, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmphs.nxv4i32( %pg, + %a, + %splat) + ret %out +} + +define @ir_cmphs_d( %a) { +; CHECK-LABEL: ir_cmphs_d +; CHECK: cmphs p0.d, p0/z, z0.d, #127 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 127, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp uge %a, %splat + ret %out +} + +define @int_cmphs_d( %pg, %a) { +; CHECK-LABEL: int_cmphs_d +; CHECK: cmphs p0.d, p0/z, z0.d, #127 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 127, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmphs.nxv2i64( %pg, + %a, + %splat) + ret %out +} + +; +; CMPLO +; + +define @ir_cmplo_b( %a) { +; CHECK-LABEL: ir_cmplo_b +; CHECK: cmplo p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp ult %a, %splat + ret %out +} + +define @int_cmplo_b( %pg, %a) { +; CHECK-LABEL: int_cmplo_b +; CHECK: cmplo p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmphi.nxv16i8( %pg, + %splat, + %a) + ret %out +} + +define @int_cmplo_h( %pg, %a) { +; CHECK-LABEL: int_cmplo_h +; CHECK: cmplo p0.h, p0/z, z0.h, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmphi.nxv8i16( %pg, + %splat, + %a) + ret %out +} + +define @ir_cmplo_s( %a) { +; CHECK-LABEL: ir_cmplo_s +; CHECK: cmplo p0.s, p0/z, z0.s, #68 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 68, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp ult %a, %splat + ret %out +} + +define @int_cmplo_s( %pg, %a) { +; CHECK-LABEL: int_cmplo_s +; CHECK: cmplo p0.s, p0/z, z0.s, #68 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 68, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call 
@llvm.aarch64.sve.cmphi.nxv4i32( %pg, + %splat, + %a) + ret %out +} + +define @ir_cmplo_d( %a) { +; CHECK-LABEL: ir_cmplo_d +; CHECK: cmplo p0.d, p0/z, z0.d, #127 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 127, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp ult %a, %splat + ret %out +} + +define @int_cmplo_d( %pg, %a) { +; CHECK-LABEL: int_cmplo_d +; CHECK: cmplo p0.d, p0/z, z0.d, #127 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 127, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmphi.nxv2i64( %pg, + %splat, + %a) + ret %out +} + +; +; CMPLS +; + +define @ir_cmpls_b( %a) { +; CHECK-LABEL: ir_cmpls_b +; CHECK: cmpls p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp ule %a, %splat + ret %out +} + +define @int_cmpls_b( %pg, %a) { +; CHECK-LABEL: int_cmpls_b +; CHECK: cmpls p0.b, p0/z, z0.b, #4 +; CHECK-NEXT: ret + %elt = insertelement undef, i8 4, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmphs.nxv16i8( %pg, + %splat, + %a) + ret %out +} + +define @ir_cmpls_h( %a) { +; CHECK-LABEL: ir_cmpls_h +; CHECK: cmpls p0.h, p0/z, z0.h, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp ule %a, %splat + ret %out +} + +define @int_cmpls_h( %pg, %a) { +; CHECK-LABEL: int_cmpls_h +; CHECK: cmpls p0.h, p0/z, z0.h, #0 +; CHECK-NEXT: ret + %elt = insertelement undef, i16 0, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmphs.nxv8i16( %pg, + %splat, + %a) + ret %out +} + +define @ir_cmpls_s( %a) { +; CHECK-LABEL: ir_cmpls_s +; CHECK: cmpls p0.s, p0/z, z0.s, #68 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 68, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp ule %a, %splat + ret %out +} + +define @int_cmpls_s( %pg, %a) { +; CHECK-LABEL: int_cmpls_s +; CHECK: cmpls p0.s, p0/z, z0.s, #68 +; CHECK-NEXT: ret + %elt = insertelement undef, i32 68, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmphs.nxv4i32( %pg, + %splat, + %a) + ret %out +} + +define @ir_cmpls_d( %a) { +; CHECK-LABEL: ir_cmpls_d +; CHECK: cmpls p0.d, p0/z, z0.d, #127 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 127, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = icmp ule %a, %splat + ret %out +} + +define @int_cmpls_d( %pg, %a) { +; CHECK-LABEL: int_cmpls_d +; CHECK: cmpls p0.d, p0/z, z0.d, #127 +; CHECK-NEXT: ret + %elt = insertelement undef, i64 127, i32 0 + %splat = shufflevector %elt, undef, zeroinitializer + %out = call @llvm.aarch64.sve.cmphs.nxv2i64( %pg, + %splat, + %a) + ret %out +} + +declare @llvm.aarch64.sve.cmpeq.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmpeq.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmpeq.nxv4i32(, , ) +declare @llvm.aarch64.sve.cmpeq.nxv2i64(, , ) + +declare @llvm.aarch64.sve.cmpge.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmpge.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmpge.nxv4i32(, , ) +declare @llvm.aarch64.sve.cmpge.nxv2i64(, , ) + +declare @llvm.aarch64.sve.cmpgt.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmpgt.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmpgt.nxv4i32(, , ) +declare @llvm.aarch64.sve.cmpgt.nxv2i64(, , ) + +declare @llvm.aarch64.sve.cmphi.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmphi.nxv8i16(, , ) +declare 
@llvm.aarch64.sve.cmphi.nxv4i32(, , ) +declare @llvm.aarch64.sve.cmphi.nxv2i64(, , ) + +declare @llvm.aarch64.sve.cmphs.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmphs.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmphs.nxv4i32(, , ) +declare @llvm.aarch64.sve.cmphs.nxv2i64(, , ) + +declare @llvm.aarch64.sve.cmpne.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmpne.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmpne.nxv4i32(, , ) +declare @llvm.aarch64.sve.cmpne.nxv2i64(, , ) Index: llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares.ll @@ -0,0 +1,645 @@ +; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s + +; +; CMPEQ +; + +define @cmpeq_b( %pg, %a, %b) { +; CHECK-LABEL: cmpeq_b: +; CHECK: cmpeq p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpeq.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @cmpeq_h( %pg, %a, %b) { +; CHECK-LABEL: cmpeq_h: +; CHECK: cmpeq p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpeq.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @cmpeq_s( %pg, %a, %b) { +; CHECK-LABEL: cmpeq_s: +; CHECK: cmpeq p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpeq.nxv4i32( %pg, + %a, + %b) + ret %out +} + +define @cmpeq_d( %pg, %a, %b) { +; CHECK-LABEL: cmpeq_d: +; CHECK: cmpeq p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpeq.nxv2i64( %pg, + %a, + %b) + ret %out +} + +define @cmpeq_wide_b( %pg, %a, %b) { +; CHECK-LABEL: cmpeq_wide_b: +; CHECK: cmpeq p0.b, p0/z, z0.b, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpeq.wide.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @cmpeq_wide_h( %pg, %a, %b) { +; CHECK-LABEL: cmpeq_wide_h: +; CHECK: cmpeq p0.h, p0/z, z0.h, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpeq.wide.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @cmpeq_wide_s( %pg, %a, %b) { +; CHECK-LABEL: cmpeq_wide_s: +; CHECK: cmpeq p0.s, p0/z, z0.s, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpeq.wide.nxv4i32( %pg, + %a, + %b) + ret %out +} + +; +; CMPGE +; + +define @cmpge_b( %pg, %a, %b) { +; CHECK-LABEL: cmpge_b: +; CHECK: cmpge p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpge.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @cmpge_h( %pg, %a, %b) { +; CHECK-LABEL: cmpge_h: +; CHECK: cmpge p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpge.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @cmpge_s( %pg, %a, %b) { +; CHECK-LABEL: cmpge_s: +; CHECK: cmpge p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpge.nxv4i32( %pg, + %a, + %b) + ret %out +} + +define @cmpge_d( %pg, %a, %b) { +; CHECK-LABEL: cmpge_d: +; CHECK: cmpge p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpge.nxv2i64( %pg, + %a, + %b) + ret %out +} + +define @cmpge_wide_b( %pg, %a, %b) { +; CHECK-LABEL: cmpge_wide_b: +; CHECK: cmpge p0.b, p0/z, z0.b, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpge.wide.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @cmpge_wide_h( %pg, %a, %b) { +; CHECK-LABEL: cmpge_wide_h: +; CHECK: cmpge p0.h, p0/z, z0.h, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpge.wide.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @cmpge_wide_s( %pg, %a, %b) { +; CHECK-LABEL: cmpge_wide_s: +; CHECK: cmpge p0.s, p0/z, z0.s, z1.d +; CHECK-NEXT: ret + %out = 
call @llvm.aarch64.sve.cmpge.wide.nxv4i32( %pg, + %a, + %b) + ret %out +} + +; +; CMPGT +; + +define @cmpgt_b( %pg, %a, %b) { +; CHECK-LABEL: cmpgt_b: +; CHECK: cmpgt p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpgt.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @cmpgt_h( %pg, %a, %b) { +; CHECK-LABEL: cmpgt_h: +; CHECK: cmpgt p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpgt.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @cmpgt_s( %pg, %a, %b) { +; CHECK-LABEL: cmpgt_s: +; CHECK: cmpgt p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpgt.nxv4i32( %pg, + %a, + %b) + ret %out +} + +define @cmpgt_d( %pg, %a, %b) { +; CHECK-LABEL: cmpgt_d: +; CHECK: cmpgt p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpgt.nxv2i64( %pg, + %a, + %b) + ret %out +} + +define @cmpgt_wide_b( %pg, %a, %b) { +; CHECK-LABEL: cmpgt_wide_b: +; CHECK: cmpgt p0.b, p0/z, z0.b, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpgt.wide.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @cmpgt_wide_h( %pg, %a, %b) { +; CHECK-LABEL: cmpgt_wide_h: +; CHECK: cmpgt p0.h, p0/z, z0.h, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpgt.wide.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @cmpgt_wide_s( %pg, %a, %b) { +; CHECK-LABEL: cmpgt_wide_s: +; CHECK: cmpgt p0.s, p0/z, z0.s, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpgt.wide.nxv4i32( %pg, + %a, + %b) + ret %out +} + +; +; CMPHI +; + +define @cmphi_b( %pg, %a, %b) { +; CHECK-LABEL: cmphi_b: +; CHECK: cmphi p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmphi.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @cmphi_h( %pg, %a, %b) { +; CHECK-LABEL: cmphi_h: +; CHECK: cmphi p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmphi.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @cmphi_s( %pg, %a, %b) { +; CHECK-LABEL: cmphi_s: +; CHECK: cmphi p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmphi.nxv4i32( %pg, + %a, + %b) + ret %out +} + +define @cmphi_d( %pg, %a, %b) { +; CHECK-LABEL: cmphi_d: +; CHECK: cmphi p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmphi.nxv2i64( %pg, + %a, + %b) + ret %out +} + +define @cmphi_wide_b( %pg, %a, %b) { +; CHECK-LABEL: cmphi_wide_b: +; CHECK: cmphi p0.b, p0/z, z0.b, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmphi.wide.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @cmphi_wide_h( %pg, %a, %b) { +; CHECK-LABEL: cmphi_wide_h: +; CHECK: cmphi p0.h, p0/z, z0.h, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmphi.wide.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @cmphi_wide_s( %pg, %a, %b) { +; CHECK-LABEL: cmphi_wide_s: +; CHECK: cmphi p0.s, p0/z, z0.s, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmphi.wide.nxv4i32( %pg, + %a, + %b) + ret %out +} + +; +; CMPHS +; + +define @cmphs_b( %pg, %a, %b) { +; CHECK-LABEL: cmphs_b: +; CHECK: cmphs p0.b, p0/z, z0.b, z1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmphs.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @cmphs_h( %pg, %a, %b) { +; CHECK-LABEL: cmphs_h: +; CHECK: cmphs p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmphs.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @cmphs_s( %pg, %a, %b) { +; CHECK-LABEL: cmphs_s: +; CHECK: cmphs p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmphs.nxv4i32( %pg, + %a, + %b) + ret %out +} + 
+define @cmphs_d( %pg, %a, %b) { +; CHECK-LABEL: cmphs_d: +; CHECK: cmphs p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmphs.nxv2i64( %pg, + %a, + %b) + ret %out +} + +define @cmphs_wide_b( %pg, %a, %b) { +; CHECK-LABEL: cmphs_wide_b: +; CHECK: cmphs p0.b, p0/z, z0.b, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmphs.wide.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @cmphs_wide_h( %pg, %a, %b) { +; CHECK-LABEL: cmphs_wide_h: +; CHECK: cmphs p0.h, p0/z, z0.h, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmphs.wide.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @cmphs_wide_s( %pg, %a, %b) { +; CHECK-LABEL: cmphs_wide_s: +; CHECK: cmphs p0.s, p0/z, z0.s, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmphs.wide.nxv4i32( %pg, + %a, + %b) + ret %out +} + +; +; CMPLE +; + +define @cmple_wide_b( %pg, %a, %b) { +; CHECK-LABEL: cmple_wide_b: +; CHECK: cmple p0.b, p0/z, z0.b, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmple.wide.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @cmple_wide_h( %pg, %a, %b) { +; CHECK-LABEL: cmple_wide_h: +; CHECK: cmple p0.h, p0/z, z0.h, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmple.wide.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @cmple_wide_s( %pg, %a, %b) { +; CHECK-LABEL: cmple_wide_s: +; CHECK: cmple p0.s, p0/z, z0.s, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmple.wide.nxv4i32( %pg, + %a, + %b) + ret %out +} + +; +; CMPLO +; + +define @cmplo_wide_b( %pg, %a, %b) { +; CHECK-LABEL: cmplo_wide_b: +; CHECK: cmplo p0.b, p0/z, z0.b, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmplo.wide.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @cmplo_wide_h( %pg, %a, %b) { +; CHECK-LABEL: cmplo_wide_h: +; CHECK: cmplo p0.h, p0/z, z0.h, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmplo.wide.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @cmplo_wide_s( %pg, %a, %b) { +; CHECK-LABEL: cmplo_wide_s: +; CHECK: cmplo p0.s, p0/z, z0.s, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmplo.wide.nxv4i32( %pg, + %a, + %b) + ret %out +} + +; +; CMPLS +; + +define @cmpls_wide_b( %pg, %a, %b) { +; CHECK-LABEL: cmpls_wide_b: +; CHECK: cmpls p0.b, p0/z, z0.b, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpls.wide.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @cmpls_wide_h( %pg, %a, %b) { +; CHECK-LABEL: cmpls_wide_h: +; CHECK: cmpls p0.h, p0/z, z0.h, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpls.wide.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @cmpls_wide_s( %pg, %a, %b) { +; CHECK-LABEL: cmpls_wide_s: +; CHECK: cmpls p0.s, p0/z, z0.s, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpls.wide.nxv4i32( %pg, + %a, + %b) + ret %out +} + +; +; CMPLT +; + +define @cmplt_wide_b( %pg, %a, %b) { +; CHECK-LABEL: cmplt_wide_b: +; CHECK: cmplt p0.b, p0/z, z0.b, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmplt.wide.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @cmplt_wide_h( %pg, %a, %b) { +; CHECK-LABEL: cmplt_wide_h: +; CHECK: cmplt p0.h, p0/z, z0.h, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmplt.wide.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @cmplt_wide_s( %pg, %a, %b) { +; CHECK-LABEL: cmplt_wide_s: +; CHECK: cmplt p0.s, p0/z, z0.s, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmplt.wide.nxv4i32( %pg, + %a, + %b) + ret %out +} + +; +; CMPNE +; + +define @cmpne_b( %pg, %a, %b) { +; CHECK-LABEL: cmpne_b: +; CHECK: cmpne p0.b, p0/z, z0.b, z1.b +; 
CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpne.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @cmpne_h( %pg, %a, %b) { +; CHECK-LABEL: cmpne_h: +; CHECK: cmpne p0.h, p0/z, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpne.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @cmpne_s( %pg, %a, %b) { +; CHECK-LABEL: cmpne_s: +; CHECK: cmpne p0.s, p0/z, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpne.nxv4i32( %pg, + %a, + %b) + ret %out +} + +define @cmpne_d( %pg, %a, %b) { +; CHECK-LABEL: cmpne_d: +; CHECK: cmpne p0.d, p0/z, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpne.nxv2i64( %pg, + %a, + %b) + ret %out +} + +define @cmpne_wide_b( %pg, %a, %b) { +; CHECK-LABEL: cmpne_wide_b: +; CHECK: cmpne p0.b, p0/z, z0.b, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpne.wide.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @cmpne_wide_h( %pg, %a, %b) { +; CHECK-LABEL: cmpne_wide_h: +; CHECK: cmpne p0.h, p0/z, z0.h, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpne.wide.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @cmpne_wide_s( %pg, %a, %b) { +; CHECK-LABEL: cmpne_wide_s: +; CHECK: cmpne p0.s, p0/z, z0.s, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.cmpne.wide.nxv4i32( %pg, + %a, + %b) + ret %out +} + +declare @llvm.aarch64.sve.cmpeq.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmpeq.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmpeq.nxv4i32(, , ) +declare @llvm.aarch64.sve.cmpeq.nxv2i64(, , ) +declare @llvm.aarch64.sve.cmpeq.wide.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmpeq.wide.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmpeq.wide.nxv4i32(, , ) + +declare @llvm.aarch64.sve.cmpge.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmpge.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmpge.nxv4i32(, , ) +declare @llvm.aarch64.sve.cmpge.nxv2i64(, , ) +declare @llvm.aarch64.sve.cmpge.wide.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmpge.wide.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmpge.wide.nxv4i32(, , ) + +declare @llvm.aarch64.sve.cmpgt.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmpgt.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmpgt.nxv4i32(, , ) +declare @llvm.aarch64.sve.cmpgt.nxv2i64(, , ) +declare @llvm.aarch64.sve.cmpgt.wide.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmpgt.wide.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmpgt.wide.nxv4i32(, , ) + +declare @llvm.aarch64.sve.cmphi.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmphi.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmphi.nxv4i32(, , ) +declare @llvm.aarch64.sve.cmphi.nxv2i64(, , ) +declare @llvm.aarch64.sve.cmphi.wide.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmphi.wide.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmphi.wide.nxv4i32(, , ) + +declare @llvm.aarch64.sve.cmphs.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmphs.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmphs.nxv4i32(, , ) +declare @llvm.aarch64.sve.cmphs.nxv2i64(, , ) +declare @llvm.aarch64.sve.cmphs.wide.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmphs.wide.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmphs.wide.nxv4i32(, , ) + +declare @llvm.aarch64.sve.cmple.wide.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmple.wide.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmple.wide.nxv4i32(, , ) + +declare @llvm.aarch64.sve.cmplo.wide.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmplo.wide.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmplo.wide.nxv4i32(, , ) + +declare @llvm.aarch64.sve.cmpls.wide.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmpls.wide.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmpls.wide.nxv4i32(, , ) + +declare 
@llvm.aarch64.sve.cmplt.wide.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmplt.wide.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmplt.wide.nxv4i32(, , ) + +declare @llvm.aarch64.sve.cmpne.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmpne.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmpne.nxv4i32(, , ) +declare @llvm.aarch64.sve.cmpne.nxv2i64(, , ) +declare @llvm.aarch64.sve.cmpne.wide.nxv16i8(, , ) +declare @llvm.aarch64.sve.cmpne.wide.nxv8i16(, , ) +declare @llvm.aarch64.sve.cmpne.wide.nxv4i32(, , )
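For reference, a minimal sketch of what the ir_*/int_* pairs in sve-intrinsics-int-compares-with-imm.ll exercise, with the scalable vector types written out in full; the function name is illustrative and mirrors ir_cmpeq_b above:

define <vscale x 16 x i1> @ir_cmpeq_b_sketch(<vscale x 16 x i8> %a) {
; An icmp against a splatted in-range immediate is expected to select to
; "cmpeq p0.b, p0/z, z0.b, #4" via the new setcc patterns in sve_int_scmp_vi.
  %elt   = insertelement <vscale x 16 x i8> undef, i8 4, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out   = icmp eq <vscale x 16 x i8> %a, %splat
  ret <vscale x 16 x i1> %out
}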