Index: llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -3092,7 +3092,7 @@
  if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
    int64_t ImmVal = CNode->getSExtValue();
    SDLoc DL(N);
-    if (ImmVal >= -127 && ImmVal < 127) {
+    if (ImmVal >= -128 && ImmVal < 128) {
      Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32);
      return true;
    }
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.h
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -55,6 +55,10 @@
  // Arithmetic instructions
  SDIV_PRED,
  UDIV_PRED,
+  SMIN_PRED,
+  UMIN_PRED,
+  SMAX_PRED,
+  UMAX_PRED,

  // Arithmetic instructions which write flags.
  ADDS,
@@ -785,8 +789,8 @@
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDUPQLane(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerDIV(SDValue Op, SelectionDAG &DAG,
-                   unsigned NewOp) const;
+  SDValue LowerToPredicatedOp(SDValue Op, SelectionDAG &DAG,
+                              unsigned NewOp) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -188,10 +188,6 @@
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
-      setOperationAction(ISD::SMAX, VT, Legal);
-      setOperationAction(ISD::UMAX, VT, Legal);
-      setOperationAction(ISD::SMIN, VT, Legal);
-      setOperationAction(ISD::UMIN, VT, Legal);
    }

    for (auto VT :
@@ -887,6 +883,10 @@
      setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
      setOperationAction(ISD::SDIV, VT, Custom);
      setOperationAction(ISD::UDIV, VT, Custom);
+      setOperationAction(ISD::SMIN, VT, Custom);
+      setOperationAction(ISD::UMIN, VT, Custom);
+      setOperationAction(ISD::SMAX, VT, Custom);
+      setOperationAction(ISD::UMAX, VT, Custom);
    }
  }
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
@@ -1285,6 +1285,10 @@
  case AArch64ISD::TLSDESC_CALLSEQ: return "AArch64ISD::TLSDESC_CALLSEQ";
  case AArch64ISD::SDIV_PRED:       return "AArch64ISD::SDIV_PRED";
  case AArch64ISD::UDIV_PRED:       return "AArch64ISD::UDIV_PRED";
+  case AArch64ISD::SMIN_PRED:       return "AArch64ISD::SMIN_PRED";
+  case AArch64ISD::UMIN_PRED:       return "AArch64ISD::UMIN_PRED";
+  case AArch64ISD::SMAX_PRED:       return "AArch64ISD::SMAX_PRED";
+  case AArch64ISD::UMAX_PRED:       return "AArch64ISD::UMAX_PRED";
  case AArch64ISD::ADC:             return "AArch64ISD::ADC";
  case AArch64ISD::SBC:             return "AArch64ISD::SBC";
  case AArch64ISD::ADDS:            return "AArch64ISD::ADDS";
@@ -3348,9 +3352,17 @@
  case ISD::EXTRACT_SUBVECTOR:
    return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::SDIV:
-    return LowerDIV(Op, DAG, AArch64ISD::SDIV_PRED);
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::SDIV_PRED);
  case ISD::UDIV:
-    return LowerDIV(Op, DAG, AArch64ISD::UDIV_PRED);
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::UDIV_PRED);
+  case ISD::SMIN:
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMIN_PRED);
+  case ISD::UMIN:
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMIN_PRED);
+  case ISD::SMAX:
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMAX_PRED);
+  case ISD::UMAX:
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMAX_PRED);
  case ISD::SRA:
  case ISD::SRL:
  case ISD::SHL:
@@ -7657,7 +7669,7 @@
  return DAG.getNode(ISD::BITCAST, DL, VT, TBL);
}

-SDValue AArch64TargetLowering::LowerDIV(SDValue Op,
+SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
                                        SelectionDAG &DAG,
                                        unsigned NewOp) const {
  EVT VT = Op.getValueType();
@@ -11391,6 +11403,18 @@
  case Intrinsic::aarch64_sve_udiv:
    return DAG.getNode(AArch64ISD::UDIV_PRED, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2), N->getOperand(3));
+  case Intrinsic::aarch64_sve_smin:
+    return DAG.getNode(AArch64ISD::SMIN_PRED, SDLoc(N), N->getValueType(0),
+                       N->getOperand(1), N->getOperand(2), N->getOperand(3));
+  case Intrinsic::aarch64_sve_umin:
+    return DAG.getNode(AArch64ISD::UMIN_PRED, SDLoc(N), N->getValueType(0),
+                       N->getOperand(1), N->getOperand(2), N->getOperand(3));
+  case Intrinsic::aarch64_sve_smax:
+    return DAG.getNode(AArch64ISD::SMAX_PRED, SDLoc(N), N->getValueType(0),
+                       N->getOperand(1), N->getOperand(2), N->getOperand(3));
+  case Intrinsic::aarch64_sve_umax:
+    return DAG.getNode(AArch64ISD::UMAX_PRED, SDLoc(N), N->getValueType(0),
+                       N->getOperand(1), N->getOperand(2), N->getOperand(3));
  case Intrinsic::aarch64_sve_sel:
    return DAG.getNode(ISD::VSELECT, SDLoc(N), N->getValueType(0),
                       N->getOperand(1), N->getOperand(2), N->getOperand(3));
Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -145,13 +145,17 @@
def AArch64lasta : SDNode<"AArch64ISD::LASTA", SDT_AArch64Reduce>;
def AArch64lastb : SDNode<"AArch64ISD::LASTB", SDT_AArch64Reduce>;

-def SDT_AArch64DIV : SDTypeProfile<1, 3, [
+def SDT_AArch64Arith : SDTypeProfile<1, 3, [
  SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisVec<3>,
  SDTCVecEltisVT<1,i1>, SDTCisSameAs<2,3>
]>;

-def AArch64sdiv_pred : SDNode<"AArch64ISD::SDIV_PRED", SDT_AArch64DIV>;
-def AArch64udiv_pred : SDNode<"AArch64ISD::UDIV_PRED", SDT_AArch64DIV>;
+def AArch64sdiv_pred : SDNode<"AArch64ISD::SDIV_PRED", SDT_AArch64Arith>;
+def AArch64udiv_pred : SDNode<"AArch64ISD::UDIV_PRED", SDT_AArch64Arith>;
+def AArch64smin_pred : SDNode<"AArch64ISD::SMIN_PRED", SDT_AArch64Arith>;
+def AArch64umin_pred : SDNode<"AArch64ISD::UMIN_PRED", SDT_AArch64Arith>;
+def AArch64smax_pred : SDNode<"AArch64ISD::SMAX_PRED", SDT_AArch64Arith>;
+def AArch64umax_pred : SDNode<"AArch64ISD::UMAX_PRED", SDT_AArch64Arith>;

def SDT_AArch64ReduceWithInit : SDTypeProfile<1, 3, [SDTCisVec<1>, SDTCisVec<3>]>;
def AArch64clasta_n : SDNode<"AArch64ISD::CLASTA_N", SDT_AArch64ReduceWithInit>;
@@ -227,10 +231,10 @@
  defm EOR_ZI : sve_int_log_imm<0b01, "eor", "eon", xor>;
  defm AND_ZI : sve_int_log_imm<0b10, "and", "bic", and>;

-  defm SMAX_ZI : sve_int_arith_imm1<0b00, "smax", smax>;
-  defm SMIN_ZI : sve_int_arith_imm1<0b10, "smin", smin>;
-  defm UMAX_ZI : sve_int_arith_imm1_unsigned<0b01, "umax", umax>;
-  defm UMIN_ZI : sve_int_arith_imm1_unsigned<0b11, "umin", umin>;
+  defm SMAX_ZI : sve_int_arith_imm1<0b00, "smax", AArch64smax_pred>;
+  defm SMIN_ZI : sve_int_arith_imm1<0b10, "smin", AArch64smin_pred>;
+  defm UMAX_ZI : sve_int_arith_imm1_unsigned<0b01, "umax", AArch64umax_pred>;
+  defm UMIN_ZI : sve_int_arith_imm1_unsigned<0b11, "umin", AArch64umin_pred>;

  defm MUL_ZI   : sve_int_arith_imm2<"mul", mul>;
  defm MUL_ZPmZ : sve_int_bin_pred_arit_2<0b000, "mul", int_aarch64_sve_mul>;
@@ -275,10 +279,10 @@
  defm FABS_ZPmZ : sve_int_un_pred_arit_1_fp<0b100, "fabs", int_aarch64_sve_fabs>;
  defm FNEG_ZPmZ : sve_int_un_pred_arit_1_fp<0b101, "fneg", int_aarch64_sve_fneg>;

-  defm SMAX_ZPmZ : sve_int_bin_pred_arit_1<0b000, "smax", int_aarch64_sve_smax>;
-  defm UMAX_ZPmZ : sve_int_bin_pred_arit_1<0b001, "umax", int_aarch64_sve_umax>;
-  defm SMIN_ZPmZ : sve_int_bin_pred_arit_1<0b010, "smin", int_aarch64_sve_smin>;
-  defm UMIN_ZPmZ : sve_int_bin_pred_arit_1<0b011, "umin", int_aarch64_sve_umin>;
+  defm SMAX_ZPmZ : sve_int_bin_pred_arit_1<0b000, "smax", AArch64smax_pred>;
+  defm UMAX_ZPmZ : sve_int_bin_pred_arit_1<0b001, "umax", AArch64umax_pred>;
+  defm SMIN_ZPmZ : sve_int_bin_pred_arit_1<0b010, "smin", AArch64smin_pred>;
+  defm UMIN_ZPmZ : sve_int_bin_pred_arit_1<0b011, "umin", AArch64umin_pred>;

  defm SABD_ZPmZ : sve_int_bin_pred_arit_1<0b100, "sabd", int_aarch64_sve_sabd>;
  defm UABD_ZPmZ : sve_int_bin_pred_arit_1<0b101, "uabd", int_aarch64_sve_uabd>;
Index: llvm/lib/Target/AArch64/SVEInstrFormats.td
===================================================================
--- llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -324,6 +324,11 @@
  : Pat<(vt (op (vt zprty:$Op1), (vt (AArch64dup (it (cpx i32:$imm)))))),
        (inst $Op1, i32:$imm)>;

+class SVE_1_Op_Imm_Arith_Pred_Pat<ValueType vt, ValueType pt, SDPatternOperator op,
+                                  ZPRRegOp zprty, ValueType it, ComplexPattern cpx,
+                                  Instruction inst>
+  : Pat<(vt (op (pt (AArch64ptrue 31)), (vt zprty:$Op1), (vt (AArch64dup (it (cpx i32:$imm)))))),
+        (inst $Op1, i32:$imm)>;
+
class SVE_1_Op_Imm_Log_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
                           ValueType it, ComplexPattern cpx, Instruction inst>
  : Pat<(vt (op (vt zprty:$Op1), (vt (AArch64dup (it (cpx i64:$imm)))))),
        (inst $Op1, i64:$imm)>;
@@ -3840,10 +3845,10 @@
  def _S : sve_int_arith_imm<0b10, { 0b1010, opc }, asm, ZPR32, simm8>;
  def _D : sve_int_arith_imm<0b11, { 0b1010, opc }, asm, ZPR64, simm8>;

-  def : SVE_1_Op_Imm_Arith_Pat(NAME # _B)>;
-  def : SVE_1_Op_Imm_Arith_Pat(NAME # _H)>;
-  def : SVE_1_Op_Imm_Arith_Pat(NAME # _S)>;
-  def : SVE_1_Op_Imm_Arith_Pat(NAME # _D)>;
+  def : SVE_1_Op_Imm_Arith_Pred_Pat(NAME # _B)>;
+  def : SVE_1_Op_Imm_Arith_Pred_Pat(NAME # _H)>;
+  def : SVE_1_Op_Imm_Arith_Pred_Pat(NAME # _S)>;
+  def : SVE_1_Op_Imm_Arith_Pred_Pat(NAME # _D)>;
}

multiclass sve_int_arith_imm1_unsigned<bits<2> opc, string asm, SDPatternOperator op> {
@@ -3852,10 +3857,10 @@
  def _S : sve_int_arith_imm<0b10, { 0b1010, opc }, asm, ZPR32, imm0_255>;
  def _D : sve_int_arith_imm<0b11, { 0b1010, opc }, asm, ZPR64, imm0_255>;

-  def : SVE_1_Op_Imm_Arith_Pat(NAME # _B)>;
-  def : SVE_1_Op_Imm_Arith_Pat(NAME # _H)>;
-  def : SVE_1_Op_Imm_Arith_Pat(NAME # _S)>;
-  def : SVE_1_Op_Imm_Arith_Pat(NAME # _D)>;
+  def : SVE_1_Op_Imm_Arith_Pred_Pat(NAME # _B)>;
+  def : SVE_1_Op_Imm_Arith_Pred_Pat(NAME # _H)>;
+  def : SVE_1_Op_Imm_Arith_Pred_Pat(NAME # _S)>;
+  def : SVE_1_Op_Imm_Arith_Pred_Pat(NAME # _D)>;
}

multiclass sve_int_arith_imm2<string asm, SDPatternOperator op> {
Index: llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
===================================================================
--- llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
+++ llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
@@ -32,8 +32,8 @@
  ret %div
}

-define <vscale x 2 x i32> @sdiv_widen_i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
-; CHECK-LABEL: @sdiv_widen_i32
+define <vscale x 2 x i32> @sdiv_promote_i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: @sdiv_promote_i32
; CHECK-DAG: ptrue p0.d
; CHECK-DAG: sxtw z1.d, p0/m, z1.d
; CHECK-DAG: sxtw z0.d, p0/m, z0.d
@@ -85,8 +85,8 @@
  ret %div
}

-define <vscale x 2 x i32> @udiv_widen_i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
-; CHECK-LABEL: @udiv_widen_i32
+define <vscale x 2 x i32> @udiv_promote_i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: @udiv_promote_i32
; CHECK-DAG: ptrue p0.d
; CHECK-DAG: and z1.d, z1.d, #0xffffffff
; CHECK-DAG: and z0.d, z0.d, #0xffffffff
@@ -105,3 +105,179 @@
  %div = udiv %a, %b
  ret %div
}
+
+;
+; SMIN
+;
+
+define <vscale x 16 x i8> @smin_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+; CHECK-LABEL: @smin_i8
+; CHECK-DAG: ptrue p0.b
+; CHECK-DAG: smin z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+  %cmp = icmp slt <vscale x 16 x i8> %a, %b
+  %min = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b
+  ret <vscale x 16 x i8> %min
+}
+
+define <vscale x 8 x i16> @smin_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: @smin_i16
+; CHECK-DAG: ptrue p0.h
+; CHECK-DAG: smin z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %cmp = icmp slt <vscale x 8 x i16> %a, %b
+  %min = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b
+  ret <vscale x 8 x i16> %min
+}
+
+define <vscale x 4 x i32> @smin_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: smin_i32:
+; CHECK-DAG: ptrue p0.s
+; CHECK-DAG: smin z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+  %cmp = icmp slt <vscale x 4 x i32> %a, %b
+  %min = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b
+  ret <vscale x 4 x i32> %min
+}
+
+define <vscale x 2 x i64> @smin_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: smin_i64:
+; CHECK-DAG: ptrue p0.d
+; CHECK-DAG: smin z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+  %cmp = icmp slt <vscale x 2 x i64> %a, %b
+  %min = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b
+  ret <vscale x 2 x i64> %min
+}
+
+;
+; UMIN
+;
+
+define <vscale x 16 x i8> @umin_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+; CHECK-LABEL: @umin_i8
+; CHECK-DAG: ptrue p0.b
+; CHECK-DAG: umin z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+  %cmp = icmp ult <vscale x 16 x i8> %a, %b
+  %min = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b
+  ret <vscale x 16 x i8> %min
+}
+
+define <vscale x 8 x i16> @umin_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: @umin_i16
+; CHECK-DAG: ptrue p0.h
+; CHECK-DAG: umin z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %cmp = icmp ult <vscale x 8 x i16> %a, %b
+  %min = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b
+  ret <vscale x 8 x i16> %min
+}
+
+define <vscale x 4 x i32> @umin_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: umin_i32:
+; CHECK-DAG: ptrue p0.s
+; CHECK-DAG: umin z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+  %cmp = icmp ult <vscale x 4 x i32> %a, %b
+  %min = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b
+  ret <vscale x 4 x i32> %min
+}
+
+define <vscale x 2 x i64> @umin_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: umin_i64:
+; CHECK-DAG: ptrue p0.d
+; CHECK-DAG: umin z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+  %cmp = icmp ult <vscale x 2 x i64> %a, %b
+  %min = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b
+  ret <vscale x 2 x i64> %min
+}
+
+;
+; SMAX
+;
+
+define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+; CHECK-LABEL: @smax_i8
+; CHECK-DAG: ptrue p0.b
+; CHECK-DAG: smax z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+  %cmp = icmp sgt <vscale x 16 x i8> %a, %b
+  %min = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b
+  ret <vscale x 16 x i8> %min
+}
+
+define <vscale x 8 x i16> @smax_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: @smax_i16
+; CHECK-DAG: ptrue p0.h
+; CHECK-DAG: smax z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %cmp = icmp sgt <vscale x 8 x i16> %a, %b
+  %min = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b
+  ret <vscale x 8 x i16> %min
+}
+
+define <vscale x 4 x i32> @smax_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: smax_i32:
+; CHECK-DAG: ptrue p0.s
+; CHECK-DAG: smax z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+  %cmp = icmp sgt <vscale x 4 x i32> %a, %b
+  %min = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b
+  ret <vscale x 4 x i32> %min
+}
+
+define <vscale x 2 x i64> @smax_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: smax_i64:
+; CHECK-DAG: ptrue p0.d
+; CHECK-DAG: smax z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+  %cmp = icmp sgt <vscale x 2 x i64> %a, %b
+  %min = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b
+  ret <vscale x 2 x i64> %min
+}
+
+;
+; UMAX
+;
+
+define <vscale x 16 x i8> @umax_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+; CHECK-LABEL: @umax_i8
+; CHECK-DAG: ptrue p0.b
+; CHECK-DAG: umax z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+  %cmp = icmp ugt <vscale x 16 x i8> %a, %b
+  %min = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b
+  ret <vscale x 16 x i8> %min
+}
+
+define <vscale x 8 x i16> @umax_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: @umax_i16
+; CHECK-DAG: ptrue p0.h
+; CHECK-DAG: umax z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %cmp = icmp ugt <vscale x 8 x i16> %a, %b
+  %min = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b
+  ret <vscale x 8 x i16> %min
+}
+
+define <vscale x 4 x i32> @umax_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: umax_i32:
+; CHECK-DAG: ptrue p0.s
+; CHECK-DAG: umax z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+  %cmp = icmp ugt <vscale x 4 x i32> %a, %b
+  %min = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b
+  ret <vscale x 4 x i32> %min
+}
+
+define <vscale x 2 x i64> @umax_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: umax_i64:
+; CHECK-DAG: ptrue p0.d
+; CHECK-DAG: umax z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+  %cmp = icmp ugt <vscale x 2 x i64> %a, %b
+  %min = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b
+  ret <vscale x 2 x i64> %min
+}
Index: llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
===================================================================
--- llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
+++ llvm/test/CodeGen/AArch64/sve-intrinsics-int-arith-imm.ll
@@ -1,5 +1,221 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

+; SMAX
+
+define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: smax_i8:
+; CHECK: smax z0.b, z0.b, #-128
+; CHECK-NEXT: ret
+  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 -128, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                <vscale x 16 x i8> %a,
+                                                                <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @smax_i16(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: smax_i16:
+; CHECK: smax z0.h, z0.h, #127
+; CHECK-NEXT: ret
+  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 127, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                <vscale x 8 x i16> %a,
+                                                                <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @smax_i32(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: smax_i32:
+; CHECK: smax z0.s, z0.s, #-128
+; CHECK-NEXT: ret
+  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 -128, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                <vscale x 4 x i32> %a,
+                                                                <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @smax_i64(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: smax_i64:
+; CHECK: smax z0.d, z0.d, #127
+; CHECK-NEXT: ret
+  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 127, i64 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                <vscale x 2 x i64> %a,
+                                                                <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+; SMIN
+
+define <vscale x 16 x i8> @smin_i8(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: smin_i8:
+; CHECK: smin z0.b, z0.b, #127
+; CHECK-NEXT: ret
+  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 127, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smin.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                <vscale x 16 x i8> %a,
+                                                                <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @smin_i16(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: smin_i16:
+; CHECK: smin z0.h, z0.h, #-128
+; CHECK-NEXT: ret
+  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 -128, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                <vscale x 8 x i16> %a,
+                                                                <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @smin_i32(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: smin_i32:
+; CHECK: smin z0.s, z0.s, #127
+; CHECK-NEXT: ret
+  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 127, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                <vscale x 4 x i32> %a,
+                                                                <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @smin_i64(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: smin_i64:
+; CHECK: smin z0.d, z0.d, #-128
+; CHECK-NEXT: ret
+  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 -128, i64 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                <vscale x 2 x i64> %a,
+                                                                <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+; UMAX
+
+define <vscale x 16 x i8> @umax_i8(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: umax_i8:
+; CHECK: umax z0.b, z0.b, #0
+; CHECK-NEXT: ret
+  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 0, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umax.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                <vscale x 16 x i8> %a,
+                                                                <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @umax_i16(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: umax_i16:
+; CHECK: umax z0.h, z0.h, #255
+; CHECK-NEXT: ret
+  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 255, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                <vscale x 8 x i16> %a,
+                                                                <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @umax_i32(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: umax_i32:
+; CHECK: umax z0.s, z0.s, #0
+; CHECK-NEXT: ret
+  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 0, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                <vscale x 4 x i32> %a,
+                                                                <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @umax_i64(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: umax_i64:
+; CHECK: umax z0.d, z0.d, #255
+; CHECK-NEXT: ret
+  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i64 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                <vscale x 2 x i64> %a,
+                                                                <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+; UMIN
+
+define <vscale x 16 x i8> @umin_i8(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: umin_i8:
+; CHECK: umin z0.b, z0.b, #255
+; CHECK-NEXT: ret
+  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 255, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umin.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                <vscale x 16 x i8> %a,
+                                                                <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @umin_i16(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: umin_i16:
+; CHECK: umin z0.h, z0.h, #0
+; CHECK-NEXT: ret
+  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                <vscale x 8 x i16> %a,
+                                                                <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @umin_i32(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: umin_i32:
+; CHECK: umin z0.s, z0.s, #255
+; CHECK-NEXT: ret
+  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 255, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                <vscale x 4 x i32> %a,
+                                                                <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @umin_i64(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: umin_i64:
+; CHECK: umin z0.d, z0.d, #0
+; CHECK-NEXT: ret
+  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 0, i64 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                <vscale x 2 x i64> %a,
+                                                                <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
; SQADD

define <vscale x 16 x i8> @sqadd_b_lowimm(<vscale x 16 x i8> %a) {
@@ -336,3 +552,28 @@
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.smin.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.umax.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.umin.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 %pattern)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 %pattern)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 %pattern)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 %pattern)
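Note (illustrative, not part of the patch): the diff renames LowerDIV to LowerToPredicatedOp and routes ISD::SMIN/UMIN/SMAX/UMAX through it, but the function body itself is not shown above. The C++ below is a hedged sketch of what such a helper typically does, assuming the existing AArch64ISD::PTRUE node and the (predicate, op1, op2) operand order declared by SDT_AArch64Arith; the real implementation in the tree may differ.

// Hypothetical sketch only -- the actual body is not visible in this diff.
// Lowers an unpredicated vector op (e.g. ISD::SMIN) to its predicated
// AArch64ISD::*_PRED form by materialising an all-active governing predicate.
SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
                                                   SelectionDAG &DAG,
                                                   unsigned NewOp) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // Predicate type with one i1 per vector lane, e.g. nxv4i1 for nxv4i32.
  EVT PredVT =
      EVT::getVectorVT(*DAG.getContext(), MVT::i1, VT.getVectorElementCount());

  // ptrue with pattern 31 (ALL): every lane active, mirroring the
  // (AArch64ptrue 31) used by the new SVE_1_Op_Imm_Arith_Pred_Pat pattern.
  SDValue Pg = DAG.getNode(AArch64ISD::PTRUE, DL, PredVT,
                           DAG.getTargetConstant(31, DL, MVT::i32));

  // SDT_AArch64Arith nodes take (predicate, op1, op2).
  return DAG.getNode(NewOp, DL, VT, Pg, Op.getOperand(0), Op.getOperand(1));
}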