Index: llvm/lib/Target/ARM/ARMISelLowering.h
===================================================================
--- llvm/lib/Target/ARM/ARMISelLowering.h
+++ llvm/lib/Target/ARM/ARMISelLowering.h
@@ -241,6 +241,10 @@
       VMLALVAu,     // provided as low and high halves
       VMLALVAps,    // Same as VMLALVA[su] with a v4i1 predicate mask
       VMLALVApu,
+      VMINVu,       // Find minimum unsigned value of a vector and register
+      VMINVs,       // Find minimum signed value of a vector and register
+      VMAXVu,       // Find maximum unsigned value of a vector and register
+      VMAXVs,       // Find maximum signed value of a vector and register
 
       SMULWB,       // Signed multiply word by half word, bottom
       SMULWT,       // Signed multiply word by half word, top
@@ -751,6 +755,7 @@
     SDValue LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerUnsignedALUO(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerSelectCCToVectorReduction(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
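Each of the four new ISD nodes takes a scalar register and a vector, and produces the minimum (or maximum) of the scalar and every lane of the vector, which is exactly what the MVE VMINV/VMAXV instructions compute in their Rda operand. A minimal scalar model of ARMISD::VMINVu's intended semantics (illustrative only; the function name and the fixed v4i32 shape are ours, not part of the patch):

#include <algorithm>
#include <array>
#include <cstdint>

// Illustrative model of ARMISD::VMINVu on a v4i32 input: the result is
// min(Scalar, umin-reduction(Vec)), matching what MVE's VMINV computes
// when Scalar starts out in the destination register.
static uint32_t modelVMINVu(uint32_t Scalar,
                            const std::array<uint32_t, 4> &Vec) {
  uint32_t R = Scalar;
  for (uint32_t Lane : Vec)
    R = std::min(R, Lane); // VMINV folds each lane into the scalar register
  return R;
}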
Index: llvm/lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -1740,6 +1740,10 @@
   case ARMISD::VMLALVAu:      return "ARMISD::VMLALVAu";
   case ARMISD::VMLALVAps:     return "ARMISD::VMLALVAps";
   case ARMISD::VMLALVApu:     return "ARMISD::VMLALVApu";
+  case ARMISD::VMINVu:        return "ARMISD::VMINVu";
+  case ARMISD::VMINVs:        return "ARMISD::VMINVs";
+  case ARMISD::VMAXVu:        return "ARMISD::VMAXVu";
+  case ARMISD::VMAXVs:        return "ARMISD::VMAXVs";
   case ARMISD::UMAAL:         return "ARMISD::UMAAL";
   case ARMISD::UMLAL:         return "ARMISD::UMLAL";
   case ARMISD::SMLAL:         return "ARMISD::SMLAL";
@@ -5149,6 +5153,92 @@
   return false;
 }
 
+SDValue ARMTargetLowering::LowerSelectCCToVectorReduction(SDValue Op, SelectionDAG &DAG) const {
+  EVT VT = Op.getValueType();
+  SDLoc dl(Op);
+  SDValue LHS = Op.getOperand(0);
+  SDValue RHS = Op.getOperand(1);
+  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
+  SDValue TrueVal = Op.getOperand(2);
+  SDValue FalseVal = Op.getOperand(3);
+
+  unsigned Opcode = 0;
+  bool IsUnsigned = false;
+  if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMIN ||
+       FalseVal->getOpcode() == ISD::VECREDUCE_UMIN) &&
+      (CC == ISD::SETULT || CC == ISD::SETUGT)) {
+    Opcode = ARMISD::VMINVu;
+    IsUnsigned = true;
+    if (CC == ISD::SETUGT)
+      std::swap(TrueVal, FalseVal);
+  } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMIN ||
+              FalseVal->getOpcode() == ISD::VECREDUCE_SMIN) &&
+             (CC == ISD::SETLT || CC == ISD::SETGT)) {
+    Opcode = ARMISD::VMINVs;
+    if (CC == ISD::SETGT)
+      std::swap(TrueVal, FalseVal);
+  } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMAX ||
+              FalseVal->getOpcode() == ISD::VECREDUCE_UMAX) &&
+             (CC == ISD::SETUGT || CC == ISD::SETULT)) {
+    Opcode = ARMISD::VMAXVu;
+    IsUnsigned = true;
+    if (CC == ISD::SETULT)
+      std::swap(TrueVal, FalseVal);
+  } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMAX ||
+              FalseVal->getOpcode() == ISD::VECREDUCE_SMAX) &&
+             (CC == ISD::SETGT || CC == ISD::SETLT)) {
+    Opcode = ARMISD::VMAXVs;
+    if (CC == ISD::SETLT)
+      std::swap(TrueVal, FalseVal);
+  } else
+    return SDValue();
+
+  // Normalise to the right-hand side being the vector reduction.
+  switch (TrueVal->getOpcode()) {
+  case ISD::VECREDUCE_UMIN:
+  case ISD::VECREDUCE_SMIN:
+  case ISD::VECREDUCE_UMAX:
+  case ISD::VECREDUCE_SMAX:
+    std::swap(LHS, RHS);
+    std::swap(TrueVal, FalseVal);
+    break;
+  }
+
+  EVT VectorScalarType =
+      FalseVal->getOperand(0)->getValueType(0).getVectorElementType();
+
+  auto OperandsAreValid = [&](SDValue Compared, SDValue Selected) {
+    switch (Compared->getOpcode()) {
+    case ISD::AND:
+      if (!IsUnsigned)
+        return false;
+      if (Selected != Compared->getOperand(0))
+        return false;
+      if (auto Mask = dyn_cast<ConstantSDNode>(Compared->getOperand(1)))
+        return Mask->getAPIntValue().isMask(
+            VectorScalarType.getScalarSizeInBits());
+      return false;
+    case ISD::SIGN_EXTEND_INREG:
+      if (IsUnsigned)
+        return false;
+      if (Selected != Compared->getOperand(0))
+        return false;
+      EVT ExtendedType = cast<VTSDNode>(Compared->getOperand(1))->getVT();
+      return ExtendedType == VectorScalarType;
+    }
+    return Selected == Compared;
+  };
+
+  // Make sure that the values being compared are those being selected,
+  // otherwise it isn't a min/max. Sometimes the left and/or right side wrap
+  // the scalar reduction in another operation, like an AND or sign-extension,
+  // so check the first operand in those cases.
+  if (!OperandsAreValid(LHS, TrueVal) || !OperandsAreValid(RHS, FalseVal))
+    return SDValue();
+
+  return DAG.getNode(Opcode, dl, VT, LHS, FalseVal->getOperand(0));
+}
+
 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
   EVT VT = Op.getValueType();
   SDLoc dl(Op);
@@ -5186,6 +5276,10 @@
     return DAG.getNode(ISD::OR, dl, VT, SatValue, ShiftV);
   }
 
+  if (Subtarget->hasMVEIntegerOps())
+    if (SDValue Reduction = LowerSelectCCToVectorReduction(Op, DAG))
+      return Reduction;
+
   SDValue LHS = Op.getOperand(0);
   SDValue RHS = Op.getOperand(1);
   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
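For i8 and i16 element types, type legalisation performs the compare at i32 on a zero- or sign-extended copy of the value, while the select still picks the original, unextended node. That is why OperandsAreValid accepts a compared operand that wraps the selected one in an AND with an element-sized mask (unsigned) or in a SIGN_EXTEND_INREG (signed). A scalar sketch of the unsigned i8 case (hypothetical helper, not the patch's code):

#include <cstdint>

// Scalar sketch of the legalised unsigned-i8 min: the i32 compare reads
// the zero-extended (masked) copies, but the select returns the original
// i32 values, so the compared node is (and x, 0xff) while the selected
// node is plain x. OperandsAreValid matches exactly this wrapping.
static uint32_t legalisedUMin8(uint32_t X, uint32_t M) {
  uint32_t CmpL = X & 0xff; // zext i8 -> i32 for the compare
  uint32_t CmpR = M & 0xff;
  return CmpL < CmpR ? X : M; // select still uses the unmasked originals
}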
Index: llvm/lib/Target/ARM/ARMInstrMVE.td
===================================================================
--- llvm/lib/Target/ARM/ARMInstrMVE.td
+++ llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -944,6 +944,14 @@
   defm u32: MVE_VMINMAXV_p<iname, 1, isMin, MVE_v4u32, intrBaseName>;
 }
 
+def SDTVecReduceR : SDTypeProfile<1, 2, [   // Reduction of an integer and vector into an integer
+  SDTCisInt<0>, SDTCisInt<1>, SDTCisVec<2>
+]>;
+def ARMVMINVu : SDNode<"ARMISD::VMINVu", SDTVecReduceR>;
+def ARMVMINVs : SDNode<"ARMISD::VMINVs", SDTVecReduceR>;
+def ARMVMAXVu : SDNode<"ARMISD::VMAXVu", SDTVecReduceR>;
+def ARMVMAXVs : SDNode<"ARMISD::VMAXVs", SDTVecReduceR>;
+
 defm MVE_VMINV : MVE_VMINMAXV_ty<"vminv", 1, "int_arm_mve_minv">;
 defm MVE_VMAXV : MVE_VMINMAXV_ty<"vmaxv", 0, "int_arm_mve_maxv">;
 
@@ -974,6 +982,32 @@
   def : Pat<(i32 (vecreduce_umin (v4i32 MQPR:$src))),
             (i32 (MVE_VMINVu32 (t2MOVi (i32 4294967295)), $src))>;
 
+  def : Pat<(i32 (ARMVMINVu (i32 rGPR:$x), (v16i8 MQPR:$src))),
+            (i32 (MVE_VMINVu8 $x, $src))>;
+  def : Pat<(i32 (ARMVMINVu (i32 rGPR:$x), (v8i16 MQPR:$src))),
+            (i32 (MVE_VMINVu16 $x, $src))>;
+  def : Pat<(i32 (ARMVMINVu (i32 rGPR:$x), (v4i32 MQPR:$src))),
+            (i32 (MVE_VMINVu32 $x, $src))>;
+  def : Pat<(i32 (ARMVMINVs (i32 rGPR:$x), (v16i8 MQPR:$src))),
+            (i32 (MVE_VMINVs8 $x, $src))>;
+  def : Pat<(i32 (ARMVMINVs (i32 rGPR:$x), (v8i16 MQPR:$src))),
+            (i32 (MVE_VMINVs16 $x, $src))>;
+  def : Pat<(i32 (ARMVMINVs (i32 rGPR:$x), (v4i32 MQPR:$src))),
+            (i32 (MVE_VMINVs32 $x, $src))>;
+
+  def : Pat<(i32 (ARMVMAXVu (i32 rGPR:$x), (v16i8 MQPR:$src))),
+            (i32 (MVE_VMAXVu8 $x, $src))>;
+  def : Pat<(i32 (ARMVMAXVu (i32 rGPR:$x), (v8i16 MQPR:$src))),
+            (i32 (MVE_VMAXVu16 $x, $src))>;
+  def : Pat<(i32 (ARMVMAXVu (i32 rGPR:$x), (v4i32 MQPR:$src))),
+            (i32 (MVE_VMAXVu32 $x, $src))>;
+  def : Pat<(i32 (ARMVMAXVs (i32 rGPR:$x), (v16i8 MQPR:$src))),
+            (i32 (MVE_VMAXVs8 $x, $src))>;
+  def : Pat<(i32 (ARMVMAXVs (i32 rGPR:$x), (v8i16 MQPR:$src))),
+            (i32 (MVE_VMAXVs16 $x, $src))>;
+  def : Pat<(i32 (ARMVMAXVs (i32 rGPR:$x), (v4i32 MQPR:$src))),
+            (i32 (MVE_VMAXVs32 $x, $src))>;
+
 }
 
 multiclass MVE_VMINMAXAV_ty<string iname, bit isMin, string intrBase> {
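The patterns dispatch purely on the vector operand's type; the scalar operand always lives in a 32-bit GPR, so one ARMISD node per signedness covers all three element widths. The selection table for the unsigned-min patterns, restated as plain C++ (illustrative mapping with a hypothetical helper name, not generated code):

#include <cstddef>

// Illustrative mapping from vector shape to the instruction that the
// unsigned ARMVMINVu patterns select; other vector types have no pattern
// and fall back to the generic select_cc lowering.
static const char *pickVMINVu(unsigned NumElts, unsigned EltBits) {
  if (NumElts == 16 && EltBits == 8)
    return "MVE_VMINVu8";
  if (NumElts == 8 && EltBits == 16)
    return "MVE_VMINVu16";
  if (NumElts == 4 && EltBits == 32)
    return "MVE_VMINVu32";
  return nullptr; // no matching pattern
}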
Index: llvm/test/CodeGen/Thumb2/mve-vmaxv-vminv-scalar.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/Thumb2/mve-vmaxv-vminv-scalar.ll
@@ -0,0 +1,477 @@
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc i8 @uminv16i8(<16 x i8> %vec, i8 %min) {
+; CHECK-LABEL: uminv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    vminv.u8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8> %vec)
+  %cmp = icmp ult i8 %x, %min
+  %1 = select i1 %cmp, i8 %x, i8 %min
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @uminv8i16(<8 x i16> %vec, i16 %min) {
+; CHECK-LABEL: uminv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    vminv.u16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16> %vec)
+  %cmp = icmp ult i16 %x, %min
+  %1 = select i1 %cmp, i16 %x, i16 %min
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @uminv4i32(<4 x i32> %vec, i32 %min) {
+; CHECK-LABEL: uminv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %vec)
+  %cmp = icmp ult i32 %x, %min
+  %1 = select i1 %cmp, i32 %x, i32 %min
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc i8 @sminv16i8(<16 x i8> %vec, i8 %min) {
+; CHECK-LABEL: sminv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    vminv.s8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8> %vec)
+  %cmp = icmp slt i8 %x, %min
+  %1 = select i1 %cmp, i8 %x, i8 %min
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @sminv8i16(<8 x i16> %vec, i16 %min) {
+; CHECK-LABEL: sminv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    vminv.s16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16> %vec)
+  %cmp = icmp slt i16 %x, %min
+  %1 = select i1 %cmp, i16 %x, i16 %min
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @sminv4i32(<4 x i32> %vec, i32 %min) {
+; CHECK-LABEL: sminv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %vec)
+  %cmp = icmp slt i32 %x, %min
+  %1 = select i1 %cmp, i32 %x, i32 %min
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc i8 @umaxv16i8(<16 x i8> %vec, i8 %max) {
+; CHECK-LABEL: umaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    vmaxv.u8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8> %vec)
+  %cmp = icmp ugt i8 %x, %max
+  %1 = select i1 %cmp, i8 %x, i8 %max
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @umaxv8i16(<8 x i16> %vec, i16 %max) {
+; CHECK-LABEL: umaxv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    vmaxv.u16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16> %vec)
+  %cmp = icmp ugt i16 %x, %max
+  %1 = select i1 %cmp, i16 %x, i16 %max
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @umaxv4i32(<4 x i32> %vec, i32 %max) {
+; CHECK-LABEL: umaxv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %vec)
+  %cmp = icmp ugt i32 %x, %max
+  %1 = select i1 %cmp, i32 %x, i32 %max
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc i8 @smaxv16i8(<16 x i8> %vec, i8 %max) {
+; CHECK-LABEL: smaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    vmaxv.s8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %cmp = icmp sgt i8 %x, %max
+  %1 = select i1 %cmp, i8 %x, i8 %max
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @smaxv8i16(<8 x i16> %vec, i16 %max) {
+; CHECK-LABEL: smaxv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    vmaxv.s16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %vec)
+  %cmp = icmp sgt i16 %x, %max
+  %1 = select i1 %cmp, i16 %x, i16 %max
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @smaxv4i32(<4 x i32> %vec, i32 %max) {
+; CHECK-LABEL: smaxv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %vec)
+  %cmp = icmp sgt i32 %x, %max
+  %1 = select i1 %cmp, i32 %x, i32 %max
+  ret i32 %1
+}
+
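+; The commute_* tests below swap the compare and select operands
+; (select(m < x, m, x) rather than select(x < m, x, m)). The lowering
+; normalises the reduction onto the right-hand side, so these should
+; fold to the same single vminv/vmaxv sequences as the tests above.
+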
+define arm_aapcs_vfpcc i8 @commute_uminv16i8(<16 x i8> %vec, i8 %min) {
+; CHECK-LABEL: commute_uminv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    vminv.u8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8> %vec)
+  %cmp = icmp ult i8 %min, %x
+  %1 = select i1 %cmp, i8 %min, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @commute_uminv8i16(<8 x i16> %vec, i16 %min) {
+; CHECK-LABEL: commute_uminv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    vminv.u16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16> %vec)
+  %cmp = icmp ult i16 %min, %x
+  %1 = select i1 %cmp, i16 %min, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @commute_uminv4i32(<4 x i32> %vec, i32 %min) {
+; CHECK-LABEL: commute_uminv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %vec)
+  %cmp = icmp ult i32 %min, %x
+  %1 = select i1 %cmp, i32 %min, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc i8 @commute_sminv16i8(<16 x i8> %vec, i8 %min) {
+; CHECK-LABEL: commute_sminv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    vminv.s8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8> %vec)
+  %cmp = icmp slt i8 %min, %x
+  %1 = select i1 %cmp, i8 %min, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @commute_sminv8i16(<8 x i16> %vec, i16 %min) {
+; CHECK-LABEL: commute_sminv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    vminv.s16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16> %vec)
+  %cmp = icmp slt i16 %min, %x
+  %1 = select i1 %cmp, i16 %min, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @commute_sminv4i32(<4 x i32> %vec, i32 %min) {
+; CHECK-LABEL: commute_sminv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %vec)
+  %cmp = icmp slt i32 %min, %x
+  %1 = select i1 %cmp, i32 %min, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc i8 @commute_umaxv16i8(<16 x i8> %vec, i8 %max) {
+; CHECK-LABEL: commute_umaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    vmaxv.u8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8> %vec)
+  %cmp = icmp ugt i8 %max, %x
+  %1 = select i1 %cmp, i8 %max, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @commute_umaxv8i16(<8 x i16> %vec, i16 %max) {
+; CHECK-LABEL: commute_umaxv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    vmaxv.u16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16> %vec)
+  %cmp = icmp ugt i16 %max, %x
+  %1 = select i1 %cmp, i16 %max, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @commute_umaxv4i32(<4 x i32> %vec, i32 %max) {
+; CHECK-LABEL: commute_umaxv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %vec)
+  %cmp = icmp ugt i32 %max, %x
+  %1 = select i1 %cmp, i32 %max, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc i8 @commute_smaxv16i8(<16 x i8> %vec, i8 %max) {
+; CHECK-LABEL: commute_smaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    vmaxv.s8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %cmp = icmp sgt i8 %max, %x
+  %1 = select i1 %cmp, i8 %max, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @commute_smaxv8i16(<8 x i16> %vec, i16 %max) {
+; CHECK-LABEL: commute_smaxv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    vmaxv.s16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %vec)
+  %cmp = icmp sgt i16 %max, %x
+  %1 = select i1 %cmp, i16 %max, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @commute_smaxv4i32(<4 x i32> %vec, i32 %max) {
+; CHECK-LABEL: commute_smaxv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %vec)
+  %cmp = icmp sgt i32 %max, %x
+  %1 = select i1 %cmp, i32 %max, i32 %x
+  ret i32 %1
+}
+
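+; In the two mismatch_* tests the compared values are not the values
+; being selected, so the select is not a genuine min/max and must not be
+; folded: a vmaxv seeded with the smallest i8 value plus a scalar
+; compare-and-select is expected instead. The inverted_* tests that
+; follow use the opposite predicate with swapped select arms, which
+; still folds.
+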
+define arm_aapcs_vfpcc i8 @mismatch_smaxv16i8(<16 x i8> %vec, i8 %max) {
+; CHECK-LABEL: mismatch_smaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mvn r1, #127
+; CHECK-NEXT:    sxtb r3, r0
+; CHECK-NEXT:    vmaxv.s8 r1, q0
+; CHECK-NEXT:    sxtb r2, r1
+; CHECK-NEXT:    cmp r2, r3
+; CHECK-NEXT:    csel r0, r0, r1, gt
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %cmp = icmp sgt i8 %x, %max
+  %1 = select i1 %cmp, i8 %max, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i8 @mismatch2_smaxv16i8(<16 x i8> %vec, i8 %max) {
+; CHECK-LABEL: mismatch2_smaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mvn r1, #127
+; CHECK-NEXT:    sxtb r3, r0
+; CHECK-NEXT:    vmaxv.s8 r1, q0
+; CHECK-NEXT:    sxtb r2, r1
+; CHECK-NEXT:    cmp r3, r2
+; CHECK-NEXT:    csel r0, r1, r0, gt
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %cmp = icmp sgt i8 %max, %x
+  %1 = select i1 %cmp, i8 %x, i8 %max
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i8 @inverted_uminv16i8(<16 x i8> %vec, i8 %min) {
+; CHECK-LABEL: inverted_uminv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    vminv.u8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8> %vec)
+  %cmp = icmp ugt i8 %x, %min
+  %1 = select i1 %cmp, i8 %min, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @inverted_uminv8i16(<8 x i16> %vec, i16 %min) {
+; CHECK-LABEL: inverted_uminv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    vminv.u16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16> %vec)
+  %cmp = icmp ugt i16 %x, %min
+  %1 = select i1 %cmp, i16 %min, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @inverted_uminv4i32(<4 x i32> %vec, i32 %min) {
+; CHECK-LABEL: inverted_uminv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %vec)
+  %cmp = icmp ugt i32 %x, %min
+  %1 = select i1 %cmp, i32 %min, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc i8 @inverted_sminv16i8(<16 x i8> %vec, i8 %min) {
+; CHECK-LABEL: inverted_sminv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    vminv.s8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8> %vec)
+  %cmp = icmp sgt i8 %x, %min
+  %1 = select i1 %cmp, i8 %min, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @inverted_sminv8i16(<8 x i16> %vec, i16 %min) {
+; CHECK-LABEL: inverted_sminv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    vminv.s16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16> %vec)
+  %cmp = icmp sgt i16 %x, %min
+  %1 = select i1 %cmp, i16 %min, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @inverted_sminv4i32(<4 x i32> %vec, i32 %min) {
+; CHECK-LABEL: inverted_sminv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %vec)
+  %cmp = icmp sgt i32 %x, %min
+  %1 = select i1 %cmp, i32 %min, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc i8 @inverted_umaxv16i8(<16 x i8> %vec, i8 %max) {
+; CHECK-LABEL: inverted_umaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    vmaxv.u8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8> %vec)
+  %cmp = icmp ult i8 %x, %max
+  %1 = select i1 %cmp, i8 %max, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @inverted_umaxv8i16(<8 x i16> %vec, i16 %max) {
+; CHECK-LABEL: inverted_umaxv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    vmaxv.u16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16> %vec)
+  %cmp = icmp ult i16 %x, %max
+  %1 = select i1 %cmp, i16 %max, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @inverted_umaxv4i32(<4 x i32> %vec, i32 %max) {
+; CHECK-LABEL: inverted_umaxv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %vec)
+  %cmp = icmp ult i32 %x, %max
+  %1 = select i1 %cmp, i32 %max, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc i8 @inverted_smaxv16i8(<16 x i8> %vec, i8 %max) {
+; CHECK-LABEL: inverted_smaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    vmaxv.s8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %cmp = icmp slt i8 %x, %max
+  %1 = select i1 %cmp, i8 %max, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @inverted_smaxv8i16(<8 x i16> %vec, i16 %max) {
+; CHECK-LABEL: inverted_smaxv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    vmaxv.s16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %vec)
+  %cmp = icmp slt i16 %x, %max
+  %1 = select i1 %cmp, i16 %max, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @inverted_smaxv4i32(<4 x i32> %vec, i32 %max) {
+; CHECK-LABEL: inverted_smaxv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %vec)
+  %cmp = icmp slt i32 %x, %max
+  %1 = select i1 %cmp, i32 %max, i32 %x
+  ret i32 %1
+}
+
+declare i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8>)
+
+declare i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16>)
+
+declare i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32>)
+
+declare i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8>)
+
+declare i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16>)
+
+declare i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32>)
+
+declare i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8>)
+
+declare i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16>)
+
+declare i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32>)
+
+declare i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8>)
+
+declare i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16>)
+
+declare i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32>)
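Taken together, the tests pin down the intended scalar semantics of the fold. A reference model for the first test, uminv16i8, written out in C++ (an assumed-equivalent sketch for illustration; the function name mirrors the test, nothing here is part of the patch):

#include <algorithm>
#include <array>
#include <cstdint>

// Reference semantics for the uminv16i8 test: selecting between the
// unsigned-min reduction and %min based on an unsigned compare is just
// an unsigned min, which the patch folds into a single vminv.u8 seeded
// with the zero-extended scalar.
static uint8_t refUminv16i8(const std::array<uint8_t, 16> &Vec, uint8_t Min) {
  uint8_t X = *std::min_element(Vec.begin(), Vec.end()); // the reduction
  return X < Min ? X : Min;                              // == std::min(X, Min)
}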