Index: llvm/lib/Target/ARM/ARMISelLowering.h
===================================================================
--- llvm/lib/Target/ARM/ARMISelLowering.h
+++ llvm/lib/Target/ARM/ARMISelLowering.h
@@ -241,6 +241,10 @@
     VMLALVAu,     //   provided as low and high halves
     VMLALVAps,    // Same as VMLALVA[su] with a v4i1 predicate mask
     VMLALVApu,
+    VMINVu,       // Find minimum unsigned value of a vector and register
+    VMINVs,       // Find minimum signed value of a vector and register
+    VMAXVu,       // Find maximum unsigned value of a vector and register
+    VMAXVs,       // Find maximum signed value of a vector and register
 
     SMULWB,       // Signed multiply word by half word, bottom
     SMULWT,       // Signed multiply word by half word, top
Index: llvm/lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -987,6 +987,8 @@
     setTargetDAGCombine(ISD::SMAX);
     setTargetDAGCombine(ISD::UMAX);
     setTargetDAGCombine(ISD::FP_EXTEND);
+    setTargetDAGCombine(ISD::SELECT);
+    setTargetDAGCombine(ISD::SELECT_CC);
   }
 
   if (!Subtarget->hasFP64()) {
@@ -1740,6 +1742,10 @@
   case ARMISD::VMLALVAu:      return "ARMISD::VMLALVAu";
   case ARMISD::VMLALVAps:     return "ARMISD::VMLALVAps";
   case ARMISD::VMLALVApu:     return "ARMISD::VMLALVApu";
+  case ARMISD::VMINVu:        return "ARMISD::VMINVu";
+  case ARMISD::VMINVs:        return "ARMISD::VMINVs";
+  case ARMISD::VMAXVu:        return "ARMISD::VMAXVu";
+  case ARMISD::VMAXVs:        return "ARMISD::VMAXVs";
   case ARMISD::UMAAL:         return "ARMISD::UMAAL";
   case ARMISD::UMLAL:         return "ARMISD::UMLAL";
   case ARMISD::SMLAL:         return "ARMISD::SMLAL";
@@ -12093,6 +12099,111 @@
   return SDValue();
 }
 
+static SDValue PerformSELECTCombine(SDNode *N,
+                                    TargetLowering::DAGCombinerInfo &DCI,
+                                    const ARMSubtarget *Subtarget) {
+  if (!Subtarget->hasMVEIntegerOps())
+    return SDValue();
+
+  SDLoc dl(N);
+  SDValue SetCC;
+  SDValue LHS;
+  SDValue RHS;
+  ISD::CondCode CC;
+  SDValue TrueVal;
+  SDValue FalseVal;
+
+  if (N->getOpcode() == ISD::SELECT &&
+      N->getOperand(0)->getOpcode() == ISD::SETCC) {
+    SetCC = N->getOperand(0);
+    LHS = SetCC->getOperand(0);
+    RHS = SetCC->getOperand(1);
+    CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
+    TrueVal = N->getOperand(1);
+    FalseVal = N->getOperand(2);
+  } else if (N->getOpcode() == ISD::SELECT_CC) {
+    LHS = N->getOperand(0);
+    RHS = N->getOperand(1);
+    CC = cast<CondCodeSDNode>(N->getOperand(4))->get();
+    TrueVal = N->getOperand(2);
+    FalseVal = N->getOperand(3);
+  } else {
+    return SDValue();
+  }
+
+  unsigned int Opcode = 0;
+  if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMIN ||
+       FalseVal->getOpcode() == ISD::VECREDUCE_UMIN) &&
+      (CC == ISD::SETULT || CC == ISD::SETUGT)) {
+    Opcode = ARMISD::VMINVu;
+    if (CC == ISD::SETUGT)
+      std::swap(TrueVal, FalseVal);
+  } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMIN ||
+              FalseVal->getOpcode() == ISD::VECREDUCE_SMIN) &&
+             (CC == ISD::SETLT || CC == ISD::SETGT)) {
+    Opcode = ARMISD::VMINVs;
+    if (CC == ISD::SETGT)
+      std::swap(TrueVal, FalseVal);
+  } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMAX ||
+              FalseVal->getOpcode() == ISD::VECREDUCE_UMAX) &&
+             (CC == ISD::SETUGT || CC == ISD::SETULT)) {
+    Opcode = ARMISD::VMAXVu;
+    if (CC == ISD::SETULT)
+      std::swap(TrueVal, FalseVal);
+  } else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMAX ||
+              FalseVal->getOpcode() == ISD::VECREDUCE_SMAX) &&
+             (CC == ISD::SETGT || CC == ISD::SETLT)) {
+    Opcode = ARMISD::VMAXVs;
+    if (CC == ISD::SETLT)
+      std::swap(TrueVal, FalseVal);
+  } else
+    return SDValue();
+
+  // Normalise to the right hand side being the vector reduction
+  switch (TrueVal->getOpcode()) {
+  case ISD::VECREDUCE_UMIN:
+  case ISD::VECREDUCE_SMIN:
+  case ISD::VECREDUCE_UMAX:
+  case ISD::VECREDUCE_SMAX:
+    std::swap(LHS, RHS);
+    std::swap(TrueVal, FalseVal);
+    break;
+  }
+
+  EVT VectorType = FalseVal->getOperand(0)->getValueType(0);
+
+  if (VectorType != MVT::v16i8 && VectorType != MVT::v8i16 &&
+      VectorType != MVT::v4i32)
+    return SDValue();
+
+  EVT VectorScalarType = VectorType.getVectorElementType();
+
+  // The values being selected must also be the ones being compared
+  if (TrueVal != LHS || FalseVal != RHS)
+    return SDValue();
+
+  EVT LeftType = LHS->getValueType(0);
+  EVT RightType = RHS->getValueType(0);
+
+  // The types must match the reduced type too
+  if (LeftType != VectorScalarType || RightType != VectorScalarType)
+    return SDValue();
+
+  // Legalise the scalar to an i32
+  if (VectorScalarType != MVT::i32)
+    LHS = DCI.DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
+
+  // Generate the reduction as an i32 for legalisation purposes
+  auto Reduction =
+      DCI.DAG.getNode(Opcode, dl, MVT::i32, LHS, RHS->getOperand(0));
+
+  // The result isn't actually an i32 so truncate it back to its original type
+  if (VectorScalarType != MVT::i32)
+    Reduction = DCI.DAG.getNode(ISD::TRUNCATE, dl, VectorScalarType, Reduction);
+
+  return Reduction;
+}
+
 static SDValue PerformVSELECTCombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
@@ -16049,6 +16160,8 @@
                                              DAGCombinerInfo &DCI) const {
   switch (N->getOpcode()) {
   default: break;
+  case ISD::SELECT_CC:
+  case ISD::SELECT:     return PerformSELECTCombine(N, DCI, Subtarget);
   case ISD::VSELECT:    return PerformVSELECTCombine(N, DCI, Subtarget);
   case ISD::ABS:        return PerformABSCombine(N, DCI, Subtarget);
   case ARMISD::ADDE:    return PerformADDECombine(N, DCI, Subtarget);
Index: llvm/lib/Target/ARM/ARMInstrMVE.td
===================================================================
--- llvm/lib/Target/ARM/ARMInstrMVE.td
+++ llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -944,6 +944,14 @@
   defm u32: MVE_VMINMAXV_p;
 }
 
+def SDTVecReduceR : SDTypeProfile<1, 2, [   // Reduction of an integer and vector into an integer
+  SDTCisInt<0>, SDTCisInt<1>, SDTCisVec<2>
+]>;
+def ARMVMINVu : SDNode<"ARMISD::VMINVu", SDTVecReduceR>;
+def ARMVMINVs : SDNode<"ARMISD::VMINVs", SDTVecReduceR>;
+def ARMVMAXVu : SDNode<"ARMISD::VMAXVu", SDTVecReduceR>;
+def ARMVMAXVs : SDNode<"ARMISD::VMAXVs", SDTVecReduceR>;
+
 defm MVE_VMINV : MVE_VMINMAXV_ty<"vminv", 1, "int_arm_mve_minv">;
 defm MVE_VMAXV : MVE_VMINMAXV_ty<"vmaxv", 0, "int_arm_mve_maxv">;
 
@@ -974,6 +982,32 @@
   def : Pat<(i32 (vecreduce_umin (v4i32 MQPR:$src))),
             (i32 (MVE_VMINVu32 (t2MOVi (i32 4294967295)), $src))>;
 
+  def : Pat<(i32 (ARMVMINVu (i32 rGPR:$x), (v16i8 MQPR:$src))),
+            (i32 (MVE_VMINVu8 $x, $src))>;
+  def : Pat<(i32 (ARMVMINVu (i32 rGPR:$x), (v8i16 MQPR:$src))),
+            (i32 (MVE_VMINVu16 $x, $src))>;
+  def : Pat<(i32 (ARMVMINVu (i32 rGPR:$x), (v4i32 MQPR:$src))),
+            (i32 (MVE_VMINVu32 $x, $src))>;
+  def : Pat<(i32 (ARMVMINVs (i32 rGPR:$x), (v16i8 MQPR:$src))),
+            (i32 (MVE_VMINVs8 $x, $src))>;
+  def : Pat<(i32 (ARMVMINVs (i32 rGPR:$x), (v8i16 MQPR:$src))),
+            (i32 (MVE_VMINVs16 $x, $src))>;
+  def : Pat<(i32 (ARMVMINVs (i32 rGPR:$x), (v4i32 MQPR:$src))),
+            (i32 (MVE_VMINVs32 $x, $src))>;
+
+  def : Pat<(i32 (ARMVMAXVu (i32 rGPR:$x), (v16i8 MQPR:$src))),
+            (i32 (MVE_VMAXVu8 $x, $src))>;
+  def : Pat<(i32 (ARMVMAXVu (i32 rGPR:$x), (v8i16 MQPR:$src))),
+            (i32 (MVE_VMAXVu16 $x, $src))>;
+  def : Pat<(i32 (ARMVMAXVu (i32 rGPR:$x), (v4i32 MQPR:$src))),
+            (i32 (MVE_VMAXVu32 $x, $src))>;
+  def : Pat<(i32 (ARMVMAXVs (i32 rGPR:$x), (v16i8 MQPR:$src))),
+            (i32 (MVE_VMAXVs8 $x, $src))>;
+  def : Pat<(i32 (ARMVMAXVs (i32 rGPR:$x), (v8i16 MQPR:$src))),
+            (i32 (MVE_VMAXVs16 $x, $src))>;
+  def : Pat<(i32 (ARMVMAXVs (i32 rGPR:$x), (v4i32 MQPR:$src))),
+            (i32 (MVE_VMAXVs32 $x, $src))>;
+
 }
 
 multiclass MVE_VMINMAXAV_ty {
Index: llvm/test/CodeGen/Thumb2/mve-vmaxv-vminv-scalar.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/Thumb2/mve-vmaxv-vminv-scalar.ll
@@ -0,0 +1,647 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc zeroext i8 @uminv16i8(<16 x i8> %vec, i8 zeroext %min) {
+; CHECK-LABEL: uminv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u8 r0, q0
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8> %vec)
+  %cmp = icmp ult i8 %x, %min
+  %1 = select i1 %cmp, i8 %x, i8 %min
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc zeroext i16 @uminv8i16(<8 x i16> %vec, i16 zeroext %min) {
+; CHECK-LABEL: uminv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16> %vec)
+  %cmp = icmp ult i16 %x, %min
+  %1 = select i1 %cmp, i16 %x, i16 %min
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @uminv4i32(<4 x i32> %vec, i32 %min) {
+; CHECK-LABEL: uminv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %vec)
+  %cmp = icmp ult i32 %x, %min
+  %1 = select i1 %cmp, i32 %x, i32 %min
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc signext i8 @sminv16i8(<16 x i8> %vec, i8 signext %min) {
+; CHECK-LABEL: sminv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s8 r0, q0
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8> %vec)
+  %cmp = icmp slt i8 %x, %min
+  %1 = select i1 %cmp, i8 %x, i8 %min
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc signext i16 @sminv8i16(<8 x i16> %vec, i16 signext %min) {
+; CHECK-LABEL: sminv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s16 r0, q0
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16> %vec)
+  %cmp = icmp slt i16 %x, %min
+  %1 = select i1 %cmp, i16 %x, i16 %min
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @sminv4i32(<4 x i32> %vec, i32 %min) {
+; CHECK-LABEL: sminv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %vec)
+  %cmp = icmp slt i32 %x, %min
+  %1 = select i1 %cmp, i32 %x, i32 %min
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc zeroext i8 @umaxv16i8(<16 x i8> %vec, i8 zeroext %max) {
+; CHECK-LABEL: umaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u8 r0, q0
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8> %vec)
+  %cmp = icmp ugt i8 %x, %max
+  %1 = select i1 %cmp, i8 %x, i8 %max
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc zeroext i16 @umaxv8i16(<8 x i16> %vec, i16 zeroext %max) {
+; CHECK-LABEL: umaxv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16> %vec)
+  %cmp = icmp ugt i16 %x, %max
+  %1 = select i1 %cmp, i16 %x, i16 %max
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @umaxv4i32(<4 x i32> %vec, i32 %max) {
+; CHECK-LABEL: umaxv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %vec)
+  %cmp = icmp ugt i32 %x, %max
+  %1 = select i1 %cmp, i32 %x, i32 %max
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc signext i8 @smaxv16i8(<16 x i8> %vec, i8 signext %max) {
+; CHECK-LABEL: smaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s8 r0, q0
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %cmp = icmp sgt i8 %x, %max
+  %1 = select i1 %cmp, i8 %x, i8 %max
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc signext i16 @smaxv8i16(<8 x i16> %vec, i16 signext %max) {
+; CHECK-LABEL: smaxv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s16 r0, q0
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %vec)
+  %cmp = icmp sgt i16 %x, %max
+  %1 = select i1 %cmp, i16 %x, i16 %max
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @smaxv4i32(<4 x i32> %vec, i32 %max) {
+; CHECK-LABEL: smaxv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %vec)
+  %cmp = icmp sgt i32 %x, %max
+  %1 = select i1 %cmp, i32 %x, i32 %max
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc zeroext i8 @commute_uminv16i8(<16 x i8> %vec, i8 zeroext %min) {
+; CHECK-LABEL: commute_uminv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u8 r0, q0
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8> %vec)
+  %cmp = icmp ult i8 %min, %x
+  %1 = select i1 %cmp, i8 %min, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc zeroext i16 @commute_uminv8i16(<8 x i16> %vec, i16 zeroext %min) {
+; CHECK-LABEL: commute_uminv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16> %vec)
+  %cmp = icmp ult i16 %min, %x
+  %1 = select i1 %cmp, i16 %min, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @commute_uminv4i32(<4 x i32> %vec, i32 %min) {
+; CHECK-LABEL: commute_uminv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %vec)
+  %cmp = icmp ult i32 %min, %x
+  %1 = select i1 %cmp, i32 %min, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc signext i8 @commute_sminv16i8(<16 x i8> %vec, i8 signext %min) {
+; CHECK-LABEL: commute_sminv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s8 r0, q0
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8> %vec)
+  %cmp = icmp slt i8 %min, %x
+  %1 = select i1 %cmp, i8 %min, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc signext i16 @commute_sminv8i16(<8 x i16> %vec, i16 signext %min) {
+; CHECK-LABEL: commute_sminv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s16 r0, q0
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16> %vec)
+  %cmp = icmp slt i16 %min, %x
+  %1 = select i1 %cmp, i16 %min, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @commute_sminv4i32(<4 x i32> %vec, i32 %min) {
+; CHECK-LABEL: commute_sminv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %vec)
+  %cmp = icmp slt i32 %min, %x
+  %1 = select i1 %cmp, i32 %min, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc zeroext i8 @commute_umaxv16i8(<16 x i8> %vec, i8 zeroext %max) {
+; CHECK-LABEL: commute_umaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u8 r0, q0
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8> %vec)
+  %cmp = icmp ugt i8 %max, %x
+  %1 = select i1 %cmp, i8 %max, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc zeroext i16 @commute_umaxv8i16(<8 x i16> %vec, i16 zeroext %max) {
+; CHECK-LABEL: commute_umaxv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16> %vec)
+  %cmp = icmp ugt i16 %max, %x
+  %1 = select i1 %cmp, i16 %max, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @commute_umaxv4i32(<4 x i32> %vec, i32 %max) {
+; CHECK-LABEL: commute_umaxv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %vec)
+  %cmp = icmp ugt i32 %max, %x
+  %1 = select i1 %cmp, i32 %max, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc signext i8 @commute_smaxv16i8(<16 x i8> %vec, i8 signext %max) {
+; CHECK-LABEL: commute_smaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s8 r0, q0
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %cmp = icmp sgt i8 %max, %x
+  %1 = select i1 %cmp, i8 %max, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc signext i16 @commute_smaxv8i16(<8 x i16> %vec, i16 signext %max) {
+; CHECK-LABEL: commute_smaxv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s16 r0, q0
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %vec)
+  %cmp = icmp sgt i16 %max, %x
+  %1 = select i1 %cmp, i16 %max, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @commute_smaxv4i32(<4 x i32> %vec, i32 %max) {
+; CHECK-LABEL: commute_smaxv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %vec)
+  %cmp = icmp sgt i32 %max, %x
+  %1 = select i1 %cmp, i32 %max, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc signext i8 @mismatch_smaxv16i8(<16 x i8> %vec, i8 signext %max) {
+; CHECK-LABEL: mismatch_smaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mvn r1, #127
+; CHECK-NEXT:    vmaxv.s8 r1, q0
+; CHECK-NEXT:    sxtb r2, r1
+; CHECK-NEXT:    cmp r2, r0
+; CHECK-NEXT:    csel r0, r0, r1, gt
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %cmp = icmp sgt i8 %x, %max
+  %1 = select i1 %cmp, i8 %max, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc signext i8 @mismatch2_smaxv16i8(<16 x i8> %vec, i8 signext %max) {
+; CHECK-LABEL: mismatch2_smaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mvn r1, #127
+; CHECK-NEXT:    vmaxv.s8 r1, q0
+; CHECK-NEXT:    sxtb r2, r1
+; CHECK-NEXT:    cmp r0, r2
+; CHECK-NEXT:    csel r0, r1, r0, gt
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %cmp = icmp sgt i8 %max, %x
+  %1 = select i1 %cmp, i8 %x, i8 %max
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc zeroext i8 @inverted_uminv16i8(<16 x i8> %vec, i8 zeroext %min) {
+; CHECK-LABEL: inverted_uminv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u8 r0, q0
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8> %vec)
+  %cmp = icmp ugt i8 %x, %min
+  %1 = select i1 %cmp, i8 %min, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc zeroext i16 @inverted_uminv8i16(<8 x i16> %vec, i16 zeroext %min) {
+; CHECK-LABEL: inverted_uminv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16> %vec)
+  %cmp = icmp ugt i16 %x, %min
+  %1 = select i1 %cmp, i16 %min, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @inverted_uminv4i32(<4 x i32> %vec, i32 %min) {
+; CHECK-LABEL: inverted_uminv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %vec)
+  %cmp = icmp ugt i32 %x, %min
+  %1 = select i1 %cmp, i32 %min, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc signext i8 @inverted_sminv16i8(<16 x i8> %vec, i8 signext %min) {
+; CHECK-LABEL: inverted_sminv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s8 r0, q0
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8> %vec)
+  %cmp = icmp sgt i8 %x, %min
+  %1 = select i1 %cmp, i8 %min, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc signext i16 @inverted_sminv8i16(<8 x i16> %vec, i16 signext %min) {
+; CHECK-LABEL: inverted_sminv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s16 r0, q0
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16> %vec)
+  %cmp = icmp sgt i16 %x, %min
+  %1 = select i1 %cmp, i16 %min, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @inverted_sminv4i32(<4 x i32> %vec, i32 %min) {
+; CHECK-LABEL: inverted_sminv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %vec)
+  %cmp = icmp sgt i32 %x, %min
+  %1 = select i1 %cmp, i32 %min, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc zeroext i8 @inverted_umaxv16i8(<16 x i8> %vec, i8 zeroext %max) {
+; CHECK-LABEL: inverted_umaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u8 r0, q0
+; CHECK-NEXT:    uxtb r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8> %vec)
+  %cmp = icmp ult i8 %x, %max
+  %1 = select i1 %cmp, i8 %max, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc zeroext i16 @inverted_umaxv8i16(<8 x i16> %vec, i16 zeroext %max) {
+; CHECK-LABEL: inverted_umaxv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u16 r0, q0
+; CHECK-NEXT:    uxth r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16> %vec)
+  %cmp = icmp ult i16 %x, %max
+  %1 = select i1 %cmp, i16 %max, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @inverted_umaxv4i32(<4 x i32> %vec, i32 %max) {
+; CHECK-LABEL: inverted_umaxv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %vec)
+  %cmp = icmp ult i32 %x, %max
+  %1 = select i1 %cmp, i32 %max, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc signext i8 @inverted_smaxv16i8(<16 x i8> %vec, i8 signext %max) {
+; CHECK-LABEL: inverted_smaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s8 r0, q0
+; CHECK-NEXT:    sxtb r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %cmp = icmp slt i8 %x, %max
+  %1 = select i1 %cmp, i8 %max, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc signext i16 @inverted_smaxv8i16(<8 x i16> %vec, i16 signext %max) {
+; CHECK-LABEL: inverted_smaxv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s16 r0, q0
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %vec)
+  %cmp = icmp slt i16 %x, %max
+  %1 = select i1 %cmp, i16 %max, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @inverted_smaxv4i32(<4 x i32> %vec, i32 %max) {
+; CHECK-LABEL: inverted_smaxv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %vec)
+  %cmp = icmp slt i32 %x, %max
+  %1 = select i1 %cmp, i32 %max, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc signext i16 @trunc_and_sext(<8 x i16> %vec, i32 %max) #1 {
+; CHECK-LABEL: trunc_and_sext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movw r1, #32768
+; CHECK-NEXT:    movt r1, #65535
+; CHECK-NEXT:    vmaxv.s16 r1, q0
+; CHECK-NEXT:    sxth r2, r1
+; CHECK-NEXT:    cmp r0, r2
+; CHECK-NEXT:    csel r0, r0, r1, gt
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %vec)
+  %xs = sext i16 %x to i32
+  %cmp = icmp sgt i32 %max, %xs
+  %mt = trunc i32 %max to i16
+  %1 = select i1 %cmp, i16 %mt, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc signext i16 @trunc_and_zext(<8 x i16> %vec, i32 %max) #1 {
+; CHECK-LABEL: trunc_and_zext:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    movs r1, #0
+; CHECK-NEXT:    vmaxv.u16 r1, q0
+; CHECK-NEXT:    uxth r2, r1
+; CHECK-NEXT:    cmp r0, r2
+; CHECK-NEXT:    csel r0, r0, r1, gt
+; CHECK-NEXT:    sxth r0, r0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16> %vec)
+  %xs = zext i16 %x to i32
+  %cmp = icmp sgt i32 %max, %xs
+  %mt = trunc i32 %max to i16
+  %1 = select i1 %cmp, i16 %mt, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i64 @uminv2i64(<2 x i64> %vec, i64 %min) {
+; CHECK-LABEL: uminv2i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov lr, s1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    cmp lr, r12
+; CHECK-NEXT:    csel r4, r2, r3, lo
+; CHECK-NEXT:    cmp r2, r3
+; CHECK-NEXT:    csel r2, r2, r3, lo
+; CHECK-NEXT:    cmp lr, r12
+; CHECK-NEXT:    csel r5, r2, r4, eq
+; CHECK-NEXT:    csel r3, lr, r12, lo
+; CHECK-NEXT:    subs r2, r5, r0
+; CHECK-NEXT:    mov.w r4, #0
+; CHECK-NEXT:    sbcs.w r2, r3, r1
+; CHECK-NEXT:    it lo
+; CHECK-NEXT:    movlo r4, #1
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    csel r0, r5, r0, ne
+; CHECK-NEXT:    csel r1, r3, r1, ne
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+  %x = call i64 @llvm.experimental.vector.reduce.umin.v2i64(<2 x i64> %vec)
+  %cmp = icmp ult i64 %x, %min
+  %1 = select i1 %cmp, i64 %x, i64 %min
+  ret i64 %1
+}
+
+define arm_aapcs_vfpcc i64 @sminv2i64(<2 x i64> %vec, i64 %min) {
+; CHECK-LABEL: sminv2i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov lr, s1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    cmp lr, r12
+; CHECK-NEXT:    csel r4, r2, r3, lt
+; CHECK-NEXT:    cmp r2, r3
+; CHECK-NEXT:    csel r2, r2, r3, lo
+; CHECK-NEXT:    cmp lr, r12
+; CHECK-NEXT:    csel r5, r2, r4, eq
+; CHECK-NEXT:    csel r3, lr, r12, lt
+; CHECK-NEXT:    subs r2, r5, r0
+; CHECK-NEXT:    mov.w r4, #0
+; CHECK-NEXT:    sbcs.w r2, r3, r1
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r4, #1
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    csel r0, r5, r0, ne
+; CHECK-NEXT:    csel r1, r3, r1, ne
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+  %x = call i64 @llvm.experimental.vector.reduce.smin.v2i64(<2 x i64> %vec)
+  %cmp = icmp slt i64 %x, %min
+  %1 = select i1 %cmp, i64 %x, i64 %min
+  ret i64 %1
+}
+
+define arm_aapcs_vfpcc i64 @umaxv2i64(<2 x i64> %vec, i64 %max) {
+; CHECK-LABEL: umaxv2i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov lr, s1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    cmp lr, r12
+; CHECK-NEXT:    csel r4, r2, r3, hi
+; CHECK-NEXT:    cmp r2, r3
+; CHECK-NEXT:    csel r2, r2, r3, hi
+; CHECK-NEXT:    cmp lr, r12
+; CHECK-NEXT:    csel r5, r2, r4, eq
+; CHECK-NEXT:    csel r3, lr, r12, hi
+; CHECK-NEXT:    subs r2, r0, r5
+; CHECK-NEXT:    mov.w r4, #0
+; CHECK-NEXT:    sbcs.w r2, r1, r3
+; CHECK-NEXT:    it lo
+; CHECK-NEXT:    movlo r4, #1
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    csel r0, r5, r0, ne
+; CHECK-NEXT:    csel r1, r3, r1, ne
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+  %x = call i64 @llvm.experimental.vector.reduce.umax.v2i64(<2 x i64> %vec)
+  %cmp = icmp ugt i64 %x, %max
+  %1 = select i1 %cmp, i64 %x, i64 %max
+  ret i64 %1
+}
+
+define arm_aapcs_vfpcc i64 @smaxv2i64(<2 x i64> %vec, i64 %max) {
+; CHECK-LABEL: smaxv2i64:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    .save {r4, r5, r7, lr}
+; CHECK-NEXT:    push {r4, r5, r7, lr}
+; CHECK-NEXT:    vmov r12, s3
+; CHECK-NEXT:    vmov lr, s1
+; CHECK-NEXT:    vmov r2, s0
+; CHECK-NEXT:    vmov r3, s2
+; CHECK-NEXT:    cmp lr, r12
+; CHECK-NEXT:    csel r4, r2, r3, gt
+; CHECK-NEXT:    cmp r2, r3
+; CHECK-NEXT:    csel r2, r2, r3, hi
+; CHECK-NEXT:    cmp lr, r12
+; CHECK-NEXT:    csel r5, r2, r4, eq
+; CHECK-NEXT:    csel r3, lr, r12, gt
+; CHECK-NEXT:    subs r2, r0, r5
+; CHECK-NEXT:    mov.w r4, #0
+; CHECK-NEXT:    sbcs.w r2, r1, r3
+; CHECK-NEXT:    it lt
+; CHECK-NEXT:    movlt r4, #1
+; CHECK-NEXT:    cmp r4, #0
+; CHECK-NEXT:    csel r0, r5, r0, ne
+; CHECK-NEXT:    csel r1, r3, r1, ne
+; CHECK-NEXT:    pop {r4, r5, r7, pc}
+  %x = call i64 @llvm.experimental.vector.reduce.smax.v2i64(<2 x i64> %vec)
+  %cmp = icmp sgt i64 %x, %max
+  %1 = select i1 %cmp, i64 %x, i64 %max
+  ret i64 %1
+}
+
+declare i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8>)
+
+declare i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16>)
+
+declare i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32>)
+
+declare i64 @llvm.experimental.vector.reduce.umin.v2i64(<2 x i64>)
+
+declare i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8>)
+
+declare i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16>)
+
+declare i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32>)
+
+declare i64 @llvm.experimental.vector.reduce.smin.v2i64(<2 x i64>)
+
+declare i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8>)
+
+declare i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16>)
+
+declare i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32>)
+
+declare i64 @llvm.experimental.vector.reduce.umax.v2i64(<2 x i64>)
+
+declare i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8>)
+
+declare i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16>)
+
+declare i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32>)
+
+declare i64 @llvm.experimental.vector.reduce.smax.v2i64(<2 x i64>)
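
For reference, a sketch of the DAG-level rewrite PerformSELECTCombine performs, traced through the uminv4i32 test above (the t1/t2/t3 value numbers and t_vec/t_min operand names are illustrative only, not actual SelectionDAG dump output):

  ; before the combine:
  ;   t1: i32 = vecreduce_umin t_vec
  ;   t2: i1  = setcc t1, t_min, setult
  ;   t3: i32 = select t2, t1, t_min
  ; after the combine (scalar operand first, as in SDTVecReduceR):
  ;   t3: i32 = ARMISD::VMINVu t_min, t_vec
  ; after instruction selection, via the new ARMVMINVu pattern:
  ;   vminv.u32 r0, q0    @ r0 = min, q0 = vec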