Index: llvm/lib/Target/ARM/ARMISelLowering.h
===================================================================
--- llvm/lib/Target/ARM/ARMISelLowering.h
+++ llvm/lib/Target/ARM/ARMISelLowering.h
@@ -241,6 +241,10 @@
     VMLALVAu,     // provided as low and high halves
     VMLALVAps,    // Same as VMLALVA[su] with a v4i1 predicate mask
     VMLALVApu,
+    VMINVu,       // Find minimum unsigned value of a vector and register
+    VMINVs,       // Find minimum signed value of a vector and register
+    VMAXVu,       // Find maximum unsigned value of a vector and register
+    VMAXVs,       // Find maximum signed value of a vector and register
 
     SMULWB,       // Signed multiply word by half word, bottom
     SMULWT,       // Signed multiply word by half word, top
Index: llvm/lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -1740,6 +1740,10 @@
   case ARMISD::VMLALVAu:      return "ARMISD::VMLALVAu";
   case ARMISD::VMLALVAps:     return "ARMISD::VMLALVAps";
   case ARMISD::VMLALVApu:     return "ARMISD::VMLALVApu";
+  case ARMISD::VMINVu:        return "ARMISD::VMINVu";
+  case ARMISD::VMINVs:        return "ARMISD::VMINVs";
+  case ARMISD::VMAXVu:        return "ARMISD::VMAXVu";
+  case ARMISD::VMAXVs:        return "ARMISD::VMAXVs";
   case ARMISD::UMAAL:         return "ARMISD::UMAAL";
   case ARMISD::UMLAL:         return "ARMISD::UMLAL";
   case ARMISD::SMLAL:         return "ARMISD::SMLAL";
@@ -5181,6 +5185,57 @@
   ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FalseVal);
   ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TrueVal);
 
+  if (Subtarget->hasMVEIntegerOps()) {
+    unsigned Opcode = 0;
+    if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMIN ||
+         FalseVal->getOpcode() == ISD::VECREDUCE_UMIN) &&
+        CC == ISD::SETULT)
+      Opcode = ARMISD::VMINVu;
+    else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMIN ||
+              FalseVal->getOpcode() == ISD::VECREDUCE_SMIN) &&
+             CC == ISD::SETLT)
+      Opcode = ARMISD::VMINVs;
+    else if ((TrueVal->getOpcode() == ISD::VECREDUCE_UMAX ||
+              FalseVal->getOpcode() == ISD::VECREDUCE_UMAX) &&
+             CC == ISD::SETUGT)
+      Opcode = ARMISD::VMAXVu;
+    else if ((TrueVal->getOpcode() == ISD::VECREDUCE_SMAX ||
+              FalseVal->getOpcode() == ISD::VECREDUCE_SMAX) &&
+             CC == ISD::SETGT)
+      Opcode = ARMISD::VMAXVs;
+
+    if (Opcode) {
+      // Work on copies so that the originals are left intact if we end up
+      // not lowering to a vminv/vmaxv.
+      SDValue Left = LHS;
+      SDValue Right = RHS;
+      SDValue Scalar = TrueVal;
+      SDValue Reduction = FalseVal;
+      // Normalise so that the vector reduction is the right-hand operand.
+      switch (Scalar->getOpcode()) {
+      case ISD::VECREDUCE_UMIN:
+      case ISD::VECREDUCE_SMIN:
+      case ISD::VECREDUCE_UMAX:
+      case ISD::VECREDUCE_SMAX:
+        std::swap(Left, Right);
+        std::swap(Scalar, Reduction);
+        break;
+      }
+
+      // Make sure that the reduction is on the right side of the comparison
+      // and the scalar on the left.
+      // Sometimes the left and/or right side wraps the scalar and/or
+      // reduction in another operation, such as an AND or a sign-extension,
+      // so check the first operand as well.
+      if ((Left == Scalar ||
+           (Left->getNumOperands() && Left->getOperand(0) == Scalar)) &&
+          (Right == Reduction ||
+           (Right->getNumOperands() && Right->getOperand(0) == Reduction))) {
+        return DAG.getNode(Opcode, dl, VT, Scalar, Reduction->getOperand(0));
+      }
+    }
+  }
+
   if (Subtarget->hasV8_1MMainlineOps() && CFVal && CTVal &&
       LHS.getValueType() == MVT::i32 && RHS.getValueType() == MVT::i32) {
     unsigned TVal = CTVal->getZExtValue();
Index: llvm/lib/Target/ARM/ARMInstrMVE.td
===================================================================
--- llvm/lib/Target/ARM/ARMInstrMVE.td
+++ llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -944,6 +944,14 @@
   defm u32: MVE_VMINMAXV_p<iname, 0, isMin, MVE_v4u32, intrBaseName>;
 }
 
+def SDTVecReduceR : SDTypeProfile<1, 2, [   // Reduction of an integer and vector into an integer
+  SDTCisInt<0>, SDTCisInt<1>, SDTCisVec<2>
+]>;
+def ARMVMINVu : SDNode<"ARMISD::VMINVu", SDTVecReduceR>;
+def ARMVMINVs : SDNode<"ARMISD::VMINVs", SDTVecReduceR>;
+def ARMVMAXVu : SDNode<"ARMISD::VMAXVu", SDTVecReduceR>;
+def ARMVMAXVs : SDNode<"ARMISD::VMAXVs", SDTVecReduceR>;
+
 defm MVE_VMINV : MVE_VMINMAXV_ty<"vminv", 1, "int_arm_mve_minv">;
 defm MVE_VMAXV : MVE_VMINMAXV_ty<"vmaxv", 0, "int_arm_mve_maxv">;
 
@@ -974,6 +982,32 @@
   def : Pat<(i32 (vecreduce_umin (v4i32 MQPR:$src))),
             (i32 (MVE_VMINVu32 (t2MOVi (i32 4294967295)), $src))>;
 
+  def : Pat<(i32 (ARMVMINVu (i32 rGPR:$x), (v16i8 MQPR:$src))),
+            (i32 (MVE_VMINVu8 $x, $src))>;
+  def : Pat<(i32 (ARMVMINVu (i32 rGPR:$x), (v8i16 MQPR:$src))),
+            (i32 (MVE_VMINVu16 $x, $src))>;
+  def : Pat<(i32 (ARMVMINVu (i32 rGPR:$x), (v4i32 MQPR:$src))),
+            (i32 (MVE_VMINVu32 $x, $src))>;
+  def : Pat<(i32 (ARMVMINVs (i32 rGPR:$x), (v16i8 MQPR:$src))),
+            (i32 (MVE_VMINVs8 $x, $src))>;
+  def : Pat<(i32 (ARMVMINVs (i32 rGPR:$x), (v8i16 MQPR:$src))),
+            (i32 (MVE_VMINVs16 $x, $src))>;
+  def : Pat<(i32 (ARMVMINVs (i32 rGPR:$x), (v4i32 MQPR:$src))),
+            (i32 (MVE_VMINVs32 $x, $src))>;
+
+  def : Pat<(i32 (ARMVMAXVu (i32 rGPR:$x), (v16i8 MQPR:$src))),
+            (i32 (MVE_VMAXVu8 $x, $src))>;
+  def : Pat<(i32 (ARMVMAXVu (i32 rGPR:$x), (v8i16 MQPR:$src))),
+            (i32 (MVE_VMAXVu16 $x, $src))>;
+  def : Pat<(i32 (ARMVMAXVu (i32 rGPR:$x), (v4i32 MQPR:$src))),
+            (i32 (MVE_VMAXVu32 $x, $src))>;
+  def : Pat<(i32 (ARMVMAXVs (i32 rGPR:$x), (v16i8 MQPR:$src))),
+            (i32 (MVE_VMAXVs8 $x, $src))>;
+  def : Pat<(i32 (ARMVMAXVs (i32 rGPR:$x), (v8i16 MQPR:$src))),
+            (i32 (MVE_VMAXVs16 $x, $src))>;
+  def : Pat<(i32 (ARMVMAXVs (i32 rGPR:$x), (v4i32 MQPR:$src))),
+            (i32 (MVE_VMAXVs32 $x, $src))>;
+
 }
 
 multiclass MVE_VMINMAXAV_ty<string iname, bit isMin, string intrBaseName> {
Index: llvm/test/CodeGen/Thumb2/mve-vmaxv-vminv-scalar.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/Thumb2/mve-vmaxv-vminv-scalar.ll
@@ -0,0 +1,321 @@
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc i8 @uminv16i8(<16 x i8> %vec, i8 %min) {
+; CHECK-LABEL: uminv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8> %vec)
+  %cmp = icmp ult i8 %x, %min
+  %1 = select i1 %cmp, i8 %x, i8 %min
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @uminv8i16(<8 x i16> %vec, i16 %min) {
+; CHECK-LABEL: uminv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16> %vec)
+  %cmp = icmp ult i16 %x, %min
+  %1 = select i1 %cmp, i16 %x, i16 %min
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @uminv4i32(<4 x i32> %vec, i32 %min) {
+; CHECK-LABEL: uminv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %vec)
+  %cmp = icmp ult i32 %x, %min
+  %1 = select i1 %cmp, i32 %x, i32 %min
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc i8 @sminv16i8(<16 x i8> %vec, i8 %min) {
+; CHECK-LABEL: sminv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8> %vec)
+  %cmp = icmp slt i8 %x, %min
+  %1 = select i1 %cmp, i8 %x, i8 %min
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @sminv8i16(<8 x i16> %vec, i16 %min) {
+; CHECK-LABEL: sminv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16> %vec)
+  %cmp = icmp slt i16 %x, %min
+  %1 = select i1 %cmp, i16 %x, i16 %min
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @sminv4i32(<4 x i32> %vec, i32 %min) {
+; CHECK-LABEL: sminv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %vec)
+  %cmp = icmp slt i32 %x, %min
+  %1 = select i1 %cmp, i32 %x, i32 %min
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc i8 @umaxv16i8(<16 x i8> %vec, i8 %max) {
+; CHECK-LABEL: umaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8> %vec)
+  %cmp = icmp ugt i8 %x, %max
+  %1 = select i1 %cmp, i8 %x, i8 %max
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @umaxv8i16(<8 x i16> %vec, i16 %max) {
+; CHECK-LABEL: umaxv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16> %vec)
+  %cmp = icmp ugt i16 %x, %max
+  %1 = select i1 %cmp, i16 %x, i16 %max
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @umaxv4i32(<4 x i32> %vec, i32 %max) {
+; CHECK-LABEL: umaxv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %vec)
+  %cmp = icmp ugt i32 %x, %max
+  %1 = select i1 %cmp, i32 %x, i32 %max
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc i8 @smaxv16i8(<16 x i8> %vec, i8 %max) {
+; CHECK-LABEL: smaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %cmp = icmp sgt i8 %x, %max
+  %1 = select i1 %cmp, i8 %x, i8 %max
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @smaxv8i16(<8 x i16> %vec, i16 %max) {
+; CHECK-LABEL: smaxv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %vec)
+  %cmp = icmp sgt i16 %x, %max
+  %1 = select i1 %cmp, i16 %x, i16 %max
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @smaxv4i32(<4 x i32> %vec, i32 %max) {
+; CHECK-LABEL: smaxv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %vec)
+  %cmp = icmp sgt i32 %x, %max
+  %1 = select i1 %cmp, i32 %x, i32 %max
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc i8 @commute_uminv16i8(<16 x i8> %vec, i8 %min) {
+; CHECK-LABEL: commute_uminv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8> %vec)
+  %cmp = icmp ult i8 %min, %x
+  %1 = select i1 %cmp, i8 %min, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @commute_uminv8i16(<8 x i16> %vec, i16 %min) {
+; CHECK-LABEL: commute_uminv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16> %vec)
+  %cmp = icmp ult i16 %min, %x
+  %1 = select i1 %cmp, i16 %min, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @commute_uminv4i32(<4 x i32> %vec, i32 %min) {
+; CHECK-LABEL: commute_uminv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32> %vec)
+  %cmp = icmp ult i32 %min, %x
+  %1 = select i1 %cmp, i32 %min, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc i8 @commute_sminv16i8(<16 x i8> %vec, i8 %min) {
+; CHECK-LABEL: commute_sminv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8> %vec)
+  %cmp = icmp slt i8 %min, %x
+  %1 = select i1 %cmp, i8 %min, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @commute_sminv8i16(<8 x i16> %vec, i16 %min) {
+; CHECK-LABEL: commute_sminv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16> %vec)
+  %cmp = icmp slt i16 %min, %x
+  %1 = select i1 %cmp, i16 %min, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @commute_sminv4i32(<4 x i32> %vec, i32 %min) {
+; CHECK-LABEL: commute_sminv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vminv.s32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %vec)
+  %cmp = icmp slt i32 %min, %x
+  %1 = select i1 %cmp, i32 %min, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc i8 @commute_umaxv16i8(<16 x i8> %vec, i8 %max) {
+; CHECK-LABEL: commute_umaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8> %vec)
+  %cmp = icmp ugt i8 %max, %x
+  %1 = select i1 %cmp, i8 %max, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @commute_umaxv8i16(<8 x i16> %vec, i16 %max) {
+; CHECK-LABEL: commute_umaxv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16> %vec)
+  %cmp = icmp ugt i16 %max, %x
+  %1 = select i1 %cmp, i16 %max, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @commute_umaxv4i32(<4 x i32> %vec, i32 %max) {
+; CHECK-LABEL: commute_umaxv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.u32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32> %vec)
+  %cmp = icmp ugt i32 %max, %x
+  %1 = select i1 %cmp, i32 %max, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc i8 @commute_smaxv16i8(<16 x i8> %vec, i8 %max) {
+; CHECK-LABEL: commute_smaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s8 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %cmp = icmp sgt i8 %max, %x
+  %1 = select i1 %cmp, i8 %max, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i16 @commute_smaxv8i16(<8 x i16> %vec, i16 %max) {
+; CHECK-LABEL: commute_smaxv8i16:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s16 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %vec)
+  %cmp = icmp sgt i16 %max, %x
+  %1 = select i1 %cmp, i16 %max, i16 %x
+  ret i16 %1
+}
+
+define arm_aapcs_vfpcc i32 @commute_smaxv4i32(<4 x i32> %vec, i32 %max) {
+; CHECK-LABEL: commute_smaxv4i32:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    vmaxv.s32 r0, q0
+; CHECK-NEXT:    bx lr
+  %x = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %vec)
+  %cmp = icmp sgt i32 %max, %x
+  %1 = select i1 %cmp, i32 %max, i32 %x
+  ret i32 %1
+}
+
+define arm_aapcs_vfpcc i8 @mismatch_smaxv16i8(<16 x i8> %vec, i8 %max) {
+; CHECK-LABEL: mismatch_smaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mvn r1, #127
+; CHECK-NEXT:    sxtb r3, r0
+; CHECK-NEXT:    vmaxv.s8 r1, q0
+; CHECK-NEXT:    sxtb r2, r1
+; CHECK-NEXT:    cmp r2, r3
+; CHECK-NEXT:    csel r0, r0, r1, gt
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %cmp = icmp sgt i8 %x, %max
+  %1 = select i1 %cmp, i8 %max, i8 %x
+  ret i8 %1
+}
+
+define arm_aapcs_vfpcc i8 @mismatch2_smaxv16i8(<16 x i8> %vec, i8 %max) {
+; CHECK-LABEL: mismatch2_smaxv16i8:
+; CHECK:       @ %bb.0:
+; CHECK-NEXT:    mvn r1, #127
+; CHECK-NEXT:    sxtb r3, r0
+; CHECK-NEXT:    vmaxv.s8 r1, q0
+; CHECK-NEXT:    sxtb r2, r1
+; CHECK-NEXT:    cmp r3, r2
+; CHECK-NEXT:    csel r0, r1, r0, gt
+; CHECK-NEXT:    bx lr
+  %x = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %vec)
+  %cmp = icmp sgt i8 %max, %x
+  %1 = select i1 %cmp, i8 %x, i8 %max
+  ret i8 %1
+}
+
+declare i8 @llvm.experimental.vector.reduce.umin.v16i8(<16 x i8>)
+
+declare i16 @llvm.experimental.vector.reduce.umin.v8i16(<8 x i16>)
+
+declare i32 @llvm.experimental.vector.reduce.umin.v4i32(<4 x i32>)
+
+declare i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8>)
+
+declare i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16>)
+
+declare i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32>)
+
+declare i8 @llvm.experimental.vector.reduce.umax.v16i8(<16 x i8>)
+
+declare i16 @llvm.experimental.vector.reduce.umax.v8i16(<8 x i16>)
+
+declare i32 @llvm.experimental.vector.reduce.umax.v4i32(<4 x i32>)
+
+declare i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8>)
+
+declare i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16>)
+
+declare i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32>)
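
For context, the select-of-vecreduce pattern that LowerSELECT_CC matches above is what the middle end typically emits for a scalar min/max reduction loop that folds a running start value into the reduced result. The C sketch below shows one such loop; the function name and the compile flags mentioned in the comment are illustrative assumptions, not taken from this patch.

#include <stdint.h>

/* A minimal sketch, assuming MVE auto-vectorization is enabled (for
 * example with something like
 *   clang --target=arm-none-eabihf -mcpu=cortex-m55 -O2).
 * The loop body becomes a vector umin over <16 x i8> lanes, the loop
 * exit becomes a horizontal @llvm.experimental.vector.reduce.umin, and
 * folding in the running minimum "m" becomes the icmp ult + select
 * that this patch lowers to a single vminv.u8. */
uint8_t min_u8(const uint8_t *a, unsigned n, uint8_t init) {
  uint8_t m = init;
  for (unsigned i = 0; i < n; i++)
    m = a[i] < m ? a[i] : m; /* scalar umin: the icmp + select pair */
  return m;
}

The commute_* tests above cover the same idiom with the comparison operands written the other way around, which is why the lowering code normalises the reduction to the right-hand side before matching.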