diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -346,6 +346,8 @@
   STRICT_UINT_TO_FP_VL,
   STRICT_VFCVT_RTZ_X_F_VL,
   STRICT_VFCVT_RTZ_XU_F_VL,
+  STRICT_FMAXNUM_VL,
+  STRICT_FMINNUM_VL,
 
   // WARNING: Do not add anything in the end unless you want the node to
   // have memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE all
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -812,7 +812,8 @@
       setOperationAction({ISD::STRICT_FP_EXTEND, ISD::STRICT_FP_ROUND}, VT,
                          Custom);
       setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
-                          ISD::STRICT_FDIV, ISD::STRICT_FSQRT, ISD::STRICT_FMA},
+                          ISD::STRICT_FDIV, ISD::STRICT_FSQRT, ISD::STRICT_FMA,
+                          ISD::STRICT_FMAXNUM, ISD::STRICT_FMINNUM},
                          VT, Legal);
     };
 
@@ -1040,7 +1041,8 @@
                            Custom);
        setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB,
                            ISD::STRICT_FMUL, ISD::STRICT_FDIV,
-                           ISD::STRICT_FSQRT, ISD::STRICT_FMA},
+                           ISD::STRICT_FSQRT, ISD::STRICT_FMA,
+                           ISD::STRICT_FMAXNUM, ISD::STRICT_FMINNUM},
                            VT, Custom);
      }
@@ -4568,6 +4570,12 @@
     return lowerToScalableOp(Op, DAG, RISCVISD::STRICT_FSQRT_VL);
   case ISD::STRICT_FMA:
     return lowerToScalableOp(Op, DAG, RISCVISD::STRICT_VFMADD_VL);
+  case ISD::STRICT_FMINNUM:
+    return lowerToScalableOp(Op, DAG, RISCVISD::STRICT_FMINNUM_VL,
+                             /*HasMergeOp*/ true);
+  case ISD::STRICT_FMAXNUM:
+    return lowerToScalableOp(Op, DAG, RISCVISD::STRICT_FMAXNUM_VL,
+                             /*HasMergeOp*/ true);
   case ISD::MGATHER:
   case ISD::VP_GATHER:
     return lowerMaskedGather(Op, DAG);
@@ -14202,6 +14210,8 @@
   NODE_NAME_CASE(STRICT_UINT_TO_FP_VL)
   NODE_NAME_CASE(STRICT_VFCVT_RTZ_X_F_VL)
   NODE_NAME_CASE(STRICT_VFCVT_RTZ_XU_F_VL)
+  NODE_NAME_CASE(STRICT_FMAXNUM_VL)
+  NODE_NAME_CASE(STRICT_FMINNUM_VL)
   NODE_NAME_CASE(VWMUL_VL)
   NODE_NAME_CASE(VWMULU_VL)
   NODE_NAME_CASE(VWMULSU_VL)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -1044,8 +1044,8 @@
 }
 
 // 13.11. Vector Floating-Point MIN/MAX Instructions
-defm : VPatBinaryFPSDNode_VV_VF<fminnum, "PseudoVFMIN">;
-defm : VPatBinaryFPSDNode_VV_VF<fmaxnum, "PseudoVFMAX">;
+defm : VPatBinaryFPSDNode_VV_VF<any_fminnum, "PseudoVFMIN">;
+defm : VPatBinaryFPSDNode_VV_VF<any_fmaxnum, "PseudoVFMAX">;
 
 // 13.13. Vector Floating-Point Compare Instructions
 defm : VPatFPSetCCSDNode_VV_VF_FV;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -112,6 +112,8 @@
 def riscv_strict_fmul_vl : SDNode<"RISCVISD::STRICT_FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>;
 def riscv_strict_fdiv_vl : SDNode<"RISCVISD::STRICT_FDIV_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>;
 def riscv_strict_fsqrt_vl : SDNode<"RISCVISD::STRICT_FSQRT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>;
+def riscv_strict_fminnum_vl : SDNode<"RISCVISD::STRICT_FMINNUM_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>;
+def riscv_strict_fmaxnum_vl : SDNode<"RISCVISD::STRICT_FMAXNUM_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>;
 
 def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                  [(riscv_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
@@ -128,6 +130,12 @@
 def any_riscv_fsqrt_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
                                   [(riscv_fsqrt_vl node:$src, node:$mask, node:$vl),
                                    (riscv_strict_fsqrt_vl node:$src, node:$mask, node:$vl)]>;
+def any_riscv_fmaxnum_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
+                                    [(riscv_fmaxnum_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
+                                     (riscv_strict_fmaxnum_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
+def any_riscv_fminnum_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
+                                    [(riscv_fminnum_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
+                                     (riscv_strict_fminnum_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
 
 def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                               SDTCisSameAs<0, 2>,
@@ -1847,8 +1855,8 @@
 defm : VPatWidenFPMulAccVL_VV_VF;
 
 // 13.11. Vector Floating-Point MIN/MAX Instructions
-defm : VPatBinaryFPVL_VV_VF<riscv_fminnum_vl, "PseudoVFMIN">;
-defm : VPatBinaryFPVL_VV_VF<riscv_fmaxnum_vl, "PseudoVFMAX">;
+defm : VPatBinaryFPVL_VV_VF<any_riscv_fminnum_vl, "PseudoVFMIN">;
+defm : VPatBinaryFPVL_VV_VF<any_riscv_fmaxnum_vl, "PseudoVFMAX">;
 
 // 13.13.
Vector Floating-Point Compare Instructions defm : VPatFPSetCCVL_VV_VF_FV; diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-constrained-sdnode.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-constrained-sdnode.ll @@ -0,0 +1,367 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <1 x half> @llvm.experimental.constrained.maxnum.v1f16(<1 x half>, <1 x half>, metadata) + +define <1 x half> @vfmax_v1f16_vv(<1 x half> %a, <1 x half> %b) { +; CHECK-LABEL: vfmax_v1f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <1 x half> @llvm.experimental.constrained.maxnum.v1f16(<1 x half> %a, <1 x half> %b, metadata !"fpexcept.strict") + ret <1 x half> %v +} + +define <1 x half> @vfmax_v1f16_vf(<1 x half> %a, half %b) { +; CHECK-LABEL: vfmax_v1f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <1 x half> poison, half %b, i32 0 + %splat = shufflevector <1 x half> %head, <1 x half> poison, <1 x i32> zeroinitializer + %v = call <1 x half> @llvm.experimental.constrained.maxnum.v1f16(<1 x half> %a, <1 x half> %splat, metadata !"fpexcept.strict") + ret <1 x half> %v +} + +declare <2 x half> @llvm.experimental.constrained.maxnum.v2f16(<2 x half>, <2 x half>, metadata) + +define <2 x half> @vfmax_v2f16_vv(<2 x half> %a, <2 x half> %b) { +; CHECK-LABEL: vfmax_v2f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <2 x half> @llvm.experimental.constrained.maxnum.v2f16(<2 x half> %a, <2 x half> %b, metadata !"fpexcept.strict") + ret <2 x half> %v +} + +define <2 x half> @vfmax_v2f16_vf(<2 x half> %a, half %b) { +; CHECK-LABEL: vfmax_v2f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <2 x half> poison, half %b, i32 0 + %splat = shufflevector <2 x half> %head, <2 x half> poison, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.experimental.constrained.maxnum.v2f16(<2 x half> %a, <2 x half> %splat, metadata !"fpexcept.strict") + ret <2 x half> %v +} + +declare <4 x half> @llvm.experimental.constrained.maxnum.v4f16(<4 x half>, <4 x half>, metadata) + +define <4 x half> @vfmax_v4f16_vv(<4 x half> %a, <4 x half> %b) { +; CHECK-LABEL: vfmax_v4f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.experimental.constrained.maxnum.v4f16(<4 x half> %a, <4 x half> %b, metadata !"fpexcept.strict") + ret <4 x half> %v +} + +define <4 x half> @vfmax_v4f16_vf(<4 x half> %a, half %b) { +; CHECK-LABEL: vfmax_v4f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <4 x half> poison, half %b, i32 0 + %splat = shufflevector <4 x half> %head, <4 x half> poison, <4 x i32> zeroinitializer + %v = call <4 x half> 
@llvm.experimental.constrained.maxnum.v4f16(<4 x half> %a, <4 x half> %splat, metadata !"fpexcept.strict") + ret <4 x half> %v +} + +declare <8 x half> @llvm.experimental.constrained.maxnum.v8f16(<8 x half>, <8 x half>, metadata) + +define <8 x half> @vfmax_v8f16_vv(<8 x half> %a, <8 x half> %b) { +; CHECK-LABEL: vfmax_v8f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <8 x half> @llvm.experimental.constrained.maxnum.v8f16(<8 x half> %a, <8 x half> %b, metadata !"fpexcept.strict") + ret <8 x half> %v +} + +define <8 x half> @vfmax_v8f16_vf(<8 x half> %a, half %b) { +; CHECK-LABEL: vfmax_v8f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x half> poison, half %b, i32 0 + %splat = shufflevector <8 x half> %head, <8 x half> poison, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.experimental.constrained.maxnum.v8f16(<8 x half> %a, <8 x half> %splat, metadata !"fpexcept.strict") + ret <8 x half> %v +} + +declare <16 x half> @llvm.experimental.constrained.maxnum.v16f16(<16 x half>, <16 x half>, metadata) + +define <16 x half> @vfmax_v16f16_vv(<16 x half> %a, <16 x half> %b) { +; CHECK-LABEL: vfmax_v16f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v10 +; CHECK-NEXT: ret + %v = call <16 x half> @llvm.experimental.constrained.maxnum.v16f16(<16 x half> %a, <16 x half> %b, metadata !"fpexcept.strict") + ret <16 x half> %v +} + +define <16 x half> @vfmax_v16f16_vf(<16 x half> %a, half %b) { +; CHECK-LABEL: vfmax_v16f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <16 x half> poison, half %b, i32 0 + %splat = shufflevector <16 x half> %head, <16 x half> poison, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.experimental.constrained.maxnum.v16f16(<16 x half> %a, <16 x half> %splat, metadata !"fpexcept.strict") + ret <16 x half> %v +} + +declare <32 x half> @llvm.experimental.constrained.maxnum.v32f16(<32 x half>, <32 x half>, metadata) + +define <32 x half> @vfmax_v32f16_vv(<32 x half> %a, <32 x half> %b) { +; CHECK-LABEL: vfmax_v32f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 32 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v12 +; CHECK-NEXT: ret + %v = call <32 x half> @llvm.experimental.constrained.maxnum.v32f16(<32 x half> %a, <32 x half> %b, metadata !"fpexcept.strict") + ret <32 x half> %v +} + +define <32 x half> @vfmax_v32f16_vf(<32 x half> %a, half %b) { +; CHECK-LABEL: vfmax_v32f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 32 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <32 x half> poison, half %b, i32 0 + %splat = shufflevector <32 x half> %head, <32 x half> poison, <32 x i32> zeroinitializer + %v = call <32 x half> @llvm.experimental.constrained.maxnum.v32f16(<32 x half> %a, <32 x half> %splat, metadata !"fpexcept.strict") + ret <32 x half> %v +} + +declare <1 x float> @llvm.experimental.constrained.maxnum.v1f32(<1 x float>, <1 x float>, metadata) + +define <1 x float> @vfmax_v1f32_vv(<1 x float> %a, <1 x float> %b) { +; CHECK-LABEL: vfmax_v1f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <1 x 
float> @llvm.experimental.constrained.maxnum.v1f32(<1 x float> %a, <1 x float> %b, metadata !"fpexcept.strict") + ret <1 x float> %v +} + +define <1 x float> @vfmax_v1f32_vf(<1 x float> %a, float %b) { +; CHECK-LABEL: vfmax_v1f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <1 x float> poison, float %b, i32 0 + %splat = shufflevector <1 x float> %head, <1 x float> poison, <1 x i32> zeroinitializer + %v = call <1 x float> @llvm.experimental.constrained.maxnum.v1f32(<1 x float> %a, <1 x float> %splat, metadata !"fpexcept.strict") + ret <1 x float> %v +} + +declare <2 x float> @llvm.experimental.constrained.maxnum.v2f32(<2 x float>, <2 x float>, metadata) + +define <2 x float> @vfmax_v2f32_vv(<2 x float> %a, <2 x float> %b) { +; CHECK-LABEL: vfmax_v2f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <2 x float> @llvm.experimental.constrained.maxnum.v2f32(<2 x float> %a, <2 x float> %b, metadata !"fpexcept.strict") + ret <2 x float> %v +} + +define <2 x float> @vfmax_v2f32_vf(<2 x float> %a, float %b) { +; CHECK-LABEL: vfmax_v2f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <2 x float> poison, float %b, i32 0 + %splat = shufflevector <2 x float> %head, <2 x float> poison, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.experimental.constrained.maxnum.v2f32(<2 x float> %a, <2 x float> %splat, metadata !"fpexcept.strict") + ret <2 x float> %v +} + +declare <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float>, <4 x float>, metadata) + +define <4 x float> @vfmax_v4f32_vv(<4 x float> %a, <4 x float> %b) { +; CHECK-LABEL: vfmax_v4f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float> %a, <4 x float> %b, metadata !"fpexcept.strict") + ret <4 x float> %v +} + +define <4 x float> @vfmax_v4f32_vf(<4 x float> %a, float %b) { +; CHECK-LABEL: vfmax_v4f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <4 x float> poison, float %b, i32 0 + %splat = shufflevector <4 x float> %head, <4 x float> poison, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float> %a, <4 x float> %splat, metadata !"fpexcept.strict") + ret <4 x float> %v +} + +declare <8 x float> @llvm.experimental.constrained.maxnum.v8f32(<8 x float>, <8 x float>, metadata) + +define <8 x float> @vfmax_v8f32_vv(<8 x float> %a, <8 x float> %b) { +; CHECK-LABEL: vfmax_v8f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v10 +; CHECK-NEXT: ret + %v = call <8 x float> @llvm.experimental.constrained.maxnum.v8f32(<8 x float> %a, <8 x float> %b, metadata !"fpexcept.strict") + ret <8 x float> %v +} + +define <8 x float> @vfmax_v8f32_vf(<8 x float> %a, float %b) { +; CHECK-LABEL: vfmax_v8f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x float> poison, float %b, i32 0 + %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer + %v = call 
<8 x float> @llvm.experimental.constrained.maxnum.v8f32(<8 x float> %a, <8 x float> %splat, metadata !"fpexcept.strict") + ret <8 x float> %v +} + +declare <16 x float> @llvm.experimental.constrained.maxnum.v16f32(<16 x float>, <16 x float>, metadata) + +define <16 x float> @vfmax_v16f32_vv(<16 x float> %a, <16 x float> %b) { +; CHECK-LABEL: vfmax_v16f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v12 +; CHECK-NEXT: ret + %v = call <16 x float> @llvm.experimental.constrained.maxnum.v16f32(<16 x float> %a, <16 x float> %b, metadata !"fpexcept.strict") + ret <16 x float> %v +} + +define <16 x float> @vfmax_v16f32_vf(<16 x float> %a, float %b) { +; CHECK-LABEL: vfmax_v16f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <16 x float> poison, float %b, i32 0 + %splat = shufflevector <16 x float> %head, <16 x float> poison, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.experimental.constrained.maxnum.v16f32(<16 x float> %a, <16 x float> %splat, metadata !"fpexcept.strict") + ret <16 x float> %v +} + +declare <1 x double> @llvm.experimental.constrained.maxnum.v1f64(<1 x double>, <1 x double>, metadata) + +define <1 x double> @vfmax_v1f64_vv(<1 x double> %a, <1 x double> %b) { +; CHECK-LABEL: vfmax_v1f64_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <1 x double> @llvm.experimental.constrained.maxnum.v1f64(<1 x double> %a, <1 x double> %b, metadata !"fpexcept.strict") + ret <1 x double> %v +} + +define <1 x double> @vfmax_v1f64_vf(<1 x double> %a, double %b) { +; CHECK-LABEL: vfmax_v1f64_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <1 x double> poison, double %b, i32 0 + %splat = shufflevector <1 x double> %head, <1 x double> poison, <1 x i32> zeroinitializer + %v = call <1 x double> @llvm.experimental.constrained.maxnum.v1f64(<1 x double> %a, <1 x double> %splat, metadata !"fpexcept.strict") + ret <1 x double> %v +} + +declare <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double>, <2 x double>, metadata) + +define <2 x double> @vfmax_v2f64_vv(<2 x double> %a, <2 x double> %b) { +; CHECK-LABEL: vfmax_v2f64_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double> %a, <2 x double> %b, metadata !"fpexcept.strict") + ret <2 x double> %v +} + +define <2 x double> @vfmax_v2f64_vf(<2 x double> %a, double %b) { +; CHECK-LABEL: vfmax_v2f64_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <2 x double> poison, double %b, i32 0 + %splat = shufflevector <2 x double> %head, <2 x double> poison, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double> %a, <2 x double> %splat, metadata !"fpexcept.strict") + ret <2 x double> %v +} + +declare <4 x double> @llvm.experimental.constrained.maxnum.v4f64(<4 x double>, <4 x double>, metadata) + +define <4 x double> @vfmax_v4f64_vv(<4 x double> %a, <4 x double> %b) { +; CHECK-LABEL: vfmax_v4f64_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vfmax.vv 
v8, v8, v10 +; CHECK-NEXT: ret + %v = call <4 x double> @llvm.experimental.constrained.maxnum.v4f64(<4 x double> %a, <4 x double> %b, metadata !"fpexcept.strict") + ret <4 x double> %v +} + +define <4 x double> @vfmax_v4f64_vf(<4 x double> %a, double %b) { +; CHECK-LABEL: vfmax_v4f64_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <4 x double> poison, double %b, i32 0 + %splat = shufflevector <4 x double> %head, <4 x double> poison, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.experimental.constrained.maxnum.v4f64(<4 x double> %a, <4 x double> %splat, metadata !"fpexcept.strict") + ret <4 x double> %v +} + +declare <8 x double> @llvm.experimental.constrained.maxnum.v8f64(<8 x double>, <8 x double>, metadata) + +define <8 x double> @vfmax_v8f64_vv(<8 x double> %a, <8 x double> %b) { +; CHECK-LABEL: vfmax_v8f64_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v12 +; CHECK-NEXT: ret + %v = call <8 x double> @llvm.experimental.constrained.maxnum.v8f64(<8 x double> %a, <8 x double> %b, metadata !"fpexcept.strict") + ret <8 x double> %v +} + +define <8 x double> @vfmax_v8f64_vf(<8 x double> %a, double %b) { +; CHECK-LABEL: vfmax_v8f64_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x double> poison, double %b, i32 0 + %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.experimental.constrained.maxnum.v8f64(<8 x double> %a, <8 x double> %splat, metadata !"fpexcept.strict") + ret <8 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-constrained-sdnode.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-constrained-sdnode.ll @@ -0,0 +1,367 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <1 x half> @llvm.experimental.constrained.minnum.v1f16(<1 x half>, <1 x half>, metadata) + +define <1 x half> @vfmin_v1f16_vv(<1 x half> %a, <1 x half> %b) { +; CHECK-LABEL: vfmin_v1f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <1 x half> @llvm.experimental.constrained.minnum.v1f16(<1 x half> %a, <1 x half> %b, metadata !"fpexcept.strict") + ret <1 x half> %v +} + +define <1 x half> @vfmin_v1f16_vf(<1 x half> %a, half %b) { +; CHECK-LABEL: vfmin_v1f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <1 x half> poison, half %b, i32 0 + %splat = shufflevector <1 x half> %head, <1 x half> poison, <1 x i32> zeroinitializer + %v = call <1 x half> @llvm.experimental.constrained.minnum.v1f16(<1 x half> %a, <1 x half> %splat, metadata !"fpexcept.strict") + ret <1 x half> %v +} + +declare <2 x half> @llvm.experimental.constrained.minnum.v2f16(<2 x half>, <2 x half>, metadata) + +define <2 x half> @vfmin_v2f16_vv(<2 x half> %a, <2 x half> %b) { 
+; CHECK-LABEL: vfmin_v2f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <2 x half> @llvm.experimental.constrained.minnum.v2f16(<2 x half> %a, <2 x half> %b, metadata !"fpexcept.strict") + ret <2 x half> %v +} + +define <2 x half> @vfmin_v2f16_vf(<2 x half> %a, half %b) { +; CHECK-LABEL: vfmin_v2f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <2 x half> poison, half %b, i32 0 + %splat = shufflevector <2 x half> %head, <2 x half> poison, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.experimental.constrained.minnum.v2f16(<2 x half> %a, <2 x half> %splat, metadata !"fpexcept.strict") + ret <2 x half> %v +} + +declare <4 x half> @llvm.experimental.constrained.minnum.v4f16(<4 x half>, <4 x half>, metadata) + +define <4 x half> @vfmin_v4f16_vv(<4 x half> %a, <4 x half> %b) { +; CHECK-LABEL: vfmin_v4f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.experimental.constrained.minnum.v4f16(<4 x half> %a, <4 x half> %b, metadata !"fpexcept.strict") + ret <4 x half> %v +} + +define <4 x half> @vfmin_v4f16_vf(<4 x half> %a, half %b) { +; CHECK-LABEL: vfmin_v4f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <4 x half> poison, half %b, i32 0 + %splat = shufflevector <4 x half> %head, <4 x half> poison, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.experimental.constrained.minnum.v4f16(<4 x half> %a, <4 x half> %splat, metadata !"fpexcept.strict") + ret <4 x half> %v +} + +declare <8 x half> @llvm.experimental.constrained.minnum.v8f16(<8 x half>, <8 x half>, metadata) + +define <8 x half> @vfmin_v8f16_vv(<8 x half> %a, <8 x half> %b) { +; CHECK-LABEL: vfmin_v8f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <8 x half> @llvm.experimental.constrained.minnum.v8f16(<8 x half> %a, <8 x half> %b, metadata !"fpexcept.strict") + ret <8 x half> %v +} + +define <8 x half> @vfmin_v8f16_vf(<8 x half> %a, half %b) { +; CHECK-LABEL: vfmin_v8f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x half> poison, half %b, i32 0 + %splat = shufflevector <8 x half> %head, <8 x half> poison, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.experimental.constrained.minnum.v8f16(<8 x half> %a, <8 x half> %splat, metadata !"fpexcept.strict") + ret <8 x half> %v +} + +declare <16 x half> @llvm.experimental.constrained.minnum.v16f16(<16 x half>, <16 x half>, metadata) + +define <16 x half> @vfmin_v16f16_vv(<16 x half> %a, <16 x half> %b) { +; CHECK-LABEL: vfmin_v16f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v10 +; CHECK-NEXT: ret + %v = call <16 x half> @llvm.experimental.constrained.minnum.v16f16(<16 x half> %a, <16 x half> %b, metadata !"fpexcept.strict") + ret <16 x half> %v +} + +define <16 x half> @vfmin_v16f16_vf(<16 x half> %a, half %b) { +; CHECK-LABEL: vfmin_v16f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <16 x half> 
poison, half %b, i32 0 + %splat = shufflevector <16 x half> %head, <16 x half> poison, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.experimental.constrained.minnum.v16f16(<16 x half> %a, <16 x half> %splat, metadata !"fpexcept.strict") + ret <16 x half> %v +} + +declare <32 x half> @llvm.experimental.constrained.minnum.v32f16(<32 x half>, <32 x half>, metadata) + +define <32 x half> @vfmin_v32f16_vv(<32 x half> %a, <32 x half> %b) { +; CHECK-LABEL: vfmin_v32f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 32 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v12 +; CHECK-NEXT: ret + %v = call <32 x half> @llvm.experimental.constrained.minnum.v32f16(<32 x half> %a, <32 x half> %b, metadata !"fpexcept.strict") + ret <32 x half> %v +} + +define <32 x half> @vfmin_v32f16_vf(<32 x half> %a, half %b) { +; CHECK-LABEL: vfmin_v32f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: li a0, 32 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <32 x half> poison, half %b, i32 0 + %splat = shufflevector <32 x half> %head, <32 x half> poison, <32 x i32> zeroinitializer + %v = call <32 x half> @llvm.experimental.constrained.minnum.v32f16(<32 x half> %a, <32 x half> %splat, metadata !"fpexcept.strict") + ret <32 x half> %v +} + +declare <1 x float> @llvm.experimental.constrained.minnum.v1f32(<1 x float>, <1 x float>, metadata) + +define <1 x float> @vfmin_v1f32_vv(<1 x float> %a, <1 x float> %b) { +; CHECK-LABEL: vfmin_v1f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <1 x float> @llvm.experimental.constrained.minnum.v1f32(<1 x float> %a, <1 x float> %b, metadata !"fpexcept.strict") + ret <1 x float> %v +} + +define <1 x float> @vfmin_v1f32_vf(<1 x float> %a, float %b) { +; CHECK-LABEL: vfmin_v1f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <1 x float> poison, float %b, i32 0 + %splat = shufflevector <1 x float> %head, <1 x float> poison, <1 x i32> zeroinitializer + %v = call <1 x float> @llvm.experimental.constrained.minnum.v1f32(<1 x float> %a, <1 x float> %splat, metadata !"fpexcept.strict") + ret <1 x float> %v +} + +declare <2 x float> @llvm.experimental.constrained.minnum.v2f32(<2 x float>, <2 x float>, metadata) + +define <2 x float> @vfmin_v2f32_vv(<2 x float> %a, <2 x float> %b) { +; CHECK-LABEL: vfmin_v2f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <2 x float> @llvm.experimental.constrained.minnum.v2f32(<2 x float> %a, <2 x float> %b, metadata !"fpexcept.strict") + ret <2 x float> %v +} + +define <2 x float> @vfmin_v2f32_vf(<2 x float> %a, float %b) { +; CHECK-LABEL: vfmin_v2f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <2 x float> poison, float %b, i32 0 + %splat = shufflevector <2 x float> %head, <2 x float> poison, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.experimental.constrained.minnum.v2f32(<2 x float> %a, <2 x float> %splat, metadata !"fpexcept.strict") + ret <2 x float> %v +} + +declare <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float>, <4 x float>, metadata) + +define <4 x float> @vfmin_v4f32_vv(<4 x float> %a, <4 x float> %b) { +; CHECK-LABEL: 
vfmin_v4f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float> %a, <4 x float> %b, metadata !"fpexcept.strict") + ret <4 x float> %v +} + +define <4 x float> @vfmin_v4f32_vf(<4 x float> %a, float %b) { +; CHECK-LABEL: vfmin_v4f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <4 x float> poison, float %b, i32 0 + %splat = shufflevector <4 x float> %head, <4 x float> poison, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float> %a, <4 x float> %splat, metadata !"fpexcept.strict") + ret <4 x float> %v +} + +declare <8 x float> @llvm.experimental.constrained.minnum.v8f32(<8 x float>, <8 x float>, metadata) + +define <8 x float> @vfmin_v8f32_vv(<8 x float> %a, <8 x float> %b) { +; CHECK-LABEL: vfmin_v8f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v10 +; CHECK-NEXT: ret + %v = call <8 x float> @llvm.experimental.constrained.minnum.v8f32(<8 x float> %a, <8 x float> %b, metadata !"fpexcept.strict") + ret <8 x float> %v +} + +define <8 x float> @vfmin_v8f32_vf(<8 x float> %a, float %b) { +; CHECK-LABEL: vfmin_v8f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x float> poison, float %b, i32 0 + %splat = shufflevector <8 x float> %head, <8 x float> poison, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.experimental.constrained.minnum.v8f32(<8 x float> %a, <8 x float> %splat, metadata !"fpexcept.strict") + ret <8 x float> %v +} + +declare <16 x float> @llvm.experimental.constrained.minnum.v16f32(<16 x float>, <16 x float>, metadata) + +define <16 x float> @vfmin_v16f32_vv(<16 x float> %a, <16 x float> %b) { +; CHECK-LABEL: vfmin_v16f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v12 +; CHECK-NEXT: ret + %v = call <16 x float> @llvm.experimental.constrained.minnum.v16f32(<16 x float> %a, <16 x float> %b, metadata !"fpexcept.strict") + ret <16 x float> %v +} + +define <16 x float> @vfmin_v16f32_vf(<16 x float> %a, float %b) { +; CHECK-LABEL: vfmin_v16f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <16 x float> poison, float %b, i32 0 + %splat = shufflevector <16 x float> %head, <16 x float> poison, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.experimental.constrained.minnum.v16f32(<16 x float> %a, <16 x float> %splat, metadata !"fpexcept.strict") + ret <16 x float> %v +} + +declare <1 x double> @llvm.experimental.constrained.minnum.v1f64(<1 x double>, <1 x double>, metadata) + +define <1 x double> @vfmin_v1f64_vv(<1 x double> %a, <1 x double> %b) { +; CHECK-LABEL: vfmin_v1f64_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <1 x double> @llvm.experimental.constrained.minnum.v1f64(<1 x double> %a, <1 x double> %b, metadata !"fpexcept.strict") + ret <1 x double> %v +} + +define <1 x double> @vfmin_v1f64_vf(<1 x double> %a, double %b) { +; CHECK-LABEL: vfmin_v1f64_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma +; CHECK-NEXT: vfmin.vf 
v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <1 x double> poison, double %b, i32 0 + %splat = shufflevector <1 x double> %head, <1 x double> poison, <1 x i32> zeroinitializer + %v = call <1 x double> @llvm.experimental.constrained.minnum.v1f64(<1 x double> %a, <1 x double> %splat, metadata !"fpexcept.strict") + ret <1 x double> %v +} + +declare <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double>, <2 x double>, metadata) + +define <2 x double> @vfmin_v2f64_vv(<2 x double> %a, <2 x double> %b) { +; CHECK-LABEL: vfmin_v2f64_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double> %a, <2 x double> %b, metadata !"fpexcept.strict") + ret <2 x double> %v +} + +define <2 x double> @vfmin_v2f64_vf(<2 x double> %a, double %b) { +; CHECK-LABEL: vfmin_v2f64_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <2 x double> poison, double %b, i32 0 + %splat = shufflevector <2 x double> %head, <2 x double> poison, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double> %a, <2 x double> %splat, metadata !"fpexcept.strict") + ret <2 x double> %v +} + +declare <4 x double> @llvm.experimental.constrained.minnum.v4f64(<4 x double>, <4 x double>, metadata) + +define <4 x double> @vfmin_v4f64_vv(<4 x double> %a, <4 x double> %b) { +; CHECK-LABEL: vfmin_v4f64_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v10 +; CHECK-NEXT: ret + %v = call <4 x double> @llvm.experimental.constrained.minnum.v4f64(<4 x double> %a, <4 x double> %b, metadata !"fpexcept.strict") + ret <4 x double> %v +} + +define <4 x double> @vfmin_v4f64_vf(<4 x double> %a, double %b) { +; CHECK-LABEL: vfmin_v4f64_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <4 x double> poison, double %b, i32 0 + %splat = shufflevector <4 x double> %head, <4 x double> poison, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.experimental.constrained.minnum.v4f64(<4 x double> %a, <4 x double> %splat, metadata !"fpexcept.strict") + ret <4 x double> %v +} + +declare <8 x double> @llvm.experimental.constrained.minnum.v8f64(<8 x double>, <8 x double>, metadata) + +define <8 x double> @vfmin_v8f64_vv(<8 x double> %a, <8 x double> %b) { +; CHECK-LABEL: vfmin_v8f64_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v12 +; CHECK-NEXT: ret + %v = call <8 x double> @llvm.experimental.constrained.minnum.v8f64(<8 x double> %a, <8 x double> %b, metadata !"fpexcept.strict") + ret <8 x double> %v +} + +define <8 x double> @vfmin_v8f64_vf(<8 x double> %a, double %b) { +; CHECK-LABEL: vfmin_v8f64_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement <8 x double> poison, double %b, i32 0 + %splat = shufflevector <8 x double> %head, <8 x double> poison, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.experimental.constrained.minnum.v8f64(<8 x double> %a, <8 x double> %splat, metadata !"fpexcept.strict") + ret <8 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-constrained-sdnode.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfmax-constrained-sdnode.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-constrained-sdnode.ll @@ -0,0 +1,365 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare @llvm.experimental.constrained.maxnum.nxv1f16(, , metadata) + +define @vfmax_nxv1f16_vv( %a, %b) { +; CHECK-LABEL: vfmax_nxv1f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.maxnum.nxv1f16( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmax_nxv1f16_vf( %a, half %b) { +; CHECK-LABEL: vfmax_nxv1f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.maxnum.nxv1f16( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.maxnum.nxv2f16(, , metadata) + +define @vfmax_nxv2f16_vv( %a, %b) { +; CHECK-LABEL: vfmax_nxv2f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.maxnum.nxv2f16( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmax_nxv2f16_vf( %a, half %b) { +; CHECK-LABEL: vfmax_nxv2f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.maxnum.nxv2f16( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.maxnum.nxv4f16(, , metadata) + +define @vfmax_nxv4f16_vv( %a, %b) { +; CHECK-LABEL: vfmax_nxv4f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.maxnum.nxv4f16( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmax_nxv4f16_vf( %a, half %b) { +; CHECK-LABEL: vfmax_nxv4f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.maxnum.nxv4f16( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.maxnum.nxv8f16(, , metadata) + +define @vfmax_nxv8f16_vv( %a, %b) { +; CHECK-LABEL: vfmax_nxv8f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v10 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.maxnum.nxv8f16( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmax_nxv8f16_vf( %a, half %b) { +; CHECK-LABEL: vfmax_nxv8f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, 
poison, zeroinitializer + %v = call @llvm.experimental.constrained.maxnum.nxv8f16( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.maxnum.nxv16f16(, , metadata) + +define @vfmax_nxv16f16_vv( %a, %b) { +; CHECK-LABEL: vfmax_nxv16f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v12 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.maxnum.nxv16f16( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmax_nxv16f16_vf( %a, half %b) { +; CHECK-LABEL: vfmax_nxv16f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.maxnum.nxv16f16( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.maxnum.nxv32f16(, , metadata) + +define @vfmax_nxv32f16_vv( %a, %b) { +; CHECK-LABEL: vfmax_nxv32f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v16 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.maxnum.nxv32f16( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmax_nxv32f16_vf( %a, half %b) { +; CHECK-LABEL: vfmax_nxv32f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.maxnum.nxv32f16( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.maxnum.nxv1f32(, , metadata) + +define @vfmax_nxv1f32_vv( %a, %b) { +; CHECK-LABEL: vfmax_nxv1f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.maxnum.nxv1f32( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmax_nxv1f32_vf( %a, float %b) { +; CHECK-LABEL: vfmax_nxv1f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.maxnum.nxv1f32( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.maxnum.nxv2f32(, , metadata) + +define @vfmax_nxv2f32_vv( %a, %b) { +; CHECK-LABEL: vfmax_nxv2f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.maxnum.nxv2f32( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmax_nxv2f32_vf( %a, float %b) { +; CHECK-LABEL: vfmax_nxv2f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.maxnum.nxv2f32( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.maxnum.nxv4f32(, , metadata) + +define @vfmax_nxv4f32_vv( %a, %b) { +; CHECK-LABEL: vfmax_nxv4f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vfmax.vv 
v8, v8, v10 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.maxnum.nxv4f32( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmax_nxv4f32_vf( %a, float %b) { +; CHECK-LABEL: vfmax_nxv4f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.maxnum.nxv4f32( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.maxnum.nxv8f32(, , metadata) + +define @vfmax_nxv8f32_vv( %a, %b) { +; CHECK-LABEL: vfmax_nxv8f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v12 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.maxnum.nxv8f32( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmax_nxv8f32_vf( %a, float %b) { +; CHECK-LABEL: vfmax_nxv8f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.maxnum.nxv8f32( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.maxnum.nxv16f32(, , metadata) + +define @vfmax_nxv16f32_vv( %a, %b) { +; CHECK-LABEL: vfmax_nxv16f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v16 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.maxnum.nxv16f32( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmax_nxv16f32_vf( %a, float %b) { +; CHECK-LABEL: vfmax_nxv16f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.maxnum.nxv16f32( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.maxnum.nxv1f64(, , metadata) + +define @vfmax_nxv1f64_vv( %a, %b) { +; CHECK-LABEL: vfmax_nxv1f64_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.maxnum.nxv1f64( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmax_nxv1f64_vf( %a, double %b) { +; CHECK-LABEL: vfmax_nxv1f64_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.maxnum.nxv1f64( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.maxnum.nxv2f64(, , metadata) + +define @vfmax_nxv2f64_vv( %a, %b) { +; CHECK-LABEL: vfmax_nxv2f64_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v10 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.maxnum.nxv2f64( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmax_nxv2f64_vf( %a, double %b) { +; CHECK-LABEL: vfmax_nxv2f64_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, 
double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.maxnum.nxv2f64( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.maxnum.nxv4f64(, , metadata) + +define @vfmax_nxv4f64_vv( %a, %b) { +; CHECK-LABEL: vfmax_nxv4f64_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v12 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.maxnum.nxv4f64( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmax_nxv4f64_vf( %a, double %b) { +; CHECK-LABEL: vfmax_nxv4f64_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.maxnum.nxv4f64( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.maxnum.nxv8f64(, , metadata) + +define @vfmax_nxv8f64_vv( %a, %b) { +; CHECK-LABEL: vfmax_nxv8f64_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v16 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.maxnum.nxv8f64( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmax_nxv8f64_vf( %a, double %b) { +; CHECK-LABEL: vfmax_nxv8f64_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.maxnum.nxv8f64( %a, %splat, metadata !"fpexcept.strict") + ret %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-constrained-sdnode.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-constrained-sdnode.ll @@ -0,0 +1,365 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare @llvm.experimental.constrained.minnum.nxv1f16(, , metadata) + +define @vfmin_nxv1f16_vv( %a, %b) { +; CHECK-LABEL: vfmin_nxv1f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.minnum.nxv1f16( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmin_nxv1f16_vf( %a, half %b) { +; CHECK-LABEL: vfmin_nxv1f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.minnum.nxv1f16( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.minnum.nxv2f16(, , metadata) + +define @vfmin_nxv2f16_vv( %a, %b) { +; CHECK-LABEL: vfmin_nxv2f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.minnum.nxv2f16( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define 
@vfmin_nxv2f16_vf( %a, half %b) { +; CHECK-LABEL: vfmin_nxv2f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.minnum.nxv2f16( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.minnum.nxv4f16(, , metadata) + +define @vfmin_nxv4f16_vv( %a, %b) { +; CHECK-LABEL: vfmin_nxv4f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.minnum.nxv4f16( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmin_nxv4f16_vf( %a, half %b) { +; CHECK-LABEL: vfmin_nxv4f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.minnum.nxv4f16( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.minnum.nxv8f16(, , metadata) + +define @vfmin_nxv8f16_vv( %a, %b) { +; CHECK-LABEL: vfmin_nxv8f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v10 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.minnum.nxv8f16( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmin_nxv8f16_vf( %a, half %b) { +; CHECK-LABEL: vfmin_nxv8f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.minnum.nxv8f16( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.minnum.nxv16f16(, , metadata) + +define @vfmin_nxv16f16_vv( %a, %b) { +; CHECK-LABEL: vfmin_nxv16f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v12 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.minnum.nxv16f16( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmin_nxv16f16_vf( %a, half %b) { +; CHECK-LABEL: vfmin_nxv16f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.minnum.nxv16f16( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.minnum.nxv32f16(, , metadata) + +define @vfmin_nxv32f16_vv( %a, %b) { +; CHECK-LABEL: vfmin_nxv32f16_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v16 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.minnum.nxv32f16( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmin_nxv32f16_vf( %a, half %b) { +; CHECK-LABEL: vfmin_nxv32f16_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, half %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.minnum.nxv32f16( %a, %splat, metadata 
!"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.minnum.nxv1f32(, , metadata) + +define @vfmin_nxv1f32_vv( %a, %b) { +; CHECK-LABEL: vfmin_nxv1f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.minnum.nxv1f32( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmin_nxv1f32_vf( %a, float %b) { +; CHECK-LABEL: vfmin_nxv1f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.minnum.nxv1f32( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.minnum.nxv2f32(, , metadata) + +define @vfmin_nxv2f32_vv( %a, %b) { +; CHECK-LABEL: vfmin_nxv2f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.minnum.nxv2f32( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmin_nxv2f32_vf( %a, float %b) { +; CHECK-LABEL: vfmin_nxv2f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.minnum.nxv2f32( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.minnum.nxv4f32(, , metadata) + +define @vfmin_nxv4f32_vv( %a, %b) { +; CHECK-LABEL: vfmin_nxv4f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v10 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.minnum.nxv4f32( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmin_nxv4f32_vf( %a, float %b) { +; CHECK-LABEL: vfmin_nxv4f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.minnum.nxv4f32( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.minnum.nxv8f32(, , metadata) + +define @vfmin_nxv8f32_vv( %a, %b) { +; CHECK-LABEL: vfmin_nxv8f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v12 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.minnum.nxv8f32( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmin_nxv8f32_vf( %a, float %b) { +; CHECK-LABEL: vfmin_nxv8f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.minnum.nxv8f32( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.minnum.nxv16f32(, , metadata) + +define @vfmin_nxv16f32_vv( %a, %b) { +; CHECK-LABEL: vfmin_nxv16f32_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v16 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.minnum.nxv16f32( %a, %b, metadata 
!"fpexcept.strict") + ret %v +} + +define @vfmin_nxv16f32_vf( %a, float %b) { +; CHECK-LABEL: vfmin_nxv16f32_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, float %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.minnum.nxv16f32( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.minnum.nxv1f64(, , metadata) + +define @vfmin_nxv1f64_vv( %a, %b) { +; CHECK-LABEL: vfmin_nxv1f64_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.minnum.nxv1f64( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmin_nxv1f64_vf( %a, double %b) { +; CHECK-LABEL: vfmin_nxv1f64_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.minnum.nxv1f64( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.minnum.nxv2f64(, , metadata) + +define @vfmin_nxv2f64_vv( %a, %b) { +; CHECK-LABEL: vfmin_nxv2f64_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v10 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.minnum.nxv2f64( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmin_nxv2f64_vf( %a, double %b) { +; CHECK-LABEL: vfmin_nxv2f64_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.minnum.nxv2f64( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.minnum.nxv4f64(, , metadata) + +define @vfmin_nxv4f64_vv( %a, %b) { +; CHECK-LABEL: vfmin_nxv4f64_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v12 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.minnum.nxv4f64( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmin_nxv4f64_vf( %a, double %b) { +; CHECK-LABEL: vfmin_nxv4f64_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call @llvm.experimental.constrained.minnum.nxv4f64( %a, %splat, metadata !"fpexcept.strict") + ret %v +} + +declare @llvm.experimental.constrained.minnum.nxv8f64(, , metadata) + +define @vfmin_nxv8f64_vv( %a, %b) { +; CHECK-LABEL: vfmin_nxv8f64_vv: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v16 +; CHECK-NEXT: ret + %v = call @llvm.experimental.constrained.minnum.nxv8f64( %a, %b, metadata !"fpexcept.strict") + ret %v +} + +define @vfmin_nxv8f64_vf( %a, double %b) { +; CHECK-LABEL: vfmin_nxv8f64_vf: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret + %head = insertelement poison, double %b, i32 0 + %splat = shufflevector %head, poison, zeroinitializer + %v = call 
@llvm.experimental.constrained.minnum.nxv8f64( %a, %splat, metadata !"fpexcept.strict") + ret %v +}
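
For reference, a minimal standalone reproducer in the same style as the scalable-vector tests above (not part of the patch); the function name @strict_vfmin and the choice of nxv2f32 are illustrative only, mirroring the nxv2f32 case exercised in vfmin-constrained-sdnode.ll. Fed to llc with the same -mattr/-target-abi flags as the RUN lines above, the strict minnum call below should now select a single vfmin.vv via the new STRICT_FMINNUM_VL path.

; Hypothetical reproducer, not part of the patch.
declare <vscale x 2 x float> @llvm.experimental.constrained.minnum.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, metadata)

define <vscale x 2 x float> @strict_vfmin(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
  ; "fpexcept.strict" keeps the FP-exception side effect, so the operation is
  ; lowered through the chained STRICT_FMINNUM_VL node rather than FMINNUM_VL.
  %v = call <vscale x 2 x float> @llvm.experimental.constrained.minnum.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %v
}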