diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -241,6 +241,7 @@
   FNEG_VL,
   FABS_VL,
   FSQRT_VL,
+  FCLASS_VL,
   FCOPYSIGN_VL, // Has a merge operand
   VFCVT_RTZ_X_F_VL,
   VFCVT_RTZ_XU_F_VL,
@@ -821,6 +822,7 @@
                          SelectionDAG &DAG) const;
   SDValue lowerToScalableOp(SDValue Op, SelectionDAG &DAG, unsigned NewOpc,
                             bool HasMergeOp = false, bool HasMask = true) const;
+  SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG, unsigned RISCVISDOpc,
                     bool HasMergeOp = false) const;
   SDValue lowerLogicVPOp(SDValue Op, SelectionDAG &DAG, unsigned MaskOpc,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -778,7 +778,8 @@
       setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, VT, Legal);
 
       setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND,
-                          ISD::FROUNDEVEN, ISD::FRINT, ISD::FNEARBYINT},
+                          ISD::FROUNDEVEN, ISD::FRINT, ISD::FNEARBYINT,
+                          ISD::IS_FPCLASS},
                          VT, Custom);
 
       setOperationAction(FloatingPointVecReduceOps, VT, Custom);
@@ -1035,7 +1036,8 @@
 
         setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV,
                             ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::FSQRT,
-                            ISD::FMA, ISD::FMINNUM, ISD::FMAXNUM},
+                            ISD::FMA, ISD::FMINNUM, ISD::FMAXNUM,
+                            ISD::IS_FPCLASS},
                            VT, Custom);
 
         setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
@@ -4350,8 +4352,8 @@
   return Op;
 }
 
-static SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG,
-                               const RISCVSubtarget &Subtarget) {
+SDValue RISCVTargetLowering::LowerIS_FPCLASS(SDValue Op,
+                                             SelectionDAG &DAG) const {
   SDLoc DL(Op);
   MVT VT = Op.getSimpleValueType();
   MVT XLenVT = Subtarget.getXLenVT();
@@ -4380,6 +4382,50 @@
     TDCMask |= RISCV::FPMASK_Negative_Zero;
 
   SDValue TDCMaskV = DAG.getConstant(TDCMask, DL, XLenVT);
+
+  if (VT.isVector()) {
+    SDValue Op0 = Op.getOperand(0);
+    MVT VT0 = Op.getOperand(0).getSimpleValueType();
+    MVT DstVT = VT0.changeVectorElementTypeToInteger();
+
+    if (VT.isScalableVector()) {
+      SDValue VL = DAG.getRegister(RISCV::X0, XLenVT);
+      SDValue Mask = getAllOnesMask(DstVT, VL, DL, DAG);
+      SDValue FPCLASS = DAG.getNode(RISCVISD::FCLASS_VL, DL, DstVT,
+                                    {Op0, Mask, VL}, Op->getFlags());
+      SDValue AND = DAG.getNode(ISD::AND, DL, DstVT, FPCLASS,
+                                DAG.getConstant(TDCMask, DL, DstVT));
+      return DAG.getSetCC(DL, VT, AND, DAG.getConstant(0, DL, DstVT),
+                          ISD::SETNE);
+    }
+
+    MVT ContainerVT0 = getContainerForFixedLengthVector(VT0);
+    MVT ContainerVT = getContainerForFixedLengthVector(VT);
+    MVT ContainerDstVT = getContainerForFixedLengthVector(DstVT);
+    auto [Mask, VL] =
+        getDefaultVLOps(DstVT, ContainerDstVT, DL, DAG, Subtarget);
+
+    SDValue FPCLASS = DAG.getNode(
+        RISCVISD::FCLASS_VL, DL, DAG.getVTList(ContainerDstVT, MVT::Other),
+        {convertToScalableVector(ContainerVT0, Op0, DAG, Subtarget) /*Op0*/,
+         Mask, VL},
+        Op->getFlags());
+
+    TDCMaskV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerDstVT,
+                           DAG.getUNDEF(ContainerDstVT), TDCMaskV, VL);
+    SDValue AND = DAG.getNode(RISCVISD::AND_VL, DL, ContainerDstVT, FPCLASS,
+                              TDCMaskV, DAG.getUNDEF(ContainerDstVT), Mask, VL);
+
+    SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
+    SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerDstVT,
+                            DAG.getUNDEF(ContainerDstVT), SplatZero, VL);
+
+    SDValue VMSNE =
+        DAG.getNode(RISCVISD::SETCC_VL, DL, ContainerVT,
+                    {AND, SplatZero, DAG.getCondCode(ISD::SETNE),
+                     DAG.getUNDEF(ContainerVT), Mask, VL});
+    return convertFromScalableVector(VT, VMSNE, DAG, Subtarget);
+  }
+
   SDValue FPCLASS = DAG.getNode(RISCVISD::FPCLASS, DL, VT, Op.getOperand(0));
   SDValue AND = DAG.getNode(ISD::AND, DL, VT, FPCLASS, TDCMaskV);
   return DAG.getSetCC(DL, VT, AND, DAG.getConstant(0, DL, XLenVT),
@@ -4507,7 +4553,7 @@
   case ISD::INTRINSIC_VOID:
     return LowerINTRINSIC_VOID(Op, DAG);
   case ISD::IS_FPCLASS:
-    return LowerIS_FPCLASS(Op, DAG, Subtarget);
+    return LowerIS_FPCLASS(Op, DAG);
   case ISD::BITREVERSE: {
     MVT VT = Op.getSimpleValueType();
     SDLoc DL(Op);
@@ -15202,6 +15248,7 @@
   NODE_NAME_CASE(FNEG_VL)
   NODE_NAME_CASE(FABS_VL)
   NODE_NAME_CASE(FSQRT_VL)
+  NODE_NAME_CASE(FCLASS_VL)
   NODE_NAME_CASE(VFMADD_VL)
   NODE_NAME_CASE(VFNMADD_VL)
   NODE_NAME_CASE(VFMSUB_VL)
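Both vector paths above, like the existing scalar path, reduce llvm.is.fpclass to the same three steps: classify, AND with TDCMask, and compare against zero. TDCMask encodes the translation from the intrinsic's test bits (LangRef order) to the bits that fclass/vfclass.v produce, built from the RISCV::FPMASK_* constants earlier in the function. The standalone C++ sketch below reproduces that correspondence for illustration only; the enumerators, FclassFor, and tdcMaskFor are made-up names that do not appear in the patch, and the expected values are cross-checked against the li constants in the new tests.

// Illustrative sketch of the is.fpclass -> fclass bit translation that
// TDCMask captures. The Fclass* bit positions follow the fclass result
// encoding in the RISC-V spec, which vfclass.v shares.
#include <cassert>
#include <cstdint>

enum : uint16_t {
  FclassNegInf = 1 << 0,       FclassNegNormal = 1 << 1,
  FclassNegSubnormal = 1 << 2, FclassNegZero = 1 << 3,
  FclassPosZero = 1 << 4,      FclassPosSubnormal = 1 << 5,
  FclassPosNormal = 1 << 6,    FclassPosInf = 1 << 7,
  FclassSignalingNaN = 1 << 8, FclassQuietNaN = 1 << 9,
};

// llvm.is.fpclass test bits in LangRef order: sNaN, qNaN, -inf, -normal,
// -subnormal, -zero, +zero, +subnormal, +normal, +inf.
static const uint16_t FclassFor[10] = {
    FclassSignalingNaN, FclassQuietNaN, FclassNegInf,  FclassNegNormal,
    FclassNegSubnormal, FclassNegZero,  FclassPosZero, FclassPosSubnormal,
    FclassPosNormal,    FclassPosInf};

static uint16_t tdcMaskFor(unsigned IsFpclassTest) {
  uint16_t Mask = 0;
  for (unsigned I = 0; I < 10; ++I)
    if (IsFpclassTest & (1u << I))
      Mask |= FclassFor[I];
  return Mask;
}

int main() {
  assert(tdcMaskFor(3) == 768);   // "nan"            -> li a0, 768
  assert(tdcMaskFor(516) == 129); // 0x204 "inf"      -> li a0, 129
  assert(tdcMaskFor(519) == 897); // 0x207 "inf|nan"  -> li a0, 897
  return 0;
}
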
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -129,6 +129,14 @@
                          [(riscv_fsqrt_vl node:$src, node:$mask, node:$vl),
                           (riscv_strict_fsqrt_vl node:$src, node:$mask, node:$vl)]>;
 
+def riscv_fclass_vl : SDNode<"RISCVISD::FCLASS_VL",
+                             SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisVec<0>,
+                                                  SDTCisFP<1>, SDTCisVec<1>,
+                                                  SDTCisSameSizeAs<0, 1>,
+                                                  SDTCVecEltisVT<2, i1>,
+                                                  SDTCisSameNumEltsAs<0, 2>,
+                                                  SDTCisVT<3, XLenVT>]>>;
+
 def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                               SDTCisSameAs<0, 2>,
                                               SDTCisSameAs<0, 3>,
@@ -1999,6 +2007,12 @@
               (!cast<Instruction>("PseudoVFROUND_NOEXCEPT_V_" # vti.LMul.MX #"_MASK")
                     (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
                     (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>;
+
+    // 14.14. Vector Floating-Point Classify Instruction
+    def : Pat<(riscv_fclass_vl (vti.Vector vti.RegClass:$rs2),
+                               (vti.Mask true_mask), VLOpFrag),
+              (!cast<Instruction>("PseudoVFCLASS_V_"# vti.LMul.MX)
+                 vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
   }
 }
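Note that the new pattern only matches the all-ones mask (true_mask), which is the only form LowerIS_FPCLASS emits, so every classification selects to an unmasked vfclass.v. In the tests that follow, each li constant is the intrinsic's test mask re-encoded in fclass bits: i32 3 ("nan", LangRef bits 0 and 1) becomes fclass bits 8 and 9, hence li a0, 768, and i32 519 (0x207, "inf|nan") becomes 0x381, hence li a0, 897.
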
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass.ll
@@ -0,0 +1,170 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
+
+define <2 x i1> @isnan_v2f16(<2 x half> %x) {
+; CHECK-LABEL: isnan_v2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 768
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <2 x i1> @llvm.is.fpclass.v2f16(<2 x half> %x, i32 3) ; nan
+  ret <2 x i1> %1
+}
+
+define <2 x i1> @isnan_v2f32(<2 x float> %x) {
+; CHECK-LABEL: isnan_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 927
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <2 x i1> @llvm.is.fpclass.v2f32(<2 x float> %x, i32 639)
+  ret <2 x i1> %1
+}
+
+
+define <4 x i1> @isnan_v4f32(<4 x float> %x) {
+; CHECK-LABEL: isnan_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 768
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <4 x i1> @llvm.is.fpclass.v4f32(<4 x float> %x, i32 3) ; nan
+  ret <4 x i1> %1
+}
+
+define <8 x i1> @isnan_v8f32(<8 x float> %x) {
+; CHECK-LABEL: isnan_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 512
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <8 x i1> @llvm.is.fpclass.v8f32(<8 x float> %x, i32 2)
+  ret <8 x i1> %1
+}
+
+define <16 x i1> @isnan_v16f32(<16 x float> %x) {
+; CHECK-LABEL: isnan_v16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 256
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <16 x i1> @llvm.is.fpclass.v16f32(<16 x float> %x, i32 1)
+  ret <16 x i1> %1
+}
+
+define <2 x i1> @isnormal_v2f64(<2 x double> %x) {
+; CHECK-LABEL: isnormal_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 129
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <2 x i1> @llvm.is.fpclass.v2f64(<2 x double> %x, i32 516) ; 0x204 = "inf"
+  ret <2 x i1> %1
+}
+
+define <4 x i1> @isposinf_v4f64(<4 x double> %x) {
+; CHECK-LABEL: isposinf_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 128
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <4 x i1> @llvm.is.fpclass.v4f64(<4 x double> %x, i32 512) ; 0x200 = "+inf"
+  ret <4 x i1> %1
+}
+
+define <8 x i1> @isneginf_v8f64(<8 x double> %x) {
+; CHECK-LABEL: isneginf_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    vand.vi v8, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <8 x i1> @llvm.is.fpclass.v8f64(<8 x double> %x, i32 4) ; "-inf"
+  ret <8 x i1> %1
+}
+
+define <16 x i1> @isfinite_v16f64(<16 x double> %x) {
+; CHECK-LABEL: isfinite_v16f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 126
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <16 x i1> @llvm.is.fpclass.v16f64(<16 x double> %x, i32 504) ; 0x1f8 = "finite"
+  ret <16 x i1> %1
+}
+
+define <16 x i1> @isposfinite_v16f64(<16 x double> %x) {
+; CHECK-LABEL: isposfinite_v16f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 112
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <16 x i1> @llvm.is.fpclass.v16f64(<16 x double> %x, i32 448) ; 0x1c0 = "+finite"
+  ret <16 x i1> %1
+}
+
+define <16 x i1> @isnegfinite_v16f64(<16 x double> %x) {
+; CHECK-LABEL: isnegfinite_v16f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    vand.vi v8, v8, 14
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <16 x i1> @llvm.is.fpclass.v16f64(<16 x double> %x, i32 56) ; 0x38 = "-finite"
+  ret <16 x i1> %1
+}
+
+define <16 x i1> @isnotfinite_v16f64(<16 x double> %x) {
+; CHECK-LABEL: isnotfinite_v16f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 897
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <16 x i1> @llvm.is.fpclass.v16f64(<16 x double> %x, i32 519) ; 0x207 = "inf|nan"
+  ret <16 x i1> %1
+}
+
+declare <2 x i1> @llvm.is.fpclass.v2f16(<2 x half>, i32)
+declare <2 x i1> @llvm.is.fpclass.v2f32(<2 x float>, i32)
+declare <4 x i1> @llvm.is.fpclass.v4f32(<4 x float>, i32)
+declare <8 x i1> @llvm.is.fpclass.v8f32(<8 x float>, i32)
+declare <16 x i1> @llvm.is.fpclass.v16f32(<16 x float>, i32)
+declare <2 x i1> @llvm.is.fpclass.v2f64(<2 x double>, i32)
+declare <4 x i1> @llvm.is.fpclass.v4f64(<4 x double>, i32)
+declare <8 x i1> @llvm.is.fpclass.v8f64(<8 x double>, i32)
+declare <16 x i1> @llvm.is.fpclass.v16f64(<16 x double>, i32)
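The fixed-length tests above go through the container round-trip in LowerIS_FPCLASS (convertToScalableVector, AND_VL, SETCC_VL, convertFromScalableVector), while the scalable-vector tests below take the VT.isScalableVector() early return, which uses a plain ISD::AND and getSetCC with VLMAX (X0) as the VL. Both paths should select the same vfclass.v/vand/vmsne sequence, differing only in the vsetvli configuration.
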
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-sdnode.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-sdnode.ll
@@ -0,0 +1,169 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 2 x i1> @isnan_nxv2f16(<vscale x 2 x half> %x) {
+; CHECK-LABEL: isnan_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 768
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 2 x i1> @llvm.is.fpclass.nxv2f16(<vscale x 2 x half> %x, i32 3) ; nan
+  ret <vscale x 2 x i1> %1
+}
+
+define <vscale x 2 x i1> @isnan_nxv2f32(<vscale x 2 x float> %x) {
+; CHECK-LABEL: isnan_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 927
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 2 x i1> @llvm.is.fpclass.nxv2f32(<vscale x 2 x float> %x, i32 639)
+  ret <vscale x 2 x i1> %1
+}
+
+
+define <vscale x 4 x i1> @isnan_nxv4f32(<vscale x 4 x float> %x) {
+; CHECK-LABEL: isnan_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 768
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 4 x i1> @llvm.is.fpclass.nxv4f32(<vscale x 4 x float> %x, i32 3) ; nan
+  ret <vscale x 4 x i1> %1
+}
+
+define <vscale x 8 x i1> @isnan_nxv8f32(<vscale x 8 x float> %x) {
+; CHECK-LABEL: isnan_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 512
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 8 x i1> @llvm.is.fpclass.nxv8f32(<vscale x 8 x float> %x, i32 2)
+  ret <vscale x 8 x i1> %1
+}
+
+define <vscale x 16 x i1> @isnan_nxv16f32(<vscale x 16 x float> %x) {
+; CHECK-LABEL: isnan_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 256
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 16 x i1> @llvm.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 1)
+  ret <vscale x 16 x i1> %1
+}
+
+define <vscale x 2 x i1> @isnormal_nxv2f64(<vscale x 2 x double> %x) {
+; CHECK-LABEL: isnormal_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 129
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 2 x i1> @llvm.is.fpclass.nxv2f64(<vscale x 2 x double> %x, i32 516) ; 0x204 = "inf"
+  ret <vscale x 2 x i1> %1
+}
+
+define <vscale x 4 x i1> @isposinf_nxv4f64(<vscale x 4 x double> %x) {
+; CHECK-LABEL: isposinf_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 128
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 4 x i1> @llvm.is.fpclass.nxv4f64(<vscale x 4 x double> %x, i32 512) ; 0x200 = "+inf"
+  ret <vscale x 4 x i1> %1
+}
+
+define <vscale x 8 x i1> @isneginf_nxv8f64(<vscale x 8 x double> %x) {
+; CHECK-LABEL: isneginf_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    vand.vi v8, v8, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 8 x i1> @llvm.is.fpclass.nxv8f64(<vscale x 8 x double> %x, i32 4) ; "-inf"
+  ret <vscale x 8 x i1> %1
+}
+
+define <vscale x 16 x i1> @isfinite_nxv16f32(<vscale x 16 x float> %x) {
+; CHECK-LABEL: isfinite_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 126
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 16 x i1> @llvm.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 504) ; 0x1f8 = "finite"
+  ret <vscale x 16 x i1> %1
+}
+
+define <vscale x 16 x i1> @isposfinite_nxv16f32(<vscale x 16 x float> %x) {
+; CHECK-LABEL: isposfinite_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 112
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 16 x i1> @llvm.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 448) ; 0x1c0 = "+finite"
+  ret <vscale x 16 x i1> %1
+}
+
+define <vscale x 16 x i1> @isnegfinite_nxv16f32(<vscale x 16 x float> %x) {
+; CHECK-LABEL: isnegfinite_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    vand.vi v8, v8, 14
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 16 x i1> @llvm.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 56) ; 0x38 = "-finite"
+  ret <vscale x 16 x i1> %1
+}
+
+define <vscale x 16 x i1> @isnotfinite_nxv16f32(<vscale x 16 x float> %x) {
+; CHECK-LABEL: isnotfinite_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfclass.v v8, v8
+; CHECK-NEXT:    li a0, 897
+; CHECK-NEXT:    vand.vx v8, v8, a0
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 16 x i1> @llvm.is.fpclass.nxv16f32(<vscale x 16 x float> %x, i32 519) ; 0x207 = "inf|nan"
+  ret <vscale x 16 x i1> %1
+}
+
+declare <vscale x 2 x i1> @llvm.is.fpclass.nxv2f16(<vscale x 2 x half>, i32)
+declare <vscale x 2 x i1> @llvm.is.fpclass.nxv2f32(<vscale x 2 x float>, i32)
+declare <vscale x 4 x i1> @llvm.is.fpclass.nxv4f32(<vscale x 4 x float>, i32)
+declare <vscale x 8 x i1> @llvm.is.fpclass.nxv8f32(<vscale x 8 x float>, i32)
+declare <vscale x 16 x i1> @llvm.is.fpclass.nxv16f32(<vscale x 16 x float>, i32)
+declare <vscale x 2 x i1> @llvm.is.fpclass.nxv2f64(<vscale x 2 x double>, i32)
+declare <vscale x 4 x i1> @llvm.is.fpclass.nxv4f64(<vscale x 4 x double>, i32)
+declare <vscale x 8 x i1> @llvm.is.fpclass.nxv8f64(<vscale x 8 x double>, i32)