diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -235,6 +235,7 @@ FNEG_VL, FABS_VL, FSQRT_VL, + FCLASS_VL, FCOPYSIGN_VL, // Has a merge operand VFCVT_RTZ_X_F_VL, VFCVT_RTZ_XU_F_VL, @@ -815,6 +816,7 @@ SelectionDAG &DAG) const; SDValue lowerToScalableOp(SDValue Op, SelectionDAG &DAG, unsigned NewOpc, bool HasMergeOp = false, bool HasMask = true) const; + SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const; SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG, unsigned RISCVISDOpc, bool HasMergeOp = false) const; SDValue lowerLogicVPOp(SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -1038,7 +1038,8 @@ setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV, ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::FSQRT, - ISD::FMA, ISD::FMINNUM, ISD::FMAXNUM}, + ISD::FMA, ISD::FMINNUM, ISD::FMAXNUM, + ISD::IS_FPCLASS}, VT, Custom); setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom); @@ -4353,8 +4354,8 @@ return Op; } -static SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG, - const RISCVSubtarget &Subtarget) { +SDValue RISCVTargetLowering::LowerIS_FPCLASS(SDValue Op, + SelectionDAG &DAG) const { SDLoc DL(Op); MVT VT = Op.getSimpleValueType(); MVT XLenVT = Subtarget.getXLenVT(); @@ -4383,6 +4384,34 @@ TDCMask |= RISCV::FPMASK_Negative_Zero; SDValue TDCMaskV = DAG.getConstant(TDCMask, DL, XLenVT); + + if (VT.isFixedLengthVector()) { + SDValue Op0 = Op.getOperand(0); + MVT VT0 = Op.getOperand(0).getSimpleValueType(); + MVT DstVT = VT0.changeVectorElementTypeToInteger(); + MVT ContainerDstVT = getContainerForFixedLengthVector(DstVT); + auto [Mask, VL] = + getDefaultVLOps(DstVT, ContainerDstVT, DL, DAG, 
Subtarget); + MVT ContainerVT0 = getContainerForFixedLengthVector(VT0); + SDValue FPCLASS = DAG.getNode( + RISCVISD::FCLASS_VL, DL, ContainerDstVT, + {convertToScalableVector(ContainerVT0, Op0, DAG, Subtarget), + Mask, VL}, + Op->getFlags()); + MVT ContainerVT = getContainerForFixedLengthVector(VT); + TDCMaskV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerDstVT, + DAG.getUNDEF(ContainerDstVT), TDCMaskV, VL); + SDValue AND = DAG.getNode(RISCVISD::AND_VL, DL, ContainerDstVT, FPCLASS, + TDCMaskV, DAG.getUNDEF(ContainerDstVT), Mask, VL); + SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT()); + SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerDstVT, + DAG.getUNDEF(ContainerDstVT), SplatZero, VL); + SDValue VMSNE = DAG.getNode(RISCVISD::SETCC_VL, DL, ContainerVT, + {AND, SplatZero, DAG.getCondCode(ISD::SETNE), + DAG.getUNDEF(ContainerVT), Mask, VL}); + return convertFromScalableVector(VT, VMSNE, DAG, Subtarget); + } + SDValue FPCLASS = DAG.getNode(RISCVISD::FPCLASS, DL, VT, Op.getOperand(0)); SDValue AND = DAG.getNode(ISD::AND, DL, VT, FPCLASS, TDCMaskV); return DAG.getSetCC(DL, VT, AND, DAG.getConstant(0, DL, XLenVT), @@ -4501,7 +4530,7 @@ case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); case ISD::IS_FPCLASS: - return LowerIS_FPCLASS(Op, DAG, Subtarget); + return LowerIS_FPCLASS(Op, DAG); case ISD::BITREVERSE: { MVT VT = Op.getSimpleValueType(); SDLoc DL(Op); @@ -15148,6 +15177,7 @@ NODE_NAME_CASE(FNEG_VL) NODE_NAME_CASE(FABS_VL) NODE_NAME_CASE(FSQRT_VL) + NODE_NAME_CASE(FCLASS_VL) NODE_NAME_CASE(VFMADD_VL) NODE_NAME_CASE(VFNMADD_VL) NODE_NAME_CASE(VFMSUB_VL) diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -129,6 +129,14 @@ [(riscv_fsqrt_vl node:$src, node:$mask, node:$vl), (riscv_strict_fsqrt_vl node:$src, 
node:$mask, node:$vl)]>; + def riscv_fclass_vl : SDNode<"RISCVISD::FCLASS_VL", + SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisVec<0>, + SDTCisFP<1>, SDTCisVec<1>, + SDTCisSameSizeAs<0, 1>, + SDTCVecEltisVT<2, i1>, + SDTCisSameNumEltsAs<0, 2>, + SDTCisVT<3, XLenVT>]>>; + def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, @@ -2017,6 +2025,12 @@ (!cast<Instruction>("PseudoVFROUND_NOEXCEPT_V_" # vti.LMul.MX #"_MASK") (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, (vti.Mask V0), GPR:$vl, vti.Log2SEW, TA_MA)>; + + // 13.14. Vector Floating-Point Classify Instruction + def : Pat<(riscv_fclass_vl (vti.Vector vti.RegClass:$rs2), + (vti.Mask true_mask), VLOpFrag), + (!cast<Instruction>("PseudoVFCLASS_V_"# vti.LMul.MX) + vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>; } } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfclass.ll @@ -0,0 +1,91 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \ +; RUN: -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \ +; RUN: -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s + +define <2 x i1> @isnan_v2f16(<2 x half> %x) nounwind { +; CHECK-LABEL: isnan_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma +; CHECK-NEXT: vfclass.v v8, v8 +; CHECK-NEXT: li a0, 768 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: ret + %1 = call <2 x i1> @llvm.is.fpclass.v2f16(<2 x half> %x, i32 3) ; nan + ret <2 x i1> %1 +} + +define <2 x i1> @isnan_v2f32(<2 x float> %x) nounwind { +; CHECK-LABEL: isnan_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma +; CHECK-NEXT: 
vfclass.v v8, v8 +; CHECK-NEXT: li a0, 768 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: ret + %1 = call <2 x i1> @llvm.is.fpclass.v2f32(<2 x float> %x, i32 3) ; nan + ret <2 x i1> %1 +} + + +define <4 x i1> @isnan_v4f32(<4 x float> %x) nounwind { +; CHECK-LABEL: isnan_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vfclass.v v8, v8 +; CHECK-NEXT: li a0, 768 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: ret + %1 = call <4 x i1> @llvm.is.fpclass.v4f32(<4 x float> %x, i32 3) ; nan + ret <4 x i1> %1 +} + +define <8 x i1> @isnan_v8f32(<8 x float> %x) nounwind { +; CHECK-LABEL: isnan_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma +; CHECK-NEXT: vfclass.v v8, v8 +; CHECK-NEXT: li a0, 768 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: ret + %1 = call <8 x i1> @llvm.is.fpclass.v8f32(<8 x float> %x, i32 3) ; nan + ret <8 x i1> %1 +} + +define <16 x i1> @isnan_v16f32(<16 x float> %x) nounwind { +; CHECK-LABEL: isnan_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma +; CHECK-NEXT: vfclass.v v8, v8 +; CHECK-NEXT: li a0, 768 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: ret + %1 = call <16 x i1> @llvm.is.fpclass.v16f32(<16 x float> %x, i32 3) ; nan + ret <16 x i1> %1 +} + +define <2 x i1> @isnormal_v2f64(<2 x double> %x) nounwind { +; CHECK-LABEL: isnormal_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma +; CHECK-NEXT: vfclass.v v8, v8 +; CHECK-NEXT: li a0, 66 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vmsne.vi v0, v8, 0 +; CHECK-NEXT: ret + %1 = call <2 x i1> @llvm.is.fpclass.v2f64(<2 x double> %x, i32 264) ; 0x108 = "normal" + ret <2 x i1> %1 +} + +declare <2 x i1> @llvm.is.fpclass.v2f16(<2 x half>, i32) +declare <2 x i1> @llvm.is.fpclass.v2f32(<2 x float>, i32) +declare <4 x i1> @llvm.is.fpclass.v4f32(<4 
x float>, i32) +declare <8 x i1> @llvm.is.fpclass.v8f32(<8 x float>, i32) +declare <16 x i1> @llvm.is.fpclass.v16f32(<16 x float>, i32) +declare <2 x i1> @llvm.is.fpclass.v2f64(<2 x double>, i32)