diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -334,6 +334,7 @@
   STRICT_FSUB_VL,
   STRICT_FMUL_VL,
   STRICT_FDIV_VL,
+  STRICT_FSQRT_VL,
   STRICT_FP_EXTEND_VL,

   // WARNING: Do not add anything in the end unless you want the node to
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -806,7 +806,7 @@
       setOperationAction(ISD::STRICT_FP_EXTEND, VT, Custom);
       setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB, ISD::STRICT_FMUL,
-                          ISD::STRICT_FDIV},
+                          ISD::STRICT_FDIV, ISD::STRICT_FSQRT},
                          VT, Legal);
     };
@@ -1023,7 +1023,8 @@
         setOperationAction(ISD::STRICT_FP_EXTEND, VT, Custom);
         setOperationAction({ISD::STRICT_FADD, ISD::STRICT_FSUB,
-                            ISD::STRICT_FMUL, ISD::STRICT_FDIV},
+                            ISD::STRICT_FMUL, ISD::STRICT_FDIV,
+                            ISD::STRICT_FSQRT},
                            VT, Custom);
       }
@@ -4503,6 +4504,8 @@
   case ISD::STRICT_FDIV:
     return lowerToScalableOp(Op, DAG, RISCVISD::STRICT_FDIV_VL,
                              /*HasMergeOp*/ true);
+  case ISD::STRICT_FSQRT:
+    return lowerToScalableOp(Op, DAG, RISCVISD::STRICT_FSQRT_VL);
   case ISD::MGATHER:
   case ISD::VP_GATHER:
     return lowerMaskedGather(Op, DAG);
@@ -14098,6 +14101,7 @@
   NODE_NAME_CASE(STRICT_FSUB_VL)
   NODE_NAME_CASE(STRICT_FMUL_VL)
   NODE_NAME_CASE(STRICT_FDIV_VL)
+  NODE_NAME_CASE(STRICT_FSQRT_VL)
   NODE_NAME_CASE(STRICT_FP_EXTEND_VL)
   NODE_NAME_CASE(VWMUL_VL)
   NODE_NAME_CASE(VWMULU_VL)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -1005,7 +1005,7 @@
 foreach vti = AllFloatVectors in {
   // 13.8. Vector Floating-Point Square-Root Instruction
-  def : Pat<(fsqrt (vti.Vector vti.RegClass:$rs2)),
+  def : Pat<(any_fsqrt (vti.Vector vti.RegClass:$rs2)),
             (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX#"_E"#vti.SEW)
                  vti.RegClass:$rs2, vti.AVL, vti.Log2SEW)>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -111,6 +111,7 @@
 def riscv_strict_fsub_vl : SDNode<"RISCVISD::STRICT_FSUB_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>;
 def riscv_strict_fmul_vl : SDNode<"RISCVISD::STRICT_FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>;
 def riscv_strict_fdiv_vl : SDNode<"RISCVISD::STRICT_FDIV_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>;
+def riscv_strict_fsqrt_vl : SDNode<"RISCVISD::STRICT_FSQRT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>;

 def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                  [(riscv_fadd_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
@@ -124,6 +125,9 @@
 def any_riscv_fdiv_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                  [(riscv_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl),
                                   (riscv_strict_fdiv_vl node:$lhs, node:$rhs, node:$merge, node:$mask, node:$vl)]>;
+def any_riscv_fsqrt_vl : PatFrags<(ops node:$src, node:$mask, node:$vl),
+                                  [(riscv_fsqrt_vl node:$src, node:$mask, node:$vl),
+                                   (riscv_strict_fsqrt_vl node:$src, node:$mask, node:$vl)]>;

 def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>,
                                               SDTCisSameAs<0, 2>,
@@ -1813,7 +1817,7 @@
 foreach vti = AllFloatVectors in {
   // 13.8. Vector Floating-Point Square-Root Instruction
-  def : Pat<(riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask V0),
+  def : Pat<(any_riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask V0),
             VLOpFrag),
            (!cast<Instruction>("PseudoVFSQRT_V_"# vti.LMul.MX # "_E" # vti.SEW # "_MASK")
                 (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll
@@ -0,0 +1,150 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare <2 x half> @llvm.experimental.constrained.sqrt.v2f16(<2 x half>, metadata, metadata)
+
+define <2 x half> @vfsqrt_v2f16(<2 x half> %v) {
+; CHECK-LABEL: vfsqrt_v2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <2 x half> @llvm.experimental.constrained.sqrt.v2f16(<2 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <2 x half> %r
+}
+
+declare <4 x half> @llvm.experimental.constrained.sqrt.v4f16(<4 x half>, metadata, metadata)
+
+define <4 x half> @vfsqrt_v4f16(<4 x half> %v) {
+; CHECK-LABEL: vfsqrt_v4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <4 x half> @llvm.experimental.constrained.sqrt.v4f16(<4 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <4 x half> %r
+}
+
+declare <8 x half> @llvm.experimental.constrained.sqrt.v8f16(<8 x half>, metadata, metadata)
+
+define <8 x half> @vfsqrt_v8f16(<8 x half> %v) {
+; CHECK-LABEL: vfsqrt_v8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <8 x half> @llvm.experimental.constrained.sqrt.v8f16(<8 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <8 x half> %r
+}
+
+declare <16 x half> @llvm.experimental.constrained.sqrt.v16f16(<16 x half>, metadata, metadata)
+
+define <16 x half> @vfsqrt_v16f16(<16 x half> %v) {
+; CHECK-LABEL: vfsqrt_v16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <16 x half> @llvm.experimental.constrained.sqrt.v16f16(<16 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <16 x half> %r
+}
+
+declare <32 x half> @llvm.experimental.constrained.sqrt.v32f16(<32 x half>, metadata, metadata)
+
+define <32 x half> @vfsqrt_v32f16(<32 x half> %v) {
+; CHECK-LABEL: vfsqrt_v32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 32
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <32 x half> @llvm.experimental.constrained.sqrt.v32f16(<32 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <32 x half> %r
+}
+
+declare <2 x float> @llvm.experimental.constrained.sqrt.v2f32(<2 x float>, metadata, metadata)
+
+define <2 x float> @vfsqrt_v2f32(<2 x float> %v) {
+; CHECK-LABEL: vfsqrt_v2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <2 x float> @llvm.experimental.constrained.sqrt.v2f32(<2 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <2 x float> %r
+}
+
+declare <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float>, metadata, metadata)
+
+define <4 x float> @vfsqrt_v4f32(<4 x float> %v) {
+; CHECK-LABEL: vfsqrt_v4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <4 x float> %r
+}
+
+declare <8 x float> @llvm.experimental.constrained.sqrt.v8f32(<8 x float>, metadata, metadata)
+
+define <8 x float> @vfsqrt_v8f32(<8 x float> %v) {
+; CHECK-LABEL: vfsqrt_v8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <8 x float> @llvm.experimental.constrained.sqrt.v8f32(<8 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <8 x float> %r
+}
+
+declare <16 x float> @llvm.experimental.constrained.sqrt.v16f32(<16 x float>, metadata, metadata)
+
+define <16 x float> @vfsqrt_v16f32(<16 x float> %v) {
+; CHECK-LABEL: vfsqrt_v16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <16 x float> @llvm.experimental.constrained.sqrt.v16f32(<16 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <16 x float> %r
+}
+
+declare <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double>, metadata, metadata)
+
+define <2 x double> @vfsqrt_v2f64(<2 x double> %v) {
+; CHECK-LABEL: vfsqrt_v2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <2 x double> %r
+}
+
+declare <4 x double> @llvm.experimental.constrained.sqrt.v4f64(<4 x double>, metadata, metadata)
+
+define <4 x double> @vfsqrt_v4f64(<4 x double> %v) {
+; CHECK-LABEL: vfsqrt_v4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <4 x double> @llvm.experimental.constrained.sqrt.v4f64(<4 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <4 x double> %r
+}
+
+declare <8 x double> @llvm.experimental.constrained.sqrt.v8f64(<8 x double>, metadata, metadata)
+
+define <8 x double> @vfsqrt_v8f64(<8 x double> %v) {
+; CHECK-LABEL: vfsqrt_v8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <8 x double> @llvm.experimental.constrained.sqrt.v8f64(<8 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <8 x double> %r
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll
@@ -0,0 +1,185 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x half> @llvm.experimental.constrained.sqrt.nxv1f16(<vscale x 1 x half>, metadata, metadata)
+
+define <vscale x 1 x half> @vfsqrt_nxv1f16(<vscale x 1 x half> %v) {
+; CHECK-LABEL: vfsqrt_nxv1f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x half> @llvm.experimental.constrained.sqrt.nxv1f16(<vscale x 1 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 1 x half> %r
+}
+
+declare <vscale x 2 x half> @llvm.experimental.constrained.sqrt.nxv2f16(<vscale x 2 x half>, metadata, metadata)
+
+define <vscale x 2 x half> @vfsqrt_nxv2f16(<vscale x 2 x half> %v) {
+; CHECK-LABEL: vfsqrt_nxv2f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x half> @llvm.experimental.constrained.sqrt.nxv2f16(<vscale x 2 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 2 x half> %r
+}
+
+declare <vscale x 4 x half> @llvm.experimental.constrained.sqrt.nxv4f16(<vscale x 4 x half>, metadata, metadata)
+
+define <vscale x 4 x half> @vfsqrt_nxv4f16(<vscale x 4 x half> %v) {
+; CHECK-LABEL: vfsqrt_nxv4f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x half> @llvm.experimental.constrained.sqrt.nxv4f16(<vscale x 4 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 4 x half> %r
+}
+
+declare <vscale x 8 x half> @llvm.experimental.constrained.sqrt.nxv8f16(<vscale x 8 x half>, metadata, metadata)
+
+define <vscale x 8 x half> @vfsqrt_nxv8f16(<vscale x 8 x half> %v) {
+; CHECK-LABEL: vfsqrt_nxv8f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x half> @llvm.experimental.constrained.sqrt.nxv8f16(<vscale x 8 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 8 x half> %r
+}
+
+declare <vscale x 16 x half> @llvm.experimental.constrained.sqrt.nxv16f16(<vscale x 16 x half>, metadata, metadata)
+
+define <vscale x 16 x half> @vfsqrt_nxv16f16(<vscale x 16 x half> %v) {
+; CHECK-LABEL: vfsqrt_nxv16f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x half> @llvm.experimental.constrained.sqrt.nxv16f16(<vscale x 16 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 16 x half> %r
+}
+
+declare <vscale x 32 x half> @llvm.experimental.constrained.sqrt.nxv32f16(<vscale x 32 x half>, metadata, metadata)
+
+define <vscale x 32 x half> @vfsqrt_nxv32f16(<vscale x 32 x half> %v) {
+; CHECK-LABEL: vfsqrt_nxv32f16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 32 x half> @llvm.experimental.constrained.sqrt.nxv32f16(<vscale x 32 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 32 x half> %r
+}
+
+declare <vscale x 1 x float> @llvm.experimental.constrained.sqrt.nxv1f32(<vscale x 1 x float>, metadata, metadata)
+
+define <vscale x 1 x float> @vfsqrt_nxv1f32(<vscale x 1 x float> %v) {
+; CHECK-LABEL: vfsqrt_nxv1f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x float> @llvm.experimental.constrained.sqrt.nxv1f32(<vscale x 1 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 1 x float> %r
+}
+
+declare <vscale x 2 x float> @llvm.experimental.constrained.sqrt.nxv2f32(<vscale x 2 x float>, metadata, metadata)
+
+define <vscale x 2 x float> @vfsqrt_nxv2f32(<vscale x 2 x float> %v) {
+; CHECK-LABEL: vfsqrt_nxv2f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x float> @llvm.experimental.constrained.sqrt.nxv2f32(<vscale x 2 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 2 x float> %r
+}
+
+declare <vscale x 4 x float> @llvm.experimental.constrained.sqrt.nxv4f32(<vscale x 4 x float>, metadata, metadata)
+
+define <vscale x 4 x float> @vfsqrt_nxv4f32(<vscale x 4 x float> %v) {
+; CHECK-LABEL: vfsqrt_nxv4f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x float> @llvm.experimental.constrained.sqrt.nxv4f32(<vscale x 4 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 4 x float> %r
+}
+
+declare <vscale x 8 x float> @llvm.experimental.constrained.sqrt.nxv8f32(<vscale x 8 x float>, metadata, metadata)
+
+define <vscale x 8 x float> @vfsqrt_nxv8f32(<vscale x 8 x float> %v) {
+; CHECK-LABEL: vfsqrt_nxv8f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x float> @llvm.experimental.constrained.sqrt.nxv8f32(<vscale x 8 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 8 x float> %r
+}
+
+declare <vscale x 16 x float> @llvm.experimental.constrained.sqrt.nxv16f32(<vscale x 16 x float>, metadata, metadata)
+
+define <vscale x 16 x float> @vfsqrt_nxv16f32(<vscale x 16 x float> %v) {
+; CHECK-LABEL: vfsqrt_nxv16f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x float> @llvm.experimental.constrained.sqrt.nxv16f32(<vscale x 16 x float> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 16 x float> %r
+}
+
+declare <vscale x 1 x double> @llvm.experimental.constrained.sqrt.nxv1f64(<vscale x 1 x double>, metadata, metadata)
+
+define <vscale x 1 x double> @vfsqrt_nxv1f64(<vscale x 1 x double> %v) {
+; CHECK-LABEL: vfsqrt_nxv1f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x double> @llvm.experimental.constrained.sqrt.nxv1f64(<vscale x 1 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 1 x double> %r
+}
+
+declare <vscale x 2 x double> @llvm.experimental.constrained.sqrt.nxv2f64(<vscale x 2 x double>, metadata, metadata)
+
+define <vscale x 2 x double> @vfsqrt_nxv2f64(<vscale x 2 x double> %v) {
+; CHECK-LABEL: vfsqrt_nxv2f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x double> @llvm.experimental.constrained.sqrt.nxv2f64(<vscale x 2 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 2 x double> %r
+}
+
+declare <vscale x 4 x double> @llvm.experimental.constrained.sqrt.nxv4f64(<vscale x 4 x double>, metadata, metadata)
+
+define <vscale x 4 x double> @vfsqrt_nxv4f64(<vscale x 4 x double> %v) {
+; CHECK-LABEL: vfsqrt_nxv4f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x double> @llvm.experimental.constrained.sqrt.nxv4f64(<vscale x 4 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 4 x double> %r
+}
+
+declare <vscale x 8 x double> @llvm.experimental.constrained.sqrt.nxv8f64(<vscale x 8 x double>, metadata, metadata)
+
+define <vscale x 8 x double> @vfsqrt_nxv8f64(<vscale x 8 x double> %v) {
+; CHECK-LABEL: vfsqrt_nxv8f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vfsqrt.v v8, v8
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x double> @llvm.experimental.constrained.sqrt.nxv8f64(<vscale x 8 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
+  ret <vscale x 8 x double> %r
+}