diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -479,6 +479,11 @@
 def HasVInstructionsF16 : Predicate<"Subtarget->hasVInstructionsF16()">;
+def HasVInstructionsF16Minimal : Predicate<"Subtarget->hasVInstructionsF16Minimal()">,
+      AssemblerPredicate<(any_of FeatureStdExtZvfhmin, FeatureStdExtZvfh),
+                         "'Zvfhmin' (Vector Half-Precision Floating-Point Minimal) or "
+                         "'Zvfh' (Vector Half-Precision Floating-Point)">;
+
 def HasStdExtZfhOrZvfh : Predicate<"Subtarget->hasStdExtZfh() || Subtarget->hasStdExtZvfh()">,
                          AssemblerPredicate<(any_of FeatureStdExtZfh, FeatureStdExtZvfh),
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -181,7 +181,7 @@
       addRegClassForRVV(VT);
     }
-    if (Subtarget.hasVInstructionsF16())
+    if (Subtarget.hasVInstructionsF16Minimal())
      for (MVT VT : F16VecVTs)
        addRegClassForRVV(VT);
@@ -851,6 +851,12 @@
          continue;
        SetCommonVFPActions(VT);
      }
+    } else if (Subtarget.hasVInstructionsF16Minimal()) {
+      for (MVT VT : F16VecVTs) {
+        setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
+        setOperationAction({ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND}, VT, Custom);
+        // TODO: make others promote?
+      }
    }
    if (Subtarget.hasVInstructionsF32()) {
@@ -1008,6 +1014,14 @@
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;
+        if (VT.getVectorElementType() == MVT::f16 &&
+            !Subtarget.hasVInstructionsF16()) {
+          setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
+          setOperationAction({ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND}, VT, Custom);
+          // TODO: make others promote?
+          continue;
+        }
+
        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
@@ -2143,7 +2157,7 @@
      return false;
    break;
  case MVT::f16:
-    if (!Subtarget.hasVInstructionsF16())
+    if (!Subtarget.hasVInstructionsF16Minimal())
      return false;
    break;
  case MVT::f32:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -5416,11 +5416,13 @@
  {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;
-    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
-                                 GetVTypePredicates<fwti>.Predicates) in
-    defm : VPatConversionTA;
+    // Define vfwcvt.f.f.v for f16 when Zvfhmin is enabled.
+    let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
+                         !listconcat(GetVTypePredicates<fvti>.Predicates,
+                                     GetVTypePredicates<fwti>.Predicates)) in
+    defm : VPatConversionTA;
  }
 }
@@ -5450,8 +5452,9 @@
  }
 }
-multiclass VPatConversionVF_WF<string intrinsic, string instruction> {
-  foreach fvtiToFWti = AllWidenableFloatVectors in
+multiclass VPatConversionVF_WF<string intrinsic, string instruction,
+                               list<VTypeInfoToWide> wlist = AllWidenableFloatVectors> {
+  foreach fvtiToFWti = wlist in
  {
    defvar fvti = fvtiToFWti.Vti;
    defvar fwti = fvtiToFWti.Wti;
@@ -6677,7 +6680,16 @@
 defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_x_f_w", "PseudoVFNCVT_RTZ_X_F">;
 defm : VPatConversionVF_WI <"int_riscv_vfncvt_f_xu_w", "PseudoVFNCVT_F_XU">;
 defm : VPatConversionVF_WI <"int_riscv_vfncvt_f_x_w", "PseudoVFNCVT_F_X">;
-defm : VPatConversionVF_WF<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F">;
+defvar WidenableFloatVectorsExceptF16 = !filter(fvtiToFWti, AllWidenableFloatVectors,
+                                                !ne(fvtiToFWti.Vti.Scalar, f16));
+defm : VPatConversionVF_WF<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F",
+                           WidenableFloatVectorsExceptF16>;
+// Define vfncvt.f.f.w for f16 when Zvfhmin is enabled.
+defvar F16WidenableFloatVectors = !filter(fvtiToFWti, AllWidenableFloatVectors,
+                                          !eq(fvtiToFWti.Vti.Scalar, f16));
+let Predicates = [HasVInstructionsF16Minimal] in
+defm : VPatConversionVF_WF<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F",
+                           F16WidenableFloatVectors>;
 defm : VPatConversionVF_WF<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F">;
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -1185,8 +1185,9 @@
 foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
-  let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
-                               GetVTypePredicates<fwti>.Predicates) in
+  let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
+                       !listconcat(GetVTypePredicates<fvti>.Predicates,
+                                   GetVTypePredicates<fwti>.Predicates)) in
  def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))),
            (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX)
                fwti.RegClass:$rs1, fvti.AVL, fvti.Log2SEW)>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -2132,8 +2132,9 @@
 foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
-  let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
-                               GetVTypePredicates<fwti>.Predicates) in
+  let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
+                       !listconcat(GetVTypePredicates<fvti>.Predicates,
+                                   GetVTypePredicates<fwti>.Predicates)) in
  def : Pat<(fwti.Vector (any_riscv_fpextend_vl
                             (fvti.Vector fvti.RegClass:$rs1),
                             (fvti.Mask V0),
@@ -2161,22 +2162,25 @@
 foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
-  let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
-                               GetVTypePredicates<fwti>.Predicates) in {
-  def : Pat<(fvti.Vector (any_riscv_fpround_vl
-                             (fwti.Vector fwti.RegClass:$rs1),
-                             (fwti.Mask V0), VLOpFrag)),
-            (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_MASK")
-                (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
-                (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
+  // Define vfncvt.f.f.w for f16 when Zvfhmin is enabled.
+  let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal],
+                       !listconcat(GetVTypePredicates<fvti>.Predicates,
+                                   GetVTypePredicates<fwti>.Predicates)) in
+  def : Pat<(fvti.Vector (any_riscv_fpround_vl
+                             (fwti.Vector fwti.RegClass:$rs1),
+                             (fwti.Mask V0), VLOpFrag)),
+            (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_MASK")
+                (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
+                (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
+  let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
+                               GetVTypePredicates<fwti>.Predicates) in
  def : Pat<(fvti.Vector (any_riscv_fncvt_rod_vl
                             (fwti.Vector fwti.RegClass:$rs1),
                             (fwti.Mask V0), VLOpFrag)),
           (!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX#"_MASK")
               (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
               (fwti.Mask V0), GPR:$vl, fvti.Log2SEW, TA_MA)>;
-  }
 }
 // 14. Vector Reduction Operations
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -162,6 +162,9 @@
  // Vector codegen related methods.
  bool hasVInstructions() const { return HasStdExtZve32x; }
  bool hasVInstructionsI64() const { return HasStdExtZve64x; }
+  bool hasVInstructionsF16Minimal() const {
+    return HasStdExtZvfhmin || HasStdExtZvfh;
+  }
  bool hasVInstructionsF16() const { return HasStdExtZvfh; }
  // FIXME: Consider Zfinx in the future
  bool hasVInstructionsF32() const { return HasStdExtZve32f && HasStdExtF; }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfhmin,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfhmin,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
 declare <2 x float> @llvm.vp.fpext.v2f32.v2f16(<2 x half>, <2 x i1>, i32)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll
@@ -1,6 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfhmin,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfhmin,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
+
 declare <2 x half> @llvm.vp.fptrunc.v2f16.v2f32(<2 x float>, <2 x i1>, i32)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll
@@ -7,6 +7,13 @@
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfhmin,+experimental-zvfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | not --crash llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfhmin \
+; RUN:   -target-abi=ilp32d 2>&1 | FileCheck %s --check-prefixes=ZVFMIN
+; RUN: sed 's/iXLen/i64/g' %s | not --crash llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfhmin \
+; RUN:   -target-abi=lp64d 2>&1 | FileCheck %s --check-prefixes=ZVFMIN
+
+; ZVFMIN: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vfadd
+
 declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll
@@ -3,6 +3,10 @@
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfhmin \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfhmin \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
   <vscale x 1 x half>,
   <vscale x 1 x float>,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll
@@ -3,6 +3,10 @@
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfhmin,+v -target-abi=ilp32d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfhmin,+v -target-abi=lp64d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
 define <vscale x 1 x float> @vfpext_nxv1f16_nxv1f32(<vscale x 1 x half> %va) {
 ;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfhmin,+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfhmin,+v -verify-machineinstrs < %s | FileCheck %s
 declare <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll
@@ -3,6 +3,10 @@
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfhmin,+v -target-abi=ilp32d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfhmin,+v -target-abi=lp64d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
 define <vscale x 1 x half> @vfptrunc_nxv1f32_nxv1f16(<vscale x 1 x float> %va) {
 ;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfhmin,+v,+m -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfhmin,+v,+m -verify-machineinstrs < %s | FileCheck %s
 declare <vscale x 2 x half> @llvm.vp.fptrunc.nxv2f16.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll
@@ -3,6 +3,10 @@
 ; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
 ; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfhmin \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfhmin \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,