diff --git a/llvm/lib/Support/RISCVISAInfo.cpp b/llvm/lib/Support/RISCVISAInfo.cpp --- a/llvm/lib/Support/RISCVISAInfo.cpp +++ b/llvm/lib/Support/RISCVISAInfo.cpp @@ -143,6 +143,7 @@ {"zve64x", RISCVExtensionVersion{1, 0}}, {"zvfh", RISCVExtensionVersion{1, 0}}, + {"zvfhmin", RISCVExtensionVersion{1, 0}}, {"zvl1024b", RISCVExtensionVersion{1, 0}}, {"zvl128b", RISCVExtensionVersion{1, 0}}, @@ -985,6 +986,7 @@ static const char *ImpliedExtsZvfbfmin[] = {"zve32f", "zfbfmin"}; static const char *ImpliedExtsZvfbfwma[] = {"zvfbfmin"}; static const char *ImpliedExtsZvfh[] = {"zve32f", "zfhmin"}; +static const char *ImpliedExtsZvfhmin[] = {"zve32f"}; static const char *ImpliedExtsZvkn[] = {"zvkb", "zvkned", "zvknhb", "zvkt"}; static const char *ImpliedExtsZvknc[] = {"zvbc", "zvkn"}; static const char *ImpliedExtsZvkng[] = {"zvkg", "zvkn"}; @@ -1051,6 +1053,7 @@ {{"zvfbfmin"}, {ImpliedExtsZvfbfmin}}, {{"zvfbfwma"}, {ImpliedExtsZvfbfwma}}, {{"zvfh"}, {ImpliedExtsZvfh}}, + {{"zvfhmin"}, {ImpliedExtsZvfhmin}}, {{"zvkn"}, {ImpliedExtsZvkn}}, {{"zvknc"}, {ImpliedExtsZvknc}}, {{"zvkng"}, {ImpliedExtsZvkng}}, diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td --- a/llvm/lib/Target/RISCV/RISCVFeatures.td +++ b/llvm/lib/Target/RISCV/RISCVFeatures.td @@ -492,8 +492,18 @@ "'Zvfh' (Vector Half-Precision Floating-Point)", [FeatureStdExtZve32f, FeatureStdExtZfhmin]>; +def FeatureStdExtZvfhmin + : SubtargetFeature<"zvfhmin", "HasStdExtZvfhmin", "true", + "'Zvfhmin' (Vector Half-Precision Floating-Point Minimal)", + [FeatureStdExtZve32f]>; + def HasVInstructionsF16 : Predicate<"Subtarget->hasVInstructionsF16()">; +def HasVInstructionsF16Minimal : Predicate<"Subtarget->hasVInstructionsF16Minimal()">, + AssemblerPredicate<(any_of FeatureStdExtZvfhmin, FeatureStdExtZvfh), + "'Zvfhmin' (Vector Half-Precision Floating-Point Minimal) or " + "'Zvfh' (Vector Half-Precision Floating-Point)">; + def HasStdExtZfhOrZvfh : Predicate<"Subtarget->hasStdExtZfh() 
|| Subtarget->hasStdExtZvfh()">, AssemblerPredicate<(any_of FeatureStdExtZfh, FeatureStdExtZvfh), diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -186,7 +186,7 @@ addRegClassForRVV(VT); } - if (Subtarget.hasVInstructionsF16()) + if (Subtarget.hasVInstructionsF16Minimal()) for (MVT VT : F16VecVTs) addRegClassForRVV(VT); @@ -910,6 +910,14 @@ continue; SetCommonVFPActions(VT); } + } else if (Subtarget.hasVInstructionsF16Minimal()) { + for (MVT VT : F16VecVTs) { + if (!isTypeLegal(VT)) + continue; + setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom); + setOperationAction({ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND}, VT, Custom); + // TODO: make others promote? + } } if (Subtarget.hasVInstructionsF32()) { @@ -1093,6 +1101,14 @@ // expansion to a build_vector of 0s. setOperationAction(ISD::UNDEF, VT, Custom); + if (VT.getVectorElementType() == MVT::f16 && + !Subtarget.hasVInstructionsF16()) { + setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom); + setOperationAction({ISD::VP_FP_ROUND, ISD::VP_FP_EXTEND}, VT, Custom); + // TODO: make others promote? + continue; + } + // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed. 
setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT, Custom); @@ -2260,7 +2276,7 @@ return false; break; case MVT::f16: - if (!Subtarget.hasVInstructionsF16()) + if (!Subtarget.hasVInstructionsF16Minimal()) return false; break; case MVT::f32: @@ -12338,10 +12354,18 @@ VL); } -static SDValue performVFMADD_VLCombine(SDNode *N, SelectionDAG &DAG) { +static SDValue performVFMADD_VLCombine(SDNode *N, SelectionDAG &DAG, + const RISCVSubtarget &Subtarget) { if (SDValue V = combineVFMADD_VLWithVFNEG_VL(N, DAG)) return V; + if (N->getValueType(0).isScalableVector() && + N->getValueType(0).getVectorElementType() == MVT::f32 && + (Subtarget.hasVInstructionsF16Minimal() && + !Subtarget.hasVInstructionsF16())) { + return SDValue(); + } + // FIXME: Ignore strict opcodes for now. if (N->isTargetStrictFPOpcode()) return SDValue(); @@ -12392,7 +12416,15 @@ N->getOperand(2), Mask, VL); } -static SDValue performVFMUL_VLCombine(SDNode *N, SelectionDAG &DAG) { +static SDValue performVFMUL_VLCombine(SDNode *N, SelectionDAG &DAG, + const RISCVSubtarget &Subtarget) { + if (N->getValueType(0).isScalableVector() && + N->getValueType(0).getVectorElementType() == MVT::f32 && + (Subtarget.hasVInstructionsF16Minimal() && + !Subtarget.hasVInstructionsF16())) { + return SDValue(); + } + // FIXME: Ignore strict opcodes for now. 
assert(!N->isTargetStrictFPOpcode() && "Unexpected opcode"); @@ -12425,7 +12457,15 @@ Op1, Merge, Mask, VL); } -static SDValue performFADDSUB_VLCombine(SDNode *N, SelectionDAG &DAG) { +static SDValue performFADDSUB_VLCombine(SDNode *N, SelectionDAG &DAG, + const RISCVSubtarget &Subtarget) { + if (N->getValueType(0).isScalableVector() && + N->getValueType(0).getVectorElementType() == MVT::f32 && + (Subtarget.hasVInstructionsF16Minimal() && + !Subtarget.hasVInstructionsF16())) { + return SDValue(); + } + SDValue Op0 = N->getOperand(0); SDValue Op1 = N->getOperand(1); SDValue Merge = N->getOperand(2); @@ -13561,12 +13601,12 @@ case RISCVISD::STRICT_VFNMADD_VL: case RISCVISD::STRICT_VFMSUB_VL: case RISCVISD::STRICT_VFNMSUB_VL: - return performVFMADD_VLCombine(N, DAG); + return performVFMADD_VLCombine(N, DAG, Subtarget); case RISCVISD::FMUL_VL: - return performVFMUL_VLCombine(N, DAG); + return performVFMUL_VLCombine(N, DAG, Subtarget); case RISCVISD::FADD_VL: case RISCVISD::FSUB_VL: - return performFADDSUB_VLCombine(N, DAG); + return performFADDSUB_VLCombine(N, DAG, Subtarget); case ISD::LOAD: case ISD::STORE: { if (DCI.isAfterLegalizeDAG()) diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -5870,11 +5870,13 @@ foreach fvtiToFWti = AllWidenableFloatVectors in { defvar fvti = fvtiToFWti.Vti; defvar fwti = fvtiToFWti.Wti; - let Predicates = !listconcat(GetVTypePredicates.Predicates, - GetVTypePredicates.Predicates) in - defm : VPatConversionTA; + // Define vfwcvt.f.f.v for f16 when Zvfhmin is enabled. 
+ let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal], + !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates)) in + defm : VPatConversionTA; } } -multiclass VPatConversionVF_WF_RM { - foreach fvtiToFWti = AllWidenableFloatVectors in { +multiclass VPatConversionVF_WF_RM wlist = AllWidenableFloatVectors> { + foreach fvtiToFWti = wlist in { defvar fvti = fvtiToFWti.Vti; defvar fwti = fvtiToFWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in @@ -7196,8 +7199,17 @@ defm : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_x_f_w", "PseudoVFNCVT_RTZ_X_F">; defm : VPatConversionVF_WI_RM <"int_riscv_vfncvt_f_xu_w", "PseudoVFNCVT_F_XU">; defm : VPatConversionVF_WI_RM <"int_riscv_vfncvt_f_x_w", "PseudoVFNCVT_F_X">; -defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F">; -defm : VPatConversionVF_WF_BF_RM<"int_riscv_vfncvtbf16_f_f_w", +defvar WidenableFloatVectorsExceptF16 = !filter(fvtiToFWti, AllWidenableFloatVectors, + !ne(fvtiToFWti.Vti.Scalar, f16)); +defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F", + WidenableFloatVectorsExceptF16>; +// Define vfncvt.f.f.w for f16 when Zvfhmin is enabled. 
+defvar F16WidenableFloatVectors = !filter(fvtiToFWti, AllWidenableFloatVectors, + !eq(fvtiToFWti.Vti.Scalar, f16)); +let Predicates = [HasVInstructionsF16Minimal] in +defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F", + F16WidenableFloatVectors>; +defm : VPatConversionVF_WF_BF_RM<"int_riscv_vfncvtbf16_f_f_w", "PseudoVFNCVTBF16_F_F">; defm : VPatConversionVF_WF<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F">; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td @@ -1401,8 +1401,9 @@ foreach fvtiToFWti = AllWidenableFloatVectors in { defvar fvti = fvtiToFWti.Vti; defvar fwti = fvtiToFWti.Wti; - let Predicates = !listconcat(GetVTypePredicates.Predicates, - GetVTypePredicates.Predicates) in + let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal], + !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates)) in def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))), (!cast("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX) (fvti.Vector (IMPLICIT_DEF)), diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td @@ -2589,8 +2589,9 @@ foreach fvtiToFWti = AllWidenableFloatVectors in { defvar fvti = fvtiToFWti.Vti; defvar fwti = fvtiToFWti.Wti; - let Predicates = !listconcat(GetVTypePredicates.Predicates, - GetVTypePredicates.Predicates) in + let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal], + !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates)) in def : Pat<(fwti.Vector (any_riscv_fpextend_vl (fvti.Vector fvti.RegClass:$rs1), (fvti.Mask V0), @@ -2619,8 +2620,10 @@ foreach fvtiToFWti = AllWidenableFloatVectors in { 
defvar fvti = fvtiToFWti.Vti; defvar fwti = fvtiToFWti.Wti; - let Predicates = !listconcat(GetVTypePredicates.Predicates, - GetVTypePredicates.Predicates) in { + // Define vfncvt.f.f.w for f16 when Zvfhmin is enabled. + let Predicates = !if(!eq(fvti.Scalar, f16), [HasVInstructionsF16Minimal], + !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates)) in { def : Pat<(fvti.Vector (any_riscv_fpround_vl (fwti.Vector fwti.RegClass:$rs1), (fwti.Mask V0), VLOpFrag)), @@ -2632,6 +2635,8 @@ FRM_DYN, GPR:$vl, fvti.Log2SEW, TA_MA)>; + let Predicates = !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates) in def : Pat<(fvti.Vector (any_riscv_fncvt_rod_vl (fwti.Vector fwti.RegClass:$rs1), (fwti.Mask V0), VLOpFrag)), diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h --- a/llvm/lib/Target/RISCV/RISCVSubtarget.h +++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h @@ -166,6 +166,9 @@ // Vector codegen related methods. bool hasVInstructions() const { return HasStdExtZve32x; } bool hasVInstructionsI64() const { return HasStdExtZve64x; } + bool hasVInstructionsF16Minimal() const { + return HasStdExtZvfhmin || HasStdExtZvfh; + } bool hasVInstructionsF16() const { return HasStdExtZvfh; } bool hasVInstructionsBF16() const { return HasStdExtZvfbfmin; } bool hasVInstructionsF32() const { return HasStdExtZve32f; } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 
-mattr=+d,+zfh,+zvfhmin,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s declare <2 x float> @llvm.vp.fpext.v2f32.v2f16(<2 x half>, <2 x i1>, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll @@ -1,6 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s + declare <2 x half> @llvm.vp.fptrunc.v2f16.v2f32(<2 x float>, <2 x i1>, i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfadd.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd.ll @@ -7,6 +7,12 @@ ; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfhmin,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s +; RUN: sed 's/iXLen/i32/g' %s | not --crash llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfhmin \ +; RUN: -target-abi=ilp32d 2>&1 | FileCheck %s --check-prefixes=ZVFMIN +; RUN: sed 's/iXLen/i64/g' %s | not --crash llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfhmin \ +; RUN: -target-abi=lp64d 2>&1 | FileCheck %s --check-prefixes=ZVFMIN + +; ZVFMIN: LLVM ERROR: Cannot select: intrinsic 
%llvm.riscv.vfadd declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( , diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f.ll @@ -3,7 +3,10 @@ ; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s - +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfhmin \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfhmin \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32( , , diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-sdnode.ll @@ -3,6 +3,10 @@ ; RUN: -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s define @vfpext_nxv1f16_nxv1f32( %va) { ; diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s +; RUN: llc 
-mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -verify-machineinstrs < %s | FileCheck %s declare @llvm.vp.fpext.nxv2f32.nxv2f16(, , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll @@ -3,6 +3,10 @@ ; RUN: -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \ ; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s define @vfptrunc_nxv1f32_nxv1f16( %va) { ; diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v,+m -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+m -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v,+m -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -verify-machineinstrs < %s | FileCheck %s declare @llvm.vp.fptrunc.nxv2f16.nxv2f32(, , i32) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-f-f.ll @@ -3,7 +3,10 @@ ; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 
-mattr=+v,+zfh,+zvfh \ ; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s - +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfhmin \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfhmin \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s declare @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16( , ,