diff --git a/llvm/docs/RISCVUsage.rst b/llvm/docs/RISCVUsage.rst
--- a/llvm/docs/RISCVUsage.rst
+++ b/llvm/docs/RISCVUsage.rst
@@ -104,8 +104,8 @@
   ``Zfh``          Supported
   ``Zfhmin``       Supported
   ``Zfinx``        Supported
-  ``Zhinx``        Assembly Support
-  ``Zhinxmin``     Assembly Support
+  ``Zhinx``        Supported
+  ``Zhinxmin``     Supported
   ``Zicbom``       Assembly Support
   ``Zicbop``       Assembly Support
   ``Zicboz``       Assembly Support
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -150,6 +150,7 @@
 def HasStdExtZhinx : Predicate<"Subtarget->hasStdExtZhinx()">,
                      AssemblerPredicate<(all_of FeatureStdExtZhinx),
                                         "'Zhinx' (Half Float in Integer)">;
+def NoStdExtZhinx : Predicate<"!Subtarget->hasStdExtZhinx()">;
 
 def HasStdExtZhinxOrZhinxmin
     : Predicate<"Subtarget->hasStdExtZhinx() || Subtarget->hasStdExtZhinxmin()">,
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -881,7 +881,7 @@
     default:
       llvm_unreachable("Unexpected size");
     case MVT::f16:
-      Opc = RISCV::FMV_H_X;
+      Opc = Subtarget->hasStdExtZhinxOrZhinxmin() ? RISCV::COPY : RISCV::FMV_H_X;
       break;
     case MVT::f32:
       Opc = Subtarget->hasStdExtZfinx() ? RISCV::COPY : RISCV::FMV_W_X;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -121,6 +121,8 @@
     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
   if (Subtarget.hasStdExtD())
     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
+  if (Subtarget.hasStdExtZhinxOrZhinxmin())
+    addRegisterClass(MVT::f16, &RISCV::GPRF16RegClass);
   if (Subtarget.hasStdExtZfinx())
     addRegisterClass(MVT::f32, &RISCV::GPRF32RegClass);
   if (Subtarget.hasStdExtZdinx()) {
@@ -360,11 +362,11 @@
       ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FRINT, ISD::FROUND,
       ISD::FROUNDEVEN};
 
-  if (Subtarget.hasStdExtZfhOrZfhmin())
+  if (Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin())
     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
 
-  if (Subtarget.hasStdExtZfhOrZfhmin()) {
-    if (Subtarget.hasStdExtZfh()) {
+  if (Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin()) {
+    if (Subtarget.hasStdExtZfhOrZhinx()) {
       setOperationAction(FPLegalNodeTypes, MVT::f16, Legal);
       setOperationAction(FPRndMode, MVT::f16,
                          Subtarget.hasStdExtZfa() ? Legal : Custom);
@@ -1079,7 +1081,7 @@
       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
       setOperationAction(ISD::BITCAST, {MVT::i8, MVT::i16, MVT::i32, MVT::i64},
                          Custom);
-      if (Subtarget.hasStdExtZfhOrZfhmin())
+      if (Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin())
         setOperationAction(ISD::BITCAST, MVT::f16, Custom);
       if (Subtarget.hasStdExtFOrZfinx())
         setOperationAction(ISD::BITCAST, MVT::f32, Custom);
@@ -1143,7 +1145,7 @@
   if (Subtarget.hasStdExtZbkb())
     setTargetDAGCombine(ISD::BITREVERSE);
 
-  if (Subtarget.hasStdExtZfhOrZfhmin())
+  if (Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin())
     setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
   if (Subtarget.hasStdExtFOrZfinx())
     setTargetDAGCombine({ISD::ZERO_EXTEND, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
@@ -1805,7 +1807,7 @@
                                        bool ForCodeSize) const {
   bool IsLegalVT = false;
   if (VT == MVT::f16)
-    IsLegalVT = Subtarget.hasStdExtZfhOrZfhmin();
+    IsLegalVT = Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin();
   else if (VT == MVT::f32)
     IsLegalVT = Subtarget.hasStdExtFOrZfinx();
   else if (VT == MVT::f64)
@@ -1872,7 +1874,7 @@
   // Use f32 to pass f16 if it is legal and Zfh/Zfhmin is not enabled.
   // We might still end up using a GPR but that will be decided based on ABI.
   if (VT == MVT::f16 && Subtarget.hasStdExtFOrZfinx() &&
-      !Subtarget.hasStdExtZfhOrZfhmin())
+      !Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin())
     return MVT::f32;
 
   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
@@ -1884,7 +1886,7 @@
   // Use f32 to pass f16 if it is legal and Zfh/Zfhmin is not enabled.
   // We might still end up using a GPR but that will be decided based on ABI.
   if (VT == MVT::f16 && Subtarget.hasStdExtFOrZfinx() &&
-      !Subtarget.hasStdExtZfhOrZfhmin())
+      !Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin())
     return 1;
 
   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
@@ -2344,7 +2346,7 @@
 
   if (!DstVT.isVector()) {
     // In absense of Zfh, promote f16 to f32, then saturate the result.
-    if (Src.getSimpleValueType() == MVT::f16 && !Subtarget.hasStdExtZfh()) {
+    if (Src.getSimpleValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx()) {
       Src = DAG.getNode(ISD::FP_EXTEND, SDLoc(Op), MVT::f32, Src);
     }
 
@@ -4436,7 +4438,7 @@
     EVT Op0VT = Op0.getValueType();
     MVT XLenVT = Subtarget.getXLenVT();
     if (VT == MVT::f16 && Op0VT == MVT::i16 &&
-        Subtarget.hasStdExtZfhOrZfhmin()) {
+        Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin()) {
       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
       return FPConv;
@@ -8808,7 +8810,7 @@
     if (IsStrict) {
       SDValue Chain = N->getOperand(0);
       // In absense of Zfh, promote f16 to f32, then convert.
-      if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh()) {
+      if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx()) {
         Op0 = DAG.getNode(ISD::STRICT_FP_EXTEND, DL, {MVT::f32, MVT::Other},
                           {Chain, Op0});
         Chain = Op0.getValue(1);
@@ -8824,7 +8826,7 @@
       return;
     }
     // In absense of Zfh, promote f16 to f32, then convert.
-    if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
+    if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx())
      Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op0);
 
    unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
@@ -8864,7 +8866,7 @@
       return;
 
     // In absense of Zfh, promote f16 to f32, then convert.
-    if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
+    if (Op0.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfhOrZhinx())
       Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op0);
 
     SDValue Res =
@@ -9175,7 +9177,7 @@
     EVT Op0VT = Op0.getValueType();
     MVT XLenVT = Subtarget.getXLenVT();
     if (VT == MVT::i16 && Op0VT == MVT::f16 &&
-        Subtarget.hasStdExtZfhOrZfhmin()) {
+        Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin()) {
       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
@@ -12741,6 +12743,7 @@
     return false;
   case RISCV::Select_GPR_Using_CC_GPR:
   case RISCV::Select_FPR16_Using_CC_GPR:
+  case RISCV::Select_FPR16INX_Using_CC_GPR:
   case RISCV::Select_FPR32_Using_CC_GPR:
   case RISCV::Select_FPR32INX_Using_CC_GPR:
   case RISCV::Select_FPR64_Using_CC_GPR:
@@ -13122,6 +13125,14 @@
     FSGNJXOpc = RISCV::FSGNJX_H;
     RC = &RISCV::FPR16RegClass;
     break;
+  case RISCV::PseudoFROUND_H_INX:
+    CmpOpc = RISCV::FLT_H_INX;
+    F2IOpc = RISCV::FCVT_W_H_INX;
+    I2FOpc = RISCV::FCVT_H_W_INX;
+    FSGNJOpc = RISCV::FSGNJ_H_INX;
+    FSGNJXOpc = RISCV::FSGNJX_H_INX;
+    RC = &RISCV::GPRF16RegClass;
+    break;
   case RISCV::PseudoFROUND_S:
     CmpOpc = RISCV::FLT_S;
     F2IOpc = RISCV::FCVT_W_S;
@@ -13243,6 +13254,7 @@
     return emitReadCycleWidePseudo(MI, BB);
   case RISCV::Select_GPR_Using_CC_GPR:
   case RISCV::Select_FPR16_Using_CC_GPR:
+  case RISCV::Select_FPR16INX_Using_CC_GPR:
   case RISCV::Select_FPR32_Using_CC_GPR:
   case RISCV::Select_FPR32INX_Using_CC_GPR:
   case RISCV::Select_FPR64_Using_CC_GPR:
@@ -13257,8 +13269,12 @@
     return emitSplitF64Pseudo(MI, BB, Subtarget);
   case RISCV::PseudoQuietFLE_H:
     return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
+  case RISCV::PseudoQuietFLE_H_INX:
+    return emitQuietFCMP(MI, BB, RISCV::FLE_H_INX, RISCV::FEQ_H_INX, Subtarget);
   case RISCV::PseudoQuietFLT_H:
     return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
+  case RISCV::PseudoQuietFLT_H_INX:
+    return emitQuietFCMP(MI, BB, RISCV::FLT_H_INX, RISCV::FEQ_H_INX, Subtarget);
   case RISCV::PseudoQuietFLE_S:
     return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
   case RISCV::PseudoQuietFLE_S_INX:
@@ -13461,6 +13477,7 @@
     return emitVFROUND_NOEXCEPT_MASK(MI, BB, RISCV::PseudoVFCVT_X_F_V_MF4_MASK,
                                      RISCV::PseudoVFCVT_F_X_V_MF4_MASK);
   case RISCV::PseudoFROUND_H:
+  case RISCV::PseudoFROUND_H_INX:
   case RISCV::PseudoFROUND_S:
   case RISCV::PseudoFROUND_S_INX:
   case RISCV::PseudoFROUND_D:
@@ -15575,9 +15592,9 @@
 
   switch (FPVT.getSimpleVT().SimpleTy) {
   case MVT::f16:
-    return Subtarget.hasStdExtZfhOrZfhmin();
+    return Subtarget.hasStdExtZfhOrZfhminOrZhinxOrZhinxmin();
   case MVT::f32:
-    return Subtarget.hasStdExtF();
+    return Subtarget.hasStdExtFOrZfinx();
   case MVT::f64:
     return Subtarget.hasStdExtD();
   default:
@@ -15714,7 +15731,7 @@
   switch (SVT.getSimpleVT().SimpleTy) {
   case MVT::f16:
     return VT.isVector() ? Subtarget.hasVInstructionsF16()
-                         : Subtarget.hasStdExtZfh();
+                         : Subtarget.hasStdExtZfhOrZhinx();
   case MVT::f32:
     return Subtarget.hasStdExtFOrZfinx();
   case MVT::f64:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1288,6 +1288,7 @@
   case RISCV::FSGNJ_D_INX:
   case RISCV::FSGNJ_D_IN32X:
   case RISCV::FSGNJ_S_INX:
+  case RISCV::FSGNJ_H_INX:
     // The canonical floating-point move is fsgnj rd, rs, rs.
return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() && MI.getOperand(1).getReg() == MI.getOperand(2).getReg(); @@ -1320,6 +1321,7 @@ case RISCV::FSGNJ_D_INX: case RISCV::FSGNJ_D_IN32X: case RISCV::FSGNJ_S_INX: + case RISCV::FSGNJ_H_INX: // The canonical floating-point move is fsgnj rd, rs, rs. if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() && MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td @@ -38,39 +38,42 @@ let DecoderMethod = "DecodeGPRRegisterClass"; } -def ZfhExt : ExtInfo<0, [HasStdExtZfh]>; -def Zfh64Ext : ExtInfo<0, [HasStdExtZfh, IsRV64]>; -def ZfhminExt : ExtInfo<0, [HasStdExtZfhOrZfhmin]>; -def ZhinxExt : ExtInfo<1, [HasStdExtZhinx]>; -def ZhinxminExt : ExtInfo<1, [HasStdExtZhinxOrZhinxmin]>; -def Zhinx64Ext : ExtInfo<1, [HasStdExtZhinx, IsRV64]>; +def ZfhExt : ExtInfo<0, [HasStdExtZfh]>; +def Zfh64Ext : ExtInfo<0, [HasStdExtZfh, IsRV64]>; +def ZfhminExt : ExtInfo<0, [HasStdExtZfhOrZfhmin]>; +def ZhinxExt : ExtInfo<1, [HasStdExtZhinx]>; +def ZhinxminExt : ExtInfo<1, [HasStdExtZhinxOrZhinxmin]>; +def Zhinx64Ext : ExtInfo<1, [HasStdExtZhinx, IsRV64]>; -def ZfhminDExt : ExtInfo<0, [HasStdExtZfhOrZfhmin, HasStdExtD]>; -def ZhinxminZdinxExt : ExtInfo<1, [HasStdExtZhinxOrZhinxmin, HasStdExtZdinx]>; +def ZfhminDExt : ExtInfo<0, [HasStdExtZfhOrZfhmin, HasStdExtD]>; +def ZhinxminZdinxExt : ExtInfo<1, [HasStdExtZhinxOrZhinxmin, HasStdExtZdinx, IsRV64]>; +def ZhinxminZdinx32Ext : ExtInfo<2, [HasStdExtZhinxOrZhinxmin, HasStdExtZdinx, IsRV32]>; def H : ExtInfo_r; def H_INX : ExtInfo_r; -def HH : ExtInfo_rr; -def HH_INX : ExtInfo_rr; -def XH : ExtInfo_rr; -def XH_INX : ExtInfo_rr; -def HX : ExtInfo_rr; -def HX_INX : ExtInfo_rr; -def XH_64 : ExtInfo_rr; -def HX_64 : ExtInfo_rr; -def XH_INX_64 : ExtInfo_rr; -def HX_INX_64 : ExtInfo_rr; -def HFmin : ExtInfo_rr; -def HF_INXmin : ExtInfo_rr; -def HF_INX : ExtInfo_rr; -def FHmin : ExtInfo_rr; -def FH_INXmin : ExtInfo_rr; -def FH_INX : ExtInfo_rr; -def DHmin : ExtInfo_rr; -def DH_INXmin : ExtInfo_rr; -def HDmin : ExtInfo_rr; -def HD_INXmin : ExtInfo_rr; +def HH : ExtInfo_rr; +def HH_INX : ExtInfo_rr; +def XH : ExtInfo_rr; +def XH_INX : ExtInfo_rr; +def HX : ExtInfo_rr; +def HX_INX : ExtInfo_rr; +def XH_64 : ExtInfo_rr; +def HX_64 : ExtInfo_rr; +def XH_INX_64 : ExtInfo_rr; +def HX_INX_64 : ExtInfo_rr; +def HFmin : ExtInfo_rr; +def HF_INXmin : ExtInfo_rr; +def HF_INX : ExtInfo_rr; +def FHmin : ExtInfo_rr; +def FH_INXmin : ExtInfo_rr; +def FH_INX : ExtInfo_rr; +def DHmin : ExtInfo_rr; +def DH_INXmin : ExtInfo_rr; +def DH_INX32min : ExtInfo_rr; +def HDmin : ExtInfo_rr; +def HD_INXmin : ExtInfo_rr; +def HD_INX32min : ExtInfo_rr; defvar HINX = [H, H_INX]; defvar HHINX = [HH, HH_INX]; @@ -80,8 +83,8 @@ defvar HXIN64X = [HX_64, HX_INX_64]; defvar HFINXmin = [HFmin, HF_INXmin]; defvar FHINXmin = [FHmin, FH_INXmin]; -defvar DHINXmin = [DHmin, DH_INXmin]; -defvar HDINXmin = [HDmin, HD_INXmin]; +defvar DHINXmin = [DHmin, DH_INXmin, DH_INX32min]; +defvar HDINXmin = [HDmin, HD_INXmin, HD_INX32min]; //===----------------------------------------------------------------------===// // Instructions @@ -227,7 +230,12 @@ (FLT_H_INX GPR:$rd, FPR16INX:$rt, FPR16INX:$rs), 0>; def : InstAlias<"fge.h $rd, $rs, $rt", (FLE_H_INX GPR:$rd, FPR16INX:$rt, FPR16INX:$rs), 0>; -} // Predicates = [HasStdExtZhinx] + +let usesCustomInserter = 1 in { +def 
PseudoQuietFLE_H_INX : PseudoQuietFCMP; +def PseudoQuietFLT_H_INX : PseudoQuietFCMP; +} +} // Predicates = [HasStdExtZhinxOrZhinxmin] //===----------------------------------------------------------------------===// // Pseudo-instructions and codegen patterns @@ -278,25 +286,74 @@ // fnmadd: -(rs1 * rs2 + rs3) (the nsz flag on the FMA) def : Pat<(fneg (any_fma_nsz FPR16:$rs1, FPR16:$rs2, FPR16:$rs3)), (FNMADD_H FPR16:$rs1, FPR16:$rs2, FPR16:$rs3, FRM_DYN)>; +} // Predicates = [HasStdExtZfh] + +let Predicates = [HasStdExtZhinx] in { + +/// Float conversion operations + +// [u]int32<->float conversion patterns must be gated on IsRV32 or IsRV64, so +// are defined later. + +/// Float arithmetic operations + +def : PatFprFprDynFrm; +def : PatFprFprDynFrm; +def : PatFprFprDynFrm; +def : PatFprFprDynFrm; + +def : Pat<(any_fsqrt FPR16INX:$rs1), (FSQRT_H_INX FPR16INX:$rs1, FRM_DYN)>; + +def : Pat<(fneg FPR16INX:$rs1), (FSGNJN_H_INX $rs1, $rs1)>; +def : Pat<(fabs FPR16INX:$rs1), (FSGNJX_H_INX $rs1, $rs1)>; + +def : Pat<(riscv_fpclass FPR16INX:$rs1), (FCLASS_H_INX $rs1)>; + +def : PatFprFpr; +def : Pat<(fcopysign FPR16INX:$rs1, (fneg FPR16INX:$rs2)), (FSGNJN_H_INX $rs1, $rs2)>; +def : Pat<(fcopysign FPR16INX:$rs1, FPR32INX:$rs2), + (FSGNJ_H_INX $rs1, (FCVT_H_S_INX $rs2, FRM_DYN))>; + +// fmadd: rs1 * rs2 + rs3 +def : Pat<(any_fma FPR16INX:$rs1, FPR16INX:$rs2, FPR16INX:$rs3), + (FMADD_H_INX $rs1, $rs2, $rs3, FRM_DYN)>; + +// fmsub: rs1 * rs2 - rs3 +def : Pat<(any_fma FPR16INX:$rs1, FPR16INX:$rs2, (fneg FPR16INX:$rs3)), + (FMSUB_H_INX FPR16INX:$rs1, FPR16INX:$rs2, FPR16INX:$rs3, FRM_DYN)>; + +// fnmsub: -rs1 * rs2 + rs3 +def : Pat<(any_fma (fneg FPR16INX:$rs1), FPR16INX:$rs2, FPR16INX:$rs3), + (FNMSUB_H_INX FPR16INX:$rs1, FPR16INX:$rs2, FPR16INX:$rs3, FRM_DYN)>; + +// fnmadd: -rs1 * rs2 - rs3 +def : Pat<(any_fma (fneg FPR16INX:$rs1), FPR16INX:$rs2, (fneg FPR16INX:$rs3)), + (FNMADD_H_INX FPR16INX:$rs1, FPR16INX:$rs2, FPR16INX:$rs3, FRM_DYN)>; + +// fnmadd: -(rs1 * rs2 + rs3) (the nsz flag on the FMA) +def : Pat<(fneg (any_fma_nsz FPR16INX:$rs1, FPR16INX:$rs2, FPR16INX:$rs3)), + (FNMADD_H_INX FPR16INX:$rs1, FPR16INX:$rs2, FPR16INX:$rs3, FRM_DYN)>; +} // Predicates = [HasStdExtZhinx] // The ratified 20191213 ISA spec defines fmin and fmax in a way that matches // LLVM's fminnum and fmaxnum // . -def : PatFprFpr; -def : PatFprFpr; +defm : PatFprFpr_m; +defm : PatFprFpr_m; /// Setcc // FIXME: SETEQ/SETLT/SETLE imply nonans, can we pick better instructions for // strict versions of those. // Match non-signaling FEQ_D -def : PatSetCC; -def : PatSetCC; -def : PatSetCC; -def : PatSetCC; -def : PatSetCC; -def : PatSetCC; +defm : PatSetCC_m; +defm : PatSetCC_m; +defm : PatSetCC_m; +defm : PatSetCC_m; +defm : PatSetCC_m; +defm : PatSetCC_m; +let Predicates = [HasStdExtZfh] in { // Match signaling FEQ_H def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs2, SETEQ), (AND (FLE_H $rs1, $rs2), @@ -309,26 +366,58 @@ (FLE_H $rs1, $rs1)>; def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs1, SETOEQ), (FLE_H $rs1, $rs1)>; +} // Predicates = [HasStdExtZfh] -def : PatSetCC; -def : PatSetCC; -def : PatSetCC; -def : PatSetCC; +let Predicates = [HasStdExtZhinx] in { +// Match signaling FEQ_H +def : Pat<(strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs2, SETEQ), + (AND (FLE_H_INX $rs1, $rs2), + (FLE_H_INX $rs2, $rs1))>; +def : Pat<(strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs2, SETOEQ), + (AND (FLE_H_INX $rs1, $rs2), + (FLE_H_INX $rs2, $rs1))>; +// If both operands are the same, use a single FLE. 
+def : Pat<(strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs1, SETEQ), + (FLE_H_INX $rs1, $rs1)>; +def : Pat<(strict_fsetccs FPR16INX:$rs1, FPR16INX:$rs1, SETOEQ), + (FLE_H_INX $rs1, $rs1)>; +} // Predicates = [HasStdExtZhinx] + +defm : PatSetCC_m; +defm : PatSetCC_m; +defm : PatSetCC_m; +defm : PatSetCC_m; +let Predicates = [HasStdExtZfh] in { defm Select_FPR16 : SelectCC_GPR_rrirr; def PseudoFROUND_H : PseudoFROUND; } // Predicates = [HasStdExtZfh] +let Predicates = [HasStdExtZhinx] in { +defm Select_FPR16INX : SelectCC_GPR_rrirr; + +def PseudoFROUND_H_INX : PseudoFROUND; +} // Predicates = [HasStdExtZhinx] + let Predicates = [HasStdExtZfhOrZfhmin] in { /// Loads - defm : LdPat; /// Stores - defm : StPat; +} // Predicates = [HasStdExtZfhOrZfhmin] + +let Predicates = [HasStdExtZhinxOrZhinxmin] in { +/// Loads +def : Pat<(f16 (load GPR:$rs1)), (COPY_TO_REGCLASS (LH GPR:$rs1, 0), GPRF16)>; +/// Stores +def : Pat<(store (f16 FPR16INX:$rs2), GPR:$rs1), + (SH (COPY_TO_REGCLASS FPR16INX:$rs2, GPR), GPR:$rs1, 0)>; +} // Predicates = [HasStdExtZhinxOrZhinxmin] + +let Predicates = [HasStdExtZfhOrZfhmin] in { /// Float conversion operations // f32 -> f16, f16 -> f32 @@ -343,6 +432,21 @@ def : Pat<(fcopysign FPR32:$rs1, FPR16:$rs2), (FSGNJ_S $rs1, (FCVT_S_H $rs2))>; } // Predicates = [HasStdExtZfhOrZfhmin] +let Predicates = [HasStdExtZhinxOrZhinxmin] in { +/// Float conversion operations + +// f32 -> f16, f16 -> f32 +def : Pat<(any_fpround FPR32INX:$rs1), (FCVT_H_S_INX FPR32INX:$rs1, FRM_DYN)>; +def : Pat<(any_fpextend FPR16INX:$rs1), (FCVT_S_H_INX FPR16INX:$rs1)>; + +// Moves (no conversion) +def : Pat<(riscv_fmv_h_x GPR:$src), (COPY_TO_REGCLASS GPR:$src, GPR)>; +def : Pat<(riscv_fmv_x_anyexth FPR16INX:$src), (COPY_TO_REGCLASS FPR16INX:$src, GPR)>; +def : Pat<(riscv_fmv_x_signexth FPR16INX:$src), (COPY_TO_REGCLASS FPR16INX:$src, GPR)>; + +def : Pat<(fcopysign FPR32INX:$rs1, FPR16INX:$rs2), (FSGNJ_S_INX $rs1, (FCVT_S_H_INX $rs2))>; +} // Predicates = [HasStdExtZhinxOrZhinxmin] + let Predicates = [HasStdExtZfh, IsRV32] in { // half->[u]int. Round-to-zero must be used. def : Pat<(i32 (any_fp_to_sint FPR16:$rs1)), (FCVT_W_H $rs1, 0b001)>; @@ -363,6 +467,26 @@ def : Pat<(any_uint_to_fp (i32 GPR:$rs1)), (FCVT_H_WU $rs1, FRM_DYN)>; } // Predicates = [HasStdExtZfh, IsRV32] +let Predicates = [HasStdExtZhinx, IsRV32] in { +// half->[u]int. Round-to-zero must be used. +def : Pat<(i32 (any_fp_to_sint FPR16INX:$rs1)), (FCVT_W_H_INX $rs1, 0b001)>; +def : Pat<(i32 (any_fp_to_uint FPR16INX:$rs1)), (FCVT_WU_H_INX $rs1, 0b001)>; + +// Saturating float->[u]int32. +def : Pat<(i32 (riscv_fcvt_x FPR16INX:$rs1, timm:$frm)), (FCVT_W_H_INX $rs1, timm:$frm)>; +def : Pat<(i32 (riscv_fcvt_xu FPR16INX:$rs1, timm:$frm)), (FCVT_WU_H_INX $rs1, timm:$frm)>; + +// half->int32 with current rounding mode. +def : Pat<(i32 (any_lrint FPR16INX:$rs1)), (FCVT_W_H_INX $rs1, FRM_DYN)>; + +// half->int32 rounded to nearest with ties rounded away from zero. +def : Pat<(i32 (any_lround FPR16INX:$rs1)), (FCVT_W_H_INX $rs1, FRM_RMM)>; + +// [u]int->half. Match GCC and default to using dynamic rounding mode. +def : Pat<(any_sint_to_fp (i32 GPR:$rs1)), (FCVT_H_W_INX $rs1, FRM_DYN)>; +def : Pat<(any_uint_to_fp (i32 GPR:$rs1)), (FCVT_H_WU_INX $rs1, FRM_DYN)>; +} // Predicates = [HasStdExtZhinx, IsRV32] + let Predicates = [HasStdExtZfh, IsRV64] in { // Use target specific isd nodes to help us remember the result is sign // extended. 
Matching sext_inreg+fptoui/fptosi may cause the conversion to be @@ -393,6 +517,36 @@ def : Pat<(any_uint_to_fp (i64 GPR:$rs1)), (FCVT_H_LU $rs1, FRM_DYN)>; } // Predicates = [HasStdExtZfh, IsRV64] +let Predicates = [HasStdExtZhinx, IsRV64] in { +// Use target specific isd nodes to help us remember the result is sign +// extended. Matching sext_inreg+fptoui/fptosi may cause the conversion to be +// duplicated if it has another user that didn't need the sign_extend. +def : Pat<(riscv_any_fcvt_w_rv64 FPR16INX:$rs1, timm:$frm), (FCVT_W_H_INX $rs1, timm:$frm)>; +def : Pat<(riscv_any_fcvt_wu_rv64 FPR16INX:$rs1, timm:$frm), (FCVT_WU_H_INX $rs1, timm:$frm)>; + +// half->[u]int64. Round-to-zero must be used. +def : Pat<(i64 (any_fp_to_sint FPR16INX:$rs1)), (FCVT_L_H_INX $rs1, 0b001)>; +def : Pat<(i64 (any_fp_to_uint FPR16INX:$rs1)), (FCVT_LU_H_INX $rs1, 0b001)>; + +// Saturating float->[u]int64. +def : Pat<(i64 (riscv_fcvt_x FPR16INX:$rs1, timm:$frm)), (FCVT_L_H_INX $rs1, timm:$frm)>; +def : Pat<(i64 (riscv_fcvt_xu FPR16INX:$rs1, timm:$frm)), (FCVT_LU_H_INX $rs1, timm:$frm)>; + +// half->int64 with current rounding mode. +def : Pat<(i64 (any_lrint FPR16INX:$rs1)), (FCVT_L_H_INX $rs1, FRM_DYN)>; +def : Pat<(i64 (any_llrint FPR16INX:$rs1)), (FCVT_L_H_INX $rs1, FRM_DYN)>; + +// half->int64 rounded to nearest with ties rounded away from zero. +def : Pat<(i64 (any_lround FPR16INX:$rs1)), (FCVT_L_H_INX $rs1, FRM_RMM)>; +def : Pat<(i64 (any_llround FPR16INX:$rs1)), (FCVT_L_H_INX $rs1, FRM_RMM)>; + +// [u]int->fp. Match GCC and default to using dynamic rounding mode. +def : Pat<(any_sint_to_fp (i64 (sexti32 (i64 GPR:$rs1)))), (FCVT_H_W_INX $rs1, FRM_DYN)>; +def : Pat<(any_uint_to_fp (i64 (zexti32 (i64 GPR:$rs1)))), (FCVT_H_WU_INX $rs1, FRM_DYN)>; +def : Pat<(any_sint_to_fp (i64 GPR:$rs1)), (FCVT_H_L_INX $rs1, FRM_DYN)>; +def : Pat<(any_uint_to_fp (i64 GPR:$rs1)), (FCVT_H_LU_INX $rs1, FRM_DYN)>; +} // Predicates = [HasStdExtZhinx, IsRV64] + let Predicates = [HasStdExtZfhOrZfhmin, HasStdExtD] in { /// Float conversion operations // f64 -> f16, f16 -> f64 @@ -405,6 +559,30 @@ def : Pat<(fcopysign FPR64:$rs1, FPR16:$rs2), (FSGNJ_D $rs1, (FCVT_D_H $rs2))>; } // Predicates = [HasStdExtZfhOrZfhmin, HasStdExtD] +let Predicates = [HasStdExtZhinxOrZhinxmin, HasStdExtZdinx, IsRV32] in { +/// Float conversion operations +// f64 -> f16, f16 -> f64 +def : Pat<(any_fpround FPR64IN32X:$rs1), (FCVT_H_D_IN32X FPR64IN32X:$rs1, FRM_DYN)>; +def : Pat<(any_fpextend FPR16INX:$rs1), (FCVT_D_H_IN32X FPR16INX:$rs1)>; + +/// Float arithmetic operations +def : Pat<(fcopysign FPR16INX:$rs1, FPR64IN32X:$rs2), + (FSGNJ_H_INX $rs1, (FCVT_H_D_IN32X $rs2, 0b111))>; +def : Pat<(fcopysign FPR64IN32X:$rs1, FPR16INX:$rs2), (FSGNJ_D_IN32X $rs1, (FCVT_D_H_IN32X $rs2))>; +} // Predicates = [HasStdExtZhinxOrZhinxmin, HasStdExtZdinx, IsRV32] + +let Predicates = [HasStdExtZhinxOrZhinxmin, HasStdExtZdinx, IsRV64] in { +/// Float conversion operations +// f64 -> f16, f16 -> f64 +def : Pat<(any_fpround FPR64INX:$rs1), (FCVT_H_D_INX FPR64INX:$rs1, FRM_DYN)>; +def : Pat<(any_fpextend FPR16INX:$rs1), (FCVT_D_H_INX FPR16INX:$rs1)>; + +/// Float arithmetic operations +def : Pat<(fcopysign FPR16INX:$rs1, FPR64INX:$rs2), + (FSGNJ_H_INX $rs1, (FCVT_H_D_INX $rs2, 0b111))>; +def : Pat<(fcopysign FPR64INX:$rs1, FPR16INX:$rs2), (FSGNJ_D_INX $rs1, (FCVT_D_H_INX $rs2))>; +} // Predicates = [HasStdExtZhinxOrZhinxmin, HasStdExtZdinx, IsRV64] + let Predicates = [HasStdExtZfhmin, NoStdExtZfh, IsRV32] in { // half->[u]int. Round-to-zero must be used. 
def : Pat<(i32 (any_fp_to_sint FPR16:$rs1)), (FCVT_W_S (FCVT_S_H $rs1), FRM_RTZ)>; @@ -421,6 +599,22 @@ def : Pat<(any_uint_to_fp (i32 GPR:$rs1)), (FCVT_H_S (FCVT_S_WU $rs1, FRM_DYN), FRM_DYN)>; } // Predicates = [HasStdExtZfhmin, NoStdExtZfh, IsRV32] +let Predicates = [HasStdExtZhinxmin, NoStdExtZhinx, IsRV32] in { +// half->[u]int. Round-to-zero must be used. +def : Pat<(i32 (any_fp_to_sint FPR16INX:$rs1)), (FCVT_W_S_INX (FCVT_S_H_INX $rs1), FRM_RTZ)>; +def : Pat<(i32 (any_fp_to_uint FPR16INX:$rs1)), (FCVT_WU_S_INX (FCVT_S_H_INX $rs1), FRM_RTZ)>; + +// half->int32 with current rounding mode. +def : Pat<(i32 (any_lrint FPR16INX:$rs1)), (FCVT_W_S_INX (FCVT_S_H_INX $rs1), FRM_DYN)>; + +// half->int32 rounded to nearest with ties rounded away from zero. +def : Pat<(i32 (any_lround FPR16INX:$rs1)), (FCVT_W_S_INX (FCVT_S_H_INX $rs1), FRM_RMM)>; + +// [u]int->half. Match GCC and default to using dynamic rounding mode. +def : Pat<(any_sint_to_fp (i32 GPR:$rs1)), (FCVT_H_S_INX (FCVT_S_W_INX $rs1, FRM_DYN), FRM_DYN)>; +def : Pat<(any_uint_to_fp (i32 GPR:$rs1)), (FCVT_H_S_INX (FCVT_S_WU_INX $rs1, FRM_DYN), FRM_DYN)>; +} // Predicates = [HasStdExtZhinxmin, NoStdExtZhinx, IsRV32] + let Predicates = [HasStdExtZfhmin, NoStdExtZfh, IsRV64] in { // half->[u]int64. Round-to-zero must be used. def : Pat<(i64 (any_fp_to_sint FPR16:$rs1)), (FCVT_L_S (FCVT_S_H $rs1), FRM_RTZ)>; @@ -438,3 +632,21 @@ def : Pat<(any_sint_to_fp (i64 GPR:$rs1)), (FCVT_H_S (FCVT_S_L $rs1, FRM_DYN), FRM_DYN)>; def : Pat<(any_uint_to_fp (i64 GPR:$rs1)), (FCVT_H_S (FCVT_S_LU $rs1, FRM_DYN), FRM_DYN)>; } // Predicates = [HasStdExtZfhmin, NoStdExtZfh, IsRV64] + +let Predicates = [HasStdExtZhinxmin, NoStdExtZhinx, IsRV64] in { +// half->[u]int64. Round-to-zero must be used. +def : Pat<(i64 (any_fp_to_sint FPR16INX:$rs1)), (FCVT_L_S_INX (FCVT_S_H_INX $rs1), FRM_RTZ)>; +def : Pat<(i64 (any_fp_to_uint FPR16INX:$rs1)), (FCVT_LU_S_INX (FCVT_S_H_INX $rs1), FRM_RTZ)>; + +// half->int64 with current rounding mode. +def : Pat<(i64 (any_lrint FPR16INX:$rs1)), (FCVT_L_S_INX (FCVT_S_H_INX $rs1), FRM_DYN)>; +def : Pat<(i64 (any_llrint FPR16INX:$rs1)), (FCVT_L_S_INX (FCVT_S_H_INX $rs1), FRM_DYN)>; + +// half->int64 rounded to nearest with ties rounded away from zero. +def : Pat<(i64 (any_lround FPR16INX:$rs1)), (FCVT_L_S_INX (FCVT_S_H_INX $rs1), FRM_RMM)>; +def : Pat<(i64 (any_llround FPR16INX:$rs1)), (FCVT_L_S_INX (FCVT_S_H_INX $rs1), FRM_RMM)>; + +// [u]int->fp. Match GCC and default to using dynamic rounding mode. 
+def : Pat<(any_sint_to_fp (i64 GPR:$rs1)), (FCVT_H_S_INX (FCVT_S_L_INX $rs1, FRM_DYN), FRM_DYN)>;
+def : Pat<(any_uint_to_fp (i64 GPR:$rs1)), (FCVT_H_S_INX (FCVT_S_LU_INX $rs1, FRM_DYN), FRM_DYN)>;
+} // Predicates = [HasStdExtZhinxmin, NoStdExtZhinx, IsRV64]
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -116,6 +116,13 @@
   bool hasStdExtFOrZfinx() const { return HasStdExtF || HasStdExtZfinx; }
   bool hasStdExtDOrZdinx() const { return HasStdExtD || HasStdExtZdinx; }
   bool hasStdExtZfhOrZfhmin() const { return HasStdExtZfh || HasStdExtZfhmin; }
+  bool hasStdExtZfhOrZhinx() const { return HasStdExtZfh || HasStdExtZhinx; }
+  bool hasStdExtZhinxOrZhinxmin() const {
+    return HasStdExtZhinx || HasStdExtZhinxmin;
+  }
+  bool hasStdExtZfhOrZfhminOrZhinxOrZhinxmin() const {
+    return hasStdExtZfhOrZfhmin() || hasStdExtZhinxOrZhinxmin();
+  }
   bool is64Bit() const { return IsRV64; }
   MVT getXLenVT() const { return XLenVT; }
   unsigned getXLen() const { return XLen; }
diff --git a/llvm/test/CodeGen/RISCV/half-arith-strict.ll b/llvm/test/CodeGen/RISCV/half-arith-strict.ll
--- a/llvm/test/CodeGen/RISCV/half-arith-strict.ll
+++ b/llvm/test/CodeGen/RISCV/half-arith-strict.ll
@@ -3,12 +3,24 @@
 ; RUN:   -disable-strictnode-mutation -target-abi ilp32f < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \
 ; RUN:   -disable-strictnode-mutation -target-abi lp64f < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs \
+; RUN:   -disable-strictnode-mutation -target-abi ilp32 < %s \
+; RUN:   | FileCheck -check-prefix=CHECK-ZHINX %s
+; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \
+; RUN:   -disable-strictnode-mutation -target-abi lp64 < %s \
+; RUN:   | FileCheck -check-prefix=CHECK-ZHINX %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zfhmin -verify-machineinstrs \
 ; RUN:   -disable-strictnode-mutation -target-abi ilp32f < %s \
 ; RUN:   | FileCheck -check-prefix=CHECK-ZFHMIN %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zfhmin -verify-machineinstrs \
 ; RUN:   -disable-strictnode-mutation -target-abi lp64f < %s \
 ; RUN:   | FileCheck -check-prefix=CHECK-ZFHMIN %s
+; RUN: llc -mtriple=riscv32 -mattr=+zhinxmin -verify-machineinstrs \
+; RUN:   -disable-strictnode-mutation -target-abi ilp32 < %s \
+; RUN:   | FileCheck -check-prefix=CHECK-ZHINXMIN %s
+; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs \
+; RUN:   -disable-strictnode-mutation -target-abi lp64 < %s \
+; RUN:   | FileCheck -check-prefix=CHECK-ZHINXMIN %s
 
 ; FIXME: We can't test without Zfh because soft promote legalization isn't
 ; implemented in SelectionDAG for STRICT nodes.
@@ -19,6 +31,11 @@ ; CHECK-NEXT: fadd.h fa0, fa0, fa1 ; CHECK-NEXT: ret ; +; CHECK-ZHINX-LABEL: fadd_h: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a0, a0, a1 +; CHECK-ZHINX-NEXT: ret +; ; CHECK-ZFHMIN-LABEL: fadd_h: ; CHECK-ZFHMIN: # %bb.0: ; CHECK-ZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -26,6 +43,14 @@ ; CHECK-ZFHMIN-NEXT: fadd.s fa5, fa4, fa5 ; CHECK-ZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK-ZFHMIN-NEXT: ret +; +; CHECK-ZHINXMIN-LABEL: fadd_h: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fadd.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.fadd.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -37,6 +62,11 @@ ; CHECK-NEXT: fsub.h fa0, fa0, fa1 ; CHECK-NEXT: ret ; +; CHECK-ZHINX-LABEL: fsub_h: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fsub.h a0, a0, a1 +; CHECK-ZHINX-NEXT: ret +; ; CHECK-ZFHMIN-LABEL: fsub_h: ; CHECK-ZFHMIN: # %bb.0: ; CHECK-ZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -44,6 +74,14 @@ ; CHECK-ZFHMIN-NEXT: fsub.s fa5, fa4, fa5 ; CHECK-ZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK-ZFHMIN-NEXT: ret +; +; CHECK-ZHINXMIN-LABEL: fsub_h: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fsub.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.fsub.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -55,6 +93,11 @@ ; CHECK-NEXT: fmul.h fa0, fa0, fa1 ; CHECK-NEXT: ret ; +; CHECK-ZHINX-LABEL: fmul_h: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fmul.h a0, a0, a1 +; CHECK-ZHINX-NEXT: ret +; ; CHECK-ZFHMIN-LABEL: fmul_h: ; CHECK-ZFHMIN: # %bb.0: ; CHECK-ZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -62,6 +105,14 @@ ; CHECK-ZFHMIN-NEXT: fmul.s fa5, fa4, fa5 ; CHECK-ZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK-ZFHMIN-NEXT: ret +; +; CHECK-ZHINXMIN-LABEL: fmul_h: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fmul.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.fmul.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -73,6 +124,11 @@ ; CHECK-NEXT: fdiv.h fa0, fa0, fa1 ; CHECK-NEXT: ret ; +; CHECK-ZHINX-LABEL: fdiv_h: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fdiv.h a0, a0, a1 +; CHECK-ZHINX-NEXT: ret +; ; CHECK-ZFHMIN-LABEL: fdiv_h: ; CHECK-ZFHMIN: # %bb.0: ; CHECK-ZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -80,6 +136,14 @@ ; CHECK-ZFHMIN-NEXT: fdiv.s fa5, fa4, fa5 ; CHECK-ZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK-ZFHMIN-NEXT: ret +; +; CHECK-ZHINXMIN-LABEL: fdiv_h: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fdiv.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.fdiv.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -91,12 +155,24 @@ ; CHECK-NEXT: fsqrt.h fa0, fa0 ; CHECK-NEXT: ret ; +; CHECK-ZHINX-LABEL: fsqrt_h: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fsqrt.h a0, a0 +; CHECK-ZHINX-NEXT: ret +; ; CHECK-ZFHMIN-LABEL: fsqrt_h: ; CHECK-ZFHMIN: # %bb.0: ; 
CHECK-ZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECK-ZFHMIN-NEXT: fsqrt.s fa5, fa5 ; CHECK-ZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK-ZFHMIN-NEXT: ret +; +; CHECK-ZHINXMIN-LABEL: fsqrt_h: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fsqrt.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.sqrt.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -122,6 +198,11 @@ ; CHECK-NEXT: fmadd.h fa0, fa0, fa1, fa2 ; CHECK-NEXT: ret ; +; CHECK-ZHINX-LABEL: fmadd_h: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fmadd.h a0, a0, a1, a2 +; CHECK-ZHINX-NEXT: ret +; ; CHECK-ZFHMIN-LABEL: fmadd_h: ; CHECK-ZFHMIN: # %bb.0: ; CHECK-ZFHMIN-NEXT: fcvt.s.h fa5, fa2 @@ -130,6 +211,15 @@ ; CHECK-ZFHMIN-NEXT: fmadd.s fa5, fa3, fa4, fa5 ; CHECK-ZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK-ZFHMIN-NEXT: ret +; +; CHECK-ZHINXMIN-LABEL: fmadd_h: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.fma.f16(half %a, half %b, half %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -143,6 +233,12 @@ ; CHECK-NEXT: fmsub.h fa0, fa0, fa1, fa5 ; CHECK-NEXT: ret ; +; CHECK-ZHINX-LABEL: fmsub_h: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a2, a2, zero +; CHECK-ZHINX-NEXT: fmsub.h a0, a0, a1, a2 +; CHECK-ZHINX-NEXT: ret +; ; CHECK-ZFHMIN-LABEL: fmsub_h: ; CHECK-ZFHMIN: # %bb.0: ; CHECK-ZFHMIN-NEXT: fcvt.s.h fa5, fa2 @@ -158,6 +254,21 @@ ; CHECK-ZFHMIN-NEXT: fmadd.s fa5, fa3, fa4, fa5 ; CHECK-ZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK-ZFHMIN-NEXT: ret +; +; CHECK-ZHINXMIN-LABEL: fmsub_h: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fadd.s a2, a2, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fneg.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %c_ = fadd half 0.0, %c ; avoid negation using xor %negc = fneg half %c_ %1 = call half @llvm.experimental.constrained.fma.f16(half %a, half %b, half %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp @@ -173,6 +284,13 @@ ; CHECK-NEXT: fnmadd.h fa0, fa4, fa1, fa5 ; CHECK-NEXT: ret ; +; CHECK-ZHINX-LABEL: fnmadd_h: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a0, a0, zero +; CHECK-ZHINX-NEXT: fadd.h a2, a2, zero +; CHECK-ZHINX-NEXT: fnmadd.h a0, a0, a1, a2 +; CHECK-ZHINX-NEXT: ret +; ; CHECK-ZFHMIN-LABEL: fnmadd_h: ; CHECK-ZFHMIN: # %bb.0: ; CHECK-ZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -194,6 +312,27 @@ ; CHECK-ZFHMIN-NEXT: fmadd.s fa5, fa5, fa3, fa4 ; CHECK-ZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK-ZFHMIN-NEXT: ret +; +; CHECK-ZHINXMIN-LABEL: fnmadd_h: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fadd.s a0, a0, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fadd.s a2, a2, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fneg.s a0, a0 +; 
CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fneg.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %a_ = fadd half 0.0, %a %c_ = fadd half 0.0, %c %nega = fneg half %a_ @@ -211,6 +350,13 @@ ; CHECK-NEXT: fnmadd.h fa0, fa4, fa0, fa5 ; CHECK-NEXT: ret ; +; CHECK-ZHINX-LABEL: fnmadd_h_2: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a1, a1, zero +; CHECK-ZHINX-NEXT: fadd.h a2, a2, zero +; CHECK-ZHINX-NEXT: fnmadd.h a0, a1, a0, a2 +; CHECK-ZHINX-NEXT: ret +; ; CHECK-ZFHMIN-LABEL: fnmadd_h_2: ; CHECK-ZFHMIN: # %bb.0: ; CHECK-ZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -232,6 +378,27 @@ ; CHECK-ZFHMIN-NEXT: fmadd.s fa5, fa3, fa5, fa4 ; CHECK-ZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK-ZFHMIN-NEXT: ret +; +; CHECK-ZHINXMIN-LABEL: fnmadd_h_2: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fadd.s a1, a1, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fadd.s a2, a2, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fneg.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fneg.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %b_ = fadd half 0.0, %b %c_ = fadd half 0.0, %c %negb = fneg half %b_ @@ -248,6 +415,12 @@ ; CHECK-NEXT: fnmsub.h fa0, fa5, fa1, fa2 ; CHECK-NEXT: ret ; +; CHECK-ZHINX-LABEL: fnmsub_h: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a0, a0, zero +; CHECK-ZHINX-NEXT: fnmsub.h a0, a0, a1, a2 +; CHECK-ZHINX-NEXT: ret +; ; CHECK-ZFHMIN-LABEL: fnmsub_h: ; CHECK-ZFHMIN: # %bb.0: ; CHECK-ZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -263,6 +436,21 @@ ; CHECK-ZFHMIN-NEXT: fmadd.s fa5, fa5, fa3, fa4 ; CHECK-ZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK-ZFHMIN-NEXT: ret +; +; CHECK-ZHINXMIN-LABEL: fnmsub_h: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fadd.s a0, a0, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fneg.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %a_ = fadd half 0.0, %a %nega = fneg half %a_ %1 = call half @llvm.experimental.constrained.fma.f16(half %nega, half %b, half %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp @@ -277,6 +465,12 @@ ; CHECK-NEXT: fnmsub.h fa0, fa5, fa0, fa2 ; CHECK-NEXT: ret ; +; CHECK-ZHINX-LABEL: fnmsub_h_2: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a1, a1, zero +; CHECK-ZHINX-NEXT: fnmsub.h a0, a1, a0, a2 +; CHECK-ZHINX-NEXT: ret +; ; CHECK-ZFHMIN-LABEL: fnmsub_h_2: ; CHECK-ZFHMIN: # %bb.0: ; CHECK-ZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -292,6 +486,21 @@ ; CHECK-ZFHMIN-NEXT: fmadd.s fa5, fa3, fa5, fa4 ; CHECK-ZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK-ZFHMIN-NEXT: ret 
+; +; CHECK-ZHINXMIN-LABEL: fnmsub_h_2: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fadd.s a1, a1, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fneg.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %b_ = fadd half 0.0, %b %negb = fneg half %b_ %1 = call half @llvm.experimental.constrained.fma.f16(half %a, half %negb, half %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp diff --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll --- a/llvm/test/CodeGen/RISCV/half-arith.ll +++ b/llvm/test/CodeGen/RISCV/half-arith.ll @@ -3,6 +3,10 @@ ; RUN: -target-abi ilp32f < %s | FileCheck -check-prefix=CHECKIZFH %s ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \ ; RUN: -target-abi lp64f < %s | FileCheck -check-prefix=CHECKIZFH %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s | FileCheck -check-prefix=CHECK-ZHINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck -check-prefix=CHECK-ZHINX %s ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV32I %s ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ @@ -11,6 +15,10 @@ ; RUN: -target-abi ilp32f < %s | FileCheck -check-prefixes=CHECKIZFHMIN,CHECK-RV32-FSGNJ %s ; RUN: llc -mtriple=riscv64 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: -target-abi lp64f < %s | FileCheck --check-prefixes=CHECKIZFHMIN,CHECK-RV64-FSGNJ %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s | FileCheck --check-prefixes=CHECKZHINXMIN %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck --check-prefixes=CHECKZHINXMIN %s ; These tests are each targeted at a particular RISC-V FPU instruction. 
; Compares and conversions can be found in half-fcmp.ll and half-convert.ll @@ -23,6 +31,11 @@ ; CHECKIZFH-NEXT: fadd.h fa0, fa0, fa1 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fadd_s: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a0, a0, a1 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fadd_s: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -82,6 +95,21 @@ ; CHECKIZFHMIN-NEXT: fadd.s fa5, fa4, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fadd_s: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fadd.s a0, a0, a1 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fadd_s: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fadd.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %1 = fadd half %a, %b ret half %1 } @@ -92,6 +120,11 @@ ; CHECKIZFH-NEXT: fsub.h fa0, fa0, fa1 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fsub_s: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fsub.h a0, a0, a1 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fsub_s: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -151,6 +184,21 @@ ; CHECKIZFHMIN-NEXT: fsub.s fa5, fa4, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fsub_s: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fsub.s a0, a0, a1 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fsub_s: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fsub.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %1 = fsub half %a, %b ret half %1 } @@ -161,6 +209,11 @@ ; CHECKIZFH-NEXT: fmul.h fa0, fa0, fa1 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fmul_s: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fmul.h a0, a0, a1 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fmul_s: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -220,6 +273,21 @@ ; CHECKIZFHMIN-NEXT: fmul.s fa5, fa4, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fmul_s: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fmul.s a0, a0, a1 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fmul_s: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fmul.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %1 = fmul half %a, %b ret half %1 } @@ -230,6 +298,11 @@ ; CHECKIZFH-NEXT: fdiv.h fa0, fa0, fa1 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fdiv_s: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fdiv.h a0, a0, a1 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fdiv_s: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -289,6 +362,21 @@ ; CHECKIZFHMIN-NEXT: fdiv.s fa5, fa4, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fdiv_s: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fdiv.s a0, a0, a1 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; 
CHECK-ZHINXMIN-LABEL: fdiv_s: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fdiv.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %1 = fdiv half %a, %b ret half %1 } @@ -301,6 +389,11 @@ ; CHECKIZFH-NEXT: fsqrt.h fa0, fa0 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fsqrt_s: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fsqrt.h a0, a0 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fsqrt_s: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -333,6 +426,19 @@ ; CHECKIZFHMIN-NEXT: fsqrt.s fa5, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fsqrt_s: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fsqrt.s a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fsqrt_s: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fsqrt.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %1 = call half @llvm.sqrt.f16(half %a) ret half %1 } @@ -345,6 +451,11 @@ ; CHECKIZFH-NEXT: fsgnj.h fa0, fa0, fa1 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fsgnj_s: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fsgnj.h a0, a0, a1 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fsgnj_s: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a2, 1048568 @@ -392,6 +503,22 @@ ; CHECK-RV64-FSGNJ-NEXT: flh fa0, 0(sp) ; CHECK-RV64-FSGNJ-NEXT: addi sp, sp, 16 ; CHECK-RV64-FSGNJ-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fsgnj_s: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK-ZHINXMIN-NEXT: addi a2, sp, 12 +; CHECK-ZHINXMIN-NEXT: sh a1, 0(a2) +; CHECK-ZHINXMIN-NEXT: addi a1, sp, 8 +; CHECK-ZHINXMIN-NEXT: sh a0, 0(a1) +; CHECK-ZHINXMIN-NEXT: lbu a0, 13(sp) +; CHECK-ZHINXMIN-NEXT: lbu a2, 9(sp) +; CHECK-ZHINXMIN-NEXT: andi a0, a0, 128 +; CHECK-ZHINXMIN-NEXT: andi a2, a2, 127 +; CHECK-ZHINXMIN-NEXT: or a0, a2, a0 +; CHECK-ZHINXMIN-NEXT: sb a0, 9(sp) +; CHECK-ZHINXMIN-NEXT: lh a0, 0(a1) +; CHECK-ZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK-ZHINXMIN-NEXT: ret ; CHECKFSGNJ-LABEL: fsgnj_s: ; CHECKFSGNJ: # %bb.0: ; CHECKFSGNJ-NEXT: addi sp, sp, -16 @@ -434,6 +561,13 @@ ; CHECKIZFH-NEXT: feq.h a0, fa5, fa4 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fneg_s: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a0, a0, a0 +; CHECK-ZHINX-NEXT: fneg.h a1, a0 +; CHECK-ZHINX-NEXT: feq.h a0, a0, a1 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fneg_s: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -507,6 +641,29 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa4, fa4 ; CHECKIZFHMIN-NEXT: feq.s a0, fa5, fa4 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fneg_s: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fadd.s a0, a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fneg.s a1, a0 +; CHECKZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: feq.s a0, a0, a1 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fneg_s: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fadd.s a0, a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fneg.s a1, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: feq.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: ret %1 = fadd half %a, %a %2 
= fneg half %1 %3 = fcmp oeq half %1, %2 @@ -523,6 +680,12 @@ ; CHECKIZFH-NEXT: fsgnjn.h fa0, fa0, fa5 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fsgnjn_s: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a1, a0, a1 +; CHECK-ZHINX-NEXT: fsgnjn.h a0, a0, a1 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fsgnjn_s: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 @@ -644,6 +807,29 @@ ; CHECK-RV64-FSGNJ-NEXT: flh fa0, 0(sp) ; CHECK-RV64-FSGNJ-NEXT: addi sp, sp, 16 ; CHECK-RV64-FSGNJ-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fsgnjn_s: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a0 +; CHECK-ZHINXMIN-NEXT: fadd.s a1, a2, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fneg.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECK-ZHINXMIN-NEXT: addi a2, sp, 8 +; CHECK-ZHINXMIN-NEXT: sh a0, 0(a2) +; CHECK-ZHINXMIN-NEXT: addi a0, sp, 12 +; CHECK-ZHINXMIN-NEXT: sh a1, 0(a0) +; CHECK-ZHINXMIN-NEXT: lbu a0, 9(sp) +; CHECK-ZHINXMIN-NEXT: lbu a1, 13(sp) +; CHECK-ZHINXMIN-NEXT: andi a0, a0, 127 +; CHECK-ZHINXMIN-NEXT: andi a1, a1, 128 +; CHECK-ZHINXMIN-NEXT: or a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: sb a0, 9(sp) +; CHECK-ZHINXMIN-NEXT: lh a0, 0(a2) +; CHECK-ZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK-ZHINXMIN-NEXT: ret ; CHECKFSGNJ-LABEL: fsgnjn_s: ; CHECKFSGNJ: # %bb.0: ; CHECKFSGNJ-NEXT: addi sp, sp, -16 @@ -704,6 +890,13 @@ ; CHECKIZFH-NEXT: fadd.h fa0, fa4, fa5 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fabs_s: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a0, a0, a1 +; CHECK-ZHINX-NEXT: fabs.h a1, a0 +; CHECK-ZHINX-NEXT: fadd.h a0, a1, a0 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fabs_s: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -791,6 +984,33 @@ ; CHECKIZFHMIN-NEXT: fadd.s fa5, fa4, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fabs_s: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fadd.s a0, a0, a1 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fabs.s a1, a0 +; CHECKZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fadd.s a0, a1, a0 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fabs_s: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fadd.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fabs.s a1, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fadd.s a0, a1, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %1 = fadd half %a, %b %2 = call half @llvm.fabs.f16(half %1) %3 = fadd half %2, %1 @@ -805,6 +1025,11 @@ ; CHECKIZFH-NEXT: fmin.h fa0, fa0, fa1 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fmin_s: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fmin.h a0, a0, a1 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fmin_s: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -864,6 +1089,21 @@ ; CHECKIZFHMIN-NEXT: fmin.s fa5, fa4, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fmin_s: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; 
CHECKZHINXMIN-NEXT: fmin.s a0, a0, a1 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fmin_s: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fmin.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %1 = call half @llvm.minnum.f16(half %a, half %b) ret half %1 } @@ -876,6 +1116,11 @@ ; CHECKIZFH-NEXT: fmax.h fa0, fa0, fa1 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fmax_s: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fmax.h a0, a0, a1 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fmax_s: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -935,6 +1180,21 @@ ; CHECKIZFHMIN-NEXT: fmax.s fa5, fa4, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fmax_s: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fmax.s a0, a0, a1 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fmax_s: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fmax.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %1 = call half @llvm.maxnum.f16(half %a, half %b) ret half %1 } @@ -947,6 +1207,11 @@ ; CHECKIZFH-NEXT: fmadd.h fa0, fa0, fa1, fa2 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fmadd_s: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fmadd.h a0, a0, a1, a2 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fmadd_s: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 @@ -1021,6 +1286,23 @@ ; CHECKIZFHMIN-NEXT: fmadd.s fa5, fa3, fa4, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fmadd_s: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fmadd_s: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %1 = call half @llvm.fma.f16(half %a, half %b, half %c) ret half %1 } @@ -1033,6 +1315,12 @@ ; CHECKIZFH-NEXT: fmsub.h fa0, fa0, fa1, fa5 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fmsub_s: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a2, a2, zero +; CHECK-ZHINX-NEXT: fmsub.h a0, a0, a1, a2 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fmsub_s: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 @@ -1136,6 +1424,35 @@ ; CHECKIZFHMIN-NEXT: fmadd.s fa5, fa3, fa4, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fmsub_s: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKZHINXMIN-NEXT: fadd.s a2, a2, zero +; CHECKZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKZHINXMIN-NEXT: fneg.s a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fmsub_s: +; CHECK-ZHINXMIN: # %bb.0: +; 
CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fadd.s a2, a2, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fneg.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %c_ = fadd half 0.0, %c ; avoid negation using xor %negc = fsub half -0.0, %c_ %1 = call half @llvm.fma.f16(half %a, half %b, half %negc) @@ -1151,6 +1468,13 @@ ; CHECKIZFH-NEXT: fnmadd.h fa0, fa4, fa1, fa5 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fnmadd_s: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a0, a0, zero +; CHECK-ZHINX-NEXT: fadd.h a2, a2, zero +; CHECK-ZHINX-NEXT: fnmadd.h a0, a0, a1, a2 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fnmadd_s: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 @@ -1288,6 +1612,47 @@ ; CHECKIZFHMIN-NEXT: fmadd.s fa5, fa5, fa3, fa4 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fnmadd_s: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fadd.s a0, a0, zero +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKZHINXMIN-NEXT: fadd.s a2, a2, zero +; CHECKZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fneg.s a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKZHINXMIN-NEXT: fneg.s a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fnmadd_s: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fadd.s a0, a0, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fadd.s a2, a2, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fneg.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fneg.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %a_ = fadd half 0.0, %a %c_ = fadd half 0.0, %c %nega = fsub half -0.0, %a_ @@ -1305,6 +1670,13 @@ ; CHECKIZFH-NEXT: fnmadd.h fa0, fa4, fa0, fa5 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fnmadd_s_2: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a1, a1, zero +; CHECK-ZHINX-NEXT: fadd.h a2, a2, zero +; CHECK-ZHINX-NEXT: fnmadd.h a0, a1, a0, a2 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fnmadd_s_2: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 @@ -1442,6 +1814,47 @@ ; CHECKIZFHMIN-NEXT: fmadd.s fa5, fa3, fa5, fa4 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fnmadd_s_2: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fadd.s a1, a1, zero +; CHECKZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKZHINXMIN-NEXT: fadd.s a2, a2, zero +; 
CHECKZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fneg.s a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKZHINXMIN-NEXT: fneg.s a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fnmadd_s_2: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fadd.s a1, a1, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fadd.s a2, a2, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fneg.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fneg.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %b_ = fadd half 0.0, %b %c_ = fadd half 0.0, %c %negb = fsub half -0.0, %b_ @@ -1469,6 +1882,13 @@ ; CHECKIZFH-NEXT: fneg.h fa0, fa5 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fnmadd_s_3: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fmadd.h a0, a0, a1, a2 +; CHECK-ZHINX-NEXT: lui a1, 1048568 +; CHECK-ZHINX-NEXT: xor a0, a0, a1 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fnmadd_s_3: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 @@ -1550,6 +1970,27 @@ ; CHECKIZFHMIN-NEXT: fneg.s fa5, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fnmadd_s_3: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: lui a1, 1048568 +; CHECKZHINXMIN-NEXT: xor a0, a0, a1 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fnmadd_s_3: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: lui a1, 1048568 +; CHECK-ZHINXMIN-NEXT: xor a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: ret %1 = call half @llvm.fma.f16(half %a, half %b, half %c) %neg = fneg half %1 ret half %neg @@ -1572,6 +2013,13 @@ ; CHECKIZFH-NEXT: fnmadd.h fa0, fa0, fa1, fa2 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fnmadd_nsz: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fmadd.h a0, a0, a1, a2 +; CHECK-ZHINX-NEXT: lui a1, 1048568 +; CHECK-ZHINX-NEXT: xor a0, a0, a1 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fnmadd_nsz: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 @@ -1653,6 +2101,27 @@ ; CHECKIZFHMIN-NEXT: fneg.s fa5, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fnmadd_nsz: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: lui a1, 1048568 +; CHECKZHINXMIN-NEXT: xor a0, a0, a1 +; CHECKZHINXMIN-NEXT: ret +; 
CHECK-ZHINXMIN-LABEL: fnmadd_nsz: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: lui a1, 1048568 +; CHECK-ZHINXMIN-NEXT: xor a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: ret %1 = call nsz half @llvm.fma.f16(half %a, half %b, half %c) %neg = fneg nsz half %1 ret half %neg @@ -1666,6 +2135,12 @@ ; CHECKIZFH-NEXT: fnmsub.h fa0, fa5, fa1, fa2 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fnmsub_s: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a0, a0, zero +; CHECK-ZHINX-NEXT: fnmsub.h a0, a0, a1, a2 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fnmsub_s: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 @@ -1767,6 +2242,35 @@ ; CHECKIZFHMIN-NEXT: fmadd.s fa5, fa5, fa3, fa4 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fnmsub_s: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fadd.s a0, a0, zero +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fneg.s a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fnmsub_s: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fadd.s a0, a0, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fneg.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %a_ = fadd half 0.0, %a %nega = fsub half -0.0, %a_ %1 = call half @llvm.fma.f16(half %nega, half %b, half %c) @@ -1781,6 +2285,12 @@ ; CHECKIZFH-NEXT: fnmsub.h fa0, fa5, fa0, fa2 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fnmsub_s_2: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a1, a1, zero +; CHECK-ZHINX-NEXT: fnmsub.h a0, a1, a0, a2 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fnmsub_s_2: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 @@ -1884,6 +2394,35 @@ ; CHECKIZFHMIN-NEXT: fmadd.s fa5, fa3, fa5, fa4 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fnmsub_s_2: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fadd.s a1, a1, zero +; CHECKZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fneg.s a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fnmsub_s_2: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fadd.s a1, a1, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fneg.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h 
a0, a0 +; CHECK-ZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %b_ = fadd half 0.0, %b %negb = fsub half -0.0, %b_ %1 = call half @llvm.fma.f16(half %a, half %negb, half %c) @@ -1896,6 +2435,11 @@ ; CHECKIZFH-NEXT: fmadd.h fa0, fa0, fa1, fa2 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fmadd_s_contract: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fmadd.h a0, a0, a1, a2 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fmadd_s_contract: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 @@ -1983,6 +2527,29 @@ ; CHECKIZFHMIN-NEXT: fadd.s fa5, fa5, fa4 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fmadd_s_contract: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fmul.s a0, a0, a1 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a2 +; CHECKZHINXMIN-NEXT: fadd.s a0, a0, a1 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fmadd_s_contract: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fmul.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a2 +; CHECK-ZHINXMIN-NEXT: fadd.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %1 = fmul contract half %a, %b %2 = fadd contract half %1, %c ret half %2 @@ -1996,6 +2563,12 @@ ; CHECKIZFH-NEXT: fmsub.h fa0, fa0, fa1, fa5 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fmsub_s_contract: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a2, a2, zero +; CHECK-ZHINX-NEXT: fmsub.h a0, a0, a1, a2 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fmsub_s_contract: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 @@ -2099,6 +2672,35 @@ ; CHECKIZFHMIN-NEXT: fsub.s fa5, fa4, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fmsub_s_contract: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKZHINXMIN-NEXT: fadd.s a2, a2, zero +; CHECKZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fmul.s a0, a0, a1 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a2 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fsub.s a0, a0, a1 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fmsub_s_contract: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fadd.s a2, a2, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fmul.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fsub.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %c_ = fadd half 0.0, %c ; avoid negation using xor %1 = fmul contract half %a, %b %2 = fsub contract half %1, %c_ @@ -2115,6 +2717,14 @@ ; CHECKIZFH-NEXT: fnmadd.h fa0, fa4, fa3, fa5 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fnmadd_s_contract: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a0, a0, zero +; CHECK-ZHINX-NEXT: fadd.h a1, a1, zero +; CHECK-ZHINX-NEXT: fadd.h a2, a2, zero +; 
CHECK-ZHINX-NEXT: fnmadd.h a0, a0, a1, a2 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fnmadd_s_contract: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 @@ -2261,6 +2871,53 @@ ; CHECKIZFHMIN-NEXT: fsub.s fa5, fa5, fa4 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fnmadd_s_contract: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fadd.s a0, a0, zero +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fadd.s a1, a1, zero +; CHECKZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKZHINXMIN-NEXT: fadd.s a2, a2, zero +; CHECKZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fmul.s a0, a0, a1 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fneg.s a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a2 +; CHECKZHINXMIN-NEXT: fsub.s a0, a0, a1 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fnmadd_s_contract: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fadd.s a0, a0, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fadd.s a1, a1, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECK-ZHINXMIN-NEXT: fadd.s a2, a2, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fmul.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fneg.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a2 +; CHECK-ZHINXMIN-NEXT: fsub.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %a_ = fadd half 0.0, %a ; avoid negation using xor %b_ = fadd half 0.0, %b ; avoid negation using xor %c_ = fadd half 0.0, %c ; avoid negation using xor @@ -2279,6 +2936,13 @@ ; CHECKIZFH-NEXT: fnmsub.h fa0, fa4, fa5, fa2 ; CHECKIZFH-NEXT: ret ; +; CHECK-ZHINX-LABEL: fnmsub_s_contract: +; CHECK-ZHINX: # %bb.0: +; CHECK-ZHINX-NEXT: fadd.h a0, a0, zero +; CHECK-ZHINX-NEXT: fadd.h a1, a1, zero +; CHECK-ZHINX-NEXT: fnmsub.h a0, a0, a1, a2 +; CHECK-ZHINX-NEXT: ret +; ; RV32I-LABEL: fnmsub_s_contract: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 @@ -2399,6 +3063,41 @@ ; CHECKIZFHMIN-NEXT: fsub.s fa5, fa4, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKZHINXMIN-LABEL: fnmsub_s_contract: +; CHECKZHINXMIN: # %bb.0: +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fadd.s a0, a0, zero +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fadd.s a1, a1, zero +; CHECKZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fmul.s a0, a0, a1 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKZHINXMIN-NEXT: fcvt.s.h a1, a2 +; CHECKZHINXMIN-NEXT: fsub.s a0, a1, a0 +; CHECKZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKZHINXMIN-NEXT: ret +; CHECK-ZHINXMIN-LABEL: fnmsub_s_contract: +; CHECK-ZHINXMIN: # %bb.0: +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; 
CHECK-ZHINXMIN-NEXT: fadd.s a0, a0, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fadd.s a1, a1, zero +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fmul.s a0, a0, a1 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.s.h a1, a2 +; CHECK-ZHINXMIN-NEXT: fsub.s a0, a1, a0 +; CHECK-ZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK-ZHINXMIN-NEXT: ret %a_ = fadd half 0.0, %a ; avoid negation using xor %b_ = fadd half 0.0, %b ; avoid negation using xor %1 = fmul contract half %a_, %b_ diff --git a/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll --- a/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll +++ b/llvm/test/CodeGen/RISCV/half-bitmanip-dagcombines.ll @@ -7,12 +7,20 @@ ; RUN: | FileCheck -check-prefix=RV64I %s ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \ ; RUN: < %s | FileCheck -check-prefix=RV64IZFH %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs \ +; RUN: < %s | FileCheck -check-prefix=RV32IZHINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \ +; RUN: < %s | FileCheck -check-prefix=RV64IZHINX %s ; RUN: llc -mtriple=riscv32 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: < %s | FileCheck -check-prefix=RV32IZFHMIN %s ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ ; RUN: | FileCheck -check-prefix=RV64I %s ; RUN: llc -mtriple=riscv64 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: < %s | FileCheck -check-prefix=RV64IZFHMIN %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: < %s | FileCheck --check-prefixes=RVIZHINXMIN,RV32IZHINXMIN %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: < %s | FileCheck --check-prefixes=RVIZHINXMIN,RV64IZHINXMIN %s ; This file tests cases where simple floating point operations can be ; profitably handled though bit manipulation if a soft-float ABI is being used @@ -45,6 +53,18 @@ ; RV64IZFH-NEXT: xor a0, a0, a1 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fneg: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, 1048568 +; RV32IZHINX-NEXT: xor a0, a0, a1 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fneg: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, 1048568 +; RV64IZHINX-NEXT: xor a0, a0, a1 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: fneg: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: lui a1, 1048568 @@ -56,6 +76,12 @@ ; RV64IZFHMIN-NEXT: lui a1, 1048568 ; RV64IZFHMIN-NEXT: xor a0, a0, a1 ; RV64IZFHMIN-NEXT: ret +; +; RVIZHINXMIN-LABEL: fneg: +; RVIZHINXMIN: # %bb.0: +; RVIZHINXMIN-NEXT: lui a1, 1048568 +; RVIZHINXMIN-NEXT: xor a0, a0, a1 +; RVIZHINXMIN-NEXT: ret %1 = fneg half %a ret half %1 } @@ -87,6 +113,18 @@ ; RV64IZFH-NEXT: srli a0, a0, 49 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fabs: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: slli a0, a0, 17 +; RV32IZHINX-NEXT: srli a0, a0, 17 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fabs: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: slli a0, a0, 49 +; RV64IZHINX-NEXT: srli a0, a0, 49 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: fabs: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: slli a0, a0, 17 @@ -98,6 +136,18 @@ ; RV64IZFHMIN-NEXT: slli a0, a0, 49 ; RV64IZFHMIN-NEXT: srli a0, a0, 49 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: fabs: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: slli a0, a0, 17 +; 
RV32IZHINXMIN-NEXT: srli a0, a0, 17 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: fabs: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: slli a0, a0, 49 +; RV64IZHINXMIN-NEXT: srli a0, a0, 49 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.fabs.f16(half %a) ret half %1 } @@ -145,6 +195,16 @@ ; RV64IZFH-NEXT: fmv.x.h a0, fa5 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcopysign_fneg: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fsgnjn.h a0, a0, a1 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcopysign_fneg: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fsgnjn.h a0, a0, a1 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: fcopysign_fneg: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: addi sp, sp, -16 @@ -186,6 +246,46 @@ ; RV64IZFHMIN-NEXT: fmv.x.h a0, fa5 ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: fcopysign_fneg: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: fneg.s a1, a1 +; RV32IZHINXMIN-NEXT: fcvt.h.s a1, a1 +; RV32IZHINXMIN-NEXT: addi a2, sp, 8 +; RV32IZHINXMIN-NEXT: sh a0, 0(a2) +; RV32IZHINXMIN-NEXT: addi a0, sp, 12 +; RV32IZHINXMIN-NEXT: sh a1, 0(a0) +; RV32IZHINXMIN-NEXT: lbu a0, 9(sp) +; RV32IZHINXMIN-NEXT: lbu a1, 13(sp) +; RV32IZHINXMIN-NEXT: andi a0, a0, 127 +; RV32IZHINXMIN-NEXT: andi a1, a1, 128 +; RV32IZHINXMIN-NEXT: or a0, a0, a1 +; RV32IZHINXMIN-NEXT: sb a0, 9(sp) +; RV32IZHINXMIN-NEXT: lh a0, 0(a2) +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: fcopysign_fneg: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: fneg.s a1, a1 +; RV64IZHINXMIN-NEXT: fcvt.h.s a1, a1 +; RV64IZHINXMIN-NEXT: mv a2, sp +; RV64IZHINXMIN-NEXT: sh a0, 0(a2) +; RV64IZHINXMIN-NEXT: addi a0, sp, 8 +; RV64IZHINXMIN-NEXT: sh a1, 0(a0) +; RV64IZHINXMIN-NEXT: lbu a0, 1(sp) +; RV64IZHINXMIN-NEXT: lbu a1, 9(sp) +; RV64IZHINXMIN-NEXT: andi a0, a0, 127 +; RV64IZHINXMIN-NEXT: andi a1, a1, 128 +; RV64IZHINXMIN-NEXT: or a0, a0, a1 +; RV64IZHINXMIN-NEXT: sb a0, 1(sp) +; RV64IZHINXMIN-NEXT: lh a0, 0(a2) +; RV64IZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-NEXT: ret %1 = fneg half %b %2 = call half @llvm.copysign.f16(half %a, half %1) ret half %2 diff --git a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll --- a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll @@ -3,10 +3,18 @@ ; RUN: -target-abi ilp32f < %s | FileCheck -check-prefix=RV32IZFH %s ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \ ; RUN: -target-abi lp64f < %s | FileCheck -check-prefix=RV64IZFH %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s | FileCheck -check-prefix=RV32IZHINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck -check-prefix=RV64IZHINX %s ; RUN: llc -mtriple=riscv32 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: -target-abi ilp32f < %s | FileCheck -check-prefix=RV32IZFHMIN %s ; RUN: llc -mtriple=riscv64 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: -target-abi lp64f < %s | FileCheck -check-prefix=RV64IZFHMIN %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s | FileCheck -check-prefix=RV32IZHINXMIN %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck -check-prefix=RV64IZHINXMIN 
%s declare void @abort() declare void @exit(i32) @@ -35,6 +43,28 @@ ; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt ; +; RV32IZHINX-LABEL: br_fcmp_false: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: li a0, 1 +; RV32IZHINX-NEXT: bnez a0, .LBB0_2 +; RV32IZHINX-NEXT: # %bb.1: # %if.then +; RV32IZHINX-NEXT: ret +; RV32IZHINX-NEXT: .LBB0_2: # %if.else +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call abort@plt +; +; RV64IZHINX-LABEL: br_fcmp_false: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: li a0, 1 +; RV64IZHINX-NEXT: bnez a0, .LBB0_2 +; RV64IZHINX-NEXT: # %bb.1: # %if.then +; RV64IZHINX-NEXT: ret +; RV64IZHINX-NEXT: .LBB0_2: # %if.else +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call abort@plt +; ; RV32IZFHMIN-LABEL: br_fcmp_false: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: li a0, 1 @@ -56,6 +86,28 @@ ; RV64IZFHMIN-NEXT: addi sp, sp, -16 ; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFHMIN-NEXT: call abort@plt +; +; RV32IZHINXMIN-LABEL: br_fcmp_false: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: li a0, 1 +; RV32IZHINXMIN-NEXT: bnez a0, .LBB0_2 +; RV32IZHINXMIN-NEXT: # %bb.1: # %if.then +; RV32IZHINXMIN-NEXT: ret +; RV32IZHINXMIN-NEXT: .LBB0_2: # %if.else +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: call abort@plt +; +; RV64IZHINXMIN-LABEL: br_fcmp_false: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: li a0, 1 +; RV64IZHINXMIN-NEXT: bnez a0, .LBB0_2 +; RV64IZHINXMIN-NEXT: # %bb.1: # %if.then +; RV64IZHINXMIN-NEXT: ret +; RV64IZHINXMIN-NEXT: .LBB0_2: # %if.else +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: call abort@plt %1 = fcmp false half %a, %b br i1 %1, label %if.then, label %if.else if.then: @@ -88,6 +140,28 @@ ; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt ; +; RV32IZHINX-LABEL: br_fcmp_oeq: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: feq.h a0, a0, a1 +; RV32IZHINX-NEXT: bnez a0, .LBB1_2 +; RV32IZHINX-NEXT: # %bb.1: # %if.else +; RV32IZHINX-NEXT: ret +; RV32IZHINX-NEXT: .LBB1_2: # %if.then +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call abort@plt +; +; RV64IZHINX-LABEL: br_fcmp_oeq: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: feq.h a0, a0, a1 +; RV64IZHINX-NEXT: bnez a0, .LBB1_2 +; RV64IZHINX-NEXT: # %bb.1: # %if.else +; RV64IZHINX-NEXT: ret +; RV64IZHINX-NEXT: .LBB1_2: # %if.then +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call abort@plt +; ; RV32IZFHMIN-LABEL: br_fcmp_oeq: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -113,6 +187,32 @@ ; RV64IZFHMIN-NEXT: addi sp, sp, -16 ; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFHMIN-NEXT: call abort@plt +; +; RV32IZHINXMIN-LABEL: br_fcmp_oeq: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: feq.s a0, a0, a1 +; RV32IZHINXMIN-NEXT: bnez a0, .LBB1_2 +; RV32IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV32IZHINXMIN-NEXT: ret +; RV32IZHINXMIN-NEXT: .LBB1_2: # %if.then +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: call abort@plt +; +; RV64IZHINXMIN-LABEL: 
br_fcmp_oeq: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a1 +; RV64IZHINXMIN-NEXT: bnez a0, .LBB1_2 +; RV64IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV64IZHINXMIN-NEXT: ret +; RV64IZHINXMIN-NEXT: .LBB1_2: # %if.then +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: call abort@plt %1 = fcmp oeq half %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -148,6 +248,28 @@ ; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt ; +; RV32IZHINX-LABEL: br_fcmp_oeq_alt: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: feq.h a0, a0, a1 +; RV32IZHINX-NEXT: bnez a0, .LBB2_2 +; RV32IZHINX-NEXT: # %bb.1: # %if.else +; RV32IZHINX-NEXT: ret +; RV32IZHINX-NEXT: .LBB2_2: # %if.then +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call abort@plt +; +; RV64IZHINX-LABEL: br_fcmp_oeq_alt: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: feq.h a0, a0, a1 +; RV64IZHINX-NEXT: bnez a0, .LBB2_2 +; RV64IZHINX-NEXT: # %bb.1: # %if.else +; RV64IZHINX-NEXT: ret +; RV64IZHINX-NEXT: .LBB2_2: # %if.then +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call abort@plt +; ; RV32IZFHMIN-LABEL: br_fcmp_oeq_alt: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -173,6 +295,32 @@ ; RV64IZFHMIN-NEXT: addi sp, sp, -16 ; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFHMIN-NEXT: call abort@plt +; +; RV32IZHINXMIN-LABEL: br_fcmp_oeq_alt: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: feq.s a0, a0, a1 +; RV32IZHINXMIN-NEXT: bnez a0, .LBB2_2 +; RV32IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV32IZHINXMIN-NEXT: ret +; RV32IZHINXMIN-NEXT: .LBB2_2: # %if.then +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: call abort@plt +; +; RV64IZHINXMIN-LABEL: br_fcmp_oeq_alt: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a1 +; RV64IZHINXMIN-NEXT: bnez a0, .LBB2_2 +; RV64IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV64IZHINXMIN-NEXT: ret +; RV64IZHINXMIN-NEXT: .LBB2_2: # %if.then +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: call abort@plt %1 = fcmp oeq half %a, %b br i1 %1, label %if.then, label %if.else if.then: @@ -205,6 +353,28 @@ ; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt ; +; RV32IZHINX-LABEL: br_fcmp_ogt: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: flt.h a0, a1, a0 +; RV32IZHINX-NEXT: bnez a0, .LBB3_2 +; RV32IZHINX-NEXT: # %bb.1: # %if.else +; RV32IZHINX-NEXT: ret +; RV32IZHINX-NEXT: .LBB3_2: # %if.then +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call abort@plt +; +; RV64IZHINX-LABEL: br_fcmp_ogt: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: flt.h a0, a1, a0 +; RV64IZHINX-NEXT: bnez a0, .LBB3_2 +; RV64IZHINX-NEXT: # %bb.1: # %if.else +; RV64IZHINX-NEXT: ret +; RV64IZHINX-NEXT: .LBB3_2: # %if.then +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call abort@plt +; ; RV32IZFHMIN-LABEL: br_fcmp_ogt: ; RV32IZFHMIN: # %bb.0: ; 
RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -230,6 +400,32 @@ ; RV64IZFHMIN-NEXT: addi sp, sp, -16 ; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFHMIN-NEXT: call abort@plt +; +; RV32IZHINXMIN-LABEL: br_fcmp_ogt: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: flt.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: bnez a0, .LBB3_2 +; RV32IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV32IZHINXMIN-NEXT: ret +; RV32IZHINXMIN-NEXT: .LBB3_2: # %if.then +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: call abort@plt +; +; RV64IZHINXMIN-LABEL: br_fcmp_ogt: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: flt.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: bnez a0, .LBB3_2 +; RV64IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV64IZHINXMIN-NEXT: ret +; RV64IZHINXMIN-NEXT: .LBB3_2: # %if.then +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: call abort@plt %1 = fcmp ogt half %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -262,6 +458,28 @@ ; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt ; +; RV32IZHINX-LABEL: br_fcmp_oge: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fle.h a0, a1, a0 +; RV32IZHINX-NEXT: bnez a0, .LBB4_2 +; RV32IZHINX-NEXT: # %bb.1: # %if.else +; RV32IZHINX-NEXT: ret +; RV32IZHINX-NEXT: .LBB4_2: # %if.then +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call abort@plt +; +; RV64IZHINX-LABEL: br_fcmp_oge: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fle.h a0, a1, a0 +; RV64IZHINX-NEXT: bnez a0, .LBB4_2 +; RV64IZHINX-NEXT: # %bb.1: # %if.else +; RV64IZHINX-NEXT: ret +; RV64IZHINX-NEXT: .LBB4_2: # %if.then +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call abort@plt +; ; RV32IZFHMIN-LABEL: br_fcmp_oge: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -287,6 +505,32 @@ ; RV64IZFHMIN-NEXT: addi sp, sp, -16 ; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFHMIN-NEXT: call abort@plt +; +; RV32IZHINXMIN-LABEL: br_fcmp_oge: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: fle.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: bnez a0, .LBB4_2 +; RV32IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV32IZHINXMIN-NEXT: ret +; RV32IZHINXMIN-NEXT: .LBB4_2: # %if.then +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: call abort@plt +; +; RV64IZHINXMIN-LABEL: br_fcmp_oge: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: fle.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: bnez a0, .LBB4_2 +; RV64IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV64IZHINXMIN-NEXT: ret +; RV64IZHINXMIN-NEXT: .LBB4_2: # %if.then +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: call abort@plt %1 = fcmp oge half %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -319,6 +563,28 @@ ; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt ; +; RV32IZHINX-LABEL: br_fcmp_olt: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: flt.h a0, a0, a1 +; RV32IZHINX-NEXT: bnez a0, 
.LBB5_2 +; RV32IZHINX-NEXT: # %bb.1: # %if.else +; RV32IZHINX-NEXT: ret +; RV32IZHINX-NEXT: .LBB5_2: # %if.then +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call abort@plt +; +; RV64IZHINX-LABEL: br_fcmp_olt: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: flt.h a0, a0, a1 +; RV64IZHINX-NEXT: bnez a0, .LBB5_2 +; RV64IZHINX-NEXT: # %bb.1: # %if.else +; RV64IZHINX-NEXT: ret +; RV64IZHINX-NEXT: .LBB5_2: # %if.then +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call abort@plt +; ; RV32IZFHMIN-LABEL: br_fcmp_olt: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -344,6 +610,32 @@ ; RV64IZFHMIN-NEXT: addi sp, sp, -16 ; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFHMIN-NEXT: call abort@plt +; +; RV32IZHINXMIN-LABEL: br_fcmp_olt: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: flt.s a0, a0, a1 +; RV32IZHINXMIN-NEXT: bnez a0, .LBB5_2 +; RV32IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV32IZHINXMIN-NEXT: ret +; RV32IZHINXMIN-NEXT: .LBB5_2: # %if.then +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: call abort@plt +; +; RV64IZHINXMIN-LABEL: br_fcmp_olt: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: flt.s a0, a0, a1 +; RV64IZHINXMIN-NEXT: bnez a0, .LBB5_2 +; RV64IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV64IZHINXMIN-NEXT: ret +; RV64IZHINXMIN-NEXT: .LBB5_2: # %if.then +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: call abort@plt %1 = fcmp olt half %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -376,6 +668,28 @@ ; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt ; +; RV32IZHINX-LABEL: br_fcmp_ole: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fle.h a0, a0, a1 +; RV32IZHINX-NEXT: bnez a0, .LBB6_2 +; RV32IZHINX-NEXT: # %bb.1: # %if.else +; RV32IZHINX-NEXT: ret +; RV32IZHINX-NEXT: .LBB6_2: # %if.then +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call abort@plt +; +; RV64IZHINX-LABEL: br_fcmp_ole: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fle.h a0, a0, a1 +; RV64IZHINX-NEXT: bnez a0, .LBB6_2 +; RV64IZHINX-NEXT: # %bb.1: # %if.else +; RV64IZHINX-NEXT: ret +; RV64IZHINX-NEXT: .LBB6_2: # %if.then +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call abort@plt +; ; RV32IZFHMIN-LABEL: br_fcmp_ole: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -401,6 +715,32 @@ ; RV64IZFHMIN-NEXT: addi sp, sp, -16 ; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFHMIN-NEXT: call abort@plt +; +; RV32IZHINXMIN-LABEL: br_fcmp_ole: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fle.s a0, a0, a1 +; RV32IZHINXMIN-NEXT: bnez a0, .LBB6_2 +; RV32IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV32IZHINXMIN-NEXT: ret +; RV32IZHINXMIN-NEXT: .LBB6_2: # %if.then +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: call abort@plt +; +; RV64IZHINXMIN-LABEL: br_fcmp_ole: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; 
RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fle.s a0, a0, a1 +; RV64IZHINXMIN-NEXT: bnez a0, .LBB6_2 +; RV64IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV64IZHINXMIN-NEXT: ret +; RV64IZHINXMIN-NEXT: .LBB6_2: # %if.then +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: call abort@plt %1 = fcmp ole half %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -437,6 +777,32 @@ ; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt ; +; RV32IZHINX-LABEL: br_fcmp_one: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: flt.h a2, a0, a1 +; RV32IZHINX-NEXT: flt.h a0, a1, a0 +; RV32IZHINX-NEXT: or a0, a0, a2 +; RV32IZHINX-NEXT: bnez a0, .LBB7_2 +; RV32IZHINX-NEXT: # %bb.1: # %if.else +; RV32IZHINX-NEXT: ret +; RV32IZHINX-NEXT: .LBB7_2: # %if.then +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call abort@plt +; +; RV64IZHINX-LABEL: br_fcmp_one: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: flt.h a2, a0, a1 +; RV64IZHINX-NEXT: flt.h a0, a1, a0 +; RV64IZHINX-NEXT: or a0, a0, a2 +; RV64IZHINX-NEXT: bnez a0, .LBB7_2 +; RV64IZHINX-NEXT: # %bb.1: # %if.else +; RV64IZHINX-NEXT: ret +; RV64IZHINX-NEXT: .LBB7_2: # %if.then +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call abort@plt +; ; RV32IZFHMIN-LABEL: br_fcmp_one: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -466,6 +832,36 @@ ; RV64IZFHMIN-NEXT: addi sp, sp, -16 ; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFHMIN-NEXT: call abort@plt +; +; RV32IZHINXMIN-LABEL: br_fcmp_one: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: flt.s a2, a0, a1 +; RV32IZHINXMIN-NEXT: flt.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: or a0, a0, a2 +; RV32IZHINXMIN-NEXT: bnez a0, .LBB7_2 +; RV32IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV32IZHINXMIN-NEXT: ret +; RV32IZHINXMIN-NEXT: .LBB7_2: # %if.then +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: call abort@plt +; +; RV64IZHINXMIN-LABEL: br_fcmp_one: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: flt.s a2, a0, a1 +; RV64IZHINXMIN-NEXT: flt.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: or a0, a0, a2 +; RV64IZHINXMIN-NEXT: bnez a0, .LBB7_2 +; RV64IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV64IZHINXMIN-NEXT: ret +; RV64IZHINXMIN-NEXT: .LBB7_2: # %if.then +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: call abort@plt %1 = fcmp one half %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -502,6 +898,32 @@ ; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt ; +; RV32IZHINX-LABEL: br_fcmp_ord: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: feq.h a1, a1, a1 +; RV32IZHINX-NEXT: feq.h a0, a0, a0 +; RV32IZHINX-NEXT: and a0, a0, a1 +; RV32IZHINX-NEXT: bnez a0, .LBB8_2 +; RV32IZHINX-NEXT: # %bb.1: # %if.else +; RV32IZHINX-NEXT: ret +; RV32IZHINX-NEXT: .LBB8_2: # %if.then +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call abort@plt +; +; RV64IZHINX-LABEL: br_fcmp_ord: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: feq.h a1, a1, a1 +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: and a0, 
a0, a1 +; RV64IZHINX-NEXT: bnez a0, .LBB8_2 +; RV64IZHINX-NEXT: # %bb.1: # %if.else +; RV64IZHINX-NEXT: ret +; RV64IZHINX-NEXT: .LBB8_2: # %if.then +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call abort@plt +; ; RV32IZFHMIN-LABEL: br_fcmp_ord: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -531,6 +953,36 @@ ; RV64IZFHMIN-NEXT: addi sp, sp, -16 ; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFHMIN-NEXT: call abort@plt +; +; RV32IZHINXMIN-LABEL: br_fcmp_ord: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: feq.s a1, a1, a1 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV32IZHINXMIN-NEXT: and a0, a0, a1 +; RV32IZHINXMIN-NEXT: bnez a0, .LBB8_2 +; RV32IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV32IZHINXMIN-NEXT: ret +; RV32IZHINXMIN-NEXT: .LBB8_2: # %if.then +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: call abort@plt +; +; RV64IZHINXMIN-LABEL: br_fcmp_ord: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: feq.s a1, a1, a1 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV64IZHINXMIN-NEXT: and a0, a0, a1 +; RV64IZHINXMIN-NEXT: bnez a0, .LBB8_2 +; RV64IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV64IZHINXMIN-NEXT: ret +; RV64IZHINXMIN-NEXT: .LBB8_2: # %if.then +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: call abort@plt %1 = fcmp ord half %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -567,6 +1019,32 @@ ; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt ; +; RV32IZHINX-LABEL: br_fcmp_ueq: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: flt.h a2, a0, a1 +; RV32IZHINX-NEXT: flt.h a0, a1, a0 +; RV32IZHINX-NEXT: or a0, a0, a2 +; RV32IZHINX-NEXT: beqz a0, .LBB9_2 +; RV32IZHINX-NEXT: # %bb.1: # %if.else +; RV32IZHINX-NEXT: ret +; RV32IZHINX-NEXT: .LBB9_2: # %if.then +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call abort@plt +; +; RV64IZHINX-LABEL: br_fcmp_ueq: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: flt.h a2, a0, a1 +; RV64IZHINX-NEXT: flt.h a0, a1, a0 +; RV64IZHINX-NEXT: or a0, a0, a2 +; RV64IZHINX-NEXT: beqz a0, .LBB9_2 +; RV64IZHINX-NEXT: # %bb.1: # %if.else +; RV64IZHINX-NEXT: ret +; RV64IZHINX-NEXT: .LBB9_2: # %if.then +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call abort@plt +; ; RV32IZFHMIN-LABEL: br_fcmp_ueq: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -596,6 +1074,36 @@ ; RV64IZFHMIN-NEXT: addi sp, sp, -16 ; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFHMIN-NEXT: call abort@plt +; +; RV32IZHINXMIN-LABEL: br_fcmp_ueq: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: flt.s a2, a0, a1 +; RV32IZHINXMIN-NEXT: flt.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: or a0, a0, a2 +; RV32IZHINXMIN-NEXT: beqz a0, .LBB9_2 +; RV32IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV32IZHINXMIN-NEXT: ret +; RV32IZHINXMIN-NEXT: .LBB9_2: # %if.then +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: call abort@plt +; +; RV64IZHINXMIN-LABEL: br_fcmp_ueq: +; RV64IZHINXMIN: # %bb.0: +; 
RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: flt.s a2, a0, a1 +; RV64IZHINXMIN-NEXT: flt.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: or a0, a0, a2 +; RV64IZHINXMIN-NEXT: beqz a0, .LBB9_2 +; RV64IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV64IZHINXMIN-NEXT: ret +; RV64IZHINXMIN-NEXT: .LBB9_2: # %if.then +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: call abort@plt %1 = fcmp ueq half %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -628,6 +1136,28 @@ ; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt ; +; RV32IZHINX-LABEL: br_fcmp_ugt: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fle.h a0, a0, a1 +; RV32IZHINX-NEXT: beqz a0, .LBB10_2 +; RV32IZHINX-NEXT: # %bb.1: # %if.else +; RV32IZHINX-NEXT: ret +; RV32IZHINX-NEXT: .LBB10_2: # %if.then +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call abort@plt +; +; RV64IZHINX-LABEL: br_fcmp_ugt: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fle.h a0, a0, a1 +; RV64IZHINX-NEXT: beqz a0, .LBB10_2 +; RV64IZHINX-NEXT: # %bb.1: # %if.else +; RV64IZHINX-NEXT: ret +; RV64IZHINX-NEXT: .LBB10_2: # %if.then +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call abort@plt +; ; RV32IZFHMIN-LABEL: br_fcmp_ugt: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -653,6 +1183,32 @@ ; RV64IZFHMIN-NEXT: addi sp, sp, -16 ; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFHMIN-NEXT: call abort@plt +; +; RV32IZHINXMIN-LABEL: br_fcmp_ugt: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fle.s a0, a0, a1 +; RV32IZHINXMIN-NEXT: beqz a0, .LBB10_2 +; RV32IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV32IZHINXMIN-NEXT: ret +; RV32IZHINXMIN-NEXT: .LBB10_2: # %if.then +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: call abort@plt +; +; RV64IZHINXMIN-LABEL: br_fcmp_ugt: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fle.s a0, a0, a1 +; RV64IZHINXMIN-NEXT: beqz a0, .LBB10_2 +; RV64IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV64IZHINXMIN-NEXT: ret +; RV64IZHINXMIN-NEXT: .LBB10_2: # %if.then +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: call abort@plt %1 = fcmp ugt half %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -685,6 +1241,28 @@ ; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt ; +; RV32IZHINX-LABEL: br_fcmp_uge: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: flt.h a0, a0, a1 +; RV32IZHINX-NEXT: beqz a0, .LBB11_2 +; RV32IZHINX-NEXT: # %bb.1: # %if.else +; RV32IZHINX-NEXT: ret +; RV32IZHINX-NEXT: .LBB11_2: # %if.then +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call abort@plt +; +; RV64IZHINX-LABEL: br_fcmp_uge: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: flt.h a0, a0, a1 +; RV64IZHINX-NEXT: beqz a0, .LBB11_2 +; RV64IZHINX-NEXT: # %bb.1: # %if.else +; RV64IZHINX-NEXT: ret +; RV64IZHINX-NEXT: .LBB11_2: # %if.then +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call abort@plt +; ; RV32IZFHMIN-LABEL: br_fcmp_uge: 
; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -710,6 +1288,32 @@ ; RV64IZFHMIN-NEXT: addi sp, sp, -16 ; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFHMIN-NEXT: call abort@plt +; +; RV32IZHINXMIN-LABEL: br_fcmp_uge: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: flt.s a0, a0, a1 +; RV32IZHINXMIN-NEXT: beqz a0, .LBB11_2 +; RV32IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV32IZHINXMIN-NEXT: ret +; RV32IZHINXMIN-NEXT: .LBB11_2: # %if.then +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: call abort@plt +; +; RV64IZHINXMIN-LABEL: br_fcmp_uge: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: flt.s a0, a0, a1 +; RV64IZHINXMIN-NEXT: beqz a0, .LBB11_2 +; RV64IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV64IZHINXMIN-NEXT: ret +; RV64IZHINXMIN-NEXT: .LBB11_2: # %if.then +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: call abort@plt %1 = fcmp uge half %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -742,6 +1346,28 @@ ; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt ; +; RV32IZHINX-LABEL: br_fcmp_ult: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fle.h a0, a1, a0 +; RV32IZHINX-NEXT: beqz a0, .LBB12_2 +; RV32IZHINX-NEXT: # %bb.1: # %if.else +; RV32IZHINX-NEXT: ret +; RV32IZHINX-NEXT: .LBB12_2: # %if.then +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call abort@plt +; +; RV64IZHINX-LABEL: br_fcmp_ult: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fle.h a0, a1, a0 +; RV64IZHINX-NEXT: beqz a0, .LBB12_2 +; RV64IZHINX-NEXT: # %bb.1: # %if.else +; RV64IZHINX-NEXT: ret +; RV64IZHINX-NEXT: .LBB12_2: # %if.then +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call abort@plt +; ; RV32IZFHMIN-LABEL: br_fcmp_ult: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -767,6 +1393,32 @@ ; RV64IZFHMIN-NEXT: addi sp, sp, -16 ; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFHMIN-NEXT: call abort@plt +; +; RV32IZHINXMIN-LABEL: br_fcmp_ult: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: fle.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: beqz a0, .LBB12_2 +; RV32IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV32IZHINXMIN-NEXT: ret +; RV32IZHINXMIN-NEXT: .LBB12_2: # %if.then +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: call abort@plt +; +; RV64IZHINXMIN-LABEL: br_fcmp_ult: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: fle.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: beqz a0, .LBB12_2 +; RV64IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV64IZHINXMIN-NEXT: ret +; RV64IZHINXMIN-NEXT: .LBB12_2: # %if.then +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: call abort@plt %1 = fcmp ult half %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -799,6 +1451,28 @@ ; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt ; +; RV32IZHINX-LABEL: br_fcmp_ule: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: flt.h a0, 
a1, a0 +; RV32IZHINX-NEXT: beqz a0, .LBB13_2 +; RV32IZHINX-NEXT: # %bb.1: # %if.else +; RV32IZHINX-NEXT: ret +; RV32IZHINX-NEXT: .LBB13_2: # %if.then +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call abort@plt +; +; RV64IZHINX-LABEL: br_fcmp_ule: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: flt.h a0, a1, a0 +; RV64IZHINX-NEXT: beqz a0, .LBB13_2 +; RV64IZHINX-NEXT: # %bb.1: # %if.else +; RV64IZHINX-NEXT: ret +; RV64IZHINX-NEXT: .LBB13_2: # %if.then +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call abort@plt +; ; RV32IZFHMIN-LABEL: br_fcmp_ule: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -824,6 +1498,32 @@ ; RV64IZFHMIN-NEXT: addi sp, sp, -16 ; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFHMIN-NEXT: call abort@plt +; +; RV32IZHINXMIN-LABEL: br_fcmp_ule: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: flt.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: beqz a0, .LBB13_2 +; RV32IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV32IZHINXMIN-NEXT: ret +; RV32IZHINXMIN-NEXT: .LBB13_2: # %if.then +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: call abort@plt +; +; RV64IZHINXMIN-LABEL: br_fcmp_ule: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: flt.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: beqz a0, .LBB13_2 +; RV64IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV64IZHINXMIN-NEXT: ret +; RV64IZHINXMIN-NEXT: .LBB13_2: # %if.then +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: call abort@plt %1 = fcmp ule half %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -856,6 +1556,28 @@ ; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt ; +; RV32IZHINX-LABEL: br_fcmp_une: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: feq.h a0, a0, a1 +; RV32IZHINX-NEXT: beqz a0, .LBB14_2 +; RV32IZHINX-NEXT: # %bb.1: # %if.else +; RV32IZHINX-NEXT: ret +; RV32IZHINX-NEXT: .LBB14_2: # %if.then +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call abort@plt +; +; RV64IZHINX-LABEL: br_fcmp_une: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: feq.h a0, a0, a1 +; RV64IZHINX-NEXT: beqz a0, .LBB14_2 +; RV64IZHINX-NEXT: # %bb.1: # %if.else +; RV64IZHINX-NEXT: ret +; RV64IZHINX-NEXT: .LBB14_2: # %if.then +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call abort@plt +; ; RV32IZFHMIN-LABEL: br_fcmp_une: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -881,6 +1603,32 @@ ; RV64IZFHMIN-NEXT: addi sp, sp, -16 ; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFHMIN-NEXT: call abort@plt +; +; RV32IZHINXMIN-LABEL: br_fcmp_une: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: feq.s a0, a0, a1 +; RV32IZHINXMIN-NEXT: beqz a0, .LBB14_2 +; RV32IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV32IZHINXMIN-NEXT: ret +; RV32IZHINXMIN-NEXT: .LBB14_2: # %if.then +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: call abort@plt +; +; RV64IZHINXMIN-LABEL: br_fcmp_une: +; RV64IZHINXMIN: # 
%bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a1 +; RV64IZHINXMIN-NEXT: beqz a0, .LBB14_2 +; RV64IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV64IZHINXMIN-NEXT: ret +; RV64IZHINXMIN-NEXT: .LBB14_2: # %if.then +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: call abort@plt %1 = fcmp une half %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -917,6 +1665,32 @@ ; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt ; +; RV32IZHINX-LABEL: br_fcmp_uno: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: feq.h a1, a1, a1 +; RV32IZHINX-NEXT: feq.h a0, a0, a0 +; RV32IZHINX-NEXT: and a0, a0, a1 +; RV32IZHINX-NEXT: beqz a0, .LBB15_2 +; RV32IZHINX-NEXT: # %bb.1: # %if.else +; RV32IZHINX-NEXT: ret +; RV32IZHINX-NEXT: .LBB15_2: # %if.then +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call abort@plt +; +; RV64IZHINX-LABEL: br_fcmp_uno: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: feq.h a1, a1, a1 +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: and a0, a0, a1 +; RV64IZHINX-NEXT: beqz a0, .LBB15_2 +; RV64IZHINX-NEXT: # %bb.1: # %if.else +; RV64IZHINX-NEXT: ret +; RV64IZHINX-NEXT: .LBB15_2: # %if.then +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call abort@plt +; ; RV32IZFHMIN-LABEL: br_fcmp_uno: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -946,6 +1720,36 @@ ; RV64IZFHMIN-NEXT: addi sp, sp, -16 ; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFHMIN-NEXT: call abort@plt +; +; RV32IZHINXMIN-LABEL: br_fcmp_uno: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: feq.s a1, a1, a1 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV32IZHINXMIN-NEXT: and a0, a0, a1 +; RV32IZHINXMIN-NEXT: beqz a0, .LBB15_2 +; RV32IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV32IZHINXMIN-NEXT: ret +; RV32IZHINXMIN-NEXT: .LBB15_2: # %if.then +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: call abort@plt +; +; RV64IZHINXMIN-LABEL: br_fcmp_uno: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: feq.s a1, a1, a1 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV64IZHINXMIN-NEXT: and a0, a0, a1 +; RV64IZHINXMIN-NEXT: beqz a0, .LBB15_2 +; RV64IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV64IZHINXMIN-NEXT: ret +; RV64IZHINXMIN-NEXT: .LBB15_2: # %if.then +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: call abort@plt %1 = fcmp uno half %a, %b br i1 %1, label %if.then, label %if.else if.else: @@ -978,6 +1782,28 @@ ; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFH-NEXT: call abort@plt ; +; RV32IZHINX-LABEL: br_fcmp_true: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: li a0, 1 +; RV32IZHINX-NEXT: bnez a0, .LBB16_2 +; RV32IZHINX-NEXT: # %bb.1: # %if.else +; RV32IZHINX-NEXT: ret +; RV32IZHINX-NEXT: .LBB16_2: # %if.then +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call abort@plt +; +; RV64IZHINX-LABEL: br_fcmp_true: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: li a0, 1 +; RV64IZHINX-NEXT: bnez a0, .LBB16_2 +; RV64IZHINX-NEXT: # %bb.1: # %if.else +; 
RV64IZHINX-NEXT: ret +; RV64IZHINX-NEXT: .LBB16_2: # %if.then +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call abort@plt +; ; RV32IZFHMIN-LABEL: br_fcmp_true: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: li a0, 1 @@ -999,6 +1825,28 @@ ; RV64IZFHMIN-NEXT: addi sp, sp, -16 ; RV64IZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill ; RV64IZFHMIN-NEXT: call abort@plt +; +; RV32IZHINXMIN-LABEL: br_fcmp_true: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: li a0, 1 +; RV32IZHINXMIN-NEXT: bnez a0, .LBB16_2 +; RV32IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV32IZHINXMIN-NEXT: ret +; RV32IZHINXMIN-NEXT: .LBB16_2: # %if.then +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: call abort@plt +; +; RV64IZHINXMIN-LABEL: br_fcmp_true: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: li a0, 1 +; RV64IZHINXMIN-NEXT: bnez a0, .LBB16_2 +; RV64IZHINXMIN-NEXT: # %bb.1: # %if.else +; RV64IZHINXMIN-NEXT: ret +; RV64IZHINXMIN-NEXT: .LBB16_2: # %if.then +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: call abort@plt %1 = fcmp true half %a, %b br i1 %1, label %if.then, label %if.else if.else: diff --git a/llvm/test/CodeGen/RISCV/half-convert-strict.ll b/llvm/test/CodeGen/RISCV/half-convert-strict.ll --- a/llvm/test/CodeGen/RISCV/half-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/half-convert-strict.ll @@ -5,24 +5,48 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \ ; RUN: -target-abi lp64f -disable-strictnode-mutation < %s \ ; RUN: | FileCheck -check-prefixes=CHECKIZFH,RV64IZFH %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi ilp32 -disable-strictnode-mutation < %s \ +; RUN: | FileCheck -check-prefixes=CHECKIZHINX,RV32IZHINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi lp64 -disable-strictnode-mutation < %s \ +; RUN: | FileCheck -check-prefixes=CHECKIZHINX,RV64IZHINX %s ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh -verify-machineinstrs \ ; RUN: -target-abi ilp32d -disable-strictnode-mutation < %s \ ; RUN: | FileCheck -check-prefix=RV32IDZFH %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh -verify-machineinstrs \ ; RUN: -target-abi lp64d -disable-strictnode-mutation < %s \ ; RUN: | FileCheck -check-prefix=RV64IDZFH %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx,+zhinx -verify-machineinstrs \ +; RUN: -target-abi ilp32 -disable-strictnode-mutation < %s \ +; RUN: | FileCheck -check-prefix=RV32IZDINXZHINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zdinx,+zhinx -verify-machineinstrs \ +; RUN: -target-abi lp64 -disable-strictnode-mutation < %s \ +; RUN: | FileCheck -check-prefix=RV64IZDINXZHINX %s ; RUN: llc -mtriple=riscv32 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: -target-abi ilp32f -disable-strictnode-mutation < %s \ ; RUN: | FileCheck -check-prefixes=CHECK32-IZFHMIN,RV32IFZFHMIN %s ; RUN: llc -mtriple=riscv64 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: -target-abi lp64f -disable-strictnode-mutation < %s \ ; RUN: | FileCheck -check-prefixes=CHECK64-IZFHMIN,RV64IFZFHMIN %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi ilp32 -disable-strictnode-mutation < %s \ +; RUN: | FileCheck -check-prefixes=CHECK32-IZHINXMIN,RV32IZHINXMIN %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi lp64 -disable-strictnode-mutation < %s \ +; RUN: | 
FileCheck -check-prefixes=CHECK64-IZHINXMIN,RV64IZHINXMIN %s ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin -verify-machineinstrs \ ; RUN: -target-abi ilp32d -disable-strictnode-mutation < %s \ ; RUN: | FileCheck -check-prefixes=CHECK32-IZFHMIN,RV32IDZFHMIN %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin -verify-machineinstrs \ ; RUN: -target-abi lp64d -disable-strictnode-mutation < %s \ ; RUN: | FileCheck -check-prefixes=CHECK64-IZFHMIN,RV64IDZFHMIN %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx,+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi ilp32 -disable-strictnode-mutation < %s \ +; RUN: | FileCheck -check-prefixes=CHECK32-IZDINXZHINXMIN,RV32IZDINXZHINXMIN %s +; RUN: llc -mtriple=riscv64 -mattr=+zdinx,+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi lp64 -disable-strictnode-mutation < %s \ +; RUN: | FileCheck -check-prefixes=CHECK64-IZDINXZHINXMIN,RV64IZDINXZHINXMIN %s ; NOTE: The rounding mode metadata does not effect which instruction is ; selected. Dynamic rounding mode is always used for operations that @@ -39,6 +63,16 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rtz ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_si_h: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_si_h: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_si_h: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: fcvt.w.h a0, fa0, rtz @@ -49,6 +83,16 @@ ; RV64IDZFH-NEXT: fcvt.l.h a0, fa0, rtz ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_si_h: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_si_h: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_si_h: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -60,6 +104,30 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECK64-IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_si_h: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_si_h: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_si_h: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_si_h: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call i16 @llvm.experimental.constrained.fptosi.i16.f16(half %a, metadata !"fpexcept.strict") strictfp ret i16 %1 } @@ -76,6 +144,16 @@ ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rtz ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_ui_h: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_ui_h: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_ui_h: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz @@ -86,6 +164,16 @@ ; RV64IDZFH-NEXT: fcvt.lu.h a0, fa0, rtz ; RV64IDZFH-NEXT: ret ; +; 
RV32IZDINXZHINX-LABEL: fcvt_ui_h: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_ui_h: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_ui_h: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -97,6 +185,30 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECK64-IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_ui_h: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_ui_h: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_ui_h: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_ui_h: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call i16 @llvm.experimental.constrained.fptoui.i16.f16(half %a, metadata !"fpexcept.strict") strictfp ret i16 %1 } @@ -108,6 +220,11 @@ ; CHECKIZFH-NEXT: fcvt.w.h a0, fa0, rtz ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_w_h: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.w.h a0, a0, rtz +; CHECKIZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_w_h: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: fcvt.w.h a0, fa0, rtz @@ -118,6 +235,16 @@ ; RV64IDZFH-NEXT: fcvt.w.h a0, fa0, rtz ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_w_h: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_w_h: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_w_h: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -129,6 +256,30 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECK64-IZFHMIN-NEXT: fcvt.w.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_w_h: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_w_h: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_w_h: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_w_h: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } @@ -140,6 +291,11 @@ ; CHECKIZFH-NEXT: fcvt.wu.h a0, fa0, rtz ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_wu_h: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; CHECKIZHINX-NEXT: 
ret +; ; RV32IDZFH-LABEL: fcvt_wu_h: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz @@ -150,6 +306,16 @@ ; RV64IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_wu_h: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_wu_h: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_wu_h: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -161,6 +327,30 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECK64-IZFHMIN-NEXT: fcvt.wu.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_wu_h: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_wu_h: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_wu_h: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_wu_h: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } @@ -177,6 +367,13 @@ ; CHECKIZFH-NEXT: add a0, a0, a1 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_wu_h_multiple_use: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; CHECKIZHINX-NEXT: seqz a1, a0 +; CHECKIZHINX-NEXT: add a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_wu_h_multiple_use: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz @@ -191,6 +388,20 @@ ; RV64IDZFH-NEXT: add a0, a0, a1 ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_wu_h_multiple_use: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZDINXZHINX-NEXT: seqz a1, a0 +; RV32IZDINXZHINX-NEXT: add a0, a0, a1 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_wu_h_multiple_use: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV64IZDINXZHINX-NEXT: seqz a1, a0 +; RV64IZDINXZHINX-NEXT: add a0, a0, a1 +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_wu_h_multiple_use: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -206,6 +417,38 @@ ; CHECK64-IZFHMIN-NEXT: seqz a1, a0 ; CHECK64-IZFHMIN-NEXT: add a0, a0, a1 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_wu_h_multiple_use: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: seqz a1, a0 +; CHECK32-IZHINXMIN-NEXT: add a0, a0, a1 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_wu_h_multiple_use: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: seqz a1, a0 +; CHECK64-IZHINXMIN-NEXT: add a0, a0, a1 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_wu_h_multiple_use: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; 
CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: seqz a1, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: add a0, a0, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_wu_h_multiple_use: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: seqz a1, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: add a0, a0, a1 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %a = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %x, metadata !"fpexcept.strict") strictfp %b = icmp eq i32 %a, 0 %c = select i1 %b, i32 1, i32 %a @@ -227,6 +470,20 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rtz ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_l_h: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call __fixhfdi@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_l_h: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_l_h: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: addi sp, sp, -16 @@ -241,6 +498,20 @@ ; RV64IDZFH-NEXT: fcvt.l.h a0, fa0, rtz ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_l_h: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: call __fixhfdi@plt +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_l_h: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_l_h: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: addi sp, sp, -16 @@ -255,6 +526,36 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECK64-IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_l_h: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: call __fixhfdi@plt +; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_l_h: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_l_h: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZDINXZHINXMIN-NEXT: call __fixhfdi@plt +; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_l_h: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f16(half %a, metadata !"fpexcept.strict") strictfp ret i64 %1 } @@ -275,6 +576,20 @@ ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rtz ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_lu_h: +; 
RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call __fixunshfdi@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_lu_h: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_lu_h: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: addi sp, sp, -16 @@ -289,6 +604,20 @@ ; RV64IDZFH-NEXT: fcvt.lu.h a0, fa0, rtz ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_lu_h: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: call __fixunshfdi@plt +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_lu_h: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_lu_h: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: addi sp, sp, -16 @@ -303,6 +632,36 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECK64-IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_lu_h: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: call __fixunshfdi@plt +; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_lu_h: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_lu_h: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZDINXZHINXMIN-NEXT: call __fixunshfdi@plt +; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_lu_h: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f16(half %a, metadata !"fpexcept.strict") strictfp ret i64 %1 } @@ -323,6 +682,20 @@ ; RV64IZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_h_si: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: slli a0, a0, 16 +; RV32IZHINX-NEXT: srai a0, a0, 16 +; RV32IZHINX-NEXT: fcvt.h.w a0, a0 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_h_si: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: slli a0, a0, 48 +; RV64IZHINX-NEXT: srai a0, a0, 48 +; RV64IZHINX-NEXT: fcvt.h.w a0, a0 +; RV64IZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_h_si: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: slli a0, a0, 16 @@ -337,6 +710,20 @@ ; RV64IDZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_h_si: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: slli a0, a0, 16 +; RV32IZDINXZHINX-NEXT: srai a0, a0, 16 +; RV32IZDINXZHINX-NEXT: fcvt.h.w a0, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_si: +; RV64IZDINXZHINX: # %bb.0: +; 
RV64IZDINXZHINX-NEXT: slli a0, a0, 48 +; RV64IZDINXZHINX-NEXT: srai a0, a0, 48 +; RV64IZDINXZHINX-NEXT: fcvt.h.w a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_h_si: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: slli a0, a0, 16 @@ -352,6 +739,38 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.l fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_si: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: slli a0, a0, 16 +; CHECK32-IZHINXMIN-NEXT: srai a0, a0, 16 +; CHECK32-IZHINXMIN-NEXT: fcvt.s.w a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_si: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: slli a0, a0, 48 +; CHECK64-IZHINXMIN-NEXT: srai a0, a0, 48 +; CHECK64-IZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_si: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: slli a0, a0, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: srai a0, a0, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.w a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_si: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: slli a0, a0, 48 +; CHECK64-IZDINXZHINXMIN-NEXT: srai a0, a0, 48 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -363,6 +782,11 @@ ; CHECKIZFH-NEXT: fcvt.h.w fa0, a0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_h_si_signext: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.h.w a0, a0 +; CHECKIZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_h_si_signext: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: fcvt.h.w fa0, a0 @@ -373,6 +797,16 @@ ; RV64IDZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_h_si_signext: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.h.w a0, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_si_signext: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.h.w a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_h_si_signext: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: fcvt.s.w fa5, a0 @@ -384,6 +818,30 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.l fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_si_signext: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.w a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_si_signext: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_si_signext: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.w a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_si_signext: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %a, metadata 
!"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -403,6 +861,20 @@ ; RV64IZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_h_ui: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: slli a0, a0, 16 +; RV32IZHINX-NEXT: srli a0, a0, 16 +; RV32IZHINX-NEXT: fcvt.h.wu a0, a0 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_h_ui: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: slli a0, a0, 48 +; RV64IZHINX-NEXT: srli a0, a0, 48 +; RV64IZHINX-NEXT: fcvt.h.wu a0, a0 +; RV64IZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_h_ui: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: slli a0, a0, 16 @@ -417,6 +889,20 @@ ; RV64IDZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_h_ui: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: slli a0, a0, 16 +; RV32IZDINXZHINX-NEXT: srli a0, a0, 16 +; RV32IZDINXZHINX-NEXT: fcvt.h.wu a0, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_ui: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: slli a0, a0, 48 +; RV64IZDINXZHINX-NEXT: srli a0, a0, 48 +; RV64IZDINXZHINX-NEXT: fcvt.h.wu a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_h_ui: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: slli a0, a0, 16 @@ -432,6 +918,38 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.lu fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_ui: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: slli a0, a0, 16 +; CHECK32-IZHINXMIN-NEXT: srli a0, a0, 16 +; CHECK32-IZHINXMIN-NEXT: fcvt.s.wu a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_ui: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: slli a0, a0, 48 +; CHECK64-IZHINXMIN-NEXT: srli a0, a0, 48 +; CHECK64-IZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_ui: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: slli a0, a0, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: srli a0, a0, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.wu a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_ui: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: slli a0, a0, 48 +; CHECK64-IZDINXZHINXMIN-NEXT: srli a0, a0, 48 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.uitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -443,6 +961,11 @@ ; CHECKIZFH-NEXT: fcvt.h.wu fa0, a0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_h_ui_zeroext: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.h.wu a0, a0 +; CHECKIZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_h_ui_zeroext: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: fcvt.h.wu fa0, a0 @@ -453,6 +976,16 @@ ; RV64IDZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_h_ui_zeroext: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.h.wu a0, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_ui_zeroext: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.h.wu a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_h_ui_zeroext: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: fcvt.s.wu fa5, a0 @@ -464,6 +997,30 @@ ; CHECK64-IZFHMIN-NEXT: 
fcvt.s.lu fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_ui_zeroext: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.wu a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_ui_zeroext: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_ui_zeroext: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.wu a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_ui_zeroext: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.uitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -474,6 +1031,11 @@ ; CHECKIZFH-NEXT: fcvt.h.w fa0, a0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_h_w: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.h.w a0, a0 +; CHECKIZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_h_w: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: fcvt.h.w fa0, a0 @@ -484,6 +1046,16 @@ ; RV64IDZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_h_w: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.h.w a0, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_w: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.h.w a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_h_w: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: fcvt.s.w fa5, a0 @@ -496,6 +1068,32 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.l fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_w: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.w a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_w: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: sext.w a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_w: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.w a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_w: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: sext.w a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -508,6 +1106,12 @@ ; CHECKIZFH-NEXT: fcvt.h.w fa0, a0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_h_w_load: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lw a0, 0(a0) +; CHECKIZHINX-NEXT: fcvt.h.w a0, a0 +; CHECKIZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_h_w_load: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: lw a0, 0(a0) @@ -520,6 +1124,18 @@ ; RV64IDZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_h_w_load: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: lw a0, 0(a0) +; RV32IZDINXZHINX-NEXT: 
fcvt.h.w a0, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_w_load: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: lw a0, 0(a0) +; RV64IZDINXZHINX-NEXT: fcvt.h.w a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_h_w_load: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: lw a0, 0(a0) @@ -533,6 +1149,34 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.l fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_w_load: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: lw a0, 0(a0) +; CHECK32-IZHINXMIN-NEXT: fcvt.s.w a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_w_load: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: lw a0, 0(a0) +; CHECK64-IZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_w_load: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 0(a0) +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.w a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_w_load: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: lw a0, 0(a0) +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %a = load i32, ptr %p %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 @@ -544,6 +1188,11 @@ ; CHECKIZFH-NEXT: fcvt.h.wu fa0, a0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_h_wu: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.h.wu a0, a0 +; CHECKIZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_h_wu: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: fcvt.h.wu fa0, a0 @@ -554,6 +1203,16 @@ ; RV64IDZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_h_wu: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.h.wu a0, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_wu: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.h.wu a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_h_wu: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: fcvt.s.wu fa5, a0 @@ -567,6 +1226,34 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.lu fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_wu: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.wu a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_wu: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: slli a0, a0, 32 +; CHECK64-IZHINXMIN-NEXT: srli a0, a0, 32 +; CHECK64-IZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_wu: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.wu a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_wu: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: slli a0, a0, 32 +; CHECK64-IZDINXZHINXMIN-NEXT: srli a0, a0, 32 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call half 
@llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -585,6 +1272,18 @@ ; RV64IZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_h_wu_load: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lw a0, 0(a0) +; RV32IZHINX-NEXT: fcvt.h.wu a0, a0 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_h_wu_load: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lwu a0, 0(a0) +; RV64IZHINX-NEXT: fcvt.h.wu a0, a0 +; RV64IZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_h_wu_load: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: lw a0, 0(a0) @@ -597,6 +1296,18 @@ ; RV64IDZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_h_wu_load: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: lw a0, 0(a0) +; RV32IZDINXZHINX-NEXT: fcvt.h.wu a0, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_wu_load: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: lwu a0, 0(a0) +; RV64IZDINXZHINX-NEXT: fcvt.h.wu a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_h_wu_load: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: lw a0, 0(a0) @@ -610,6 +1321,34 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.lu fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_wu_load: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: lw a0, 0(a0) +; CHECK32-IZHINXMIN-NEXT: fcvt.s.wu a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_wu_load: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: lwu a0, 0(a0) +; CHECK64-IZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_wu_load: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 0(a0) +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.wu a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_wu_load: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: lwu a0, 0(a0) +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %a = load i32, ptr %p %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 @@ -630,6 +1369,20 @@ ; RV64IZFH-NEXT: fcvt.h.l fa0, a0 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_h_l: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call __floatdihf@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_h_l: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.h.l a0, a0 +; RV64IZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_h_l: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: addi sp, sp, -16 @@ -644,6 +1397,20 @@ ; RV64IDZFH-NEXT: fcvt.h.l fa0, a0 ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_h_l: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: call __floatdihf@plt +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_l: +; 
RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.h.l a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_h_l: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: addi sp, sp, -16 @@ -658,6 +1425,36 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.l fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_l: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: call __floatdihf@plt +; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_l: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_l: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZDINXZHINXMIN-NEXT: call __floatdihf@plt +; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_l: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.sitofp.f16.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -678,6 +1475,20 @@ ; RV64IZFH-NEXT: fcvt.h.lu fa0, a0 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_h_lu: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call __floatundihf@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_h_lu: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.h.lu a0, a0 +; RV64IZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_h_lu: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: addi sp, sp, -16 @@ -692,6 +1503,20 @@ ; RV64IDZFH-NEXT: fcvt.h.lu fa0, a0 ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_h_lu: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: call __floatundihf@plt +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_lu: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.h.lu a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_h_lu: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: addi sp, sp, -16 @@ -706,6 +1531,36 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.lu fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_lu: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: call __floatundihf@plt +; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_lu: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.lu a0, a0 
+; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_lu: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZDINXZHINXMIN-NEXT: call __floatundihf@plt +; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_lu: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.uitofp.f16.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -717,6 +1572,11 @@ ; CHECKIZFH-NEXT: fcvt.h.s fa0, fa0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_h_s: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_h_s: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: fcvt.h.s fa0, fa0 @@ -727,6 +1587,16 @@ ; RV64IDZFH-NEXT: fcvt.h.s fa0, fa0 ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_h_s: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_s: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_h_s: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: fcvt.h.s fa0, fa0 @@ -736,6 +1606,26 @@ ; CHECK64-IZFHMIN: # %bb.0: ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa0 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_s: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_s: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_s: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_s: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.fptrunc.f16.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } @@ -747,6 +1637,11 @@ ; CHECKIZFH-NEXT: fcvt.s.h fa0, fa0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_s_h: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_s_h: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: fcvt.s.h fa0, fa0 @@ -757,6 +1652,16 @@ ; RV64IDZFH-NEXT: fcvt.s.h fa0, fa0 ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_s_h: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_s_h: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_s_h: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa0, fa0 @@ -766,6 +1671,26 @@ ; CHECK64-IZFHMIN: # %bb.0: ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa0, fa0 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_s_h: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_s_h: +; 
CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_s_h: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_s_h: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call float @llvm.experimental.constrained.fpext.f32.f16(half %a, metadata !"fpexcept.strict") ret float %1 } @@ -790,6 +1715,24 @@ ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_h_d: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call __truncdfhf2@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_h_d: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call __truncdfhf2@plt +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_h_d: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: fcvt.h.d fa0, fa0 @@ -800,6 +1743,22 @@ ; RV64IDZFH-NEXT: fcvt.h.d fa0, fa0 ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_h_d: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw a0, 8(sp) +; RV32IZDINXZHINX-NEXT: sw a1, 12(sp) +; RV32IZDINXZHINX-NEXT: lw a0, 8(sp) +; RV32IZDINXZHINX-NEXT: lw a1, 12(sp) +; RV32IZDINXZHINX-NEXT: fcvt.h.d a0, a0 +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_d: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.h.d a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; RV32IFZFHMIN-LABEL: fcvt_h_d: ; RV32IFZFHMIN: # %bb.0: ; RV32IFZFHMIN-NEXT: addi sp, sp, -16 @@ -818,6 +1777,24 @@ ; RV64IFZFHMIN-NEXT: addi sp, sp, 16 ; RV64IFZFHMIN-NEXT: ret ; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_d: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: call __truncdfhf2@plt +; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_d: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK64-IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; CHECK64-IZHINXMIN-NEXT: call __truncdfhf2@plt +; CHECK64-IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; CHECK64-IZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK64-IZHINXMIN-NEXT: ret +; ; RV32IDZFHMIN-LABEL: fcvt_h_d: ; RV32IDZFHMIN: # %bb.0: ; RV32IDZFHMIN-NEXT: fcvt.h.d fa0, fa0 @@ -827,6 +1804,22 @@ ; RV64IDZFHMIN: # %bb.0: ; RV64IDZFHMIN-NEXT: fcvt.h.d fa0, fa0 ; RV64IDZFHMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_d: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZDINXZHINXMIN-NEXT: sw a0, 8(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: sw a1, 12(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 8(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, 12(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.d a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_d: +; 
CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.d a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.fptrunc.f16.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") ret half %1 } @@ -853,6 +1846,26 @@ ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_d_h: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call __extendsfdf2@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_d_h: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call __extendsfdf2@plt +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_d_h: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: fcvt.d.h fa0, fa0 @@ -863,6 +1876,22 @@ ; RV64IDZFH-NEXT: fcvt.d.h fa0, fa0 ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_d_h: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: fcvt.d.h a0, a0 +; RV32IZDINXZHINX-NEXT: sw a0, 8(sp) +; RV32IZDINXZHINX-NEXT: sw a1, 12(sp) +; RV32IZDINXZHINX-NEXT: lw a0, 8(sp) +; RV32IZDINXZHINX-NEXT: lw a1, 12(sp) +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_d_h: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.d.h a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; RV32IFZFHMIN-LABEL: fcvt_d_h: ; RV32IFZFHMIN: # %bb.0: ; RV32IFZFHMIN-NEXT: addi sp, sp, -16 @@ -883,6 +1912,26 @@ ; RV64IFZFHMIN-NEXT: addi sp, sp, 16 ; RV64IFZFHMIN-NEXT: ret ; +; CHECK32-IZHINXMIN-LABEL: fcvt_d_h: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: call __extendsfdf2@plt +; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_d_h: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK64-IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: call __extendsfdf2@plt +; CHECK64-IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; CHECK64-IZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK64-IZHINXMIN-NEXT: ret +; ; RV32IDZFHMIN-LABEL: fcvt_d_h: ; RV32IDZFHMIN: # %bb.0: ; RV32IDZFHMIN-NEXT: fcvt.d.h fa0, fa0 @@ -892,6 +1941,22 @@ ; RV64IDZFHMIN: # %bb.0: ; RV64IDZFHMIN-NEXT: fcvt.d.h fa0, fa0 ; RV64IDZFHMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_d_h: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.d.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: sw a0, 8(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: sw a1, 12(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 8(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, 12(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_d_h: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.d.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = call double 
@llvm.experimental.constrained.fpext.f64.f16(half %a, metadata !"fpexcept.strict") ret double %1 } @@ -913,6 +1978,22 @@ ; RV64IZFH-NEXT: fsh fa5, 0(a1) ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_h_w_demanded_bits: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi a0, a0, 1 +; RV32IZHINX-NEXT: fcvt.h.w a2, a0 +; RV32IZHINX-NEXT: sh a2, 0(a1) +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_h_w_demanded_bits: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addiw a2, a0, 1 +; RV64IZHINX-NEXT: addi a0, a0, 1 +; RV64IZHINX-NEXT: fcvt.h.w a0, a0 +; RV64IZHINX-NEXT: sh a0, 0(a1) +; RV64IZHINX-NEXT: mv a0, a2 +; RV64IZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_h_w_demanded_bits: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: addi a0, a0, 1 @@ -927,6 +2008,22 @@ ; RV64IDZFH-NEXT: fsh fa5, 0(a1) ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_h_w_demanded_bits: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi a0, a0, 1 +; RV32IZDINXZHINX-NEXT: fcvt.h.w a2, a0 +; RV32IZDINXZHINX-NEXT: sh a2, 0(a1) +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_w_demanded_bits: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: addiw a2, a0, 1 +; RV64IZDINXZHINX-NEXT: addi a0, a0, 1 +; RV64IZDINXZHINX-NEXT: fcvt.h.w a0, a0 +; RV64IZDINXZHINX-NEXT: sh a0, 0(a1) +; RV64IZDINXZHINX-NEXT: mv a0, a2 +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_h_w_demanded_bits: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: addi a0, a0, 1 @@ -942,6 +2039,38 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa5, fa5 ; CHECK64-IZFHMIN-NEXT: fsh fa5, 0(a1) ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_w_demanded_bits: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: addi a0, a0, 1 +; CHECK32-IZHINXMIN-NEXT: fcvt.s.w a2, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK32-IZHINXMIN-NEXT: sh a2, 0(a1) +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_w_demanded_bits: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: addiw a0, a0, 1 +; CHECK64-IZHINXMIN-NEXT: fcvt.s.l a2, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK64-IZHINXMIN-NEXT: sh a2, 0(a1) +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_w_demanded_bits: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: addi a0, a0, 1 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.w a2, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK32-IZDINXZHINXMIN-NEXT: sh a2, 0(a1) +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_w_demanded_bits: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: addiw a0, a0, 1 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.l a2, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK64-IZDINXZHINXMIN-NEXT: sh a2, 0(a1) +; CHECK64-IZDINXZHINXMIN-NEXT: ret %3 = add i32 %0, 1 %4 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp store half %4, ptr %1, align 2 @@ -964,6 +2093,20 @@ ; RV64IZFH-NEXT: fsh fa5, 0(a1) ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_h_wu_demanded_bits: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi a0, a0, 1 +; RV32IZHINX-NEXT: fcvt.h.wu a2, a0 +; RV32IZHINX-NEXT: sh a2, 0(a1) +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_h_wu_demanded_bits: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addiw a0, a0, 1 +; RV64IZHINX-NEXT: fcvt.h.wu a2, a0 +; RV64IZHINX-NEXT: sh a2, 0(a1) +; RV64IZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: fcvt_h_wu_demanded_bits: ; RV32IDZFH: # %bb.0: ; 
RV32IDZFH-NEXT: addi a0, a0, 1 @@ -978,6 +2121,20 @@ ; RV64IDZFH-NEXT: fsh fa5, 0(a1) ; RV64IDZFH-NEXT: ret ; +; RV32IZDINXZHINX-LABEL: fcvt_h_wu_demanded_bits: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi a0, a0, 1 +; RV32IZDINXZHINX-NEXT: fcvt.h.wu a2, a0 +; RV32IZDINXZHINX-NEXT: sh a2, 0(a1) +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_wu_demanded_bits: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: addiw a0, a0, 1 +; RV64IZDINXZHINX-NEXT: fcvt.h.wu a2, a0 +; RV64IZDINXZHINX-NEXT: sh a2, 0(a1) +; RV64IZDINXZHINX-NEXT: ret +; ; CHECK32-IZFHMIN-LABEL: fcvt_h_wu_demanded_bits: ; CHECK32-IZFHMIN: # %bb.0: ; CHECK32-IZFHMIN-NEXT: addi a0, a0, 1 @@ -995,8 +2152,49 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa5, fa5 ; CHECK64-IZFHMIN-NEXT: fsh fa5, 0(a1) ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_wu_demanded_bits: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: addi a0, a0, 1 +; CHECK32-IZHINXMIN-NEXT: fcvt.s.wu a2, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK32-IZHINXMIN-NEXT: sh a2, 0(a1) +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_wu_demanded_bits: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: addiw a0, a0, 1 +; CHECK64-IZHINXMIN-NEXT: slli a2, a0, 32 +; CHECK64-IZHINXMIN-NEXT: srli a2, a2, 32 +; CHECK64-IZHINXMIN-NEXT: fcvt.s.lu a2, a2 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK64-IZHINXMIN-NEXT: sh a2, 0(a1) +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_wu_demanded_bits: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: addi a0, a0, 1 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.wu a2, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK32-IZDINXZHINXMIN-NEXT: sh a2, 0(a1) +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_wu_demanded_bits: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: addiw a0, a0, 1 +; CHECK64-IZDINXZHINXMIN-NEXT: slli a2, a0, 32 +; CHECK64-IZDINXZHINXMIN-NEXT: srli a2, a2, 32 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.lu a2, a2 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK64-IZDINXZHINXMIN-NEXT: sh a2, 0(a1) +; CHECK64-IZDINXZHINXMIN-NEXT: ret %3 = add i32 %0, 1 %4 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp store half %4, ptr %1, align 2 ret i32 %3 } +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; RV32IZDINXZHINXMIN: {{.*}} +; RV32IZHINXMIN: {{.*}} +; RV64IZDINXZHINXMIN: {{.*}} +; RV64IZHINXMIN: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll --- a/llvm/test/CodeGen/RISCV/half-convert.ll +++ b/llvm/test/CodeGen/RISCV/half-convert.ll @@ -7,6 +7,14 @@ ; RUN: -target-abi ilp32d < %s | FileCheck -check-prefix=RV32IDZFH %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh -verify-machineinstrs \ ; RUN: -target-abi lp64d < %s | FileCheck -check-prefix=RV64IDZFH %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s | FileCheck -check-prefixes=CHECKIZHINX,RV32IZHINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck -check-prefixes=CHECKIZHINX,RV64IZHINX %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx,+zhinx -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s | FileCheck -check-prefixes=CHECKIZDINXZHINX,RV32IZDINXZHINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zdinx,+zhinx -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck -check-prefixes=CHECKIZDINXZHINX,RV64IZDINXZHINX %s ; RUN: llc -mtriple=riscv32 -verify-machineinstrs \ ; RUN: < %s | FileCheck -check-prefix=RV32I %s ; RUN: llc -mtriple=riscv64 -verify-machineinstrs \ @@ -19,6 +27,14 @@ ; RUN: -target-abi ilp32d < %s | FileCheck -check-prefixes=CHECK32-IZFHMIN,RV32IDZFHMIN %s ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin -verify-machineinstrs \ ; RUN: -target-abi lp64d < %s | FileCheck -check-prefixes=CHECK64-IZFHMIN,RV64IDZFHMIN %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s | FileCheck -check-prefixes=CHECK32-IZHINXMIN,RV32IZHINXMIN %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck -check-prefixes=CHECK64-IZHINXMIN,RV64IZHINXMIN %s +; RUN: llc -mtriple=riscv32 -mattr=+zdinx,+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s | FileCheck -check-prefixes=CHECK32-IZDINXZHINXMIN,RV32IZDINXZHINXMIN %s +; RUN: llc -mtriple=riscv64 -mattr=+zdinx,+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck -check-prefixes=CHECK64-IZDINXZHINXMIN,RV64IZDINXZHINXMIN %s define i16 @fcvt_si_h(half %a) nounwind { ; RV32IZFH-LABEL: fcvt_si_h: @@ -41,6 +57,26 @@ ; RV64IDZFH-NEXT: fcvt.l.h a0, fa0, rtz ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_si_h: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_si_h: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_si_h: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_si_h: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_si_h: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -76,6 +112,30 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECK64-IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_si_h: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_si_h: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.l.s a0, 
a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_si_h: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_si_h: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = fptosi half %a to i16 ret i16 %1 } @@ -141,6 +201,62 @@ ; RV64IDZFH-NEXT: and a0, a0, a1 ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_si_h_sat: +; RV32IZHINX: # %bb.0: # %start +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: feq.s a1, a0, a0 +; RV32IZHINX-NEXT: lui a2, %hi(.LCPI1_0) +; RV32IZHINX-NEXT: lw a2, %lo(.LCPI1_0)(a2) +; RV32IZHINX-NEXT: neg a1, a1 +; RV32IZHINX-NEXT: lui a3, 815104 +; RV32IZHINX-NEXT: fmax.s a0, a0, a3 +; RV32IZHINX-NEXT: fmin.s a0, a0, a2 +; RV32IZHINX-NEXT: fcvt.w.s a0, a0, rtz +; RV32IZHINX-NEXT: and a0, a1, a0 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_si_h_sat: +; RV64IZHINX: # %bb.0: # %start +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: lui a1, 815104 +; RV64IZHINX-NEXT: lui a2, %hi(.LCPI1_0) +; RV64IZHINX-NEXT: lw a2, %lo(.LCPI1_0)(a2) +; RV64IZHINX-NEXT: fmax.s a1, a0, a1 +; RV64IZHINX-NEXT: feq.s a0, a0, a0 +; RV64IZHINX-NEXT: neg a0, a0 +; RV64IZHINX-NEXT: fmin.s a1, a1, a2 +; RV64IZHINX-NEXT: fcvt.l.s a1, a1, rtz +; RV64IZHINX-NEXT: and a0, a0, a1 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_si_h_sat: +; RV32IZDINXZHINX: # %bb.0: # %start +; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINX-NEXT: feq.s a1, a0, a0 +; RV32IZDINXZHINX-NEXT: lui a2, %hi(.LCPI1_0) +; RV32IZDINXZHINX-NEXT: lw a2, %lo(.LCPI1_0)(a2) +; RV32IZDINXZHINX-NEXT: neg a1, a1 +; RV32IZDINXZHINX-NEXT: lui a3, 815104 +; RV32IZDINXZHINX-NEXT: fmax.s a0, a0, a3 +; RV32IZDINXZHINX-NEXT: fmin.s a0, a0, a2 +; RV32IZDINXZHINX-NEXT: fcvt.w.s a0, a0, rtz +; RV32IZDINXZHINX-NEXT: and a0, a1, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_si_h_sat: +; RV64IZDINXZHINX: # %bb.0: # %start +; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINX-NEXT: lui a1, 815104 +; RV64IZDINXZHINX-NEXT: lui a2, %hi(.LCPI1_0) +; RV64IZDINXZHINX-NEXT: lw a2, %lo(.LCPI1_0)(a2) +; RV64IZDINXZHINX-NEXT: fmax.s a1, a0, a1 +; RV64IZDINXZHINX-NEXT: feq.s a0, a0, a0 +; RV64IZDINXZHINX-NEXT: neg a0, a0 +; RV64IZDINXZHINX-NEXT: fmin.s a1, a1, a2 +; RV64IZDINXZHINX-NEXT: fcvt.l.s a1, a1, rtz +; RV64IZDINXZHINX-NEXT: and a0, a0, a1 +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_si_h_sat: ; RV32I: # %bb.0: # %start ; RV32I-NEXT: addi sp, sp, -16 @@ -256,6 +372,62 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.l.s a1, fa5, rtz ; CHECK64-IZFHMIN-NEXT: and a0, a0, a1 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_si_h_sat: +; CHECK32-IZHINXMIN: # %bb.0: # %start +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: feq.s a1, a0, a0 +; CHECK32-IZHINXMIN-NEXT: lui a2, %hi(.LCPI1_0) +; CHECK32-IZHINXMIN-NEXT: lw a2, %lo(.LCPI1_0)(a2) +; CHECK32-IZHINXMIN-NEXT: neg a1, a1 +; CHECK32-IZHINXMIN-NEXT: lui a3, 815104 +; CHECK32-IZHINXMIN-NEXT: fmax.s a0, a0, a3 +; CHECK32-IZHINXMIN-NEXT: fmin.s a0, a0, a2 +; CHECK32-IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: and a0, a1, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_si_h_sat: +; CHECK64-IZHINXMIN: # %bb.0: # %start +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; 
CHECK64-IZHINXMIN-NEXT: lui a1, 815104 +; CHECK64-IZHINXMIN-NEXT: lui a2, %hi(.LCPI1_0) +; CHECK64-IZHINXMIN-NEXT: lw a2, %lo(.LCPI1_0)(a2) +; CHECK64-IZHINXMIN-NEXT: fmax.s a1, a0, a1 +; CHECK64-IZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK64-IZHINXMIN-NEXT: neg a0, a0 +; CHECK64-IZHINXMIN-NEXT: fmin.s a1, a1, a2 +; CHECK64-IZHINXMIN-NEXT: fcvt.l.s a1, a1, rtz +; CHECK64-IZHINXMIN-NEXT: and a0, a0, a1 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_si_h_sat: +; CHECK32-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: feq.s a1, a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: lui a2, %hi(.LCPI1_0) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a2, %lo(.LCPI1_0)(a2) +; CHECK32-IZDINXZHINXMIN-NEXT: neg a1, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: lui a3, 815104 +; CHECK32-IZDINXZHINXMIN-NEXT: fmax.s a0, a0, a3 +; CHECK32-IZDINXZHINXMIN-NEXT: fmin.s a0, a0, a2 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a1, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_si_h_sat: +; CHECK64-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: lui a1, 815104 +; CHECK64-IZDINXZHINXMIN-NEXT: lui a2, %hi(.LCPI1_0) +; CHECK64-IZDINXZHINXMIN-NEXT: lw a2, %lo(.LCPI1_0)(a2) +; CHECK64-IZDINXZHINXMIN-NEXT: fmax.s a1, a0, a1 +; CHECK64-IZDINXZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: neg a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fmin.s a1, a1, a2 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.l.s a1, a1, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: and a0, a0, a1 +; CHECK64-IZDINXZHINXMIN-NEXT: ret start: %0 = tail call i16 @llvm.fptosi.sat.i16.f16(half %a) ret i16 %0 @@ -283,6 +455,26 @@ ; RV64IDZFH-NEXT: fcvt.lu.h a0, fa0, rtz ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_ui_h: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_ui_h: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_ui_h: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_ui_h: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_ui_h: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -318,6 +510,30 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECK64-IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_ui_h: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_ui_h: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_ui_h: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_ui_h: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = fptoui half %a to i16 ret i16 %1 } @@ -367,6 +583,46 @@ ; RV64IDZFH-NEXT: fcvt.lu.s a0, fa5, rtz ; 
RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_ui_h_sat: +; RV32IZHINX: # %bb.0: # %start +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI3_0) +; RV32IZHINX-NEXT: lw a1, %lo(.LCPI3_0)(a1) +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: fmax.s a0, a0, zero +; RV32IZHINX-NEXT: fmin.s a0, a0, a1 +; RV32IZHINX-NEXT: fcvt.wu.s a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_ui_h_sat: +; RV64IZHINX: # %bb.0: # %start +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI3_0) +; RV64IZHINX-NEXT: lw a1, %lo(.LCPI3_0)(a1) +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: fmax.s a0, a0, zero +; RV64IZHINX-NEXT: fmin.s a0, a0, a1 +; RV64IZHINX-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_ui_h_sat: +; RV32IZDINXZHINX: # %bb.0: # %start +; RV32IZDINXZHINX-NEXT: lui a1, %hi(.LCPI3_0) +; RV32IZDINXZHINX-NEXT: lw a1, %lo(.LCPI3_0)(a1) +; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINX-NEXT: fmax.s a0, a0, zero +; RV32IZDINXZHINX-NEXT: fmin.s a0, a0, a1 +; RV32IZDINXZHINX-NEXT: fcvt.wu.s a0, a0, rtz +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_ui_h_sat: +; RV64IZDINXZHINX: # %bb.0: # %start +; RV64IZDINXZHINX-NEXT: lui a1, %hi(.LCPI3_0) +; RV64IZDINXZHINX-NEXT: lw a1, %lo(.LCPI3_0)(a1) +; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINX-NEXT: fmax.s a0, a0, zero +; RV64IZDINXZHINX-NEXT: fmin.s a0, a0, a1 +; RV64IZDINXZHINX-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_ui_h_sat: ; RV32I: # %bb.0: # %start ; RV32I-NEXT: addi sp, sp, -32 @@ -464,6 +720,46 @@ ; CHECK64-IZFHMIN-NEXT: fmin.s fa5, fa4, fa5 ; CHECK64-IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_ui_h_sat: +; CHECK32-IZHINXMIN: # %bb.0: # %start +; CHECK32-IZHINXMIN-NEXT: lui a1, %hi(.LCPI3_0) +; CHECK32-IZHINXMIN-NEXT: lw a1, %lo(.LCPI3_0)(a1) +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fmax.s a0, a0, zero +; CHECK32-IZHINXMIN-NEXT: fmin.s a0, a0, a1 +; CHECK32-IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_ui_h_sat: +; CHECK64-IZHINXMIN: # %bb.0: # %start +; CHECK64-IZHINXMIN-NEXT: lui a1, %hi(.LCPI3_0) +; CHECK64-IZHINXMIN-NEXT: lw a1, %lo(.LCPI3_0)(a1) +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fmax.s a0, a0, zero +; CHECK64-IZHINXMIN-NEXT: fmin.s a0, a0, a1 +; CHECK64-IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_ui_h_sat: +; CHECK32-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK32-IZDINXZHINXMIN-NEXT: lui a1, %hi(.LCPI3_0) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, %lo(.LCPI3_0)(a1) +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fmax.s a0, a0, zero +; CHECK32-IZDINXZHINXMIN-NEXT: fmin.s a0, a0, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_ui_h_sat: +; CHECK64-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK64-IZDINXZHINXMIN-NEXT: lui a1, %hi(.LCPI3_0) +; CHECK64-IZDINXZHINXMIN-NEXT: lw a1, %lo(.LCPI3_0)(a1) +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fmax.s a0, a0, zero +; CHECK64-IZDINXZHINXMIN-NEXT: fmin.s a0, a0, a1 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret start: %0 = tail call i16 @llvm.fptoui.sat.i16.f16(half %a) ret i16 %0 @@ -486,6 +782,16 @@ ; RV64IDZFH-NEXT: fcvt.w.h a0, fa0, rtz ; RV64IDZFH-NEXT: ret ; +; 
CHECKIZHINX-LABEL: fcvt_w_h: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.w.h a0, a0, rtz +; CHECKIZHINX-NEXT: ret +; +; CHECKIZDINXZHINX-LABEL: fcvt_w_h: +; CHECKIZDINXZHINX: # %bb.0: +; CHECKIZDINXZHINX-NEXT: fcvt.w.h a0, a0, rtz +; CHECKIZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_w_h: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -521,6 +827,30 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECK64-IZFHMIN-NEXT: fcvt.w.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_w_h: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_w_h: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_w_h: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_w_h: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = fptosi half %a to i32 ret i32 %1 } @@ -553,6 +883,24 @@ ; RV64IDZFH-NEXT: and a0, a1, a0 ; RV64IDZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_w_h_sat: +; CHECKIZHINX: # %bb.0: # %start +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rtz +; CHECKIZHINX-NEXT: feq.h a0, a0, a0 +; CHECKIZHINX-NEXT: seqz a0, a0 +; CHECKIZHINX-NEXT: addi a0, a0, -1 +; CHECKIZHINX-NEXT: and a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; +; CHECKIZDINXZHINX-LABEL: fcvt_w_h_sat: +; CHECKIZDINXZHINX: # %bb.0: # %start +; CHECKIZDINXZHINX-NEXT: fcvt.w.h a1, a0, rtz +; CHECKIZDINXZHINX-NEXT: feq.h a0, a0, a0 +; CHECKIZDINXZHINX-NEXT: seqz a0, a0 +; CHECKIZDINXZHINX-NEXT: addi a0, a0, -1 +; CHECKIZDINXZHINX-NEXT: and a0, a0, a1 +; CHECKIZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_w_h_sat: ; RV32I: # %bb.0: # %start ; RV32I-NEXT: addi sp, sp, -32 @@ -662,6 +1010,46 @@ ; CHECK64-IZFHMIN-NEXT: addi a1, a1, -1 ; CHECK64-IZFHMIN-NEXT: and a0, a1, a0 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_w_h_sat: +; CHECK32-IZHINXMIN: # %bb.0: # %start +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECK32-IZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK32-IZHINXMIN-NEXT: seqz a0, a0 +; CHECK32-IZHINXMIN-NEXT: addi a0, a0, -1 +; CHECK32-IZHINXMIN-NEXT: and a0, a0, a1 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_w_h_sat: +; CHECK64-IZHINXMIN: # %bb.0: # %start +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECK64-IZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK64-IZHINXMIN-NEXT: seqz a0, a0 +; CHECK64-IZHINXMIN-NEXT: addi a0, a0, -1 +; CHECK64-IZHINXMIN-NEXT: and a0, a0, a1 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_w_h_sat: +; CHECK32-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: seqz a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: addi a0, a0, -1 +; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a0, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_w_h_sat: +; CHECK64-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK64-IZDINXZHINXMIN-NEXT: 
fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: seqz a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: addi a0, a0, -1 +; CHECK64-IZDINXZHINXMIN-NEXT: and a0, a0, a1 +; CHECK64-IZDINXZHINXMIN-NEXT: ret start: %0 = tail call i32 @llvm.fptosi.sat.i32.f16(half %a) ret i32 %0 @@ -684,6 +1072,16 @@ ; RV64IDZFH-NEXT: fcvt.wu.h a0, fa0, rtz ; RV64IDZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_wu_h: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; CHECKIZHINX-NEXT: ret +; +; CHECKIZDINXZHINX-LABEL: fcvt_wu_h: +; CHECKIZDINXZHINX: # %bb.0: +; CHECKIZDINXZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; CHECKIZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_wu_h: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -719,6 +1117,30 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECK64-IZFHMIN-NEXT: fcvt.wu.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_wu_h: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_wu_h: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_wu_h: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_wu_h: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = fptoui half %a to i32 ret i32 %1 } @@ -747,6 +1169,20 @@ ; RV64IDZFH-NEXT: add a0, a0, a1 ; RV64IDZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_wu_h_multiple_use: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; CHECKIZHINX-NEXT: seqz a1, a0 +; CHECKIZHINX-NEXT: add a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; +; CHECKIZDINXZHINX-LABEL: fcvt_wu_h_multiple_use: +; CHECKIZDINXZHINX: # %bb.0: +; CHECKIZDINXZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; CHECKIZDINXZHINX-NEXT: seqz a1, a0 +; CHECKIZDINXZHINX-NEXT: add a0, a0, a1 +; CHECKIZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_wu_h_multiple_use: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -790,6 +1226,38 @@ ; CHECK64-IZFHMIN-NEXT: seqz a1, a0 ; CHECK64-IZFHMIN-NEXT: add a0, a0, a1 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_wu_h_multiple_use: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: seqz a1, a0 +; CHECK32-IZHINXMIN-NEXT: add a0, a0, a1 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_wu_h_multiple_use: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: seqz a1, a0 +; CHECK64-IZHINXMIN-NEXT: add a0, a0, a1 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_wu_h_multiple_use: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: seqz a1, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: add a0, a0, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_wu_h_multiple_use: +; CHECK64-IZDINXZHINXMIN: # 
%bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: seqz a1, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: add a0, a0, a1 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %a = fptoui half %x to i32 %b = icmp eq i32 %a, 0 %c = select i1 %b, i32 1, i32 %a @@ -837,6 +1305,46 @@ ; RV64IDZFH-NEXT: srli a0, a0, 32 ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_wu_h_sat: +; RV32IZHINX: # %bb.0: # %start +; RV32IZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV32IZHINX-NEXT: feq.h a0, a0, a0 +; RV32IZHINX-NEXT: seqz a0, a0 +; RV32IZHINX-NEXT: addi a0, a0, -1 +; RV32IZHINX-NEXT: and a0, a0, a1 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_wu_h_sat: +; RV64IZHINX: # %bb.0: # %start +; RV64IZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addiw a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a1, a0 +; RV64IZHINX-NEXT: slli a0, a0, 32 +; RV64IZHINX-NEXT: srli a0, a0, 32 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_wu_h_sat: +; RV32IZDINXZHINX: # %bb.0: # %start +; RV32IZDINXZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV32IZDINXZHINX-NEXT: feq.h a0, a0, a0 +; RV32IZDINXZHINX-NEXT: seqz a0, a0 +; RV32IZDINXZHINX-NEXT: addi a0, a0, -1 +; RV32IZDINXZHINX-NEXT: and a0, a0, a1 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_wu_h_sat: +; RV64IZDINXZHINX: # %bb.0: # %start +; RV64IZDINXZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV64IZDINXZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZDINXZHINX-NEXT: seqz a0, a0 +; RV64IZDINXZHINX-NEXT: addiw a0, a0, -1 +; RV64IZDINXZHINX-NEXT: and a0, a1, a0 +; RV64IZDINXZHINX-NEXT: slli a0, a0, 32 +; RV64IZDINXZHINX-NEXT: srli a0, a0, 32 +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_wu_h_sat: ; RV32I: # %bb.0: # %start ; RV32I-NEXT: addi sp, sp, -16 @@ -928,6 +1436,50 @@ ; CHECK64-IZFHMIN-NEXT: slli a0, a0, 32 ; CHECK64-IZFHMIN-NEXT: srli a0, a0, 32 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_wu_h_sat: +; CHECK32-IZHINXMIN: # %bb.0: # %start +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; CHECK32-IZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK32-IZHINXMIN-NEXT: seqz a0, a0 +; CHECK32-IZHINXMIN-NEXT: addi a0, a0, -1 +; CHECK32-IZHINXMIN-NEXT: and a0, a0, a1 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_wu_h_sat: +; CHECK64-IZHINXMIN: # %bb.0: # %start +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; CHECK64-IZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK64-IZHINXMIN-NEXT: seqz a0, a0 +; CHECK64-IZHINXMIN-NEXT: addiw a0, a0, -1 +; CHECK64-IZHINXMIN-NEXT: and a0, a1, a0 +; CHECK64-IZHINXMIN-NEXT: slli a0, a0, 32 +; CHECK64-IZHINXMIN-NEXT: srli a0, a0, 32 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_wu_h_sat: +; CHECK32-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: seqz a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: addi a0, a0, -1 +; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a0, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_wu_h_sat: +; CHECK64-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: seqz a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: 
addiw a0, a0, -1 +; CHECK64-IZDINXZHINXMIN-NEXT: and a0, a1, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: slli a0, a0, 32 +; CHECK64-IZDINXZHINXMIN-NEXT: srli a0, a0, 32 +; CHECK64-IZDINXZHINXMIN-NEXT: ret start: %0 = tail call i32 @llvm.fptoui.sat.i32.f16(half %a) ret i32 %0 @@ -963,6 +1515,34 @@ ; RV64IDZFH-NEXT: fcvt.l.h a0, fa0, rtz ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_l_h: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call __fixhfdi@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_l_h: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_l_h: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: call __fixhfdi@plt +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_l_h: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_l_h: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1001,6 +1581,36 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECK64-IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_l_h: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: call __fixhfdi@plt +; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_l_h: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_l_h: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZDINXZHINXMIN-NEXT: call __fixhfdi@plt +; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_l_h: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = fptosi half %a to i64 ret i64 %1 } @@ -1102,6 +1712,112 @@ ; RV64IDZFH-NEXT: and a0, a1, a0 ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_l_h_sat: +; RV32IZHINX: # %bb.0: # %start +; RV32IZHINX-NEXT: addi sp, sp, -32 +; RV32IZHINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s4, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI10_0) +; RV32IZHINX-NEXT: lw a1, %lo(.LCPI10_0)(a1) +; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: flt.s s1, a1, s0 +; RV32IZHINX-NEXT: neg s2, s1 +; RV32IZHINX-NEXT: lui a0, 913408 +; RV32IZHINX-NEXT: fle.s s3, a0, s0 +; RV32IZHINX-NEXT: neg s4, s3 +; RV32IZHINX-NEXT: mv a0, s0 +; 
RV32IZHINX-NEXT: call __fixsfdi@plt +; RV32IZHINX-NEXT: and a0, s4, a0 +; RV32IZHINX-NEXT: or a0, s2, a0 +; RV32IZHINX-NEXT: feq.s a2, s0, s0 +; RV32IZHINX-NEXT: neg a2, a2 +; RV32IZHINX-NEXT: lui a4, 524288 +; RV32IZHINX-NEXT: lui a3, 524288 +; RV32IZHINX-NEXT: beqz s3, .LBB10_2 +; RV32IZHINX-NEXT: # %bb.1: # %start +; RV32IZHINX-NEXT: mv a3, a1 +; RV32IZHINX-NEXT: .LBB10_2: # %start +; RV32IZHINX-NEXT: and a0, a2, a0 +; RV32IZHINX-NEXT: beqz s1, .LBB10_4 +; RV32IZHINX-NEXT: # %bb.3: +; RV32IZHINX-NEXT: addi a3, a4, -1 +; RV32IZHINX-NEXT: .LBB10_4: # %start +; RV32IZHINX-NEXT: and a1, a2, a3 +; RV32IZHINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s4, 8(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 32 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_l_h_sat: +; RV64IZHINX: # %bb.0: # %start +; RV64IZHINX-NEXT: fcvt.l.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addi a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a0, a1 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_l_h_sat: +; RV32IZDINXZHINX: # %bb.0: # %start +; RV32IZDINXZHINX-NEXT: addi sp, sp, -32 +; RV32IZDINXZHINX-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: sw s3, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: sw s4, 8(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: lui a1, %hi(.LCPI10_0) +; RV32IZDINXZHINX-NEXT: lw a1, %lo(.LCPI10_0)(a1) +; RV32IZDINXZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZDINXZHINX-NEXT: flt.s s1, a1, s0 +; RV32IZDINXZHINX-NEXT: neg s2, s1 +; RV32IZDINXZHINX-NEXT: lui a0, 913408 +; RV32IZDINXZHINX-NEXT: fle.s s3, a0, s0 +; RV32IZDINXZHINX-NEXT: neg s4, s3 +; RV32IZDINXZHINX-NEXT: mv a0, s0 +; RV32IZDINXZHINX-NEXT: call __fixsfdi@plt +; RV32IZDINXZHINX-NEXT: and a0, s4, a0 +; RV32IZDINXZHINX-NEXT: or a0, s2, a0 +; RV32IZDINXZHINX-NEXT: feq.s a2, s0, s0 +; RV32IZDINXZHINX-NEXT: neg a2, a2 +; RV32IZDINXZHINX-NEXT: lui a4, 524288 +; RV32IZDINXZHINX-NEXT: lui a3, 524288 +; RV32IZDINXZHINX-NEXT: beqz s3, .LBB10_2 +; RV32IZDINXZHINX-NEXT: # %bb.1: # %start +; RV32IZDINXZHINX-NEXT: mv a3, a1 +; RV32IZDINXZHINX-NEXT: .LBB10_2: # %start +; RV32IZDINXZHINX-NEXT: and a0, a2, a0 +; RV32IZDINXZHINX-NEXT: beqz s1, .LBB10_4 +; RV32IZDINXZHINX-NEXT: # %bb.3: +; RV32IZDINXZHINX-NEXT: addi a3, a4, -1 +; RV32IZDINXZHINX-NEXT: .LBB10_4: # %start +; RV32IZDINXZHINX-NEXT: and a1, a2, a3 +; RV32IZDINXZHINX-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: lw s3, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: lw s4, 8(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 32 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_l_h_sat: +; RV64IZDINXZHINX: # %bb.0: # %start +; RV64IZDINXZHINX-NEXT: fcvt.l.h a1, a0, rtz +; RV64IZDINXZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZDINXZHINX-NEXT: seqz a0, a0 +; RV64IZDINXZHINX-NEXT: addi a0, a0, -1 +; RV64IZDINXZHINX-NEXT: and a0, a0, a1 +; 
RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_l_h_sat: ; RV32I: # %bb.0: # %start ; RV32I-NEXT: addi sp, sp, -32 @@ -1292,6 +2008,114 @@ ; RV32IDZFHMIN-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload ; RV32IDZFHMIN-NEXT: addi sp, sp, 16 ; RV32IDZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_l_h_sat: +; CHECK32-IZHINXMIN: # %bb.0: # %start +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -32 +; CHECK32-IZHINXMIN-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: sw s3, 12(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: sw s4, 8(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: lui a1, %hi(.LCPI10_0) +; CHECK32-IZHINXMIN-NEXT: lw a1, %lo(.LCPI10_0)(a1) +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; CHECK32-IZHINXMIN-NEXT: flt.s s1, a1, s0 +; CHECK32-IZHINXMIN-NEXT: neg s2, s1 +; CHECK32-IZHINXMIN-NEXT: lui a0, 913408 +; CHECK32-IZHINXMIN-NEXT: fle.s s3, a0, s0 +; CHECK32-IZHINXMIN-NEXT: neg s4, s3 +; CHECK32-IZHINXMIN-NEXT: mv a0, s0 +; CHECK32-IZHINXMIN-NEXT: call __fixsfdi@plt +; CHECK32-IZHINXMIN-NEXT: and a0, s4, a0 +; CHECK32-IZHINXMIN-NEXT: or a0, s2, a0 +; CHECK32-IZHINXMIN-NEXT: feq.s a2, s0, s0 +; CHECK32-IZHINXMIN-NEXT: neg a2, a2 +; CHECK32-IZHINXMIN-NEXT: lui a4, 524288 +; CHECK32-IZHINXMIN-NEXT: lui a3, 524288 +; CHECK32-IZHINXMIN-NEXT: beqz s3, .LBB10_2 +; CHECK32-IZHINXMIN-NEXT: # %bb.1: # %start +; CHECK32-IZHINXMIN-NEXT: mv a3, a1 +; CHECK32-IZHINXMIN-NEXT: .LBB10_2: # %start +; CHECK32-IZHINXMIN-NEXT: and a0, a2, a0 +; CHECK32-IZHINXMIN-NEXT: beqz s1, .LBB10_4 +; CHECK32-IZHINXMIN-NEXT: # %bb.3: +; CHECK32-IZHINXMIN-NEXT: addi a3, a4, -1 +; CHECK32-IZHINXMIN-NEXT: .LBB10_4: # %start +; CHECK32-IZHINXMIN-NEXT: and a1, a2, a3 +; CHECK32-IZHINXMIN-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: lw s3, 12(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: lw s4, 8(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 32 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_l_h_sat: +; CHECK64-IZHINXMIN: # %bb.0: # %start +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.l.s a1, a0, rtz +; CHECK64-IZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK64-IZHINXMIN-NEXT: seqz a0, a0 +; CHECK64-IZHINXMIN-NEXT: addi a0, a0, -1 +; CHECK64-IZHINXMIN-NEXT: and a0, a0, a1 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_l_h_sat: +; CHECK32-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -32 +; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; CHECK32-IZDINXZHINXMIN-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; CHECK32-IZDINXZHINXMIN-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; CHECK32-IZDINXZHINXMIN-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; CHECK32-IZDINXZHINXMIN-NEXT: sw s3, 12(sp) # 4-byte Folded Spill +; CHECK32-IZDINXZHINXMIN-NEXT: sw s4, 8(sp) # 4-byte Folded Spill +; CHECK32-IZDINXZHINXMIN-NEXT: lui a1, %hi(.LCPI10_0) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, %lo(.LCPI10_0)(a1) +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h s0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: flt.s s1, a1, s0 +; CHECK32-IZDINXZHINXMIN-NEXT: neg s2, s1 +; CHECK32-IZDINXZHINXMIN-NEXT: lui a0, 
913408 +; CHECK32-IZDINXZHINXMIN-NEXT: fle.s s3, a0, s0 +; CHECK32-IZDINXZHINXMIN-NEXT: neg s4, s3 +; CHECK32-IZDINXZHINXMIN-NEXT: mv a0, s0 +; CHECK32-IZDINXZHINXMIN-NEXT: call __fixsfdi@plt +; CHECK32-IZDINXZHINXMIN-NEXT: and a0, s4, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: or a0, s2, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: feq.s a2, s0, s0 +; CHECK32-IZDINXZHINXMIN-NEXT: neg a2, a2 +; CHECK32-IZDINXZHINXMIN-NEXT: lui a4, 524288 +; CHECK32-IZDINXZHINXMIN-NEXT: lui a3, 524288 +; CHECK32-IZDINXZHINXMIN-NEXT: beqz s3, .LBB10_2 +; CHECK32-IZDINXZHINXMIN-NEXT: # %bb.1: # %start +; CHECK32-IZDINXZHINXMIN-NEXT: mv a3, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: .LBB10_2: # %start +; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a2, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: beqz s1, .LBB10_4 +; CHECK32-IZDINXZHINXMIN-NEXT: # %bb.3: +; CHECK32-IZDINXZHINXMIN-NEXT: addi a3, a4, -1 +; CHECK32-IZDINXZHINXMIN-NEXT: .LBB10_4: # %start +; CHECK32-IZDINXZHINXMIN-NEXT: and a1, a2, a3 +; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: lw s3, 12(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: lw s4, 8(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 32 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_l_h_sat: +; CHECK64-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.l.s a1, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: seqz a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: addi a0, a0, -1 +; CHECK64-IZDINXZHINXMIN-NEXT: and a0, a0, a1 +; CHECK64-IZDINXZHINXMIN-NEXT: ret start: %0 = tail call i64 @llvm.fptosi.sat.i64.f16(half %a) ret i64 %0 @@ -1327,6 +2151,34 @@ ; RV64IDZFH-NEXT: fcvt.lu.h a0, fa0, rtz ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_lu_h: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call __fixunshfdi@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_lu_h: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_lu_h: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: call __fixunshfdi@plt +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_lu_h: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_lu_h: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1365,6 +2217,36 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECK64-IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_lu_h: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: call __fixunshfdi@plt +; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZHINXMIN-NEXT: ret +; +; 
CHECK64-IZHINXMIN-LABEL: fcvt_lu_h: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_lu_h: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZDINXZHINXMIN-NEXT: call __fixunshfdi@plt +; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_lu_h: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = fptoui half %a to i64 ret i64 %1 } @@ -1438,6 +2320,72 @@ ; RV64IDZFH-NEXT: and a0, a1, a0 ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_lu_h_sat: +; RV32IZHINX: # %bb.0: # %start +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI12_0) +; RV32IZHINX-NEXT: lw a1, %lo(.LCPI12_0)(a1) +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: flt.s a1, a1, a0 +; RV32IZHINX-NEXT: neg s0, a1 +; RV32IZHINX-NEXT: fle.s a1, zero, a0 +; RV32IZHINX-NEXT: neg s1, a1 +; RV32IZHINX-NEXT: call __fixunssfdi@plt +; RV32IZHINX-NEXT: and a0, s1, a0 +; RV32IZHINX-NEXT: or a0, s0, a0 +; RV32IZHINX-NEXT: and a1, s1, a1 +; RV32IZHINX-NEXT: or a1, s0, a1 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_lu_h_sat: +; RV64IZHINX: # %bb.0: # %start +; RV64IZHINX-NEXT: fcvt.lu.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addi a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a0, a1 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_lu_h_sat: +; RV32IZDINXZHINX: # %bb.0: # %start +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: lui a1, %hi(.LCPI12_0) +; RV32IZDINXZHINX-NEXT: lw a1, %lo(.LCPI12_0)(a1) +; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINX-NEXT: flt.s a1, a1, a0 +; RV32IZDINXZHINX-NEXT: neg s0, a1 +; RV32IZDINXZHINX-NEXT: fle.s a1, zero, a0 +; RV32IZDINXZHINX-NEXT: neg s1, a1 +; RV32IZDINXZHINX-NEXT: call __fixunssfdi@plt +; RV32IZDINXZHINX-NEXT: and a0, s1, a0 +; RV32IZDINXZHINX-NEXT: or a0, s0, a0 +; RV32IZDINXZHINX-NEXT: and a1, s1, a1 +; RV32IZDINXZHINX-NEXT: or a1, s0, a1 +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_lu_h_sat: +; RV64IZDINXZHINX: # %bb.0: # %start +; RV64IZDINXZHINX-NEXT: fcvt.lu.h a1, a0, rtz +; RV64IZDINXZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZDINXZHINX-NEXT: seqz a0, a0 +; RV64IZDINXZHINX-NEXT: addi a0, a0, -1 +; RV64IZDINXZHINX-NEXT: and a0, a0, a1 +; RV64IZDINXZHINX-NEXT: ret +; ; 
RV32I-LABEL: fcvt_lu_h_sat: ; RV32I: # %bb.0: # %start ; RV32I-NEXT: addi sp, sp, -16 @@ -1538,6 +2486,74 @@ ; CHECK64-IZFHMIN-NEXT: addi a1, a1, -1 ; CHECK64-IZFHMIN-NEXT: and a0, a1, a0 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_lu_h_sat: +; CHECK32-IZHINXMIN: # %bb.0: # %start +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: lui a1, %hi(.LCPI12_0) +; CHECK32-IZHINXMIN-NEXT: lw a1, %lo(.LCPI12_0)(a1) +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: flt.s a1, a1, a0 +; CHECK32-IZHINXMIN-NEXT: neg s0, a1 +; CHECK32-IZHINXMIN-NEXT: fle.s a1, zero, a0 +; CHECK32-IZHINXMIN-NEXT: neg s1, a1 +; CHECK32-IZHINXMIN-NEXT: call __fixunssfdi@plt +; CHECK32-IZHINXMIN-NEXT: and a0, s1, a0 +; CHECK32-IZHINXMIN-NEXT: or a0, s0, a0 +; CHECK32-IZHINXMIN-NEXT: and a1, s1, a1 +; CHECK32-IZHINXMIN-NEXT: or a1, s0, a1 +; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_lu_h_sat: +; CHECK64-IZHINXMIN: # %bb.0: # %start +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.lu.s a1, a0, rtz +; CHECK64-IZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK64-IZHINXMIN-NEXT: seqz a0, a0 +; CHECK64-IZHINXMIN-NEXT: addi a0, a0, -1 +; CHECK64-IZHINXMIN-NEXT: and a0, a0, a1 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_lu_h_sat: +; CHECK32-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZDINXZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; CHECK32-IZDINXZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; CHECK32-IZDINXZHINXMIN-NEXT: lui a1, %hi(.LCPI12_0) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, %lo(.LCPI12_0)(a1) +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: flt.s a1, a1, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: neg s0, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: fle.s a1, zero, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: neg s1, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: call __fixunssfdi@plt +; CHECK32-IZDINXZHINXMIN-NEXT: and a0, s1, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: or a0, s0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: and a1, s1, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: or a1, s0, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_lu_h_sat: +; CHECK64-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.lu.s a1, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: seqz a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: addi a0, a0, -1 +; CHECK64-IZDINXZHINXMIN-NEXT: and a0, a0, a1 +; CHECK64-IZDINXZHINXMIN-NEXT: ret start: %0 = tail call i64 @llvm.fptoui.sat.i64.f16(half %a) ret i64 %0 @@ -1573,6 +2589,34 @@ ; RV64IDZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_h_si: +; RV32IZHINX: # %bb.0: 
+; RV32IZHINX-NEXT: slli a0, a0, 16 +; RV32IZHINX-NEXT: srai a0, a0, 16 +; RV32IZHINX-NEXT: fcvt.h.w a0, a0 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_h_si: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: slli a0, a0, 48 +; RV64IZHINX-NEXT: srai a0, a0, 48 +; RV64IZHINX-NEXT: fcvt.h.w a0, a0 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_h_si: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: slli a0, a0, 16 +; RV32IZDINXZHINX-NEXT: srai a0, a0, 16 +; RV32IZDINXZHINX-NEXT: fcvt.h.w a0, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_si: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: slli a0, a0, 48 +; RV64IZDINXZHINX-NEXT: srai a0, a0, 48 +; RV64IZDINXZHINX-NEXT: fcvt.h.w a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_h_si: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1612,6 +2656,38 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.l fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_si: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: slli a0, a0, 16 +; CHECK32-IZHINXMIN-NEXT: srai a0, a0, 16 +; CHECK32-IZHINXMIN-NEXT: fcvt.s.w a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_si: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: slli a0, a0, 48 +; CHECK64-IZHINXMIN-NEXT: srai a0, a0, 48 +; CHECK64-IZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_si: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: slli a0, a0, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: srai a0, a0, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.w a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_si: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: slli a0, a0, 48 +; CHECK64-IZDINXZHINXMIN-NEXT: srai a0, a0, 48 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = sitofp i16 %a to half ret half %1 } @@ -1632,6 +2708,16 @@ ; RV64IDZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IDZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_h_si_signext: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.h.w a0, a0 +; CHECKIZHINX-NEXT: ret +; +; CHECKIZDINXZHINX-LABEL: fcvt_h_si_signext: +; CHECKIZDINXZHINX: # %bb.0: +; CHECKIZDINXZHINX-NEXT: fcvt.h.w a0, a0 +; CHECKIZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_h_si_signext: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1663,6 +2749,30 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.l fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_si_signext: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.w a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_si_signext: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_si_signext: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.w a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_si_signext: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.l a0, a0 +; 
CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = sitofp i16 %a to half ret half %1 } @@ -1696,6 +2806,34 @@ ; RV64IDZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_h_ui: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: slli a0, a0, 16 +; RV32IZHINX-NEXT: srli a0, a0, 16 +; RV32IZHINX-NEXT: fcvt.h.wu a0, a0 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_h_ui: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: slli a0, a0, 48 +; RV64IZHINX-NEXT: srli a0, a0, 48 +; RV64IZHINX-NEXT: fcvt.h.wu a0, a0 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_h_ui: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: slli a0, a0, 16 +; RV32IZDINXZHINX-NEXT: srli a0, a0, 16 +; RV32IZDINXZHINX-NEXT: fcvt.h.wu a0, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_ui: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: slli a0, a0, 48 +; RV64IZDINXZHINX-NEXT: srli a0, a0, 48 +; RV64IZDINXZHINX-NEXT: fcvt.h.wu a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_h_ui: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1735,6 +2873,38 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.lu fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_ui: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: slli a0, a0, 16 +; CHECK32-IZHINXMIN-NEXT: srli a0, a0, 16 +; CHECK32-IZHINXMIN-NEXT: fcvt.s.wu a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_ui: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: slli a0, a0, 48 +; CHECK64-IZHINXMIN-NEXT: srli a0, a0, 48 +; CHECK64-IZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_ui: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: slli a0, a0, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: srli a0, a0, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.wu a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_ui: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: slli a0, a0, 48 +; CHECK64-IZDINXZHINXMIN-NEXT: srli a0, a0, 48 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = uitofp i16 %a to half ret half %1 } @@ -1755,6 +2925,16 @@ ; RV64IDZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IDZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_h_ui_zeroext: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.h.wu a0, a0 +; CHECKIZHINX-NEXT: ret +; +; CHECKIZDINXZHINX-LABEL: fcvt_h_ui_zeroext: +; CHECKIZDINXZHINX: # %bb.0: +; CHECKIZDINXZHINX-NEXT: fcvt.h.wu a0, a0 +; CHECKIZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_h_ui_zeroext: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1786,6 +2966,30 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.lu fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_ui_zeroext: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.wu a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_ui_zeroext: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_ui_zeroext: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; 
CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.wu a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_ui_zeroext: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = uitofp i16 %a to half ret half %1 } @@ -1806,6 +3010,16 @@ ; RV64IDZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IDZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_h_w: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.h.w a0, a0 +; CHECKIZHINX-NEXT: ret +; +; CHECKIZDINXZHINX-LABEL: fcvt_h_w: +; CHECKIZDINXZHINX: # %bb.0: +; CHECKIZDINXZHINX-NEXT: fcvt.h.w a0, a0 +; CHECKIZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_h_w: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1839,6 +3053,32 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.l fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_w: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.w a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_w: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: sext.w a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_w: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.w a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_w: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: sext.w a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = sitofp i32 %a to half ret half %1 } @@ -1862,6 +3102,18 @@ ; RV64IDZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IDZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_h_w_load: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lw a0, 0(a0) +; CHECKIZHINX-NEXT: fcvt.h.w a0, a0 +; CHECKIZHINX-NEXT: ret +; +; CHECKIZDINXZHINX-LABEL: fcvt_h_w_load: +; CHECKIZDINXZHINX: # %bb.0: +; CHECKIZDINXZHINX-NEXT: lw a0, 0(a0) +; CHECKIZDINXZHINX-NEXT: fcvt.h.w a0, a0 +; CHECKIZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_h_w_load: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1897,6 +3149,34 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.l fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_w_load: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: lw a0, 0(a0) +; CHECK32-IZHINXMIN-NEXT: fcvt.s.w a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_w_load: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: lw a0, 0(a0) +; CHECK64-IZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_w_load: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 0(a0) +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.w a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_w_load: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: lw a0, 0(a0) +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %a = load i32, ptr %p %1 = sitofp 
i32 %a to half ret half %1 @@ -1918,6 +3198,16 @@ ; RV64IDZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IDZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_h_wu: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.h.wu a0, a0 +; CHECKIZHINX-NEXT: ret +; +; CHECKIZDINXZHINX-LABEL: fcvt_h_wu: +; CHECKIZDINXZHINX: # %bb.0: +; CHECKIZDINXZHINX-NEXT: fcvt.h.wu a0, a0 +; CHECKIZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_h_wu: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1952,6 +3242,34 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.lu fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_wu: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.wu a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_wu: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: slli a0, a0, 32 +; CHECK64-IZHINXMIN-NEXT: srli a0, a0, 32 +; CHECK64-IZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_wu: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.wu a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_wu: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: slli a0, a0, 32 +; CHECK64-IZDINXZHINXMIN-NEXT: srli a0, a0, 32 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = uitofp i32 %a to half ret half %1 } @@ -1981,6 +3299,30 @@ ; RV64IDZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_h_wu_load: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lw a0, 0(a0) +; RV32IZHINX-NEXT: fcvt.h.wu a0, a0 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_h_wu_load: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lwu a0, 0(a0) +; RV64IZHINX-NEXT: fcvt.h.wu a0, a0 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_h_wu_load: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: lw a0, 0(a0) +; RV32IZDINXZHINX-NEXT: fcvt.h.wu a0, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_wu_load: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: lwu a0, 0(a0) +; RV64IZDINXZHINX-NEXT: fcvt.h.wu a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_h_wu_load: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2016,6 +3358,34 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.lu fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_wu_load: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: lw a0, 0(a0) +; CHECK32-IZHINXMIN-NEXT: fcvt.s.wu a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_wu_load: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: lwu a0, 0(a0) +; CHECK64-IZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_wu_load: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 0(a0) +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.wu a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_wu_load: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: lwu a0, 0(a0) +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; 
CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %a = load i32, ptr %p %1 = uitofp i32 %a to half ret half %1 @@ -2050,6 +3420,34 @@ ; RV64IDZFH-NEXT: fcvt.h.l fa0, a0 ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_h_l: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call __floatdihf@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_h_l: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.h.l a0, a0 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_h_l: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: call __floatdihf@plt +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_l: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.h.l a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_h_l: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2084,6 +3482,36 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.l fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_l: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: call __floatdihf@plt +; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_l: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_l: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZDINXZHINXMIN-NEXT: call __floatdihf@plt +; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_l: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.l a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = sitofp i64 %a to half ret half %1 } @@ -2117,6 +3545,34 @@ ; RV64IDZFH-NEXT: fcvt.h.lu fa0, a0 ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_h_lu: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call __floatundihf@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_h_lu: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.h.lu a0, a0 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_h_lu: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: call __floatundihf@plt +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_lu: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.h.lu a0, a0 +; 
RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_h_lu: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2151,6 +3607,36 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.lu fa5, a0 ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_lu: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: call __floatundihf@plt +; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_lu: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_lu: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZDINXZHINXMIN-NEXT: call __floatundihf@plt +; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_lu: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = uitofp i64 %a to half ret half %1 } @@ -2171,6 +3657,16 @@ ; RV64IDZFH-NEXT: fcvt.h.s fa0, fa0 ; RV64IDZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_h_s: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINX-NEXT: ret +; +; CHECKIZDINXZHINX-LABEL: fcvt_h_s: +; CHECKIZDINXZHINX: # %bb.0: +; CHECKIZDINXZHINX-NEXT: fcvt.h.s a0, a0 +; CHECKIZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_h_s: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2198,6 +3694,26 @@ ; CHECK64-IZFHMIN: # %bb.0: ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa0, fa0 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_s: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_s: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_s: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_s: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = fptrunc float %a to half ret half %1 } @@ -2218,6 +3734,16 @@ ; RV64IDZFH-NEXT: fcvt.s.h fa0, fa0 ; RV64IDZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_s_h: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINX-NEXT: ret +; +; CHECKIZDINXZHINX-LABEL: fcvt_s_h: +; CHECKIZDINXZHINX: # %bb.0: +; CHECKIZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; CHECKIZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_s_h: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2249,6 +3775,26 @@ ; CHECK64-IZFHMIN: # %bb.0: ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa0, fa0 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_s_h: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_s_h: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_s_h: 
+; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_s_h: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = fpext half %a to float ret float %1 } @@ -2282,6 +3828,40 @@ ; RV64IDZFH-NEXT: fcvt.h.d fa0, fa0 ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_h_d: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: call __truncdfhf2@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_h_d: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: call __truncdfhf2@plt +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_h_d: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw a0, 8(sp) +; RV32IZDINXZHINX-NEXT: sw a1, 12(sp) +; RV32IZDINXZHINX-NEXT: lw a0, 8(sp) +; RV32IZDINXZHINX-NEXT: lw a1, 12(sp) +; RV32IZDINXZHINX-NEXT: fcvt.h.d a0, a0 +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_d: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.h.d a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_h_d: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2327,6 +3907,40 @@ ; RV64IDZFHMIN: # %bb.0: ; RV64IDZFHMIN-NEXT: fcvt.h.d fa0, fa0 ; RV64IDZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_d: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: call __truncdfhf2@plt +; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_d: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK64-IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; CHECK64-IZHINXMIN-NEXT: call __truncdfhf2@plt +; CHECK64-IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; CHECK64-IZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_d: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZDINXZHINXMIN-NEXT: sw a0, 8(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: sw a1, 12(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 8(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, 12(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.d a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_d: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.d a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = fptrunc double %a to half ret half %1 } @@ -2362,6 +3976,42 @@ ; RV64IDZFH-NEXT: fcvt.d.h fa0, fa0 ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_d_h: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call __extendsfdf2@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; 
RV64IZHINX-LABEL: fcvt_d_h: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call __extendsfdf2@plt +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_d_h: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: fcvt.d.h a0, a0 +; RV32IZDINXZHINX-NEXT: sw a0, 8(sp) +; RV32IZDINXZHINX-NEXT: sw a1, 12(sp) +; RV32IZDINXZHINX-NEXT: lw a0, 8(sp) +; RV32IZDINXZHINX-NEXT: lw a1, 12(sp) +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_d_h: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.d.h a0, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_d_h: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2417,6 +4067,42 @@ ; RV64IDZFHMIN: # %bb.0: ; RV64IDZFHMIN-NEXT: fcvt.d.h fa0, fa0 ; RV64IDZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_d_h: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: call __extendsfdf2@plt +; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; CHECK32-IZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_d_h: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK64-IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: call __extendsfdf2@plt +; CHECK64-IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; CHECK64-IZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_d_h: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.d.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: sw a0, 8(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: sw a1, 12(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a0, 8(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, 12(sp) +; CHECK32-IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_d_h: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.d.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = fpext half %a to double ret double %1 } @@ -2437,6 +4123,14 @@ ; RV64IDZFH-NEXT: fmv.h.x fa0, a0 ; RV64IDZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: bitcast_h_i16: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: ret +; +; CHECKIZDINXZHINX-LABEL: bitcast_h_i16: +; CHECKIZDINXZHINX: # %bb.0: +; CHECKIZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: bitcast_h_i16: ; RV32I: # %bb.0: ; RV32I-NEXT: ret @@ -2454,6 +4148,22 @@ ; CHECK64-IZFHMIN: # %bb.0: ; CHECK64-IZFHMIN-NEXT: fmv.h.x fa0, a0 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: bitcast_h_i16: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: bitcast_h_i16: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: bitcast_h_i16: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: bitcast_h_i16: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = bitcast i16 %a to half ret half %1 } @@ -2474,6 +4184,14 @@ ; RV64IDZFH-NEXT: fmv.x.h a0, fa0 ; RV64IDZFH-NEXT: 
ret ; +; CHECKIZHINX-LABEL: bitcast_i16_h: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: ret +; +; CHECKIZDINXZHINX-LABEL: bitcast_i16_h: +; CHECKIZDINXZHINX: # %bb.0: +; CHECKIZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: bitcast_i16_h: ; RV32I: # %bb.0: ; RV32I-NEXT: ret @@ -2491,6 +4209,22 @@ ; CHECK64-IZFHMIN: # %bb.0: ; CHECK64-IZFHMIN-NEXT: fmv.x.h a0, fa0 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: bitcast_i16_h: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: bitcast_i16_h: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: bitcast_i16_h: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: bitcast_i16_h: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = bitcast half %a to i16 ret i16 %1 } @@ -2525,6 +4259,38 @@ ; RV64IDZFH-NEXT: fsh fa5, 0(a1) ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_h_w_demanded_bits: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi a0, a0, 1 +; RV32IZHINX-NEXT: fcvt.h.w a2, a0 +; RV32IZHINX-NEXT: sh a2, 0(a1) +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_h_w_demanded_bits: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addiw a2, a0, 1 +; RV64IZHINX-NEXT: addi a0, a0, 1 +; RV64IZHINX-NEXT: fcvt.h.w a0, a0 +; RV64IZHINX-NEXT: sh a0, 0(a1) +; RV64IZHINX-NEXT: mv a0, a2 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_h_w_demanded_bits: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi a0, a0, 1 +; RV32IZDINXZHINX-NEXT: fcvt.h.w a2, a0 +; RV32IZDINXZHINX-NEXT: sh a2, 0(a1) +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_w_demanded_bits: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: addiw a2, a0, 1 +; RV64IZDINXZHINX-NEXT: addi a0, a0, 1 +; RV64IZDINXZHINX-NEXT: fcvt.h.w a0, a0 +; RV64IZDINXZHINX-NEXT: sh a0, 0(a1) +; RV64IZDINXZHINX-NEXT: mv a0, a2 +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_h_w_demanded_bits: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2578,6 +4344,38 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa5, fa5 ; CHECK64-IZFHMIN-NEXT: fsh fa5, 0(a1) ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_w_demanded_bits: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: addi a0, a0, 1 +; CHECK32-IZHINXMIN-NEXT: fcvt.s.w a2, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK32-IZHINXMIN-NEXT: sh a2, 0(a1) +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_w_demanded_bits: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: addiw a0, a0, 1 +; CHECK64-IZHINXMIN-NEXT: fcvt.s.l a2, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK64-IZHINXMIN-NEXT: sh a2, 0(a1) +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_w_demanded_bits: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: addi a0, a0, 1 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.w a2, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK32-IZDINXZHINXMIN-NEXT: sh a2, 0(a1) +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_w_demanded_bits: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: addiw a0, a0, 1 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.l a2, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK64-IZDINXZHINXMIN-NEXT: sh a2, 0(a1) +; CHECK64-IZDINXZHINXMIN-NEXT: ret %3 = add i32 %0, 1 %4 = sitofp i32 %3 to half store half %4, ptr %1, align 2 @@ -2614,6 +4412,34 @@ ; RV64IDZFH-NEXT: fsh fa5, 0(a1) ; RV64IDZFH-NEXT: 
ret ; +; RV32IZHINX-LABEL: fcvt_h_wu_demanded_bits: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi a0, a0, 1 +; RV32IZHINX-NEXT: fcvt.h.wu a2, a0 +; RV32IZHINX-NEXT: sh a2, 0(a1) +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_h_wu_demanded_bits: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addiw a0, a0, 1 +; RV64IZHINX-NEXT: fcvt.h.wu a2, a0 +; RV64IZHINX-NEXT: sh a2, 0(a1) +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_h_wu_demanded_bits: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi a0, a0, 1 +; RV32IZDINXZHINX-NEXT: fcvt.h.wu a2, a0 +; RV32IZDINXZHINX-NEXT: sh a2, 0(a1) +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_h_wu_demanded_bits: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: addiw a0, a0, 1 +; RV64IZDINXZHINX-NEXT: fcvt.h.wu a2, a0 +; RV64IZDINXZHINX-NEXT: sh a2, 0(a1) +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_h_wu_demanded_bits: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2669,6 +4495,42 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.h.s fa5, fa5 ; CHECK64-IZFHMIN-NEXT: fsh fa5, 0(a1) ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_h_wu_demanded_bits: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: addi a0, a0, 1 +; CHECK32-IZHINXMIN-NEXT: fcvt.s.wu a2, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK32-IZHINXMIN-NEXT: sh a2, 0(a1) +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_h_wu_demanded_bits: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: addiw a0, a0, 1 +; CHECK64-IZHINXMIN-NEXT: slli a2, a0, 32 +; CHECK64-IZHINXMIN-NEXT: srli a2, a2, 32 +; CHECK64-IZHINXMIN-NEXT: fcvt.s.lu a2, a2 +; CHECK64-IZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK64-IZHINXMIN-NEXT: sh a2, 0(a1) +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_h_wu_demanded_bits: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: addi a0, a0, 1 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.wu a2, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK32-IZDINXZHINXMIN-NEXT: sh a2, 0(a1) +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_h_wu_demanded_bits: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: addiw a0, a0, 1 +; CHECK64-IZDINXZHINXMIN-NEXT: slli a2, a0, 32 +; CHECK64-IZDINXZHINXMIN-NEXT: srli a2, a2, 32 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.lu a2, a2 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.h.s a2, a2 +; CHECK64-IZDINXZHINXMIN-NEXT: sh a2, 0(a1) +; CHECK64-IZDINXZHINXMIN-NEXT: ret %3 = add i32 %0, 1 %4 = uitofp i32 %3 to half store half %4, ptr %1, align 2 @@ -2696,6 +4558,26 @@ ; RV64IDZFH-NEXT: fcvt.l.h a0, fa0, rtz ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_w_s_i16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_w_s_i16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_w_s_i16: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_w_s_i16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_w_s_i16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2731,6 +4613,30 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECK64-IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_w_s_i16: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; 
CHECK32-IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_w_s_i16: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_w_s_i16: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_w_s_i16: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = fptosi half %a to i16 ret i16 %1 } @@ -2796,6 +4702,62 @@ ; RV64IDZFH-NEXT: and a0, a0, a1 ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_w_s_sat_i16: +; RV32IZHINX: # %bb.0: # %start +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: feq.s a1, a0, a0 +; RV32IZHINX-NEXT: lui a2, %hi(.LCPI32_0) +; RV32IZHINX-NEXT: lw a2, %lo(.LCPI32_0)(a2) +; RV32IZHINX-NEXT: neg a1, a1 +; RV32IZHINX-NEXT: lui a3, 815104 +; RV32IZHINX-NEXT: fmax.s a0, a0, a3 +; RV32IZHINX-NEXT: fmin.s a0, a0, a2 +; RV32IZHINX-NEXT: fcvt.w.s a0, a0, rtz +; RV32IZHINX-NEXT: and a0, a1, a0 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_w_s_sat_i16: +; RV64IZHINX: # %bb.0: # %start +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: lui a1, 815104 +; RV64IZHINX-NEXT: lui a2, %hi(.LCPI32_0) +; RV64IZHINX-NEXT: lw a2, %lo(.LCPI32_0)(a2) +; RV64IZHINX-NEXT: fmax.s a1, a0, a1 +; RV64IZHINX-NEXT: feq.s a0, a0, a0 +; RV64IZHINX-NEXT: neg a0, a0 +; RV64IZHINX-NEXT: fmin.s a1, a1, a2 +; RV64IZHINX-NEXT: fcvt.l.s a1, a1, rtz +; RV64IZHINX-NEXT: and a0, a0, a1 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_w_s_sat_i16: +; RV32IZDINXZHINX: # %bb.0: # %start +; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINX-NEXT: feq.s a1, a0, a0 +; RV32IZDINXZHINX-NEXT: lui a2, %hi(.LCPI32_0) +; RV32IZDINXZHINX-NEXT: lw a2, %lo(.LCPI32_0)(a2) +; RV32IZDINXZHINX-NEXT: neg a1, a1 +; RV32IZDINXZHINX-NEXT: lui a3, 815104 +; RV32IZDINXZHINX-NEXT: fmax.s a0, a0, a3 +; RV32IZDINXZHINX-NEXT: fmin.s a0, a0, a2 +; RV32IZDINXZHINX-NEXT: fcvt.w.s a0, a0, rtz +; RV32IZDINXZHINX-NEXT: and a0, a1, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_w_s_sat_i16: +; RV64IZDINXZHINX: # %bb.0: # %start +; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINX-NEXT: lui a1, 815104 +; RV64IZDINXZHINX-NEXT: lui a2, %hi(.LCPI32_0) +; RV64IZDINXZHINX-NEXT: lw a2, %lo(.LCPI32_0)(a2) +; RV64IZDINXZHINX-NEXT: fmax.s a1, a0, a1 +; RV64IZDINXZHINX-NEXT: feq.s a0, a0, a0 +; RV64IZDINXZHINX-NEXT: neg a0, a0 +; RV64IZDINXZHINX-NEXT: fmin.s a1, a1, a2 +; RV64IZDINXZHINX-NEXT: fcvt.l.s a1, a1, rtz +; RV64IZDINXZHINX-NEXT: and a0, a0, a1 +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_w_s_sat_i16: ; RV32I: # %bb.0: # %start ; RV32I-NEXT: addi sp, sp, -16 @@ -2915,6 +4877,62 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.l.s a1, fa5, rtz ; CHECK64-IZFHMIN-NEXT: and a0, a0, a1 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_w_s_sat_i16: +; CHECK32-IZHINXMIN: # %bb.0: # %start +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: feq.s a1, a0, a0 +; CHECK32-IZHINXMIN-NEXT: lui a2, %hi(.LCPI32_0) +; CHECK32-IZHINXMIN-NEXT: lw a2, %lo(.LCPI32_0)(a2) +; CHECK32-IZHINXMIN-NEXT: neg a1, a1 +; CHECK32-IZHINXMIN-NEXT: lui a3, 815104 +; CHECK32-IZHINXMIN-NEXT: fmax.s a0, a0, a3 +; CHECK32-IZHINXMIN-NEXT: fmin.s a0, 
a0, a2 +; CHECK32-IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: and a0, a1, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_w_s_sat_i16: +; CHECK64-IZHINXMIN: # %bb.0: # %start +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: lui a1, 815104 +; CHECK64-IZHINXMIN-NEXT: lui a2, %hi(.LCPI32_0) +; CHECK64-IZHINXMIN-NEXT: lw a2, %lo(.LCPI32_0)(a2) +; CHECK64-IZHINXMIN-NEXT: fmax.s a1, a0, a1 +; CHECK64-IZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK64-IZHINXMIN-NEXT: neg a0, a0 +; CHECK64-IZHINXMIN-NEXT: fmin.s a1, a1, a2 +; CHECK64-IZHINXMIN-NEXT: fcvt.l.s a1, a1, rtz +; CHECK64-IZHINXMIN-NEXT: and a0, a0, a1 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_w_s_sat_i16: +; CHECK32-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: feq.s a1, a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: lui a2, %hi(.LCPI32_0) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a2, %lo(.LCPI32_0)(a2) +; CHECK32-IZDINXZHINXMIN-NEXT: neg a1, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: lui a3, 815104 +; CHECK32-IZDINXZHINXMIN-NEXT: fmax.s a0, a0, a3 +; CHECK32-IZDINXZHINXMIN-NEXT: fmin.s a0, a0, a2 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a1, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_w_s_sat_i16: +; CHECK64-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: lui a1, 815104 +; CHECK64-IZDINXZHINXMIN-NEXT: lui a2, %hi(.LCPI32_0) +; CHECK64-IZDINXZHINXMIN-NEXT: lw a2, %lo(.LCPI32_0)(a2) +; CHECK64-IZDINXZHINXMIN-NEXT: fmax.s a1, a0, a1 +; CHECK64-IZDINXZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: neg a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fmin.s a1, a1, a2 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.l.s a1, a1, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: and a0, a0, a1 +; CHECK64-IZDINXZHINXMIN-NEXT: ret start: %0 = tail call i16 @llvm.fptosi.sat.i16.f16(half %a) ret i16 %0 @@ -2941,6 +4959,26 @@ ; RV64IDZFH-NEXT: fcvt.lu.h a0, fa0, rtz ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_wu_s_i16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_wu_s_i16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_wu_s_i16: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_wu_s_i16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_wu_s_i16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -2976,6 +5014,30 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECK64-IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_wu_s_i16: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_wu_s_i16: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_wu_s_i16: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; 
CHECK64-IZDINXZHINXMIN-LABEL: fcvt_wu_s_i16: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = fptoui half %a to i16 ret i16 %1 } @@ -3025,6 +5087,46 @@ ; RV64IDZFH-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_wu_s_sat_i16: +; RV32IZHINX: # %bb.0: # %start +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI34_0) +; RV32IZHINX-NEXT: lw a1, %lo(.LCPI34_0)(a1) +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: fmax.s a0, a0, zero +; RV32IZHINX-NEXT: fmin.s a0, a0, a1 +; RV32IZHINX-NEXT: fcvt.wu.s a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_wu_s_sat_i16: +; RV64IZHINX: # %bb.0: # %start +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI34_0) +; RV64IZHINX-NEXT: lw a1, %lo(.LCPI34_0)(a1) +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: fmax.s a0, a0, zero +; RV64IZHINX-NEXT: fmin.s a0, a0, a1 +; RV64IZHINX-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_wu_s_sat_i16: +; RV32IZDINXZHINX: # %bb.0: # %start +; RV32IZDINXZHINX-NEXT: lui a1, %hi(.LCPI34_0) +; RV32IZDINXZHINX-NEXT: lw a1, %lo(.LCPI34_0)(a1) +; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINX-NEXT: fmax.s a0, a0, zero +; RV32IZDINXZHINX-NEXT: fmin.s a0, a0, a1 +; RV32IZDINXZHINX-NEXT: fcvt.wu.s a0, a0, rtz +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_wu_s_sat_i16: +; RV64IZDINXZHINX: # %bb.0: # %start +; RV64IZDINXZHINX-NEXT: lui a1, %hi(.LCPI34_0) +; RV64IZDINXZHINX-NEXT: lw a1, %lo(.LCPI34_0)(a1) +; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINX-NEXT: fmax.s a0, a0, zero +; RV64IZDINXZHINX-NEXT: fmin.s a0, a0, a1 +; RV64IZDINXZHINX-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_wu_s_sat_i16: ; RV32I: # %bb.0: # %start ; RV32I-NEXT: addi sp, sp, -32 @@ -3128,6 +5230,46 @@ ; CHECK64-IZFHMIN-NEXT: fmin.s fa5, fa4, fa5 ; CHECK64-IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_wu_s_sat_i16: +; CHECK32-IZHINXMIN: # %bb.0: # %start +; CHECK32-IZHINXMIN-NEXT: lui a1, %hi(.LCPI34_0) +; CHECK32-IZHINXMIN-NEXT: lw a1, %lo(.LCPI34_0)(a1) +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fmax.s a0, a0, zero +; CHECK32-IZHINXMIN-NEXT: fmin.s a0, a0, a1 +; CHECK32-IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_wu_s_sat_i16: +; CHECK64-IZHINXMIN: # %bb.0: # %start +; CHECK64-IZHINXMIN-NEXT: lui a1, %hi(.LCPI34_0) +; CHECK64-IZHINXMIN-NEXT: lw a1, %lo(.LCPI34_0)(a1) +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fmax.s a0, a0, zero +; CHECK64-IZHINXMIN-NEXT: fmin.s a0, a0, a1 +; CHECK64-IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_wu_s_sat_i16: +; CHECK32-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK32-IZDINXZHINXMIN-NEXT: lui a1, %hi(.LCPI34_0) +; CHECK32-IZDINXZHINXMIN-NEXT: lw a1, %lo(.LCPI34_0)(a1) +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fmax.s a0, a0, zero +; CHECK32-IZDINXZHINXMIN-NEXT: fmin.s a0, a0, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_wu_s_sat_i16: +; CHECK64-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK64-IZDINXZHINXMIN-NEXT: lui a1, %hi(.LCPI34_0) +; CHECK64-IZDINXZHINXMIN-NEXT: lw a1, %lo(.LCPI34_0)(a1) +; 
CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fmax.s a0, a0, zero +; CHECK64-IZDINXZHINXMIN-NEXT: fmin.s a0, a0, a1 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret start: %0 = tail call i16 @llvm.fptoui.sat.i16.f16(half %a) ret i16 %0 @@ -3154,6 +5296,26 @@ ; RV64IDZFH-NEXT: fcvt.l.h a0, fa0, rtz ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_w_s_i8: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_w_s_i8: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_w_s_i8: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_w_s_i8: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_w_s_i8: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -3189,6 +5351,30 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECK64-IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_w_s_i8: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_w_s_i8: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_w_s_i8: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_w_s_i8: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = fptosi half %a to i8 ret i8 %1 } @@ -3254,6 +5440,58 @@ ; RV64IDZFH-NEXT: and a0, a0, a1 ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_w_s_sat_i8: +; RV32IZHINX: # %bb.0: # %start +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: feq.s a1, a0, a0 +; RV32IZHINX-NEXT: neg a1, a1 +; RV32IZHINX-NEXT: lui a2, 798720 +; RV32IZHINX-NEXT: fmax.s a0, a0, a2 +; RV32IZHINX-NEXT: lui a2, 274400 +; RV32IZHINX-NEXT: fmin.s a0, a0, a2 +; RV32IZHINX-NEXT: fcvt.w.s a0, a0, rtz +; RV32IZHINX-NEXT: and a0, a1, a0 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_w_s_sat_i8: +; RV64IZHINX: # %bb.0: # %start +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: feq.s a1, a0, a0 +; RV64IZHINX-NEXT: neg a1, a1 +; RV64IZHINX-NEXT: lui a2, 798720 +; RV64IZHINX-NEXT: fmax.s a0, a0, a2 +; RV64IZHINX-NEXT: lui a2, 274400 +; RV64IZHINX-NEXT: fmin.s a0, a0, a2 +; RV64IZHINX-NEXT: fcvt.l.s a0, a0, rtz +; RV64IZHINX-NEXT: and a0, a1, a0 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_w_s_sat_i8: +; RV32IZDINXZHINX: # %bb.0: # %start +; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINX-NEXT: feq.s a1, a0, a0 +; RV32IZDINXZHINX-NEXT: neg a1, a1 +; RV32IZDINXZHINX-NEXT: lui a2, 798720 +; RV32IZDINXZHINX-NEXT: fmax.s a0, a0, a2 +; RV32IZDINXZHINX-NEXT: lui a2, 274400 +; RV32IZDINXZHINX-NEXT: fmin.s a0, a0, a2 +; RV32IZDINXZHINX-NEXT: fcvt.w.s a0, a0, rtz +; RV32IZDINXZHINX-NEXT: and a0, a1, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_w_s_sat_i8: +; RV64IZDINXZHINX: # %bb.0: # %start +; RV64IZDINXZHINX-NEXT: 
fcvt.s.h a0, a0 +; RV64IZDINXZHINX-NEXT: feq.s a1, a0, a0 +; RV64IZDINXZHINX-NEXT: neg a1, a1 +; RV64IZDINXZHINX-NEXT: lui a2, 798720 +; RV64IZDINXZHINX-NEXT: fmax.s a0, a0, a2 +; RV64IZDINXZHINX-NEXT: lui a2, 274400 +; RV64IZDINXZHINX-NEXT: fmin.s a0, a0, a2 +; RV64IZDINXZHINX-NEXT: fcvt.l.s a0, a0, rtz +; RV64IZDINXZHINX-NEXT: and a0, a1, a0 +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_w_s_sat_i8: ; RV32I: # %bb.0: # %start ; RV32I-NEXT: addi sp, sp, -16 @@ -3369,6 +5607,58 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.l.s a1, fa5, rtz ; CHECK64-IZFHMIN-NEXT: and a0, a0, a1 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_w_s_sat_i8: +; CHECK32-IZHINXMIN: # %bb.0: # %start +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: feq.s a1, a0, a0 +; CHECK32-IZHINXMIN-NEXT: neg a1, a1 +; CHECK32-IZHINXMIN-NEXT: lui a2, 798720 +; CHECK32-IZHINXMIN-NEXT: fmax.s a0, a0, a2 +; CHECK32-IZHINXMIN-NEXT: lui a2, 274400 +; CHECK32-IZHINXMIN-NEXT: fmin.s a0, a0, a2 +; CHECK32-IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: and a0, a1, a0 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_w_s_sat_i8: +; CHECK64-IZHINXMIN: # %bb.0: # %start +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: feq.s a1, a0, a0 +; CHECK64-IZHINXMIN-NEXT: neg a1, a1 +; CHECK64-IZHINXMIN-NEXT: lui a2, 798720 +; CHECK64-IZHINXMIN-NEXT: fmax.s a0, a0, a2 +; CHECK64-IZHINXMIN-NEXT: lui a2, 274400 +; CHECK64-IZHINXMIN-NEXT: fmin.s a0, a0, a2 +; CHECK64-IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: and a0, a1, a0 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_w_s_sat_i8: +; CHECK32-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: feq.s a1, a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: neg a1, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: lui a2, 798720 +; CHECK32-IZDINXZHINXMIN-NEXT: fmax.s a0, a0, a2 +; CHECK32-IZDINXZHINXMIN-NEXT: lui a2, 274400 +; CHECK32-IZDINXZHINXMIN-NEXT: fmin.s a0, a0, a2 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a1, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_w_s_sat_i8: +; CHECK64-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: feq.s a1, a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: neg a1, a1 +; CHECK64-IZDINXZHINXMIN-NEXT: lui a2, 798720 +; CHECK64-IZDINXZHINXMIN-NEXT: fmax.s a0, a0, a2 +; CHECK64-IZDINXZHINXMIN-NEXT: lui a2, 274400 +; CHECK64-IZDINXZHINXMIN-NEXT: fmin.s a0, a0, a2 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: and a0, a1, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: ret start: %0 = tail call i8 @llvm.fptosi.sat.i8.f16(half %a) ret i8 %0 @@ -3396,6 +5686,26 @@ ; RV64IDZFH-NEXT: fcvt.lu.h a0, fa0, rtz ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_wu_s_i8: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_wu_s_i8: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_wu_s_i8: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_wu_s_i8: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_wu_s_i8: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -3431,6 
+5741,30 @@ ; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECK64-IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_wu_s_i8: +; CHECK32-IZHINXMIN: # %bb.0: +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_wu_s_i8: +; CHECK64-IZHINXMIN: # %bb.0: +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_wu_s_i8: +; CHECK32-IZDINXZHINXMIN: # %bb.0: +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_wu_s_i8: +; CHECK64-IZDINXZHINXMIN: # %bb.0: +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret %1 = fptoui half %a to i8 ret i8 %1 } @@ -3480,6 +5814,42 @@ ; RV64IDZFH-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_wu_s_sat_i8: +; RV32IZHINX: # %bb.0: # %start +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: fmax.s a0, a0, zero +; RV32IZHINX-NEXT: lui a1, 276464 +; RV32IZHINX-NEXT: fmin.s a0, a0, a1 +; RV32IZHINX-NEXT: fcvt.wu.s a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_wu_s_sat_i8: +; RV64IZHINX: # %bb.0: # %start +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: fmax.s a0, a0, zero +; RV64IZHINX-NEXT: lui a1, 276464 +; RV64IZHINX-NEXT: fmin.s a0, a0, a1 +; RV64IZHINX-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_wu_s_sat_i8: +; RV32IZDINXZHINX: # %bb.0: # %start +; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINX-NEXT: fmax.s a0, a0, zero +; RV32IZDINXZHINX-NEXT: lui a1, 276464 +; RV32IZDINXZHINX-NEXT: fmin.s a0, a0, a1 +; RV32IZDINXZHINX-NEXT: fcvt.wu.s a0, a0, rtz +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_wu_s_sat_i8: +; RV64IZDINXZHINX: # %bb.0: # %start +; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINX-NEXT: fmax.s a0, a0, zero +; RV64IZDINXZHINX-NEXT: lui a1, 276464 +; RV64IZDINXZHINX-NEXT: fmin.s a0, a0, a1 +; RV64IZDINXZHINX-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_wu_s_sat_i8: ; RV32I: # %bb.0: # %start ; RV32I-NEXT: addi sp, sp, -16 @@ -3575,6 +5945,42 @@ ; CHECK64-IZFHMIN-NEXT: fmin.s fa5, fa5, fa4 ; CHECK64-IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_wu_s_sat_i8: +; CHECK32-IZHINXMIN: # %bb.0: # %start +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fmax.s a0, a0, zero +; CHECK32-IZHINXMIN-NEXT: lui a1, 276464 +; CHECK32-IZHINXMIN-NEXT: fmin.s a0, a0, a1 +; CHECK32-IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_wu_s_sat_i8: +; CHECK64-IZHINXMIN: # %bb.0: # %start +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fmax.s a0, a0, zero +; CHECK64-IZHINXMIN-NEXT: lui a1, 276464 +; CHECK64-IZHINXMIN-NEXT: fmin.s a0, a0, a1 +; CHECK64-IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_wu_s_sat_i8: +; CHECK32-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fmax.s a0, a0, zero +; CHECK32-IZDINXZHINXMIN-NEXT: lui a1, 276464 +; CHECK32-IZDINXZHINXMIN-NEXT: fmin.s 
a0, a0, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_wu_s_sat_i8: +; CHECK64-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fmax.s a0, a0, zero +; CHECK64-IZDINXZHINXMIN-NEXT: lui a1, 276464 +; CHECK64-IZDINXZHINXMIN-NEXT: fmin.s a0, a0, a1 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: ret start: %0 = tail call i8 @llvm.fptoui.sat.i8.f16(half %a) ret i8 %0 @@ -3622,6 +6028,46 @@ ; RV64IDZFH-NEXT: srli a0, a0, 32 ; RV64IDZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fcvt_wu_h_sat_zext: +; RV32IZHINX: # %bb.0: # %start +; RV32IZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV32IZHINX-NEXT: feq.h a0, a0, a0 +; RV32IZHINX-NEXT: seqz a0, a0 +; RV32IZHINX-NEXT: addi a0, a0, -1 +; RV32IZHINX-NEXT: and a0, a0, a1 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fcvt_wu_h_sat_zext: +; RV64IZHINX: # %bb.0: # %start +; RV64IZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addiw a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a1, a0 +; RV64IZHINX-NEXT: slli a0, a0, 32 +; RV64IZHINX-NEXT: srli a0, a0, 32 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: fcvt_wu_h_sat_zext: +; RV32IZDINXZHINX: # %bb.0: # %start +; RV32IZDINXZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV32IZDINXZHINX-NEXT: feq.h a0, a0, a0 +; RV32IZDINXZHINX-NEXT: seqz a0, a0 +; RV32IZDINXZHINX-NEXT: addi a0, a0, -1 +; RV32IZDINXZHINX-NEXT: and a0, a0, a1 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: fcvt_wu_h_sat_zext: +; RV64IZDINXZHINX: # %bb.0: # %start +; RV64IZDINXZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV64IZDINXZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZDINXZHINX-NEXT: seqz a0, a0 +; RV64IZDINXZHINX-NEXT: addiw a0, a0, -1 +; RV64IZDINXZHINX-NEXT: and a0, a1, a0 +; RV64IZDINXZHINX-NEXT: slli a0, a0, 32 +; RV64IZDINXZHINX-NEXT: srli a0, a0, 32 +; RV64IZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_wu_h_sat_zext: ; RV32I: # %bb.0: # %start ; RV32I-NEXT: addi sp, sp, -16 @@ -3715,6 +6161,50 @@ ; CHECK64-IZFHMIN-NEXT: slli a0, a0, 32 ; CHECK64-IZFHMIN-NEXT: srli a0, a0, 32 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_wu_h_sat_zext: +; CHECK32-IZHINXMIN: # %bb.0: # %start +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; CHECK32-IZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK32-IZHINXMIN-NEXT: seqz a0, a0 +; CHECK32-IZHINXMIN-NEXT: addi a0, a0, -1 +; CHECK32-IZHINXMIN-NEXT: and a0, a0, a1 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_wu_h_sat_zext: +; CHECK64-IZHINXMIN: # %bb.0: # %start +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; CHECK64-IZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK64-IZHINXMIN-NEXT: seqz a0, a0 +; CHECK64-IZHINXMIN-NEXT: addiw a0, a0, -1 +; CHECK64-IZHINXMIN-NEXT: and a0, a1, a0 +; CHECK64-IZHINXMIN-NEXT: slli a0, a0, 32 +; CHECK64-IZHINXMIN-NEXT: srli a0, a0, 32 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_wu_h_sat_zext: +; CHECK32-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: seqz a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: addi a0, a0, -1 +; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a0, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: 
fcvt_wu_h_sat_zext: +; CHECK64-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: seqz a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: addiw a0, a0, -1 +; CHECK64-IZDINXZHINXMIN-NEXT: and a0, a1, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: slli a0, a0, 32 +; CHECK64-IZDINXZHINXMIN-NEXT: srli a0, a0, 32 +; CHECK64-IZDINXZHINXMIN-NEXT: ret start: %0 = tail call i32 @llvm.fptoui.sat.i32.f16(half %a) ret i32 %0 @@ -3748,6 +6238,24 @@ ; RV64IDZFH-NEXT: and a0, a1, a0 ; RV64IDZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcvt_w_h_sat_sext: +; CHECKIZHINX: # %bb.0: # %start +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rtz +; CHECKIZHINX-NEXT: feq.h a0, a0, a0 +; CHECKIZHINX-NEXT: seqz a0, a0 +; CHECKIZHINX-NEXT: addi a0, a0, -1 +; CHECKIZHINX-NEXT: and a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; +; CHECKIZDINXZHINX-LABEL: fcvt_w_h_sat_sext: +; CHECKIZDINXZHINX: # %bb.0: # %start +; CHECKIZDINXZHINX-NEXT: fcvt.w.h a1, a0, rtz +; CHECKIZDINXZHINX-NEXT: feq.h a0, a0, a0 +; CHECKIZDINXZHINX-NEXT: seqz a0, a0 +; CHECKIZDINXZHINX-NEXT: addi a0, a0, -1 +; CHECKIZDINXZHINX-NEXT: and a0, a0, a1 +; CHECKIZDINXZHINX-NEXT: ret +; ; RV32I-LABEL: fcvt_w_h_sat_sext: ; RV32I: # %bb.0: # %start ; RV32I-NEXT: addi sp, sp, -32 @@ -3858,7 +6366,52 @@ ; CHECK64-IZFHMIN-NEXT: addi a1, a1, -1 ; CHECK64-IZFHMIN-NEXT: and a0, a1, a0 ; CHECK64-IZFHMIN-NEXT: ret +; +; CHECK32-IZHINXMIN-LABEL: fcvt_w_h_sat_sext: +; CHECK32-IZHINXMIN: # %bb.0: # %start +; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECK32-IZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK32-IZHINXMIN-NEXT: seqz a0, a0 +; CHECK32-IZHINXMIN-NEXT: addi a0, a0, -1 +; CHECK32-IZHINXMIN-NEXT: and a0, a0, a1 +; CHECK32-IZHINXMIN-NEXT: ret +; +; CHECK64-IZHINXMIN-LABEL: fcvt_w_h_sat_sext: +; CHECK64-IZHINXMIN: # %bb.0: # %start +; CHECK64-IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECK64-IZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK64-IZHINXMIN-NEXT: seqz a0, a0 +; CHECK64-IZHINXMIN-NEXT: addi a0, a0, -1 +; CHECK64-IZHINXMIN-NEXT: and a0, a0, a1 +; CHECK64-IZHINXMIN-NEXT: ret +; +; CHECK32-IZDINXZHINXMIN-LABEL: fcvt_w_h_sat_sext: +; CHECK32-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECK32-IZDINXZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: seqz a0, a0 +; CHECK32-IZDINXZHINXMIN-NEXT: addi a0, a0, -1 +; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a0, a1 +; CHECK32-IZDINXZHINXMIN-NEXT: ret +; +; CHECK64-IZDINXZHINXMIN-LABEL: fcvt_w_h_sat_sext: +; CHECK64-IZDINXZHINXMIN: # %bb.0: # %start +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECK64-IZDINXZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: seqz a0, a0 +; CHECK64-IZDINXZHINXMIN-NEXT: addi a0, a0, -1 +; CHECK64-IZDINXZHINXMIN-NEXT: and a0, a0, a1 +; CHECK64-IZDINXZHINXMIN-NEXT: ret start: %0 = tail call i32 @llvm.fptosi.sat.i32.f16(half %a) ret i32 %0 } +;; NOTE: These prefixes are unused and the list is autogenerated. 
Do not add tests below this line: +; RV32IZDINXZHINXMIN: {{.*}} +; RV32IZHINXMIN: {{.*}} +; RV64IZDINXZHINXMIN: {{.*}} +; RV64IZHINXMIN: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll b/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll --- a/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll +++ b/llvm/test/CodeGen/RISCV/half-fcmp-strict.ll @@ -3,12 +3,24 @@ ; RUN: -target-abi ilp32f -disable-strictnode-mutation < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \ ; RUN: -target-abi lp64f -disable-strictnode-mutation < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi ilp32 -disable-strictnode-mutation < %s \ +; RUN: | FileCheck -check-prefix=CHECKIZHINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi lp64 -disable-strictnode-mutation < %s \ +; RUN: | FileCheck -check-prefix=CHECKIZHINX %s ; RUN: llc -mtriple=riscv32 -mattr=+zfh -verify-machineinstrs \ ; RUN: -target-abi ilp32f -disable-strictnode-mutation < %s \ ; RUN: | FileCheck -check-prefix=CHECKIZFHMIN %s ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \ ; RUN: -target-abi lp64f -disable-strictnode-mutation < %s \ ; RUN: | FileCheck -check-prefix=CHECKIZFHMIN %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi ilp32 -disable-strictnode-mutation < %s \ +; RUN: | FileCheck -check-prefix=CHECKIZHINXMIN %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi lp64 -disable-strictnode-mutation < %s \ +; RUN: | FileCheck -check-prefix=CHECKIZHINXMIN %s define i32 @fcmp_oeq(half %a, half %b) nounwind strictfp { ; CHECK-LABEL: fcmp_oeq: @@ -16,10 +28,22 @@ ; CHECK-NEXT: feq.h a0, fa0, fa1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_oeq: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: feq.h a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmp_oeq: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: feq.h a0, fa0, fa1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_oeq: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: feq.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -35,6 +59,15 @@ ; CHECK-NEXT: feq.h zero, fa1, fa0 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_ogt: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: csrr a3, fflags +; CHECKIZHINX-NEXT: flt.h a2, a1, a0 +; CHECKIZHINX-NEXT: csrw fflags, a3 +; CHECKIZHINX-NEXT: feq.h zero, a1, a0 +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmp_ogt: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: frflags a1 @@ -42,6 +75,16 @@ ; CHECKIZFHMIN-NEXT: fsflags a1 ; CHECKIZFHMIN-NEXT: feq.h zero, fa1, fa0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_ogt: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a2, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: csrr a3, fflags +; CHECKIZHINXMIN-NEXT: flt.s a0, a1, a2 +; CHECKIZHINXMIN-NEXT: csrw fflags, a3 +; CHECKIZHINXMIN-NEXT: feq.s zero, a1, a2 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -56,6 +99,15 @@ ; CHECK-NEXT: feq.h zero, fa1, fa0 ; CHECK-NEXT: ret ; +; 
CHECKIZHINX-LABEL: fcmp_oge: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: csrr a3, fflags +; CHECKIZHINX-NEXT: fle.h a2, a1, a0 +; CHECKIZHINX-NEXT: csrw fflags, a3 +; CHECKIZHINX-NEXT: feq.h zero, a1, a0 +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmp_oge: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: frflags a1 @@ -63,6 +115,16 @@ ; CHECKIZFHMIN-NEXT: fsflags a1 ; CHECKIZFHMIN-NEXT: feq.h zero, fa1, fa0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_oge: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a2, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: csrr a3, fflags +; CHECKIZHINXMIN-NEXT: fle.s a0, a1, a2 +; CHECKIZHINXMIN-NEXT: csrw fflags, a3 +; CHECKIZHINXMIN-NEXT: feq.s zero, a1, a2 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"oge", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -77,6 +139,15 @@ ; CHECK-NEXT: feq.h zero, fa0, fa1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_olt: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: csrr a3, fflags +; CHECKIZHINX-NEXT: flt.h a2, a0, a1 +; CHECKIZHINX-NEXT: csrw fflags, a3 +; CHECKIZHINX-NEXT: feq.h zero, a0, a1 +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmp_olt: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: frflags a1 @@ -84,6 +155,16 @@ ; CHECKIZFHMIN-NEXT: fsflags a1 ; CHECKIZFHMIN-NEXT: feq.h zero, fa0, fa1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_olt: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a2, a0 +; CHECKIZHINXMIN-NEXT: csrr a3, fflags +; CHECKIZHINXMIN-NEXT: flt.s a0, a2, a1 +; CHECKIZHINXMIN-NEXT: csrw fflags, a3 +; CHECKIZHINXMIN-NEXT: feq.s zero, a2, a1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"olt", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -98,6 +179,15 @@ ; CHECK-NEXT: feq.h zero, fa0, fa1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_ole: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: csrr a3, fflags +; CHECKIZHINX-NEXT: fle.h a2, a0, a1 +; CHECKIZHINX-NEXT: csrw fflags, a3 +; CHECKIZHINX-NEXT: feq.h zero, a0, a1 +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmp_ole: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: frflags a1 @@ -105,6 +195,16 @@ ; CHECKIZFHMIN-NEXT: fsflags a1 ; CHECKIZFHMIN-NEXT: feq.h zero, fa0, fa1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_ole: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a2, a0 +; CHECKIZHINXMIN-NEXT: csrr a3, fflags +; CHECKIZHINXMIN-NEXT: fle.s a0, a2, a1 +; CHECKIZHINXMIN-NEXT: csrw fflags, a3 +; CHECKIZHINXMIN-NEXT: feq.s zero, a2, a1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ole", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -126,6 +226,20 @@ ; CHECK-NEXT: feq.h zero, fa1, fa0 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_one: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: csrr a2, fflags +; CHECKIZHINX-NEXT: flt.h a3, a0, a1 +; CHECKIZHINX-NEXT: csrw fflags, a2 +; CHECKIZHINX-NEXT: feq.h zero, a0, a1 +; CHECKIZHINX-NEXT: csrr a2, fflags +; CHECKIZHINX-NEXT: flt.h a4, a1, a0 +; CHECKIZHINX-NEXT: csrw fflags, a2 +; CHECKIZHINX-NEXT: or a2, a4, a3 +; CHECKIZHINX-NEXT: feq.h zero, a1, a0 +; 
CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmp_one: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: frflags a0 @@ -138,6 +252,21 @@ ; CHECKIZFHMIN-NEXT: or a0, a2, a1 ; CHECKIZFHMIN-NEXT: feq.h zero, fa1, fa0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_one: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a2, a0 +; CHECKIZHINXMIN-NEXT: csrr a0, fflags +; CHECKIZHINXMIN-NEXT: flt.s a3, a2, a1 +; CHECKIZHINXMIN-NEXT: csrw fflags, a0 +; CHECKIZHINXMIN-NEXT: feq.s zero, a2, a1 +; CHECKIZHINXMIN-NEXT: csrr a0, fflags +; CHECKIZHINXMIN-NEXT: flt.s a4, a1, a2 +; CHECKIZHINXMIN-NEXT: csrw fflags, a0 +; CHECKIZHINXMIN-NEXT: or a0, a4, a3 +; CHECKIZHINXMIN-NEXT: feq.s zero, a1, a2 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"one", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -151,12 +280,28 @@ ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_ord: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: feq.h a1, a1, a1 +; CHECKIZHINX-NEXT: feq.h a0, a0, a0 +; CHECKIZHINX-NEXT: and a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmp_ord: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: feq.h a0, fa1, fa1 ; CHECKIZFHMIN-NEXT: feq.h a1, fa0, fa0 ; CHECKIZFHMIN-NEXT: and a0, a1, a0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_ord: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: feq.s a1, a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECKIZHINXMIN-NEXT: and a0, a0, a1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ord", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -179,6 +324,21 @@ ; CHECK-NEXT: feq.h zero, fa1, fa0 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_ueq: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: csrr a2, fflags +; CHECKIZHINX-NEXT: flt.h a3, a0, a1 +; CHECKIZHINX-NEXT: csrw fflags, a2 +; CHECKIZHINX-NEXT: feq.h zero, a0, a1 +; CHECKIZHINX-NEXT: csrr a2, fflags +; CHECKIZHINX-NEXT: flt.h a4, a1, a0 +; CHECKIZHINX-NEXT: csrw fflags, a2 +; CHECKIZHINX-NEXT: or a3, a4, a3 +; CHECKIZHINX-NEXT: xori a2, a3, 1 +; CHECKIZHINX-NEXT: feq.h zero, a1, a0 +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmp_ueq: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: frflags a0 @@ -192,6 +352,22 @@ ; CHECKIZFHMIN-NEXT: xori a0, a1, 1 ; CHECKIZFHMIN-NEXT: feq.h zero, fa1, fa0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_ueq: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a2, a0 +; CHECKIZHINXMIN-NEXT: csrr a0, fflags +; CHECKIZHINXMIN-NEXT: flt.s a3, a2, a1 +; CHECKIZHINXMIN-NEXT: csrw fflags, a0 +; CHECKIZHINXMIN-NEXT: feq.s zero, a2, a1 +; CHECKIZHINXMIN-NEXT: csrr a0, fflags +; CHECKIZHINXMIN-NEXT: flt.s a4, a1, a2 +; CHECKIZHINXMIN-NEXT: csrw fflags, a0 +; CHECKIZHINXMIN-NEXT: or a3, a4, a3 +; CHECKIZHINXMIN-NEXT: xori a0, a3, 1 +; CHECKIZHINXMIN-NEXT: feq.s zero, a1, a2 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -207,6 +383,16 @@ ; CHECK-NEXT: feq.h zero, fa0, fa1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_ugt: +; CHECKIZHINX: # %bb.0: +; 
CHECKIZHINX-NEXT: csrr a2, fflags +; CHECKIZHINX-NEXT: fle.h a3, a0, a1 +; CHECKIZHINX-NEXT: csrw fflags, a2 +; CHECKIZHINX-NEXT: xori a2, a3, 1 +; CHECKIZHINX-NEXT: feq.h zero, a0, a1 +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmp_ugt: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: frflags a0 @@ -215,6 +401,17 @@ ; CHECKIZFHMIN-NEXT: xori a0, a1, 1 ; CHECKIZFHMIN-NEXT: feq.h zero, fa0, fa1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_ugt: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a2, a0 +; CHECKIZHINXMIN-NEXT: csrr a0, fflags +; CHECKIZHINXMIN-NEXT: fle.s a3, a2, a1 +; CHECKIZHINXMIN-NEXT: csrw fflags, a0 +; CHECKIZHINXMIN-NEXT: xori a0, a3, 1 +; CHECKIZHINXMIN-NEXT: feq.s zero, a2, a1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -230,6 +427,16 @@ ; CHECK-NEXT: feq.h zero, fa0, fa1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_uge: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: csrr a2, fflags +; CHECKIZHINX-NEXT: flt.h a3, a0, a1 +; CHECKIZHINX-NEXT: csrw fflags, a2 +; CHECKIZHINX-NEXT: xori a2, a3, 1 +; CHECKIZHINX-NEXT: feq.h zero, a0, a1 +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmp_uge: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: frflags a0 @@ -238,6 +445,17 @@ ; CHECKIZFHMIN-NEXT: xori a0, a1, 1 ; CHECKIZFHMIN-NEXT: feq.h zero, fa0, fa1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_uge: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a2, a0 +; CHECKIZHINXMIN-NEXT: csrr a0, fflags +; CHECKIZHINXMIN-NEXT: flt.s a3, a2, a1 +; CHECKIZHINXMIN-NEXT: csrw fflags, a0 +; CHECKIZHINXMIN-NEXT: xori a0, a3, 1 +; CHECKIZHINXMIN-NEXT: feq.s zero, a2, a1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"uge", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -253,6 +471,16 @@ ; CHECK-NEXT: feq.h zero, fa1, fa0 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_ult: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: csrr a2, fflags +; CHECKIZHINX-NEXT: fle.h a3, a1, a0 +; CHECKIZHINX-NEXT: csrw fflags, a2 +; CHECKIZHINX-NEXT: xori a2, a3, 1 +; CHECKIZHINX-NEXT: feq.h zero, a1, a0 +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmp_ult: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: frflags a0 @@ -261,6 +489,17 @@ ; CHECKIZFHMIN-NEXT: xori a0, a1, 1 ; CHECKIZFHMIN-NEXT: feq.h zero, fa1, fa0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_ult: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a2, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: csrr a0, fflags +; CHECKIZHINXMIN-NEXT: fle.s a3, a1, a2 +; CHECKIZHINXMIN-NEXT: csrw fflags, a0 +; CHECKIZHINXMIN-NEXT: xori a0, a3, 1 +; CHECKIZHINXMIN-NEXT: feq.s zero, a1, a2 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ult", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -276,6 +515,16 @@ ; CHECK-NEXT: feq.h zero, fa1, fa0 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_ule: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: csrr a2, fflags +; CHECKIZHINX-NEXT: flt.h a3, a1, a0 +; CHECKIZHINX-NEXT: csrw fflags, a2 +; CHECKIZHINX-NEXT: xori a2, a3, 1 +; 
CHECKIZHINX-NEXT: feq.h zero, a1, a0 +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmp_ule: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: frflags a0 @@ -284,6 +533,17 @@ ; CHECKIZFHMIN-NEXT: xori a0, a1, 1 ; CHECKIZFHMIN-NEXT: feq.h zero, fa1, fa0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_ule: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a2, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: csrr a0, fflags +; CHECKIZHINXMIN-NEXT: flt.s a3, a1, a2 +; CHECKIZHINXMIN-NEXT: csrw fflags, a0 +; CHECKIZHINXMIN-NEXT: xori a0, a3, 1 +; CHECKIZHINXMIN-NEXT: feq.s zero, a1, a2 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"ule", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -296,11 +556,25 @@ ; CHECK-NEXT: xori a0, a0, 1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_une: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: feq.h a0, a0, a1 +; CHECKIZHINX-NEXT: xori a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmp_une: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: feq.h a0, fa0, fa1 ; CHECKIZFHMIN-NEXT: xori a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_une: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: feq.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: xori a0, a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"une", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -315,6 +589,14 @@ ; CHECK-NEXT: xori a0, a0, 1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_uno: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: feq.h a1, a1, a1 +; CHECKIZHINX-NEXT: feq.h a0, a0, a0 +; CHECKIZHINX-NEXT: and a0, a0, a1 +; CHECKIZHINX-NEXT: xori a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmp_uno: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: feq.h a0, fa1, fa1 @@ -322,6 +604,16 @@ ; CHECKIZFHMIN-NEXT: and a0, a1, a0 ; CHECKIZFHMIN-NEXT: xori a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_uno: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: feq.s a1, a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECKIZHINXMIN-NEXT: and a0, a0, a1 +; CHECKIZHINXMIN-NEXT: xori a0, a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmp.f16(half %a, half %b, metadata !"uno", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -335,12 +627,28 @@ ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmps_oeq: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a2, a1, a0 +; CHECKIZHINX-NEXT: fle.h a0, a0, a1 +; CHECKIZHINX-NEXT: and a0, a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmps_oeq: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fle.h a0, fa1, fa0 ; CHECKIZFHMIN-NEXT: fle.h a1, fa0, fa1 ; CHECKIZFHMIN-NEXT: and a0, a1, a0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmps_oeq: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fle.s a2, a1, a0 +; CHECKIZHINXMIN-NEXT: fle.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: and a0, a0, a2 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"oeq", metadata !"fpexcept.strict") strictfp %2 
= zext i1 %1 to i32 ret i32 %2 @@ -353,10 +661,22 @@ ; CHECK-NEXT: flt.h a0, fa1, fa0 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmps_ogt: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a0, a1, a0 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmps_ogt: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: flt.h a0, fa1, fa0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmps_ogt: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: flt.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ogt", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -368,10 +688,22 @@ ; CHECK-NEXT: fle.h a0, fa1, fa0 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmps_oge: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a0, a1, a0 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmps_oge: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fle.h a0, fa1, fa0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmps_oge: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fle.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"oge", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -383,10 +715,22 @@ ; CHECK-NEXT: flt.h a0, fa0, fa1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmps_olt: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmps_olt: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: flt.h a0, fa0, fa1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmps_olt: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: flt.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"olt", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -398,10 +742,22 @@ ; CHECK-NEXT: fle.h a0, fa0, fa1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmps_ole: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmps_ole: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fle.h a0, fa0, fa1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmps_ole: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fle.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ole", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -415,12 +771,28 @@ ; CHECK-NEXT: or a0, a1, a0 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmps_one: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a2, a0, a1 +; CHECKIZHINX-NEXT: flt.h a0, a1, a0 +; CHECKIZHINX-NEXT: or a0, a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmps_one: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: flt.h a0, fa0, fa1 ; CHECKIZFHMIN-NEXT: flt.h a1, fa1, fa0 ; CHECKIZFHMIN-NEXT: or a0, a1, a0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmps_one: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: flt.s a2, a0, a1 +; CHECKIZHINXMIN-NEXT: flt.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: or a0, 
a0, a2 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"one", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -434,12 +806,28 @@ ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmps_ord: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a1, a1, a1 +; CHECKIZHINX-NEXT: fle.h a0, a0, a0 +; CHECKIZHINX-NEXT: and a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmps_ord: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fle.h a0, fa1, fa1 ; CHECKIZFHMIN-NEXT: fle.h a1, fa0, fa0 ; CHECKIZFHMIN-NEXT: and a0, a1, a0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmps_ord: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fle.s a1, a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fle.s a0, a0, a0 +; CHECKIZHINXMIN-NEXT: and a0, a0, a1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ord", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -454,6 +842,14 @@ ; CHECK-NEXT: xori a0, a0, 1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmps_ueq: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a2, a0, a1 +; CHECKIZHINX-NEXT: flt.h a0, a1, a0 +; CHECKIZHINX-NEXT: or a0, a0, a2 +; CHECKIZHINX-NEXT: xori a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmps_ueq: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: flt.h a0, fa0, fa1 @@ -461,6 +857,16 @@ ; CHECKIZFHMIN-NEXT: or a0, a1, a0 ; CHECKIZFHMIN-NEXT: xori a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmps_ueq: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: flt.s a2, a0, a1 +; CHECKIZHINXMIN-NEXT: flt.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: or a0, a0, a2 +; CHECKIZHINXMIN-NEXT: xori a0, a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ueq", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -473,11 +879,25 @@ ; CHECK-NEXT: xori a0, a0, 1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmps_ugt: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a0, a0, a1 +; CHECKIZHINX-NEXT: xori a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmps_ugt: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fle.h a0, fa0, fa1 ; CHECKIZFHMIN-NEXT: xori a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmps_ugt: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fle.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: xori a0, a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ugt", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -490,11 +910,25 @@ ; CHECK-NEXT: xori a0, a0, 1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmps_uge: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a0, a0, a1 +; CHECKIZHINX-NEXT: xori a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmps_uge: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: flt.h a0, fa0, fa1 ; CHECKIZFHMIN-NEXT: xori a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmps_uge: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: flt.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: xori a0, a0, 
1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"uge", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -507,11 +941,25 @@ ; CHECK-NEXT: xori a0, a0, 1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmps_ult: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a0, a1, a0 +; CHECKIZHINX-NEXT: xori a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmps_ult: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fle.h a0, fa1, fa0 ; CHECKIZFHMIN-NEXT: xori a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmps_ult: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fle.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: xori a0, a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ult", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -524,11 +972,25 @@ ; CHECK-NEXT: xori a0, a0, 1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmps_ule: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a0, a1, a0 +; CHECKIZHINX-NEXT: xori a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmps_ule: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: flt.h a0, fa1, fa0 ; CHECKIZFHMIN-NEXT: xori a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmps_ule: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: flt.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: xori a0, a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"ule", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -543,6 +1005,14 @@ ; CHECK-NEXT: xori a0, a0, 1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmps_une: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a2, a1, a0 +; CHECKIZHINX-NEXT: fle.h a0, a0, a1 +; CHECKIZHINX-NEXT: and a0, a0, a2 +; CHECKIZHINX-NEXT: xori a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmps_une: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fle.h a0, fa1, fa0 @@ -550,6 +1020,16 @@ ; CHECKIZFHMIN-NEXT: and a0, a1, a0 ; CHECKIZFHMIN-NEXT: xori a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmps_une: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fle.s a2, a1, a0 +; CHECKIZHINXMIN-NEXT: fle.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: and a0, a0, a2 +; CHECKIZHINXMIN-NEXT: xori a0, a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"une", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 @@ -564,6 +1044,14 @@ ; CHECK-NEXT: xori a0, a0, 1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmps_uno: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a1, a1, a1 +; CHECKIZHINX-NEXT: fle.h a0, a0, a0 +; CHECKIZHINX-NEXT: and a0, a0, a1 +; CHECKIZHINX-NEXT: xori a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fcmps_uno: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fle.h a0, fa1, fa1 @@ -571,6 +1059,16 @@ ; CHECKIZFHMIN-NEXT: and a0, a1, a0 ; CHECKIZFHMIN-NEXT: xori a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmps_uno: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fle.s a1, a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: 
fle.s a0, a0, a0 +; CHECKIZHINXMIN-NEXT: and a0, a0, a1 +; CHECKIZHINXMIN-NEXT: xori a0, a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = call i1 @llvm.experimental.constrained.fcmps.f16(half %a, half %b, metadata !"uno", metadata !"fpexcept.strict") strictfp %2 = zext i1 %1 to i32 ret i32 %2 diff --git a/llvm/test/CodeGen/RISCV/half-fcmp.ll b/llvm/test/CodeGen/RISCV/half-fcmp.ll --- a/llvm/test/CodeGen/RISCV/half-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/half-fcmp.ll @@ -3,6 +3,10 @@ ; RUN: -target-abi ilp32f < %s | FileCheck -check-prefix=CHECKIZFH %s ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \ ; RUN: -target-abi lp64f < %s | FileCheck -check-prefix=CHECKIZFH %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s | FileCheck -check-prefix=CHECKIZHINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck -check-prefix=CHECKIZHINX %s ; RUN: llc -mtriple=riscv32 -mattr=+zfh -verify-machineinstrs \ ; RUN: < %s | FileCheck -check-prefix=RV32I %s ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \ @@ -15,6 +19,10 @@ ; RUN: < %s | FileCheck -check-prefix=CHECKIZFHMIN %s ; RUN: llc -mtriple=riscv64 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: < %s | FileCheck -check-prefix=CHECKIZFHMIN %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s | FileCheck -check-prefix=CHECKIZHINXMIN %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck -check-prefix=CHECKIZHINXMIN %s define i32 @fcmp_false(half %a, half %b) nounwind { ; CHECKIZFH-LABEL: fcmp_false: @@ -22,6 +30,11 @@ ; CHECKIZFH-NEXT: li a0, 0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_false: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: li a0, 0 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: fcmp_false: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 0 @@ -41,6 +54,11 @@ ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: li a0, 0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_false: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: li a0, 0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp false half %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -52,6 +70,11 @@ ; CHECKIZFH-NEXT: feq.h a0, fa0, fa1 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_oeq: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: feq.h a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: fcmp_oeq: ; RV32I: # %bb.0: ; RV32I-NEXT: fmv.h.x fa5, a1 @@ -81,6 +104,13 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; CHECKIZFHMIN-NEXT: feq.s a0, fa5, fa4 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_oeq: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: feq.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp oeq half %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -92,6 +122,11 @@ ; CHECKIZFH-NEXT: flt.h a0, fa1, fa0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_ogt: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a0, a1, a0 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: fcmp_ogt: ; RV32I: # %bb.0: ; RV32I-NEXT: fmv.h.x fa5, a0 @@ -121,6 +156,13 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; CHECKIZFHMIN-NEXT: flt.s a0, fa5, fa4 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_ogt: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: flt.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp ogt half 
%a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -132,6 +174,11 @@ ; CHECKIZFH-NEXT: fle.h a0, fa1, fa0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_oge: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a0, a1, a0 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: fcmp_oge: ; RV32I: # %bb.0: ; RV32I-NEXT: fmv.h.x fa5, a0 @@ -161,6 +208,13 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; CHECKIZFHMIN-NEXT: fle.s a0, fa5, fa4 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_oge: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fle.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp oge half %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -172,6 +226,11 @@ ; CHECKIZFH-NEXT: flt.h a0, fa0, fa1 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_olt: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: fcmp_olt: ; RV32I: # %bb.0: ; RV32I-NEXT: fmv.h.x fa5, a1 @@ -201,6 +260,13 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; CHECKIZFHMIN-NEXT: flt.s a0, fa5, fa4 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_olt: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: flt.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp olt half %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -212,6 +278,11 @@ ; CHECKIZFH-NEXT: fle.h a0, fa0, fa1 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_ole: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: fcmp_ole: ; RV32I: # %bb.0: ; RV32I-NEXT: fmv.h.x fa5, a1 @@ -241,6 +312,13 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; CHECKIZFHMIN-NEXT: fle.s a0, fa5, fa4 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_ole: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fle.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp ole half %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -254,6 +332,13 @@ ; CHECKIZFH-NEXT: or a0, a1, a0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_one: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a2, a0, a1 +; CHECKIZHINX-NEXT: flt.h a0, a1, a0 +; CHECKIZHINX-NEXT: or a0, a0, a2 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: fcmp_one: ; RV32I: # %bb.0: ; RV32I-NEXT: fmv.h.x fa5, a1 @@ -291,6 +376,15 @@ ; CHECKIZFHMIN-NEXT: flt.s a1, fa4, fa5 ; CHECKIZFHMIN-NEXT: or a0, a1, a0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_one: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: flt.s a2, a0, a1 +; CHECKIZHINXMIN-NEXT: flt.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: or a0, a0, a2 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp one half %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -304,6 +398,13 @@ ; CHECKIZFH-NEXT: and a0, a1, a0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_ord: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: feq.h a1, a1, a1 +; CHECKIZHINX-NEXT: feq.h a0, a0, a0 +; CHECKIZHINX-NEXT: and a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: fcmp_ord: ; RV32I: # %bb.0: ; RV32I-NEXT: fmv.h.x fa5, a0 @@ -341,6 +442,15 @@ ; CHECKIZFHMIN-NEXT: feq.s a1, fa5, fa5 ; CHECKIZFHMIN-NEXT: and a0, a1, a0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_ord: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: feq.s a1, a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 
+; CHECKIZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECKIZHINXMIN-NEXT: and a0, a0, a1 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp ord half %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -355,6 +465,14 @@ ; CHECKIZFH-NEXT: xori a0, a0, 1 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_ueq: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a2, a0, a1 +; CHECKIZHINX-NEXT: flt.h a0, a1, a0 +; CHECKIZHINX-NEXT: or a0, a0, a2 +; CHECKIZHINX-NEXT: xori a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: fcmp_ueq: ; RV32I: # %bb.0: ; RV32I-NEXT: fmv.h.x fa5, a1 @@ -396,6 +514,16 @@ ; CHECKIZFHMIN-NEXT: or a0, a1, a0 ; CHECKIZFHMIN-NEXT: xori a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_ueq: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: flt.s a2, a0, a1 +; CHECKIZHINXMIN-NEXT: flt.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: or a0, a0, a2 +; CHECKIZHINXMIN-NEXT: xori a0, a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp ueq half %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -408,6 +536,12 @@ ; CHECKIZFH-NEXT: xori a0, a0, 1 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_ugt: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a0, a0, a1 +; CHECKIZHINX-NEXT: xori a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: fcmp_ugt: ; RV32I: # %bb.0: ; RV32I-NEXT: fmv.h.x fa5, a1 @@ -441,6 +575,14 @@ ; CHECKIZFHMIN-NEXT: fle.s a0, fa5, fa4 ; CHECKIZFHMIN-NEXT: xori a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_ugt: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fle.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: xori a0, a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp ugt half %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -453,6 +595,12 @@ ; CHECKIZFH-NEXT: xori a0, a0, 1 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_uge: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a0, a0, a1 +; CHECKIZHINX-NEXT: xori a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: fcmp_uge: ; RV32I: # %bb.0: ; RV32I-NEXT: fmv.h.x fa5, a1 @@ -486,6 +634,14 @@ ; CHECKIZFHMIN-NEXT: flt.s a0, fa5, fa4 ; CHECKIZFHMIN-NEXT: xori a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_uge: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: flt.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: xori a0, a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp uge half %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -498,6 +654,12 @@ ; CHECKIZFH-NEXT: xori a0, a0, 1 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_ult: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a0, a1, a0 +; CHECKIZHINX-NEXT: xori a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: fcmp_ult: ; RV32I: # %bb.0: ; RV32I-NEXT: fmv.h.x fa5, a0 @@ -531,6 +693,14 @@ ; CHECKIZFHMIN-NEXT: fle.s a0, fa5, fa4 ; CHECKIZFHMIN-NEXT: xori a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_ult: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fle.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: xori a0, a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp ult half %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -543,6 +713,12 @@ ; CHECKIZFH-NEXT: xori a0, a0, 1 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_ule: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a0, a1, a0 +; CHECKIZHINX-NEXT: xori a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: 
fcmp_ule: ; RV32I: # %bb.0: ; RV32I-NEXT: fmv.h.x fa5, a0 @@ -576,6 +752,14 @@ ; CHECKIZFHMIN-NEXT: flt.s a0, fa5, fa4 ; CHECKIZFHMIN-NEXT: xori a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_ule: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: flt.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: xori a0, a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp ule half %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -588,6 +772,12 @@ ; CHECKIZFH-NEXT: xori a0, a0, 1 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_une: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: feq.h a0, a0, a1 +; CHECKIZHINX-NEXT: xori a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: fcmp_une: ; RV32I: # %bb.0: ; RV32I-NEXT: fmv.h.x fa5, a1 @@ -621,6 +811,14 @@ ; CHECKIZFHMIN-NEXT: feq.s a0, fa5, fa4 ; CHECKIZFHMIN-NEXT: xori a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_une: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: feq.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: xori a0, a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp une half %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -635,6 +833,14 @@ ; CHECKIZFH-NEXT: xori a0, a0, 1 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_uno: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: feq.h a1, a1, a1 +; CHECKIZHINX-NEXT: feq.h a0, a0, a0 +; CHECKIZHINX-NEXT: and a0, a0, a1 +; CHECKIZHINX-NEXT: xori a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: fcmp_uno: ; RV32I: # %bb.0: ; RV32I-NEXT: fmv.h.x fa5, a0 @@ -676,6 +882,16 @@ ; CHECKIZFHMIN-NEXT: and a0, a1, a0 ; CHECKIZFHMIN-NEXT: xori a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_uno: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: feq.s a1, a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECKIZHINXMIN-NEXT: and a0, a0, a1 +; CHECKIZHINXMIN-NEXT: xori a0, a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp uno half %a, %b %2 = zext i1 %1 to i32 ret i32 %2 @@ -687,6 +903,11 @@ ; CHECKIZFH-NEXT: li a0, 1 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fcmp_true: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: li a0, 1 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: fcmp_true: ; RV32I: # %bb.0: ; RV32I-NEXT: li a0, 1 @@ -706,6 +927,11 @@ ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: li a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fcmp_true: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: li a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp true half %a, %b %2 = zext i1 %1 to i32 ret i32 %2 diff --git a/llvm/test/CodeGen/RISCV/half-frem.ll b/llvm/test/CodeGen/RISCV/half-frem.ll --- a/llvm/test/CodeGen/RISCV/half-frem.ll +++ b/llvm/test/CodeGen/RISCV/half-frem.ll @@ -5,12 +5,24 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \ ; RUN: -target-abi lp64f < %s \ ; RUN: | FileCheck -check-prefix=RV64IZFH %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s \ +; RUN: | FileCheck -check-prefix=RV32IZHINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s \ +; RUN: | FileCheck -check-prefix=RV64IZHINX %s ; RUN: llc -mtriple=riscv32 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: -target-abi ilp32f < %s \ ; RUN: | FileCheck -check-prefix=RV32IZFHMIN %s ; RUN: llc -mtriple=riscv64 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: -target-abi lp64f < %s 
\ ; RUN: | FileCheck -check-prefix=RV64IZFHMIN %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s \ +; RUN: | FileCheck -check-prefix=RV32IZHINXMIN %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s \ +; RUN: | FileCheck -check-prefix=RV64IZHINXMIN %s define half @frem_f16(half %a, half %b) nounwind { ; RV32IZFH-LABEL: frem_f16: @@ -37,6 +49,30 @@ ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: frem_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: fcvt.s.h a1, a1 +; RV32IZHINX-NEXT: call fmodf@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: frem_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: fcvt.s.h a1, a1 +; RV64IZHINX-NEXT: call fmodf@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: frem_f16: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: addi sp, sp, -16 @@ -60,6 +96,30 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: frem_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: call fmodf@plt +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: frem_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: call fmodf@plt +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-NEXT: ret %1 = frem half %a, %b ret half %1 } diff --git a/llvm/test/CodeGen/RISCV/half-imm.ll b/llvm/test/CodeGen/RISCV/half-imm.ll --- a/llvm/test/CodeGen/RISCV/half-imm.ll +++ b/llvm/test/CodeGen/RISCV/half-imm.ll @@ -3,10 +3,22 @@ ; RUN: -target-abi ilp32f < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \ ; RUN: -target-abi lp64f < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s \ +; RUN: | FileCheck -check-prefix=RV32IZHINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s \ +; RUN: | FileCheck -check-prefix=RV64IZHINX %s ; RUN: llc -mtriple=riscv32 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: -target-abi ilp32f < %s | FileCheck -check-prefixes=CHECKIZFHMIN %s ; RUN: llc -mtriple=riscv64 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: -target-abi lp64f < %s | FileCheck -check-prefixes=CHECKIZFHMIN %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s \ +; RUN: | FileCheck 
-check-prefixes=CHECKIZHINXMIN %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s \ +; RUN: | FileCheck -check-prefixes=CHECKIZHINXMIN %s ; TODO: constant pool shouldn't be necessary for RV32IZfh and RV64IZfh define half @half_imm() nounwind { @@ -16,11 +28,29 @@ ; CHECK-NEXT: flh fa0, %lo(.LCPI0_0)(a0) ; CHECK-NEXT: ret ; +; RV32IZHINX-LABEL: half_imm: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a0, %hi(.LCPI0_0) +; RV32IZHINX-NEXT: lh a0, %lo(.LCPI0_0)(a0) +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: half_imm: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a0, %hi(.LCPI0_0) +; RV64IZHINX-NEXT: lh a0, %lo(.LCPI0_0)(a0) +; RV64IZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: half_imm: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI0_0) ; CHECKIZFHMIN-NEXT: flh fa0, %lo(.LCPI0_0)(a0) ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: half_imm: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: lui a0, %hi(.LCPI0_0) +; CHECKIZHINXMIN-NEXT: lh a0, %lo(.LCPI0_0)(a0) +; CHECKIZHINXMIN-NEXT: ret ret half 3.0 } @@ -32,6 +62,20 @@ ; CHECK-NEXT: fadd.h fa0, fa0, fa5 ; CHECK-NEXT: ret ; +; RV32IZHINX-LABEL: half_imm_op: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI1_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI1_0)(a1) +; RV32IZHINX-NEXT: fadd.h a0, a0, a1 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: half_imm_op: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI1_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI1_0)(a1) +; RV64IZHINX-NEXT: fadd.h a0, a0, a1 +; RV64IZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: half_imm_op: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -40,6 +84,14 @@ ; CHECKIZFHMIN-NEXT: fadd.s fa5, fa5, fa4 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: half_imm_op: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 260096 +; CHECKIZHINXMIN-NEXT: fadd.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fadd half %a, 1.0 ret half %1 } diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll --- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll @@ -5,12 +5,24 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zfh \ ; RUN: -verify-machineinstrs -target-abi lp64f | \ ; RUN: FileCheck -check-prefixes=CHECKIZFH,RV64IZFH,RV64IFZFH %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zhinx \ +; RUN: -verify-machineinstrs -target-abi ilp32 | \ +; RUN: FileCheck -check-prefixes=CHECKIZHINX,RV32IZHINX %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zhinx \ +; RUN: -verify-machineinstrs -target-abi lp64 | \ +; RUN: FileCheck -check-prefixes=CHECKIZHINX,RV64IZHINX %s ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d \ ; RUN: -mattr=+zfh -verify-machineinstrs -target-abi ilp32d | \ ; RUN: FileCheck -check-prefixes=CHECKIZFH,RV32IZFH,RV32IDZFH %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \ ; RUN: -mattr=+zfh -verify-machineinstrs -target-abi lp64d | \ ; RUN: FileCheck -check-prefixes=CHECKIZFH,RV64IZFH,RV64IDZFH %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zdinx \ +; RUN: -mattr=+zhinx -verify-machineinstrs -target-abi ilp32 | \ +; RUN: FileCheck -check-prefixes=CHECKIZHINX,RV32IZHINX,RV32IZDINXZHINX %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zdinx \ +; RUN: 
-mattr=+zhinx -verify-machineinstrs -target-abi lp64 | \ +; RUN: FileCheck -check-prefixes=CHECKIZHINX,RV64IZHINX,RV64IZDINXZHINX %s ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 \ ; RUN: -verify-machineinstrs | \ ; RUN: FileCheck -check-prefix=RV32I %s @@ -31,6 +43,19 @@ ; RUN: -mattr=+zfhmin -verify-machineinstrs -target-abi lp64d | \ ; RUN: FileCheck -check-prefixes=CHECKIZFHMIN,RV64IZFHMIN,RV64IDZFHMIN %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zhinxmin \ +; RUN: -verify-machineinstrs -target-abi ilp32 | \ +; RUN: FileCheck -check-prefixes=CHECKIZHINXMIN,RV32IZHINXMIN %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zhinxmin \ +; RUN: -verify-machineinstrs -target-abi lp64 | \ +; RUN: FileCheck -check-prefixes=CHECKIZHINXMIN,RV64IZHINXMIN %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zdinx \ +; RUN: -mattr=+zhinxmin -verify-machineinstrs -target-abi ilp32 | \ +; RUN: FileCheck -check-prefixes=CHECKIZHINXMIN,RV32IZHINXMIN,RV32IZDINXZHINXMIN %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zdinx \ +; RUN: -mattr=+zhinxmin -verify-machineinstrs -target-abi lp64 | \ +; RUN: FileCheck -check-prefixes=CHECKIZHINXMIN,RV64IZHINXMIN,RV64IZDINXZHINXMIN %s + declare half @llvm.sqrt.f16(half) define half @sqrt_f16(half %a) nounwind { @@ -39,6 +64,11 @@ ; CHECKIZFH-NEXT: fsqrt.h fa0, fa0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: sqrt_f16: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fsqrt.h a0, a0 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: sqrt_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -71,6 +101,13 @@ ; CHECKIZFHMIN-NEXT: fsqrt.s fa5, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: sqrt_f16: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fsqrt.s a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = call half @llvm.sqrt.f16(half %a) ret half %1 } @@ -101,6 +138,29 @@ ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: powi_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call __powisf2@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: powi_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: sext.w a1, a1 +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call __powisf2@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; ; RV32I-LABEL: powi_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -157,6 +217,29 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: powi_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: call __powisf2@plt +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: powi_f16: +; RV64IZHINXMIN: # %bb.0: +; 
RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: sext.w a1, a1 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: call __powisf2@plt +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.powi.f16.i32(half %a, i32 %b) ret half %1 } @@ -186,6 +269,28 @@ ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: sin_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call sinf@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: sin_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call sinf@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; ; RV32I-LABEL: sin_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -233,6 +338,28 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: sin_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: call sinf@plt +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: sin_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: call sinf@plt +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.sin.f16(half %a) ret half %1 } @@ -262,6 +389,28 @@ ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: cos_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call cosf@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: cos_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call cosf@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; ; RV32I-LABEL: cos_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -309,6 +458,28 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: cos_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.s.h 
a0, a0 +; RV32IZHINXMIN-NEXT: call cosf@plt +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: cos_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: call cosf@plt +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.cos.f16(half %a) ret half %1 } @@ -355,6 +526,46 @@ ; RV64IFZFH-NEXT: addi sp, sp, 16 ; RV64IFZFH-NEXT: ret ; +; RV32IZHINX-LABEL: sincos_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: mv a0, s0 +; RV32IZHINX-NEXT: call sinf@plt +; RV32IZHINX-NEXT: fcvt.h.s s1, a0 +; RV32IZHINX-NEXT: mv a0, s0 +; RV32IZHINX-NEXT: call cosf@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: fadd.h a0, s1, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: sincos_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -32 +; RV64IZHINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h s0, a0 +; RV64IZHINX-NEXT: mv a0, s0 +; RV64IZHINX-NEXT: call sinf@plt +; RV64IZHINX-NEXT: fcvt.h.s s1, a0 +; RV64IZHINX-NEXT: mv a0, s0 +; RV64IZHINX-NEXT: call cosf@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: fadd.h a0, s1, a0 +; RV64IZHINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 32 +; RV64IZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: sincos_f16: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: addi sp, sp, -32 @@ -556,6 +767,52 @@ ; RV64IDZFHMIN-NEXT: fld fs1, 8(sp) # 8-byte Folded Reload ; RV64IDZFHMIN-NEXT: addi sp, sp, 32 ; RV64IDZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: sincos_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; RV32IZHINXMIN-NEXT: mv a0, s0 +; RV32IZHINXMIN-NEXT: call sinf@plt +; RV32IZHINXMIN-NEXT: fcvt.h.s s1, a0 +; RV32IZHINXMIN-NEXT: mv a0, s0 +; RV32IZHINXMIN-NEXT: call cosf@plt +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, s1 +; RV32IZHINXMIN-NEXT: fadd.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: sincos_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: addi sp, sp, -32 +; 
RV64IZHINXMIN-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; RV64IZHINXMIN-NEXT: mv a0, s0 +; RV64IZHINXMIN-NEXT: call sinf@plt +; RV64IZHINXMIN-NEXT: fcvt.h.s s1, a0 +; RV64IZHINXMIN-NEXT: mv a0, s0 +; RV64IZHINXMIN-NEXT: call cosf@plt +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, s1 +; RV64IZHINXMIN-NEXT: fadd.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: addi sp, sp, 32 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.sin.f16(half %a) %2 = call half @llvm.cos.f16(half %a) %3 = fadd half %1, %2 @@ -589,6 +846,30 @@ ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: pow_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: fcvt.s.h a1, a1 +; RV32IZHINX-NEXT: call powf@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: pow_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: fcvt.s.h a1, a1 +; RV64IZHINX-NEXT: call powf@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; ; RV32I-LABEL: pow_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -664,6 +945,30 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: pow_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: call powf@plt +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: pow_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: call powf@plt +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.pow.f16(half %a, half %b) ret half %1 } @@ -693,6 +998,28 @@ ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: exp_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call expf@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: exp_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 
8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call expf@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; ; RV32I-LABEL: exp_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -740,6 +1067,28 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: exp_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: call expf@plt +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: exp_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: call expf@plt +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.exp.f16(half %a) ret half %1 } @@ -769,6 +1118,28 @@ ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: exp2_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call exp2f@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: exp2_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call exp2f@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; ; RV32I-LABEL: exp2_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -816,6 +1187,28 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: exp2_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: call exp2f@plt +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: exp2_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: call exp2f@plt +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.exp2.f16(half %a) ret half %1 } @@ -845,6 +1238,28 @@ ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: log_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call logf@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; 
RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: log_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call logf@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; ; RV32I-LABEL: log_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -892,6 +1307,28 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: log_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: call logf@plt +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: log_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: call logf@plt +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.log.f16(half %a) ret half %1 } @@ -921,6 +1358,28 @@ ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: log10_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call log10f@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: log10_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call log10f@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; ; RV32I-LABEL: log10_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -968,6 +1427,28 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: log10_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: call log10f@plt +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: log10_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: call log10f@plt +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.log10.f16(half %a) ret half %1 } @@ -997,6 +1478,28 @@ ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; +; 
RV32IZHINX-LABEL: log2_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call log2f@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: log2_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call log2f@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; ; RV32I-LABEL: log2_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1044,6 +1547,28 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: log2_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: call log2f@plt +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: log2_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: call log2f@plt +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.log2.f16(half %a) ret half %1 } @@ -1056,6 +1581,11 @@ ; CHECKIZFH-NEXT: fmadd.h fa0, fa0, fa1, fa2 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fma_f16: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fmadd.h a0, a0, a1, a2 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: fma_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 @@ -1130,6 +1660,15 @@ ; CHECKIZFHMIN-NEXT: fmadd.s fa5, fa3, fa4, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fma_f16: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fmadd.s a0, a0, a1, a2 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = call half @llvm.fma.f16(half %a, half %b, half %c) ret half %1 } @@ -1142,6 +1681,11 @@ ; CHECKIZFH-NEXT: fmadd.h fa0, fa0, fa1, fa2 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fmuladd_f16: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fmadd.h a0, a0, a1, a2 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: fmuladd_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -32 @@ -1229,6 +1773,18 @@ ; CHECKIZFHMIN-NEXT: fadd.s fa5, fa5, fa4 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fmuladd_f16: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fmul.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a2 +; CHECKIZHINXMIN-NEXT: fadd.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = call half @llvm.fmuladd.f16(half %a, half %b, half %c) ret half %1 } @@ -1241,6 
+1797,18 @@ ; CHECKIZFH-NEXT: fabs.h fa0, fa0 ; CHECKIZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fabs_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: slli a0, a0, 17 +; RV32IZHINX-NEXT: srli a0, a0, 17 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fabs_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: slli a0, a0, 49 +; RV64IZHINX-NEXT: srli a0, a0, 49 +; RV64IZHINX-NEXT: ret +; ; RV32I-LABEL: fabs_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 17 @@ -1259,6 +1827,18 @@ ; CHECKIZFHMIN-NEXT: fabs.s fa5, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: fabs_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: slli a0, a0, 17 +; RV32IZHINXMIN-NEXT: srli a0, a0, 17 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: fabs_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: slli a0, a0, 49 +; RV64IZHINXMIN-NEXT: srli a0, a0, 49 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.fabs.f16(half %a) ret half %1 } @@ -1271,6 +1851,11 @@ ; CHECKIZFH-NEXT: fmin.h fa0, fa0, fa1 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: minnum_f16: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fmin.h a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: minnum_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1330,6 +1915,14 @@ ; CHECKIZFHMIN-NEXT: fmin.s fa5, fa4, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: minnum_f16: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fmin.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = call half @llvm.minnum.f16(half %a, half %b) ret half %1 } @@ -1342,6 +1935,11 @@ ; CHECKIZFH-NEXT: fmax.h fa0, fa0, fa1 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: maxnum_f16: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fmax.h a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: maxnum_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1401,6 +1999,14 @@ ; CHECKIZFHMIN-NEXT: fmax.s fa5, fa4, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: maxnum_f16: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fmax.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = call half @llvm.maxnum.f16(half %a, half %b) ret half %1 } @@ -1430,6 +2036,11 @@ ; CHECKIZFH-NEXT: fsgnj.h fa0, fa0, fa1 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: copysign_f16: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fsgnj.h a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: copysign_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: lui a2, 1048568 @@ -1477,6 +2088,40 @@ ; RV64IZFHMIN-NEXT: flh fa0, 0(sp) ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: copysign_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: addi a2, sp, 12 +; RV32IZHINXMIN-NEXT: sh a1, 0(a2) +; RV32IZHINXMIN-NEXT: addi a1, sp, 8 +; RV32IZHINXMIN-NEXT: sh a0, 0(a1) +; RV32IZHINXMIN-NEXT: lbu a0, 13(sp) +; RV32IZHINXMIN-NEXT: lbu a2, 9(sp) +; RV32IZHINXMIN-NEXT: andi a0, a0, 128 +; RV32IZHINXMIN-NEXT: andi a2, a2, 127 +; RV32IZHINXMIN-NEXT: or a0, a2, a0 +; RV32IZHINXMIN-NEXT: sb a0, 9(sp) +; RV32IZHINXMIN-NEXT: lh a0, 0(a1) +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: copysign_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; 
RV64IZHINXMIN-NEXT: addi a2, sp, 8 +; RV64IZHINXMIN-NEXT: sh a1, 0(a2) +; RV64IZHINXMIN-NEXT: mv a1, sp +; RV64IZHINXMIN-NEXT: sh a0, 0(a1) +; RV64IZHINXMIN-NEXT: lbu a0, 9(sp) +; RV64IZHINXMIN-NEXT: lbu a2, 1(sp) +; RV64IZHINXMIN-NEXT: andi a0, a0, 128 +; RV64IZHINXMIN-NEXT: andi a2, a2, 127 +; RV64IZHINXMIN-NEXT: or a0, a2, a0 +; RV64IZHINXMIN-NEXT: sb a0, 1(sp) +; RV64IZHINXMIN-NEXT: lh a0, 0(a1) +; RV64IZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.copysign.f16(half %a, half %b) ret half %1 } @@ -1498,6 +2143,20 @@ ; CHECKIZFH-NEXT: .LBB17_2: ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: floor_f16: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI17_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI17_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB17_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rdn +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rdn +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB17_2: +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: floor_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1539,6 +2198,21 @@ ; CHECKIZFHMIN-NEXT: .LBB17_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: floor_f16: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB17_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB17_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = call half @llvm.floor.f16(half %a) ret half %1 } @@ -1560,6 +2234,20 @@ ; CHECKIZFH-NEXT: .LBB18_2: ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: ceil_f16: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI18_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI18_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB18_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rup +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rup +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB18_2: +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: ceil_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1601,6 +2289,21 @@ ; CHECKIZFHMIN-NEXT: .LBB18_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: ceil_f16: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB18_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB18_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = call half @llvm.ceil.f16(half %a) ret half %1 } @@ -1622,6 +2325,20 @@ ; CHECKIZFH-NEXT: .LBB19_2: ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: trunc_f16: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI19_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI19_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB19_2 +; CHECKIZHINX-NEXT: # 
%bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rtz +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rtz +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB19_2: +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: trunc_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1663,6 +2380,21 @@ ; CHECKIZFHMIN-NEXT: .LBB19_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: trunc_f16: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB19_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB19_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = call half @llvm.trunc.f16(half %a) ret half %1 } @@ -1684,6 +2416,20 @@ ; CHECKIZFH-NEXT: .LBB20_2: ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: rint_f16: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI20_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI20_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB20_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0 +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1 +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB20_2: +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: rint_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1725,6 +2471,21 @@ ; CHECKIZFHMIN-NEXT: .LBB20_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: rint_f16: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB20_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1 +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB20_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = call half @llvm.rint.f16(half %a) ret half %1 } @@ -1754,6 +2515,28 @@ ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: nearbyint_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call nearbyintf@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: nearbyint_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call nearbyintf@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; ; RV32I-LABEL: nearbyint_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1801,6 +2584,28 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: nearbyint_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; 
RV32IZHINXMIN-NEXT: call nearbyintf@plt +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: nearbyint_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: call nearbyintf@plt +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.nearbyint.f16(half %a) ret half %1 } @@ -1822,6 +2627,20 @@ ; CHECKIZFH-NEXT: .LBB22_2: ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: round_f16: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI22_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI22_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB22_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rmm +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rmm +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB22_2: +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: round_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1863,6 +2682,21 @@ ; CHECKIZFHMIN-NEXT: .LBB22_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: round_f16: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB22_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB22_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = call half @llvm.round.f16(half %a) ret half %1 } @@ -1884,6 +2718,20 @@ ; CHECKIZFH-NEXT: .LBB23_2: ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: roundeven_f16: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI23_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI23_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB23_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rne +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rne +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB23_2: +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: roundeven_f16: ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 @@ -1925,6 +2773,21 @@ ; CHECKIZFHMIN-NEXT: .LBB23_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: roundeven_f16: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB23_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB23_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = call half @llvm.roundeven.f16(half %a) ret half %1 } @@ -1938,6 +2801,13 @@ ; CHECKIZFH-NEXT: snez a0, a0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: isnan_d_fpclass: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fclass.h a0, a0 +; CHECKIZHINX-NEXT: 
andi a0, a0, 768 +; CHECKIZHINX-NEXT: snez a0, a0 +; CHECKIZHINX-NEXT: ret +; ; RV32I-LABEL: isnan_d_fpclass: ; RV32I: # %bb.0: ; RV32I-NEXT: slli a0, a0, 17 @@ -1975,6 +2845,29 @@ ; RV64IZFHMIN-NEXT: slli a1, a1, 10 ; RV64IZFHMIN-NEXT: slt a0, a1, a0 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: isnan_d_fpclass: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: slli a0, a0, 17 +; RV32IZHINXMIN-NEXT: srli a0, a0, 17 +; RV32IZHINXMIN-NEXT: li a1, 31 +; RV32IZHINXMIN-NEXT: slli a1, a1, 10 +; RV32IZHINXMIN-NEXT: slt a0, a1, a0 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: isnan_d_fpclass: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: slli a0, a0, 49 +; RV64IZHINXMIN-NEXT: srli a0, a0, 49 +; RV64IZHINXMIN-NEXT: li a1, 31 +; RV64IZHINXMIN-NEXT: slli a1, a1, 10 +; RV64IZHINXMIN-NEXT: slt a0, a1, a0 +; RV64IZHINXMIN-NEXT: ret %1 = call i1 @llvm.is.fpclass.f16(half %x, i32 3) ; nan ret i1 %1 } +;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: +; RV32IZDINXZHINX: {{.*}} +; RV32IZDINXZHINXMIN: {{.*}} +; RV64IZDINXZHINX: {{.*}} +; RV64IZDINXZHINXMIN: {{.*}} diff --git a/llvm/test/CodeGen/RISCV/half-isnan.ll b/llvm/test/CodeGen/RISCV/half-isnan.ll --- a/llvm/test/CodeGen/RISCV/half-isnan.ll +++ b/llvm/test/CodeGen/RISCV/half-isnan.ll @@ -3,10 +3,18 @@ ; RUN: -target-abi ilp32f < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \ ; RUN: -target-abi lp64f < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s | FileCheck -check-prefixes=CHECKIZHINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck -check-prefixes=CHECKIZHINX %s ; RUN: llc -mtriple=riscv32 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: -target-abi ilp32f < %s | FileCheck -check-prefixes=CHECKIZFHMIN %s ; RUN: llc -mtriple=riscv64 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: -target-abi lp64f < %s | FileCheck -check-prefixes=CHECKIZFHMIN %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s | FileCheck -check-prefixes=CHECKIZHINXMIN %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck -check-prefixes=CHECKIZHINXMIN %s define zeroext i1 @half_is_nan(half %a) nounwind { ; CHECK-LABEL: half_is_nan: @@ -15,12 +23,25 @@ ; CHECK-NEXT: xori a0, a0, 1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: half_is_nan: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: feq.h a0, a0, a0 +; CHECKIZHINX-NEXT: xori a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: half_is_nan: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECKIZFHMIN-NEXT: feq.s a0, fa5, fa5 ; CHECKIZFHMIN-NEXT: xori a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: half_is_nan: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECKIZHINXMIN-NEXT: xori a0, a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp uno half %a, 0.000000e+00 ret i1 %1 } @@ -31,11 +52,22 @@ ; CHECK-NEXT: feq.h a0, fa0, fa0 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: half_not_nan: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: feq.h a0, a0, a0 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: half_not_nan: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECKIZFHMIN-NEXT: feq.s a0, fa5, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: half_not_nan: +; CHECKIZHINXMIN: # %bb.0: +; 
CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp ord half %a, 0.000000e+00 ret i1 %1 } diff --git a/llvm/test/CodeGen/RISCV/half-mem.ll b/llvm/test/CodeGen/RISCV/half-mem.ll --- a/llvm/test/CodeGen/RISCV/half-mem.ll +++ b/llvm/test/CodeGen/RISCV/half-mem.ll @@ -3,10 +3,18 @@ ; RUN: -target-abi ilp32f < %s | FileCheck -check-prefixes=CHECKIZFH,RV32IZFH %s ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \ ; RUN: -target-abi lp64f < %s | FileCheck -check-prefixes=CHECKIZFH,RV64IZFH %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s | FileCheck -check-prefixes=CHECKIZHINX,RV32IZHINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck -check-prefixes=CHECKIZHINX,RV64IZHINX %s ; RUN: llc -mtriple=riscv32 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: -target-abi ilp32f < %s | FileCheck -check-prefixes=CHECKIZFHMIN,RV32IZFHMIN %s ; RUN: llc -mtriple=riscv64 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: -target-abi lp64f < %s | FileCheck -check-prefixes=CHECKIZFHMIN,RV64IZFHMIN %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s | FileCheck -check-prefixes=CHECKIZHINXMIN,RV32IZHINXMIN %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck -check-prefixes=CHECKIZHINXMIN,RV64IZHINXMIN %s define half @flh(ptr %a) nounwind { ; CHECKIZFH-LABEL: flh: @@ -16,6 +24,14 @@ ; CHECKIZFH-NEXT: fadd.h fa0, fa5, fa4 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: flh: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lh a1, 0(a0) +; CHECKIZHINX-NEXT: addi a0, a0, 6 +; CHECKIZHINX-NEXT: lh a0, 0(a0) +; CHECKIZHINX-NEXT: fadd.h a0, a1, a0 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: flh: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: flh fa5, 6(a0) @@ -25,6 +41,17 @@ ; CHECKIZFHMIN-NEXT: fadd.s fa5, fa4, fa5 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: flh: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: lh a1, 0(a0) +; CHECKIZHINXMIN-NEXT: addi a0, a0, 6 +; CHECKIZHINXMIN-NEXT: lh a0, 0(a0) +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fadd.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = load half, ptr %a %2 = getelementptr half, ptr %a, i32 3 %3 = load half, ptr %2 @@ -44,6 +71,14 @@ ; CHECKIZFH-NEXT: fsh fa5, 16(a0) ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: fsh: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fadd.h a1, a1, a2 +; CHECKIZHINX-NEXT: sh a1, 0(a0) +; CHECKIZHINX-NEXT: addi a0, a0, 16 +; CHECKIZHINX-NEXT: sh a1, 0(a0) +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: fsh: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -53,6 +88,17 @@ ; CHECKIZFHMIN-NEXT: fsh fa5, 0(a0) ; CHECKIZFHMIN-NEXT: fsh fa5, 16(a0) ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: fsh: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a2, a2 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fadd.s a1, a1, a2 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a1, a1 +; CHECKIZHINXMIN-NEXT: sh a1, 0(a0) +; CHECKIZHINXMIN-NEXT: addi a0, a0, 16 +; CHECKIZHINXMIN-NEXT: sh a1, 0(a0) +; CHECKIZHINXMIN-NEXT: ret %1 = fadd half %b, %c store half %1, ptr %a %2 = getelementptr half, ptr %a, i32 8 @@ -77,6 +123,18 @@ ; CHECKIZFH-NEXT: fsh fa0, 18(a1) ; CHECKIZFH-NEXT: 
ret ; +; CHECKIZHINX-LABEL: flh_fsh_global: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fadd.h a0, a0, a1 +; CHECKIZHINX-NEXT: lui a1, %hi(G) +; CHECKIZHINX-NEXT: addi a1, a1, %lo(G) +; CHECKIZHINX-NEXT: lh a2, 0(a1) +; CHECKIZHINX-NEXT: sh a0, 0(a1) +; CHECKIZHINX-NEXT: addi a1, a1, 18 +; CHECKIZHINX-NEXT: lh a2, 0(a1) +; CHECKIZHINX-NEXT: sh a0, 0(a1) +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: flh_fsh_global: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -90,6 +148,21 @@ ; CHECKIZFHMIN-NEXT: flh fa5, 18(a1) ; CHECKIZFHMIN-NEXT: fsh fa0, 18(a1) ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: flh_fsh_global: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fadd.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, %hi(G) +; CHECKIZHINXMIN-NEXT: addi a1, a1, %lo(G) +; CHECKIZHINXMIN-NEXT: lh a2, 0(a1) +; CHECKIZHINXMIN-NEXT: sh a0, 0(a1) +; CHECKIZHINXMIN-NEXT: addi a1, a1, 18 +; CHECKIZHINXMIN-NEXT: lh a2, 0(a1) +; CHECKIZHINXMIN-NEXT: sh a0, 0(a1) +; CHECKIZHINXMIN-NEXT: ret %1 = fadd half %a, %b %2 = load volatile half, ptr @G store half %1, ptr @G @@ -118,6 +191,25 @@ ; RV64IZFH-NEXT: fsh fa0, -273(a0) ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: flh_fsh_constant: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, 912092 +; RV32IZHINX-NEXT: addi a1, a1, -273 +; RV32IZHINX-NEXT: lh a2, 0(a1) +; RV32IZHINX-NEXT: fadd.h a0, a0, a2 +; RV32IZHINX-NEXT: sh a0, 0(a1) +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: flh_fsh_constant: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, 228023 +; RV64IZHINX-NEXT: slli a1, a1, 2 +; RV64IZHINX-NEXT: addi a1, a1, -273 +; RV64IZHINX-NEXT: lh a2, 0(a1) +; RV64IZHINX-NEXT: fadd.h a0, a0, a2 +; RV64IZHINX-NEXT: sh a0, 0(a1) +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: flh_fsh_constant: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: lui a0, 912092 @@ -140,6 +232,31 @@ ; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; RV64IZFHMIN-NEXT: fsh fa0, -273(a0) ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: flh_fsh_constant: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: lui a1, 912092 +; RV32IZHINXMIN-NEXT: addi a1, a1, -273 +; RV32IZHINXMIN-NEXT: lh a2, 0(a1) +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a2, a2 +; RV32IZHINXMIN-NEXT: fadd.s a0, a0, a2 +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: sh a0, 0(a1) +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: flh_fsh_constant: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: lui a1, 228023 +; RV64IZHINXMIN-NEXT: slli a1, a1, 2 +; RV64IZHINXMIN-NEXT: addi a1, a1, -273 +; RV64IZHINXMIN-NEXT: lh a2, 0(a1) +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a2, a2 +; RV64IZHINXMIN-NEXT: fadd.s a0, a0, a2 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: sh a0, 0(a1) +; RV64IZHINXMIN-NEXT: ret %1 = inttoptr i32 3735928559 to ptr %2 = load volatile half, ptr %1 %3 = fadd half %a, %2 @@ -180,6 +297,42 @@ ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: flh_stack: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: mv s0, a0 +; RV32IZHINX-NEXT: mv a0, sp +; RV32IZHINX-NEXT: mv s1, sp +; RV32IZHINX-NEXT: call notdead@plt +; RV32IZHINX-NEXT: lh a0, 0(s1) +; RV32IZHINX-NEXT: fadd.h a0, 
a0, s0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: flh_stack: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -32 +; RV64IZHINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: mv s0, a0 +; RV64IZHINX-NEXT: addi a0, sp, 4 +; RV64IZHINX-NEXT: addi s1, sp, 4 +; RV64IZHINX-NEXT: call notdead@plt +; RV64IZHINX-NEXT: lh a0, 0(s1) +; RV64IZHINX-NEXT: fadd.h a0, a0, s0 +; RV64IZHINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 32 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: flh_stack: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: addi sp, sp, -16 @@ -215,6 +368,48 @@ ; RV64IZFHMIN-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: flh_stack: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: mv s0, a0 +; RV32IZHINXMIN-NEXT: mv a0, sp +; RV32IZHINXMIN-NEXT: mv s1, sp +; RV32IZHINXMIN-NEXT: call notdead@plt +; RV32IZHINXMIN-NEXT: lh a0, 0(s1) +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, s0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fadd.s a0, a0, a1 +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: flh_stack: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: addi sp, sp, -32 +; RV64IZHINXMIN-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: mv s0, a0 +; RV64IZHINXMIN-NEXT: addi a0, sp, 4 +; RV64IZHINXMIN-NEXT: addi s1, sp, 4 +; RV64IZHINXMIN-NEXT: call notdead@plt +; RV64IZHINXMIN-NEXT: lh a0, 0(s1) +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, s0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fadd.s a0, a0, a1 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: addi sp, sp, 32 +; RV64IZHINXMIN-NEXT: ret %1 = alloca half, align 4 call void @notdead(ptr %1) %2 = load half, ptr %1 @@ -247,6 +442,32 @@ ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: fsh_stack: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fadd.h a0, a0, a1 +; RV32IZHINX-NEXT: addi a1, sp, 8 +; RV32IZHINX-NEXT: sh a0, 0(a1) +; RV32IZHINX-NEXT: addi a0, sp, 8 +; RV32IZHINX-NEXT: call notdead@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: fsh_stack: +; RV64IZHINX: # %bb.0: 
+; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fadd.h a0, a0, a1 +; RV64IZHINX-NEXT: addi a1, sp, 4 +; RV64IZHINX-NEXT: sh a0, 0(a1) +; RV64IZHINX-NEXT: addi a0, sp, 4 +; RV64IZHINX-NEXT: call notdead@plt +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: fsh_stack: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: addi sp, sp, -16 @@ -276,6 +497,38 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: fsh_stack: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fadd.s a0, a0, a1 +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: addi a1, sp, 8 +; RV32IZHINXMIN-NEXT: sh a0, 0(a1) +; RV32IZHINXMIN-NEXT: addi a0, sp, 8 +; RV32IZHINXMIN-NEXT: call notdead@plt +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: fsh_stack: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fadd.s a0, a0, a1 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: addi a1, sp, 4 +; RV64IZHINXMIN-NEXT: sh a0, 0(a1) +; RV64IZHINXMIN-NEXT: addi a0, sp, 4 +; RV64IZHINXMIN-NEXT: call notdead@plt +; RV64IZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-NEXT: ret %1 = fadd half %a, %b ; force store from FPR16 %2 = alloca half, align 4 store half %1, ptr %2 diff --git a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll --- a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll +++ b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll @@ -3,10 +3,18 @@ ; RUN: -target-abi=ilp32f | FileCheck -check-prefixes=CHECKIZFH,RV32IZFH %s ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64f | FileCheck -check-prefixes=CHECKIZFH,RV64IZFH %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs < %s \ +; RUN: -target-abi=ilp32 | FileCheck -check-prefixes=CHECKIZHINX,RV32IZHINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs < %s \ +; RUN: -target-abi=lp64 | FileCheck -check-prefixes=CHECKIZHINX,RV64IZHINX %s ; RUN: llc -mtriple=riscv32 -mattr=+zfhmin -verify-machineinstrs < %s \ ; RUN: -target-abi=ilp32f | FileCheck -check-prefixes=CHECKIZFHMIN,RV32IZFHMIN %s ; RUN: llc -mtriple=riscv64 -mattr=+zfhmin -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64f | FileCheck -check-prefixes=CHECKIZFHMIN,RV64IZFHMIN %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinxmin -verify-machineinstrs < %s \ +; RUN: -target-abi=ilp32 | FileCheck -check-prefixes=CHECKIZHINXMIN,RV32IZHINXMIN %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs < %s \ +; RUN: -target-abi=lp64 | FileCheck -check-prefixes=CHECKIZHINXMIN,RV64IZHINXMIN %s define signext i32 @test_floor_si32(half %x) { ; CHECKIZFH-LABEL: test_floor_si32: @@ -18,6 +26,25 @@ ; CHECKIZFH-NEXT: and a0, a1, a0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_floor_si32: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, 
%hi(.LCPI0_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI0_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB0_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rdn +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rdn +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB0_2: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rtz +; CHECKIZHINX-NEXT: feq.h a0, a0, a0 +; CHECKIZHINX-NEXT: seqz a0, a0 +; CHECKIZHINX-NEXT: addi a0, a0, -1 +; CHECKIZHINX-NEXT: and a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_floor_si32: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -39,6 +66,27 @@ ; CHECKIZFHMIN-NEXT: addi a1, a1, -1 ; CHECKIZFHMIN-NEXT: and a0, a1, a0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_floor_si32: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB0_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB0_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECKIZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECKIZHINXMIN-NEXT: seqz a0, a0 +; CHECKIZHINXMIN-NEXT: addi a0, a0, -1 +; CHECKIZHINXMIN-NEXT: and a0, a0, a1 +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.floor.f16(half %x) %b = call i32 @llvm.fptosi.sat.i32.f16(half %a) ret i32 %b @@ -103,6 +151,75 @@ ; RV64IZFH-NEXT: and a0, a1, a0 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_floor_si64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s2, 0(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI1_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI1_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB1_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB1_2: +; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: lui a0, 913408 +; RV32IZHINX-NEXT: fle.s s1, a0, s0 +; RV32IZHINX-NEXT: neg s2, s1 +; RV32IZHINX-NEXT: mv a0, s0 +; RV32IZHINX-NEXT: call __fixsfdi@plt +; RV32IZHINX-NEXT: lui a2, %hi(.LCPI1_1) +; RV32IZHINX-NEXT: lw a2, %lo(.LCPI1_1)(a2) +; RV32IZHINX-NEXT: and a0, s2, a0 +; RV32IZHINX-NEXT: flt.s a4, a2, s0 +; RV32IZHINX-NEXT: neg a2, a4 +; RV32IZHINX-NEXT: or a0, a2, a0 +; RV32IZHINX-NEXT: feq.s a2, s0, s0 +; RV32IZHINX-NEXT: neg a2, a2 +; RV32IZHINX-NEXT: lui a5, 524288 +; RV32IZHINX-NEXT: lui a3, 524288 +; RV32IZHINX-NEXT: beqz s1, .LBB1_4 +; RV32IZHINX-NEXT: # %bb.3: +; RV32IZHINX-NEXT: mv a3, a1 +; RV32IZHINX-NEXT: .LBB1_4: +; RV32IZHINX-NEXT: and a0, a2, a0 +; RV32IZHINX-NEXT: beqz a4, .LBB1_6 +; RV32IZHINX-NEXT: # %bb.5: +; RV32IZHINX-NEXT: addi a3, a5, -1 +; RV32IZHINX-NEXT: .LBB1_6: +; RV32IZHINX-NEXT: and a1, a2, a3 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded 
Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_floor_si64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI1_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI1_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB1_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB1_2: +; RV64IZHINX-NEXT: fcvt.l.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addi a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a0, a1 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_floor_si64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -175,6 +292,78 @@ ; RV64IZFHMIN-NEXT: addi a1, a1, -1 ; RV64IZFHMIN-NEXT: and a0, a1, a0 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_floor_si64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB1_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB1_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s2, 0(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; RV32IZHINXMIN-NEXT: lui a0, 913408 +; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0 +; RV32IZHINXMIN-NEXT: neg s2, s1 +; RV32IZHINXMIN-NEXT: mv a0, s0 +; RV32IZHINXMIN-NEXT: call __fixsfdi@plt +; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI1_0) +; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI1_0)(a2) +; RV32IZHINXMIN-NEXT: and a0, s2, a0 +; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0 +; RV32IZHINXMIN-NEXT: neg a2, a4 +; RV32IZHINXMIN-NEXT: or a0, a2, a0 +; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0 +; RV32IZHINXMIN-NEXT: neg a2, a2 +; RV32IZHINXMIN-NEXT: lui a5, 524288 +; RV32IZHINXMIN-NEXT: lui a3, 524288 +; RV32IZHINXMIN-NEXT: beqz s1, .LBB1_4 +; RV32IZHINXMIN-NEXT: # %bb.3: +; RV32IZHINXMIN-NEXT: mv a3, a1 +; RV32IZHINXMIN-NEXT: .LBB1_4: +; RV32IZHINXMIN-NEXT: and a0, a2, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: beqz a4, .LBB1_6 +; RV32IZHINXMIN-NEXT: # %bb.5: +; RV32IZHINXMIN-NEXT: addi a3, a5, -1 +; RV32IZHINXMIN-NEXT: .LBB1_6: +; RV32IZHINXMIN-NEXT: and a1, a2, a3 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_floor_si64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB1_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB1_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a1, a0, 
rtz +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV64IZHINXMIN-NEXT: seqz a0, a0 +; RV64IZHINXMIN-NEXT: addi a0, a0, -1 +; RV64IZHINXMIN-NEXT: and a0, a0, a1 +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.floor.f16(half %x) %b = call i64 @llvm.fptosi.sat.i64.f16(half %a) ret i64 %b @@ -190,6 +379,44 @@ ; CHECKIZFH-NEXT: and a0, a1, a0 ; CHECKIZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_floor_ui32: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI2_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI2_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB2_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB2_2: +; RV32IZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV32IZHINX-NEXT: feq.h a0, a0, a0 +; RV32IZHINX-NEXT: seqz a0, a0 +; RV32IZHINX-NEXT: addi a0, a0, -1 +; RV32IZHINX-NEXT: and a0, a0, a1 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_floor_ui32: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI2_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI2_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB2_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB2_2: +; RV64IZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addi a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a1, a0 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_floor_ui32: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -233,6 +460,48 @@ ; RV64IZFHMIN-NEXT: addi a1, a1, -1 ; RV64IZFHMIN-NEXT: and a0, a0, a1 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_floor_ui32: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB2_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB2_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; RV32IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV32IZHINXMIN-NEXT: seqz a0, a0 +; RV32IZHINXMIN-NEXT: addi a0, a0, -1 +; RV32IZHINXMIN-NEXT: and a0, a0, a1 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_floor_ui32: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB2_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB2_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV64IZHINXMIN-NEXT: seqz a0, a0 +; RV64IZHINXMIN-NEXT: addi a0, a0, -1 +; RV64IZHINXMIN-NEXT: and a0, a1, a0 +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.floor.f16(half %x) %b = call i32 @llvm.fptoui.sat.i32.f16(half %a) ret i32 %b @@ -284,6 +553,60 @@ ; RV64IZFH-NEXT: and a0, a1, a0 ; 
RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_floor_ui64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI3_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI3_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB3_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB3_2: +; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: fle.s a0, zero, s0 +; RV32IZHINX-NEXT: neg s1, a0 +; RV32IZHINX-NEXT: mv a0, s0 +; RV32IZHINX-NEXT: call __fixunssfdi@plt +; RV32IZHINX-NEXT: lui a2, %hi(.LCPI3_1) +; RV32IZHINX-NEXT: lw a2, %lo(.LCPI3_1)(a2) +; RV32IZHINX-NEXT: and a0, s1, a0 +; RV32IZHINX-NEXT: flt.s a2, a2, s0 +; RV32IZHINX-NEXT: neg a2, a2 +; RV32IZHINX-NEXT: or a0, a2, a0 +; RV32IZHINX-NEXT: and a1, s1, a1 +; RV32IZHINX-NEXT: or a1, a2, a1 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_floor_ui64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI3_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI3_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB3_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB3_2: +; RV64IZHINX-NEXT: fcvt.lu.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addi a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a0, a1 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_floor_ui64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -343,6 +666,63 @@ ; RV64IZFHMIN-NEXT: addi a1, a1, -1 ; RV64IZFHMIN-NEXT: and a0, a1, a0 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_floor_ui64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB3_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB3_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0 +; RV32IZHINXMIN-NEXT: neg s1, a0 +; RV32IZHINXMIN-NEXT: mv a0, s0 +; RV32IZHINXMIN-NEXT: call __fixunssfdi@plt +; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI3_0) +; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI3_0)(a2) +; RV32IZHINXMIN-NEXT: and a0, s1, a0 +; RV32IZHINXMIN-NEXT: flt.s a2, a2, s0 +; RV32IZHINXMIN-NEXT: neg a2, a2 +; RV32IZHINXMIN-NEXT: or a0, a2, a0 +; RV32IZHINXMIN-NEXT: and a1, s1, a1 +; RV32IZHINXMIN-NEXT: or a1, a2, a1 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte 
Folded Reload +; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_floor_ui64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB3_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB3_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV64IZHINXMIN-NEXT: seqz a0, a0 +; RV64IZHINXMIN-NEXT: addi a0, a0, -1 +; RV64IZHINXMIN-NEXT: and a0, a0, a1 +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.floor.f16(half %x) %b = call i64 @llvm.fptoui.sat.i64.f16(half %a) ret i64 %b @@ -358,6 +738,25 @@ ; CHECKIZFH-NEXT: and a0, a1, a0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_ceil_si32: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI4_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI4_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB4_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rup +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rup +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB4_2: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rtz +; CHECKIZHINX-NEXT: feq.h a0, a0, a0 +; CHECKIZHINX-NEXT: seqz a0, a0 +; CHECKIZHINX-NEXT: addi a0, a0, -1 +; CHECKIZHINX-NEXT: and a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_ceil_si32: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -379,6 +778,27 @@ ; CHECKIZFHMIN-NEXT: addi a1, a1, -1 ; CHECKIZFHMIN-NEXT: and a0, a1, a0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_ceil_si32: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB4_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB4_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECKIZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECKIZHINXMIN-NEXT: seqz a0, a0 +; CHECKIZHINXMIN-NEXT: addi a0, a0, -1 +; CHECKIZHINXMIN-NEXT: and a0, a0, a1 +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.ceil.f16(half %x) %b = call i32 @llvm.fptosi.sat.i32.f16(half %a) ret i32 %b @@ -443,6 +863,75 @@ ; RV64IZFH-NEXT: and a0, a1, a0 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_ceil_si64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s2, 0(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI5_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI5_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB5_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rup +; 
RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB5_2: +; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: lui a0, 913408 +; RV32IZHINX-NEXT: fle.s s1, a0, s0 +; RV32IZHINX-NEXT: neg s2, s1 +; RV32IZHINX-NEXT: mv a0, s0 +; RV32IZHINX-NEXT: call __fixsfdi@plt +; RV32IZHINX-NEXT: lui a2, %hi(.LCPI5_1) +; RV32IZHINX-NEXT: lw a2, %lo(.LCPI5_1)(a2) +; RV32IZHINX-NEXT: and a0, s2, a0 +; RV32IZHINX-NEXT: flt.s a4, a2, s0 +; RV32IZHINX-NEXT: neg a2, a4 +; RV32IZHINX-NEXT: or a0, a2, a0 +; RV32IZHINX-NEXT: feq.s a2, s0, s0 +; RV32IZHINX-NEXT: neg a2, a2 +; RV32IZHINX-NEXT: lui a5, 524288 +; RV32IZHINX-NEXT: lui a3, 524288 +; RV32IZHINX-NEXT: beqz s1, .LBB5_4 +; RV32IZHINX-NEXT: # %bb.3: +; RV32IZHINX-NEXT: mv a3, a1 +; RV32IZHINX-NEXT: .LBB5_4: +; RV32IZHINX-NEXT: and a0, a2, a0 +; RV32IZHINX-NEXT: beqz a4, .LBB5_6 +; RV32IZHINX-NEXT: # %bb.5: +; RV32IZHINX-NEXT: addi a3, a5, -1 +; RV32IZHINX-NEXT: .LBB5_6: +; RV32IZHINX-NEXT: and a1, a2, a3 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_ceil_si64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI5_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI5_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB5_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rup +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB5_2: +; RV64IZHINX-NEXT: fcvt.l.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addi a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a0, a1 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_ceil_si64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -515,6 +1004,78 @@ ; RV64IZFHMIN-NEXT: addi a1, a1, -1 ; RV64IZFHMIN-NEXT: and a0, a1, a0 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_ceil_si64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB5_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB5_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s2, 0(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; RV32IZHINXMIN-NEXT: lui a0, 913408 +; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0 +; RV32IZHINXMIN-NEXT: neg s2, s1 +; RV32IZHINXMIN-NEXT: mv a0, s0 +; RV32IZHINXMIN-NEXT: call __fixsfdi@plt +; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI5_0) +; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI5_0)(a2) +; RV32IZHINXMIN-NEXT: and a0, s2, a0 +; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0 +; RV32IZHINXMIN-NEXT: neg a2, a4 +; RV32IZHINXMIN-NEXT: or a0, a2, a0 +; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0 +; RV32IZHINXMIN-NEXT: neg a2, a2 +; RV32IZHINXMIN-NEXT: lui a5, 524288 +; RV32IZHINXMIN-NEXT: lui a3, 524288 +; RV32IZHINXMIN-NEXT: beqz s1, .LBB5_4 +; 
RV32IZHINXMIN-NEXT: # %bb.3: +; RV32IZHINXMIN-NEXT: mv a3, a1 +; RV32IZHINXMIN-NEXT: .LBB5_4: +; RV32IZHINXMIN-NEXT: and a0, a2, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: beqz a4, .LBB5_6 +; RV32IZHINXMIN-NEXT: # %bb.5: +; RV32IZHINXMIN-NEXT: addi a3, a5, -1 +; RV32IZHINXMIN-NEXT: .LBB5_6: +; RV32IZHINXMIN-NEXT: and a1, a2, a3 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_ceil_si64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB5_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB5_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV64IZHINXMIN-NEXT: seqz a0, a0 +; RV64IZHINXMIN-NEXT: addi a0, a0, -1 +; RV64IZHINXMIN-NEXT: and a0, a0, a1 +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.ceil.f16(half %x) %b = call i64 @llvm.fptosi.sat.i64.f16(half %a) ret i64 %b @@ -530,6 +1091,44 @@ ; CHECKIZFH-NEXT: and a0, a1, a0 ; CHECKIZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_ceil_ui32: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI6_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI6_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB6_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rup +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB6_2: +; RV32IZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV32IZHINX-NEXT: feq.h a0, a0, a0 +; RV32IZHINX-NEXT: seqz a0, a0 +; RV32IZHINX-NEXT: addi a0, a0, -1 +; RV32IZHINX-NEXT: and a0, a0, a1 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_ceil_ui32: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI6_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI6_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB6_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rup +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB6_2: +; RV64IZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addi a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a1, a0 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_ceil_ui32: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -573,6 +1172,48 @@ ; RV64IZFHMIN-NEXT: addi a1, a1, -1 ; RV64IZFHMIN-NEXT: and a0, a0, a1 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_ceil_ui32: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB6_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB6_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 
+; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; RV32IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV32IZHINXMIN-NEXT: seqz a0, a0 +; RV32IZHINXMIN-NEXT: addi a0, a0, -1 +; RV32IZHINXMIN-NEXT: and a0, a0, a1 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_ceil_ui32: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB6_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB6_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV64IZHINXMIN-NEXT: seqz a0, a0 +; RV64IZHINXMIN-NEXT: addi a0, a0, -1 +; RV64IZHINXMIN-NEXT: and a0, a1, a0 +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.ceil.f16(half %x) %b = call i32 @llvm.fptoui.sat.i32.f16(half %a) ret i32 %b @@ -624,6 +1265,60 @@ ; RV64IZFH-NEXT: and a0, a1, a0 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_ceil_ui64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI7_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI7_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB7_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rup +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB7_2: +; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: fle.s a0, zero, s0 +; RV32IZHINX-NEXT: neg s1, a0 +; RV32IZHINX-NEXT: mv a0, s0 +; RV32IZHINX-NEXT: call __fixunssfdi@plt +; RV32IZHINX-NEXT: lui a2, %hi(.LCPI7_1) +; RV32IZHINX-NEXT: lw a2, %lo(.LCPI7_1)(a2) +; RV32IZHINX-NEXT: and a0, s1, a0 +; RV32IZHINX-NEXT: flt.s a2, a2, s0 +; RV32IZHINX-NEXT: neg a2, a2 +; RV32IZHINX-NEXT: or a0, a2, a0 +; RV32IZHINX-NEXT: and a1, s1, a1 +; RV32IZHINX-NEXT: or a1, a2, a1 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_ceil_ui64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI7_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI7_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB7_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rup +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB7_2: +; RV64IZHINX-NEXT: fcvt.lu.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addi a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a0, a1 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_ceil_ui64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -683,6 +1378,63 @@ ; RV64IZFHMIN-NEXT: addi a1, a1, -1 ; RV64IZFHMIN-NEXT: and a0, a1, a0 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_ceil_ui64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; 
RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB7_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB7_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0 +; RV32IZHINXMIN-NEXT: neg s1, a0 +; RV32IZHINXMIN-NEXT: mv a0, s0 +; RV32IZHINXMIN-NEXT: call __fixunssfdi@plt +; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI7_0) +; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI7_0)(a2) +; RV32IZHINXMIN-NEXT: and a0, s1, a0 +; RV32IZHINXMIN-NEXT: flt.s a2, a2, s0 +; RV32IZHINXMIN-NEXT: neg a2, a2 +; RV32IZHINXMIN-NEXT: or a0, a2, a0 +; RV32IZHINXMIN-NEXT: and a1, s1, a1 +; RV32IZHINXMIN-NEXT: or a1, a2, a1 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_ceil_ui64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB7_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB7_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV64IZHINXMIN-NEXT: seqz a0, a0 +; RV64IZHINXMIN-NEXT: addi a0, a0, -1 +; RV64IZHINXMIN-NEXT: and a0, a0, a1 +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.ceil.f16(half %x) %b = call i64 @llvm.fptoui.sat.i64.f16(half %a) ret i64 %b @@ -698,6 +1450,25 @@ ; CHECKIZFH-NEXT: and a0, a1, a0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_trunc_si32: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI8_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI8_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB8_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rtz +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rtz +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB8_2: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rtz +; CHECKIZHINX-NEXT: feq.h a0, a0, a0 +; CHECKIZHINX-NEXT: seqz a0, a0 +; CHECKIZHINX-NEXT: addi a0, a0, -1 +; CHECKIZHINX-NEXT: and a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_trunc_si32: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -719,6 +1490,27 @@ ; CHECKIZFHMIN-NEXT: addi a1, a1, -1 ; CHECKIZFHMIN-NEXT: and a0, a1, a0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_trunc_si32: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB8_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; 
CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB8_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECKIZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECKIZHINXMIN-NEXT: seqz a0, a0 +; CHECKIZHINXMIN-NEXT: addi a0, a0, -1 +; CHECKIZHINXMIN-NEXT: and a0, a0, a1 +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.trunc.f16(half %x) %b = call i32 @llvm.fptosi.sat.i32.f16(half %a) ret i32 %b @@ -783,6 +1575,75 @@ ; RV64IZFH-NEXT: and a0, a1, a0 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_trunc_si64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s2, 0(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI9_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI9_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB9_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB9_2: +; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: lui a0, 913408 +; RV32IZHINX-NEXT: fle.s s1, a0, s0 +; RV32IZHINX-NEXT: neg s2, s1 +; RV32IZHINX-NEXT: mv a0, s0 +; RV32IZHINX-NEXT: call __fixsfdi@plt +; RV32IZHINX-NEXT: lui a2, %hi(.LCPI9_1) +; RV32IZHINX-NEXT: lw a2, %lo(.LCPI9_1)(a2) +; RV32IZHINX-NEXT: and a0, s2, a0 +; RV32IZHINX-NEXT: flt.s a4, a2, s0 +; RV32IZHINX-NEXT: neg a2, a4 +; RV32IZHINX-NEXT: or a0, a2, a0 +; RV32IZHINX-NEXT: feq.s a2, s0, s0 +; RV32IZHINX-NEXT: neg a2, a2 +; RV32IZHINX-NEXT: lui a5, 524288 +; RV32IZHINX-NEXT: lui a3, 524288 +; RV32IZHINX-NEXT: beqz s1, .LBB9_4 +; RV32IZHINX-NEXT: # %bb.3: +; RV32IZHINX-NEXT: mv a3, a1 +; RV32IZHINX-NEXT: .LBB9_4: +; RV32IZHINX-NEXT: and a0, a2, a0 +; RV32IZHINX-NEXT: beqz a4, .LBB9_6 +; RV32IZHINX-NEXT: # %bb.5: +; RV32IZHINX-NEXT: addi a3, a5, -1 +; RV32IZHINX-NEXT: .LBB9_6: +; RV32IZHINX-NEXT: and a1, a2, a3 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_trunc_si64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI9_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI9_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB9_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB9_2: +; RV64IZHINX-NEXT: fcvt.l.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addi a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a0, a1 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_trunc_si64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -855,6 +1716,78 @@ ; RV64IZFHMIN-NEXT: addi a1, a1, -1 ; RV64IZFHMIN-NEXT: and a0, a1, a0 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_trunc_si64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, 
.LBB9_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB9_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s2, 0(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; RV32IZHINXMIN-NEXT: lui a0, 913408 +; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0 +; RV32IZHINXMIN-NEXT: neg s2, s1 +; RV32IZHINXMIN-NEXT: mv a0, s0 +; RV32IZHINXMIN-NEXT: call __fixsfdi@plt +; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI9_0) +; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI9_0)(a2) +; RV32IZHINXMIN-NEXT: and a0, s2, a0 +; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0 +; RV32IZHINXMIN-NEXT: neg a2, a4 +; RV32IZHINXMIN-NEXT: or a0, a2, a0 +; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0 +; RV32IZHINXMIN-NEXT: neg a2, a2 +; RV32IZHINXMIN-NEXT: lui a5, 524288 +; RV32IZHINXMIN-NEXT: lui a3, 524288 +; RV32IZHINXMIN-NEXT: beqz s1, .LBB9_4 +; RV32IZHINXMIN-NEXT: # %bb.3: +; RV32IZHINXMIN-NEXT: mv a3, a1 +; RV32IZHINXMIN-NEXT: .LBB9_4: +; RV32IZHINXMIN-NEXT: and a0, a2, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: beqz a4, .LBB9_6 +; RV32IZHINXMIN-NEXT: # %bb.5: +; RV32IZHINXMIN-NEXT: addi a3, a5, -1 +; RV32IZHINXMIN-NEXT: .LBB9_6: +; RV32IZHINXMIN-NEXT: and a1, a2, a3 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_trunc_si64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB9_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB9_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV64IZHINXMIN-NEXT: seqz a0, a0 +; RV64IZHINXMIN-NEXT: addi a0, a0, -1 +; RV64IZHINXMIN-NEXT: and a0, a0, a1 +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.trunc.f16(half %x) %b = call i64 @llvm.fptosi.sat.i64.f16(half %a) ret i64 %b @@ -870,6 +1803,44 @@ ; CHECKIZFH-NEXT: and a0, a1, a0 ; CHECKIZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_trunc_ui32: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI10_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI10_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB10_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB10_2: +; RV32IZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV32IZHINX-NEXT: feq.h a0, a0, a0 +; RV32IZHINX-NEXT: seqz a0, a0 +; RV32IZHINX-NEXT: addi a0, a0, -1 +; RV32IZHINX-NEXT: and a0, a0, a1 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_trunc_ui32: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI10_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI10_0)(a1) +; RV64IZHINX-NEXT: 
fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB10_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB10_2: +; RV64IZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addi a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a1, a0 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_trunc_ui32: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -913,6 +1884,48 @@ ; RV64IZFHMIN-NEXT: addi a1, a1, -1 ; RV64IZFHMIN-NEXT: and a0, a0, a1 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_trunc_ui32: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB10_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB10_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; RV32IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV32IZHINXMIN-NEXT: seqz a0, a0 +; RV32IZHINXMIN-NEXT: addi a0, a0, -1 +; RV32IZHINXMIN-NEXT: and a0, a0, a1 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_trunc_ui32: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB10_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB10_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV64IZHINXMIN-NEXT: seqz a0, a0 +; RV64IZHINXMIN-NEXT: addi a0, a0, -1 +; RV64IZHINXMIN-NEXT: and a0, a1, a0 +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.trunc.f16(half %x) %b = call i32 @llvm.fptoui.sat.i32.f16(half %a) ret i32 %b @@ -964,6 +1977,60 @@ ; RV64IZFH-NEXT: and a0, a1, a0 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_trunc_ui64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI11_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI11_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB11_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB11_2: +; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: fle.s a0, zero, s0 +; RV32IZHINX-NEXT: neg s1, a0 +; RV32IZHINX-NEXT: mv a0, s0 +; RV32IZHINX-NEXT: call __fixunssfdi@plt +; RV32IZHINX-NEXT: lui a2, %hi(.LCPI11_1) +; RV32IZHINX-NEXT: lw a2, %lo(.LCPI11_1)(a2) +; RV32IZHINX-NEXT: and a0, s1, a0 +; RV32IZHINX-NEXT: flt.s a2, a2, s0 +; RV32IZHINX-NEXT: neg a2, a2 +; RV32IZHINX-NEXT: or a0, a2, a0 +; RV32IZHINX-NEXT: and a1, s1, a1 +; RV32IZHINX-NEXT: or a1, a2, a1 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 
4-byte Folded Reload +; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_trunc_ui64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI11_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI11_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB11_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB11_2: +; RV64IZHINX-NEXT: fcvt.lu.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addi a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a0, a1 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_trunc_ui64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1023,6 +2090,63 @@ ; RV64IZFHMIN-NEXT: addi a1, a1, -1 ; RV64IZFHMIN-NEXT: and a0, a1, a0 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_trunc_ui64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB11_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB11_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0 +; RV32IZHINXMIN-NEXT: neg s1, a0 +; RV32IZHINXMIN-NEXT: mv a0, s0 +; RV32IZHINXMIN-NEXT: call __fixunssfdi@plt +; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI11_0) +; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI11_0)(a2) +; RV32IZHINXMIN-NEXT: and a0, s1, a0 +; RV32IZHINXMIN-NEXT: flt.s a2, a2, s0 +; RV32IZHINXMIN-NEXT: neg a2, a2 +; RV32IZHINXMIN-NEXT: or a0, a2, a0 +; RV32IZHINXMIN-NEXT: and a1, s1, a1 +; RV32IZHINXMIN-NEXT: or a1, a2, a1 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_trunc_ui64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB11_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB11_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV64IZHINXMIN-NEXT: seqz a0, a0 +; RV64IZHINXMIN-NEXT: addi a0, a0, -1 +; RV64IZHINXMIN-NEXT: and a0, a0, a1 +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.trunc.f16(half %x) %b = call i64 @llvm.fptoui.sat.i64.f16(half %a) ret i64 %b @@ -1038,6 +2162,25 @@ ; CHECKIZFH-NEXT: and a0, a1, a0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_round_si32: +; CHECKIZHINX: # %bb.0: +; 
CHECKIZHINX-NEXT: lui a1, %hi(.LCPI12_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI12_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB12_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rmm +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rmm +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB12_2: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rtz +; CHECKIZHINX-NEXT: feq.h a0, a0, a0 +; CHECKIZHINX-NEXT: seqz a0, a0 +; CHECKIZHINX-NEXT: addi a0, a0, -1 +; CHECKIZHINX-NEXT: and a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_round_si32: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1059,6 +2202,27 @@ ; CHECKIZFHMIN-NEXT: addi a1, a1, -1 ; CHECKIZFHMIN-NEXT: and a0, a1, a0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_round_si32: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB12_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB12_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECKIZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECKIZHINXMIN-NEXT: seqz a0, a0 +; CHECKIZHINXMIN-NEXT: addi a0, a0, -1 +; CHECKIZHINXMIN-NEXT: and a0, a0, a1 +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.round.f16(half %x) %b = call i32 @llvm.fptosi.sat.i32.f16(half %a) ret i32 %b @@ -1123,6 +2287,75 @@ ; RV64IZFH-NEXT: and a0, a1, a0 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_round_si64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s2, 0(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI13_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI13_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB13_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB13_2: +; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: lui a0, 913408 +; RV32IZHINX-NEXT: fle.s s1, a0, s0 +; RV32IZHINX-NEXT: neg s2, s1 +; RV32IZHINX-NEXT: mv a0, s0 +; RV32IZHINX-NEXT: call __fixsfdi@plt +; RV32IZHINX-NEXT: lui a2, %hi(.LCPI13_1) +; RV32IZHINX-NEXT: lw a2, %lo(.LCPI13_1)(a2) +; RV32IZHINX-NEXT: and a0, s2, a0 +; RV32IZHINX-NEXT: flt.s a4, a2, s0 +; RV32IZHINX-NEXT: neg a2, a4 +; RV32IZHINX-NEXT: or a0, a2, a0 +; RV32IZHINX-NEXT: feq.s a2, s0, s0 +; RV32IZHINX-NEXT: neg a2, a2 +; RV32IZHINX-NEXT: lui a5, 524288 +; RV32IZHINX-NEXT: lui a3, 524288 +; RV32IZHINX-NEXT: beqz s1, .LBB13_4 +; RV32IZHINX-NEXT: # %bb.3: +; RV32IZHINX-NEXT: mv a3, a1 +; RV32IZHINX-NEXT: .LBB13_4: +; RV32IZHINX-NEXT: and a0, a2, a0 +; RV32IZHINX-NEXT: beqz a4, .LBB13_6 +; RV32IZHINX-NEXT: # %bb.5: +; RV32IZHINX-NEXT: addi a3, a5, -1 +; RV32IZHINX-NEXT: .LBB13_6: +; RV32IZHINX-NEXT: and a1, a2, a3 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; 
RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_round_si64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI13_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI13_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB13_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB13_2: +; RV64IZHINX-NEXT: fcvt.l.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addi a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a0, a1 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_round_si64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1195,6 +2428,78 @@ ; RV64IZFHMIN-NEXT: addi a1, a1, -1 ; RV64IZFHMIN-NEXT: and a0, a1, a0 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_round_si64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB13_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB13_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s2, 0(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; RV32IZHINXMIN-NEXT: lui a0, 913408 +; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0 +; RV32IZHINXMIN-NEXT: neg s2, s1 +; RV32IZHINXMIN-NEXT: mv a0, s0 +; RV32IZHINXMIN-NEXT: call __fixsfdi@plt +; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI13_0) +; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI13_0)(a2) +; RV32IZHINXMIN-NEXT: and a0, s2, a0 +; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0 +; RV32IZHINXMIN-NEXT: neg a2, a4 +; RV32IZHINXMIN-NEXT: or a0, a2, a0 +; RV32IZHINXMIN-NEXT: feq.s a2, s0, s0 +; RV32IZHINXMIN-NEXT: neg a2, a2 +; RV32IZHINXMIN-NEXT: lui a5, 524288 +; RV32IZHINXMIN-NEXT: lui a3, 524288 +; RV32IZHINXMIN-NEXT: beqz s1, .LBB13_4 +; RV32IZHINXMIN-NEXT: # %bb.3: +; RV32IZHINXMIN-NEXT: mv a3, a1 +; RV32IZHINXMIN-NEXT: .LBB13_4: +; RV32IZHINXMIN-NEXT: and a0, a2, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: beqz a4, .LBB13_6 +; RV32IZHINXMIN-NEXT: # %bb.5: +; RV32IZHINXMIN-NEXT: addi a3, a5, -1 +; RV32IZHINXMIN-NEXT: .LBB13_6: +; RV32IZHINXMIN-NEXT: and a1, a2, a3 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_round_si64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB13_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB13_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; 
RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV64IZHINXMIN-NEXT: seqz a0, a0 +; RV64IZHINXMIN-NEXT: addi a0, a0, -1 +; RV64IZHINXMIN-NEXT: and a0, a0, a1 +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.round.f16(half %x) %b = call i64 @llvm.fptosi.sat.i64.f16(half %a) ret i64 %b @@ -1210,6 +2515,44 @@ ; CHECKIZFH-NEXT: and a0, a1, a0 ; CHECKIZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_round_ui32: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI14_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI14_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB14_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB14_2: +; RV32IZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV32IZHINX-NEXT: feq.h a0, a0, a0 +; RV32IZHINX-NEXT: seqz a0, a0 +; RV32IZHINX-NEXT: addi a0, a0, -1 +; RV32IZHINX-NEXT: and a0, a0, a1 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_round_ui32: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI14_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI14_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB14_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB14_2: +; RV64IZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addi a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a1, a0 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_round_ui32: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1253,6 +2596,48 @@ ; RV64IZFHMIN-NEXT: addi a1, a1, -1 ; RV64IZFHMIN-NEXT: and a0, a0, a1 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_round_ui32: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB14_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB14_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; RV32IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV32IZHINXMIN-NEXT: seqz a0, a0 +; RV32IZHINXMIN-NEXT: addi a0, a0, -1 +; RV32IZHINXMIN-NEXT: and a0, a0, a1 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_round_ui32: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB14_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB14_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV64IZHINXMIN-NEXT: seqz a0, a0 +; RV64IZHINXMIN-NEXT: addi a0, a0, -1 +; RV64IZHINXMIN-NEXT: and a0, a1, a0 +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.round.f16(half %x) %b = call i32 
@llvm.fptoui.sat.i32.f16(half %a) ret i32 %b @@ -1304,6 +2689,60 @@ ; RV64IZFH-NEXT: and a0, a1, a0 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_round_ui64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI15_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI15_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB15_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB15_2: +; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: fle.s a0, zero, s0 +; RV32IZHINX-NEXT: neg s1, a0 +; RV32IZHINX-NEXT: mv a0, s0 +; RV32IZHINX-NEXT: call __fixunssfdi@plt +; RV32IZHINX-NEXT: lui a2, %hi(.LCPI15_1) +; RV32IZHINX-NEXT: lw a2, %lo(.LCPI15_1)(a2) +; RV32IZHINX-NEXT: and a0, s1, a0 +; RV32IZHINX-NEXT: flt.s a2, a2, s0 +; RV32IZHINX-NEXT: neg a2, a2 +; RV32IZHINX-NEXT: or a0, a2, a0 +; RV32IZHINX-NEXT: and a1, s1, a1 +; RV32IZHINX-NEXT: or a1, a2, a1 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_round_ui64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI15_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI15_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB15_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB15_2: +; RV64IZHINX-NEXT: fcvt.lu.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addi a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a0, a1 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_round_ui64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1363,6 +2802,63 @@ ; RV64IZFHMIN-NEXT: addi a1, a1, -1 ; RV64IZFHMIN-NEXT: and a0, a1, a0 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_round_ui64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB15_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB15_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0 +; RV32IZHINXMIN-NEXT: neg s1, a0 +; RV32IZHINXMIN-NEXT: mv a0, s0 +; RV32IZHINXMIN-NEXT: call __fixunssfdi@plt +; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI15_0) +; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI15_0)(a2) +; RV32IZHINXMIN-NEXT: and a0, s1, a0 +; RV32IZHINXMIN-NEXT: flt.s a2, a2, s0 +; RV32IZHINXMIN-NEXT: neg a2, a2 +; RV32IZHINXMIN-NEXT: or a0, a2, a0 +; RV32IZHINXMIN-NEXT: and a1, s1, a1 +; RV32IZHINXMIN-NEXT: or 
a1, a2, a1 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_round_ui64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB15_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB15_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV64IZHINXMIN-NEXT: seqz a0, a0 +; RV64IZHINXMIN-NEXT: addi a0, a0, -1 +; RV64IZHINXMIN-NEXT: and a0, a0, a1 +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.round.f16(half %x) %b = call i64 @llvm.fptoui.sat.i64.f16(half %a) ret i64 %b @@ -1378,6 +2874,25 @@ ; CHECKIZFH-NEXT: and a0, a1, a0 ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_roundeven_si32: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI16_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI16_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB16_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rne +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rne +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB16_2: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rtz +; CHECKIZHINX-NEXT: feq.h a0, a0, a0 +; CHECKIZHINX-NEXT: seqz a0, a0 +; CHECKIZHINX-NEXT: addi a0, a0, -1 +; CHECKIZHINX-NEXT: and a0, a0, a1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_roundeven_si32: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1399,6 +2914,27 @@ ; CHECKIZFHMIN-NEXT: addi a1, a1, -1 ; CHECKIZFHMIN-NEXT: and a0, a1, a0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_roundeven_si32: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB16_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB16_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECKIZHINXMIN-NEXT: feq.s a0, a0, a0 +; CHECKIZHINXMIN-NEXT: seqz a0, a0 +; CHECKIZHINXMIN-NEXT: addi a0, a0, -1 +; CHECKIZHINXMIN-NEXT: and a0, a0, a1 +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.roundeven.f16(half %x) %b = call i32 @llvm.fptosi.sat.i32.f16(half %a) ret i32 %b @@ -1463,6 +2999,75 @@ ; RV64IZFH-NEXT: and a0, a1, a0 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_roundeven_si64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s2, 0(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI17_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI17_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; 
RV32IZHINX-NEXT: beqz a1, .LBB17_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB17_2: +; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: lui a0, 913408 +; RV32IZHINX-NEXT: fle.s s1, a0, s0 +; RV32IZHINX-NEXT: neg s2, s1 +; RV32IZHINX-NEXT: mv a0, s0 +; RV32IZHINX-NEXT: call __fixsfdi@plt +; RV32IZHINX-NEXT: lui a2, %hi(.LCPI17_1) +; RV32IZHINX-NEXT: lw a2, %lo(.LCPI17_1)(a2) +; RV32IZHINX-NEXT: and a0, s2, a0 +; RV32IZHINX-NEXT: flt.s a4, a2, s0 +; RV32IZHINX-NEXT: neg a2, a4 +; RV32IZHINX-NEXT: or a0, a2, a0 +; RV32IZHINX-NEXT: feq.s a2, s0, s0 +; RV32IZHINX-NEXT: neg a2, a2 +; RV32IZHINX-NEXT: lui a5, 524288 +; RV32IZHINX-NEXT: lui a3, 524288 +; RV32IZHINX-NEXT: beqz s1, .LBB17_4 +; RV32IZHINX-NEXT: # %bb.3: +; RV32IZHINX-NEXT: mv a3, a1 +; RV32IZHINX-NEXT: .LBB17_4: +; RV32IZHINX-NEXT: and a0, a2, a0 +; RV32IZHINX-NEXT: beqz a4, .LBB17_6 +; RV32IZHINX-NEXT: # %bb.5: +; RV32IZHINX-NEXT: addi a3, a5, -1 +; RV32IZHINX-NEXT: .LBB17_6: +; RV32IZHINX-NEXT: and a1, a2, a3 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s2, 0(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_roundeven_si64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI17_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI17_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB17_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB17_2: +; RV64IZHINX-NEXT: fcvt.l.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addi a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a0, a1 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_roundeven_si64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1535,6 +3140,78 @@ ; RV64IZFHMIN-NEXT: addi a1, a1, -1 ; RV64IZFHMIN-NEXT: and a0, a1, a0 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_roundeven_si64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB17_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB17_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s2, 0(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; RV32IZHINXMIN-NEXT: lui a0, 913408 +; RV32IZHINXMIN-NEXT: fle.s s1, a0, s0 +; RV32IZHINXMIN-NEXT: neg s2, s1 +; RV32IZHINXMIN-NEXT: mv a0, s0 +; RV32IZHINXMIN-NEXT: call __fixsfdi@plt +; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI17_0) +; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI17_0)(a2) +; RV32IZHINXMIN-NEXT: and a0, s2, a0 +; RV32IZHINXMIN-NEXT: flt.s a4, a2, s0 +; RV32IZHINXMIN-NEXT: neg a2, a4 +; RV32IZHINXMIN-NEXT: or a0, a2, a0 +; RV32IZHINXMIN-NEXT: feq.s 
a2, s0, s0 +; RV32IZHINXMIN-NEXT: neg a2, a2 +; RV32IZHINXMIN-NEXT: lui a5, 524288 +; RV32IZHINXMIN-NEXT: lui a3, 524288 +; RV32IZHINXMIN-NEXT: beqz s1, .LBB17_4 +; RV32IZHINXMIN-NEXT: # %bb.3: +; RV32IZHINXMIN-NEXT: mv a3, a1 +; RV32IZHINXMIN-NEXT: .LBB17_4: +; RV32IZHINXMIN-NEXT: and a0, a2, a0 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s2, 0(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: beqz a4, .LBB17_6 +; RV32IZHINXMIN-NEXT: # %bb.5: +; RV32IZHINXMIN-NEXT: addi a3, a5, -1 +; RV32IZHINXMIN-NEXT: .LBB17_6: +; RV32IZHINXMIN-NEXT: and a1, a2, a3 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_roundeven_si64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB17_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB17_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV64IZHINXMIN-NEXT: seqz a0, a0 +; RV64IZHINXMIN-NEXT: addi a0, a0, -1 +; RV64IZHINXMIN-NEXT: and a0, a0, a1 +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.roundeven.f16(half %x) %b = call i64 @llvm.fptosi.sat.i64.f16(half %a) ret i64 %b @@ -1550,6 +3227,44 @@ ; CHECKIZFH-NEXT: and a0, a1, a0 ; CHECKIZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_roundeven_ui32: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI18_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI18_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB18_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB18_2: +; RV32IZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV32IZHINX-NEXT: feq.h a0, a0, a0 +; RV32IZHINX-NEXT: seqz a0, a0 +; RV32IZHINX-NEXT: addi a0, a0, -1 +; RV32IZHINX-NEXT: and a0, a0, a1 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_roundeven_ui32: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI18_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI18_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB18_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB18_2: +; RV64IZHINX-NEXT: fcvt.wu.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addi a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a1, a0 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_roundeven_ui32: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1593,6 +3308,48 @@ ; RV64IZFHMIN-NEXT: addi a1, a1, -1 ; RV64IZFHMIN-NEXT: and a0, a0, a1 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_roundeven_ui32: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB18_2 +; RV32IZHINXMIN-NEXT: 
# %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB18_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; RV32IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV32IZHINXMIN-NEXT: seqz a0, a0 +; RV32IZHINXMIN-NEXT: addi a0, a0, -1 +; RV32IZHINXMIN-NEXT: and a0, a0, a1 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_roundeven_ui32: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB18_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB18_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.wu.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV64IZHINXMIN-NEXT: seqz a0, a0 +; RV64IZHINXMIN-NEXT: addi a0, a0, -1 +; RV64IZHINXMIN-NEXT: and a0, a1, a0 +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.roundeven.f16(half %x) %b = call i32 @llvm.fptoui.sat.i32.f16(half %a) ret i32 %b @@ -1644,6 +3401,60 @@ ; RV64IZFH-NEXT: and a0, a1, a0 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_roundeven_ui64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI19_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI19_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB19_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB19_2: +; RV32IZHINX-NEXT: fcvt.s.h s0, a0 +; RV32IZHINX-NEXT: fle.s a0, zero, s0 +; RV32IZHINX-NEXT: neg s1, a0 +; RV32IZHINX-NEXT: mv a0, s0 +; RV32IZHINX-NEXT: call __fixunssfdi@plt +; RV32IZHINX-NEXT: lui a2, %hi(.LCPI19_1) +; RV32IZHINX-NEXT: lw a2, %lo(.LCPI19_1)(a2) +; RV32IZHINX-NEXT: and a0, s1, a0 +; RV32IZHINX-NEXT: flt.s a2, a2, s0 +; RV32IZHINX-NEXT: neg a2, a2 +; RV32IZHINX-NEXT: or a0, a2, a0 +; RV32IZHINX-NEXT: and a1, s1, a1 +; RV32IZHINX-NEXT: or a1, a2, a1 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_roundeven_ui64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI19_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI19_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB19_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB19_2: +; RV64IZHINX-NEXT: fcvt.lu.h a1, a0, rtz +; RV64IZHINX-NEXT: feq.h a0, a0, a0 +; RV64IZHINX-NEXT: seqz a0, a0 +; RV64IZHINX-NEXT: addi a0, a0, -1 +; RV64IZHINX-NEXT: and a0, a0, a1 +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_roundeven_ui64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1703,6 +3514,63 @@ 
; RV64IZFHMIN-NEXT: addi a1, a1, -1 ; RV64IZFHMIN-NEXT: and a0, a1, a0 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_roundeven_ui64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB19_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB19_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h s0, a0 +; RV32IZHINXMIN-NEXT: fle.s a0, zero, s0 +; RV32IZHINXMIN-NEXT: neg s1, a0 +; RV32IZHINXMIN-NEXT: mv a0, s0 +; RV32IZHINXMIN-NEXT: call __fixunssfdi@plt +; RV32IZHINXMIN-NEXT: lui a2, %hi(.LCPI19_0) +; RV32IZHINXMIN-NEXT: lw a2, %lo(.LCPI19_0)(a2) +; RV32IZHINXMIN-NEXT: and a0, s1, a0 +; RV32IZHINXMIN-NEXT: flt.s a2, a2, s0 +; RV32IZHINXMIN-NEXT: neg a2, a2 +; RV32IZHINXMIN-NEXT: or a0, a2, a0 +; RV32IZHINXMIN-NEXT: and a1, s1, a1 +; RV32IZHINXMIN-NEXT: or a1, a2, a1 +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_roundeven_ui64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB19_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB19_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: feq.s a0, a0, a0 +; RV64IZHINXMIN-NEXT: seqz a0, a0 +; RV64IZHINXMIN-NEXT: addi a0, a0, -1 +; RV64IZHINXMIN-NEXT: and a0, a0, a1 +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.roundeven.f16(half %x) %b = call i64 @llvm.fptoui.sat.i64.f16(half %a) ret i64 %b diff --git a/llvm/test/CodeGen/RISCV/half-round-conv.ll b/llvm/test/CodeGen/RISCV/half-round-conv.ll --- a/llvm/test/CodeGen/RISCV/half-round-conv.ll +++ b/llvm/test/CodeGen/RISCV/half-round-conv.ll @@ -3,10 +3,18 @@ ; RUN: -target-abi=ilp32f | FileCheck -check-prefixes=CHECKIZFH,RV32IZFH %s ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64f | FileCheck -check-prefixes=CHECKIZFH,RV64IZFH %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs < %s \ +; RUN: -target-abi=ilp32 | FileCheck -check-prefixes=CHECKIZHINX,RV32IZHINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs < %s \ +; RUN: -target-abi=lp64 | FileCheck -check-prefixes=CHECKIZHINX,RV64IZHINX %s ; RUN: llc -mtriple=riscv32 -mattr=+zfhmin -verify-machineinstrs < %s \ ; RUN: -target-abi=ilp32f | FileCheck -check-prefixes=CHECKIZFHMIN,RV32IZFHMIN %s ; RUN: llc -mtriple=riscv64 -mattr=+zfhmin -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64f | FileCheck -check-prefixes=CHECKIZFHMIN,RV64IZFHMIN %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinxmin -verify-machineinstrs 
< %s \ +; RUN: -target-abi=ilp32 | FileCheck -check-prefixes=CHECKIZHINXMIN,RV32IZHINXMIN %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs < %s \ +; RUN: -target-abi=lp64 | FileCheck -check-prefixes=CHECKIZHINXMIN,RV64IZHINXMIN %s define signext i8 @test_floor_si8(half %x) { ; RV32IZFH-LABEL: test_floor_si8: @@ -19,6 +27,36 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rdn ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_floor_si8: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI0_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI0_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB0_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB0_2: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_floor_si8: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI0_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI0_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB0_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB0_2: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_floor_si8: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -54,6 +92,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_floor_si8: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB0_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB0_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_floor_si8: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB0_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB0_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.floor.f16(half %x) %b = fptosi half %a to i8 ret i8 %b @@ -70,6 +142,36 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rdn ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_floor_si16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI1_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI1_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB1_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB1_2: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_floor_si16: 
+; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI1_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI1_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB1_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB1_2: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_floor_si16: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -105,6 +207,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_floor_si16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB1_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB1_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_floor_si16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB1_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB1_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.floor.f16(half %x) %b = fptosi half %a to i16 ret i16 %b @@ -116,6 +252,21 @@ ; CHECKIZFH-NEXT: fcvt.w.h a0, fa0, rdn ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_floor_si32: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI2_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI2_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB2_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rdn +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rdn +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB2_2: +; CHECKIZHINX-NEXT: fcvt.w.h a0, a0, rtz +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_floor_si32: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -133,6 +284,23 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; CHECKIZFHMIN-NEXT: fcvt.w.s a0, fa5, rtz ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_floor_si32: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB2_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB2_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.floor.f16(half %x) %b = fptosi half %a to i32 ret i32 %b @@ -165,6 +333,42 @@ ; 
RV64IZFH-NEXT: fcvt.l.h a0, fa0, rdn ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_floor_si64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: .cfi_offset ra, -4 +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI3_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI3_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB3_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB3_2: +; RV32IZHINX-NEXT: call __fixhfdi@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_floor_si64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI3_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI3_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB3_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB3_2: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_floor_si64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -205,6 +409,45 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_floor_si64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB3_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB3_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4 +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: call __fixhfdi@plt +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_floor_si64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB3_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB3_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.floor.f16(half %x) %b = fptosi half %a to i64 ret i64 %b @@ -221,6 +464,36 @@ ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rdn ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_floor_ui8: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI4_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI4_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB4_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; 
RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB4_2: +; RV32IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_floor_ui8: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI4_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI4_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB4_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB4_2: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_floor_ui8: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -256,6 +529,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_floor_ui8: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB4_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB4_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_floor_ui8: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB4_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB4_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.floor.f16(half %x) %b = fptoui half %a to i8 ret i8 %b @@ -272,6 +579,36 @@ ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rdn ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_floor_ui16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI5_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI5_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB5_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB5_2: +; RV32IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_floor_ui16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI5_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI5_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB5_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB5_2: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_floor_ui16: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -307,6 +644,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_floor_ui16: +; RV32IZHINXMIN: # %bb.0: +; 
RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB5_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB5_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_floor_ui16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB5_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB5_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.floor.f16(half %x) %b = fptoui half %a to i16 ret i16 %b @@ -318,6 +689,21 @@ ; CHECKIZFH-NEXT: fcvt.wu.h a0, fa0, rdn ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_floor_ui32: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI6_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI6_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB6_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rdn +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rdn +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB6_2: +; CHECKIZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_floor_ui32: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -335,6 +721,23 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; CHECKIZFHMIN-NEXT: fcvt.wu.s a0, fa5, rtz ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_floor_ui32: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB6_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB6_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.floor.f16(half %x) %b = fptoui half %a to i32 ret i32 %b @@ -367,6 +770,42 @@ ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rdn ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_floor_ui64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: .cfi_offset ra, -4 +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI7_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI7_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB7_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB7_2: +; RV32IZHINX-NEXT: call __fixunshfdi@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded 
Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_floor_ui64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI7_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI7_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB7_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rdn +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rdn +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB7_2: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_floor_ui64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -407,6 +846,45 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_floor_ui64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB7_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB7_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4 +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: call __fixunshfdi@plt +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_floor_ui64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB7_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB7_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.floor.f16(half %x) %b = fptoui half %a to i64 ret i64 %b @@ -423,6 +901,36 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rup ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_ceil_si8: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI8_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI8_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB8_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rup +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB8_2: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_ceil_si8: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI8_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI8_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB8_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rup +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB8_2: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_ceil_si8: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -458,6 +966,40 @@ ; 
RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_ceil_si8: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB8_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB8_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_ceil_si8: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB8_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB8_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.ceil.f16(half %x) %b = fptosi half %a to i8 ret i8 %b @@ -474,6 +1016,36 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rup ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_ceil_si16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI9_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI9_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB9_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rup +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB9_2: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_ceil_si16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI9_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI9_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB9_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rup +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB9_2: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_ceil_si16: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -509,6 +1081,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_ceil_si16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB9_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB9_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_ceil_si16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; 
RV64IZHINXMIN-NEXT: beqz a1, .LBB9_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB9_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.ceil.f16(half %x) %b = fptosi half %a to i16 ret i16 %b @@ -520,6 +1126,21 @@ ; CHECKIZFH-NEXT: fcvt.w.h a0, fa0, rup ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_ceil_si32: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI10_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI10_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB10_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rup +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rup +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB10_2: +; CHECKIZHINX-NEXT: fcvt.w.h a0, a0, rtz +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_ceil_si32: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -537,6 +1158,23 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; CHECKIZFHMIN-NEXT: fcvt.w.s a0, fa5, rtz ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_ceil_si32: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB10_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB10_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.ceil.f16(half %x) %b = fptosi half %a to i32 ret i32 %b @@ -569,6 +1207,42 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rup ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_ceil_si64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: .cfi_offset ra, -4 +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI11_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI11_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB11_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rup +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB11_2: +; RV32IZHINX-NEXT: call __fixhfdi@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_ceil_si64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI11_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI11_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB11_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rup +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB11_2: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_ceil_si64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -609,6 +1283,45 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, 
fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_ceil_si64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB11_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB11_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4 +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: call __fixhfdi@plt +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_ceil_si64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB11_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB11_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.ceil.f16(half %x) %b = fptosi half %a to i64 ret i64 %b @@ -625,6 +1338,36 @@ ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rup ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_ceil_ui8: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI12_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI12_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB12_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rup +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB12_2: +; RV32IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_ceil_ui8: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI12_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI12_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB12_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rup +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB12_2: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_ceil_ui8: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -660,6 +1403,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_ceil_ui8: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB12_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB12_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_ceil_ui8: +; 
RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB12_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB12_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.ceil.f16(half %x) %b = fptoui half %a to i8 ret i8 %b @@ -676,6 +1453,36 @@ ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rup ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_ceil_ui16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI13_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI13_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB13_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rup +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB13_2: +; RV32IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_ceil_ui16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI13_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI13_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB13_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rup +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB13_2: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_ceil_ui16: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -711,6 +1518,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_ceil_ui16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB13_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB13_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_ceil_ui16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB13_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB13_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.ceil.f16(half %x) %b = fptoui half %a to i16 ret i16 %b @@ -722,6 +1563,21 @@ ; CHECKIZFH-NEXT: fcvt.wu.h a0, fa0, rup ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_ceil_ui32: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI14_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI14_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; 
CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB14_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rup +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rup +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB14_2: +; CHECKIZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_ceil_ui32: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -739,6 +1595,23 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; CHECKIZFHMIN-NEXT: fcvt.wu.s a0, fa5, rtz ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_ceil_ui32: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB14_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB14_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.ceil.f16(half %x) %b = fptoui half %a to i32 ret i32 %b @@ -771,6 +1644,42 @@ ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rup ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_ceil_ui64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: .cfi_offset ra, -4 +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI15_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI15_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB15_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rup +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB15_2: +; RV32IZHINX-NEXT: call __fixunshfdi@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_ceil_ui64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI15_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI15_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB15_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rup +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rup +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB15_2: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_ceil_ui64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -811,6 +1720,45 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_ceil_ui64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB15_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB15_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4 +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; 
RV32IZHINXMIN-NEXT: call __fixunshfdi@plt +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_ceil_ui64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB15_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB15_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.ceil.f16(half %x) %b = fptoui half %a to i64 ret i64 %b @@ -827,6 +1775,36 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rtz ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_trunc_si8: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI16_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI16_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB16_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB16_2: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_trunc_si8: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI16_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI16_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB16_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB16_2: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_trunc_si8: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -862,6 +1840,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_trunc_si8: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB16_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB16_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_trunc_si8: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB16_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB16_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.trunc.f16(half %x) %b = fptosi half %a to i8 ret i8 %b @@ -878,6 +1890,36 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rtz ; RV64IZFH-NEXT: ret 
; +; RV32IZHINX-LABEL: test_trunc_si16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI17_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI17_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB17_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB17_2: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_trunc_si16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI17_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI17_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB17_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB17_2: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_trunc_si16: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -913,6 +1955,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_trunc_si16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB17_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB17_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_trunc_si16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB17_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB17_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.trunc.f16(half %x) %b = fptosi half %a to i16 ret i16 %b @@ -924,6 +2000,21 @@ ; CHECKIZFH-NEXT: fcvt.w.h a0, fa0, rtz ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_trunc_si32: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI18_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI18_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB18_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rtz +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rtz +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB18_2: +; CHECKIZHINX-NEXT: fcvt.w.h a0, a0, rtz +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_trunc_si32: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -941,6 +2032,23 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; CHECKIZFHMIN-NEXT: fcvt.w.s a0, fa5, rtz ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_trunc_si32: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; 
CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB18_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB18_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.trunc.f16(half %x) %b = fptosi half %a to i32 ret i32 %b @@ -973,6 +2081,42 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rtz ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_trunc_si64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: .cfi_offset ra, -4 +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI19_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI19_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB19_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB19_2: +; RV32IZHINX-NEXT: call __fixhfdi@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_trunc_si64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI19_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI19_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB19_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB19_2: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_trunc_si64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1013,6 +2157,45 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_trunc_si64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB19_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB19_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4 +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: call __fixhfdi@plt +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_trunc_si64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB19_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB19_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; 
RV64IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.trunc.f16(half %x) %b = fptosi half %a to i64 ret i64 %b @@ -1029,6 +2212,36 @@ ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rtz ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_trunc_ui8: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI20_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI20_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB20_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB20_2: +; RV32IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_trunc_ui8: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI20_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI20_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB20_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB20_2: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_trunc_ui8: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1064,6 +2277,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_trunc_ui8: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB20_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB20_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_trunc_ui8: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB20_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB20_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.trunc.f16(half %x) %b = fptoui half %a to i8 ret i8 %b @@ -1080,6 +2327,36 @@ ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rtz ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_trunc_ui16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI21_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI21_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB21_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB21_2: +; RV32IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_trunc_ui16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI21_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI21_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; 
RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB21_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB21_2: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_trunc_ui16: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1115,6 +2392,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_trunc_ui16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB21_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB21_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_trunc_ui16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB21_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB21_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.trunc.f16(half %x) %b = fptoui half %a to i16 ret i16 %b @@ -1126,6 +2437,21 @@ ; CHECKIZFH-NEXT: fcvt.wu.h a0, fa0, rtz ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_trunc_ui32: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI22_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI22_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB22_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rtz +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rtz +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB22_2: +; CHECKIZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_trunc_ui32: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1143,6 +2469,23 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; CHECKIZFHMIN-NEXT: fcvt.wu.s a0, fa5, rtz ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_trunc_ui32: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB22_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB22_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.trunc.f16(half %x) %b = fptoui half %a to i32 ret i32 %b @@ -1175,6 +2518,42 @@ ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rtz ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_trunc_ui64: +; RV32IZHINX: # %bb.0: +; 
RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: .cfi_offset ra, -4 +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI23_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI23_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB23_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB23_2: +; RV32IZHINX-NEXT: call __fixunshfdi@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_trunc_ui64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI23_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI23_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB23_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rtz +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rtz +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB23_2: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_trunc_ui64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1215,6 +2594,45 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_trunc_ui64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB23_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB23_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4 +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: call __fixunshfdi@plt +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_trunc_ui64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB23_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB23_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.trunc.f16(half %x) %b = fptoui half %a to i64 ret i64 %b @@ -1231,6 +2649,36 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rmm ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_round_si8: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI24_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI24_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB24_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB24_2: +; RV32IZHINX-NEXT: fcvt.w.h a0, 
a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_round_si8: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI24_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI24_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB24_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB24_2: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_round_si8: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1266,6 +2714,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_round_si8: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB24_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB24_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_round_si8: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB24_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB24_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.round.f16(half %x) %b = fptosi half %a to i8 ret i8 %b @@ -1282,6 +2764,36 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rmm ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_round_si16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI25_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI25_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB25_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB25_2: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_round_si16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI25_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI25_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB25_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB25_2: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_round_si16: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1317,6 +2829,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_round_si16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; 
RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB25_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB25_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_round_si16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB25_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB25_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.round.f16(half %x) %b = fptosi half %a to i16 ret i16 %b @@ -1328,6 +2874,21 @@ ; CHECKIZFH-NEXT: fcvt.w.h a0, fa0, rmm ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_round_si32: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI26_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI26_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB26_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rmm +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rmm +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB26_2: +; CHECKIZHINX-NEXT: fcvt.w.h a0, a0, rtz +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_round_si32: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1345,6 +2906,23 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; CHECKIZFHMIN-NEXT: fcvt.w.s a0, fa5, rtz ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_round_si32: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB26_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB26_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.round.f16(half %x) %b = fptosi half %a to i32 ret i32 %b @@ -1377,6 +2955,42 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rmm ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_round_si64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: .cfi_offset ra, -4 +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI27_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI27_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB27_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB27_2: +; RV32IZHINX-NEXT: call __fixhfdi@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; 
RV64IZHINX-LABEL: test_round_si64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI27_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI27_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB27_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB27_2: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_round_si64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1417,6 +3031,45 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_round_si64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB27_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB27_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4 +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: call __fixhfdi@plt +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_round_si64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB27_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB27_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.round.f16(half %x) %b = fptosi half %a to i64 ret i64 %b @@ -1433,6 +3086,36 @@ ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rmm ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_round_ui8: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI28_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI28_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB28_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB28_2: +; RV32IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_round_ui8: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI28_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI28_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB28_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB28_2: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_round_ui8: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1468,6 +3151,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; 
RV64IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_round_ui8: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB28_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB28_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_round_ui8: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB28_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB28_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.round.f16(half %x) %b = fptoui half %a to i8 ret i8 %b @@ -1484,6 +3201,36 @@ ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rmm ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_round_ui16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI29_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI29_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB29_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB29_2: +; RV32IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_round_ui16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI29_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI29_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB29_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB29_2: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_round_ui16: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1519,6 +3266,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_round_ui16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB29_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB29_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_round_ui16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: 
beqz a1, .LBB29_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB29_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.round.f16(half %x) %b = fptoui half %a to i16 ret i16 %b @@ -1530,6 +3311,21 @@ ; CHECKIZFH-NEXT: fcvt.wu.h a0, fa0, rmm ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_round_ui32: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI30_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI30_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB30_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rmm +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rmm +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB30_2: +; CHECKIZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_round_ui32: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1547,6 +3343,23 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; CHECKIZFHMIN-NEXT: fcvt.wu.s a0, fa5, rtz ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_round_ui32: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB30_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB30_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.round.f16(half %x) %b = fptoui half %a to i32 ret i32 %b @@ -1579,6 +3392,42 @@ ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rmm ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_round_ui64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: .cfi_offset ra, -4 +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI31_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI31_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB31_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB31_2: +; RV32IZHINX-NEXT: call __fixunshfdi@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_round_ui64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI31_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI31_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB31_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rmm +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rmm +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB31_2: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_round_ui64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1619,6 +3468,45 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.lu.s 
a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_round_ui64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB31_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB31_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4 +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: call __fixunshfdi@plt +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_round_ui64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB31_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB31_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.round.f16(half %x) %b = fptoui half %a to i64 ret i64 %b @@ -1635,6 +3523,36 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rne ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_roundeven_si8: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI32_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI32_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB32_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB32_2: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_roundeven_si8: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI32_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI32_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB32_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB32_2: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_roundeven_si8: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1670,6 +3588,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_roundeven_si8: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB32_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB32_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; 
RV64IZHINXMIN-LABEL: test_roundeven_si8: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB32_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB32_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.roundeven.f16(half %x) %b = fptosi half %a to i8 ret i8 %b @@ -1686,6 +3638,36 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rne ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_roundeven_si16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI33_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI33_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB33_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB33_2: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_roundeven_si16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI33_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI33_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB33_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB33_2: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_roundeven_si16: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1721,6 +3703,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_roundeven_si16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB33_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB33_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_roundeven_si16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB33_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB33_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.roundeven.f16(half %x) %b = fptosi half %a to i16 ret i16 %b @@ -1732,6 +3748,21 @@ ; CHECKIZFH-NEXT: fcvt.w.h a0, fa0, rne ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_roundeven_si32: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI34_0) +; 
CHECKIZHINX-NEXT: lh a1, %lo(.LCPI34_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB34_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rne +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rne +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB34_2: +; CHECKIZHINX-NEXT: fcvt.w.h a0, a0, rtz +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_roundeven_si32: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1749,6 +3780,23 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; CHECKIZFHMIN-NEXT: fcvt.w.s a0, fa5, rtz ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_roundeven_si32: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB34_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB34_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.roundeven.f16(half %x) %b = fptosi half %a to i32 ret i32 %b @@ -1781,6 +3829,42 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rne ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_roundeven_si64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: .cfi_offset ra, -4 +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI35_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI35_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB35_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB35_2: +; RV32IZHINX-NEXT: call __fixhfdi@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_roundeven_si64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI35_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI35_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB35_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB35_2: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_roundeven_si64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1821,6 +3905,45 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_roundeven_si64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB35_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB35_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) 
# 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4 +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: call __fixhfdi@plt +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_roundeven_si64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB35_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB35_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.roundeven.f16(half %x) %b = fptosi half %a to i64 ret i64 %b @@ -1837,6 +3960,36 @@ ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rne ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_roundeven_ui8: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI36_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI36_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB36_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB36_2: +; RV32IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_roundeven_ui8: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI36_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI36_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB36_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB36_2: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_roundeven_ui8: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1872,6 +4025,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_roundeven_ui8: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB36_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB36_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_roundeven_ui8: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB36_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB36_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half 
@llvm.roundeven.f16(half %x) %b = fptoui half %a to i8 ret i8 %b @@ -1888,6 +4075,36 @@ ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rne ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_roundeven_ui16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI37_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI37_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB37_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB37_2: +; RV32IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_roundeven_ui16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI37_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI37_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB37_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB37_2: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_roundeven_ui16: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1923,6 +4140,40 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_roundeven_ui16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB37_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB37_2: +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_roundeven_ui16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB37_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB37_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.roundeven.f16(half %x) %b = fptoui half %a to i16 ret i16 %b @@ -1934,6 +4185,21 @@ ; CHECKIZFH-NEXT: fcvt.wu.h a0, fa0, rne ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_roundeven_ui32: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI38_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI38_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB38_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rne +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rne +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB38_2: +; CHECKIZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_roundeven_ui32: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -1951,6 +4217,23 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; CHECKIZFHMIN-NEXT: fcvt.wu.s a0, fa5, rtz ; 
CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_roundeven_ui32: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB38_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB38_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.roundeven.f16(half %x) %b = fptoui half %a to i32 ret i32 %b @@ -1983,6 +4266,42 @@ ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rne ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: test_roundeven_ui64: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: .cfi_offset ra, -4 +; RV32IZHINX-NEXT: lui a1, %hi(.LCPI39_0) +; RV32IZHINX-NEXT: lh a1, %lo(.LCPI39_0)(a1) +; RV32IZHINX-NEXT: fabs.h a2, a0 +; RV32IZHINX-NEXT: flt.h a1, a2, a1 +; RV32IZHINX-NEXT: beqz a1, .LBB39_2 +; RV32IZHINX-NEXT: # %bb.1: +; RV32IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV32IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV32IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV32IZHINX-NEXT: .LBB39_2: +; RV32IZHINX-NEXT: call __fixunshfdi@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: test_roundeven_ui64: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a1, %hi(.LCPI39_0) +; RV64IZHINX-NEXT: lh a1, %lo(.LCPI39_0)(a1) +; RV64IZHINX-NEXT: fabs.h a2, a0 +; RV64IZHINX-NEXT: flt.h a1, a2, a1 +; RV64IZHINX-NEXT: beqz a1, .LBB39_2 +; RV64IZHINX-NEXT: # %bb.1: +; RV64IZHINX-NEXT: fcvt.w.h a1, a0, rne +; RV64IZHINX-NEXT: fcvt.h.w a1, a1, rne +; RV64IZHINX-NEXT: fsgnj.h a0, a1, a0 +; RV64IZHINX-NEXT: .LBB39_2: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret +; ; RV32IZFHMIN-LABEL: test_roundeven_ui64: ; RV32IZFHMIN: # %bb.0: ; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -2023,6 +4342,45 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: test_roundeven_ui64: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: lui a1, 307200 +; RV32IZHINXMIN-NEXT: fabs.s a2, a0 +; RV32IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV32IZHINXMIN-NEXT: beqz a1, .LBB39_2 +; RV32IZHINXMIN-NEXT: # %bb.1: +; RV32IZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; RV32IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV32IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV32IZHINXMIN-NEXT: .LBB39_2: +; RV32IZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-NEXT: .cfi_def_cfa_offset 16 +; RV32IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-NEXT: .cfi_offset ra, -4 +; RV32IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-NEXT: call __fixunshfdi@plt +; RV32IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: test_roundeven_ui64: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: lui a1, 307200 +; RV64IZHINXMIN-NEXT: fabs.s a2, a0 +; RV64IZHINXMIN-NEXT: flt.s a1, a2, a1 +; RV64IZHINXMIN-NEXT: beqz a1, .LBB39_2 +; RV64IZHINXMIN-NEXT: # %bb.1: +; RV64IZHINXMIN-NEXT: fcvt.w.s a1, a0, 
rne +; RV64IZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; RV64IZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; RV64IZHINXMIN-NEXT: .LBB39_2: +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %a = call half @llvm.roundeven.f16(half %x) %b = fptoui half %a to i64 ret i64 %b @@ -2064,6 +4422,20 @@ ; CHECKIZFH-NEXT: .LBB40_2: ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_floor_half: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI40_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI40_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB40_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rdn +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rdn +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB40_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_floor_half: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -2079,6 +4451,21 @@ ; CHECKIZFHMIN-NEXT: .LBB40_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_floor_half: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB40_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rdn +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rdn +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB40_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.floor.f16(half %x) ret half %a } @@ -2119,6 +4506,20 @@ ; CHECKIZFH-NEXT: .LBB41_2: ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_ceil_half: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI41_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI41_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB41_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rup +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rup +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB41_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_ceil_half: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -2134,6 +4535,21 @@ ; CHECKIZFHMIN-NEXT: .LBB41_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_ceil_half: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB41_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rup +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rup +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB41_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.ceil.f16(half %x) ret half %a } @@ -2174,6 +4590,20 @@ ; CHECKIZFH-NEXT: .LBB42_2: ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_trunc_half: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI42_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI42_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB42_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rtz +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rtz +; 
CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB42_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_trunc_half: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -2189,6 +4619,21 @@ ; CHECKIZFHMIN-NEXT: .LBB42_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_trunc_half: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB42_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rtz +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rtz +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB42_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.trunc.f16(half %x) ret half %a } @@ -2229,6 +4674,20 @@ ; CHECKIZFH-NEXT: .LBB43_2: ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_round_half: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI43_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI43_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB43_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rmm +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rmm +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB43_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_round_half: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -2244,6 +4703,21 @@ ; CHECKIZFHMIN-NEXT: .LBB43_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_round_half: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB43_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rmm +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rmm +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB43_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.round.f16(half %x) ret half %a } @@ -2284,6 +4758,20 @@ ; CHECKIZFH-NEXT: .LBB44_2: ; CHECKIZFH-NEXT: ret ; +; CHECKIZHINX-LABEL: test_roundeven_half: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: lui a1, %hi(.LCPI44_0) +; CHECKIZHINX-NEXT: lh a1, %lo(.LCPI44_0)(a1) +; CHECKIZHINX-NEXT: fabs.h a2, a0 +; CHECKIZHINX-NEXT: flt.h a1, a2, a1 +; CHECKIZHINX-NEXT: beqz a1, .LBB44_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: fcvt.w.h a1, a0, rne +; CHECKIZHINX-NEXT: fcvt.h.w a1, a1, rne +; CHECKIZHINX-NEXT: fsgnj.h a0, a1, a0 +; CHECKIZHINX-NEXT: .LBB44_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: test_roundeven_half: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -2299,6 +4787,21 @@ ; CHECKIZFHMIN-NEXT: .LBB44_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: test_roundeven_half: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: lui a1, 307200 +; CHECKIZHINXMIN-NEXT: fabs.s a2, a0 +; CHECKIZHINXMIN-NEXT: flt.s a1, a2, a1 +; CHECKIZHINXMIN-NEXT: beqz a1, .LBB44_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.w.s a1, a0, rne +; CHECKIZHINXMIN-NEXT: fcvt.s.w a1, a1, rne +; CHECKIZHINXMIN-NEXT: fsgnj.s a0, a1, a0 +; CHECKIZHINXMIN-NEXT: .LBB44_2: +; 
CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %a = call half @llvm.roundeven.f16(half %x) ret half %a } diff --git a/llvm/test/CodeGen/RISCV/half-select-fcmp.ll b/llvm/test/CodeGen/RISCV/half-select-fcmp.ll --- a/llvm/test/CodeGen/RISCV/half-select-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/half-select-fcmp.ll @@ -3,10 +3,18 @@ ; RUN: -target-abi ilp32f < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \ ; RUN: -target-abi lp64f < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s | FileCheck -check-prefix=CHECKIZHINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck -check-prefix=CHECKIZHINX %s ; RUN: llc -mtriple=riscv32 -mattr=+zfhmin -verify-machineinstrs < %s \ ; RUN: -target-abi=ilp32f | FileCheck -check-prefix=CHECKIZFHMIN %s ; RUN: llc -mtriple=riscv64 -mattr=+zfhmin -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64f | FileCheck -check-prefix=CHECKIZFHMIN %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinxmin -verify-machineinstrs < %s \ +; RUN: -target-abi=ilp32 | FileCheck -check-prefix=CHECKIZHINXMIN %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs < %s \ +; RUN: -target-abi=lp64 | FileCheck -check-prefix=CHECKIZHINXMIN %s define half @select_fcmp_false(half %a, half %b) nounwind { ; CHECK-LABEL: select_fcmp_false: @@ -14,10 +22,20 @@ ; CHECK-NEXT: fmv.h fa0, fa1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_false: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: mv a0, a1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_false: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fmv.s fa0, fa1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_fcmp_false: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: mv a0, a1 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp false half %a, %b %2 = select i1 %1, half %a, half %b ret half %2 @@ -33,6 +51,15 @@ ; CHECK-NEXT: .LBB1_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_oeq: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: feq.h a2, a0, a1 +; CHECKIZHINX-NEXT: bnez a2, .LBB1_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a0, a1 +; CHECKIZHINX-NEXT: .LBB1_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_oeq: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa4, fa1 @@ -44,6 +71,18 @@ ; CHECKIZFHMIN-NEXT: .LBB1_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_fcmp_oeq: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: feq.s a2, a0, a1 +; CHECKIZHINXMIN-NEXT: bnez a2, .LBB1_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: mv a0, a1 +; CHECKIZHINXMIN-NEXT: .LBB1_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp oeq half %a, %b %2 = select i1 %1, half %a, half %b ret half %2 @@ -59,6 +98,15 @@ ; CHECK-NEXT: .LBB2_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_ogt: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a2, a1, a0 +; CHECKIZHINX-NEXT: bnez a2, .LBB2_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a0, a1 +; CHECKIZHINX-NEXT: .LBB2_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_ogt: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -70,6 +118,18 @@ ; CHECKIZFHMIN-NEXT: .LBB2_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: 
select_fcmp_ogt: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: flt.s a2, a1, a0 +; CHECKIZHINXMIN-NEXT: bnez a2, .LBB2_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: mv a0, a1 +; CHECKIZHINXMIN-NEXT: .LBB2_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp ogt half %a, %b %2 = select i1 %1, half %a, half %b ret half %2 @@ -85,6 +145,15 @@ ; CHECK-NEXT: .LBB3_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_oge: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a2, a1, a0 +; CHECKIZHINX-NEXT: bnez a2, .LBB3_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a0, a1 +; CHECKIZHINX-NEXT: .LBB3_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_oge: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -96,6 +165,18 @@ ; CHECKIZFHMIN-NEXT: .LBB3_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_fcmp_oge: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fle.s a2, a1, a0 +; CHECKIZHINXMIN-NEXT: bnez a2, .LBB3_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: mv a0, a1 +; CHECKIZHINXMIN-NEXT: .LBB3_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp oge half %a, %b %2 = select i1 %1, half %a, half %b ret half %2 @@ -111,6 +192,15 @@ ; CHECK-NEXT: .LBB4_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_olt: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a2, a0, a1 +; CHECKIZHINX-NEXT: bnez a2, .LBB4_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a0, a1 +; CHECKIZHINX-NEXT: .LBB4_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_olt: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa4, fa1 @@ -122,6 +212,18 @@ ; CHECKIZFHMIN-NEXT: .LBB4_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_fcmp_olt: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: flt.s a2, a0, a1 +; CHECKIZHINXMIN-NEXT: bnez a2, .LBB4_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: mv a0, a1 +; CHECKIZHINXMIN-NEXT: .LBB4_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp olt half %a, %b %2 = select i1 %1, half %a, half %b ret half %2 @@ -137,6 +239,15 @@ ; CHECK-NEXT: .LBB5_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_ole: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a2, a0, a1 +; CHECKIZHINX-NEXT: bnez a2, .LBB5_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a0, a1 +; CHECKIZHINX-NEXT: .LBB5_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_ole: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa4, fa1 @@ -148,6 +259,18 @@ ; CHECKIZFHMIN-NEXT: .LBB5_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_fcmp_ole: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fle.s a2, a0, a1 +; CHECKIZHINXMIN-NEXT: bnez a2, .LBB5_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: mv a0, a1 +; CHECKIZHINXMIN-NEXT: .LBB5_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp ole half %a, %b %2 = select i1 %1, half %a, half %b ret half %2 @@ -165,6 +288,17 @@ ; CHECK-NEXT: .LBB6_2: ; 
CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_one: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a2, a0, a1 +; CHECKIZHINX-NEXT: flt.h a3, a1, a0 +; CHECKIZHINX-NEXT: or a2, a3, a2 +; CHECKIZHINX-NEXT: bnez a2, .LBB6_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a0, a1 +; CHECKIZHINX-NEXT: .LBB6_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_one: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa4, fa1 @@ -178,6 +312,20 @@ ; CHECKIZFHMIN-NEXT: .LBB6_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_fcmp_one: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: flt.s a2, a0, a1 +; CHECKIZHINXMIN-NEXT: flt.s a3, a1, a0 +; CHECKIZHINXMIN-NEXT: or a2, a3, a2 +; CHECKIZHINXMIN-NEXT: bnez a2, .LBB6_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: mv a0, a1 +; CHECKIZHINXMIN-NEXT: .LBB6_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp one half %a, %b %2 = select i1 %1, half %a, half %b ret half %2 @@ -195,6 +343,17 @@ ; CHECK-NEXT: .LBB7_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_ord: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: feq.h a2, a1, a1 +; CHECKIZHINX-NEXT: feq.h a3, a0, a0 +; CHECKIZHINX-NEXT: and a2, a3, a2 +; CHECKIZHINX-NEXT: bnez a2, .LBB7_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a0, a1 +; CHECKIZHINX-NEXT: .LBB7_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_ord: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -208,6 +367,20 @@ ; CHECKIZFHMIN-NEXT: .LBB7_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa4 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_fcmp_ord: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: feq.s a2, a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: feq.s a3, a0, a0 +; CHECKIZHINXMIN-NEXT: and a2, a3, a2 +; CHECKIZHINXMIN-NEXT: bnez a2, .LBB7_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: mv a0, a1 +; CHECKIZHINXMIN-NEXT: .LBB7_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp ord half %a, %b %2 = select i1 %1, half %a, half %b ret half %2 @@ -225,6 +398,17 @@ ; CHECK-NEXT: .LBB8_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_ueq: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a2, a0, a1 +; CHECKIZHINX-NEXT: flt.h a3, a1, a0 +; CHECKIZHINX-NEXT: or a2, a3, a2 +; CHECKIZHINX-NEXT: beqz a2, .LBB8_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a0, a1 +; CHECKIZHINX-NEXT: .LBB8_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_ueq: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa4, fa1 @@ -238,6 +422,20 @@ ; CHECKIZFHMIN-NEXT: .LBB8_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_fcmp_ueq: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: flt.s a2, a0, a1 +; CHECKIZHINXMIN-NEXT: flt.s a3, a1, a0 +; CHECKIZHINXMIN-NEXT: or a2, a3, a2 +; CHECKIZHINXMIN-NEXT: beqz a2, .LBB8_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: mv a0, a1 +; CHECKIZHINXMIN-NEXT: .LBB8_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp ueq half %a, %b %2 = select i1 %1, half %a, half %b ret half %2 @@ -253,6 +451,15 @@ ; CHECK-NEXT: .LBB9_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: 
select_fcmp_ugt: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a2, a0, a1 +; CHECKIZHINX-NEXT: beqz a2, .LBB9_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a0, a1 +; CHECKIZHINX-NEXT: .LBB9_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_ugt: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa4, fa1 @@ -264,6 +471,18 @@ ; CHECKIZFHMIN-NEXT: .LBB9_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_fcmp_ugt: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fle.s a2, a0, a1 +; CHECKIZHINXMIN-NEXT: beqz a2, .LBB9_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: mv a0, a1 +; CHECKIZHINXMIN-NEXT: .LBB9_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp ugt half %a, %b %2 = select i1 %1, half %a, half %b ret half %2 @@ -279,6 +498,15 @@ ; CHECK-NEXT: .LBB10_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_uge: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a2, a0, a1 +; CHECKIZHINX-NEXT: beqz a2, .LBB10_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a0, a1 +; CHECKIZHINX-NEXT: .LBB10_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_uge: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa4, fa1 @@ -290,6 +518,18 @@ ; CHECKIZFHMIN-NEXT: .LBB10_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_fcmp_uge: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: flt.s a2, a0, a1 +; CHECKIZHINXMIN-NEXT: beqz a2, .LBB10_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: mv a0, a1 +; CHECKIZHINXMIN-NEXT: .LBB10_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp uge half %a, %b %2 = select i1 %1, half %a, half %b ret half %2 @@ -305,6 +545,15 @@ ; CHECK-NEXT: .LBB11_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_ult: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a2, a1, a0 +; CHECKIZHINX-NEXT: beqz a2, .LBB11_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a0, a1 +; CHECKIZHINX-NEXT: .LBB11_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_ult: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -316,6 +565,18 @@ ; CHECKIZFHMIN-NEXT: .LBB11_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_fcmp_ult: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fle.s a2, a1, a0 +; CHECKIZHINXMIN-NEXT: beqz a2, .LBB11_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: mv a0, a1 +; CHECKIZHINXMIN-NEXT: .LBB11_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp ult half %a, %b %2 = select i1 %1, half %a, half %b ret half %2 @@ -331,6 +592,15 @@ ; CHECK-NEXT: .LBB12_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_ule: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: flt.h a2, a1, a0 +; CHECKIZHINX-NEXT: beqz a2, .LBB12_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a0, a1 +; CHECKIZHINX-NEXT: .LBB12_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_ule: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 @@ -342,6 +612,18 @@ ; CHECKIZFHMIN-NEXT: .LBB12_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: 
select_fcmp_ule: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: flt.s a2, a1, a0 +; CHECKIZHINXMIN-NEXT: beqz a2, .LBB12_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: mv a0, a1 +; CHECKIZHINXMIN-NEXT: .LBB12_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp ule half %a, %b %2 = select i1 %1, half %a, half %b ret half %2 @@ -357,6 +639,15 @@ ; CHECK-NEXT: .LBB13_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_une: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: feq.h a2, a0, a1 +; CHECKIZHINX-NEXT: beqz a2, .LBB13_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a0, a1 +; CHECKIZHINX-NEXT: .LBB13_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_une: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa4, fa1 @@ -368,6 +659,18 @@ ; CHECKIZFHMIN-NEXT: .LBB13_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_fcmp_une: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: feq.s a2, a0, a1 +; CHECKIZHINXMIN-NEXT: beqz a2, .LBB13_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: mv a0, a1 +; CHECKIZHINXMIN-NEXT: .LBB13_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp une half %a, %b %2 = select i1 %1, half %a, half %b ret half %2 @@ -385,6 +688,17 @@ ; CHECK-NEXT: .LBB14_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_uno: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: feq.h a2, a1, a1 +; CHECKIZHINX-NEXT: feq.h a3, a0, a0 +; CHECKIZHINX-NEXT: and a2, a3, a2 +; CHECKIZHINX-NEXT: beqz a2, .LBB14_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a0, a1 +; CHECKIZHINX-NEXT: .LBB14_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_uno: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -398,6 +712,20 @@ ; CHECKIZFHMIN-NEXT: .LBB14_2: ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa4 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_fcmp_uno: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: feq.s a2, a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: feq.s a3, a0, a0 +; CHECKIZHINXMIN-NEXT: and a2, a3, a2 +; CHECKIZHINXMIN-NEXT: beqz a2, .LBB14_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: mv a0, a1 +; CHECKIZHINXMIN-NEXT: .LBB14_2: +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp uno half %a, %b %2 = select i1 %1, half %a, half %b ret half %2 @@ -408,9 +736,17 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_true: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_true: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_fcmp_true: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp true half %a, %b %2 = select i1 %1, half %a, half %b ret half %2 @@ -427,6 +763,16 @@ ; CHECK-NEXT: .LBB16_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: i32_select_fcmp_oeq: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: feq.h a1, a0, a1 +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: bnez a1, .LBB16_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a0, a3 +; CHECKIZHINX-NEXT: .LBB16_2: +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: i32_select_fcmp_oeq: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -437,6 
+783,18 @@ ; CHECKIZFHMIN-NEXT: mv a0, a1 ; CHECKIZFHMIN-NEXT: .LBB16_2: ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: i32_select_fcmp_oeq: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: feq.s a1, a0, a1 +; CHECKIZHINXMIN-NEXT: mv a0, a2 +; CHECKIZHINXMIN-NEXT: bnez a1, .LBB16_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: mv a0, a3 +; CHECKIZHINXMIN-NEXT: .LBB16_2: +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp oeq half %a, %b %2 = select i1 %1, i32 %c, i32 %d ret i32 %2 @@ -450,6 +808,13 @@ ; CHECK-NEXT: sub a0, a1, a0 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_oeq_1_2: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: feq.h a0, a0, a1 +; CHECKIZHINX-NEXT: li a1, 2 +; CHECKIZHINX-NEXT: sub a0, a1, a0 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_oeq_1_2: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -458,6 +823,15 @@ ; CHECKIZFHMIN-NEXT: li a1, 2 ; CHECKIZFHMIN-NEXT: sub a0, a1, a0 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_fcmp_oeq_1_2: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: feq.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: li a1, 2 +; CHECKIZHINXMIN-NEXT: sub a0, a1, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp fast oeq half %a, %b %2 = select i1 %1, i32 1, i32 2 ret i32 %2 @@ -470,6 +844,12 @@ ; CHECK-NEXT: addi a0, a0, -1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_uge_negone_zero: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a0, a0, a1 +; CHECKIZHINX-NEXT: addi a0, a0, -1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_uge_negone_zero: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -477,6 +857,14 @@ ; CHECKIZFHMIN-NEXT: fle.s a0, fa4, fa5 ; CHECKIZFHMIN-NEXT: addi a0, a0, -1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_fcmp_uge_negone_zero: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fle.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: addi a0, a0, -1 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp ugt half %a, %b %2 = select i1 %1, i32 -1, i32 0 ret i32 %2 @@ -489,6 +877,12 @@ ; CHECK-NEXT: addi a0, a0, 1 ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_fcmp_uge_1_2: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: fle.h a0, a0, a1 +; CHECKIZHINX-NEXT: addi a0, a0, 1 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_fcmp_uge_1_2: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa1 @@ -496,6 +890,14 @@ ; CHECKIZFHMIN-NEXT: fle.s a0, fa4, fa5 ; CHECKIZFHMIN-NEXT: addi a0, a0, 1 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_fcmp_uge_1_2: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a1, a1 +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fle.s a0, a0, a1 +; CHECKIZHINXMIN-NEXT: addi a0, a0, 1 +; CHECKIZHINXMIN-NEXT: ret %1 = fcmp ugt half %a, %b %2 = select i1 %1, i32 1, i32 2 ret i32 %2 diff --git a/llvm/test/CodeGen/RISCV/half-select-icmp.ll b/llvm/test/CodeGen/RISCV/half-select-icmp.ll --- a/llvm/test/CodeGen/RISCV/half-select-icmp.ll +++ b/llvm/test/CodeGen/RISCV/half-select-icmp.ll @@ -3,10 +3,18 @@ ; RUN: -target-abi ilp32f < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \ ; RUN: -target-abi lp64f < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi ilp32 < %s | FileCheck 
-check-prefix=CHECKIZHINX %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck -check-prefix=CHECKIZHINX %s ; RUN: llc -mtriple=riscv32 -mattr=+zfhmin -verify-machineinstrs < %s \ ; RUN: -target-abi=ilp32f | FileCheck -check-prefix=CHECKIZFHMIN %s ; RUN: llc -mtriple=riscv64 -mattr=+zfhmin -verify-machineinstrs < %s \ ; RUN: -target-abi=lp64f | FileCheck -check-prefix=CHECKIZFHMIN %s +; RUN: llc -mtriple=riscv32 -mattr=+zhinxmin -verify-machineinstrs < %s \ +; RUN: -target-abi=ilp32 | FileCheck -check-prefix=CHECKIZHINXMIN %s +; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs < %s \ +; RUN: -target-abi=lp64 | FileCheck -check-prefix=CHECKIZHINXMIN %s define half @select_icmp_eq(i32 signext %a, i32 signext %b, half %c, half %d) { ; CHECK-LABEL: select_icmp_eq: @@ -17,6 +25,15 @@ ; CHECK-NEXT: .LBB0_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_icmp_eq: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: beq a0, a1, .LBB0_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a2, a3 +; CHECKIZHINX-NEXT: .LBB0_2: +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_icmp_eq: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: beq a0, a1, .LBB0_2 @@ -28,6 +45,18 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_icmp_eq: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: beq a0, a1, .LBB0_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a3 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret +; CHECKIZHINXMIN-NEXT: .LBB0_2: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a2 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = icmp eq i32 %a, %b %2 = select i1 %1, half %c, half %d ret half %2 @@ -42,6 +71,15 @@ ; CHECK-NEXT: .LBB1_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_icmp_ne: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: bne a0, a1, .LBB1_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a2, a3 +; CHECKIZHINX-NEXT: .LBB1_2: +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_icmp_ne: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: bne a0, a1, .LBB1_2 @@ -53,6 +91,18 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_icmp_ne: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: bne a0, a1, .LBB1_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a3 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret +; CHECKIZHINXMIN-NEXT: .LBB1_2: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a2 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = icmp ne i32 %a, %b %2 = select i1 %1, half %c, half %d ret half %2 @@ -67,6 +117,15 @@ ; CHECK-NEXT: .LBB2_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_icmp_ugt: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: bltu a1, a0, .LBB2_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a2, a3 +; CHECKIZHINX-NEXT: .LBB2_2: +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_icmp_ugt: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: bltu a1, a0, .LBB2_2 @@ -78,6 +137,18 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_icmp_ugt: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: bltu a1, a0, .LBB2_2 +; 
CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a3 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret +; CHECKIZHINXMIN-NEXT: .LBB2_2: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a2 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = icmp ugt i32 %a, %b %2 = select i1 %1, half %c, half %d ret half %2 @@ -92,6 +163,15 @@ ; CHECK-NEXT: .LBB3_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_icmp_uge: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: bgeu a0, a1, .LBB3_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a2, a3 +; CHECKIZHINX-NEXT: .LBB3_2: +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_icmp_uge: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: bgeu a0, a1, .LBB3_2 @@ -103,6 +183,18 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_icmp_uge: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: bgeu a0, a1, .LBB3_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a3 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret +; CHECKIZHINXMIN-NEXT: .LBB3_2: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a2 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = icmp uge i32 %a, %b %2 = select i1 %1, half %c, half %d ret half %2 @@ -117,6 +209,15 @@ ; CHECK-NEXT: .LBB4_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_icmp_ult: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: bltu a0, a1, .LBB4_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a2, a3 +; CHECKIZHINX-NEXT: .LBB4_2: +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_icmp_ult: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: bltu a0, a1, .LBB4_2 @@ -128,6 +229,18 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_icmp_ult: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: bltu a0, a1, .LBB4_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a3 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret +; CHECKIZHINXMIN-NEXT: .LBB4_2: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a2 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = icmp ult i32 %a, %b %2 = select i1 %1, half %c, half %d ret half %2 @@ -142,6 +255,15 @@ ; CHECK-NEXT: .LBB5_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_icmp_ule: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: bgeu a1, a0, .LBB5_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a2, a3 +; CHECKIZHINX-NEXT: .LBB5_2: +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_icmp_ule: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: bgeu a1, a0, .LBB5_2 @@ -153,6 +275,18 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_icmp_ule: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: bgeu a1, a0, .LBB5_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a3 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret +; CHECKIZHINXMIN-NEXT: .LBB5_2: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a2 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = icmp ule i32 %a, %b %2 = select i1 %1, half %c, half %d ret half %2 @@ -167,6 +301,15 @@ ; CHECK-NEXT: .LBB6_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_icmp_sgt: +; CHECKIZHINX: # %bb.0: +; 
CHECKIZHINX-NEXT: blt a1, a0, .LBB6_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a2, a3 +; CHECKIZHINX-NEXT: .LBB6_2: +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_icmp_sgt: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: blt a1, a0, .LBB6_2 @@ -178,6 +321,18 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_icmp_sgt: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: blt a1, a0, .LBB6_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a3 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret +; CHECKIZHINXMIN-NEXT: .LBB6_2: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a2 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = icmp sgt i32 %a, %b %2 = select i1 %1, half %c, half %d ret half %2 @@ -192,6 +347,15 @@ ; CHECK-NEXT: .LBB7_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_icmp_sge: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: bge a0, a1, .LBB7_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a2, a3 +; CHECKIZHINX-NEXT: .LBB7_2: +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_icmp_sge: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: bge a0, a1, .LBB7_2 @@ -203,6 +367,18 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_icmp_sge: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: bge a0, a1, .LBB7_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a3 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret +; CHECKIZHINXMIN-NEXT: .LBB7_2: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a2 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = icmp sge i32 %a, %b %2 = select i1 %1, half %c, half %d ret half %2 @@ -217,6 +393,15 @@ ; CHECK-NEXT: .LBB8_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_icmp_slt: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: blt a0, a1, .LBB8_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a2, a3 +; CHECKIZHINX-NEXT: .LBB8_2: +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_icmp_slt: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: blt a0, a1, .LBB8_2 @@ -228,6 +413,18 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_icmp_slt: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: blt a0, a1, .LBB8_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a3 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret +; CHECKIZHINXMIN-NEXT: .LBB8_2: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a2 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = icmp slt i32 %a, %b %2 = select i1 %1, half %c, half %d ret half %2 @@ -242,6 +439,15 @@ ; CHECK-NEXT: .LBB9_2: ; CHECK-NEXT: ret ; +; CHECKIZHINX-LABEL: select_icmp_sle: +; CHECKIZHINX: # %bb.0: +; CHECKIZHINX-NEXT: bge a1, a0, .LBB9_2 +; CHECKIZHINX-NEXT: # %bb.1: +; CHECKIZHINX-NEXT: mv a2, a3 +; CHECKIZHINX-NEXT: .LBB9_2: +; CHECKIZHINX-NEXT: mv a0, a2 +; CHECKIZHINX-NEXT: ret +; ; CHECKIZFHMIN-LABEL: select_icmp_sle: ; CHECKIZFHMIN: # %bb.0: ; CHECKIZFHMIN-NEXT: bge a1, a0, .LBB9_2 @@ -253,6 +459,18 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: select_icmp_sle: +; CHECKIZHINXMIN: # %bb.0: +; 
CHECKIZHINXMIN-NEXT: bge a1, a0, .LBB9_2 +; CHECKIZHINXMIN-NEXT: # %bb.1: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a3 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret +; CHECKIZHINXMIN-NEXT: .LBB9_2: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a2 +; CHECKIZHINXMIN-NEXT: fcvt.h.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = icmp sle i32 %a, %b %2 = select i1 %1, half %c, half %d ret half %2 diff --git a/llvm/test/CodeGen/RISCV/rv64zfh-half-convert-strict.ll b/llvm/test/CodeGen/RISCV/rv64zfh-half-convert-strict.ll --- a/llvm/test/CodeGen/RISCV/rv64zfh-half-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/rv64zfh-half-convert-strict.ll @@ -2,6 +2,9 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \ ; RUN: -target-abi lp64f -disable-strictnode-mutation < %s | \ ; RUN: FileCheck %s -check-prefix=RV64IZFH +; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi lp64 -disable-strictnode-mutation < %s | \ +; RUN: FileCheck %s -check-prefix=RV64IZHINX ; This file exhaustively checks half<->i32 conversions. In general, ; fcvt.l[u].h can be selected instead of fcvt.w[u].h because poison is @@ -13,6 +16,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.w.h a0, fa0, rtz ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: aext_fptosi: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV64IZHINX-NEXT: ret %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } @@ -23,6 +31,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.w.h a0, fa0, rtz ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: sext_fptosi: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV64IZHINX-NEXT: ret %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } @@ -34,6 +47,13 @@ ; RV64IZFH-NEXT: slli a0, a0, 32 ; RV64IZFH-NEXT: srli a0, a0, 32 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: zext_fptosi: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV64IZHINX-NEXT: slli a0, a0, 32 +; RV64IZHINX-NEXT: srli a0, a0, 32 +; RV64IZHINX-NEXT: ret %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } @@ -43,6 +63,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.wu.h a0, fa0, rtz ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: aext_fptoui: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } @@ -53,6 +78,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.wu.h a0, fa0, rtz ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: sext_fptoui: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } @@ -62,6 +92,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rtz ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: zext_fptoui: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } @@ -71,6 +106,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: uitofp_aext_i32_to_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.h.wu a0, a0 +; RV64IZHINX-NEXT: ret %1 = call half 
@llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -81,6 +121,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: uitofp_sext_i32_to_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.h.wu a0, a0 +; RV64IZHINX-NEXT: ret %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -90,6 +135,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: uitofp_zext_i32_to_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.h.wu a0, a0 +; RV64IZHINX-NEXT: ret %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -99,6 +149,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: sitofp_aext_i32_to_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.h.w a0, a0 +; RV64IZHINX-NEXT: ret %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -109,6 +164,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: sitofp_sext_i32_to_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.h.w a0, a0 +; RV64IZHINX-NEXT: ret %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -118,6 +178,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: sitofp_zext_i32_to_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.h.w a0, a0 +; RV64IZHINX-NEXT: ret %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } diff --git a/llvm/test/CodeGen/RISCV/rv64zfh-half-convert.ll b/llvm/test/CodeGen/RISCV/rv64zfh-half-convert.ll --- a/llvm/test/CodeGen/RISCV/rv64zfh-half-convert.ll +++ b/llvm/test/CodeGen/RISCV/rv64zfh-half-convert.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \ ; RUN: -target-abi lp64f < %s | FileCheck %s -check-prefix=RV64IZFH +; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck %s -check-prefix=RV64IZHINX ; This file exhaustively checks half<->i32 conversions. 
In general, ; fcvt.l[u].h can be selected instead of fcvt.w[u].h because poison is @@ -12,6 +14,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.w.h a0, fa0, rtz ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: aext_fptosi: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV64IZHINX-NEXT: ret %1 = fptosi half %a to i32 ret i32 %1 } @@ -21,6 +28,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.w.h a0, fa0, rtz ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: sext_fptosi: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV64IZHINX-NEXT: ret %1 = fptosi half %a to i32 ret i32 %1 } @@ -32,6 +44,13 @@ ; RV64IZFH-NEXT: slli a0, a0, 32 ; RV64IZFH-NEXT: srli a0, a0, 32 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: zext_fptosi: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.w.h a0, a0, rtz +; RV64IZHINX-NEXT: slli a0, a0, 32 +; RV64IZHINX-NEXT: srli a0, a0, 32 +; RV64IZHINX-NEXT: ret %1 = fptosi half %a to i32 ret i32 %1 } @@ -41,6 +60,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.wu.h a0, fa0, rtz ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: aext_fptoui: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret %1 = fptoui half %a to i32 ret i32 %1 } @@ -50,6 +74,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.wu.h a0, fa0, rtz ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: sext_fptoui: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.wu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret %1 = fptoui half %a to i32 ret i32 %1 } @@ -59,6 +88,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.lu.h a0, fa0, rtz ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: zext_fptoui: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.lu.h a0, a0, rtz +; RV64IZHINX-NEXT: ret %1 = fptoui half %a to i32 ret i32 %1 } @@ -69,6 +103,11 @@ ; RV64IZFH-NEXT: fadd.h fa5, fa0, fa1 ; RV64IZFH-NEXT: fmv.x.h a0, fa5 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: bcvt_f16_to_aext_i16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fadd.h a0, a0, a1 +; RV64IZHINX-NEXT: ret %1 = fadd half %a, %b %2 = bitcast half %1 to i16 ret i16 %2 @@ -80,6 +119,11 @@ ; RV64IZFH-NEXT: fadd.h fa5, fa0, fa1 ; RV64IZFH-NEXT: fmv.x.h a0, fa5 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: bcvt_f16_to_sext_i16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fadd.h a0, a0, a1 +; RV64IZHINX-NEXT: ret %1 = fadd half %a, %b %2 = bitcast half %1 to i16 ret i16 %2 @@ -93,6 +137,13 @@ ; RV64IZFH-NEXT: slli a0, a0, 48 ; RV64IZFH-NEXT: srli a0, a0, 48 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: bcvt_f16_to_zext_i16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fadd.h a0, a0, a1 +; RV64IZHINX-NEXT: slli a0, a0, 48 +; RV64IZHINX-NEXT: srli a0, a0, 48 +; RV64IZHINX-NEXT: ret %1 = fadd half %a, %b %2 = bitcast half %1 to i16 ret i16 %2 @@ -105,6 +156,11 @@ ; RV64IZFH-NEXT: fmv.h.x fa4, a1 ; RV64IZFH-NEXT: fadd.h fa0, fa5, fa4 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: bcvt_i64_to_f16_via_i16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fadd.h a0, a0, a1 +; RV64IZHINX-NEXT: ret %1 = trunc i64 %a to i16 %2 = trunc i64 %b to i16 %3 = bitcast i16 %1 to half @@ -118,6 +174,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: uitofp_aext_i32_to_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.h.wu a0, a0 +; RV64IZHINX-NEXT: ret %1 = uitofp i32 %a to half ret half %1 } @@ -127,6 +188,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: uitofp_sext_i32_to_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.h.wu a0, a0 +; 
RV64IZHINX-NEXT: ret %1 = uitofp i32 %a to half ret half %1 } @@ -136,6 +202,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.h.wu fa0, a0 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: uitofp_zext_i32_to_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.h.wu a0, a0 +; RV64IZHINX-NEXT: ret %1 = uitofp i32 %a to half ret half %1 } @@ -145,6 +216,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: sitofp_aext_i32_to_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.h.w a0, a0 +; RV64IZHINX-NEXT: ret %1 = sitofp i32 %a to half ret half %1 } @@ -154,6 +230,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: sitofp_sext_i32_to_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.h.w a0, a0 +; RV64IZHINX-NEXT: ret %1 = sitofp i32 %a to half ret half %1 } @@ -163,6 +244,11 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.h.w fa0, a0 ; RV64IZFH-NEXT: ret +; +; RV64IZHINX-LABEL: sitofp_zext_i32_to_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.h.w a0, a0 +; RV64IZHINX-NEXT: ret %1 = sitofp i32 %a to half ret half %1 } diff --git a/llvm/test/CodeGen/RISCV/rv64zfh-half-intrinsics.ll b/llvm/test/CodeGen/RISCV/rv64zfh-half-intrinsics.ll --- a/llvm/test/CodeGen/RISCV/rv64zfh-half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/rv64zfh-half-intrinsics.ll @@ -5,6 +5,12 @@ ; RUN: llc < %s -mtriple=riscv64 -mattr=+d \ ; RUN: -mattr=+zfh -verify-machineinstrs -target-abi lp64d | \ ; RUN: FileCheck -check-prefix=RV64IDZFH %s +; RUN: llc < %s -mtriple=riscv64 -mattr=+zhinx \ +; RUN: -verify-machineinstrs -target-abi lp64 | \ +; RUN: FileCheck -check-prefix=RV64IZHINX %s +; RUN: llc < %s -mtriple=riscv64 -mattr=+zdinx \ +; RUN: -mattr=+zhinx -verify-machineinstrs -target-abi lp64 | \ +; RUN: FileCheck -check-prefix=RV64IZDINXZHINX %s ; These intrinsics require half and i64 to be legal types. @@ -20,6 +26,16 @@ ; RV64IDZFH: # %bb.0: ; RV64IDZFH-NEXT: fcvt.l.h a0, fa0 ; RV64IDZFH-NEXT: ret +; +; RV64IZHINX-LABEL: llrint_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0 +; RV64IZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: llrint_f16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.l.h a0, a0 +; RV64IZDINXZHINX-NEXT: ret %1 = call i64 @llvm.llrint.i64.f16(half %a) ret i64 %1 } @@ -36,6 +52,16 @@ ; RV64IDZFH: # %bb.0: ; RV64IDZFH-NEXT: fcvt.l.h a0, fa0, rmm ; RV64IDZFH-NEXT: ret +; +; RV64IZHINX-LABEL: llround_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rmm +; RV64IZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: llround_f16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.l.h a0, a0, rmm +; RV64IZDINXZHINX-NEXT: ret %1 = call i64 @llvm.llround.i64.f16(half %a) ret i64 %1 } diff --git a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll --- a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll +++ b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll @@ -2,6 +2,9 @@ ; RUN: llc -mtriple=riscv64 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: -target-abi lp64f -disable-strictnode-mutation < %s | \ ; RUN: FileCheck %s -check-prefix=RV64IZFHMIN +; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi lp64 -disable-strictnode-mutation < %s | \ +; RUN: FileCheck %s -check-prefix=RV64IZHINXMIN ; This file exhaustively checks half<->i32 conversions. 
@@ -11,6 +14,12 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; RV64IZFHMIN-NEXT: fcvt.w.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: aext_fptosi: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } @@ -22,6 +31,12 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; RV64IZFHMIN-NEXT: fcvt.w.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: sext_fptosi: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } @@ -34,6 +49,14 @@ ; RV64IZFHMIN-NEXT: slli a0, a0, 32 ; RV64IZFHMIN-NEXT: srli a0, a0, 32 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: zext_fptosi: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: slli a0, a0, 32 +; RV64IZHINXMIN-NEXT: srli a0, a0, 32 +; RV64IZHINXMIN-NEXT: ret %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } @@ -44,6 +67,12 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; RV64IZFHMIN-NEXT: fcvt.wu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: aext_fptoui: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } @@ -55,6 +84,12 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; RV64IZFHMIN-NEXT: fcvt.wu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: sext_fptoui: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } @@ -65,6 +100,12 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; RV64IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: zext_fptoui: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp ret i32 %1 } @@ -77,6 +118,14 @@ ; RV64IZFHMIN-NEXT: fcvt.s.lu fa5, a0 ; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: uitofp_aext_i32_to_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: slli a0, a0, 32 +; RV64IZHINXMIN-NEXT: srli a0, a0, 32 +; RV64IZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -90,6 +139,14 @@ ; RV64IZFHMIN-NEXT: fcvt.s.lu fa5, a0 ; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: uitofp_sext_i32_to_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: slli a0, a0, 32 +; RV64IZHINXMIN-NEXT: srli a0, a0, 32 +; RV64IZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ret %1 = call half 
@llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -100,6 +157,12 @@ ; RV64IZFHMIN-NEXT: fcvt.s.lu fa5, a0 ; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: uitofp_zext_i32_to_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -111,6 +174,13 @@ ; RV64IZFHMIN-NEXT: fcvt.s.l fa5, a0 ; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: sitofp_aext_i32_to_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: sext.w a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.l a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -122,6 +192,12 @@ ; RV64IZFHMIN-NEXT: fcvt.s.l fa5, a0 ; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: sitofp_sext_i32_to_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.l a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -133,6 +209,13 @@ ; RV64IZFHMIN-NEXT: fcvt.s.l fa5, a0 ; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: sitofp_zext_i32_to_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: sext.w a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.l a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } diff --git a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert.ll b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert.ll --- a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert.ll +++ b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+zfhmin -verify-machineinstrs \ ; RUN: -target-abi lp64f < %s | FileCheck %s -check-prefix=RV64IZFHMIN +; RUN: llc -mtriple=riscv64 -mattr=+zhinxmin -verify-machineinstrs \ +; RUN: -target-abi lp64 < %s | FileCheck %s -check-prefix=RV64IZHINXMIN ; This file exhaustively checks half<->i32 conversions. 
@@ -10,6 +12,12 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; RV64IZFHMIN-NEXT: fcvt.w.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: aext_fptosi: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %1 = fptosi half %a to i32 ret i32 %1 } @@ -20,6 +28,12 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; RV64IZFHMIN-NEXT: fcvt.w.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: sext_fptosi: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %1 = fptosi half %a to i32 ret i32 %1 } @@ -32,6 +46,14 @@ ; RV64IZFHMIN-NEXT: slli a0, a0, 32 ; RV64IZFHMIN-NEXT: srli a0, a0, 32 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: zext_fptosi: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.w.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: slli a0, a0, 32 +; RV64IZHINXMIN-NEXT: srli a0, a0, 32 +; RV64IZHINXMIN-NEXT: ret %1 = fptosi half %a to i32 ret i32 %1 } @@ -42,6 +64,12 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; RV64IZFHMIN-NEXT: fcvt.wu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: aext_fptoui: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %1 = fptoui half %a to i32 ret i32 %1 } @@ -52,6 +80,12 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; RV64IZFHMIN-NEXT: fcvt.wu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: sext_fptoui: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.wu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %1 = fptoui half %a to i32 ret i32 %1 } @@ -62,6 +96,12 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; RV64IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: zext_fptoui: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.lu.s a0, a0, rtz +; RV64IZHINXMIN-NEXT: ret %1 = fptoui half %a to i32 ret i32 %1 } @@ -75,6 +115,14 @@ ; RV64IZFHMIN-NEXT: fcvt.h.s fa5, fa5 ; RV64IZFHMIN-NEXT: fmv.x.h a0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: bcvt_f16_to_aext_i16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fadd.s a0, a0, a1 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ret %1 = fadd half %a, %b %2 = bitcast half %1 to i16 ret i16 %2 @@ -89,6 +137,14 @@ ; RV64IZFHMIN-NEXT: fcvt.h.s fa5, fa5 ; RV64IZFHMIN-NEXT: fmv.x.h a0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: bcvt_f16_to_sext_i16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fadd.s a0, a0, a1 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ret %1 = fadd half %a, %b %2 = bitcast half %1 to i16 ret i16 %2 @@ -105,6 +161,16 @@ ; RV64IZFHMIN-NEXT: slli a0, a0, 48 ; RV64IZFHMIN-NEXT: srli a0, a0, 48 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: bcvt_f16_to_zext_i16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fadd.s a0, a0, a1 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: slli a0, a0, 48 +; RV64IZHINXMIN-NEXT: srli a0, a0, 48 +; RV64IZHINXMIN-NEXT: ret %1 = fadd half %a, %b %2 = bitcast half %1 to i16 ret i16 %2 @@ -120,6 +186,14 @@ ; RV64IZFHMIN-NEXT: 
fadd.s fa5, fa5, fa4 ; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: bcvt_i64_to_f16_via_i16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a1, a1 +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fadd.s a0, a0, a1 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ret %1 = trunc i64 %a to i16 %2 = trunc i64 %b to i16 %3 = bitcast i16 %1 to half @@ -136,6 +210,14 @@ ; RV64IZFHMIN-NEXT: fcvt.s.lu fa5, a0 ; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: uitofp_aext_i32_to_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: slli a0, a0, 32 +; RV64IZHINXMIN-NEXT: srli a0, a0, 32 +; RV64IZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ret %1 = uitofp i32 %a to half ret half %1 } @@ -148,6 +230,14 @@ ; RV64IZFHMIN-NEXT: fcvt.s.lu fa5, a0 ; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: uitofp_sext_i32_to_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: slli a0, a0, 32 +; RV64IZHINXMIN-NEXT: srli a0, a0, 32 +; RV64IZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ret %1 = uitofp i32 %a to half ret half %1 } @@ -158,6 +248,12 @@ ; RV64IZFHMIN-NEXT: fcvt.s.lu fa5, a0 ; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: uitofp_zext_i32_to_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.lu a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ret %1 = uitofp i32 %a to half ret half %1 } @@ -169,6 +265,13 @@ ; RV64IZFHMIN-NEXT: fcvt.s.l fa5, a0 ; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: sitofp_aext_i32_to_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: sext.w a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.l a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ret %1 = sitofp i32 %a to half ret half %1 } @@ -179,6 +282,12 @@ ; RV64IZFHMIN-NEXT: fcvt.s.l fa5, a0 ; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: sitofp_sext_i32_to_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.l a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ret %1 = sitofp i32 %a to half ret half %1 } @@ -190,6 +299,13 @@ ; RV64IZFHMIN-NEXT: fcvt.s.l fa5, a0 ; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: sitofp_zext_i32_to_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: sext.w a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.s.l a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-NEXT: ret %1 = sitofp i32 %a to half ret half %1 } diff --git a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-intrinsics.ll b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-intrinsics.ll --- a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-intrinsics.ll @@ -5,6 +5,12 @@ ; RUN: llc < %s -mtriple=riscv64 -mattr=+d \ ; RUN: -mattr=+zfhmin -verify-machineinstrs -target-abi lp64d | \ ; RUN: FileCheck -check-prefix=CHECKIZFHMIN %s +; RUN: llc < %s -mtriple=riscv64 -mattr=+zhinxmin \ +; RUN: -verify-machineinstrs -target-abi lp64 | \ +; RUN: FileCheck -check-prefix=CHECKIZHINXMIN %s +; RUN: llc < %s -mtriple=riscv64 -mattr=+zdinx \ +; RUN: -mattr=+zhinxmin -verify-machineinstrs -target-abi lp64 | \ +; RUN: FileCheck -check-prefix=CHECKIZHINXMIN %s ; These intrinsics require half and i64 to be legal types. 
@@ -16,6 +22,12 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECKIZFHMIN-NEXT: fcvt.l.s a0, fa5 ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: llrint_f16: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.l.s a0, a0 +; CHECKIZHINXMIN-NEXT: ret %1 = call i64 @llvm.llrint.i64.f16(half %a) ret i64 %1 } @@ -28,6 +40,12 @@ ; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; CHECKIZFHMIN-NEXT: fcvt.l.s a0, fa5, rmm ; CHECKIZFHMIN-NEXT: ret +; +; CHECKIZHINXMIN-LABEL: llround_f16: +; CHECKIZHINXMIN: # %bb.0: +; CHECKIZHINXMIN-NEXT: fcvt.s.h a0, a0 +; CHECKIZHINXMIN-NEXT: fcvt.l.s a0, a0, rmm +; CHECKIZHINXMIN-NEXT: ret %1 = call i64 @llvm.llround.i64.f16(half %a) ret i64 %1 } diff --git a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll --- a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll +++ b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics-strict.ll @@ -11,6 +11,18 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \ ; RUN: -mattr=+zfh -verify-machineinstrs -target-abi lp64d \ ; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV64IZFH %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zhinx \ +; RUN: -verify-machineinstrs -target-abi ilp32 -disable-strictnode-mutation \ +; RUN: | FileCheck -check-prefix=RV32IZHINX %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zhinx \ +; RUN: -verify-machineinstrs -target-abi lp64 -disable-strictnode-mutation \ +; RUN: | FileCheck -check-prefix=RV64IZHINX %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zdinx \ +; RUN: -mattr=+zhinx -verify-machineinstrs -target-abi ilp32 | \ +; RUN: FileCheck -check-prefix=RV32IZDINXZHINX %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zdinx \ +; RUN: -mattr=+zhinx -verify-machineinstrs -target-abi lp64 | \ +; RUN: FileCheck -check-prefix=RV64IZDINXZHINX %s declare half @llvm.experimental.constrained.sqrt.f16(half, metadata, metadata) @@ -24,6 +36,26 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fsqrt.h fa0, fa0 ; RV64IZFH-NEXT: ret +; +; RV32IZHINX-LABEL: sqrt_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fsqrt.h a0, a0 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: sqrt_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fsqrt.h a0, a0 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: sqrt_f16: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fsqrt.h a0, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: sqrt_f16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fsqrt.h a0, a0 +; RV64IZDINXZHINX-NEXT: ret %1 = call half @llvm.experimental.constrained.sqrt.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -52,6 +84,50 @@ ; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret +; +; RV32IZHINX-LABEL: floor_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call floorf@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: floor_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call floorf@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: 
ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: floor_f16: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINX-NEXT: call floorf@plt +; RV32IZDINXZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: floor_f16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV64IZDINXZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINX-NEXT: call floorf@plt +; RV64IZDINXZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZDINXZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV64IZDINXZHINX-NEXT: ret %1 = call half @llvm.experimental.constrained.floor.f16(half %a, metadata !"fpexcept.strict") strictfp ret half %1 } @@ -80,6 +156,50 @@ ; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret +; +; RV32IZHINX-LABEL: ceil_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call ceilf@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: ceil_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call ceilf@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: ceil_f16: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINX-NEXT: call ceilf@plt +; RV32IZDINXZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: ceil_f16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV64IZDINXZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINX-NEXT: call ceilf@plt +; RV64IZDINXZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZDINXZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV64IZDINXZHINX-NEXT: ret %1 = call half @llvm.experimental.constrained.ceil.f16(half %a, metadata !"fpexcept.strict") strictfp ret half %1 } @@ -108,6 +228,50 @@ ; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret +; +; RV32IZHINX-LABEL: trunc_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call truncf@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: trunc_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded 
Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call truncf@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: trunc_f16: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINX-NEXT: call truncf@plt +; RV32IZDINXZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: trunc_f16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV64IZDINXZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINX-NEXT: call truncf@plt +; RV64IZDINXZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZDINXZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV64IZDINXZHINX-NEXT: ret %1 = call half @llvm.experimental.constrained.trunc.f16(half %a, metadata !"fpexcept.strict") strictfp ret half %1 } @@ -136,6 +300,50 @@ ; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret +; +; RV32IZHINX-LABEL: rint_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call rintf@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: rint_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call rintf@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: rint_f16: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINX-NEXT: call rintf@plt +; RV32IZDINXZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: rint_f16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV64IZDINXZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINX-NEXT: call rintf@plt +; RV64IZDINXZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZDINXZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV64IZDINXZHINX-NEXT: ret %1 = call half @llvm.experimental.constrained.rint.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -164,6 +372,50 @@ ; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret +; +; RV32IZHINX-LABEL: nearbyint_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call nearbyintf@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 
+; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: nearbyint_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call nearbyintf@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: nearbyint_f16: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINX-NEXT: call nearbyintf@plt +; RV32IZDINXZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: nearbyint_f16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV64IZDINXZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINX-NEXT: call nearbyintf@plt +; RV64IZDINXZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZDINXZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV64IZDINXZHINX-NEXT: ret %1 = call half @llvm.experimental.constrained.nearbyint.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -192,6 +444,50 @@ ; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret +; +; RV32IZHINX-LABEL: round_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call roundf@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: round_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call roundf@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: round_f16: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINX-NEXT: call roundf@plt +; RV32IZDINXZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: round_f16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV64IZDINXZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINX-NEXT: call roundf@plt +; RV64IZDINXZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZDINXZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV64IZDINXZHINX-NEXT: ret %1 = call half @llvm.experimental.constrained.round.f16(half %a, metadata !"fpexcept.strict") strictfp ret half %1 } @@ -220,6 +516,50 @@ ; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFH-NEXT: addi sp, sp, 16 ; RV64IZFH-NEXT: ret +; +; RV32IZHINX-LABEL: roundeven_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded 
Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call roundevenf@plt +; RV32IZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: roundeven_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: addi sp, sp, -16 +; RV64IZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZHINX-NEXT: call roundevenf@plt +; RV64IZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINX-NEXT: addi sp, sp, 16 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: roundeven_f16: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINX-NEXT: call roundevenf@plt +; RV32IZDINXZHINX-NEXT: fcvt.h.s a0, a0 +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: roundeven_f16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV64IZDINXZHINX-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINX-NEXT: call roundevenf@plt +; RV64IZDINXZHINX-NEXT: fcvt.h.s a0, a0 +; RV64IZDINXZHINX-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV64IZDINXZHINX-NEXT: ret %1 = call half @llvm.experimental.constrained.roundeven.f16(half %a, metadata !"fpexcept.strict") strictfp ret half %1 } @@ -236,6 +576,26 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.l.h a0, fa0 ; RV64IZFH-NEXT: ret +; +; RV32IZHINX-LABEL: lrint_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: lrint_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: lrint_f16: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.w.h a0, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: lrint_f16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.l.h a0, a0 +; RV64IZDINXZHINX-NEXT: ret %1 = call iXLen @llvm.experimental.constrained.lrint.iXLen.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret iXLen %1 } @@ -252,6 +612,26 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rmm ; RV64IZFH-NEXT: ret +; +; RV32IZHINX-LABEL: lround_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0, rmm +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: lround_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rmm +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: lround_f16: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.w.h a0, a0, rmm +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: lround_f16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.l.h a0, a0, rmm +; RV64IZDINXZHINX-NEXT: ret %1 = call iXLen @llvm.experimental.constrained.lround.iXLen.f16(half %a, metadata !"fpexcept.strict") strictfp ret iXLen %1 } @@ -273,6 +653,36 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.l.h a0, fa0 ; RV64IZFH-NEXT: ret +; +; RV32IZHINX-LABEL: llrint_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call llrintf@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 
16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: llrint_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0 +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: llrint_f16: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINX-NEXT: call llrintf@plt +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: llrint_f16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.l.h a0, a0 +; RV64IZDINXZHINX-NEXT: ret %1 = call i64 @llvm.experimental.constrained.llrint.i64.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret i64 %1 } @@ -294,6 +704,36 @@ ; RV64IZFH: # %bb.0: ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rmm ; RV64IZFH-NEXT: ret +; +; RV32IZHINX-LABEL: llround_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: addi sp, sp, -16 +; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZHINX-NEXT: call llroundf@plt +; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINX-NEXT: addi sp, sp, 16 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: llround_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rmm +; RV64IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: llround_f16: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINX-NEXT: call llroundf@plt +; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINX-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: llround_f16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.l.h a0, a0, rmm +; RV64IZDINXZHINX-NEXT: ret %1 = call i64 @llvm.experimental.constrained.llround.i64.f16(half %a, metadata !"fpexcept.strict") strictfp ret i64 %1 } diff --git a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics.ll b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics.ll --- a/llvm/test/CodeGen/RISCV/zfh-half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/zfh-half-intrinsics.ll @@ -5,12 +5,24 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zfh \ ; RUN: -verify-machineinstrs -target-abi lp64f | \ ; RUN: FileCheck -check-prefix=RV64IZFH %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zhinx \ +; RUN: -verify-machineinstrs -target-abi ilp32 | \ +; RUN: FileCheck -check-prefix=RV32IZHINX %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zhinx \ +; RUN: -verify-machineinstrs -target-abi lp64 | \ +; RUN: FileCheck -check-prefix=RV64IZHINX %s ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+d \ ; RUN: -mattr=+zfh -verify-machineinstrs -target-abi ilp32d | \ ; RUN: FileCheck -check-prefix=RV32IDZFH %s ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \ ; RUN: -mattr=+zfh -verify-machineinstrs -target-abi lp64d | \ ; RUN: FileCheck -check-prefix=RV64IDZFH %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zdinx \ +; RUN: -mattr=+zhinx -verify-machineinstrs -target-abi ilp32 | \ +; RUN: FileCheck -check-prefix=RV32IZDINXZHINX %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zdinx \ +; RUN: -mattr=+zhinx -verify-machineinstrs -target-abi lp64 | \ +; RUN: FileCheck -check-prefix=RV64IZDINXZHINX %s ; These intrinsics require half to be a legal type.
@@ -27,6 +39,16 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0 ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: lrint_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0 +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: lrint_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0 +; RV64IZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: lrint_f16: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: fcvt.w.h a0, fa0 @@ -36,6 +58,16 @@ ; RV64IDZFH: # %bb.0: ; RV64IDZFH-NEXT: fcvt.l.h a0, fa0 ; RV64IDZFH-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: lrint_f16: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.w.h a0, a0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: lrint_f16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.l.h a0, a0 +; RV64IZDINXZHINX-NEXT: ret %1 = call iXLen @llvm.lrint.iXLen.f16(half %a) ret iXLen %1 } @@ -54,6 +86,16 @@ ; RV64IZFH-NEXT: fcvt.l.h a0, fa0, rmm ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: lround_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0, rmm +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: lround_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.l.h a0, a0, rmm +; RV64IZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: lround_f16: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: fcvt.w.h a0, fa0, rmm @@ -63,6 +105,16 @@ ; RV64IDZFH: # %bb.0: ; RV64IDZFH-NEXT: fcvt.l.h a0, fa0, rmm ; RV64IDZFH-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: lround_f16: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.w.h a0, a0, rmm +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: lround_f16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.l.h a0, a0, rmm +; RV64IZDINXZHINX-NEXT: ret %1 = call iXLen @llvm.lround.iXLen.f16(half %a) ret iXLen %1 } @@ -78,6 +130,16 @@ ; RV64IZFH-NEXT: fcvt.w.h a0, fa0, rmm ; RV64IZFH-NEXT: ret ; +; RV32IZHINX-LABEL: lround_i32_f16: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: fcvt.w.h a0, a0, rmm +; RV32IZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: lround_i32_f16: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: fcvt.w.h a0, a0, rmm +; RV64IZHINX-NEXT: ret +; ; RV32IDZFH-LABEL: lround_i32_f16: ; RV32IDZFH: # %bb.0: ; RV32IDZFH-NEXT: fcvt.w.h a0, fa0, rmm @@ -87,6 +149,16 @@ ; RV64IDZFH: # %bb.0: ; RV64IDZFH-NEXT: fcvt.w.h a0, fa0, rmm ; RV64IDZFH-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: lround_i32_f16: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: fcvt.w.h a0, a0, rmm +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: lround_i32_f16: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: fcvt.w.h a0, a0, rmm +; RV64IZDINXZHINX-NEXT: ret %1 = call i32 @llvm.lround.i32.f16(half %a) ret i32 %1 } diff --git a/llvm/test/CodeGen/RISCV/zfh-imm.ll b/llvm/test/CodeGen/RISCV/zfh-imm.ll --- a/llvm/test/CodeGen/RISCV/zfh-imm.ll +++ b/llvm/test/CodeGen/RISCV/zfh-imm.ll @@ -7,6 +7,14 @@ ; RUN: | FileCheck --check-prefix=RV64IZFH %s ; RUN: llc -mtriple=riscv64 -target-abi lp64d -mattr=+zfh,+d < %s \ ; RUN: | FileCheck --check-prefix=RV64IDZFH %s +; RUN: llc -mtriple=riscv32 -target-abi ilp32 -mattr=+zhinx < %s \ +; RUN: | FileCheck --check-prefix=RV32IZHINX %s +; RUN: llc -mtriple=riscv32 -target-abi ilp32 -mattr=+zhinx,+zdinx < %s \ +; RUN: | FileCheck --check-prefix=RV32IZDINXZHINX %s +; RUN: llc -mtriple=riscv64 -target-abi lp64 -mattr=+zhinx < %s \ +; RUN: | FileCheck --check-prefix=RV64IZHINX %s +; RUN: llc -mtriple=riscv64 -target-abi lp64 -mattr=+zhinx,+zdinx < %s \ +; RUN: | FileCheck --check-prefix=RV64IZDINXZHINX %s define half @f16_positive_zero(ptr %pf) nounwind { ; RV32IZFH-LABEL: f16_positive_zero: @@ -28,6 +36,26 
@@ ; RV64IDZFH: # %bb.0: ; RV64IDZFH-NEXT: fmv.h.x fa0, zero ; RV64IDZFH-NEXT: ret +; +; RV32IZHINX-LABEL: f16_positive_zero: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: li a0, 0 +; RV32IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: f16_positive_zero: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: li a0, 0 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: f16_positive_zero: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: li a0, 0 +; RV64IZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: f16_positive_zero: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: li a0, 0 +; RV64IZDINXZHINX-NEXT: ret ret half 0.0 } @@ -55,5 +83,25 @@ ; RV64IDZFH-NEXT: lui a0, 1048568 ; RV64IDZFH-NEXT: fmv.h.x fa0, a0 ; RV64IDZFH-NEXT: ret +; +; RV32IZHINX-LABEL: f16_negative_zero: +; RV32IZHINX: # %bb.0: +; RV32IZHINX-NEXT: lui a0, 1048568 +; RV32IZHINX-NEXT: ret +; +; RV32IZDINXZHINX-LABEL: f16_negative_zero: +; RV32IZDINXZHINX: # %bb.0: +; RV32IZDINXZHINX-NEXT: lui a0, 1048568 +; RV32IZDINXZHINX-NEXT: ret +; +; RV64IZHINX-LABEL: f16_negative_zero: +; RV64IZHINX: # %bb.0: +; RV64IZHINX-NEXT: lui a0, 1048568 +; RV64IZHINX-NEXT: ret +; +; RV64IZDINXZHINX-LABEL: f16_negative_zero: +; RV64IZDINXZHINX: # %bb.0: +; RV64IZDINXZHINX-NEXT: lui a0, 1048568 +; RV64IZDINXZHINX-NEXT: ret ret half -0.0 } diff --git a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll --- a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll +++ b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics-strict.ll @@ -11,6 +11,18 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \ ; RUN: -mattr=+zfhmin -verify-machineinstrs -target-abi lp64d \ ; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV64IZFHMIN %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zhinxmin \ +; RUN: -verify-machineinstrs -target-abi ilp32 -disable-strictnode-mutation \ +; RUN: | FileCheck -check-prefix=RV32IZHINXMIN-STRICT %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zhinxmin \ +; RUN: -verify-machineinstrs -target-abi lp64 -disable-strictnode-mutation \ +; RUN: | FileCheck -check-prefix=RV64IZHINXMIN-STRICT %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zdinx \ +; RUN: -mattr=+zhinxmin -verify-machineinstrs -target-abi ilp32 | \ +; RUN: FileCheck -check-prefix=RV32IZDINXZHINXMIN %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zdinx \ +; RUN: -mattr=+zhinxmin -verify-machineinstrs -target-abi lp64 | \ +; RUN: FileCheck -check-prefix=RV64IZDINXZHINXMIN %s declare half @llvm.experimental.constrained.sqrt.f16(half, metadata, metadata) @@ -28,6 +40,34 @@ ; RV64IZFHMIN-NEXT: fsqrt.s fa5, fa5 ; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-STRICT-LABEL: sqrt_f16: +; RV32IZHINXMIN-STRICT: # %bb.0: +; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: fsqrt.s a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: ret +; +; RV64IZHINXMIN-STRICT-LABEL: sqrt_f16: +; RV64IZHINXMIN-STRICT: # %bb.0: +; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: fsqrt.s a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: ret +; +; RV32IZDINXZHINXMIN-LABEL: sqrt_f16: +; RV32IZDINXZHINXMIN: # %bb.0: +; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINXMIN-NEXT: fsqrt.s a0, a0 +; RV32IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZDINXZHINXMIN-NEXT: ret +; +; RV64IZDINXZHINXMIN-LABEL: sqrt_f16: +; 
RV64IZDINXZHINXMIN: # %bb.0: +; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINXMIN-NEXT: fsqrt.s a0, a0 +; RV64IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZDINXZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.sqrt.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -56,6 +96,50 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-STRICT-LABEL: floor_f16: +; RV32IZHINXMIN-STRICT: # %bb.0: +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: call floorf@plt +; RV32IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-STRICT-NEXT: ret +; +; RV64IZHINXMIN-STRICT-LABEL: floor_f16: +; RV64IZHINXMIN-STRICT: # %bb.0: +; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-STRICT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: call floorf@plt +; RV64IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-STRICT-NEXT: ret +; +; RV32IZDINXZHINXMIN-LABEL: floor_f16: +; RV32IZDINXZHINXMIN: # %bb.0: +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINXMIN-NEXT: call floorf@plt +; RV32IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINXMIN-NEXT: ret +; +; RV64IZDINXZHINXMIN-LABEL: floor_f16: +; RV64IZDINXZHINXMIN: # %bb.0: +; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZDINXZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINXMIN-NEXT: call floorf@plt +; RV64IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZDINXZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZDINXZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.floor.f16(half %a, metadata !"fpexcept.strict") strictfp ret half %1 } @@ -84,6 +168,50 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-STRICT-LABEL: ceil_f16: +; RV32IZHINXMIN-STRICT: # %bb.0: +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: call ceilf@plt +; RV32IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-STRICT-NEXT: ret +; +; RV64IZHINXMIN-STRICT-LABEL: ceil_f16: +; RV64IZHINXMIN-STRICT: # %bb.0: +; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-STRICT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: call ceilf@plt +; RV64IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-STRICT-NEXT: ret +; +; 
RV32IZDINXZHINXMIN-LABEL: ceil_f16: +; RV32IZDINXZHINXMIN: # %bb.0: +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINXMIN-NEXT: call ceilf@plt +; RV32IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINXMIN-NEXT: ret +; +; RV64IZDINXZHINXMIN-LABEL: ceil_f16: +; RV64IZDINXZHINXMIN: # %bb.0: +; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZDINXZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINXMIN-NEXT: call ceilf@plt +; RV64IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZDINXZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZDINXZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.ceil.f16(half %a, metadata !"fpexcept.strict") strictfp ret half %1 } @@ -112,6 +240,50 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-STRICT-LABEL: trunc_f16: +; RV32IZHINXMIN-STRICT: # %bb.0: +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: call truncf@plt +; RV32IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-STRICT-NEXT: ret +; +; RV64IZHINXMIN-STRICT-LABEL: trunc_f16: +; RV64IZHINXMIN-STRICT: # %bb.0: +; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-STRICT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: call truncf@plt +; RV64IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-STRICT-NEXT: ret +; +; RV32IZDINXZHINXMIN-LABEL: trunc_f16: +; RV32IZDINXZHINXMIN: # %bb.0: +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINXMIN-NEXT: call truncf@plt +; RV32IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINXMIN-NEXT: ret +; +; RV64IZDINXZHINXMIN-LABEL: trunc_f16: +; RV64IZDINXZHINXMIN: # %bb.0: +; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZDINXZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINXMIN-NEXT: call truncf@plt +; RV64IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZDINXZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZDINXZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.trunc.f16(half %a, metadata !"fpexcept.strict") strictfp ret half %1 } @@ -140,6 +312,50 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-STRICT-LABEL: rint_f16: +; RV32IZHINXMIN-STRICT: # %bb.0: +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: call rintf@plt 
+; RV32IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-STRICT-NEXT: ret +; +; RV64IZHINXMIN-STRICT-LABEL: rint_f16: +; RV64IZHINXMIN-STRICT: # %bb.0: +; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-STRICT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: call rintf@plt +; RV64IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-STRICT-NEXT: ret +; +; RV32IZDINXZHINXMIN-LABEL: rint_f16: +; RV32IZDINXZHINXMIN: # %bb.0: +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINXMIN-NEXT: call rintf@plt +; RV32IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINXMIN-NEXT: ret +; +; RV64IZDINXZHINXMIN-LABEL: rint_f16: +; RV64IZDINXZHINXMIN: # %bb.0: +; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZDINXZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINXMIN-NEXT: call rintf@plt +; RV64IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZDINXZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZDINXZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.rint.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -168,6 +384,50 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-STRICT-LABEL: nearbyint_f16: +; RV32IZHINXMIN-STRICT: # %bb.0: +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: call nearbyintf@plt +; RV32IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-STRICT-NEXT: ret +; +; RV64IZHINXMIN-STRICT-LABEL: nearbyint_f16: +; RV64IZHINXMIN-STRICT: # %bb.0: +; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-STRICT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: call nearbyintf@plt +; RV64IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-STRICT-NEXT: ret +; +; RV32IZDINXZHINXMIN-LABEL: nearbyint_f16: +; RV32IZDINXZHINXMIN: # %bb.0: +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINXMIN-NEXT: call nearbyintf@plt +; RV32IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINXMIN-NEXT: ret +; +; RV64IZDINXZHINXMIN-LABEL: nearbyint_f16: +; RV64IZDINXZHINXMIN: # %bb.0: +; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZDINXZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINXMIN-NEXT: call 
nearbyintf@plt +; RV64IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZDINXZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZDINXZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.nearbyint.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret half %1 } @@ -196,6 +456,50 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-STRICT-LABEL: round_f16: +; RV32IZHINXMIN-STRICT: # %bb.0: +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: call roundf@plt +; RV32IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-STRICT-NEXT: ret +; +; RV64IZHINXMIN-STRICT-LABEL: round_f16: +; RV64IZHINXMIN-STRICT: # %bb.0: +; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-STRICT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: call roundf@plt +; RV64IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, 16 +; RV64IZHINXMIN-STRICT-NEXT: ret +; +; RV32IZDINXZHINXMIN-LABEL: round_f16: +; RV32IZDINXZHINXMIN: # %bb.0: +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINXMIN-NEXT: call roundf@plt +; RV32IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINXMIN-NEXT: ret +; +; RV64IZDINXZHINXMIN-LABEL: round_f16: +; RV64IZDINXZHINXMIN: # %bb.0: +; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZDINXZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINXMIN-NEXT: call roundf@plt +; RV64IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZDINXZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZDINXZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.round.f16(half %a, metadata !"fpexcept.strict") strictfp ret half %1 } @@ -224,6 +528,50 @@ ; RV64IZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload ; RV64IZFHMIN-NEXT: addi sp, sp, 16 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-STRICT-LABEL: roundeven_f16: +; RV32IZHINXMIN-STRICT: # %bb.0: +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: call roundevenf@plt +; RV32IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-STRICT-NEXT: ret +; +; RV64IZHINXMIN-STRICT-LABEL: roundeven_f16: +; RV64IZHINXMIN-STRICT: # %bb.0: +; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, -16 +; RV64IZHINXMIN-STRICT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: call roundevenf@plt +; RV64IZHINXMIN-STRICT-NEXT: fcvt.h.s a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZHINXMIN-STRICT-NEXT: addi sp, sp, 16 +; 
RV64IZHINXMIN-STRICT-NEXT: ret +; +; RV32IZDINXZHINXMIN-LABEL: roundeven_f16: +; RV32IZDINXZHINXMIN: # %bb.0: +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINXMIN-NEXT: call roundevenf@plt +; RV32IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINXMIN-NEXT: ret +; +; RV64IZDINXZHINXMIN-LABEL: roundeven_f16: +; RV64IZDINXZHINXMIN: # %bb.0: +; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; RV64IZDINXZHINXMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINXMIN-NEXT: call roundevenf@plt +; RV64IZDINXZHINXMIN-NEXT: fcvt.h.s a0, a0 +; RV64IZDINXZHINXMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; RV64IZDINXZHINXMIN-NEXT: ret %1 = call half @llvm.experimental.constrained.roundeven.f16(half %a, metadata !"fpexcept.strict") strictfp ret half %1 } @@ -242,6 +590,30 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-STRICT-LABEL: lrint_f16: +; RV32IZHINXMIN-STRICT: # %bb.0: +; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: fcvt.w.s a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: ret +; +; RV64IZHINXMIN-STRICT-LABEL: lrint_f16: +; RV64IZHINXMIN-STRICT: # %bb.0: +; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: fcvt.l.s a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: ret +; +; RV32IZDINXZHINXMIN-LABEL: lrint_f16: +; RV32IZDINXZHINXMIN: # %bb.0: +; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINXMIN-NEXT: fcvt.w.s a0, a0 +; RV32IZDINXZHINXMIN-NEXT: ret +; +; RV64IZDINXZHINXMIN-LABEL: lrint_f16: +; RV64IZDINXZHINXMIN: # %bb.0: +; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINXMIN-NEXT: fcvt.l.s a0, a0 +; RV64IZDINXZHINXMIN-NEXT: ret %1 = call iXLen @llvm.experimental.constrained.lrint.iXLen.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret iXLen %1 } @@ -260,6 +632,30 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5, rmm ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-STRICT-LABEL: lround_f16: +; RV32IZHINXMIN-STRICT: # %bb.0: +; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: fcvt.w.s a0, a0, rmm +; RV32IZHINXMIN-STRICT-NEXT: ret +; +; RV64IZHINXMIN-STRICT-LABEL: lround_f16: +; RV64IZHINXMIN-STRICT: # %bb.0: +; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: fcvt.l.s a0, a0, rmm +; RV64IZHINXMIN-STRICT-NEXT: ret +; +; RV32IZDINXZHINXMIN-LABEL: lround_f16: +; RV32IZDINXZHINXMIN: # %bb.0: +; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINXMIN-NEXT: fcvt.w.s a0, a0, rmm +; RV32IZDINXZHINXMIN-NEXT: ret +; +; RV64IZDINXZHINXMIN-LABEL: lround_f16: +; RV64IZDINXZHINXMIN: # %bb.0: +; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINXMIN-NEXT: fcvt.l.s a0, a0, rmm +; RV64IZDINXZHINXMIN-NEXT: ret %1 = call iXLen @llvm.experimental.constrained.lround.iXLen.f16(half %a, metadata !"fpexcept.strict") strictfp ret iXLen %1 } @@ -282,6 +678,38 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5 ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-STRICT-LABEL: llrint_f16: +; RV32IZHINXMIN-STRICT: # %bb.0: +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; 
RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: call llrintf@plt +; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-STRICT-NEXT: ret +; +; RV64IZHINXMIN-STRICT-LABEL: llrint_f16: +; RV64IZHINXMIN-STRICT: # %bb.0: +; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: fcvt.l.s a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: ret +; +; RV32IZDINXZHINXMIN-LABEL: llrint_f16: +; RV32IZDINXZHINXMIN: # %bb.0: +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINXMIN-NEXT: call llrintf@plt +; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINXMIN-NEXT: ret +; +; RV64IZDINXZHINXMIN-LABEL: llrint_f16: +; RV64IZDINXZHINXMIN: # %bb.0: +; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINXMIN-NEXT: fcvt.l.s a0, a0 +; RV64IZDINXZHINXMIN-NEXT: ret %1 = call i64 @llvm.experimental.constrained.llrint.i64.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp ret i64 %1 } @@ -304,6 +732,38 @@ ; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; RV64IZFHMIN-NEXT: fcvt.l.s a0, fa5, rmm ; RV64IZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-STRICT-LABEL: llround_f16: +; RV32IZHINXMIN-STRICT: # %bb.0: +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, -16 +; RV32IZHINXMIN-STRICT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-STRICT-NEXT: call llroundf@plt +; RV32IZHINXMIN-STRICT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZHINXMIN-STRICT-NEXT: addi sp, sp, 16 +; RV32IZHINXMIN-STRICT-NEXT: ret +; +; RV64IZHINXMIN-STRICT-LABEL: llround_f16: +; RV64IZHINXMIN-STRICT: # %bb.0: +; RV64IZHINXMIN-STRICT-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-STRICT-NEXT: fcvt.l.s a0, a0, rmm +; RV64IZHINXMIN-STRICT-NEXT: ret +; +; RV32IZDINXZHINXMIN-LABEL: llround_f16: +; RV32IZDINXZHINXMIN: # %bb.0: +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, -16 +; RV32IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINXMIN-NEXT: call llroundf@plt +; RV32IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IZDINXZHINXMIN-NEXT: addi sp, sp, 16 +; RV32IZDINXZHINXMIN-NEXT: ret +; +; RV64IZDINXZHINXMIN-LABEL: llround_f16: +; RV64IZDINXZHINXMIN: # %bb.0: +; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINXMIN-NEXT: fcvt.l.s a0, a0, rmm +; RV64IZDINXZHINXMIN-NEXT: ret %1 = call i64 @llvm.experimental.constrained.llround.i64.f16(half %a, metadata !"fpexcept.strict") strictfp ret i64 %1 } diff --git a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics.ll b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics.ll --- a/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/zfhmin-half-intrinsics.ll @@ -11,6 +11,18 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+d \ ; RUN: -mattr=+zfhmin -verify-machineinstrs -target-abi lp64d | \ ; RUN: FileCheck -check-prefix=RV64IDZFHMIN %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zhinxmin \ +; RUN: -verify-machineinstrs -target-abi ilp32 | \ +; RUN: FileCheck -check-prefix=RV32IZHINXMIN %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zhinxmin \ +; RUN: -verify-machineinstrs -target-abi lp64 | \ +; RUN: FileCheck -check-prefix=RV64IZHINXMIN %s +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zdinx \ 
+; RUN: -mattr=+zhinxmin -verify-machineinstrs -target-abi ilp32 | \ +; RUN: FileCheck -check-prefix=RV32IZDINXZHINXMIN %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zdinx \ +; RUN: -mattr=+zhinxmin -verify-machineinstrs -target-abi lp64 | \ +; RUN: FileCheck -check-prefix=RV64IZDINXZHINXMIN %s ; These intrinsics require half to be a legal type. @@ -40,6 +52,30 @@ ; RV64IDZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; RV64IDZFHMIN-NEXT: fcvt.l.s a0, fa5 ; RV64IDZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: lrint_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.w.s a0, a0 +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: lrint_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a0, a0 +; RV64IZHINXMIN-NEXT: ret +; +; RV32IZDINXZHINXMIN-LABEL: lrint_f16: +; RV32IZDINXZHINXMIN: # %bb.0: +; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINXMIN-NEXT: fcvt.w.s a0, a0 +; RV32IZDINXZHINXMIN-NEXT: ret +; +; RV64IZDINXZHINXMIN-LABEL: lrint_f16: +; RV64IZDINXZHINXMIN: # %bb.0: +; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINXMIN-NEXT: fcvt.l.s a0, a0 +; RV64IZDINXZHINXMIN-NEXT: ret %1 = call iXLen @llvm.lrint.iXLen.f16(half %a) ret iXLen %1 } @@ -70,6 +106,30 @@ ; RV64IDZFHMIN-NEXT: fcvt.s.h fa5, fa0 ; RV64IDZFHMIN-NEXT: fcvt.l.s a0, fa5, rmm ; RV64IDZFHMIN-NEXT: ret +; +; RV32IZHINXMIN-LABEL: lround_f16: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZHINXMIN-NEXT: fcvt.w.s a0, a0, rmm +; RV32IZHINXMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: lround_f16: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZHINXMIN-NEXT: fcvt.l.s a0, a0, rmm +; RV64IZHINXMIN-NEXT: ret +; +; RV32IZDINXZHINXMIN-LABEL: lround_f16: +; RV32IZDINXZHINXMIN: # %bb.0: +; RV32IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV32IZDINXZHINXMIN-NEXT: fcvt.w.s a0, a0, rmm +; RV32IZDINXZHINXMIN-NEXT: ret +; +; RV64IZDINXZHINXMIN-LABEL: lround_f16: +; RV64IZDINXZHINXMIN: # %bb.0: +; RV64IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0 +; RV64IZDINXZHINXMIN-NEXT: fcvt.l.s a0, a0, rmm +; RV64IZDINXZHINXMIN-NEXT: ret %1 = call iXLen @llvm.lround.iXLen.f16(half %a) ret iXLen %1 } diff --git a/llvm/test/CodeGen/RISCV/zfhmin-imm.ll b/llvm/test/CodeGen/RISCV/zfhmin-imm.ll --- a/llvm/test/CodeGen/RISCV/zfhmin-imm.ll +++ b/llvm/test/CodeGen/RISCV/zfhmin-imm.ll @@ -3,10 +3,18 @@ ; RUN: | FileCheck --check-prefix=RV32IZFHMIN %s ; RUN: llc -mtriple=riscv32 -target-abi ilp32d -mattr=+zfhmin,+d < %s \ ; RUN: | FileCheck --check-prefix=RV32IDZFHMIN %s +; RUN: llc -mtriple=riscv32 -target-abi ilp32 -mattr=+zhinxmin < %s \ +; RUN: | FileCheck --check-prefix=RV32IZHINXMIN %s +; RUN: llc -mtriple=riscv32 -target-abi ilp32 -mattr=+zhinxmin,+zdinx < %s \ +; RUN: | FileCheck --check-prefix=RV32IZDINXZHINXMIN %s ; RUN: llc -mtriple=riscv64 -target-abi lp64f -mattr=+zfhmin < %s \ ; RUN: | FileCheck --check-prefix=RV64IZFHMIN %s ; RUN: llc -mtriple=riscv64 -target-abi lp64d -mattr=+zfhmin,+d < %s \ ; RUN: | FileCheck --check-prefix=RV64IDZFHMIN %s +; RUN: llc -mtriple=riscv64 -target-abi lp64 -mattr=+zhinxmin < %s \ +; RUN: | FileCheck --check-prefix=RV64IZHINXMIN %s +; RUN: llc -mtriple=riscv64 -target-abi lp64 -mattr=+zhinxmin,+zdinx < %s \ +; RUN: | FileCheck --check-prefix=RV64IZDINXZHINXMIN %s define half @f16_positive_zero(ptr %pf) nounwind { ; RV32IZFHMIN-LABEL: f16_positive_zero: @@ -19,6 +27,16 @@ ; RV32IDZFHMIN-NEXT: fmv.h.x fa0, zero ; RV32IDZFHMIN-NEXT: ret ; +; RV32IZHINXMIN-LABEL: 
f16_positive_zero: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: li a0, 0 +; RV32IZHINXMIN-NEXT: ret +; +; RV32IZDINXZHINXMIN-LABEL: f16_positive_zero: +; RV32IZDINXZHINXMIN: # %bb.0: +; RV32IZDINXZHINXMIN-NEXT: li a0, 0 +; RV32IZDINXZHINXMIN-NEXT: ret +; ; RV64IZFHMIN-LABEL: f16_positive_zero: ; RV64IZFHMIN: # %bb.0: ; RV64IZFHMIN-NEXT: fmv.h.x fa0, zero @@ -28,6 +46,16 @@ ; RV64IDZFHMIN: # %bb.0: ; RV64IDZFHMIN-NEXT: fmv.h.x fa0, zero ; RV64IDZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: f16_positive_zero: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: li a0, 0 +; RV64IZHINXMIN-NEXT: ret +; +; RV64IZDINXZHINXMIN-LABEL: f16_positive_zero: +; RV64IZDINXZHINXMIN: # %bb.0: +; RV64IZDINXZHINXMIN-NEXT: li a0, 0 +; RV64IZDINXZHINXMIN-NEXT: ret ret half 0.0 } @@ -44,6 +72,16 @@ ; RV32IDZFHMIN-NEXT: fmv.h.x fa0, a0 ; RV32IDZFHMIN-NEXT: ret ; +; RV32IZHINXMIN-LABEL: f16_negative_zero: +; RV32IZHINXMIN: # %bb.0: +; RV32IZHINXMIN-NEXT: lui a0, 1048568 +; RV32IZHINXMIN-NEXT: ret +; +; RV32IZDINXZHINXMIN-LABEL: f16_negative_zero: +; RV32IZDINXZHINXMIN: # %bb.0: +; RV32IZDINXZHINXMIN-NEXT: lui a0, 1048568 +; RV32IZDINXZHINXMIN-NEXT: ret +; ; RV64IZFHMIN-LABEL: f16_negative_zero: ; RV64IZFHMIN: # %bb.0: ; RV64IZFHMIN-NEXT: lui a0, 1048568 @@ -55,5 +93,15 @@ ; RV64IDZFHMIN-NEXT: lui a0, 1048568 ; RV64IDZFHMIN-NEXT: fmv.h.x fa0, a0 ; RV64IDZFHMIN-NEXT: ret +; +; RV64IZHINXMIN-LABEL: f16_negative_zero: +; RV64IZHINXMIN: # %bb.0: +; RV64IZHINXMIN-NEXT: lui a0, 1048568 +; RV64IZHINXMIN-NEXT: ret +; +; RV64IZDINXZHINXMIN-LABEL: f16_negative_zero: +; RV64IZDINXZHINXMIN: # %bb.0: +; RV64IZDINXZHINXMIN-NEXT: lui a0, 1048568 +; RV64IZDINXZHINXMIN-NEXT: ret ret half -0.0 }
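For readers who want to exercise the new Zhinx half-precision lowering outside the lit suite, a minimal standalone sketch follows. It simply mirrors the lrint_f16 test and the +zhinx RUN lines added above; the file name is hypothetical, and the expected instructions are the ones the RV64IZHINX checks already record, so treat it as an illustration rather than an additional test in the patch.

; zhinx-lrint-sample.ll -- illustrative standalone sample (file name is hypothetical)
; Compile with, mirroring the RUN lines above:
;   llc -mtriple=riscv64 -target-abi lp64 -mattr=+zhinx -verify-machineinstrs < zhinx-lrint-sample.ll
declare i64 @llvm.lrint.i64.f16(half)

define i64 @lrint_f16(half %a) nounwind {
  ; With Zhinx the half argument stays in an integer register (a0), so the
  ; RV64 body should reduce to "fcvt.l.h a0, a0" followed by "ret", matching
  ; the RV64IZHINX checks above.
  %1 = call i64 @llvm.lrint.i64.f16(half %a)
  ret i64 %1
}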