diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -58,8 +58,8 @@
   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
       !Subtarget.hasStdExtF()) {
     errs() << "Hard-float 'f' ABI can't be used for a target that "
-              "doesn't support the F instruction set extension (ignoring "
-              "target-abi)\n";
+               "doesn't support the F instruction set extension (ignoring "
+               "target-abi)\n";
     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
              !Subtarget.hasStdExtD()) {
@@ -92,6 +92,16 @@
     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
   if (Subtarget.hasStdExtD())
     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
+  if (Subtarget.hasStdExtZhinx())
+    addRegisterClass(MVT::f16, &RISCV::GPRF16RegClass);
+  if (Subtarget.hasStdExtZfinx())
+    addRegisterClass(MVT::f32, &RISCV::GPRF32RegClass);
+  if (Subtarget.hasStdExtZdinx()) {
+    if (Subtarget.is64Bit())
+      addRegisterClass(MVT::f64, &RISCV::GPRF64RegClass);
+    else
+      addRegisterClass(MVT::f64, &RISCV::GPRPF64RegClass);
+  }
 
   static const MVT::SimpleValueType BoolVecVTs[] = {
       MVT::nxv1i1, MVT::nxv2i1, MVT::nxv4i1, MVT::nxv8i1,
@@ -341,7 +349,7 @@
   if (Subtarget.hasStdExtZfh())
     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
 
-  if (Subtarget.hasStdExtZfh()) {
+  if (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZhinx()) {
     for (auto NT : FPLegalNodeTypes)
       setOperationAction(NT, MVT::f16, Legal);
     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
@@ -379,7 +387,7 @@
     setOperationAction(ISD::FPOWI, MVT::i32, Custom);
   }
 
-  if (Subtarget.hasStdExtF()) {
+  if (Subtarget.hasStdExtF() || Subtarget.hasStdExtZfinx()) {
     for (auto NT : FPLegalNodeTypes)
       setOperationAction(NT, MVT::f32, Legal);
     for (auto CC : FPCCToExpand)
@@ -393,10 +401,11 @@
     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
   }
 
-  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
+  if ((Subtarget.hasStdExtF() || Subtarget.hasStdExtZfinx()) &&
+      Subtarget.is64Bit())
     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
 
-  if (Subtarget.hasStdExtD()) {
+  if (Subtarget.hasStdExtD() || Subtarget.hasStdExtZdinx()) {
     for (auto NT : FPLegalNodeTypes)
       setOperationAction(NT, MVT::f64, Legal);
     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
@@ -421,7 +430,7 @@
     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
   }
 
-  if (Subtarget.hasStdExtF()) {
+  if (Subtarget.hasStdExtF() || Subtarget.hasStdExtZfinx()) {
     setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom);
     setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom);
 
@@ -1011,11 +1020,11 @@
       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
-      if (Subtarget.hasStdExtZfh())
+      if (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZhinx())
         setOperationAction(ISD::BITCAST, MVT::f16, Custom);
-      if (Subtarget.hasStdExtF())
+      if (Subtarget.hasStdExtF() || Subtarget.hasStdExtZfinx())
         setOperationAction(ISD::BITCAST, MVT::f32, Custom);
-      if (Subtarget.hasStdExtD())
+      if (Subtarget.hasStdExtD() || Subtarget.hasStdExtZdinx())
         setOperationAction(ISD::BITCAST, MVT::f64, Custom);
     }
   }
@@ -1044,7 +1053,7 @@
   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
   if (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZbb())
     setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
-  if (Subtarget.hasStdExtF()) {
+  if (Subtarget.hasStdExtF() || Subtarget.hasStdExtZfinx()) {
     setTargetDAGCombine(ISD::ZERO_EXTEND);
     setTargetDAGCombine(ISD::FP_TO_SINT);
     setTargetDAGCombine(ISD::FP_TO_UINT);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -206,7 +206,7 @@
   let isCodeGenOnly = 1;
 }
 
-class PseudoQuietFCMP<RegisterClass Ty>
+class PseudoQuietFCMP<DAGOperand Ty>
     : Pseudo<(outs GPR:$rd), (ins Ty:$rs1, Ty:$rs2), []> {
   let hasSideEffects = 1;
   let mayLoad = 0;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -219,11 +219,12 @@
 def PseudoFLD : PseudoFloatLoad<"fld", FPR64>;
 def PseudoFSD : PseudoStore<"fsd", FPR64>;
+} // Predicates = [HasStdExtD]
+
 let usesCustomInserter = 1 in {
-def PseudoQuietFLE_D : PseudoQuietFCMP<FPR64>;
-def PseudoQuietFLT_D : PseudoQuietFCMP<FPR64>;
+defm PseudoQuietFLE_D : PseudoQuietFCMP_m;
+defm PseudoQuietFLT_D : PseudoQuietFCMP_m;
 }
-} // Predicates = [HasStdExtD]
 
 let Predicates = [HasStdExtZdinx, IsRV64] in {
 def : InstAlias<"fabs.d $rd, $rs", (FSGNJX_D_INX FPR64INX:$rd, FPR64INX:$rs, FPR64INX:$rs)>;
@@ -299,19 +300,21 @@
 // <https://lists.llvm.org/pipermail/llvm-dev/2019-July/133798.html>.
 def : PatFprFpr<fminnum, FMIN_D, FPR64>;
 def : PatFprFpr<fmaxnum, FMAX_D, FPR64>;
+} // Predicates = [HasStdExtD]
 
 /// Setcc
 // FIXME: SETEQ/SETLT/SETLE imply nonans, can we pick better instructions for
 // strict versions of those.
 
 // Match non-signaling FEQ_D
-def : PatSetCC<FPR64, any_fsetcc, SETEQ, FEQ_D>;
-def : PatSetCC<FPR64, any_fsetcc, SETOEQ, FEQ_D>;
-def : PatSetCC<FPR64, strict_fsetcc, SETLT, PseudoQuietFLT_D>;
-def : PatSetCC<FPR64, strict_fsetcc, SETOLT, PseudoQuietFLT_D>;
-def : PatSetCC<FPR64, strict_fsetcc, SETLE, PseudoQuietFLE_D>;
-def : PatSetCC<FPR64, strict_fsetcc, SETOLE, PseudoQuietFLE_D>;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
 
+let Predicates = [HasStdExtD] in {
 // Match signaling FEQ_D
 def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs2, SETEQ),
           (AND (FLE_D $rs1, $rs2),
@@ -324,12 +327,14 @@
             (FLE_D $rs1, $rs1)>;
 def : Pat<(strict_fsetccs FPR64:$rs1, FPR64:$rs1, SETOEQ),
           (FLE_D $rs1, $rs1)>;
+} // Predicates = [HasStdExtD]
 
-def : PatSetCC<FPR64, any_fsetccs, SETLT, FLT_D>;
-def : PatSetCC<FPR64, any_fsetccs, SETOLT, FLT_D>;
-def : PatSetCC<FPR64, any_fsetccs, SETLE, FLE_D>;
-def : PatSetCC<FPR64, any_fsetccs, SETOLE, FLE_D>;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
 
+let Predicates = [HasStdExtD] in {
 def Select_FPR64_Using_CC_GPR : SelectCC_rrirr<FPR64, GPR>;
 
 /// Loads
@@ -382,6 +387,12 @@
 def : Pat<(any_uint_to_fp (i32 GPR:$rs1)), (FCVT_D_WU GPR:$rs1)>;
 } // Predicates = [HasStdExtD, IsRV32]
 
+let Predicates = [HasStdExtZdinx, IsRV64] in {
+// Moves (no conversion)
+def : Pat<(f64 (bitconvert (i64 GPR:$rs1))), (COPY GPR:$rs1)>;
+def : Pat<(i64 (bitconvert GPRF64:$rs1)), (COPY GPRF64:$rs1)>;
+} // Predicates = [HasStdExtZdinx, IsRV64]
+
 let Predicates = [HasStdExtD, IsRV64] in {
 
 /// Float constants
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -279,6 +279,12 @@
     def Ext.Suffix : FPCmp_rr<funct7, funct3, opcodestr, Ext.Reg, Commutable>;
 }
 
+multiclass PseudoQuietFCMP_m<list<ExtInfo_r> Exts> {
+  foreach Ext = Exts in
+    let Predicates = Ext.Predicates in
+    def Ext.Suffix : PseudoQuietFCMP<Ext.Reg>;
+}
+
 //===----------------------------------------------------------------------===//
 // Instructions
 //===----------------------------------------------------------------------===//
@@ -436,11 +442,12 @@
 def PseudoFLW : PseudoFloatLoad<"flw", FPR32>;
 def PseudoFSW : PseudoStore<"fsw", FPR32>;
+} // Predicates = [HasStdExtF]
+
 let usesCustomInserter = 1 in {
-def PseudoQuietFLE_S : PseudoQuietFCMP<FPR32>;
-def PseudoQuietFLT_S : PseudoQuietFCMP<FPR32>;
+defm PseudoQuietFLE_S : PseudoQuietFCMP_m;
+defm PseudoQuietFLT_S : PseudoQuietFCMP_m;
 }
-} // Predicates = [HasStdExtF]
 
 let Predicates = [HasStdExtZfinx] in {
 def : InstAlias<"fabs.s $rd, $rs", (FSGNJX_S_INX FPR32INX:$rd, FPR32INX:$rs, FPR32INX:$rs)>;
@@ -461,22 +468,39 @@
 def fpimmneg0 : PatLeaf<(fpimm), [{ return N->isExactlyValue(-0.0); }]>;
 
 /// Generic pattern classes
-class PatSetCC<RegisterClass Ty, SDPatternOperator OpNode, CondCode Cond,
-               RVInst Inst>
+class PatSetCC<SDPatternOperator OpNode, CondCode Cond, RVInst Inst,
+               DAGOperand Ty>
     : Pat<(OpNode Ty:$rs1, Ty:$rs2, Cond), (Inst $rs1, $rs2)>;
+multiclass PatSetCC_m<SDPatternOperator OpNode, CondCode Cond, RVInst Inst,
+                      list<ExtInfo_r> Exts> {
+  foreach Ext = Exts in
+    let Predicates = Ext.Predicates in
+    def Ext.Suffix : PatSetCC<OpNode, Cond, !cast<RVInst>(Inst#Ext.Suffix),
+                              Ext.Reg>;
+}
 
 class PatFprFpr<SDPatternOperator OpNode, RVInstR Inst,
-                RegisterClass RegTy>
+                DAGOperand RegTy>
     : Pat<(OpNode RegTy:$rs1, RegTy:$rs2), (Inst $rs1, $rs2)>;
 
 class PatFprFprDynFrm<SDPatternOperator OpNode, RVInstRFrm Inst,
-                      RegisterClass RegTy>
+                      DAGOperand RegTy>
     : Pat<(OpNode RegTy:$rs1, RegTy:$rs2), (Inst $rs1, $rs2, 0b111)>;
+multiclass PatFprFprDynFrm_m<SDPatternOperator OpNode, RVInstRFrm Inst,
+                             list<ExtInfo_r> Exts> {
+  foreach Ext = Exts in
+    let Predicates = Ext.Predicates in
+    def Ext.Suffix : PatFprFprDynFrm<OpNode, !cast<RVInstRFrm>(Inst#Ext.Suffix),
+                                     Ext.Reg>;
+}
 
 let Predicates = [HasStdExtF] in {
 
 /// Float constants
 def : Pat<(f32 (fpimm0)), (FMV_W_X X0)>;
 def : Pat<(f32 (fpimmneg0)), (FSGNJN_S (FMV_W_X X0), (FMV_W_X X0))>;
+} // Predicates = [HasStdExtF]
 
 /// Float conversion operations
 
@@ -485,20 +509,28 @@
 
 /// Float arithmetic operations
 
-def : PatFprFprDynFrm<any_fadd, FADD_S, FPR32>;
-def : PatFprFprDynFrm<any_fsub, FSUB_S, FPR32>;
-def : PatFprFprDynFrm<any_fmul, FMUL_S, FPR32>;
-def : PatFprFprDynFrm<any_fdiv, FDIV_S, FPR32>;
+defm : PatFprFprDynFrm_m;
+defm : PatFprFprDynFrm_m;
+defm : PatFprFprDynFrm_m;
+defm : PatFprFprDynFrm_m;
 
+let Predicates = [HasStdExtF] in {
 def : Pat<(any_fsqrt FPR32:$rs1), (FSQRT_S FPR32:$rs1, 0b111)>;
 
 def : Pat<(fneg FPR32:$rs1), (FSGNJN_S $rs1, $rs1)>;
 def : Pat<(fabs FPR32:$rs1), (FSGNJX_S $rs1, $rs1)>;
+} // Predicates = [HasStdExtF]
+
+let Predicates = [HasStdExtZfinx] in {
+def : Pat<(any_fsqrt GPRF32:$rs1), (FSQRT_S_INX GPRF32:$rs1, 0b111)>;
+
+def : Pat<(fneg GPRF32:$rs1), (FSGNJN_S_INX $rs1, $rs1)>;
+def : Pat<(fabs GPRF32:$rs1), (FSGNJX_S_INX $rs1, $rs1)>;
+} // Predicates = [HasStdExtZfinx]
 
 def : PatFprFpr<fcopysign, FSGNJ_S, FPR32>;
 def : Pat<(fcopysign FPR32:$rs1, (fneg FPR32:$rs2)), (FSGNJN_S $rs1, $rs2)>;
 
-// fmadd: rs1 * rs2 + rs3
+let Predicates = [HasStdExtF] in {
 def : Pat<(any_fma FPR32:$rs1, FPR32:$rs2, FPR32:$rs3),
           (FMADD_S $rs1, $rs2, $rs3, 0b111)>;
 
@@ -513,6 +545,7 @@
 // fnmadd: -rs1 * rs2 - rs3
 def : Pat<(any_fma (fneg FPR32:$rs1), FPR32:$rs2, (fneg FPR32:$rs3)),
           (FNMADD_S FPR32:$rs1, FPR32:$rs2, FPR32:$rs3, 0b111)>;
+} // Predicates = [HasStdExtF]
 
 // The ratified 20191213 ISA spec defines fmin and fmax in a way that matches
 // LLVM's fminnum and fmaxnum
@@ -525,13 +558,14 @@
 // strict versions of those.
 
 // Match non-signaling FEQ_S
-def : PatSetCC<FPR32, any_fsetcc, SETEQ, FEQ_S>;
-def : PatSetCC<FPR32, any_fsetcc, SETOEQ, FEQ_S>;
-def : PatSetCC<FPR32, strict_fsetcc, SETLT, PseudoQuietFLT_S>;
-def : PatSetCC<FPR32, strict_fsetcc, SETOLT, PseudoQuietFLT_S>;
-def : PatSetCC<FPR32, strict_fsetcc, SETLE, PseudoQuietFLE_S>;
-def : PatSetCC<FPR32, strict_fsetcc, SETOLE, PseudoQuietFLE_S>;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
 
+let Predicates = [HasStdExtF] in {
 // Match signaling FEQ_S
 def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs2, SETEQ),
           (AND (FLE_S $rs1, $rs2),
@@ -544,12 +578,14 @@
             (FLE_S $rs1, $rs1)>;
 def : Pat<(strict_fsetccs FPR32:$rs1, FPR32:$rs1, SETOEQ),
           (FLE_S $rs1, $rs1)>;
+} // Predicates = [HasStdExtF]
 
-def : PatSetCC<FPR32, any_fsetccs, SETLT, FLT_S>;
-def : PatSetCC<FPR32, any_fsetccs, SETOLT, FLT_S>;
-def : PatSetCC<FPR32, any_fsetccs, SETLE, FLE_S>;
-def : PatSetCC<FPR32, any_fsetccs, SETOLE, FLE_S>;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
 
+let Predicates = [HasStdExtF] in {
 def Select_FPR32_Using_CC_GPR : SelectCC_rrirr<FPR32, GPR>;
 
 /// Loads
@@ -562,6 +598,12 @@
 
 } // Predicates = [HasStdExtF]
 
+let Predicates = [HasStdExtZfinx, IsRV32] in {
+// Moves (no conversion)
+def : Pat<(f32 (bitconvert (i32 GPR:$rs1))), (COPY GPR:$rs1)>;
+def : Pat<(i32 (bitconvert GPRF32:$rs1)), (COPY GPRF32:$rs1)>;
+} // Predicates = [HasStdExtZfinx, IsRV32]
+
 let Predicates = [HasStdExtF, IsRV32] in {
 // Moves (no conversion)
 def : Pat<(bitconvert (i32 GPR:$rs1)), (FMV_W_X GPR:$rs1)>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td
@@ -230,11 +230,12 @@
 let Predicates = [HasStdExtZfhOrZfhmin] in {
 def PseudoFLH : PseudoFloatLoad<"flh", FPR16>;
 def PseudoFSH : PseudoStore<"fsh", FPR16>;
+} // Predicates = [HasStdExtZfhOrZfhmin]
+
 let usesCustomInserter = 1 in {
-def PseudoQuietFLE_H : PseudoQuietFCMP<FPR16>;
-def PseudoQuietFLT_H : PseudoQuietFCMP<FPR16>;
+defm PseudoQuietFLE_H : PseudoQuietFCMP_m;
+defm PseudoQuietFLT_H : PseudoQuietFCMP_m;
 }
-} // Predicates = [HasStdExtZfhOrZfhmin]
 
 let Predicates = [HasStdExtZhinx] in {
 def : InstAlias<"fmv.h $rd, $rs", (FSGNJ_H_INX FPR16INX:$rd, FPR16INX:$rs, FPR16INX:$rs)>;
@@ -301,19 +302,21 @@
 // <https://lists.llvm.org/pipermail/llvm-dev/2019-July/133798.html>.
 def : PatFprFpr<fminnum, FMIN_H, FPR16>;
 def : PatFprFpr<fmaxnum, FMAX_H, FPR16>;
+} // Predicates = [HasStdExtZfh]
 
 /// Setcc
 // FIXME: SETEQ/SETLT/SETLE imply nonans, can we pick better instructions for
 // strict versions of those.
 
-// Match non-signaling FEQ_D
-def : PatSetCC<FPR16, any_fsetcc, SETEQ, FEQ_H>;
-def : PatSetCC<FPR16, any_fsetcc, SETOEQ, FEQ_H>;
-def : PatSetCC<FPR16, strict_fsetcc, SETLT, PseudoQuietFLT_H>;
-def : PatSetCC<FPR16, strict_fsetcc, SETOLT, PseudoQuietFLT_H>;
-def : PatSetCC<FPR16, strict_fsetcc, SETLE, PseudoQuietFLE_H>;
-def : PatSetCC<FPR16, strict_fsetcc, SETOLE, PseudoQuietFLE_H>;
+// Match non-signaling FEQ_H
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
 
+let Predicates = [HasStdExtZfh] in {
 // Match signaling FEQ_H
 def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs2, SETEQ),
           (AND (FLE_H $rs1, $rs2),
@@ -326,15 +329,24 @@
             (FLE_H $rs1, $rs1)>;
 def : Pat<(strict_fsetccs FPR16:$rs1, FPR16:$rs1, SETOEQ),
           (FLE_H $rs1, $rs1)>;
+} // Predicates = [HasStdExtZfh]
 
-def : PatSetCC<FPR16, any_fsetccs, SETLT, FLT_H>;
-def : PatSetCC<FPR16, any_fsetccs, SETOLT, FLT_H>;
-def : PatSetCC<FPR16, any_fsetccs, SETLE, FLE_H>;
-def : PatSetCC<FPR16, any_fsetccs, SETOLE, FLE_H>;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
+defm : PatSetCC_m;
 
+let Predicates = [HasStdExtZfh] in {
 def Select_FPR16_Using_CC_GPR : SelectCC_rrirr<FPR16, GPR>;
 } // Predicates = [HasStdExtZfh]
 
+let Predicates = [HasStdExtZhinx] in {
+// Moves (no conversion)
+def : Pat<(riscv_fmv_h_x GPR:$src), (COPY GPR:$src)>;
+def : Pat<(riscv_fmv_x_anyexth GPRF16:$src), (COPY GPRF16:$src)>;
+def : Pat<(riscv_fmv_x_signexth GPRF16:$src), (COPY GPRF16:$src)>;
+} // Predicates = [HasStdExtZhinx]
+
 let Predicates = [HasStdExtZfhOrZfhmin] in {
 
 /// Loads
diff --git a/llvm/test/CodeGen/RISCV/double-fcmp.ll b/llvm/test/CodeGen/RISCV/double-fcmp.ll
--- a/llvm/test/CodeGen/RISCV/double-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp.ll
@@ -3,6 +3,8 @@
 ; RUN:   -target-abi=ilp32d | FileCheck -check-prefix=RV32IFD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   -target-abi=lp64d | FileCheck -check-prefix=RV64IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64ZDINX %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -19,6 +21,11 @@
 ; RV64IFD-NEXT: li a0, 0
 ; RV64IFD-NEXT: ret
 ;
+; RV64ZDINX-LABEL: fcmp_false:
+; RV64ZDINX: # %bb.0:
+; RV64ZDINX-NEXT: li a0, 0
+; RV64ZDINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_false:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: li a0, 0
@@ -44,6 +51,11 @@
 ; RV64IFD-NEXT: feq.d a0, fa0, fa1
 ; RV64IFD-NEXT: ret
 ;
+; RV64ZDINX-LABEL: fcmp_oeq:
+; RV64ZDINX: # %bb.0:
+; RV64ZDINX-NEXT: feq.d a0, a0, a1
+; RV64ZDINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_oeq:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -79,6 +91,11 @@
 ; RV64IFD-NEXT: flt.d a0, fa1, fa0
 ; RV64IFD-NEXT: ret
 ;
+; RV64ZDINX-LABEL: fcmp_ogt:
+; RV64ZDINX: # %bb.0:
+; RV64ZDINX-NEXT: flt.d a0, a1, a0
+; RV64ZDINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ogt:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -114,6 +131,11 @@
 ; RV64IFD-NEXT: fle.d a0, fa1, fa0
 ; RV64IFD-NEXT: ret
 ;
+; RV64ZDINX-LABEL: fcmp_oge:
+; RV64ZDINX: # %bb.0:
+; RV64ZDINX-NEXT: fle.d a0, a1, a0
+; RV64ZDINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_oge:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -151,6 +173,11 @@
 ; RV64IFD-NEXT: flt.d a0, fa0, fa1
 ; RV64IFD-NEXT: ret
 ;
+; RV64ZDINX-LABEL: fcmp_olt:
+; RV64ZDINX: # %bb.0:
+; RV64ZDINX-NEXT: flt.d a0, a0, a1
+; RV64ZDINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_olt:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -186,6 +213,11 @@
 ; RV64IFD-NEXT: fle.d a0, fa0, fa1
 ; RV64IFD-NEXT: ret
 ;
+; RV64ZDINX-LABEL: fcmp_ole:
+; RV64ZDINX: # %bb.0:
+; RV64ZDINX-NEXT: fle.d a0, a0, a1
+; RV64ZDINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ole:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -225,6 +257,13 @@
 ; RV64IFD-NEXT: or a0, a1, a0
 ; RV64IFD-NEXT: ret
 ;
+; RV64ZDINX-LABEL: fcmp_one:
+; RV64ZDINX: # %bb.0:
+; RV64ZDINX-NEXT: flt.d a2, a0, a1
+; RV64ZDINX-NEXT: flt.d a0, a1, a0
+; RV64ZDINX-NEXT: or a0, a0, a2
+; RV64ZDINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_one:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -32
@@ -298,6 +337,13 @@
 ; RV64IFD-NEXT: and a0, a1, a0
 ; RV64IFD-NEXT: ret
 ;
+; RV64ZDINX-LABEL: fcmp_ord:
+; RV64ZDINX: # %bb.0:
+; RV64ZDINX-NEXT: feq.d a1, a1, a1
+; RV64ZDINX-NEXT: feq.d a0, a0, a0
+; RV64ZDINX-NEXT: and a0, a0, a1
+; RV64ZDINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ord:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -339,6 +385,14 @@
 ; RV64IFD-NEXT: xori a0, a0, 1
 ; RV64IFD-NEXT: ret
 ;
+; RV64ZDINX-LABEL: fcmp_ueq:
+; RV64ZDINX: # %bb.0:
+; RV64ZDINX-NEXT: flt.d a2, a0, a1
+; RV64ZDINX-NEXT: flt.d a0, a1, a0
+; RV64ZDINX-NEXT: or a0, a0, a2
+; RV64ZDINX-NEXT: xori a0, a0, 1
+; RV64ZDINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ueq:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -32
@@ -410,6 +464,12 @@
 ; RV64IFD-NEXT: xori a0, a0, 1
 ; RV64IFD-NEXT: ret
 ;
+; RV64ZDINX-LABEL: fcmp_ugt:
+; RV64ZDINX: # %bb.0:
+; RV64ZDINX-NEXT: fle.d a0, a0, a1
+; RV64ZDINX-NEXT: xori a0, a0, 1
+; RV64ZDINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ugt:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -447,6 +507,12 @@
 ; RV64IFD-NEXT: xori a0, a0, 1
 ; RV64IFD-NEXT: ret
 ;
+; RV64ZDINX-LABEL: fcmp_uge:
+; RV64ZDINX: # %bb.0:
+; RV64ZDINX-NEXT: flt.d a0, a0, a1
+; RV64ZDINX-NEXT: xori a0, a0, 1
+; RV64ZDINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_uge:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -486,6 +552,12 @@
 ; RV64IFD-NEXT: xori a0, a0, 1
 ; RV64IFD-NEXT: ret
 ;
+; RV64ZDINX-LABEL: fcmp_ult:
+; RV64ZDINX: # %bb.0:
+; RV64ZDINX-NEXT: fle.d a0, a1, a0
+; RV64ZDINX-NEXT: xori a0, a0, 1
+; RV64ZDINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ult:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -523,6 +595,12 @@
 ; RV64IFD-NEXT: xori a0, a0, 1
 ; RV64IFD-NEXT: ret
 ;
+; RV64ZDINX-LABEL: fcmp_ule:
+; RV64ZDINX: # %bb.0:
+; RV64ZDINX-NEXT: flt.d a0, a1, a0
+; RV64ZDINX-NEXT: xori a0, a0, 1
+; RV64ZDINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ule:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -560,6 +638,12 @@
 ; RV64IFD-NEXT: xori a0, a0, 1
 ; RV64IFD-NEXT: ret
 ;
+; RV64ZDINX-LABEL: fcmp_une:
+; RV64ZDINX: # %bb.0:
+; RV64ZDINX-NEXT: feq.d a0, a0, a1
+; RV64ZDINX-NEXT: xori a0, a0, 1
+; RV64ZDINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_une:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -601,6 +685,14 @@
 ; RV64IFD-NEXT: xori a0, a0, 1
 ; RV64IFD-NEXT: ret
 ;
+; RV64ZDINX-LABEL: fcmp_uno:
+; RV64ZDINX: # %bb.0:
+; RV64ZDINX-NEXT: feq.d a1, a1, a1
+; RV64ZDINX-NEXT: feq.d a0, a0, a0
+; RV64ZDINX-NEXT: and a0, a0, a1
+; RV64ZDINX-NEXT: xori a0, a0, 1
+; RV64ZDINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_uno:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -636,6 +728,11 @@
 ; RV64IFD-NEXT: li a0, 1
 ; RV64IFD-NEXT: ret
 ;
+; RV64ZDINX-LABEL: fcmp_true:
+; RV64ZDINX: # %bb.0:
+; RV64ZDINX-NEXT: li a0, 1
+; RV64ZDINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_true:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: li a0, 1
diff --git a/llvm/test/CodeGen/RISCV/float-fcmp.ll b/llvm/test/CodeGen/RISCV/float-fcmp.ll
--- a/llvm/test/CodeGen/RISCV/float-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-fcmp.ll
@@ -3,6 +3,8 @@
 ; RUN:   -target-abi=ilp32f | FileCheck -check-prefix=RV32IF %s
 ; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
 ; RUN:   -target-abi=lp64f | FileCheck -check-prefix=RV64IF %s
+; RUN: llc -mtriple=riscv32 -mattr=+zfinx -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32ZFINX %s
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
@@ -19,6 +21,11 @@
 ; RV64IF-NEXT: li a0, 0
 ; RV64IF-NEXT: ret
 ;
+; RV32ZFINX-LABEL: fcmp_false:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: li a0, 0
+; RV32ZFINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_false:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: li a0, 0
@@ -44,6 +51,11 @@
 ; RV64IF-NEXT: feq.s a0, fa0, fa1
 ; RV64IF-NEXT: ret
 ;
+; RV32ZFINX-LABEL: fcmp_oeq:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: feq.s a0, a0, a1
+; RV32ZFINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_oeq:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -79,6 +91,11 @@
 ; RV64IF-NEXT: flt.s a0, fa1, fa0
 ; RV64IF-NEXT: ret
 ;
+; RV32ZFINX-LABEL: fcmp_ogt:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: flt.s a0, a1, a0
+; RV32ZFINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ogt:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -114,6 +131,11 @@
 ; RV64IF-NEXT: fle.s a0, fa1, fa0
 ; RV64IF-NEXT: ret
 ;
+; RV32ZFINX-LABEL: fcmp_oge:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: fle.s a0, a1, a0
+; RV32ZFINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_oge:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -151,6 +173,11 @@
 ; RV64IF-NEXT: flt.s a0, fa0, fa1
 ; RV64IF-NEXT: ret
 ;
+; RV32ZFINX-LABEL: fcmp_olt:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: flt.s a0, a0, a1
+; RV32ZFINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_olt:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -186,6 +213,11 @@
 ; RV64IF-NEXT: fle.s a0, fa0, fa1
 ; RV64IF-NEXT: ret
 ;
+; RV32ZFINX-LABEL: fcmp_ole:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: fle.s a0, a0, a1
+; RV32ZFINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ole:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -225,6 +257,13 @@
 ; RV64IF-NEXT: or a0, a1, a0
 ; RV64IF-NEXT: ret
 ;
+; RV32ZFINX-LABEL: fcmp_one:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: flt.s a2, a0, a1
+; RV32ZFINX-NEXT: flt.s a0, a1, a0
+; RV32ZFINX-NEXT: or a0, a0, a2
+; RV32ZFINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_one:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -290,6 +329,13 @@
 ; RV64IF-NEXT: and a0, a1, a0
 ; RV64IF-NEXT: ret
 ;
+; RV32ZFINX-LABEL: fcmp_ord:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: feq.s a1, a1, a1
+; RV32ZFINX-NEXT: feq.s a0, a0, a0
+; RV32ZFINX-NEXT: and a0, a0, a1
+; RV32ZFINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ord:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -331,6 +377,14 @@
 ; RV64IF-NEXT: xori a0, a0, 1
 ; RV64IF-NEXT: ret
 ;
+; RV32ZFINX-LABEL: fcmp_ueq:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: flt.s a2, a0, a1
+; RV32ZFINX-NEXT: flt.s a0, a1, a0
+; RV32ZFINX-NEXT: or a0, a0, a2
+; RV32ZFINX-NEXT: xori a0, a0, 1
+; RV32ZFINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ueq:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -394,6 +448,12 @@
 ; RV64IF-NEXT: xori a0, a0, 1
 ; RV64IF-NEXT: ret
 ;
+; RV32ZFINX-LABEL: fcmp_ugt:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: fle.s a0, a0, a1
+; RV32ZFINX-NEXT: xori a0, a0, 1
+; RV32ZFINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ugt:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -431,6 +491,12 @@
 ; RV64IF-NEXT: xori a0, a0, 1
 ; RV64IF-NEXT: ret
 ;
+; RV32ZFINX-LABEL: fcmp_uge:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: flt.s a0, a0, a1
+; RV32ZFINX-NEXT: xori a0, a0, 1
+; RV32ZFINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_uge:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -470,6 +536,12 @@
 ; RV64IF-NEXT: xori a0, a0, 1
 ; RV64IF-NEXT: ret
 ;
+; RV32ZFINX-LABEL: fcmp_ult:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: fle.s a0, a1, a0
+; RV32ZFINX-NEXT: xori a0, a0, 1
+; RV32ZFINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ult:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -507,6 +579,12 @@
 ; RV64IF-NEXT: xori a0, a0, 1
 ; RV64IF-NEXT: ret
 ;
+; RV32ZFINX-LABEL: fcmp_ule:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: flt.s a0, a1, a0
+; RV32ZFINX-NEXT: xori a0, a0, 1
+; RV32ZFINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ule:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -544,6 +622,12 @@
 ; RV64IF-NEXT: xori a0, a0, 1
 ; RV64IF-NEXT: ret
 ;
+; RV32ZFINX-LABEL: fcmp_une:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: feq.s a0, a0, a1
+; RV32ZFINX-NEXT: xori a0, a0, 1
+; RV32ZFINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_une:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -585,6 +669,14 @@
 ; RV64IF-NEXT: xori a0, a0, 1
 ; RV64IF-NEXT: ret
 ;
+; RV32ZFINX-LABEL: fcmp_uno:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: feq.s a1, a1, a1
+; RV32ZFINX-NEXT: feq.s a0, a0, a0
+; RV32ZFINX-NEXT: and a0, a0, a1
+; RV32ZFINX-NEXT: xori a0, a0, 1
+; RV32ZFINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_uno:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: addi sp, sp, -16
@@ -620,6 +712,11 @@
 ; RV64IF-NEXT: li a0, 1
 ; RV64IF-NEXT: ret
 ;
+; RV32ZFINX-LABEL: fcmp_true:
+; RV32ZFINX: # %bb.0:
+; RV32ZFINX-NEXT: li a0, 1
+; RV32ZFINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_true:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: li a0, 1
diff --git a/llvm/test/CodeGen/RISCV/half-fcmp.ll b/llvm/test/CodeGen/RISCV/half-fcmp.ll
--- a/llvm/test/CodeGen/RISCV/half-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-fcmp.ll
@@ -3,6 +3,8 @@
 ; RUN:   -target-abi ilp32f < %s | FileCheck -check-prefix=RV32IZFH %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \
 ; RUN:   -target-abi lp64f < %s | FileCheck -check-prefix=RV64IZFH %s
+; RUN: llc -mtriple=riscv32 -mattr=+zhinx -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32ZHINX %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zfh -verify-machineinstrs \
 ; RUN:   < %s | FileCheck -check-prefix=RV32I %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \
@@ -19,6 +21,11 @@
 ; RV64IZFH-NEXT: li a0, 0
 ; RV64IZFH-NEXT: ret
 ;
+; RV32ZHINX-LABEL: fcmp_false:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: li a0, 0
+; RV32ZHINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_false:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: li a0, 0
@@ -44,6 +51,11 @@
 ; RV64IZFH-NEXT: feq.h a0, fa0, fa1
 ; RV64IZFH-NEXT: ret
 ;
+; RV32ZHINX-LABEL: fcmp_oeq:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: feq.h a0, a0, a1
+; RV32ZHINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_oeq:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: fmv.h.x ft0, a1
@@ -73,6 +85,11 @@
 ; RV64IZFH-NEXT: flt.h a0, fa1, fa0
 ; RV64IZFH-NEXT: ret
 ;
+; RV32ZHINX-LABEL: fcmp_ogt:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: flt.h a0, a1, a0
+; RV32ZHINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ogt:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: fmv.h.x ft0, a0
@@ -102,6 +119,11 @@
 ; RV64IZFH-NEXT: fle.h a0, fa1, fa0
 ; RV64IZFH-NEXT: ret
 ;
+; RV32ZHINX-LABEL: fcmp_oge:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: fle.h a0, a1, a0
+; RV32ZHINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_oge:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: fmv.h.x ft0, a0
@@ -131,6 +153,11 @@
 ; RV64IZFH-NEXT: flt.h a0, fa0, fa1
 ; RV64IZFH-NEXT: ret
 ;
+; RV32ZHINX-LABEL: fcmp_olt:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: flt.h a0, a0, a1
+; RV32ZHINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_olt:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: fmv.h.x ft0, a1
@@ -160,6 +187,11 @@
 ; RV64IZFH-NEXT: fle.h a0, fa0, fa1
 ; RV64IZFH-NEXT: ret
 ;
+; RV32ZHINX-LABEL: fcmp_ole:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: fle.h a0, a0, a1
+; RV32ZHINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ole:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: fmv.h.x ft0, a1
@@ -193,6 +225,13 @@
 ; RV64IZFH-NEXT: or a0, a1, a0
 ; RV64IZFH-NEXT: ret
 ;
+; RV32ZHINX-LABEL: fcmp_one:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: flt.h a2, a0, a1
+; RV32ZHINX-NEXT: flt.h a0, a1, a0
+; RV32ZHINX-NEXT: or a0, a0, a2
+; RV32ZHINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_one:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: fmv.h.x ft0, a1
@@ -230,6 +269,13 @@
 ; RV64IZFH-NEXT: and a0, a1, a0
 ; RV64IZFH-NEXT: ret
 ;
+; RV32ZHINX-LABEL: fcmp_ord:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: feq.h a1, a1, a1
+; RV32ZHINX-NEXT: feq.h a0, a0, a0
+; RV32ZHINX-NEXT: and a0, a0, a1
+; RV32ZHINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ord:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: fmv.h.x ft0, a0
@@ -269,6 +315,14 @@
 ; RV64IZFH-NEXT: xori a0, a0, 1
 ; RV64IZFH-NEXT: ret
 ;
+; RV32ZHINX-LABEL: fcmp_ueq:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: flt.h a2, a0, a1
+; RV32ZHINX-NEXT: flt.h a0, a1, a0
+; RV32ZHINX-NEXT: or a0, a0, a2
+; RV32ZHINX-NEXT: xori a0, a0, 1
+; RV32ZHINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ueq:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: fmv.h.x ft0, a1
@@ -306,6 +360,12 @@
 ; RV64IZFH-NEXT: xori a0, a0, 1
 ; RV64IZFH-NEXT: ret
 ;
+; RV32ZHINX-LABEL: fcmp_ugt:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: fle.h a0, a0, a1
+; RV32ZHINX-NEXT: xori a0, a0, 1
+; RV32ZHINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ugt:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: fmv.h.x ft0, a1
@@ -339,6 +399,12 @@
 ; RV64IZFH-NEXT: xori a0, a0, 1
 ; RV64IZFH-NEXT: ret
 ;
+; RV32ZHINX-LABEL: fcmp_uge:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: flt.h a0, a0, a1
+; RV32ZHINX-NEXT: xori a0, a0, 1
+; RV32ZHINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_uge:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: fmv.h.x ft0, a1
@@ -372,6 +438,12 @@
 ; RV64IZFH-NEXT: xori a0, a0, 1
 ; RV64IZFH-NEXT: ret
 ;
+; RV32ZHINX-LABEL: fcmp_ult:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: fle.h a0, a1, a0
+; RV32ZHINX-NEXT: xori a0, a0, 1
+; RV32ZHINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ult:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: fmv.h.x ft0, a0
@@ -405,6 +477,12 @@
 ; RV64IZFH-NEXT: xori a0, a0, 1
 ; RV64IZFH-NEXT: ret
 ;
+; RV32ZHINX-LABEL: fcmp_ule:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: flt.h a0, a1, a0
+; RV32ZHINX-NEXT: xori a0, a0, 1
+; RV32ZHINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_ule:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: fmv.h.x ft0, a0
@@ -438,6 +516,12 @@
 ; RV64IZFH-NEXT: xori a0, a0, 1
 ; RV64IZFH-NEXT: ret
 ;
+; RV32ZHINX-LABEL: fcmp_une:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: feq.h a0, a0, a1
+; RV32ZHINX-NEXT: xori a0, a0, 1
+; RV32ZHINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_une:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: fmv.h.x ft0, a1
@@ -475,6 +559,14 @@
 ; RV64IZFH-NEXT: xori a0, a0, 1
 ; RV64IZFH-NEXT: ret
 ;
+; RV32ZHINX-LABEL: fcmp_uno:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: feq.h a1, a1, a1
+; RV32ZHINX-NEXT: feq.h a0, a0, a0
+; RV32ZHINX-NEXT: and a0, a0, a1
+; RV32ZHINX-NEXT: xori a0, a0, 1
+; RV32ZHINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_uno:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: fmv.h.x ft0, a0
@@ -510,6 +602,11 @@
 ; RV64IZFH-NEXT: li a0, 1
 ; RV64IZFH-NEXT: ret
 ;
+; RV32ZHINX-LABEL: fcmp_true:
+; RV32ZHINX: # %bb.0:
+; RV32ZHINX-NEXT: li a0, 1
+; RV32ZHINX-NEXT: ret
+;
 ; RV32I-LABEL: fcmp_true:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: li a0, 1
diff --git a/llvm/test/CodeGen/RISCV/target-abi-invalid.ll b/llvm/test/CodeGen/RISCV/target-abi-invalid.ll
--- a/llvm/test/CodeGen/RISCV/target-abi-invalid.ll
+++ b/llvm/test/CodeGen/RISCV/target-abi-invalid.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -target-abi foo < %s 2>&1 \
 ; RUN:   | FileCheck -check-prefix=RV32I-FOO %s
 ; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi ilp32foof < %s 2>&1 \
@@ -54,5 +55,64 @@
 ; RV64IF-LP64D: Hard-float 'd' ABI can't be used for a target that doesn't support the D instruction set extension (ignoring target-abi)
 
 define void @nothing() nounwind {
+; RV32I-FOO-LABEL: nothing:
+; RV32I-FOO: # %bb.0:
+; RV32I-FOO-NEXT: ret
+;
+; RV32IF-ILP32FOOF-LABEL: nothing:
+; RV32IF-ILP32FOOF: # %bb.0:
+; RV32IF-ILP32FOOF-NEXT: ret
+;
+; RV64I-ILP32-LABEL: nothing:
+; RV64I-ILP32: # %bb.0:
+; RV64I-ILP32-NEXT: ret
+;
+; RV64IF-ILP32F-LABEL: nothing:
+; RV64IF-ILP32F: # %bb.0:
+; RV64IF-ILP32F-NEXT: ret
+;
+; RV64IFD-ILP32D-LABEL: nothing:
+; RV64IFD-ILP32D: # %bb.0:
+; RV64IFD-ILP32D-NEXT: ret
+;
+; RV64I-ILP32E-LABEL: nothing:
+; RV64I-ILP32E: # %bb.0:
+; RV64I-ILP32E-NEXT: ret
+;
+; RV32I-LP64-LABEL: nothing:
+; RV32I-LP64: # %bb.0:
+; RV32I-LP64-NEXT: ret
+;
+; RV32IF-LP64F-LABEL: nothing:
+; RV32IF-LP64F: # %bb.0:
+; RV32IF-LP64F-NEXT: ret
+;
+; RV32IFD-LP64D-LABEL: nothing:
+; RV32IFD-LP64D: # %bb.0:
+; RV32IFD-LP64D-NEXT: ret
+;
+; RV32I-ILP32F-LABEL: nothing:
+; RV32I-ILP32F: # %bb.0:
+; RV32I-ILP32F-NEXT: ret
+;
+; RV64I-LP64F-LABEL: nothing:
+; RV64I-LP64F: # %bb.0:
+; RV64I-LP64F-NEXT: ret
+;
+; RV32I-ILP32D-LABEL: nothing:
+; RV32I-ILP32D: # %bb.0:
+; RV32I-ILP32D-NEXT: ret
+;
+; RV32IF-ILP32D-LABEL: nothing:
+; RV32IF-ILP32D: # %bb.0:
+; RV32IF-ILP32D-NEXT: ret
+;
+; RV64I-LP64D-LABEL: nothing:
+; RV64I-LP64D: # %bb.0:
+; RV64I-LP64D-NEXT: ret
+;
+; RV64IF-LP64D-LABEL: nothing:
+; RV64IF-LP64D: # %bb.0:
+; RV64IF-LP64D-NEXT: ret
   ret void
 }