diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -330,6 +330,12 @@ setOperationAction(ISD::LLRINT, MVT::f16, Legal); setOperationAction(ISD::LROUND, MVT::f16, Legal); setOperationAction(ISD::LLROUND, MVT::f16, Legal); + setOperationAction(ISD::STRICT_FADD, MVT::f16, Legal); + setOperationAction(ISD::STRICT_FMA, MVT::f16, Legal); + setOperationAction(ISD::STRICT_FSUB, MVT::f16, Legal); + setOperationAction(ISD::STRICT_FMUL, MVT::f16, Legal); + setOperationAction(ISD::STRICT_FDIV, MVT::f16, Legal); + setOperationAction(ISD::STRICT_FSQRT, MVT::f16, Legal); for (auto CC : FPCCToExpand) setCondCodeAction(CC, MVT::f16, Expand); setOperationAction(ISD::SELECT_CC, MVT::f16, Expand); @@ -367,6 +373,12 @@ setOperationAction(ISD::LLRINT, MVT::f32, Legal); setOperationAction(ISD::LROUND, MVT::f32, Legal); setOperationAction(ISD::LLROUND, MVT::f32, Legal); + setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal); + setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal); + setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal); + setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal); + setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal); + setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal); for (auto CC : FPCCToExpand) setCondCodeAction(CC, MVT::f32, Expand); setOperationAction(ISD::SELECT_CC, MVT::f32, Expand); @@ -388,6 +400,12 @@ setOperationAction(ISD::LLRINT, MVT::f64, Legal); setOperationAction(ISD::LROUND, MVT::f64, Legal); setOperationAction(ISD::LLROUND, MVT::f64, Legal); + setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal); + setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal); + setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal); + setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal); + setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal); + setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal); for (auto CC : FPCCToExpand) setCondCodeAction(CC, MVT::f64, Expand); setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td @@ -190,12 +190,12 @@ /// Float arithmetic operations -def : PatFpr64Fpr64DynFrm<fadd, FADD_D>; -def : PatFpr64Fpr64DynFrm<fsub, FSUB_D>; -def : PatFpr64Fpr64DynFrm<fmul, FMUL_D>; -def : PatFpr64Fpr64DynFrm<fdiv, FDIV_D>; +def : PatFpr64Fpr64DynFrm<any_fadd, FADD_D>; +def : PatFpr64Fpr64DynFrm<any_fsub, FSUB_D>; +def : PatFpr64Fpr64DynFrm<any_fmul, FMUL_D>; +def : PatFpr64Fpr64DynFrm<any_fdiv, FDIV_D>; -def : Pat<(fsqrt FPR64:$rs1), (FSQRT_D FPR64:$rs1, 0b111)>; +def : Pat<(any_fsqrt FPR64:$rs1), (FSQRT_D FPR64:$rs1, 0b111)>; def : Pat<(fneg FPR64:$rs1), (FSGNJN_D $rs1, $rs1)>; def : Pat<(fabs FPR64:$rs1), (FSGNJX_D $rs1, $rs1)>; @@ -207,19 +207,19 @@ 0b111))>; // fmadd: rs1 * rs2 + rs3 -def : Pat<(fma FPR64:$rs1, FPR64:$rs2, FPR64:$rs3), +def : Pat<(any_fma FPR64:$rs1, FPR64:$rs2, FPR64:$rs3), (FMADD_D $rs1, $rs2, $rs3, 0b111)>; // fmsub: rs1 * rs2 - rs3 -def : Pat<(fma FPR64:$rs1, FPR64:$rs2, (fneg FPR64:$rs3)), +def : Pat<(any_fma FPR64:$rs1, FPR64:$rs2, (fneg FPR64:$rs3)), (FMSUB_D FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, 0b111)>; // fnmsub: -rs1 * rs2 + rs3 -def : Pat<(fma (fneg FPR64:$rs1), FPR64:$rs2, FPR64:$rs3), +def : Pat<(any_fma (fneg FPR64:$rs1), FPR64:$rs2, FPR64:$rs3), (FNMSUB_D FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, 0b111)>; // fnmadd: -rs1 * rs2 - rs3 -def : Pat<(fma (fneg FPR64:$rs1), FPR64:$rs2, (fneg FPR64:$rs3)), +def : Pat<(any_fma (fneg 
FPR64:$rs1), FPR64:$rs2, (fneg FPR64:$rs3)), (FNMADD_D FPR64:$rs1, FPR64:$rs2, FPR64:$rs3, 0b111)>; // The ratified 20191213 ISA spec defines fmin and fmax in a way that matches diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td @@ -311,12 +311,12 @@ /// Float arithmetic operations -def : PatFpr32Fpr32DynFrm<fadd, FADD_S>; -def : PatFpr32Fpr32DynFrm<fsub, FSUB_S>; -def : PatFpr32Fpr32DynFrm<fmul, FMUL_S>; -def : PatFpr32Fpr32DynFrm<fdiv, FDIV_S>; +def : PatFpr32Fpr32DynFrm<any_fadd, FADD_S>; +def : PatFpr32Fpr32DynFrm<any_fsub, FSUB_S>; +def : PatFpr32Fpr32DynFrm<any_fmul, FMUL_S>; +def : PatFpr32Fpr32DynFrm<any_fdiv, FDIV_S>; -def : Pat<(fsqrt FPR32:$rs1), (FSQRT_S FPR32:$rs1, 0b111)>; +def : Pat<(any_fsqrt FPR32:$rs1), (FSQRT_S FPR32:$rs1, 0b111)>; def : Pat<(fneg FPR32:$rs1), (FSGNJN_S $rs1, $rs1)>; def : Pat<(fabs FPR32:$rs1), (FSGNJX_S $rs1, $rs1)>; @@ -325,19 +325,19 @@ def : Pat<(fcopysign FPR32:$rs1, (fneg FPR32:$rs2)), (FSGNJN_S $rs1, $rs2)>; // fmadd: rs1 * rs2 + rs3 -def : Pat<(fma FPR32:$rs1, FPR32:$rs2, FPR32:$rs3), +def : Pat<(any_fma FPR32:$rs1, FPR32:$rs2, FPR32:$rs3), (FMADD_S $rs1, $rs2, $rs3, 0b111)>; // fmsub: rs1 * rs2 - rs3 -def : Pat<(fma FPR32:$rs1, FPR32:$rs2, (fneg FPR32:$rs3)), +def : Pat<(any_fma FPR32:$rs1, FPR32:$rs2, (fneg FPR32:$rs3)), (FMSUB_S FPR32:$rs1, FPR32:$rs2, FPR32:$rs3, 0b111)>; // fnmsub: -rs1 * rs2 + rs3 -def : Pat<(fma (fneg FPR32:$rs1), FPR32:$rs2, FPR32:$rs3), +def : Pat<(any_fma (fneg FPR32:$rs1), FPR32:$rs2, FPR32:$rs3), (FNMSUB_S FPR32:$rs1, FPR32:$rs2, FPR32:$rs3, 0b111)>; // fnmadd: -rs1 * rs2 - rs3 -def : Pat<(fma (fneg FPR32:$rs1), FPR32:$rs2, (fneg FPR32:$rs3)), +def : Pat<(any_fma (fneg FPR32:$rs1), FPR32:$rs2, (fneg FPR32:$rs3)), (FNMADD_S FPR32:$rs1, FPR32:$rs2, FPR32:$rs3, 0b111)>; // The ratified 20191213 ISA spec defines fmin and fmax in a way that matches diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZfh.td @@ -213,12 +213,12 @@ /// Float arithmetic operations -def : PatFpr16Fpr16DynFrm<fadd, FADD_H>; -def : PatFpr16Fpr16DynFrm<fsub, FSUB_H>; -def : PatFpr16Fpr16DynFrm<fmul, FMUL_H>; -def : PatFpr16Fpr16DynFrm<fdiv, FDIV_H>; +def : PatFpr16Fpr16DynFrm<any_fadd, FADD_H>; +def : PatFpr16Fpr16DynFrm<any_fsub, FSUB_H>; +def : PatFpr16Fpr16DynFrm<any_fmul, FMUL_H>; +def : PatFpr16Fpr16DynFrm<any_fdiv, FDIV_H>; -def : Pat<(fsqrt FPR16:$rs1), (FSQRT_H FPR16:$rs1, 0b111)>; +def : Pat<(any_fsqrt FPR16:$rs1), (FSQRT_H FPR16:$rs1, 0b111)>; def : Pat<(fneg FPR16:$rs1), (FSGNJN_H $rs1, $rs1)>; def : Pat<(fabs FPR16:$rs1), (FSGNJX_H $rs1, $rs1)>; @@ -230,19 +230,19 @@ def : Pat<(fcopysign FPR32:$rs1, FPR16:$rs2), (FSGNJ_S $rs1, (FCVT_S_H $rs2))>; // fmadd: rs1 * rs2 + rs3 -def : Pat<(fma FPR16:$rs1, FPR16:$rs2, FPR16:$rs3), +def : Pat<(any_fma FPR16:$rs1, FPR16:$rs2, FPR16:$rs3), (FMADD_H $rs1, $rs2, $rs3, 0b111)>; // fmsub: rs1 * rs2 - rs3 -def : Pat<(fma FPR16:$rs1, FPR16:$rs2, (fneg FPR16:$rs3)), +def : Pat<(any_fma FPR16:$rs1, FPR16:$rs2, (fneg FPR16:$rs3)), (FMSUB_H FPR16:$rs1, FPR16:$rs2, FPR16:$rs3, 0b111)>; // fnmsub: -rs1 * rs2 + rs3 -def : Pat<(fma (fneg FPR16:$rs1), FPR16:$rs2, FPR16:$rs3), +def : Pat<(any_fma (fneg FPR16:$rs1), FPR16:$rs2, FPR16:$rs3), (FNMSUB_H FPR16:$rs1, FPR16:$rs2, FPR16:$rs3, 0b111)>; // fnmadd: -rs1 * rs2 - rs3 -def : Pat<(fma (fneg FPR16:$rs1), FPR16:$rs2, (fneg FPR16:$rs3)), +def : Pat<(any_fma (fneg FPR16:$rs1), FPR16:$rs2, (fneg FPR16:$rs3)), (FNMADD_H FPR16:$rs1, FPR16:$rs2, FPR16:$rs3, 0b111)>; // The ratified 20191213 ISA spec defines fmin and fmax in a way that matches 
diff --git a/llvm/test/CodeGen/RISCV/double-arith-strict.ll b/llvm/test/CodeGen/RISCV/double-arith-strict.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/double-arith-strict.ll @@ -0,0 +1,880 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \ +; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV32IFD %s +; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \ +; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV64IFD %s +; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ +; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s +; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ +; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s + +define double @fadd_d(double %a, double %b) nounwind strictfp { +; RV32IFD-LABEL: fadd_d: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw a2, 8(sp) +; RV32IFD-NEXT: sw a3, 12(sp) +; RV32IFD-NEXT: fld ft0, 8(sp) +; RV32IFD-NEXT: sw a0, 8(sp) +; RV32IFD-NEXT: sw a1, 12(sp) +; RV32IFD-NEXT: fld ft1, 8(sp) +; RV32IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV32IFD-NEXT: fsd ft0, 8(sp) +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fadd_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: fadd_d: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: call __adddf3@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fadd_d: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: call __adddf3@plt +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret double %1 +} +declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata) + +define double @fsub_d(double %a, double %b) nounwind strictfp { +; RV32IFD-LABEL: fsub_d: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw a2, 8(sp) +; RV32IFD-NEXT: sw a3, 12(sp) +; RV32IFD-NEXT: fld ft0, 8(sp) +; RV32IFD-NEXT: sw a0, 8(sp) +; RV32IFD-NEXT: sw a1, 12(sp) +; RV32IFD-NEXT: fld ft1, 8(sp) +; RV32IFD-NEXT: fsub.d ft0, ft1, ft0 +; RV32IFD-NEXT: fsd ft0, 8(sp) +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fsub_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fsub.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: fsub_d: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: call __subdf3@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fsub_d: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: call __subdf3@plt +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; 
RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret double %1 +} +declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata) + +define double @fmul_d(double %a, double %b) nounwind strictfp { +; RV32IFD-LABEL: fmul_d: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw a2, 8(sp) +; RV32IFD-NEXT: sw a3, 12(sp) +; RV32IFD-NEXT: fld ft0, 8(sp) +; RV32IFD-NEXT: sw a0, 8(sp) +; RV32IFD-NEXT: sw a1, 12(sp) +; RV32IFD-NEXT: fld ft1, 8(sp) +; RV32IFD-NEXT: fmul.d ft0, ft1, ft0 +; RV32IFD-NEXT: fsd ft0, 8(sp) +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fmul_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fmul.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: fmul_d: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: call __muldf3@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fmul_d: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: call __muldf3@plt +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret double %1 +} +declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata) + +define double @fdiv_d(double %a, double %b) nounwind strictfp { +; RV32IFD-LABEL: fdiv_d: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw a2, 8(sp) +; RV32IFD-NEXT: sw a3, 12(sp) +; RV32IFD-NEXT: fld ft0, 8(sp) +; RV32IFD-NEXT: sw a0, 8(sp) +; RV32IFD-NEXT: sw a1, 12(sp) +; RV32IFD-NEXT: fld ft1, 8(sp) +; RV32IFD-NEXT: fdiv.d ft0, ft1, ft0 +; RV32IFD-NEXT: fsd ft0, 8(sp) +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fdiv_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fdiv.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: fdiv_d: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: call __divdf3@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fdiv_d: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: call __divdf3@plt +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret double %1 +} +declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata) + +define double @fsqrt_d(double %a) nounwind strictfp { +; RV32IFD-LABEL: fsqrt_d: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw a0, 8(sp) +; RV32IFD-NEXT: sw a1, 12(sp) +; RV32IFD-NEXT: fld ft0, 
8(sp) +; RV32IFD-NEXT: fsqrt.d ft0, ft0 +; RV32IFD-NEXT: fsd ft0, 8(sp) +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fsqrt_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fsqrt.d ft0, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: fsqrt_d: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: call sqrt@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fsqrt_d: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: call sqrt@plt +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %1 = call double @llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret double %1 +} +declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata) + +define double @fmin_d(double %a, double %b) nounwind strictfp { +; RV32IFD-LABEL: fmin_d: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IFD-NEXT: call fmin@plt +; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fmin_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call fmin@plt +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: fmin_d: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: call fmin@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fmin_d: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: call fmin@plt +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %1 = call double @llvm.experimental.constrained.minnum.f64(double %a, double %b, metadata !"fpexcept.strict") strictfp + ret double %1 +} +declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata) strictfp + +define double @fmax_d(double %a, double %b) nounwind strictfp { +; RV32IFD-LABEL: fmax_d: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IFD-NEXT: call fmax@plt +; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fmax_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: addi sp, sp, -16 +; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IFD-NEXT: call fmax@plt +; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IFD-NEXT: addi sp, sp, 16 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: fmax_d: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: call fmax@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fmax_d: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: call fmax@plt +; RV64I-NEXT: ld ra, 8(sp) # 8-byte 
Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %1 = call double @llvm.experimental.constrained.maxnum.f64(double %a, double %b, metadata !"fpexcept.strict") strictfp + ret double %1 +} +declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata) strictfp + +define double @fmadd_d(double %a, double %b, double %c) nounwind strictfp { +; RV32IFD-LABEL: fmadd_d: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw a4, 8(sp) +; RV32IFD-NEXT: sw a5, 12(sp) +; RV32IFD-NEXT: fld ft0, 8(sp) +; RV32IFD-NEXT: sw a2, 8(sp) +; RV32IFD-NEXT: sw a3, 12(sp) +; RV32IFD-NEXT: fld ft1, 8(sp) +; RV32IFD-NEXT: sw a0, 8(sp) +; RV32IFD-NEXT: sw a1, 12(sp) +; RV32IFD-NEXT: fld ft2, 8(sp) +; RV32IFD-NEXT: fmadd.d ft0, ft2, ft1, ft0 +; RV32IFD-NEXT: fsd ft0, 8(sp) +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fmadd_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a2 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fmv.d.x ft2, a0 +; RV64IFD-NEXT: fmadd.d ft0, ft2, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: fmadd_d: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: call fma@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fmadd_d: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: call fma@plt +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %1 = call double @llvm.experimental.constrained.fma.f64(double %a, double %b, double %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret double %1 +} +declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata) strictfp + +define double @fmsub_d(double %a, double %b, double %c) nounwind strictfp { +; RV32IFD-LABEL: fmsub_d: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw a2, 8(sp) +; RV32IFD-NEXT: sw a3, 12(sp) +; RV32IFD-NEXT: fld ft0, 8(sp) +; RV32IFD-NEXT: sw a0, 8(sp) +; RV32IFD-NEXT: sw a1, 12(sp) +; RV32IFD-NEXT: fld ft1, 8(sp) +; RV32IFD-NEXT: sw a4, 8(sp) +; RV32IFD-NEXT: sw a5, 12(sp) +; RV32IFD-NEXT: fld ft2, 8(sp) +; RV32IFD-NEXT: fcvt.d.w ft3, zero +; RV32IFD-NEXT: fadd.d ft2, ft2, ft3 +; RV32IFD-NEXT: fmsub.d ft0, ft1, ft0, ft2 +; RV32IFD-NEXT: fsd ft0, 8(sp) +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fmsub_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fmv.d.x ft2, a2 +; RV64IFD-NEXT: fmv.d.x ft3, zero +; RV64IFD-NEXT: fadd.d ft2, ft2, ft3 +; RV64IFD-NEXT: fmsub.d ft0, ft1, ft0, ft2 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: fmsub_d: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -32 +; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: mv s2, a3 +; RV32I-NEXT: mv s3, a2 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: mv a0, a4 +; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: li a2, 0 +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: call 
__adddf3@plt +; RV32I-NEXT: mv a4, a0 +; RV32I-NEXT: lui a0, 524288 +; RV32I-NEXT: xor a5, a1, a0 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s0 +; RV32I-NEXT: mv a2, s3 +; RV32I-NEXT: mv a3, s2 +; RV32I-NEXT: call fma@plt +; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 32 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fmsub_d: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -32 +; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: li a1, 0 +; RV64I-NEXT: call __adddf3@plt +; RV64I-NEXT: li a1, -1 +; RV64I-NEXT: slli a1, a1, 63 +; RV64I-NEXT: xor a2, a0, a1 +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s0 +; RV64I-NEXT: call fma@plt +; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: ret + %c_ = fadd double 0.0, %c ; avoid negation using xor + %negc = fneg double %c_ + %1 = call double @llvm.experimental.constrained.fma.f64(double %a, double %b, double %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret double %1 +} + +define double @fnmadd_d(double %a, double %b, double %c) nounwind strictfp { +; RV32IFD-LABEL: fnmadd_d: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw a2, 8(sp) +; RV32IFD-NEXT: sw a3, 12(sp) +; RV32IFD-NEXT: fld ft0, 8(sp) +; RV32IFD-NEXT: sw a4, 8(sp) +; RV32IFD-NEXT: sw a5, 12(sp) +; RV32IFD-NEXT: fld ft1, 8(sp) +; RV32IFD-NEXT: sw a0, 8(sp) +; RV32IFD-NEXT: sw a1, 12(sp) +; RV32IFD-NEXT: fld ft2, 8(sp) +; RV32IFD-NEXT: fcvt.d.w ft3, zero +; RV32IFD-NEXT: fadd.d ft2, ft2, ft3 +; RV32IFD-NEXT: fadd.d ft1, ft1, ft3 +; RV32IFD-NEXT: fnmadd.d ft0, ft2, ft0, ft1 +; RV32IFD-NEXT: fsd ft0, 8(sp) +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fnmadd_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a2 +; RV64IFD-NEXT: fmv.d.x ft2, a0 +; RV64IFD-NEXT: fmv.d.x ft3, zero +; RV64IFD-NEXT: fadd.d ft2, ft2, ft3 +; RV64IFD-NEXT: fadd.d ft1, ft1, ft3 +; RV64IFD-NEXT: fnmadd.d ft0, ft2, ft0, ft1 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: fnmadd_d: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -32 +; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill +; RV32I-NEXT: mv s4, a5 +; RV32I-NEXT: mv s5, a4 +; RV32I-NEXT: mv s2, a3 +; RV32I-NEXT: mv s3, a2 +; RV32I-NEXT: li a2, 0 +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: call __adddf3@plt +; RV32I-NEXT: mv s0, a0 +; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv a0, s5 +; RV32I-NEXT: mv a1, s4 +; RV32I-NEXT: li a2, 0 +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: call __adddf3@plt +; RV32I-NEXT: mv a4, a0 +; RV32I-NEXT: lui a0, 524288 +; RV32I-NEXT: xor a2, s1, a0 +; RV32I-NEXT: xor a5, a1, a0 +; 
RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a1, a2 +; RV32I-NEXT: mv a2, s3 +; RV32I-NEXT: mv a3, s2 +; RV32I-NEXT: call fma@plt +; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 32 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fnmadd_d: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -32 +; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill +; RV64I-NEXT: mv s0, a2 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: li a1, 0 +; RV64I-NEXT: call __adddf3@plt +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: li a1, 0 +; RV64I-NEXT: call __adddf3@plt +; RV64I-NEXT: li a1, -1 +; RV64I-NEXT: slli a2, a1, 63 +; RV64I-NEXT: xor a1, s1, a2 +; RV64I-NEXT: xor a2, a0, a2 +; RV64I-NEXT: mv a0, a1 +; RV64I-NEXT: mv a1, s2 +; RV64I-NEXT: call fma@plt +; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: ret + %a_ = fadd double 0.0, %a + %c_ = fadd double 0.0, %c + %nega = fneg double %a_ + %negc = fneg double %c_ + %1 = call double @llvm.experimental.constrained.fma.f64(double %nega, double %b, double %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret double %1 +} + +define double @fnmadd_d_2(double %a, double %b, double %c) nounwind strictfp { +; RV32IFD-LABEL: fnmadd_d_2: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw a0, 8(sp) +; RV32IFD-NEXT: sw a1, 12(sp) +; RV32IFD-NEXT: fld ft0, 8(sp) +; RV32IFD-NEXT: sw a4, 8(sp) +; RV32IFD-NEXT: sw a5, 12(sp) +; RV32IFD-NEXT: fld ft1, 8(sp) +; RV32IFD-NEXT: sw a2, 8(sp) +; RV32IFD-NEXT: sw a3, 12(sp) +; RV32IFD-NEXT: fld ft2, 8(sp) +; RV32IFD-NEXT: fcvt.d.w ft3, zero +; RV32IFD-NEXT: fadd.d ft2, ft2, ft3 +; RV32IFD-NEXT: fadd.d ft1, ft1, ft3 +; RV32IFD-NEXT: fnmadd.d ft0, ft2, ft0, ft1 +; RV32IFD-NEXT: fsd ft0, 8(sp) +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fnmadd_d_2: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: fmv.d.x ft1, a2 +; RV64IFD-NEXT: fmv.d.x ft2, a1 +; RV64IFD-NEXT: fmv.d.x ft3, zero +; RV64IFD-NEXT: fadd.d ft2, ft2, ft3 +; RV64IFD-NEXT: fadd.d ft1, ft1, ft3 +; RV64IFD-NEXT: fnmadd.d ft0, ft2, ft0, ft1 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: fnmadd_d_2: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -32 +; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill +; RV32I-NEXT: mv s4, a5 +; RV32I-NEXT: mv s1, a4 +; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s3, a0 +; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: mv a1, a3 +; RV32I-NEXT: li a2, 0 +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: call __adddf3@plt 
+; RV32I-NEXT: mv s5, a0 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s4 +; RV32I-NEXT: li a2, 0 +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: call __adddf3@plt +; RV32I-NEXT: mv a4, a0 +; RV32I-NEXT: lui a0, 524288 +; RV32I-NEXT: xor a3, s0, a0 +; RV32I-NEXT: xor a5, a1, a0 +; RV32I-NEXT: mv a0, s3 +; RV32I-NEXT: mv a1, s2 +; RV32I-NEXT: mv a2, s5 +; RV32I-NEXT: call fma@plt +; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 32 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fnmadd_d_2: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -32 +; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill +; RV64I-NEXT: mv s0, a2 +; RV64I-NEXT: mv s2, a0 +; RV64I-NEXT: mv a0, a1 +; RV64I-NEXT: li a1, 0 +; RV64I-NEXT: call __adddf3@plt +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: li a1, 0 +; RV64I-NEXT: call __adddf3@plt +; RV64I-NEXT: li a1, -1 +; RV64I-NEXT: slli a2, a1, 63 +; RV64I-NEXT: xor a1, s1, a2 +; RV64I-NEXT: xor a2, a0, a2 +; RV64I-NEXT: mv a0, s2 +; RV64I-NEXT: call fma@plt +; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: ret + %b_ = fadd double 0.0, %b + %c_ = fadd double 0.0, %c + %negb = fneg double %b_ + %negc = fneg double %c_ + %1 = call double @llvm.experimental.constrained.fma.f64(double %a, double %negb, double %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret double %1 +} + +define double @fnmsub_d(double %a, double %b, double %c) nounwind strictfp { +; RV32IFD-LABEL: fnmsub_d: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw a4, 8(sp) +; RV32IFD-NEXT: sw a5, 12(sp) +; RV32IFD-NEXT: fld ft0, 8(sp) +; RV32IFD-NEXT: sw a2, 8(sp) +; RV32IFD-NEXT: sw a3, 12(sp) +; RV32IFD-NEXT: fld ft1, 8(sp) +; RV32IFD-NEXT: sw a0, 8(sp) +; RV32IFD-NEXT: sw a1, 12(sp) +; RV32IFD-NEXT: fld ft2, 8(sp) +; RV32IFD-NEXT: fcvt.d.w ft3, zero +; RV32IFD-NEXT: fadd.d ft2, ft2, ft3 +; RV32IFD-NEXT: fnmsub.d ft0, ft2, ft1, ft0 +; RV32IFD-NEXT: fsd ft0, 8(sp) +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fnmsub_d: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a2 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: fmv.d.x ft2, a0 +; RV64IFD-NEXT: fmv.d.x ft3, zero +; RV64IFD-NEXT: fadd.d ft2, ft2, ft3 +; RV64IFD-NEXT: fnmsub.d ft0, ft2, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: fnmsub_d: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -32 +; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: mv s2, a5 +; RV32I-NEXT: mv s3, a4 +; RV32I-NEXT: mv s0, a3 +; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: li a2, 0 +; RV32I-NEXT: 
li a3, 0 +; RV32I-NEXT: call __adddf3@plt +; RV32I-NEXT: lui a2, 524288 +; RV32I-NEXT: xor a1, a1, a2 +; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a3, s0 +; RV32I-NEXT: mv a4, s3 +; RV32I-NEXT: mv a5, s2 +; RV32I-NEXT: call fma@plt +; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 32 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fnmsub_d: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -32 +; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: mv s0, a2 +; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: li a1, 0 +; RV64I-NEXT: call __adddf3@plt +; RV64I-NEXT: li a1, -1 +; RV64I-NEXT: slli a1, a1, 63 +; RV64I-NEXT: xor a0, a0, a1 +; RV64I-NEXT: mv a1, s1 +; RV64I-NEXT: mv a2, s0 +; RV64I-NEXT: call fma@plt +; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: ret + %a_ = fadd double 0.0, %a + %nega = fneg double %a_ + %1 = call double @llvm.experimental.constrained.fma.f64(double %nega, double %b, double %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret double %1 +} + +define double @fnmsub_d_2(double %a, double %b, double %c) nounwind strictfp { +; RV32IFD-LABEL: fnmsub_d_2: +; RV32IFD: # %bb.0: +; RV32IFD-NEXT: addi sp, sp, -16 +; RV32IFD-NEXT: sw a4, 8(sp) +; RV32IFD-NEXT: sw a5, 12(sp) +; RV32IFD-NEXT: fld ft0, 8(sp) +; RV32IFD-NEXT: sw a0, 8(sp) +; RV32IFD-NEXT: sw a1, 12(sp) +; RV32IFD-NEXT: fld ft1, 8(sp) +; RV32IFD-NEXT: sw a2, 8(sp) +; RV32IFD-NEXT: sw a3, 12(sp) +; RV32IFD-NEXT: fld ft2, 8(sp) +; RV32IFD-NEXT: fcvt.d.w ft3, zero +; RV32IFD-NEXT: fadd.d ft2, ft2, ft3 +; RV32IFD-NEXT: fnmsub.d ft0, ft2, ft1, ft0 +; RV32IFD-NEXT: fsd ft0, 8(sp) +; RV32IFD-NEXT: lw a0, 8(sp) +; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: addi sp, sp, 16 +; RV32IFD-NEXT: ret +; +; RV64IFD-LABEL: fnmsub_d_2: +; RV64IFD: # %bb.0: +; RV64IFD-NEXT: fmv.d.x ft0, a2 +; RV64IFD-NEXT: fmv.d.x ft1, a0 +; RV64IFD-NEXT: fmv.d.x ft2, a1 +; RV64IFD-NEXT: fmv.d.x ft3, zero +; RV64IFD-NEXT: fadd.d ft2, ft2, ft3 +; RV64IFD-NEXT: fnmsub.d ft0, ft2, ft1, ft0 +; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ret +; +; RV32I-LABEL: fnmsub_d_2: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -32 +; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: mv s2, a5 +; RV32I-NEXT: mv s3, a4 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: mv a1, a3 +; RV32I-NEXT: li a2, 0 +; RV32I-NEXT: li a3, 0 +; RV32I-NEXT: call __adddf3@plt +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: lui a0, 524288 +; RV32I-NEXT: xor a3, a1, a0 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s0 +; RV32I-NEXT: mv a4, s3 +; RV32I-NEXT: mv a5, s2 +; RV32I-NEXT: call fma@plt +; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded 
Reload +; RV32I-NEXT: addi sp, sp, 32 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fnmsub_d_2: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -32 +; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: mv s0, a2 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: mv a0, a1 +; RV64I-NEXT: li a1, 0 +; RV64I-NEXT: call __adddf3@plt +; RV64I-NEXT: li a1, -1 +; RV64I-NEXT: slli a1, a1, 63 +; RV64I-NEXT: xor a1, a0, a1 +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a2, s0 +; RV64I-NEXT: call fma@plt +; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: ret + %b_ = fadd double 0.0, %b + %negb = fneg double %b_ + %1 = call double @llvm.experimental.constrained.fma.f64(double %a, double %negb, double %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret double %1 +} diff --git a/llvm/test/CodeGen/RISCV/float-arith-strict.ll b/llvm/test/CodeGen/RISCV/float-arith-strict.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/float-arith-strict.ll @@ -0,0 +1,715 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \ +; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV32IF %s +; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \ +; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV64IF %s +; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ +; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s +; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ +; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s + +define float @fadd_s(float %a, float %b) nounwind strictfp { +; RV32IF-LABEL: fadd_s: +; RV32IF: # %bb.0: +; RV32IF-NEXT: fmv.w.x ft0, a1 +; RV32IF-NEXT: fmv.w.x ft1, a0 +; RV32IF-NEXT: fadd.s ft0, ft1, ft0 +; RV32IF-NEXT: fmv.x.w a0, ft0 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fadd_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret +; +; RV32I-LABEL: fadd_s: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: call __addsf3@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fadd_s: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: call __addsf3@plt +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %1 = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret float %1 +} +declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata) + +define float @fsub_s(float %a, float %b) nounwind strictfp { +; RV32IF-LABEL: fsub_s: +; RV32IF: # %bb.0: +; RV32IF-NEXT: fmv.w.x ft0, a1 +; RV32IF-NEXT: fmv.w.x ft1, a0 +; RV32IF-NEXT: fsub.s ft0, ft1, ft0 +; RV32IF-NEXT: fmv.x.w a0, ft0 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fsub_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fsub.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: 
ret +; +; RV32I-LABEL: fsub_s: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: call __subsf3@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fsub_s: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: call __subsf3@plt +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %1 = call float @llvm.experimental.constrained.fsub.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret float %1 +} +declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata) + +define float @fmul_s(float %a, float %b) nounwind strictfp { +; RV32IF-LABEL: fmul_s: +; RV32IF: # %bb.0: +; RV32IF-NEXT: fmv.w.x ft0, a1 +; RV32IF-NEXT: fmv.w.x ft1, a0 +; RV32IF-NEXT: fmul.s ft0, ft1, ft0 +; RV32IF-NEXT: fmv.x.w a0, ft0 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fmul_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fmul.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret +; +; RV32I-LABEL: fmul_s: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: call __mulsf3@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fmul_s: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: call __mulsf3@plt +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %1 = call float @llvm.experimental.constrained.fmul.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret float %1 +} +declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata) + +define float @fdiv_s(float %a, float %b) nounwind strictfp { +; RV32IF-LABEL: fdiv_s: +; RV32IF: # %bb.0: +; RV32IF-NEXT: fmv.w.x ft0, a1 +; RV32IF-NEXT: fmv.w.x ft1, a0 +; RV32IF-NEXT: fdiv.s ft0, ft1, ft0 +; RV32IF-NEXT: fmv.x.w a0, ft0 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fdiv_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fdiv.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret +; +; RV32I-LABEL: fdiv_s: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: call __divsf3@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fdiv_s: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: call __divsf3@plt +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %1 = call float @llvm.experimental.constrained.fdiv.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret float %1 +} +declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata) + +define float @fsqrt_s(float %a) nounwind strictfp { +; RV32IF-LABEL: fsqrt_s: +; RV32IF: # %bb.0: +; RV32IF-NEXT: fmv.w.x ft0, a0 +; RV32IF-NEXT: fsqrt.s ft0, ft0 +; RV32IF-NEXT: fmv.x.w a0, ft0 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fsqrt_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, 
a0 +; RV64IF-NEXT: fsqrt.s ft0, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret +; +; RV32I-LABEL: fsqrt_s: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: call sqrtf@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fsqrt_s: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: call sqrtf@plt +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %1 = call float @llvm.experimental.constrained.sqrt.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret float %1 +} +declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata) + +define float @fmin_s(float %a, float %b) nounwind strictfp { +; RV32IF-LABEL: fmin_s: +; RV32IF: # %bb.0: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IF-NEXT: call fminf@plt +; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IF-NEXT: addi sp, sp, 16 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fmin_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IF-NEXT: call fminf@plt +; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; +; RV32I-LABEL: fmin_s: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: call fminf@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fmin_s: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: call fminf@plt +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %1 = call float @llvm.experimental.constrained.minnum.f32(float %a, float %b, metadata !"fpexcept.strict") strictfp + ret float %1 +} +declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata) strictfp + +define float @fmax_s(float %a, float %b) nounwind strictfp { +; RV32IF-LABEL: fmax_s: +; RV32IF: # %bb.0: +; RV32IF-NEXT: addi sp, sp, -16 +; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32IF-NEXT: call fmaxf@plt +; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32IF-NEXT: addi sp, sp, 16 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fmax_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: addi sp, sp, -16 +; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64IF-NEXT: call fmaxf@plt +; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ret +; +; RV32I-LABEL: fmax_s: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: call fmaxf@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fmax_s: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: call fmaxf@plt +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %1 = call float @llvm.experimental.constrained.maxnum.f32(float %a, float %b, metadata !"fpexcept.strict") strictfp + ret float %1 +} +declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata) strictfp + +define float 
@fmadd_s(float %a, float %b, float %c) nounwind strictfp { +; RV32IF-LABEL: fmadd_s: +; RV32IF: # %bb.0: +; RV32IF-NEXT: fmv.w.x ft0, a2 +; RV32IF-NEXT: fmv.w.x ft1, a1 +; RV32IF-NEXT: fmv.w.x ft2, a0 +; RV32IF-NEXT: fmadd.s ft0, ft2, ft1, ft0 +; RV32IF-NEXT: fmv.x.w a0, ft0 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fmadd_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a2 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: fmv.w.x ft2, a0 +; RV64IF-NEXT: fmadd.s ft0, ft2, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret +; +; RV32I-LABEL: fmadd_s: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: call fmaf@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fmadd_s: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -16 +; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: call fmaf@plt +; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 16 +; RV64I-NEXT: ret + %1 = call float @llvm.experimental.constrained.fma.f32(float %a, float %b, float %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret float %1 +} +declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata) strictfp + +define float @fmsub_s(float %a, float %b, float %c) nounwind strictfp { +; RV32IF-LABEL: fmsub_s: +; RV32IF: # %bb.0: +; RV32IF-NEXT: fmv.w.x ft0, a1 +; RV32IF-NEXT: fmv.w.x ft1, a0 +; RV32IF-NEXT: fmv.w.x ft2, a2 +; RV32IF-NEXT: fmv.w.x ft3, zero +; RV32IF-NEXT: fadd.s ft2, ft2, ft3 +; RV32IF-NEXT: fmsub.s ft0, ft1, ft0, ft2 +; RV32IF-NEXT: fmv.x.w a0, ft0 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fmsub_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fmv.w.x ft2, a2 +; RV64IF-NEXT: fmv.w.x ft3, zero +; RV64IF-NEXT: fadd.s ft2, ft2, ft3 +; RV64IF-NEXT: fmsub.s ft0, ft1, ft0, ft2 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret +; +; RV32I-LABEL: fmsub_s: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: call __addsf3@plt +; RV32I-NEXT: lui a1, 524288 +; RV32I-NEXT: xor a2, a0, a1 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s0 +; RV32I-NEXT: call fmaf@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fmsub_s: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -32 +; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: li a1, 0 +; RV64I-NEXT: call __addsf3@plt +; RV64I-NEXT: lui a1, 524288 +; RV64I-NEXT: xor a2, a0, a1 +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s0 +; RV64I-NEXT: call fmaf@plt +; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: ret + %c_ = fadd float 0.0, %c ; avoid negation using xor + %negc = fneg float %c_ + %1 = call float 
@llvm.experimental.constrained.fma.f32(float %a, float %b, float %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret float %1 +} + +define float @fnmadd_s(float %a, float %b, float %c) nounwind strictfp { +; RV32IF-LABEL: fnmadd_s: +; RV32IF: # %bb.0: +; RV32IF-NEXT: fmv.w.x ft0, a1 +; RV32IF-NEXT: fmv.w.x ft1, a2 +; RV32IF-NEXT: fmv.w.x ft2, a0 +; RV32IF-NEXT: fmv.w.x ft3, zero +; RV32IF-NEXT: fadd.s ft2, ft2, ft3 +; RV32IF-NEXT: fadd.s ft1, ft1, ft3 +; RV32IF-NEXT: fnmadd.s ft0, ft2, ft0, ft1 +; RV32IF-NEXT: fmv.x.w a0, ft0 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fnmadd_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a2 +; RV64IF-NEXT: fmv.w.x ft2, a0 +; RV64IF-NEXT: fmv.w.x ft3, zero +; RV64IF-NEXT: fadd.s ft2, ft2, ft3 +; RV64IF-NEXT: fadd.s ft1, ft1, ft3 +; RV64IF-NEXT: fnmadd.s ft0, ft2, ft0, ft1 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret +; +; RV32I-LABEL: fnmadd_s: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill +; RV32I-NEXT: mv s0, a2 +; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: call __addsf3@plt +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: call __addsf3@plt +; RV32I-NEXT: lui a2, 524288 +; RV32I-NEXT: xor a1, s1, a2 +; RV32I-NEXT: xor a2, a0, a2 +; RV32I-NEXT: mv a0, a1 +; RV32I-NEXT: mv a1, s2 +; RV32I-NEXT: call fmaf@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fnmadd_s: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -32 +; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill +; RV64I-NEXT: mv s0, a2 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: li a1, 0 +; RV64I-NEXT: call __addsf3@plt +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: li a1, 0 +; RV64I-NEXT: call __addsf3@plt +; RV64I-NEXT: lui a2, 524288 +; RV64I-NEXT: xor a1, s1, a2 +; RV64I-NEXT: xor a2, a0, a2 +; RV64I-NEXT: mv a0, a1 +; RV64I-NEXT: mv a1, s2 +; RV64I-NEXT: call fmaf@plt +; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: ret + %a_ = fadd float 0.0, %a + %c_ = fadd float 0.0, %c + %nega = fneg float %a_ + %negc = fneg float %c_ + %1 = call float @llvm.experimental.constrained.fma.f32(float %nega, float %b, float %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret float %1 +} + +define float @fnmadd_s_2(float %a, float %b, float %c) nounwind strictfp { +; RV32IF-LABEL: fnmadd_s_2: +; RV32IF: # %bb.0: +; RV32IF-NEXT: fmv.w.x ft0, a0 +; RV32IF-NEXT: fmv.w.x ft1, a2 +; RV32IF-NEXT: fmv.w.x ft2, a1 +; RV32IF-NEXT: fmv.w.x ft3, zero +; RV32IF-NEXT: fadd.s ft2, ft2, ft3 +; RV32IF-NEXT: fadd.s ft1, ft1, ft3 +; RV32IF-NEXT: fnmadd.s ft0, ft2, ft0, ft1 +; RV32IF-NEXT: fmv.x.w a0, ft0 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fnmadd_s_2: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x 
ft0, a0 +; RV64IF-NEXT: fmv.w.x ft1, a2 +; RV64IF-NEXT: fmv.w.x ft2, a1 +; RV64IF-NEXT: fmv.w.x ft3, zero +; RV64IF-NEXT: fadd.s ft2, ft2, ft3 +; RV64IF-NEXT: fadd.s ft1, ft1, ft3 +; RV64IF-NEXT: fnmadd.s ft0, ft2, ft0, ft1 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret +; +; RV32I-LABEL: fnmadd_s_2: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill +; RV32I-NEXT: mv s0, a2 +; RV32I-NEXT: mv s2, a0 +; RV32I-NEXT: mv a0, a1 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: call __addsf3@plt +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: call __addsf3@plt +; RV32I-NEXT: lui a2, 524288 +; RV32I-NEXT: xor a1, s1, a2 +; RV32I-NEXT: xor a2, a0, a2 +; RV32I-NEXT: mv a0, s2 +; RV32I-NEXT: call fmaf@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fnmadd_s_2: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -32 +; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill +; RV64I-NEXT: mv s0, a2 +; RV64I-NEXT: mv s2, a0 +; RV64I-NEXT: mv a0, a1 +; RV64I-NEXT: li a1, 0 +; RV64I-NEXT: call __addsf3@plt +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: li a1, 0 +; RV64I-NEXT: call __addsf3@plt +; RV64I-NEXT: lui a2, 524288 +; RV64I-NEXT: xor a1, s1, a2 +; RV64I-NEXT: xor a2, a0, a2 +; RV64I-NEXT: mv a0, s2 +; RV64I-NEXT: call fmaf@plt +; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: ret + %b_ = fadd float 0.0, %b + %c_ = fadd float 0.0, %c + %negb = fneg float %b_ + %negc = fneg float %c_ + %1 = call float @llvm.experimental.constrained.fma.f32(float %a, float %negb, float %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret float %1 +} + +define float @fnmsub_s(float %a, float %b, float %c) nounwind strictfp { +; RV32IF-LABEL: fnmsub_s: +; RV32IF: # %bb.0: +; RV32IF-NEXT: fmv.w.x ft0, a2 +; RV32IF-NEXT: fmv.w.x ft1, a1 +; RV32IF-NEXT: fmv.w.x ft2, a0 +; RV32IF-NEXT: fmv.w.x ft3, zero +; RV32IF-NEXT: fadd.s ft2, ft2, ft3 +; RV32IF-NEXT: fnmsub.s ft0, ft2, ft1, ft0 +; RV32IF-NEXT: fmv.x.w a0, ft0 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fnmsub_s: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a2 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: fmv.w.x ft2, a0 +; RV64IF-NEXT: fmv.w.x ft3, zero +; RV64IF-NEXT: fadd.s ft2, ft2, ft3 +; RV64IF-NEXT: fnmsub.s ft0, ft2, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret +; +; RV32I-LABEL: fnmsub_s: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32I-NEXT: mv s0, a2 +; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: call __addsf3@plt +; RV32I-NEXT: lui a1, 524288 +; RV32I-NEXT: xor a0, a0, a1 +; RV32I-NEXT: mv a1, s1 +; RV32I-NEXT: mv a2, s0 +; 
RV32I-NEXT: call fmaf@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fnmsub_s: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -32 +; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: mv s0, a2 +; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: li a1, 0 +; RV64I-NEXT: call __addsf3@plt +; RV64I-NEXT: lui a1, 524288 +; RV64I-NEXT: xor a0, a0, a1 +; RV64I-NEXT: mv a1, s1 +; RV64I-NEXT: mv a2, s0 +; RV64I-NEXT: call fmaf@plt +; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: ret + %a_ = fadd float 0.0, %a + %nega = fneg float %a_ + %1 = call float @llvm.experimental.constrained.fma.f32(float %nega, float %b, float %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret float %1 +} + +define float @fnmsub_s_2(float %a, float %b, float %c) nounwind strictfp { +; RV32IF-LABEL: fnmsub_s_2: +; RV32IF: # %bb.0: +; RV32IF-NEXT: fmv.w.x ft0, a2 +; RV32IF-NEXT: fmv.w.x ft1, a0 +; RV32IF-NEXT: fmv.w.x ft2, a1 +; RV32IF-NEXT: fmv.w.x ft3, zero +; RV32IF-NEXT: fadd.s ft2, ft2, ft3 +; RV32IF-NEXT: fnmsub.s ft0, ft2, ft1, ft0 +; RV32IF-NEXT: fmv.x.w a0, ft0 +; RV32IF-NEXT: ret +; +; RV64IF-LABEL: fnmsub_s_2: +; RV64IF: # %bb.0: +; RV64IF-NEXT: fmv.w.x ft0, a2 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fmv.w.x ft2, a1 +; RV64IF-NEXT: fmv.w.x ft3, zero +; RV64IF-NEXT: fadd.s ft2, ft2, ft3 +; RV64IF-NEXT: fnmsub.s ft0, ft2, ft1, ft0 +; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ret +; +; RV32I-LABEL: fnmsub_s_2: +; RV32I: # %bb.0: +; RV32I-NEXT: addi sp, sp, -16 +; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill +; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill +; RV32I-NEXT: mv s0, a2 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: mv a0, a1 +; RV32I-NEXT: li a1, 0 +; RV32I-NEXT: call __addsf3@plt +; RV32I-NEXT: lui a1, 524288 +; RV32I-NEXT: xor a1, a0, a1 +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a2, s0 +; RV32I-NEXT: call fmaf@plt +; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload +; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload +; RV32I-NEXT: addi sp, sp, 16 +; RV32I-NEXT: ret +; +; RV64I-LABEL: fnmsub_s_2: +; RV64I: # %bb.0: +; RV64I-NEXT: addi sp, sp, -32 +; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill +; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill +; RV64I-NEXT: mv s0, a2 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: mv a0, a1 +; RV64I-NEXT: li a1, 0 +; RV64I-NEXT: call __addsf3@plt +; RV64I-NEXT: lui a1, 524288 +; RV64I-NEXT: xor a1, a0, a1 +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a2, s0 +; RV64I-NEXT: call fmaf@plt +; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload +; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload +; RV64I-NEXT: addi sp, sp, 32 +; RV64I-NEXT: ret + %b_ = fadd float 0.0, %b + %negb = fneg float %b_ + %1 = call float @llvm.experimental.constrained.fma.f32(float %a, float %negb, float %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp + ret float %1 +} diff --git a/llvm/test/CodeGen/RISCV/half-arith-strict.ll 
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/half-arith-strict.ll
@@ -0,0 +1,222 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zfh -verify-machineinstrs \
+; RUN: -disable-strictnode-mutation -target-abi ilp32f < %s \
+; RUN: | FileCheck -check-prefix=RV32IZFH %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zfh -verify-machineinstrs \
+; RUN: -disable-strictnode-mutation -target-abi lp64f < %s \
+; RUN: | FileCheck -check-prefix=RV64IZFH %s
+
+; FIXME: We can't test without Zfh because soft promote legalization isn't
+; implemented in SelectionDAG for STRICT nodes.
+
+define half @fadd_h(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fadd_h:
+; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: fadd.h fa0, fa0, fa1
+; RV32IZFH-NEXT: ret
+;
+; RV64IZFH-LABEL: fadd_h:
+; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: fadd.h fa0, fa0, fa1
+; RV64IZFH-NEXT: ret
+ %1 = call half @llvm.experimental.constrained.fadd.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+ ret half %1
+}
+declare half @llvm.experimental.constrained.fadd.f16(half, half, metadata, metadata)
+
+define half @fsub_h(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fsub_h:
+; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: fsub.h fa0, fa0, fa1
+; RV32IZFH-NEXT: ret
+;
+; RV64IZFH-LABEL: fsub_h:
+; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: fsub.h fa0, fa0, fa1
+; RV64IZFH-NEXT: ret
+ %1 = call half @llvm.experimental.constrained.fsub.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+ ret half %1
+}
+declare half @llvm.experimental.constrained.fsub.f16(half, half, metadata, metadata)
+
+define half @fmul_h(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fmul_h:
+; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: fmul.h fa0, fa0, fa1
+; RV32IZFH-NEXT: ret
+;
+; RV64IZFH-LABEL: fmul_h:
+; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: fmul.h fa0, fa0, fa1
+; RV64IZFH-NEXT: ret
+ %1 = call half @llvm.experimental.constrained.fmul.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+ ret half %1
+}
+declare half @llvm.experimental.constrained.fmul.f16(half, half, metadata, metadata)
+
+define half @fdiv_h(half %a, half %b) nounwind strictfp {
+; RV32IZFH-LABEL: fdiv_h:
+; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: fdiv.h fa0, fa0, fa1
+; RV32IZFH-NEXT: ret
+;
+; RV64IZFH-LABEL: fdiv_h:
+; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: fdiv.h fa0, fa0, fa1
+; RV64IZFH-NEXT: ret
+ %1 = call half @llvm.experimental.constrained.fdiv.f16(half %a, half %b, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+ ret half %1
+}
+declare half @llvm.experimental.constrained.fdiv.f16(half, half, metadata, metadata)
+
+define half @fsqrt_h(half %a) nounwind strictfp {
+; RV32IZFH-LABEL: fsqrt_h:
+; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: fsqrt.h fa0, fa0
+; RV32IZFH-NEXT: ret
+;
+; RV64IZFH-LABEL: fsqrt_h:
+; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: fsqrt.h fa0, fa0
+; RV64IZFH-NEXT: ret
+ %1 = call half @llvm.experimental.constrained.sqrt.f16(half %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+ ret half %1
+}
+declare half @llvm.experimental.constrained.sqrt.f16(half, metadata, metadata)
+
+; FIXME: fminnum/fmaxnum need libcalls to handle SNaN, but we don't have f16
+; libcalls and don't support promotion yet.
+;define half @fmin_h(half %a, half %b) nounwind strictfp {
+; %1 = call half @llvm.experimental.constrained.minnum.f16(half %a, half %b, metadata !"fpexcept.strict") strictfp
+; ret half %1
+;}
+;declare half @llvm.experimental.constrained.minnum.f16(half, half, metadata) strictfp
+;
+;define half @fmax_h(half %a, half %b) nounwind strictfp {
+; %1 = call half @llvm.experimental.constrained.maxnum.f16(half %a, half %b, metadata !"fpexcept.strict") strictfp
+; ret half %1
+;}
+;declare half @llvm.experimental.constrained.maxnum.f16(half, half, metadata) strictfp
+
+define half @fmadd_h(half %a, half %b, half %c) nounwind strictfp {
+; RV32IZFH-LABEL: fmadd_h:
+; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: fmadd.h fa0, fa0, fa1, fa2
+; RV32IZFH-NEXT: ret
+;
+; RV64IZFH-LABEL: fmadd_h:
+; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: fmadd.h fa0, fa0, fa1, fa2
+; RV64IZFH-NEXT: ret
+ %1 = call half @llvm.experimental.constrained.fma.f16(half %a, half %b, half %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+ ret half %1
+}
+declare half @llvm.experimental.constrained.fma.f16(half, half, half, metadata, metadata) strictfp
+
+define half @fmsub_h(half %a, half %b, half %c) nounwind strictfp {
+; RV32IZFH-LABEL: fmsub_h:
+; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: fmv.h.x ft0, zero
+; RV32IZFH-NEXT: fadd.h ft0, fa2, ft0
+; RV32IZFH-NEXT: fmsub.h fa0, fa0, fa1, ft0
+; RV32IZFH-NEXT: ret
+;
+; RV64IZFH-LABEL: fmsub_h:
+; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: fmv.h.x ft0, zero
+; RV64IZFH-NEXT: fadd.h ft0, fa2, ft0
+; RV64IZFH-NEXT: fmsub.h fa0, fa0, fa1, ft0
+; RV64IZFH-NEXT: ret
+ %c_ = fadd half 0.0, %c ; avoid negation using xor
+ %negc = fneg half %c_
+ %1 = call half @llvm.experimental.constrained.fma.f16(half %a, half %b, half %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+ ret half %1
+}
+
+define half @fnmadd_h(half %a, half %b, half %c) nounwind strictfp {
+; RV32IZFH-LABEL: fnmadd_h:
+; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: fmv.h.x ft0, zero
+; RV32IZFH-NEXT: fadd.h ft1, fa0, ft0
+; RV32IZFH-NEXT: fadd.h ft0, fa2, ft0
+; RV32IZFH-NEXT: fnmadd.h fa0, ft1, fa1, ft0
+; RV32IZFH-NEXT: ret
+;
+; RV64IZFH-LABEL: fnmadd_h:
+; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: fmv.h.x ft0, zero
+; RV64IZFH-NEXT: fadd.h ft1, fa0, ft0
+; RV64IZFH-NEXT: fadd.h ft0, fa2, ft0
+; RV64IZFH-NEXT: fnmadd.h fa0, ft1, fa1, ft0
+; RV64IZFH-NEXT: ret
+ %a_ = fadd half 0.0, %a
+ %c_ = fadd half 0.0, %c
+ %nega = fneg half %a_
+ %negc = fneg half %c_
+ %1 = call half @llvm.experimental.constrained.fma.f16(half %nega, half %b, half %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+ ret half %1
+}
+
+define half @fnmadd_h_2(half %a, half %b, half %c) nounwind strictfp {
+; RV32IZFH-LABEL: fnmadd_h_2:
+; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: fmv.h.x ft0, zero
+; RV32IZFH-NEXT: fadd.h ft1, fa1, ft0
+; RV32IZFH-NEXT: fadd.h ft0, fa2, ft0
+; RV32IZFH-NEXT: fnmadd.h fa0, ft1, fa0, ft0
+; RV32IZFH-NEXT: ret
+;
+; RV64IZFH-LABEL: fnmadd_h_2:
+; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: fmv.h.x ft0, zero
+; RV64IZFH-NEXT: fadd.h ft1, fa1, ft0
+; RV64IZFH-NEXT: fadd.h ft0, fa2, ft0
+; RV64IZFH-NEXT: fnmadd.h fa0, ft1, fa0, ft0
+; RV64IZFH-NEXT: ret
+ %b_ = fadd half 0.0, %b
+ %c_ = fadd half 0.0, %c
+ %negb = fneg half %b_
+ %negc = fneg half %c_
+ %1 = call half @llvm.experimental.constrained.fma.f16(half %a, half %negb, half %negc, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+ ret half %1
+}
+
+define half @fnmsub_h(half %a, half %b, half %c) nounwind strictfp {
+; RV32IZFH-LABEL: fnmsub_h:
+; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: fmv.h.x ft0, zero
+; RV32IZFH-NEXT: fadd.h ft0, fa0, ft0
+; RV32IZFH-NEXT: fnmsub.h fa0, ft0, fa1, fa2
+; RV32IZFH-NEXT: ret
+;
+; RV64IZFH-LABEL: fnmsub_h:
+; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: fmv.h.x ft0, zero
+; RV64IZFH-NEXT: fadd.h ft0, fa0, ft0
+; RV64IZFH-NEXT: fnmsub.h fa0, ft0, fa1, fa2
+; RV64IZFH-NEXT: ret
+ %a_ = fadd half 0.0, %a
+ %nega = fneg half %a_
+ %1 = call half @llvm.experimental.constrained.fma.f16(half %nega, half %b, half %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+ ret half %1
+}
+
+define half @fnmsub_h_2(half %a, half %b, half %c) nounwind strictfp {
+; RV32IZFH-LABEL: fnmsub_h_2:
+; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: fmv.h.x ft0, zero
+; RV32IZFH-NEXT: fadd.h ft0, fa1, ft0
+; RV32IZFH-NEXT: fnmsub.h fa0, ft0, fa0, fa2
+; RV32IZFH-NEXT: ret
+;
+; RV64IZFH-LABEL: fnmsub_h_2:
+; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: fmv.h.x ft0, zero
+; RV64IZFH-NEXT: fadd.h ft0, fa1, ft0
+; RV64IZFH-NEXT: fnmsub.h fa0, ft0, fa0, fa2
+; RV64IZFH-NEXT: ret
+ %b_ = fadd half 0.0, %b
+ %negb = fneg half %b_
+ %1 = call half @llvm.experimental.constrained.fma.f16(half %a, half %negb, half %c, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+ ret half %1
+}
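
Context, not part of the patch: the llvm.experimental.constrained.* calls exercised by these tests are what Clang emits for ordinary floating-point arithmetic once strict FP semantics are requested, which is the front-end path that reaches the STRICT_* nodes this patch makes legal. A minimal C sketch under assumed flags; the function names and the exact clang invocation are illustrative, not taken from the patch, and writing the tests directly in IR (as the patch does) keeps them independent of this front-end flag handling.

    /* Illustrative sketch only; assumed invocation, e.g.:
     *   clang --target=riscv64 -march=rv64ifd -O2 \
     *     -ffp-exception-behavior=strict -frounding-math -S strict.c
     * With strict exception behavior, the operations below are expected to be
     * emitted as llvm.experimental.constrained.fadd/fmul/fdiv/fma rather than
     * plain fadd/fmul/fdiv, so the new patterns can select fadd.s, fmadd.d,
     * etc. directly instead of falling back to mutation or libcalls. */

    float strict_fma_f32(float a, float b, float c) {
      /* expected: llvm.experimental.constrained.fma.f32 */
      return __builtin_fmaf(a, b, c);
    }

    double strict_arith_f64(double x, double y) {
      /* expected: constrained fmul, fdiv, and fadd on f64 */
      return x * y + x / y;
    }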