Index: llvm/lib/Target/RISCV/RISCVISelLowering.h
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -209,7 +209,7 @@
   SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
   SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
-
+  SDValue lowerSETCC(SDValue Op, SelectionDAG &DAG) const;
   bool isEligibleForTailCallOptimization(
       CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
       const SmallVector<CCValAssign, 16> &ArgLocs) const;
Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -211,6 +211,18 @@
   setOperationAction(ISD::TRAP, MVT::Other, Legal);
   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+  if (Subtarget.hasStdExtD()) {
+    setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Custom);
+    setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom);
+  }
+  if (Subtarget.hasStdExtF()) {
+    setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Custom);
+    setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom);
+  }
+  setOperationAction(ISD::STRICT_SINT_TO_FP, XLenVT, Legal);
+  setOperationAction(ISD::STRICT_UINT_TO_FP, XLenVT, Legal);
+  setOperationAction(ISD::STRICT_FP_TO_UINT, XLenVT, Legal);
+  setOperationAction(ISD::STRICT_FP_TO_SINT, XLenVT, Legal);

   if (Subtarget.hasStdExtA()) {
     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
@@ -432,6 +444,9 @@
   }
   case ISD::INTRINSIC_WO_CHAIN:
     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+  case ISD::STRICT_FSETCC:
+  case ISD::STRICT_FSETCCS:
+    return lowerSETCC(Op, DAG);
   }
 }
@@ -835,6 +850,18 @@
   return DAG.getMergeValues(Parts, DL);
 }

+SDValue RISCVTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  EVT VT = Op.getValueType();
+  SDValue Chain = Op.getOperand(0);
+  SDValue LHS = Op.getOperand(1);
+  SDValue RHS = Op.getOperand(2);
+  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(3))->get();
+  SDValue Result =
+      DAG.getNode(ISD::SETCC, DL, VT, LHS, RHS, DAG.getCondCode(CC));
+  return DAG.getMergeValues({Result, Chain}, DL);
+}
+
 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
Index: llvm/lib/Target/RISCV/RISCVInstrInfoD.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -348,12 +348,12 @@
 def : Pat<(f64 (fpimm0)), (FCVT_D_W X0)>;

 // double->[u]int. Round-to-zero must be used.
-def : Pat<(fp_to_sint FPR64:$rs1), (FCVT_W_D FPR64:$rs1, 0b001)>;
-def : Pat<(fp_to_uint FPR64:$rs1), (FCVT_WU_D FPR64:$rs1, 0b001)>;
+def : Pat<(any_fp_to_sint FPR64:$rs1), (FCVT_W_D FPR64:$rs1, 0b001)>;
+def : Pat<(any_fp_to_uint FPR64:$rs1), (FCVT_WU_D FPR64:$rs1, 0b001)>;

 // [u]int->double.
-def : Pat<(sint_to_fp GPR:$rs1), (FCVT_D_W GPR:$rs1)>;
-def : Pat<(uint_to_fp GPR:$rs1), (FCVT_D_WU GPR:$rs1)>;
+def : Pat<(any_sint_to_fp GPR:$rs1), (FCVT_D_W GPR:$rs1)>;
+def : Pat<(any_uint_to_fp GPR:$rs1), (FCVT_D_WU GPR:$rs1)>;
 } // Predicates = [HasStdExtD, IsRV32]

 let Predicates = [HasStdExtD, IsRV64] in {
@@ -368,17 +368,17 @@
 // because fpto[u|s]i produce poison if the value can't fit into the target.
 // We match the single case below because fcvt.wu.d sign-extends its result so
 // is cheaper than fcvt.lu.d+sext.w.
-def : Pat<(sext_inreg (zexti32 (fp_to_uint FPR64:$rs1)), i32),
+def : Pat<(sext_inreg (zexti32 (any_fp_to_uint FPR64:$rs1)), i32),
           (FCVT_WU_D $rs1, 0b001)>;

 // [u]int32->fp
-def : Pat<(sint_to_fp (sext_inreg GPR:$rs1, i32)), (FCVT_D_W $rs1)>;
-def : Pat<(uint_to_fp (zexti32 GPR:$rs1)), (FCVT_D_WU $rs1)>;
+def : Pat<(any_sint_to_fp (sext_inreg GPR:$rs1, i32)), (FCVT_D_W $rs1)>;
+def : Pat<(any_uint_to_fp (zexti32 GPR:$rs1)), (FCVT_D_WU $rs1)>;

-def : Pat<(fp_to_sint FPR64:$rs1), (FCVT_L_D FPR64:$rs1, 0b001)>;
-def : Pat<(fp_to_uint FPR64:$rs1), (FCVT_LU_D FPR64:$rs1, 0b001)>;
+def : Pat<(any_fp_to_sint FPR64:$rs1), (FCVT_L_D FPR64:$rs1, 0b001)>;
+def : Pat<(any_fp_to_uint FPR64:$rs1), (FCVT_LU_D FPR64:$rs1, 0b001)>;

 // [u]int64->fp. Match GCC and default to using dynamic rounding mode.
-def : Pat<(sint_to_fp GPR:$rs1), (FCVT_D_L GPR:$rs1, 0b111)>;
-def : Pat<(uint_to_fp GPR:$rs1), (FCVT_D_LU GPR:$rs1, 0b111)>;
+def : Pat<(any_sint_to_fp GPR:$rs1), (FCVT_D_L GPR:$rs1, 0b111)>;
+def : Pat<(any_uint_to_fp GPR:$rs1), (FCVT_D_LU GPR:$rs1, 0b111)>;
 } // Predicates = [HasStdExtD, IsRV64]
Index: llvm/lib/Target/RISCV/RISCVInstrInfoF.td
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -390,12 +390,12 @@
 let Predicates = [HasStdExtF, IsRV32] in {

 // float->[u]int. Round-to-zero must be used.
-def : Pat<(fp_to_sint FPR32:$rs1), (FCVT_W_S $rs1, 0b001)>;
-def : Pat<(fp_to_uint FPR32:$rs1), (FCVT_WU_S $rs1, 0b001)>;
+def : Pat<(any_fp_to_sint FPR32:$rs1), (FCVT_W_S $rs1, 0b001)>;
+def : Pat<(any_fp_to_uint FPR32:$rs1), (FCVT_WU_S $rs1, 0b001)>;

 // [u]int->float. Match GCC and default to using dynamic rounding mode.
-def : Pat<(sint_to_fp GPR:$rs1), (FCVT_S_W $rs1, 0b111)>;
-def : Pat<(uint_to_fp GPR:$rs1), (FCVT_S_WU $rs1, 0b111)>;
+def : Pat<(any_sint_to_fp GPR:$rs1), (FCVT_S_W $rs1, 0b111)>;
+def : Pat<(any_uint_to_fp GPR:$rs1), (FCVT_S_WU $rs1, 0b111)>;
 } // Predicates = [HasStdExtF, IsRV32]

 let Predicates = [HasStdExtF, IsRV64] in {
@@ -408,16 +408,16 @@
 // because fpto[u|s]i produces poison if the value can't fit into the target.
 // We match the single case below because fcvt.wu.s sign-extends its result so
 // is cheaper than fcvt.lu.s+sext.w.
-def : Pat<(sext_inreg (assertzexti32 (fp_to_uint FPR32:$rs1)), i32),
+def : Pat<(sext_inreg (assertzexti32 (any_fp_to_uint FPR32:$rs1)), i32),
           (FCVT_WU_S $rs1, 0b001)>;

 // FP->[u]int64
-def : Pat<(fp_to_sint FPR32:$rs1), (FCVT_L_S $rs1, 0b001)>;
-def : Pat<(fp_to_uint FPR32:$rs1), (FCVT_LU_S $rs1, 0b001)>;
+def : Pat<(any_fp_to_sint FPR32:$rs1), (FCVT_L_S $rs1, 0b001)>;
+def : Pat<(any_fp_to_uint FPR32:$rs1), (FCVT_LU_S $rs1, 0b001)>;

 // [u]int->fp. Match GCC and default to using dynamic rounding mode.
-def : Pat<(sint_to_fp (sext_inreg GPR:$rs1, i32)), (FCVT_S_W $rs1, 0b111)>;
-def : Pat<(uint_to_fp (zexti32 GPR:$rs1)), (FCVT_S_WU $rs1, 0b111)>;
-def : Pat<(sint_to_fp GPR:$rs1), (FCVT_S_L $rs1, 0b111)>;
-def : Pat<(uint_to_fp GPR:$rs1), (FCVT_S_LU $rs1, 0b111)>;
+def : Pat<(any_sint_to_fp (sext_inreg GPR:$rs1, i32)), (FCVT_S_W $rs1, 0b111)>;
+def : Pat<(any_uint_to_fp (zexti32 GPR:$rs1)), (FCVT_S_WU $rs1, 0b111)>;
+def : Pat<(any_sint_to_fp GPR:$rs1), (FCVT_S_L $rs1, 0b111)>;
+def : Pat<(any_uint_to_fp GPR:$rs1), (FCVT_S_LU $rs1, 0b111)>;
 } // Predicates = [HasStdExtF, IsRV64]
Index: llvm/test/CodeGen/RISCV/fp-strict.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/fp-strict.ll
@@ -0,0 +1,680 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O1 -mtriple=riscv64 -mattr="+d" -target-abi lp64d -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64D
+; RUN: llc -O1 -mtriple=riscv32 -mattr="+d" -target-abi ilp32d -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32D
+
+
+declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmp.f64(double, double, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)
+
+define i1 @f_lt_f64(double %a, double %b) strictfp nounwind {
+; RV64D-LABEL: f_lt_f64:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    flt.d a0, fa0, fa1
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_lt_f64:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    flt.d a0, fa0, fa1
+; RV32D-NEXT:    ret
+entry:
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict")
+  ret i1 %cmp
+}
+
+define i1 @f_lts_f64(double %a, double %b) strictfp nounwind {
+; RV64D-LABEL: f_lts_f64:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    flt.d a0, fa0, fa1
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_lts_f64:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    flt.d a0, fa0, fa1
+; RV32D-NEXT:    ret
+entry:
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict")
+  ret i1 %cmp
+}
+
+define i1 @f_lt_f32(float %a, float %b) strictfp nounwind {
+; RV64D-LABEL: f_lt_f32:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    flt.s a0, fa0, fa1
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_lt_f32:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    flt.s a0, fa0, fa1
+; RV32D-NEXT:    ret
+entry:
+  %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict")
+  ret i1 %cmp
+}
+
+define i1 @f_lts_f32(float %a, float %b) strictfp nounwind {
+; RV64D-LABEL: f_lts_f32:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    flt.s a0, fa0, fa1
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_lts_f32:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    flt.s a0, fa0, fa1
+; RV32D-NEXT:    ret
+entry:
+  %cmp = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict")
+  ret i1 %cmp
+}
+
+define i1 @f_leq_f64(double %a, double %b) strictfp nounwind {
+; RV64D-LABEL: f_leq_f64:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    fle.d a0, fa0, fa1
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_leq_f64:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    fle.d a0, fa0, fa1
+; RV32D-NEXT:    ret
+entry:
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict")
+  ret i1 %cmp
+}
+
+define i1 @f_leqs_f64(double %a, double %b) strictfp nounwind {
+; RV64D-LABEL: f_leqs_f64:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    fle.d a0, fa0, fa1
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_leqs_f64:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    fle.d a0, fa0, fa1
+; RV32D-NEXT:    ret
+entry:
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict")
+  ret i1 %cmp
+}
+
+define i1 @f_leq_f32(float %a, float %b) strictfp nounwind {
+; RV64D-LABEL: f_leq_f32:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    fle.s a0, fa0, fa1
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_leq_f32:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    fle.s a0, fa0, fa1
+; RV32D-NEXT:    ret
+entry:
+  %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict")
+  ret i1 %cmp
+}
+
+define i1 @f_leqs_f32(float %a, float %b) strictfp nounwind {
+; RV64D-LABEL: f_leqs_f32:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    fle.s a0, fa0, fa1
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_leqs_f32:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    fle.s a0, fa0, fa1
+; RV32D-NEXT:    ret
+entry:
+  %cmp = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict")
+  ret i1 %cmp
+}
+
+define i1 @f_gt_f64(double %a, double %b) strictfp nounwind {
+; RV64D-LABEL: f_gt_f64:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    flt.d a0, fa1, fa0
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_gt_f64:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    flt.d a0, fa1, fa0
+; RV32D-NEXT:    ret
+entry:
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict")
+  ret i1 %cmp
+}
+
+define i1 @f_gts_f64(double %a, double %b) strictfp nounwind {
+; RV64D-LABEL: f_gts_f64:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    flt.d a0, fa1, fa0
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_gts_f64:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    flt.d a0, fa1, fa0
+; RV32D-NEXT:    ret
+entry:
+  %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict")
+  ret i1 %cmp
+}
+
+
+define i1 @f_gt_f32(float %a, float %b) strictfp nounwind {
+; RV64D-LABEL: f_gt_f32:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    flt.s a0, fa1, fa0
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_gt_f32:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    flt.s a0, fa1, fa0
+; RV32D-NEXT:    ret
+entry:
+  %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict")
+  ret i1 %cmp
+}
+
+define i1 @f_gts_f32(float %a, float %b) strictfp nounwind {
+; RV64D-LABEL: f_gts_f32:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    flt.s a0, fa1, fa0
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_gts_f32:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    flt.s a0, fa1, fa0
+; RV32D-NEXT:    ret
+entry:
+  %cmp = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict")
+  ret i1 %cmp
+}
+
+define i1 @f_geq_f64(double %a, double %b) strictfp nounwind {
+; RV64D-LABEL: f_geq_f64:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    fle.d a0, fa1, fa0
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_geq_f64:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    fle.d a0, fa1, fa0
+; RV32D-NEXT:    ret
+entry:
+  %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict")
metadata !"oge", metadata !"fpexcept.strict") + ret i1 %cmp +} + +define i1 @f_geqs_f64(double %a, double %b) strictfp nounwind { +; RV64D-LABEL: f_geqs_f64: +; RV64D: # %bb.0: # %entry +; RV64D-NEXT: fle.d a0, fa1, fa0 +; RV64D-NEXT: ret +; +; RV32D-LABEL: f_geqs_f64: +; RV32D: # %bb.0: # %entry +; RV32D-NEXT: fle.d a0, fa1, fa0 +; RV32D-NEXT: ret +entry: + %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict") + ret i1 %cmp +} + +define i1 @f_geq_f32(float %a, float %b) strictfp nounwind { +; RV64D-LABEL: f_geq_f32: +; RV64D: # %bb.0: # %entry +; RV64D-NEXT: fle.s a0, fa1, fa0 +; RV64D-NEXT: ret +; +; RV32D-LABEL: f_geq_f32: +; RV32D: # %bb.0: # %entry +; RV32D-NEXT: fle.s a0, fa1, fa0 +; RV32D-NEXT: ret +entry: + %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") + ret i1 %cmp +} + +define i1 @f_geqs_f32(float %a, float %b) strictfp nounwind { +; RV64D-LABEL: f_geqs_f32: +; RV64D: # %bb.0: # %entry +; RV64D-NEXT: fle.s a0, fa1, fa0 +; RV64D-NEXT: ret +; +; RV32D-LABEL: f_geqs_f32: +; RV32D: # %bb.0: # %entry +; RV32D-NEXT: fle.s a0, fa1, fa0 +; RV32D-NEXT: ret +entry: + %cmp = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict") + ret i1 %cmp +} + +define i1 @f_eq_f64(double %a, double %b) strictfp nounwind { +; RV64D-LABEL: f_eq_f64: +; RV64D: # %bb.0: # %entry +; RV64D-NEXT: feq.d a0, fa0, fa1 +; RV64D-NEXT: ret +; +; RV32D-LABEL: f_eq_f64: +; RV32D: # %bb.0: # %entry +; RV32D-NEXT: feq.d a0, fa0, fa1 +; RV32D-NEXT: ret +entry: + %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") + ret i1 %cmp +} + +define i1 @f_eqs_f64(double %a, double %b) strictfp nounwind { +; RV64D-LABEL: f_eqs_f64: +; RV64D: # %bb.0: # %entry +; RV64D-NEXT: feq.d a0, fa0, fa1 +; RV64D-NEXT: ret +; +; RV32D-LABEL: f_eqs_f64: +; RV32D: # %bb.0: # %entry +; RV32D-NEXT: feq.d a0, fa0, fa1 +; RV32D-NEXT: ret +entry: + %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") + ret i1 %cmp +} + +define i1 @f_eq_f32(float %a, float %b) strictfp nounwind { +; RV64D-LABEL: f_eq_f32: +; RV64D: # %bb.0: # %entry +; RV64D-NEXT: feq.s a0, fa0, fa1 +; RV64D-NEXT: ret +; +; RV32D-LABEL: f_eq_f32: +; RV32D: # %bb.0: # %entry +; RV32D-NEXT: feq.s a0, fa0, fa1 +; RV32D-NEXT: ret +entry: + %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") + ret i1 %cmp +} + +define i1 @f_eqs_f32(float %a, float %b) strictfp nounwind { +; RV64D-LABEL: f_eqs_f32: +; RV64D: # %bb.0: # %entry +; RV64D-NEXT: feq.s a0, fa0, fa1 +; RV64D-NEXT: ret +; +; RV32D-LABEL: f_eqs_f32: +; RV32D: # %bb.0: # %entry +; RV32D-NEXT: feq.s a0, fa0, fa1 +; RV32D-NEXT: ret +entry: + %cmp = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict") + ret i1 %cmp +} + +define i1 @f_neq_f64(double %a, double %b) strictfp nounwind { +; RV64D-LABEL: f_neq_f64: +; RV64D: # %bb.0: # %entry +; RV64D-NEXT: feq.d a0, fa0, fa1 +; RV64D-NEXT: xori a0, a0, 1 +; RV64D-NEXT: ret +; +; RV32D-LABEL: f_neq_f64: +; RV32D: # %bb.0: # %entry +; RV32D-NEXT: feq.d a0, fa0, fa1 +; RV32D-NEXT: xori a0, a0, 1 +; RV32D-NEXT: ret +entry: + %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, 
metadata !"une", metadata !"fpexcept.strict") + ret i1 %cmp +} + +define i1 @f_neqs_f64(double %a, double %b) strictfp nounwind { +; RV64D-LABEL: f_neqs_f64: +; RV64D: # %bb.0: # %entry +; RV64D-NEXT: feq.d a0, fa0, fa1 +; RV64D-NEXT: xori a0, a0, 1 +; RV64D-NEXT: ret +; +; RV32D-LABEL: f_neqs_f64: +; RV32D: # %bb.0: # %entry +; RV32D-NEXT: feq.d a0, fa0, fa1 +; RV32D-NEXT: xori a0, a0, 1 +; RV32D-NEXT: ret +entry: + %cmp = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"une", metadata !"fpexcept.strict") + ret i1 %cmp +} + +define i1 @f_neq_f32(float %a, float %b) strictfp nounwind { +; RV64D-LABEL: f_neq_f32: +; RV64D: # %bb.0: # %entry +; RV64D-NEXT: feq.s a0, fa0, fa1 +; RV64D-NEXT: xori a0, a0, 1 +; RV64D-NEXT: ret +; +; RV32D-LABEL: f_neq_f32: +; RV32D: # %bb.0: # %entry +; RV32D-NEXT: feq.s a0, fa0, fa1 +; RV32D-NEXT: xori a0, a0, 1 +; RV32D-NEXT: ret +entry: + %cmp = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") + ret i1 %cmp +} + +define i1 @f_neqs_f32(float %a, float %b) strictfp nounwind { +; RV64D-LABEL: f_neqs_f32: +; RV64D: # %bb.0: # %entry +; RV64D-NEXT: feq.s a0, fa0, fa1 +; RV64D-NEXT: xori a0, a0, 1 +; RV64D-NEXT: ret +; +; RV32D-LABEL: f_neqs_f32: +; RV32D: # %bb.0: # %entry +; RV32D-NEXT: feq.s a0, fa0, fa1 +; RV32D-NEXT: xori a0, a0, 1 +; RV32D-NEXT: ret +entry: + %cmp = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"une", metadata !"fpexcept.strict") + ret i1 %cmp +} + +declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata) +declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata) + +define i32 @f_fptosi32(double %a) strictfp nounwind { +; RV64D-LABEL: f_fptosi32: +; RV64D: # %bb.0: # %entry +; RV64D-NEXT: fcvt.l.d a0, fa0, rtz +; RV64D-NEXT: ret +; +; RV32D-LABEL: f_fptosi32: +; RV32D: # %bb.0: # %entry +; RV32D-NEXT: fcvt.w.d a0, fa0, rtz +; RV32D-NEXT: ret +entry: + %conv = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict") + ret i32 %conv +} + +define i32 @f_fptoui32(double %a) strictfp nounwind { +; RV64D-LABEL: f_fptoui32: +; RV64D: # %bb.0: # %entry +; RV64D-NEXT: fcvt.lu.d a0, fa0, rtz +; RV64D-NEXT: ret +; +; RV32D-LABEL: f_fptoui32: +; RV32D: # %bb.0: # %entry +; RV32D-NEXT: fcvt.wu.d a0, fa0, rtz +; RV32D-NEXT: ret +entry: + %conv = tail call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict") + ret i32 %conv +} + +declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata) +declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata) +define i64 @f_fptosi64(double %a) { +; RV64D-LABEL: f_fptosi64: +; RV64D: # %bb.0: # %entry +; RV64D-NEXT: fcvt.l.d a0, fa0, rtz +; RV64D-NEXT: ret +; +; RV32D-LABEL: f_fptosi64: +; RV32D: # %bb.0: # %entry +; RV32D-NEXT: addi sp, sp, -16 +; RV32D-NEXT: .cfi_def_cfa_offset 16 +; RV32D-NEXT: sw ra, 12(sp) +; RV32D-NEXT: .cfi_offset ra, -4 +; RV32D-NEXT: call __fixdfdi +; RV32D-NEXT: lw ra, 12(sp) +; RV32D-NEXT: addi sp, sp, 16 +; RV32D-NEXT: ret +entry: + %conv = tail call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %a, metadata !"fpexcept.strict") + ret i64 %conv +} + +define i64 @f_fptoui64(double %a) { +; RV64D-LABEL: f_fptoui64: +; RV64D: # %bb.0: # %entry +; RV64D-NEXT: fcvt.lu.d a0, fa0, rtz +; RV64D-NEXT: ret +; +; RV32D-LABEL: f_fptoui64: +; RV32D: # %bb.0: # %entry +; RV32D-NEXT: addi sp, sp, -16 +; 
+; RV32D-NEXT:    sw ra, 12(sp)
+; RV32D-NEXT:    .cfi_offset ra, -4
+; RV32D-NEXT:    call __fixunsdfdi
+; RV32D-NEXT:    lw ra, 12(sp)
+; RV32D-NEXT:    addi sp, sp, 16
+; RV32D-NEXT:    ret
+entry:
+  %conv = tail call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %a, metadata !"fpexcept.strict")
+  ret i64 %conv
+}
+
+declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
+declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
+
+define i32 @f_fptosi_f32_i32(float %a) strictfp nounwind {
+; RV64D-LABEL: f_fptosi_f32_i32:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    fcvt.l.s a0, fa0, rtz
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_fptosi_f32_i32:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    fcvt.w.s a0, fa0, rtz
+; RV32D-NEXT:    ret
+entry:
+  %conv = tail call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.strict")
+  ret i32 %conv
+}
+
+define i32 @f_fptoui_f32_i32(float %a) strictfp nounwind {
+; RV64D-LABEL: f_fptoui_f32_i32:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    fcvt.lu.s a0, fa0, rtz
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_fptoui_f32_i32:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    fcvt.wu.s a0, fa0, rtz
+; RV32D-NEXT:    ret
+entry:
+  %conv = tail call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %a, metadata !"fpexcept.strict")
+  ret i32 %conv
+}
+
+declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)
+declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)
+
+define i64 @f_fptosi_f32_i64(float %a) strictfp nounwind {
+; RV64D-LABEL: f_fptosi_f32_i64:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    fcvt.l.s a0, fa0, rtz
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_fptosi_f32_i64:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    addi sp, sp, -16
+; RV32D-NEXT:    sw ra, 12(sp)
+; RV32D-NEXT:    call __fixsfdi
+; RV32D-NEXT:    lw ra, 12(sp)
+; RV32D-NEXT:    addi sp, sp, 16
+; RV32D-NEXT:    ret
+entry:
+  %conv = tail call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %a, metadata !"fpexcept.strict")
+  ret i64 %conv
+}
+
+define i64 @f_fptoui_f32_i64(float %a) strictfp nounwind {
+; RV64D-LABEL: f_fptoui_f32_i64:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    fcvt.lu.s a0, fa0, rtz
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_fptoui_f32_i64:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    addi sp, sp, -16
+; RV32D-NEXT:    sw ra, 12(sp)
+; RV32D-NEXT:    call __fixunssfdi
+; RV32D-NEXT:    lw ra, 12(sp)
+; RV32D-NEXT:    addi sp, sp, 16
+; RV32D-NEXT:    ret
+entry:
+  %conv = tail call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %a, metadata !"fpexcept.strict")
+  ret i64 %conv
+}
+
+declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
+declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
+
+define double @f_sitofp_i32_f64(i32 %a) strictfp nounwind {
+; RV64D-LABEL: f_sitofp_i32_f64:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    fcvt.d.w fa0, a0
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_sitofp_i32_f64:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    fcvt.d.w fa0, a0
+; RV32D-NEXT:    ret
+entry:
+  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret double %conv
+}
+
+define double @f_uitofp_i32_f64(i32 %a) strictfp nounwind {
+; RV64D-LABEL: f_uitofp_i32_f64:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    fcvt.d.wu fa0, a0
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_uitofp_i32_f64:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    fcvt.d.wu fa0, a0
+; RV32D-NEXT:    ret
+entry:
+  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret double %conv
+}
+
+declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
+declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
+
+define double @f_sitofp_i64_f64(i64 %a) strictfp nounwind {
+; RV64D-LABEL: f_sitofp_i64_f64:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    fcvt.d.l fa0, a0
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_sitofp_i64_f64:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    addi sp, sp, -16
+; RV32D-NEXT:    sw ra, 12(sp)
+; RV32D-NEXT:    call __floatdidf
+; RV32D-NEXT:    lw ra, 12(sp)
+; RV32D-NEXT:    addi sp, sp, 16
+; RV32D-NEXT:    ret
+entry:
+  %conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret double %conv
+}
+
+define double @f_uitofp_i64_f64(i64 %a) strictfp nounwind {
+; RV64D-LABEL: f_uitofp_i64_f64:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    fcvt.d.lu fa0, a0
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_uitofp_i64_f64:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    addi sp, sp, -16
+; RV32D-NEXT:    sw ra, 12(sp)
+; RV32D-NEXT:    call __floatundidf
+; RV32D-NEXT:    lw ra, 12(sp)
+; RV32D-NEXT:    addi sp, sp, 16
+; RV32D-NEXT:    ret
+entry:
+  %conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret double %conv
+}
+
+declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
+declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
+
+define float @f_sitofp_i64_f32(i64 %a) strictfp nounwind {
+; RV64D-LABEL: f_sitofp_i64_f32:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    fcvt.s.l fa0, a0
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_sitofp_i64_f32:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    addi sp, sp, -16
+; RV32D-NEXT:    sw ra, 12(sp)
+; RV32D-NEXT:    call __floatdisf
+; RV32D-NEXT:    lw ra, 12(sp)
+; RV32D-NEXT:    addi sp, sp, 16
+; RV32D-NEXT:    ret
+entry:
+  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret float %conv
+}
+
+define float @f_uitofp_i64_f32(i64 %a) strictfp nounwind {
+; RV64D-LABEL: f_uitofp_i64_f32:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    fcvt.s.lu fa0, a0
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_uitofp_i64_f32:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    addi sp, sp, -16
+; RV32D-NEXT:    sw ra, 12(sp)
+; RV32D-NEXT:    call __floatundisf
+; RV32D-NEXT:    lw ra, 12(sp)
+; RV32D-NEXT:    addi sp, sp, 16
+; RV32D-NEXT:    ret
+entry:
+  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret float %conv
+}
+
+declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
+declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
+
+define float @f_sitofp_i32_f32(i32 %a) strictfp nounwind {
+; RV64D-LABEL: f_sitofp_i32_f32:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    fcvt.s.w fa0, a0
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_sitofp_i32_f32:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    fcvt.s.w fa0, a0
+; RV32D-NEXT:    ret
+entry:
+  %conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret float %conv
+}
+
+define float @f_uitofp_i32_f32(i32 %a) strictfp nounwind {
+; RV64D-LABEL: f_uitofp_i32_f32:
+; RV64D:       # %bb.0: # %entry
+; RV64D-NEXT:    fcvt.s.wu fa0, a0
+; RV64D-NEXT:    ret
+;
+; RV32D-LABEL: f_uitofp_i32_f32:
+; RV32D:       # %bb.0: # %entry
+; RV32D-NEXT:    fcvt.s.wu fa0, a0
+; RV32D-NEXT:    ret
+entry:
+  %conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret float %conv
+}
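
Note (reviewer sketch, not part of the patch): a minimal C reproducer for the new lowering, assuming a clang build where strict FP is wired up for this target; -ffp-exception-behavior=strict is clang's documented entry point, but whether the frontend accepts it for RISC-V at the time of this patch is an assumption. Under strict exception semantics, C relational operators are signaling compares (lowered via STRICT_FSETCCS / constrained.fcmps) while equality is a quiet compare (STRICT_FSETCC / constrained.fcmp), so the functions below should exercise both code paths plus a strict conversion:

    /* strict_cmp.c -- hypothetical driver invocation:
     *   clang --target=riscv64 -march=rv64gc -mabi=lp64d \
     *         -ffp-exception-behavior=strict -O1 -S strict_cmp.c
     */
    int lt(double a, double b) {
      return a < b;    /* signaling: constrained.fcmps "olt" -> flt.d */
    }
    int eq(double a, double b) {
      return a == b;   /* quiet: constrained.fcmp "oeq" -> feq.d */
    }
    long toint(double a) {
      return (long)a;  /* constrained.fptosi -> fcvt.l.d ..., rtz on rv64 */
    }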