diff --git a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
--- a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
@@ -249,6 +249,11 @@
 def : Pat<(f32 (sint_to_fp GPR:$src)), (FFINT_S_W (MOVGR2FR_W GPR:$src))>;
 } // Predicates = [HasBasicF, IsLA64]
 
+// FP Rounding
+let Predicates = [HasBasicF, IsLA64] in {
+def : PatFpr<frint, FRINT_S, FPR32>;
+} // Predicates = [HasBasicF, IsLA64]
+
 let Predicates = [HasBasicF, IsLA32] in {
 // GPR -> FPR
 def : Pat<(bitconvert (i32 GPR:$src)), (MOVGR2FR_W GPR:$src)>;
diff --git a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
--- a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
@@ -259,6 +259,11 @@
 def : Pat<(bitconvert FPR64:$src), (MOVFR2GR_D FPR64:$src)>;
 } // Predicates = [HasBasicD, IsLA64]
 
+// FP Rounding
+let Predicates = [HasBasicD, IsLA64] in {
+def : PatFpr<frint, FRINT_D, FPR64>;
+} // Predicates = [HasBasicD, IsLA64]
+
 let Predicates = [HasBasicD, IsLA32] in {
 def : Pat<(f64 fpimm0), (MOVGR2FRH_W (MOVGR2FR_W_64 R0), R0)>;
 def : Pat<(f64 fpimm0neg), (FNEG_D (MOVGR2FRH_W (MOVGR2FR_W_64 R0), R0))>;
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -80,6 +80,10 @@
     setOperationAction(ISD::CTLZ, MVT::i32, Custom);
     if (Subtarget.hasBasicF() && !Subtarget.hasBasicD())
       setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
+    if (Subtarget.hasBasicF())
+      setOperationAction(ISD::FRINT, MVT::f32, Legal);
+    if (Subtarget.hasBasicD())
+      setOperationAction(ISD::FRINT, MVT::f64, Legal);
   }
 
   // LA32 does not have REVB.2W and REVB.D due to the 64-bit operands, and
diff --git a/llvm/test/CodeGen/LoongArch/frint.ll b/llvm/test/CodeGen/LoongArch/frint.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/frint.ll
@@ -0,0 +1,79 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32F
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32D
+; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64F
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64D
+
+define float @rint_f32(float %f) nounwind {
+; LA32F-LABEL: rint_f32:
+; LA32F:       # %bb.0: # %entry
+; LA32F-NEXT:    addi.w $sp, $sp, -16
+; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT:    bl %plt(rintf)
+; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT:    addi.w $sp, $sp, 16
+; LA32F-NEXT:    ret
+;
+; LA32D-LABEL: rint_f32:
+; LA32D:       # %bb.0: # %entry
+; LA32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-NEXT:    bl %plt(rintf)
+; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-NEXT:    ret
+;
+; LA64F-LABEL: rint_f32:
+; LA64F:       # %bb.0: # %entry
+; LA64F-NEXT:    frint.s $fa0, $fa0
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: rint_f32:
+; LA64D:       # %bb.0: # %entry
+; LA64D-NEXT:    frint.s $fa0, $fa0
+; LA64D-NEXT:    ret
+entry:
+  %0 = tail call float @llvm.rint.f32(float %f)
+  ret float %0
+}
+
+declare float @llvm.rint.f32(float)
+
+define double @rint_f64(double %d) nounwind {
+; LA32F-LABEL: rint_f64:
+; LA32F:       # %bb.0: # %entry
+; LA32F-NEXT:    addi.w $sp, $sp, -16
+; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT:    bl %plt(rint)
+; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT:    addi.w $sp, $sp, 16
+; LA32F-NEXT:    ret
+;
+; LA32D-LABEL: rint_f64:
+; LA32D:       # %bb.0: # %entry
+; LA32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-NEXT:    bl %plt(rint)
+; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-NEXT:    ret
+;
+; LA64F-LABEL: rint_f64:
+; LA64F:       # %bb.0: # %entry
+; LA64F-NEXT:    addi.d $sp, $sp, -16
+; LA64F-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64F-NEXT:    bl %plt(rint)
+; LA64F-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64F-NEXT:    addi.d $sp, $sp, 16
+; LA64F-NEXT:    ret
+;
+; LA64D-LABEL: rint_f64:
+; LA64D:       # %bb.0: # %entry
+; LA64D-NEXT:    frint.d $fa0, $fa0
+; LA64D-NEXT:    ret
entry:
+  %0 = tail call double @llvm.rint.f64(double %d)
+  ret double %0
+}
+
+declare double @llvm.rint.f64(double)