diff --git a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
--- a/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat32InstrInfo.td
@@ -165,6 +165,7 @@
 def : PatFPSetcc<SETULE, FCMP_CULE_S, FPR32>;
 def : PatFPSetcc<SETUNE, FCMP_CUNE_S, FPR32>;
 def : PatFPSetcc<SETUO,  FCMP_CUN_S,  FPR32>;
+def : PatFPSetcc<SETLT,  FCMP_CLT_S,  FPR32>;
 
 // TODO: Match signaling comparison strict_fsetccs with FCMP_S*_S instructions.
 
diff --git a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
--- a/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchFloat64InstrInfo.td
@@ -169,6 +169,7 @@
 def : PatFPSetcc<SETULE, FCMP_CULE_D, FPR64>;
 def : PatFPSetcc<SETUNE, FCMP_CUNE_D, FPR64>;
 def : PatFPSetcc<SETUO,  FCMP_CUN_D,  FPR64>;
+def : PatFPSetcc<SETLT,  FCMP_CLT_D,  FPR64>;
 
 // TODO: Match signaling comparison strict_fsetccs with FCMP_S*_D instructions.
 
@@ -222,6 +223,9 @@
                               (FFINT_D_W (MOVGR2FR_W GPR:$src))>;
 def : Pat<(f64 (sint_to_fp GPR:$src)), (FFINT_D_L (MOVGR2FR_D GPR:$src))>;
 
+def : Pat<(f64 (uint_to_fp (i64 (zexti32 (i64 GPR:$src))))),
+          (FFINT_D_W (MOVGR2FR_W GPR:$src))>;
+
 def : Pat<(bitconvert GPR:$src), (MOVGR2FR_D GPR:$src)>;
 
 // Convert FP to int
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
--- a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.h
@@ -49,6 +49,7 @@
   }
 
   bool selectSExti32(SDValue N, SDValue &Val);
+  bool selectZExti32(SDValue N, SDValue &Val);
 
   // Include the pieces autogenerated from the target description.
 #include "LoongArchGenDAGISel.inc"
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
--- a/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelDAGToDAG.cpp
@@ -161,6 +161,24 @@
   return false;
 }
 
+bool LoongArchDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
+  if (N.getOpcode() == ISD::AND) {
+    auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
+    if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
+      Val = N.getOperand(0);
+      return true;
+    }
+  }
+  MVT VT = N.getSimpleValueType();
+  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
+  if (CurDAG->MaskedValueIsZero(N, Mask)) {
+    Val = N;
+    return true;
+  }
+
+  return false;
+}
+
 // This pass converts a legalized DAG into a LoongArch-specific DAG, ready
 // for instruction scheduling.
 FunctionPass *llvm::createLoongArchISelDag(LoongArchTargetMachine &TM) {
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -105,6 +105,7 @@
   SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
 
   bool isFPImmLegal(const APFloat &Imm, EVT VT,
                     bool ForCodeSize) const override;
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -60,6 +60,8 @@
     setOperationAction(ISD::SRL, MVT::i32, Custom);
     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
+    if (Subtarget.hasBasicF() && !Subtarget.hasBasicD())
+      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
   }
 
   static const ISD::CondCode FPCCToExpand[] = {ISD::SETOGT, ISD::SETOGE,
@@ -80,10 +82,12 @@
   setOperationAction(ISD::SELECT_CC, GRLenVT, Expand);
   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
   setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, GRLenVT, Expand);
-  if (!Subtarget.is64Bit())
     setLibcallName(RTLIB::MUL_I128, nullptr);
 
+  setOperationAction(ISD::FP_TO_UINT, GRLenVT, Custom);
+  setOperationAction(ISD::UINT_TO_FP, GRLenVT, Custom);
+
   // Compute derived properties from the register classes.
   computeRegisterProperties(STI.getRegisterInfo());
@@ -125,9 +129,30 @@
     return lowerFP_TO_SINT(Op, DAG);
   case ISD::BITCAST:
     return lowerBITCAST(Op, DAG);
+  case ISD::FP_TO_UINT:
+    return SDValue();
+  case ISD::UINT_TO_FP:
+    return lowerUINT_TO_FP(Op, DAG);
   }
 }
 
+SDValue LoongArchTargetLowering::lowerUINT_TO_FP(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+
+  SDLoc DL(Op);
+  auto &TLI = DAG.getTargetLoweringInfo();
+  SDValue Tmp1, Tmp2;
+  SDValue Op1 = Op.getOperand(0);
+  if (Op1->getOpcode() == ISD::AssertZext ||
+      Op1->getOpcode() == ISD::AssertSext)
+    return Op;
+  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op.getOperand(0));
+  SDValue Res = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f64, Trunc);
+  SDNode *N = Res.getNode();
+  TLI.expandUINT_TO_FP(N, Tmp1, Tmp2, DAG);
+  return Tmp1;
+}
+
 SDValue LoongArchTargetLowering::lowerBITCAST(SDValue Op,
                                               SelectionDAG &DAG) const {
@@ -359,6 +384,15 @@
     }
     break;
   }
+  case ISD::FP_TO_UINT: {
+    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+           "Unexpected custom legalisation");
+    auto &TLI = DAG.getTargetLoweringInfo();
+    SDValue Tmp1, Tmp2;
+    TLI.expandFP_TO_UINT(N, Tmp1, Tmp2, DAG);
+    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Tmp1));
+    break;
+  }
   }
 }
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -591,6 +591,7 @@
 def shiftMask32 : ComplexPattern<i64, 1, "selectShiftMask32", [], [], 0>;
 def sexti32 : ComplexPattern<i64, 1, "selectSExti32">;
+def zexti32 : ComplexPattern<i64, 1, "selectZExti32">;
 
 class shiftop<SDPatternOperator operator>
   : PatFrag<(ops node:$val, node:$count),
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/double-convert.ll
@@ -113,6 +113,35 @@
   ret i32 %1
 }
 
+define i32 @convert_double_to_u32(double %a) nounwind {
+; LA32-LABEL: convert_double_to_u32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    pcalau12i $a0, .LCPI7_0
+; LA32-NEXT:    addi.w $a0, $a0, .LCPI7_0
+; LA32-NEXT:    fld.d $fa1, $a0, 0
+; LA32-NEXT:    fsub.d $fa2, $fa0, $fa1
+; LA32-NEXT:    ftintrz.w.d $fa2, $fa2
+; LA32-NEXT:    movfr2gr.s $a0, $fa2
+; LA32-NEXT:    lu12i.w $a1, -524288
+; LA32-NEXT:    xor $a0, $a0, $a1
+; LA32-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
+; LA32-NEXT:    movcf2gr $a1, $fcc0
+; LA32-NEXT:    masknez $a0, $a0, $a1
+; LA32-NEXT:    ftintrz.w.d $fa0, $fa0
+; LA32-NEXT:    movfr2gr.s $a2, $fa0
+; LA32-NEXT:    maskeqz $a1, $a2, $a1
+; LA32-NEXT:    or $a0, $a1, $a0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_double_to_u32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ftintrz.l.d $fa0, $fa0
+; LA64-NEXT:    movfr2gr.d $a0, $fa0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = fptoui double %a to i32
+  ret i32 %1
+}
+
 define i64 @convert_double_to_i64(double %a) nounwind {
 ; LA32-LABEL: convert_double_to_i64:
 ; LA32:       # %bb.0:
@@ -132,27 +161,136 @@
   ret i64 %1
 }
 
-define i64 @bitcast_double_to_i64(double %a) nounwind {
-; LA32-LABEL: bitcast_double_to_i64:
+define i64 @convert_double_to_u64(double %a) nounwind {
+; LA32-LABEL: convert_double_to_u64:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    fst.d $fa0, $sp, 8
-; LA32-NEXT:    addi.w $a0, $sp, 8
-; LA32-NEXT:    ori $a0, $a0, 4
-; LA32-NEXT:    ld.w $a1, $a0, 0
-; LA32-NEXT:    ld.w $a0, $sp, 8
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    bl __fixunsdfdi
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
 ; LA32-NEXT:    addi.w $sp, $sp, 16
 ; LA32-NEXT:    jirl $zero, $ra, 0
 ;
-; LA64-LABEL: bitcast_double_to_i64:
+; LA64-LABEL: convert_double_to_u64:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    movfr2gr.d $a0, $fa0
+; LA64-NEXT:    pcalau12i $a0, .LCPI9_0
+; LA64-NEXT:    addi.d $a0, $a0, .LCPI9_0
+; LA64-NEXT:    fld.d $fa1, $a0, 0
+; LA64-NEXT:    fsub.d $fa2, $fa0, $fa1
+; LA64-NEXT:    ftintrz.l.d $fa2, $fa2
+; LA64-NEXT:    movfr2gr.d $a0, $fa2
+; LA64-NEXT:    lu52i.d $a1, $zero, -2048
+; LA64-NEXT:    xor $a0, $a0, $a1
+; LA64-NEXT:    fcmp.clt.d $fcc0, $fa0, $fa1
+; LA64-NEXT:    movcf2gr $a1, $fcc0
+; LA64-NEXT:    masknez $a0, $a0, $a1
+; LA64-NEXT:    ftintrz.l.d $fa0, $fa0
+; LA64-NEXT:    movfr2gr.d $a2, $fa0
+; LA64-NEXT:    maskeqz $a1, $a2, $a1
+; LA64-NEXT:    or $a0, $a1, $a0
 ; LA64-NEXT:    jirl $zero, $ra, 0
-  %1 = bitcast double %a to i64
+  %1 = fptoui double %a to i64
   ret i64 %1
 }
 
-define double @bitcast_i64_to_double(i64 %a) nounwind {
+define double @convert_u8_to_double(i8 zeroext %a) nounwind {
+; LA32-LABEL: convert_u8_to_double:
+; LA32:       # %bb.0:
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
+; LA32-NEXT:    ffint.d.w $fa0, $fa0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_u8_to_double:
+; LA64:       # %bb.0:
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    ffint.d.w $fa0, $fa0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = uitofp i8 %a to double
+  ret double %1
+}
+
+define double @convert_u16_to_double(i16 zeroext %a) nounwind {
+; LA32-LABEL: convert_u16_to_double:
+; LA32:       # %bb.0:
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
+; LA32-NEXT:    ffint.d.w $fa0, $fa0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_u16_to_double:
+; LA64:       # %bb.0:
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    ffint.d.w $fa0, $fa0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = uitofp i16 %a to double
+  ret double %1
+}
+
+define double @convert_u32_to_double(i32 %a) nounwind {
+; LA32-LABEL: convert_u32_to_double:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    addi.w $a1, $sp, 8
+; LA32-NEXT:    ori $a1, $a1, 4
+; LA32-NEXT:    lu12i.w $a2, 275200
+; LA32-NEXT:    st.w $a2, $a1, 0
+; LA32-NEXT:    st.w $a0, $sp, 8
+; LA32-NEXT:    pcalau12i $a0, .LCPI12_0
+; LA32-NEXT:    addi.w $a0, $a0, .LCPI12_0
+; LA32-NEXT:    fld.d $fa0, $a0, 0
+; LA32-NEXT:    fld.d $fa1, $sp, 8
+; LA32-NEXT:    fsub.d $fa0, $fa1, $fa0
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_u32_to_double:
+; LA64:       # %bb.0:
+; LA64-NEXT:    lu52i.d $a1, $zero, 1107
+; LA64-NEXT:    movgr2fr.d $fa0, $a1
+; LA64-NEXT:    pcalau12i $a1, .LCPI12_0
+; LA64-NEXT:    addi.d $a1, $a1, .LCPI12_0
+; LA64-NEXT:    fld.d $fa1, $a1, 0
+; LA64-NEXT:    fsub.d $fa0, $fa0, $fa1
+; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT:    lu52i.d $a1, $zero, 1075
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.d $fa1, $a0
+; LA64-NEXT:    fadd.d $fa0, $fa1, $fa0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = uitofp i32 %a to double
+  ret double %1
+}
+
+define double @convert_u64_to_double(i64 %a) nounwind {
+; LA32-LABEL: convert_u64_to_double:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    bl __floatundidf
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: convert_u64_to_double:
+; LA64:       # %bb.0:
+; LA64-NEXT:    srli.d $a1, $a0, 32
+; LA64-NEXT:    lu52i.d $a2, $zero, 1107
+; LA64-NEXT:    or $a1, $a1, $a2
+; LA64-NEXT:    movgr2fr.d $fa0, $a1
+; LA64-NEXT:    pcalau12i $a1, .LCPI13_0
+; LA64-NEXT:    addi.d $a1, $a1, .LCPI13_0
+; LA64-NEXT:    fld.d $fa1, $a1, 0
+; LA64-NEXT:    fsub.d $fa0, $fa0, $fa1
+; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT:    lu52i.d $a1, $zero, 1075
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.d $fa1, $a0
+; LA64-NEXT:    fadd.d $fa0, $fa1, $fa0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = uitofp i64 %a to double
+  ret double %1
+}
+
+define double @bitcast_i64_to_double(i64 %a, i64 %b) nounwind {
 ; LA32-LABEL: bitcast_i64_to_double:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    addi.w $sp, $sp, -16
@@ -171,3 +309,23 @@
   %1 = bitcast i64 %a to double
   ret double %1
 }
+
+define i64 @bitcast_double_to_i64(double %a) nounwind {
+; LA32-LABEL: bitcast_double_to_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    fst.d $fa0, $sp, 8
+; LA32-NEXT:    addi.w $a0, $sp, 8
+; LA32-NEXT:    ori $a0, $a0, 4
+; LA32-NEXT:    ld.w $a1, $a0, 0
+; LA32-NEXT:    ld.w $a0, $sp, 8
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: bitcast_double_to_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    movfr2gr.d $a0, $fa0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %1 = bitcast double %a to i64
+  ret i64 %1
+}
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/float-convert.ll
@@ -1,166 +1,650 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32
-; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64
+; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32F
+; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32D
+; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64F
+; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64D
 
 define signext i8 @convert_float_to_i8(float %a) nounwind {
-; LA32-LABEL: convert_float_to_i8:
-; LA32:       # %bb.0:
-; LA32-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA32-NEXT:    movfr2gr.s $a0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: convert_float_to_i8:
-; LA64:       # %bb.0:
-; LA64-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA64-NEXT:    movfr2gr.s $a0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: convert_float_to_i8:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_float_to_i8:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_float_to_i8:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_float_to_i8:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = fptosi float %a to i8
   ret i8 %1
 }
 
 define signext i16 @convert_float_to_i16(float %a) nounwind {
-; LA32-LABEL: convert_float_to_i16:
-; LA32:       # %bb.0:
-; LA32-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA32-NEXT:    movfr2gr.s $a0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: convert_float_to_i16:
-; LA64:       # %bb.0:
-; LA64-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA64-NEXT:    movfr2gr.s $a0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: convert_float_to_i16:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_float_to_i16:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_float_to_i16:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_float_to_i16:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = fptosi float %a to i16
   ret i16 %1
 }
 
 define i32 @convert_float_to_i32(float %a) nounwind {
-; LA32-LABEL: convert_float_to_i32:
-; LA32:       # %bb.0:
-; LA32-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA32-NEXT:    movfr2gr.s $a0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: convert_float_to_i32:
-; LA64:       # %bb.0:
-; LA64-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA64-NEXT:    movfr2gr.s $a0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: convert_float_to_i32:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_float_to_i32:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_float_to_i32:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_float_to_i32:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = fptosi float %a to i32
   ret i32 %1
 }
 
 define i64 @convert_float_to_i64(float %a) nounwind {
-; LA32-LABEL: convert_float_to_i64:
-; LA32:       # %bb.0:
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl __fixsfdi
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: convert_float_to_i64:
-; LA64:       # %bb.0:
-; LA64-NEXT:    ftintrz.w.s $fa0, $fa0
-; LA64-NEXT:    movfr2gr.s $a0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: convert_float_to_i64:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    addi.w $sp, $sp, -16
+; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT:    bl __fixsfdi
+; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT:    addi.w $sp, $sp, 16
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_float_to_i64:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-NEXT:    bl __fixsfdi
+; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_float_to_i64:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_float_to_i64:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = fptosi float %a to i64
   ret i64 %1
 }
 
+define zeroext i8 @convert_float_to_u8(float %a) nounwind {
+; LA32F-LABEL: convert_float_to_u8:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_float_to_u8:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_float_to_u8:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_float_to_u8:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
+  %1 = fptoui float %a to i8
+  ret i8 %1
+}
+
+define zeroext i16 @convert_float_to_u16(float %a) nounwind {
+; LA32F-LABEL: convert_float_to_u16:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_float_to_u16:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_float_to_u16:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_float_to_u16:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
+  %1 = fptoui float %a to i16
+  ret i16 %1
+}
+
+define i32 @convert_float_to_u32(float %a) nounwind {
+; LA32F-LABEL: convert_float_to_u32:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    pcalau12i $a0, .LCPI6_0
+; LA32F-NEXT:    addi.w $a0, $a0, .LCPI6_0
+; LA32F-NEXT:    fld.s $fa1, $a0, 0
+; LA32F-NEXT:    fsub.s $fa2, $fa0, $fa1
+; LA32F-NEXT:    ftintrz.w.s $fa2, $fa2
+; LA32F-NEXT:    movfr2gr.s $a0, $fa2
+; LA32F-NEXT:    lu12i.w $a1, -524288
+; LA32F-NEXT:    xor $a0, $a0, $a1
+; LA32F-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA32F-NEXT:    movcf2gr $a1, $fcc0
+; LA32F-NEXT:    masknez $a0, $a0, $a1
+; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32F-NEXT:    movfr2gr.s $a2, $fa0
+; LA32F-NEXT:    maskeqz $a1, $a2, $a1
+; LA32F-NEXT:    or $a0, $a1, $a0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_float_to_u32:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    pcalau12i $a0, .LCPI6_0
+; LA32D-NEXT:    addi.w $a0, $a0, .LCPI6_0
+; LA32D-NEXT:    fld.s $fa1, $a0, 0
+; LA32D-NEXT:    fsub.s $fa2, $fa0, $fa1
+; LA32D-NEXT:    ftintrz.w.s $fa2, $fa2
+; LA32D-NEXT:    movfr2gr.s $a0, $fa2
+; LA32D-NEXT:    lu12i.w $a1, -524288
+; LA32D-NEXT:    xor $a0, $a0, $a1
+; LA32D-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA32D-NEXT:    movcf2gr $a1, $fcc0
+; LA32D-NEXT:    masknez $a0, $a0, $a1
+; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA32D-NEXT:    movfr2gr.s $a2, $fa0
+; LA32D-NEXT:    maskeqz $a1, $a2, $a1
+; LA32D-NEXT:    or $a0, $a1, $a0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_float_to_u32:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    pcalau12i $a0, .LCPI6_0
+; LA64F-NEXT:    addi.d $a0, $a0, .LCPI6_0
+; LA64F-NEXT:    fld.s $fa1, $a0, 0
+; LA64F-NEXT:    fsub.s $fa2, $fa0, $fa1
+; LA64F-NEXT:    ftintrz.w.s $fa2, $fa2
+; LA64F-NEXT:    movfr2gr.s $a0, $fa2
+; LA64F-NEXT:    lu12i.w $a1, -524288
+; LA64F-NEXT:    xor $a0, $a0, $a1
+; LA64F-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA64F-NEXT:    movcf2gr $a1, $fcc0
+; LA64F-NEXT:    masknez $a0, $a0, $a1
+; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:    maskeqz $a1, $a2, $a1
+; LA64F-NEXT:    or $a0, $a1, $a0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_float_to_u32:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT:    movfr2gr.d $a0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
+  %1 = fptoui float %a to i32
+  ret i32 %1
+}
+
+define i64 @convert_float_to_u64(float %a) nounwind {
+; LA32F-LABEL: convert_float_to_u64:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    addi.w $sp, $sp, -16
+; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT:    bl __fixunssfdi
+; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT:    addi.w $sp, $sp, 16
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_float_to_u64:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-NEXT:    bl __fixunssfdi
+; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_float_to_u64:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    pcalau12i $a0, .LCPI7_0
+; LA64F-NEXT:    addi.d $a0, $a0, .LCPI7_0
+; LA64F-NEXT:    fld.s $fa1, $a0, 0
+; LA64F-NEXT:    fsub.s $fa2, $fa0, $fa1
+; LA64F-NEXT:    ftintrz.w.s $fa2, $fa2
+; LA64F-NEXT:    movfr2gr.s $a0, $fa2
+; LA64F-NEXT:    lu52i.d $a1, $zero, -2048
+; LA64F-NEXT:    xor $a0, $a0, $a1
+; LA64F-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA64F-NEXT:    movcf2gr $a1, $fcc0
+; LA64F-NEXT:    masknez $a0, $a0, $a1
+; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
+; LA64F-NEXT:    movfr2gr.s $a2, $fa0
+; LA64F-NEXT:    maskeqz $a1, $a2, $a1
+; LA64F-NEXT:    or $a0, $a1, $a0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_float_to_u64:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    pcalau12i $a0, .LCPI7_0
+; LA64D-NEXT:    addi.d $a0, $a0, .LCPI7_0
+; LA64D-NEXT:    fld.s $fa1, $a0, 0
+; LA64D-NEXT:    fsub.s $fa2, $fa0, $fa1
+; LA64D-NEXT:    ftintrz.l.s $fa2, $fa2
+; LA64D-NEXT:    movfr2gr.d $a0, $fa2
+; LA64D-NEXT:    lu52i.d $a1, $zero, -2048
+; LA64D-NEXT:    xor $a0, $a0, $a1
+; LA64D-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
+; LA64D-NEXT:    movcf2gr $a1, $fcc0
+; LA64D-NEXT:    masknez $a0, $a0, $a1
+; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
+; LA64D-NEXT:    movfr2gr.d $a2, $fa0
+; LA64D-NEXT:    maskeqz $a1, $a2, $a1
+; LA64D-NEXT:    or $a0, $a1, $a0
+; LA64D-NEXT:    jirl $zero, $ra, 0
+  %1 = fptoui float %a to i64
+  ret i64 %1
+}
+
 define float @convert_i8_to_float(i8 signext %a) nounwind {
-; LA32-LABEL: convert_i8_to_float:
-; LA32:       # %bb.0:
-; LA32-NEXT:    movgr2fr.w $fa0, $a0
-; LA32-NEXT:    ffint.s.w $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: convert_i8_to_float:
-; LA64:       # %bb.0:
-; LA64-NEXT:    movgr2fr.w $fa0, $a0
-; LA64-NEXT:    ffint.s.w $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: convert_i8_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-NEXT:    ffint.s.w $fa0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_i8_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-NEXT:    ffint.s.w $fa0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_i8_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-NEXT:    ffint.s.w $fa0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_i8_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-NEXT:    ffint.s.w $fa0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = sitofp i8 %a to float
   ret float %1
 }
 
 define float @convert_i16_to_float(i16 signext %a) nounwind {
-; LA32-LABEL: convert_i16_to_float:
-; LA32:       # %bb.0:
-; LA32-NEXT:    movgr2fr.w $fa0, $a0
-; LA32-NEXT:    ffint.s.w $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: convert_i16_to_float:
-; LA64:       # %bb.0:
-; LA64-NEXT:    movgr2fr.w $fa0, $a0
-; LA64-NEXT:    ffint.s.w $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: convert_i16_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-NEXT:    ffint.s.w $fa0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_i16_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-NEXT:    ffint.s.w $fa0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_i16_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-NEXT:    ffint.s.w $fa0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_i16_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-NEXT:    ffint.s.w $fa0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = sitofp i16 %a to float
   ret float %1
 }
 
 define float @convert_i32_to_float(i32 %a) nounwind {
-; LA32-LABEL: convert_i32_to_float:
-; LA32:       # %bb.0:
-; LA32-NEXT:    movgr2fr.w $fa0, $a0
-; LA32-NEXT:    ffint.s.w $fa0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: convert_i32_to_float:
-; LA64:       # %bb.0:
-; LA64-NEXT:    addi.w $a0, $a0, 0
-; LA64-NEXT:    movgr2fr.w $fa0, $a0
-; LA64-NEXT:    ffint.s.w $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: convert_i32_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-NEXT:    ffint.s.w $fa0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_i32_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-NEXT:    ffint.s.w $fa0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_i32_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    addi.w $a0, $a0, 0
+; LA64F-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-NEXT:    ffint.s.w $fa0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_i32_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    addi.w $a0, $a0, 0
+; LA64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-NEXT:    ffint.s.w $fa0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = sitofp i32 %a to float
   ret float %1
 }
 
 define float @convert_i64_to_float(i64 %a) nounwind {
-; LA32-LABEL: convert_i64_to_float:
-; LA32:       # %bb.0:
-; LA32-NEXT:    addi.w $sp, $sp, -16
-; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
-; LA32-NEXT:    bl __floatdisf
-; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
-; LA32-NEXT:    addi.w $sp, $sp, 16
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: convert_i64_to_float:
-; LA64:       # %bb.0:
-; LA64-NEXT:    movgr2fr.w $fa0, $a0
-; LA64-NEXT:    ffint.s.w $fa0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: convert_i64_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    addi.w $sp, $sp, -16
+; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT:    bl __floatdisf
+; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT:    addi.w $sp, $sp, 16
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_i64_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-NEXT:    bl __floatdisf
+; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_i64_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-NEXT:    ffint.s.w $fa0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_i64_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-NEXT:    ffint.s.w $fa0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = sitofp i64 %a to float
   ret float %1
 }
 
+define float @convert_u8_to_float(i8 zeroext %a) nounwind {
+; LA32F-LABEL: convert_u8_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-NEXT:    ffint.s.w $fa0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_u8_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-NEXT:    ffint.s.w $fa0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_u8_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-NEXT:    ffint.s.w $fa0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_u8_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-NEXT:    ffint.s.w $fa0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
+  %1 = uitofp i8 %a to float
+  ret float %1
+}
+
+define float @convert_u16_to_float(i16 zeroext %a) nounwind {
+; LA32F-LABEL: convert_u16_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-NEXT:    ffint.s.w $fa0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_u16_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-NEXT:    ffint.s.w $fa0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_u16_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-NEXT:    ffint.s.w $fa0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_u16_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-NEXT:    ffint.s.w $fa0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
+  %1 = uitofp i16 %a to float
+  ret float %1
+}
+
+define float @convert_u32_to_float(i32 %a) nounwind {
+; LA32F-LABEL: convert_u32_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    srli.w $a1, $a0, 1
+; LA32F-NEXT:    andi $a2, $a0, 1
+; LA32F-NEXT:    or $a1, $a2, $a1
+; LA32F-NEXT:    movgr2fr.w $fa0, $a1
+; LA32F-NEXT:    ffint.s.w $fa0, $fa0
+; LA32F-NEXT:    fadd.s $fa0, $fa0, $fa0
+; LA32F-NEXT:    slti $a1, $a0, 0
+; LA32F-NEXT:    movgr2cf $fcc0, $a1
+; LA32F-NEXT:    movgr2fr.w $fa1, $a0
+; LA32F-NEXT:    ffint.s.w $fa1, $fa1
+; LA32F-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_u32_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-NEXT:    addi.w $a1, $sp, 8
+; LA32D-NEXT:    ori $a1, $a1, 4
+; LA32D-NEXT:    lu12i.w $a2, 275200
+; LA32D-NEXT:    st.w $a2, $a1, 0
+; LA32D-NEXT:    st.w $a0, $sp, 8
+; LA32D-NEXT:    pcalau12i $a0, .LCPI14_0
+; LA32D-NEXT:    addi.w $a0, $a0, .LCPI14_0
+; LA32D-NEXT:    fld.d $fa0, $a0, 0
+; LA32D-NEXT:    fld.d $fa1, $sp, 8
+; LA32D-NEXT:    fsub.d $fa0, $fa1, $fa0
+; LA32D-NEXT:    fcvt.s.d $fa0, $fa0
+; LA32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_u32_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    bstrpick.d $a1, $a0, 31, 1
+; LA64F-NEXT:    andi $a2, $a0, 1
+; LA64F-NEXT:    or $a1, $a2, $a1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a1
+; LA64F-NEXT:    ffint.s.w $fa0, $fa0
+; LA64F-NEXT:    fadd.s $fa0, $fa0, $fa0
+; LA64F-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64F-NEXT:    slti $a1, $a0, 0
+; LA64F-NEXT:    movgr2cf $fcc0, $a1
+; LA64F-NEXT:    movgr2fr.w $fa1, $a0
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
+; LA64F-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_u32_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    bstrpick.d $a1, $a0, 31, 1
+; LA64D-NEXT:    andi $a2, $a0, 1
+; LA64D-NEXT:    or $a1, $a2, $a1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a1
+; LA64D-NEXT:    ffint.s.w $fa0, $fa0
+; LA64D-NEXT:    fadd.s $fa0, $fa0, $fa0
+; LA64D-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64D-NEXT:    slti $a1, $a0, 0
+; LA64D-NEXT:    movgr2cf $fcc0, $a1
+; LA64D-NEXT:    movgr2fr.w $fa1, $a0
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
+; LA64D-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64D-NEXT:    jirl $zero, $ra, 0
+  %1 = uitofp i32 %a to float
+  ret float %1
+}
+
+define float @convert_u64_to_float(i64 %a) nounwind {
+; LA32F-LABEL: convert_u64_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    addi.w $sp, $sp, -16
+; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32F-NEXT:    bl __floatundisf
+; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32F-NEXT:    addi.w $sp, $sp, 16
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: convert_u64_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    addi.w $sp, $sp, -16
+; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32D-NEXT:    bl __floatundisf
+; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32D-NEXT:    addi.w $sp, $sp, 16
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: convert_u64_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    srli.d $a1, $a0, 1
+; LA64F-NEXT:    andi $a2, $a0, 1
+; LA64F-NEXT:    or $a1, $a2, $a1
+; LA64F-NEXT:    movgr2fr.w $fa0, $a1
+; LA64F-NEXT:    ffint.s.w $fa0, $fa0
+; LA64F-NEXT:    fadd.s $fa0, $fa0, $fa0
+; LA64F-NEXT:    slti $a1, $a0, 0
+; LA64F-NEXT:    movgr2cf $fcc0, $a1
+; LA64F-NEXT:    movgr2fr.w $fa1, $a0
+; LA64F-NEXT:    ffint.s.w $fa1, $fa1
+; LA64F-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: convert_u64_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    srli.d $a1, $a0, 1
+; LA64D-NEXT:    andi $a2, $a0, 1
+; LA64D-NEXT:    or $a1, $a2, $a1
+; LA64D-NEXT:    movgr2fr.w $fa0, $a1
+; LA64D-NEXT:    ffint.s.w $fa0, $fa0
+; LA64D-NEXT:    fadd.s $fa0, $fa0, $fa0
+; LA64D-NEXT:    slti $a1, $a0, 0
+; LA64D-NEXT:    movgr2cf $fcc0, $a1
+; LA64D-NEXT:    movgr2fr.w $fa1, $a0
+; LA64D-NEXT:    ffint.s.w $fa1, $fa1
+; LA64D-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
+; LA64D-NEXT:    jirl $zero, $ra, 0
+  %1 = uitofp i64 %a to float
+  ret float %1
+}
+
 define i32 @bitcast_float_to_i32(float %a) nounwind {
-; LA32-LABEL: bitcast_float_to_i32:
-; LA32:       # %bb.0:
-; LA32-NEXT:    movfr2gr.s $a0, $fa0
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: bitcast_float_to_i32:
-; LA64:       # %bb.0:
-; LA64-NEXT:    movfr2gr.s $a0, $fa0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: bitcast_float_to_i32:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    movfr2gr.s $a0, $fa0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: bitcast_float_to_i32:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    movfr2gr.s $a0, $fa0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: bitcast_float_to_i32:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    movfr2gr.s $a0, $fa0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: bitcast_float_to_i32:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    movfr2gr.s $a0, $fa0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = bitcast float %a to i32
   ret i32 %1
 }
 
 define float @bitcast_i32_to_float(i32 %a) nounwind {
-; LA32-LABEL: bitcast_i32_to_float:
-; LA32:       # %bb.0:
-; LA32-NEXT:    movgr2fr.w $fa0, $a0
-; LA32-NEXT:    jirl $zero, $ra, 0
-;
-; LA64-LABEL: bitcast_i32_to_float:
-; LA64:       # %bb.0:
-; LA64-NEXT:    movgr2fr.w $fa0, $a0
-; LA64-NEXT:    jirl $zero, $ra, 0
+; LA32F-LABEL: bitcast_i32_to_float:
+; LA32F:       # %bb.0:
+; LA32F-NEXT:    movgr2fr.w $fa0, $a0
+; LA32F-NEXT:    jirl $zero, $ra, 0
+;
+; LA32D-LABEL: bitcast_i32_to_float:
+; LA32D:       # %bb.0:
+; LA32D-NEXT:    movgr2fr.w $fa0, $a0
+; LA32D-NEXT:    jirl $zero, $ra, 0
+;
+; LA64F-LABEL: bitcast_i32_to_float:
+; LA64F:       # %bb.0:
+; LA64F-NEXT:    movgr2fr.w $fa0, $a0
+; LA64F-NEXT:    jirl $zero, $ra, 0
+;
+; LA64D-LABEL: bitcast_i32_to_float:
+; LA64D:       # %bb.0:
+; LA64D-NEXT:    movgr2fr.w $fa0, $a0
+; LA64D-NEXT:    jirl $zero, $ra, 0
   %1 = bitcast i32 %a to float
   ret float %1
 }