diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp @@ -1200,6 +1200,8 @@ case ISD::STRICT_FTRUNC: case ISD::FTRUNC: ExpandFloatRes_FTRUNC(N, Lo, Hi); break; case ISD::LOAD: ExpandFloatRes_LOAD(N, Lo, Hi); break; + case ISD::STRICT_SINT_TO_FP: + case ISD::STRICT_UINT_TO_FP: case ISD::SINT_TO_FP: case ISD::UINT_TO_FP: ExpandFloatRes_XINT_TO_FP(N, Lo, Hi); break; case ISD::STRICT_FREM: @@ -1598,9 +1600,11 @@ assert(N->getValueType(0) == MVT::ppcf128 && "Unsupported XINT_TO_FP!"); EVT VT = N->getValueType(0); EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); - SDValue Src = N->getOperand(0); + bool Strict = N->isStrictFPOpcode(); + SDValue Src = N->getOperand(Strict ? 1 : 0); EVT SrcVT = Src.getValueType(); - bool isSigned = N->getOpcode() == ISD::SINT_TO_FP; + bool isSigned = N->getOpcode() == ISD::SINT_TO_FP || + N->getOpcode() == ISD::STRICT_SINT_TO_FP; SDLoc dl(N); // First do an SINT_TO_FP, whether the original was signed or unsigned. 
@@ -1612,7 +1616,12 @@ MVT::i32, Src); Lo = DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(NVT), APInt(NVT.getSizeInBits(), 0)), dl, NVT); - Hi = DAG.getNode(ISD::SINT_TO_FP, dl, NVT, Src); + if (Strict) { + Hi = DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {NVT, MVT::Other}, + {N->getOperand(0), Src}); + ReplaceValueWith(SDValue(N, 1), Hi); + } else + Hi = DAG.getNode(ISD::SINT_TO_FP, dl, NVT, Src); } else { RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; if (SrcVT.bitsLE(MVT::i64)) { @@ -1627,8 +1636,11 @@ TargetLowering::MakeLibCallOptions CallOptions; CallOptions.setSExt(true); - Hi = TLI.makeLibCall(DAG, LC, VT, Src, CallOptions, dl).first; - GetPairElements(Hi, Lo, Hi); + std::pair<SDValue, SDValue> Tmp = + TLI.makeLibCall(DAG, LC, VT, Src, CallOptions, dl); + if (Strict) + ReplaceValueWith(SDValue(N, 1), Tmp.second); + GetPairElements(Tmp.first, Lo, Hi); } if (isSigned) @@ -1659,10 +1671,13 @@ } // TODO: Are there fast-math-flags to propagate to this FADD? - Lo = DAG.getNode(ISD::FADD, dl, VT, Hi, - DAG.getConstantFP(APFloat(APFloat::PPCDoubleDouble(), - APInt(128, Parts)), - dl, MVT::ppcf128)); + SDValue NewLo = DAG.getConstantFP( + APFloat(APFloat::PPCDoubleDouble(), APInt(128, Parts)), dl, MVT::ppcf128); + if (Strict) + Lo = DAG.getNode(ISD::STRICT_FADD, dl, {VT, MVT::Other}, + {N->getOperand(0), Hi, NewLo}); + else + Lo = DAG.getNode(ISD::FADD, dl, VT, Hi, NewLo); Lo = DAG.getSelectCC(dl, Src, DAG.getConstant(0, dl, SrcVT), Lo, Hi, ISD::SETLT); GetPairElements(Lo, Lo, Hi); diff --git a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll --- a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll +++ b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll @@ -1528,6 +1528,448 @@ ret void } +define ppc_fp128 @i32_to_ppcq(i32 signext %m) #0 { +; PC64LE-LABEL: i32_to_ppcq: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mtfprwz 0, 3 +; PC64LE-NEXT: xxlxor 2, 2, 2 +; PC64LE-NEXT: xscvsxddp 
1, 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: i32_to_ppcq: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mtfprwz 0, 3 +; PC64LE9-NEXT: xscvsxddp 1, 0 +; PC64LE9-NEXT: xxlxor 2, 2, 2 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: i32_to_ppcq: +; PC64: # %bb.0: # %entry +; PC64-NEXT: std 3, -8(1) +; PC64-NEXT: addis 3, 2, .LCPI33_0@toc@ha +; PC64-NEXT: lfd 0, -8(1) +; PC64-NEXT: lfs 2, .LCPI33_0@toc@l(3) +; PC64-NEXT: fcfid 1, 0 +; PC64-NEXT: blr +entry: + %conv = tail call ppc_fp128 @llvm.experimental.constrained.sitofp.ppcf128.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #1 + ret ppc_fp128 %conv +} + +define ppc_fp128 @i64_to_ppcq(i64 %m) #0 { +; PC64LE-LABEL: i64_to_ppcq: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl __floatditf +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: i64_to_ppcq: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl __floatditf +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: i64_to_ppcq: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl __floatditf +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %conv = tail call ppc_fp128 @llvm.experimental.constrained.sitofp.ppcf128.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #1 + ret ppc_fp128 %conv +} + +define ppc_fp128 @u32_to_ppcq(i32 zeroext %m) #0 { +; PC64LE-LABEL: u32_to_ppcq: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 30, -24(1) # 8-byte Folded Spill +; PC64LE-NEXT: stfd 31, -8(1) # 8-byte Folded Spill +; PC64LE-NEXT: std 0, 16(1) 
+; PC64LE-NEXT: stdu 1, -64(1) +; PC64LE-NEXT: mr 30, 3 +; PC64LE-NEXT: addis 3, 2, .LCPI35_0@toc@ha +; PC64LE-NEXT: xxlxor 2, 2, 2 +; PC64LE-NEXT: mtfprwz 0, 30 +; PC64LE-NEXT: lfs 3, .LCPI35_0@toc@l(3) +; PC64LE-NEXT: xxlxor 4, 4, 4 +; PC64LE-NEXT: xscvsxddp 31, 0 +; PC64LE-NEXT: fmr 1, 31 +; PC64LE-NEXT: bl __gcc_qadd +; PC64LE-NEXT: nop +; PC64LE-NEXT: cmpwi 30, 0 +; PC64LE-NEXT: blt 0, .LBB35_2 +; PC64LE-NEXT: # %bb.1: # %entry +; PC64LE-NEXT: fmr 1, 31 +; PC64LE-NEXT: .LBB35_2: # %entry +; PC64LE-NEXT: blt 0, .LBB35_4 +; PC64LE-NEXT: # %bb.3: # %entry +; PC64LE-NEXT: xxlxor 2, 2, 2 +; PC64LE-NEXT: .LBB35_4: # %entry +; PC64LE-NEXT: addi 1, 1, 64 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: lfd 31, -8(1) # 8-byte Folded Reload +; PC64LE-NEXT: ld 30, -24(1) # 8-byte Folded Reload +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: u32_to_ppcq: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 30, -24(1) # 8-byte Folded Spill +; PC64LE9-NEXT: stfd 31, -8(1) # 8-byte Folded Spill +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -64(1) +; PC64LE9-NEXT: mr 30, 3 +; PC64LE9-NEXT: addis 3, 2, .LCPI35_0@toc@ha +; PC64LE9-NEXT: mtfprwz 0, 30 +; PC64LE9-NEXT: xscvsxddp 31, 0 +; PC64LE9-NEXT: lfs 3, .LCPI35_0@toc@l(3) +; PC64LE9-NEXT: fmr 1, 31 +; PC64LE9-NEXT: xxlxor 2, 2, 2 +; PC64LE9-NEXT: xxlxor 4, 4, 4 +; PC64LE9-NEXT: bl __gcc_qadd +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: cmpwi 30, 0 +; PC64LE9-NEXT: blt 0, .LBB35_2 +; PC64LE9-NEXT: # %bb.1: # %entry +; PC64LE9-NEXT: fmr 1, 31 +; PC64LE9-NEXT: .LBB35_2: # %entry +; PC64LE9-NEXT: blt 0, .LBB35_4 +; PC64LE9-NEXT: # %bb.3: # %entry +; PC64LE9-NEXT: xxlxor 2, 2, 2 +; PC64LE9-NEXT: .LBB35_4: # %entry +; PC64LE9-NEXT: addi 1, 1, 64 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: lfd 31, -8(1) # 8-byte Folded Reload +; PC64LE9-NEXT: ld 30, -24(1) # 8-byte Folded Reload +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: u32_to_ppcq: +; PC64: # %bb.0: # %entry +; PC64-NEXT: 
mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -160(1) +; PC64-NEXT: std 30, 128(1) # 8-byte Folded Spill +; PC64-NEXT: mr 30, 3 +; PC64-NEXT: extsw 3, 3 +; PC64-NEXT: std 3, 120(1) +; PC64-NEXT: addis 3, 2, .LCPI35_0@toc@ha +; PC64-NEXT: stfd 31, 152(1) # 8-byte Folded Spill +; PC64-NEXT: lfd 0, 120(1) +; PC64-NEXT: lfs 3, .LCPI35_0@toc@l(3) +; PC64-NEXT: addis 3, 2, .LCPI35_1@toc@ha +; PC64-NEXT: lfs 31, .LCPI35_1@toc@l(3) +; PC64-NEXT: stfd 30, 144(1) # 8-byte Folded Spill +; PC64-NEXT: fcfid 30, 0 +; PC64-NEXT: fmr 1, 30 +; PC64-NEXT: fmr 2, 31 +; PC64-NEXT: fmr 4, 31 +; PC64-NEXT: bl __gcc_qadd +; PC64-NEXT: nop +; PC64-NEXT: cmpwi 30, 0 +; PC64-NEXT: blt 0, .LBB35_2 +; PC64-NEXT: # %bb.1: # %entry +; PC64-NEXT: fmr 1, 30 +; PC64-NEXT: .LBB35_2: # %entry +; PC64-NEXT: blt 0, .LBB35_4 +; PC64-NEXT: # %bb.3: # %entry +; PC64-NEXT: fmr 2, 31 +; PC64-NEXT: .LBB35_4: # %entry +; PC64-NEXT: lfd 31, 152(1) # 8-byte Folded Reload +; PC64-NEXT: ld 30, 128(1) # 8-byte Folded Reload +; PC64-NEXT: lfd 30, 144(1) # 8-byte Folded Reload +; PC64-NEXT: addi 1, 1, 160 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %conv = tail call ppc_fp128 @llvm.experimental.constrained.uitofp.ppcf128.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #1 + ret ppc_fp128 %conv +} + +define ppc_fp128 @u64_to_ppcq(i64 %m) #0 { +; PC64LE-LABEL: u64_to_ppcq: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 30, -32(1) # 8-byte Folded Spill +; PC64LE-NEXT: stfd 30, -16(1) # 8-byte Folded Spill +; PC64LE-NEXT: stfd 31, -8(1) # 8-byte Folded Spill +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -64(1) +; PC64LE-NEXT: mr 30, 3 +; PC64LE-NEXT: bl __floatditf +; PC64LE-NEXT: nop +; PC64LE-NEXT: addis 3, 2, .LCPI36_0@toc@ha +; PC64LE-NEXT: xxlxor 4, 4, 4 +; PC64LE-NEXT: fmr 30, 1 +; PC64LE-NEXT: fmr 31, 2 +; PC64LE-NEXT: lfs 3, .LCPI36_0@toc@l(3) +; PC64LE-NEXT: bl __gcc_qadd +; PC64LE-NEXT: nop +; PC64LE-NEXT: cmpdi 30, 0 
+; PC64LE-NEXT: blt 0, .LBB36_2 +; PC64LE-NEXT: # %bb.1: # %entry +; PC64LE-NEXT: fmr 1, 30 +; PC64LE-NEXT: .LBB36_2: # %entry +; PC64LE-NEXT: blt 0, .LBB36_4 +; PC64LE-NEXT: # %bb.3: # %entry +; PC64LE-NEXT: fmr 2, 31 +; PC64LE-NEXT: .LBB36_4: # %entry +; PC64LE-NEXT: addi 1, 1, 64 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: lfd 31, -8(1) # 8-byte Folded Reload +; PC64LE-NEXT: lfd 30, -16(1) # 8-byte Folded Reload +; PC64LE-NEXT: ld 30, -32(1) # 8-byte Folded Reload +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: u64_to_ppcq: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 30, -32(1) # 8-byte Folded Spill +; PC64LE9-NEXT: stfd 30, -16(1) # 8-byte Folded Spill +; PC64LE9-NEXT: stfd 31, -8(1) # 8-byte Folded Spill +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -64(1) +; PC64LE9-NEXT: mr 30, 3 +; PC64LE9-NEXT: bl __floatditf +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addis 3, 2, .LCPI36_0@toc@ha +; PC64LE9-NEXT: lfs 3, .LCPI36_0@toc@l(3) +; PC64LE9-NEXT: xxlxor 4, 4, 4 +; PC64LE9-NEXT: fmr 30, 1 +; PC64LE9-NEXT: fmr 31, 2 +; PC64LE9-NEXT: bl __gcc_qadd +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: cmpdi 30, 0 +; PC64LE9-NEXT: blt 0, .LBB36_2 +; PC64LE9-NEXT: # %bb.1: # %entry +; PC64LE9-NEXT: fmr 1, 30 +; PC64LE9-NEXT: .LBB36_2: # %entry +; PC64LE9-NEXT: blt 0, .LBB36_4 +; PC64LE9-NEXT: # %bb.3: # %entry +; PC64LE9-NEXT: fmr 2, 31 +; PC64LE9-NEXT: .LBB36_4: # %entry +; PC64LE9-NEXT: addi 1, 1, 64 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: lfd 31, -8(1) # 8-byte Folded Reload +; PC64LE9-NEXT: lfd 30, -16(1) # 8-byte Folded Reload +; PC64LE9-NEXT: ld 30, -32(1) # 8-byte Folded Reload +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: u64_to_ppcq: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -144(1) +; PC64-NEXT: std 30, 112(1) # 8-byte Folded Spill +; PC64-NEXT: stfd 30, 128(1) # 8-byte Folded Spill +; PC64-NEXT: mr 30, 3 +; PC64-NEXT: stfd 31, 136(1) # 8-byte 
Folded Spill +; PC64-NEXT: bl __floatditf +; PC64-NEXT: nop +; PC64-NEXT: addis 3, 2, .LCPI36_0@toc@ha +; PC64-NEXT: fmr 31, 2 +; PC64-NEXT: lfs 3, .LCPI36_0@toc@l(3) +; PC64-NEXT: addis 3, 2, .LCPI36_1@toc@ha +; PC64-NEXT: fmr 30, 1 +; PC64-NEXT: lfs 4, .LCPI36_1@toc@l(3) +; PC64-NEXT: bl __gcc_qadd +; PC64-NEXT: nop +; PC64-NEXT: cmpdi 30, 0 +; PC64-NEXT: blt 0, .LBB36_2 +; PC64-NEXT: # %bb.1: # %entry +; PC64-NEXT: fmr 1, 30 +; PC64-NEXT: .LBB36_2: # %entry +; PC64-NEXT: blt 0, .LBB36_4 +; PC64-NEXT: # %bb.3: # %entry +; PC64-NEXT: fmr 2, 31 +; PC64-NEXT: .LBB36_4: # %entry +; PC64-NEXT: lfd 31, 136(1) # 8-byte Folded Reload +; PC64-NEXT: ld 30, 112(1) # 8-byte Folded Reload +; PC64-NEXT: lfd 30, 128(1) # 8-byte Folded Reload +; PC64-NEXT: addi 1, 1, 144 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %conv = tail call ppc_fp128 @llvm.experimental.constrained.uitofp.ppcf128.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #1 + ret ppc_fp128 %conv +} + +define ppc_fp128 @i128_to_ppcq(i128 %m) #0 { +; PC64LE-LABEL: i128_to_ppcq: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -32(1) +; PC64LE-NEXT: bl __floattitf +; PC64LE-NEXT: nop +; PC64LE-NEXT: addi 1, 1, 32 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: i128_to_ppcq: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -32(1) +; PC64LE9-NEXT: bl __floattitf +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addi 1, 1, 32 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: i128_to_ppcq: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -112(1) +; PC64-NEXT: bl __floattitf +; PC64-NEXT: nop +; PC64-NEXT: addi 1, 1, 112 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %conv = tail call ppc_fp128 
@llvm.experimental.constrained.sitofp.ppcf128.i128(i128 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #1 + ret ppc_fp128 %conv +} + +define ppc_fp128 @u128_to_ppcq(i128 %m) #0 { +; PC64LE-LABEL: u128_to_ppcq: +; PC64LE: # %bb.0: # %entry +; PC64LE-NEXT: mflr 0 +; PC64LE-NEXT: std 30, -32(1) # 8-byte Folded Spill +; PC64LE-NEXT: stfd 30, -16(1) # 8-byte Folded Spill +; PC64LE-NEXT: stfd 31, -8(1) # 8-byte Folded Spill +; PC64LE-NEXT: std 0, 16(1) +; PC64LE-NEXT: stdu 1, -64(1) +; PC64LE-NEXT: mr 30, 4 +; PC64LE-NEXT: bl __floattitf +; PC64LE-NEXT: nop +; PC64LE-NEXT: addis 3, 2, .LCPI38_0@toc@ha +; PC64LE-NEXT: xxlxor 4, 4, 4 +; PC64LE-NEXT: fmr 30, 1 +; PC64LE-NEXT: fmr 31, 2 +; PC64LE-NEXT: lfd 3, .LCPI38_0@toc@l(3) +; PC64LE-NEXT: bl __gcc_qadd +; PC64LE-NEXT: nop +; PC64LE-NEXT: cmpdi 30, 0 +; PC64LE-NEXT: blt 0, .LBB38_2 +; PC64LE-NEXT: # %bb.1: # %entry +; PC64LE-NEXT: fmr 1, 30 +; PC64LE-NEXT: .LBB38_2: # %entry +; PC64LE-NEXT: blt 0, .LBB38_4 +; PC64LE-NEXT: # %bb.3: # %entry +; PC64LE-NEXT: fmr 2, 31 +; PC64LE-NEXT: .LBB38_4: # %entry +; PC64LE-NEXT: addi 1, 1, 64 +; PC64LE-NEXT: ld 0, 16(1) +; PC64LE-NEXT: lfd 31, -8(1) # 8-byte Folded Reload +; PC64LE-NEXT: lfd 30, -16(1) # 8-byte Folded Reload +; PC64LE-NEXT: ld 30, -32(1) # 8-byte Folded Reload +; PC64LE-NEXT: mtlr 0 +; PC64LE-NEXT: blr +; +; PC64LE9-LABEL: u128_to_ppcq: +; PC64LE9: # %bb.0: # %entry +; PC64LE9-NEXT: mflr 0 +; PC64LE9-NEXT: std 30, -32(1) # 8-byte Folded Spill +; PC64LE9-NEXT: stfd 30, -16(1) # 8-byte Folded Spill +; PC64LE9-NEXT: stfd 31, -8(1) # 8-byte Folded Spill +; PC64LE9-NEXT: std 0, 16(1) +; PC64LE9-NEXT: stdu 1, -64(1) +; PC64LE9-NEXT: mr 30, 4 +; PC64LE9-NEXT: bl __floattitf +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: addis 3, 2, .LCPI38_0@toc@ha +; PC64LE9-NEXT: lfd 3, .LCPI38_0@toc@l(3) +; PC64LE9-NEXT: xxlxor 4, 4, 4 +; PC64LE9-NEXT: fmr 30, 1 +; PC64LE9-NEXT: fmr 31, 2 +; PC64LE9-NEXT: bl __gcc_qadd +; PC64LE9-NEXT: nop +; PC64LE9-NEXT: cmpdi 30, 0 +; PC64LE9-NEXT: 
blt 0, .LBB38_2 +; PC64LE9-NEXT: # %bb.1: # %entry +; PC64LE9-NEXT: fmr 1, 30 +; PC64LE9-NEXT: .LBB38_2: # %entry +; PC64LE9-NEXT: blt 0, .LBB38_4 +; PC64LE9-NEXT: # %bb.3: # %entry +; PC64LE9-NEXT: fmr 2, 31 +; PC64LE9-NEXT: .LBB38_4: # %entry +; PC64LE9-NEXT: addi 1, 1, 64 +; PC64LE9-NEXT: ld 0, 16(1) +; PC64LE9-NEXT: lfd 31, -8(1) # 8-byte Folded Reload +; PC64LE9-NEXT: lfd 30, -16(1) # 8-byte Folded Reload +; PC64LE9-NEXT: ld 30, -32(1) # 8-byte Folded Reload +; PC64LE9-NEXT: mtlr 0 +; PC64LE9-NEXT: blr +; +; PC64-LABEL: u128_to_ppcq: +; PC64: # %bb.0: # %entry +; PC64-NEXT: mflr 0 +; PC64-NEXT: std 0, 16(1) +; PC64-NEXT: stdu 1, -144(1) +; PC64-NEXT: std 30, 112(1) # 8-byte Folded Spill +; PC64-NEXT: stfd 30, 128(1) # 8-byte Folded Spill +; PC64-NEXT: mr 30, 3 +; PC64-NEXT: stfd 31, 136(1) # 8-byte Folded Spill +; PC64-NEXT: bl __floattitf +; PC64-NEXT: nop +; PC64-NEXT: addis 3, 2, .LCPI38_0@toc@ha +; PC64-NEXT: fmr 31, 2 +; PC64-NEXT: lfd 3, .LCPI38_0@toc@l(3) +; PC64-NEXT: addis 3, 2, .LCPI38_1@toc@ha +; PC64-NEXT: fmr 30, 1 +; PC64-NEXT: lfs 4, .LCPI38_1@toc@l(3) +; PC64-NEXT: bl __gcc_qadd +; PC64-NEXT: nop +; PC64-NEXT: cmpdi 30, 0 +; PC64-NEXT: blt 0, .LBB38_2 +; PC64-NEXT: # %bb.1: # %entry +; PC64-NEXT: fmr 1, 30 +; PC64-NEXT: .LBB38_2: # %entry +; PC64-NEXT: blt 0, .LBB38_4 +; PC64-NEXT: # %bb.3: # %entry +; PC64-NEXT: fmr 2, 31 +; PC64-NEXT: .LBB38_4: # %entry +; PC64-NEXT: lfd 31, 136(1) # 8-byte Folded Reload +; PC64-NEXT: ld 30, 112(1) # 8-byte Folded Reload +; PC64-NEXT: lfd 30, 128(1) # 8-byte Folded Reload +; PC64-NEXT: addi 1, 1, 144 +; PC64-NEXT: ld 0, 16(1) +; PC64-NEXT: mtlr 0 +; PC64-NEXT: blr +entry: + %conv = tail call ppc_fp128 @llvm.experimental.constrained.uitofp.ppcf128.i128(i128 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #1 + ret ppc_fp128 %conv +} + attributes #0 = { nounwind strictfp } attributes #1 = { strictfp } @@ -1563,3 +2005,9 @@ declare i32 @llvm.experimental.constrained.fptosi.i32.ppcf128(ppc_fp128, 
metadata) declare i64 @llvm.experimental.constrained.fptoui.i64.ppcf128(ppc_fp128, metadata) declare i32 @llvm.experimental.constrained.fptoui.i32.ppcf128(ppc_fp128, metadata) +declare ppc_fp128 @llvm.experimental.constrained.sitofp.ppcf128.i32(i32, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.uitofp.ppcf128.i32(i32, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.sitofp.ppcf128.i64(i64, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.uitofp.ppcf128.i64(i64, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.sitofp.ppcf128.i128(i128, metadata, metadata) +declare ppc_fp128 @llvm.experimental.constrained.uitofp.ppcf128.i128(i128, metadata, metadata)