diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -275,11 +275,26 @@
   setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
   setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
   setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
+  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
+  if (Subtarget.hasFPRND()) {
+    setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
+    setOperationAction(ISD::STRICT_FCEIL, MVT::f32, Legal);
+    setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
+    setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);
+  }
   setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
   setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
   setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
   setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
+  if (Subtarget.hasVSX())
+    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f64, Legal);
+  if (Subtarget.hasFPRND()) {
+    setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
+    setOperationAction(ISD::STRICT_FCEIL, MVT::f64, Legal);
+    setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
+    setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
+  }
 
   // We don't support sin/cos/sqrt/fmod/pow
   setOperationAction(ISD::FSIN , MVT::f64, Expand);
@@ -333,6 +348,10 @@
     setOperationAction(ISD::FROUND, MVT::f32, Legal);
   }
 
+  if (Subtarget.hasVSX()) {
+    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
+  }
+
   // PowerPC does not have BSWAP, but we can use vector BSWAP instruction xxbrd
   // to speed up scalar BSWAP64.
   // CTPOP or CTTZ were introduced in P8/P9 respectively
@@ -917,11 +936,21 @@
     setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
     setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
     setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
+    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v4f32, Legal);
+    setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
+    setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal);
+    setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
+    setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);
     setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
     setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
     setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
     setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
+    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v2f64, Legal);
+    setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
+    setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal);
+    setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
+    setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);
 
     addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
   }
@@ -985,6 +1014,15 @@
     setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
     setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
     setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
+    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
+    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
+    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
+    setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
+    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
+    setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
+    setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
+    setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
+    setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
   }
 
   setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
   setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
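
(Reviewer note, not part of the patch.) Marking a STRICT_ node Legal is what lets a constrained FP intrinsic reach instruction selection as a strict node instead of being expanded to a libcall, so the .td patterns below can match it. A minimal IR sketch of the effect, assuming -mcpu=pwr8 or newer with VSX (the function name is hypothetical; the expected output matches the xsrdpic checks in scalar-rounding-ops.ll further down):

declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)

define double @nearbyint_sketch(double %d) {
  ; STRICT_FNEARBYINT is Legal for f64 under VSX after this hunk,
  ; so this selects to a single xsrdpic rather than "bl nearbyint".
  %r = call double @llvm.experimental.constrained.nearbyint.f64(
           double %d,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict")
  ret double %r
}
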
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -2511,7 +2511,7 @@
 def FTSQRT: XForm_17a<63, 160, (outs crrc:$crD), (ins f8rc:$fB),
                       "ftsqrt $crD, $fB", IIC_FPCompare>;
 
-let Uses = [RM] in {
+let Uses = [RM], mayRaiseFPException = 1 in {
   let hasSideEffects = 0 in {
   defm FCTIW : XForm_26r<63, 14, (outs f8rc:$frD), (ins f8rc:$frB),
                          "fctiw", "$frD, $frB", IIC_FPGeneral,
@@ -2525,39 +2525,39 @@
   defm FRSP  : XForm_26r<63, 12, (outs f4rc:$frD), (ins f8rc:$frB),
                          "frsp", "$frD, $frB", IIC_FPGeneral,
-                         [(set f32:$frD, (fpround f64:$frB))]>;
+                         [(set f32:$frD, (any_fpround f64:$frB))]>;
 
   let Interpretation64Bit = 1, isCodeGenOnly = 1 in
   defm FRIND : XForm_26r<63, 392, (outs f8rc:$frD), (ins f8rc:$frB),
                          "frin", "$frD, $frB", IIC_FPGeneral,
-                         [(set f64:$frD, (fround f64:$frB))]>;
+                         [(set f64:$frD, (any_fround f64:$frB))]>;
   defm FRINS : XForm_26r<63, 392, (outs f4rc:$frD), (ins f4rc:$frB),
                          "frin", "$frD, $frB", IIC_FPGeneral,
-                         [(set f32:$frD, (fround f32:$frB))]>;
+                         [(set f32:$frD, (any_fround f32:$frB))]>;
   }
 
   let hasSideEffects = 0 in {
   let Interpretation64Bit = 1, isCodeGenOnly = 1 in
   defm FRIPD : XForm_26r<63, 456, (outs f8rc:$frD), (ins f8rc:$frB),
                          "frip", "$frD, $frB", IIC_FPGeneral,
-                         [(set f64:$frD, (fceil f64:$frB))]>;
+                         [(set f64:$frD, (any_fceil f64:$frB))]>;
   defm FRIPS : XForm_26r<63, 456, (outs f4rc:$frD), (ins f4rc:$frB),
                          "frip", "$frD, $frB", IIC_FPGeneral,
-                         [(set f32:$frD, (fceil f32:$frB))]>;
+                         [(set f32:$frD, (any_fceil f32:$frB))]>;
   let Interpretation64Bit = 1, isCodeGenOnly = 1 in
   defm FRIZD : XForm_26r<63, 424, (outs f8rc:$frD), (ins f8rc:$frB),
                          "friz", "$frD, $frB", IIC_FPGeneral,
-                         [(set f64:$frD, (ftrunc f64:$frB))]>;
+                         [(set f64:$frD, (any_ftrunc f64:$frB))]>;
   defm FRIZS : XForm_26r<63, 424, (outs f4rc:$frD), (ins f4rc:$frB),
                          "friz", "$frD, $frB", IIC_FPGeneral,
-                         [(set f32:$frD, (ftrunc f32:$frB))]>;
+                         [(set f32:$frD, (any_ftrunc f32:$frB))]>;
   let Interpretation64Bit = 1, isCodeGenOnly = 1 in
   defm FRIMD : XForm_26r<63, 488, (outs f8rc:$frD), (ins f8rc:$frB),
                          "frim", "$frD, $frB", IIC_FPGeneral,
-                         [(set f64:$frD, (ffloor f64:$frB))]>;
+                         [(set f64:$frD, (any_ffloor f64:$frB))]>;
   defm FRIMS : XForm_26r<63, 488, (outs f4rc:$frD), (ins f4rc:$frB),
                          "frim", "$frD, $frB", IIC_FPGeneral,
-                         [(set f32:$frD, (ffloor f32:$frB))]>;
+                         [(set f32:$frD, (any_ffloor f32:$frB))]>;
 
   defm FSQRT : XForm_26r<63, 22, (outs f8rc:$frD), (ins f8rc:$frB),
                          "fsqrt", "$frD, $frB", IIC_FPSqrtD,
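
(Reviewer note, not part of the patch.) The any_* selectors introduced above are the PatFrags that match both the ordinary DAG node and its STRICT_ counterpart (e.g. any_ftrunc matches ftrunc and strict_ftrunc), so one pattern now serves both the plain and the constrained intrinsic, while mayRaiseFPException = 1 tells machine-level passes that the instruction can raise FP exceptions in strict mode. A sketch of the two IR forms that land on the same FRIZ pattern (hypothetical function name):

declare float @llvm.trunc.f32(float)
declare float @llvm.experimental.constrained.trunc.f32(float, metadata)

define float @trunc_both(float %a, float %b) {
  ; Plain form: becomes an ftrunc node.
  %x = call float @llvm.trunc.f32(float %a)
  ; Strict form: becomes strict_ftrunc; any_ftrunc matches both, so
  ; both select to friz (compare trunc_f32 in fp-strict-round.ll).
  %y = call float @llvm.experimental.constrained.trunc.f32(
           float %b, metadata !"fpexcept.strict")
  %s = fadd float %x, %y
  ret float %s
}
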
diff --git a/llvm/lib/Target/PowerPC/PPCInstrVSX.td b/llvm/lib/Target/PowerPC/PPCInstrVSX.td
--- a/llvm/lib/Target/PowerPC/PPCInstrVSX.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrVSX.td
@@ -740,65 +740,65 @@
   def XSRDPI : XX2Form<60, 73,
                        (outs vsfrc:$XT), (ins vsfrc:$XB),
                        "xsrdpi $XT, $XB", IIC_VecFP,
-                       [(set f64:$XT, (fround f64:$XB))]>;
+                       [(set f64:$XT, (any_fround f64:$XB))]>;
   def XSRDPIC : XX2Form<60, 107,
                        (outs vsfrc:$XT), (ins vsfrc:$XB),
                        "xsrdpic $XT, $XB", IIC_VecFP,
-                       [(set f64:$XT, (fnearbyint f64:$XB))]>;
+                       [(set f64:$XT, (any_fnearbyint f64:$XB))]>;
   def XSRDPIM : XX2Form<60, 121,
                        (outs vsfrc:$XT), (ins vsfrc:$XB),
                        "xsrdpim $XT, $XB", IIC_VecFP,
-                       [(set f64:$XT, (ffloor f64:$XB))]>;
+                       [(set f64:$XT, (any_ffloor f64:$XB))]>;
   def XSRDPIP : XX2Form<60, 105,
                        (outs vsfrc:$XT), (ins vsfrc:$XB),
                        "xsrdpip $XT, $XB", IIC_VecFP,
-                       [(set f64:$XT, (fceil f64:$XB))]>;
+                       [(set f64:$XT, (any_fceil f64:$XB))]>;
   def XSRDPIZ : XX2Form<60, 89,
                        (outs vsfrc:$XT), (ins vsfrc:$XB),
                        "xsrdpiz $XT, $XB", IIC_VecFP,
-                       [(set f64:$XT, (ftrunc f64:$XB))]>;
+                       [(set f64:$XT, (any_ftrunc f64:$XB))]>;
 
   def XVRDPI : XX2Form<60, 201,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvrdpi $XT, $XB", IIC_VecFP,
-                       [(set v2f64:$XT, (fround v2f64:$XB))]>;
+                       [(set v2f64:$XT, (any_fround v2f64:$XB))]>;
   def XVRDPIC : XX2Form<60, 235,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvrdpic $XT, $XB", IIC_VecFP,
-                       [(set v2f64:$XT, (fnearbyint v2f64:$XB))]>;
+                       [(set v2f64:$XT, (any_fnearbyint v2f64:$XB))]>;
   def XVRDPIM : XX2Form<60, 249,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvrdpim $XT, $XB", IIC_VecFP,
-                       [(set v2f64:$XT, (ffloor v2f64:$XB))]>;
+                       [(set v2f64:$XT, (any_ffloor v2f64:$XB))]>;
   def XVRDPIP : XX2Form<60, 233,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvrdpip $XT, $XB", IIC_VecFP,
-                       [(set v2f64:$XT, (fceil v2f64:$XB))]>;
+                       [(set v2f64:$XT, (any_fceil v2f64:$XB))]>;
   def XVRDPIZ : XX2Form<60, 217,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvrdpiz $XT, $XB", IIC_VecFP,
-                       [(set v2f64:$XT, (ftrunc v2f64:$XB))]>;
+                       [(set v2f64:$XT, (any_ftrunc v2f64:$XB))]>;
 
   def XVRSPI : XX2Form<60, 137,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvrspi $XT, $XB", IIC_VecFP,
-                       [(set v4f32:$XT, (fround v4f32:$XB))]>;
+                       [(set v4f32:$XT, (any_fround v4f32:$XB))]>;
   def XVRSPIC : XX2Form<60, 171,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvrspic $XT, $XB", IIC_VecFP,
-                       [(set v4f32:$XT, (fnearbyint v4f32:$XB))]>;
+                       [(set v4f32:$XT, (any_fnearbyint v4f32:$XB))]>;
   def XVRSPIM : XX2Form<60, 185,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvrspim $XT, $XB", IIC_VecFP,
-                       [(set v4f32:$XT, (ffloor v4f32:$XB))]>;
+                       [(set v4f32:$XT, (any_ffloor v4f32:$XB))]>;
   def XVRSPIP : XX2Form<60, 169,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvrspip $XT, $XB", IIC_VecFP,
-                       [(set v4f32:$XT, (fceil v4f32:$XB))]>;
+                       [(set v4f32:$XT, (any_fceil v4f32:$XB))]>;
   def XVRSPIZ : XX2Form<60, 153,
                        (outs vsrc:$XT), (ins vsrc:$XB),
                        "xvrspiz $XT, $XB", IIC_VecFP,
-                       [(set v4f32:$XT, (ftrunc v4f32:$XB))]>;
+                       [(set v4f32:$XT, (any_ftrunc v4f32:$XB))]>;
 
   // Max/Min Instructions
   let isCommutable = 1 in {
@@ -1470,14 +1470,15 @@
                         (outs vssrc:$XT), (ins vssrc:$XA, vssrc:$XB),
                         "xsdivsp $XT, $XA, $XB", IIC_FPDivS,
                         [(set f32:$XT, (any_fdiv f32:$XA, f32:$XB))]>;
-  } // mayRaiseFPException
   def XSRESP : XX2Form<60, 26,
                        (outs vssrc:$XT), (ins vssrc:$XB),
                        "xsresp $XT, $XB", IIC_VecFP,
                        [(set f32:$XT, (PPCfre f32:$XB))]>;
   def XSRSP : XX2Form<60, 281,
                       (outs vssrc:$XT), (ins vsfrc:$XB),
-                      "xsrsp $XT, $XB", IIC_VecFP, []>;
+                      "xsrsp $XT, $XB", IIC_VecFP,
+                      [(set f32:$XT, (any_fpround f64:$XB))]>;
+  } // mayRaiseFPException
   def XSSQRTSP : XX2Form<60, 11,
                         (outs vssrc:$XT), (ins vssrc:$XB),
                         "xssqrtsp $XT, $XB", IIC_FPSqrtS,
@@ -2792,12 +2793,14 @@
   //===--------------------------------------------------------------------===//
   // Quad-Precision Floating-Point Conversion Instructions:
 
+  let mayRaiseFPException = 1 in {
   // Convert DP -> QP
   def XSCVDPQP : X_VT5_XO5_VB5_TyVB<63, 22, 836, "xscvdpqp", vfrc,
-                                    [(set f128:$vT, (fpextend f64:$vB))]>;
+                                    [(set f128:$vT, (any_fpextend f64:$vB))]>;
 
   // Round & Convert QP -> DP (dword[1] is set to zero)
   def XSCVQPDP  : X_VT5_XO5_VB5_VSFR<63, 20, 836, "xscvqpdp" , []>;
+  }
   def XSCVQPDPO : X_VT5_XO5_VB5_VSFR_Ro<63, 20, 836, "xscvqpdpo",
                                         [(set f64:$vT,
                                         (int_ppc_truncf128_round_to_odd
@@ -2864,23 +2867,25 @@
     let RC = ex;
   }
 
+  let mayRaiseFPException = 1 in {
   // Round to Quad-Precision Integer [with Inexact]
   def XSRQPI   : Z23_VT5_R1_VB5_RMC2_EX1<63,  5, 0, "xsrqpi" , []>;
   def XSRQPIX  : Z23_VT5_R1_VB5_RMC2_EX1<63,  5, 1, "xsrqpix", []>;
+  }
 
   // Use current rounding mode
-  def : Pat<(f128 (fnearbyint f128:$vB)), (f128 (XSRQPI 0, $vB, 3))>;
+  def : Pat<(f128 (any_fnearbyint f128:$vB)), (f128 (XSRQPI 0, $vB, 3))>;
   // Round to nearest, ties away from zero
-  def : Pat<(f128 (fround f128:$vB)), (f128 (XSRQPI 0, $vB, 0))>;
+  def : Pat<(f128 (any_fround f128:$vB)), (f128 (XSRQPI 0, $vB, 0))>;
   // Round towards Zero
-  def : Pat<(f128 (ftrunc f128:$vB)), (f128 (XSRQPI 1, $vB, 1))>;
+  def : Pat<(f128 (any_ftrunc f128:$vB)), (f128 (XSRQPI 1, $vB, 1))>;
   // Round towards +Inf
-  def : Pat<(f128 (fceil f128:$vB)), (f128 (XSRQPI 1, $vB, 2))>;
+  def : Pat<(f128 (any_fceil f128:$vB)), (f128 (XSRQPI 1, $vB, 2))>;
   // Round towards -Inf
-  def : Pat<(f128 (ffloor f128:$vB)), (f128 (XSRQPI 1, $vB, 3))>;
+  def : Pat<(f128 (any_ffloor f128:$vB)), (f128 (XSRQPI 1, $vB, 3))>;
 
   // Use current rounding mode, [with Inexact]
-  def : Pat<(f128 (frint f128:$vB)), (f128 (XSRQPIX 0, $vB, 3))>;
+  def : Pat<(f128 (any_frint f128:$vB)), (f128 (XSRQPIX 0, $vB, 3))>;
 
   // Round Quad-Precision to Double-Extended Precision (fp80)
   def XSRQPXP  : Z23_VT5_R1_VB5_RMC2_EX1<63, 37, 0, "xsrqpxp", []>;
@@ -3787,11 +3792,11 @@
             (STXSIBX (XSCVDPUXWS f64:$src), xoaddr:$dst)>;
 
   // Round & Convert QP -> DP/SP
-  def : Pat<(f64 (fpround f128:$src)), (f64 (XSCVQPDP $src))>;
-  def : Pat<(f32 (fpround f128:$src)), (f32 (XSRSP (XSCVQPDPO $src)))>;
+  def : Pat<(f64 (any_fpround f128:$src)), (f64 (XSCVQPDP $src))>;
+  def : Pat<(f32 (any_fpround f128:$src)), (f32 (XSRSP (XSCVQPDPO $src)))>;
 
   // Convert SP -> QP
-  def : Pat<(f128 (fpextend f32:$src)),
+  def : Pat<(f128 (any_fpextend f32:$src)),
             (f128 (XSCVDPQP (COPY_TO_REGCLASS $src, VFRC)))>;
 
   def : Pat<(f32 (PPCxsmaxc f32:$XA, f32:$XB)),
@@ -3842,10 +3847,10 @@
 }
 
 def DblToFlt {
-  dag A0 = (f32 (fpround (f64 (extractelt v2f64:$A, 0))));
-  dag A1 = (f32 (fpround (f64 (extractelt v2f64:$A, 1))));
-  dag B0 = (f32 (fpround (f64 (extractelt v2f64:$B, 0))));
-  dag B1 = (f32 (fpround (f64 (extractelt v2f64:$B, 1))));
+  dag A0 = (f32 (any_fpround (f64 (extractelt v2f64:$A, 0))));
+  dag A1 = (f32 (any_fpround (f64 (extractelt v2f64:$A, 1))));
+  dag B0 = (f32 (any_fpround (f64 (extractelt v2f64:$B, 0))));
+  dag B1 = (f32 (any_fpround (f64 (extractelt v2f64:$B, 1))));
 }
 
 def ExtDbl {
diff --git a/llvm/test/CodeGen/PowerPC/build-vector-tests.ll b/llvm/test/CodeGen/PowerPC/build-vector-tests.ll
--- a/llvm/test/CodeGen/PowerPC/build-vector-tests.ll
+++ b/llvm/test/CodeGen/PowerPC/build-vector-tests.ll
@@ -1511,6 +1511,7 @@
 ; P9BE-NEXT:    lfsux f0, r3, r4
 ; P9BE-NEXT:    lfs f1, 12(r3)
 ; P9BE-NEXT:    lfs f2, 4(r3)
+; P9BE-NEXT:    xsrsp f0, f0
 ; P9BE-NEXT:    xxmrghd vs1, vs2, vs1
 ; P9BE-NEXT:    xvcvdpsp v2, vs1
 ; P9BE-NEXT:    lfs f1, 8(r3)
@@ -1525,6 +1526,7 @@
 ; P9LE-NEXT:    sldi r4, r4, 2
 ; P9LE-NEXT:    lfsux f0, r3, r4
 ; P9LE-NEXT:    lfs f1, 8(r3)
+; P9LE-NEXT:    xsrsp f0, f0
 ; P9LE-NEXT:    xxmrghd vs0, vs1, vs0
 ; P9LE-NEXT:    lfs f1, 12(r3)
 ; P9LE-NEXT:    xvcvdpsp v2, vs0
@@ -1539,6 +1541,7 @@
 ; P8BE:       # %bb.0: # %entry
 ; P8BE-NEXT:    sldi r4, r4, 2
 ; P8BE-NEXT:    lfsux f0, r3, r4
+; P8BE-NEXT:    xsrsp f0, f0
 ; P8BE-NEXT:    lfs f1, 12(r3)
 ; P8BE-NEXT:    lfs f2, 4(r3)
 ; P8BE-NEXT:    lfs f3, 8(r3)
@@ -1554,6 +1557,7 @@
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    sldi r4, r4, 2
 ; P8LE-NEXT:    lfsux f0, r3, r4
+; P8LE-NEXT:    xsrsp f0, f0
 ; P8LE-NEXT:    lfs f1, 8(r3)
 ; P8LE-NEXT:    lfs f2, 4(r3)
 ; P8LE-NEXT:    lfs f3, 12(r3)
@@ -1598,6 +1602,7 @@
 ; P9BE-NEXT:    lfsux f0, r3, r4
 ; P9BE-NEXT:    lfs f1, -12(r3)
 ; P9BE-NEXT:    lfs f2, -4(r3)
+; P9BE-NEXT:    xsrsp f0, f0
 ; P9BE-NEXT:    xxmrghd vs1, vs2, vs1
 ; P9BE-NEXT:    xvcvdpsp v2, vs1
 ; P9BE-NEXT:    lfs f1, -8(r3)
@@ -1612,6 +1617,7 @@
 ; P9LE-NEXT:    sldi r4, r4, 2
 ; P9LE-NEXT:    lfsux f0, r3, r4
 ; P9LE-NEXT:    lfs f1, -8(r3)
+; P9LE-NEXT:    xsrsp f0, f0
 ; P9LE-NEXT:    xxmrghd vs0, vs1, vs0
 ; P9LE-NEXT:    lfs f1, -12(r3)
 ; P9LE-NEXT:    xvcvdpsp v2, vs0
@@ -1626,6 +1632,7 @@
 ; P8BE:       # %bb.0: # %entry
 ; P8BE-NEXT:    sldi r4, r4, 2
 ; P8BE-NEXT:    lfsux f0, r3, r4
+; P8BE-NEXT:    xsrsp f0, f0
 ; P8BE-NEXT:    lfs f1, -12(r3)
 ; P8BE-NEXT:    lfs f2, -4(r3)
 ; P8BE-NEXT:    lfs f3, -8(r3)
@@ -1641,6 +1648,7 @@
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    sldi r4, r4, 2
 ; P8LE-NEXT:    lfsux f0, r3, r4
+; P8LE-NEXT:    xsrsp f0, f0
 ; P8LE-NEXT:    lfs f1, -8(r3)
 ; P8LE-NEXT:    lfs f2, -4(r3)
 ; P8LE-NEXT:    lfs f3, -12(r3)
@@ -3030,6 +3038,7 @@
 ; P9BE-NEXT:    lfsux f0, r3, r4
 ; P9BE-NEXT:    lfs f1, 12(r3)
 ; P9BE-NEXT:    lfs f2, 4(r3)
+; P9BE-NEXT:    xsrsp f0, f0
 ; P9BE-NEXT:    xxmrghd vs1, vs2, vs1
 ; P9BE-NEXT:    xvcvdpsp v2, vs1
 ; P9BE-NEXT:    lfs f1, 8(r3)
@@ -3044,6 +3053,7 @@
 ; P9LE-NEXT:    sldi r4, r4, 2
 ; P9LE-NEXT:    lfsux f0, r3, r4
 ; P9LE-NEXT:    lfs f1, 8(r3)
+; P9LE-NEXT:    xsrsp f0, f0
 ; P9LE-NEXT:    xxmrghd vs0, vs1, vs0
 ; P9LE-NEXT:    lfs f1, 12(r3)
 ; P9LE-NEXT:    xvcvdpsp v2, vs0
@@ -3058,6 +3068,7 @@
 ; P8BE:       # %bb.0: # %entry
 ; P8BE-NEXT:    sldi r4, r4, 2
 ; P8BE-NEXT:    lfsux f0, r3, r4
+; P8BE-NEXT:    xsrsp f0, f0
 ; P8BE-NEXT:    lfs f1, 12(r3)
 ; P8BE-NEXT:    lfs f2, 4(r3)
 ; P8BE-NEXT:    lfs f3, 8(r3)
@@ -3073,6 +3084,7 @@
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    sldi r4, r4, 2
 ; P8LE-NEXT:    lfsux f0, r3, r4
+; P8LE-NEXT:    xsrsp f0, f0
 ; P8LE-NEXT:    lfs f1, 8(r3)
 ; P8LE-NEXT:    lfs f2, 4(r3)
 ; P8LE-NEXT:    lfs f3, 12(r3)
@@ -3118,6 +3130,7 @@
 ; P9BE-NEXT:    lfsux f0, r3, r4
 ; P9BE-NEXT:    lfs f1, -12(r3)
 ; P9BE-NEXT:    lfs f2, -4(r3)
+; P9BE-NEXT:    xsrsp f0, f0
 ; P9BE-NEXT:    xxmrghd vs1, vs2, vs1
 ; P9BE-NEXT:    xvcvdpsp v2, vs1
 ; P9BE-NEXT:    lfs f1, -8(r3)
@@ -3132,6 +3145,7 @@
 ; P9LE-NEXT:    sldi r4, r4, 2
 ; P9LE-NEXT:    lfsux f0, r3, r4
 ; P9LE-NEXT:    lfs f1, -8(r3)
+; P9LE-NEXT:    xsrsp f0, f0
 ; P9LE-NEXT:    xxmrghd vs0, vs1, vs0
 ; P9LE-NEXT:    lfs f1, -12(r3)
 ; P9LE-NEXT:    xvcvdpsp v2, vs0
@@ -3146,6 +3160,7 @@
 ; P8BE:       # %bb.0: # %entry
 ; P8BE-NEXT:    sldi r4, r4, 2
 ; P8BE-NEXT:    lfsux f0, r3, r4
+; P8BE-NEXT:    xsrsp f0, f0
 ; P8BE-NEXT:    lfs f1, -12(r3)
 ; P8BE-NEXT:    lfs f2, -4(r3)
 ; P8BE-NEXT:    lfs f3, -8(r3)
@@ -3161,6 +3176,7 @@
 ; P8LE:       # %bb.0: # %entry
 ; P8LE-NEXT:    sldi r4, r4, 2
 ; P8LE-NEXT:    lfsux f0, r3, r4
+; P8LE-NEXT:    xsrsp f0, f0
 ; P8LE-NEXT:    lfs f1, -8(r3)
 ; P8LE-NEXT:    lfs f2, -4(r3)
 ; P8LE-NEXT:    lfs f3, -12(r3)
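
(Reviewer note, not part of the patch.) The new xsrsp instructions in the checks above appear to fall out of giving XSRSP the any_fpround pattern in PPCInstrVSX.td: an f64-to-f32 truncation now selects the VSX scalar round-to-single. A minimal sketch, assuming a VSX-enabled -mcpu (hypothetical function name):

define float @fptrunc_sketch(double %d) {
  ; With the XSRSP pattern in place this selects "xsrsp f1, f1"
  ; on VSX targets; pre-patch output used frsp.
  %f = fptrunc double %d to float
  ret float %f
}
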
diff --git a/llvm/test/CodeGen/PowerPC/fp-strict-f128.ll b/llvm/test/CodeGen/PowerPC/fp-strict-f128.ll
--- a/llvm/test/CodeGen/PowerPC/fp-strict-f128.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-strict-f128.ll
@@ -5,6 +5,16 @@
 declare fp128 @llvm.experimental.constrained.fsub.f128(fp128, fp128, metadata, metadata)
 declare fp128 @llvm.experimental.constrained.fmul.f128(fp128, fp128, metadata, metadata)
 declare fp128 @llvm.experimental.constrained.fdiv.f128(fp128, fp128, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.ceil.f128(fp128, metadata)
+declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata)
+declare fp128 @llvm.experimental.constrained.nearbyint.f128(fp128, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.fpext.f128.f64(double, metadata)
+declare fp128 @llvm.experimental.constrained.fpext.f128.f32(float, metadata)
+declare double @llvm.experimental.constrained.fptrunc.f64.f128(fp128, metadata, metadata)
+declare float @llvm.experimental.constrained.fptrunc.f32.f128(fp128, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.rint.f128(fp128, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata)
+declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
 
 define fp128 @fadd_f128(fp128 %f1, fp128 %f2) {
 ; CHECK-LABEL: fadd_f128:
@@ -53,3 +63,121 @@
                       metadata !"fpexcept.strict")
   ret fp128 %res
 }
+
+define fp128 @fceil_f128(fp128 %f1) {
+; CHECK-LABEL: fceil_f128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xsrqpi 1, v2, v2, 2
+; CHECK-NEXT:    blr
+  %res = call fp128 @llvm.experimental.constrained.ceil.f128(
+                        fp128 %f1,
+                        metadata !"fpexcept.strict")
+  ret fp128 %res;
+}
+
+define fp128 @floor_f128(fp128 %f1) {
+; CHECK-LABEL: floor_f128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xsrqpi 1, v2, v2, 3
+; CHECK-NEXT:    blr
+  %res = call fp128 @llvm.experimental.constrained.floor.f128(
+                        fp128 %f1,
+                        metadata !"fpexcept.strict")
+  ret fp128 %res;
+}
+
+define fp128 @nearbyint_f128(fp128 %f1) {
+; CHECK-LABEL: nearbyint_f128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xsrqpi 0, v2, v2, 3
+; CHECK-NEXT:    blr
+  %res = call fp128 @llvm.experimental.constrained.nearbyint.f128(
+                        fp128 %f1,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict")
+  ret fp128 %res;
+}
+
+define fp128 @fpext_f128_f32(float %f1) {
+; CHECK-LABEL: fpext_f128_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xscpsgndp v2, f1, f1
+; CHECK-NEXT:    xscvdpqp v2, v2
+; CHECK-NEXT:    blr
+  %res = call fp128 @llvm.experimental.constrained.fpext.f128.f32(
+                        float %f1,
+                        metadata !"fpexcept.strict")
+  ret fp128 %res;
+}
+
+define fp128 @fpext_f128_f64(double %f1) {
+; CHECK-LABEL: fpext_f128_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xscpsgndp v2, f1, f1
+; CHECK-NEXT:    xscvdpqp v2, v2
+; CHECK-NEXT:    blr
+  %res = call fp128 @llvm.experimental.constrained.fpext.f128.f64(
+                        double %f1,
+                        metadata !"fpexcept.strict")
+  ret fp128 %res;
+}
+
+define float @fptrunc_f128_f32(fp128 %f1) {
+; CHECK-LABEL: fptrunc_f128_f32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xscvqpdpo v2, v2
+; CHECK-NEXT:    xsrsp f1, v2
+; CHECK-NEXT:    blr
+  %res = call float @llvm.experimental.constrained.fptrunc.f32.f128(
+                        fp128 %f1,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict")
+  ret float %res;
+}
+
+define double @fptrunc_f128_f64(fp128 %f1) {
+; CHECK-LABEL: fptrunc_f128_f64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xscvqpdp v2, v2
+; CHECK-NEXT:    xscpsgndp f1, v2, v2
+; CHECK-NEXT:    blr
+  %res = call double @llvm.experimental.constrained.fptrunc.f64.f128(
+                        fp128 %f1,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict")
+  ret double %res;
+}
+
+define fp128 @rint_f128(fp128 %f1) {
+; CHECK-LABEL: rint_f128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xsrqpix 0, v2, v2, 3
+; CHECK-NEXT:    blr
+  %res = call fp128 @llvm.experimental.constrained.rint.f128(
+                        fp128 %f1,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict")
+  ret fp128 %res;
+}
+
+define fp128 @round_f128(fp128 %f1) {
+; CHECK-LABEL: round_f128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xsrqpi 0, v2, v2, 0
+; CHECK-NEXT:    blr
+  %res = call fp128 @llvm.experimental.constrained.round.f128(
+                        fp128 %f1,
+                        metadata !"fpexcept.strict")
+  ret fp128 %res;
+}
+
+define fp128 @trunc_f128(fp128 %f1) {
+; CHECK-LABEL: trunc_f128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xsrqpi 1, v2, v2, 1
+; CHECK-NEXT:    blr
+  %res = call fp128 @llvm.experimental.constrained.trunc.f128(
+                        fp128 %f1,
+                        metadata !"fpexcept.strict")
+  ret fp128 %res;
+}
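
(Reviewer note, not part of the patch.) The immediate operands in the xsrqpi/xsrqpix checks above encode the rounding mode chosen by the patterns in PPCInstrVSX.td: nearbyint -> xsrqpi 0, vB, 3; round -> xsrqpi 0, vB, 0; trunc -> xsrqpi 1, vB, 1; ceil -> xsrqpi 1, vB, 2; floor -> xsrqpi 1, vB, 3; rint -> xsrqpix 0, vB, 3 (the X form signals inexact). One such mapping spelled out (hypothetical function name):

declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata)

define fp128 @floor_sketch(fp128 %x) {
  ; Selects "xsrqpi 1, v2, v2, 3": R=1 overrides the current mode,
  ; RMC=3 means round towards -inf, per the any_ffloor pattern.
  %r = call fp128 @llvm.experimental.constrained.floor.f128(
           fp128 %x, metadata !"fpexcept.strict")
  ret fp128 %r
}
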
diff --git a/llvm/test/CodeGen/PowerPC/fp-strict-round.ll b/llvm/test/CodeGen/PowerPC/fp-strict-round.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/fp-strict-round.ll
@@ -0,0 +1,474 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux \
+; RUN:   -mcpu=pwr8 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr | FileCheck \
+; RUN:   --check-prefix=P8 %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux \
+; RUN:   -mcpu=pwr9 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr | FileCheck \
+; RUN:   --check-prefix=P9 %s
+
+; FIXME: Constrained fpext would fail if the VSX feature is disabled. Add
+; no-vsx RUN lines once that is fixed.
+
+declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
+declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
+declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metadata)
+declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
+
+declare float @llvm.experimental.constrained.floor.f32(float, metadata)
+declare double @llvm.experimental.constrained.floor.f64(double, metadata)
+declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata)
+declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
+
+declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata)
+
+declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float>, metadata)
+declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float>, metadata)
+
+declare float @llvm.experimental.constrained.fptrunc.f32.f64(double, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(<4 x double>, metadata, metadata)
+declare <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double>, metadata, metadata)
+
+declare float @llvm.experimental.constrained.round.f32(float, metadata)
+declare double @llvm.experimental.constrained.round.f64(double, metadata)
+declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata)
+declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
+
+declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
+declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
+declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
+declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
+
+define float @ceil_f32(float %f1) {
+; P8-LABEL: ceil_f32:
+; P8:       # %bb.0:
+; P8-NEXT:    frip f1, f1
+; P8-NEXT:    blr
+;
+; P9-LABEL: ceil_f32:
+; P9:       # %bb.0:
+; P9-NEXT:    frip f1, f1
+; P9-NEXT:    blr
+  %res = call float @llvm.experimental.constrained.ceil.f32(
+                        float %f1,
+                        metadata !"fpexcept.strict")
+  ret float %res
+}
+
+define double @ceil_f64(double %f1) {
+; P8-LABEL: ceil_f64:
+; P8:       # %bb.0:
+; P8-NEXT:    xsrdpip f1, f1
+; P8-NEXT:    blr
+;
+; P9-LABEL: ceil_f64:
+; P9:       # %bb.0:
+; P9-NEXT:    xsrdpip f1, f1
+; P9-NEXT:    blr
+  %res = call double @llvm.experimental.constrained.ceil.f64(
+                        double %f1,
+                        metadata !"fpexcept.strict")
+  ret double %res
+}
+
+define <4 x float> @ceil_v4f32(<4 x float> %vf1) {
+; P8-LABEL: ceil_v4f32:
+; P8:       # %bb.0:
+; P8-NEXT:    xvrspip v2, v2
+; P8-NEXT:    blr
+;
+; P9-LABEL: ceil_v4f32:
+; P9:       # %bb.0:
+; P9-NEXT:    xvrspip v2, v2
+; P9-NEXT:    blr
+  %res = call <4 x float> @llvm.experimental.constrained.ceil.v4f32(
+                        <4 x float> %vf1,
+                        metadata !"fpexcept.strict")
+  ret <4 x float> %res
+}
+
+define <2 x double> @ceil_v2f64(<2 x double> %vf1) {
+; P8-LABEL: ceil_v2f64:
+; P8:       # %bb.0:
+; P8-NEXT:    xvrdpip v2, v2
+; P8-NEXT:    blr
+;
+; P9-LABEL: ceil_v2f64:
+; P9:       # %bb.0:
+; P9-NEXT:    xvrdpip v2, v2
+; P9-NEXT:    blr
+  %res = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(
+                        <2 x double> %vf1,
+                        metadata !"fpexcept.strict")
+  ret <2 x double> %res
+}
+
+define float @floor_f32(float %f1) {
+; P8-LABEL: floor_f32:
+; P8:       # %bb.0:
+; P8-NEXT:    frim f1, f1
+; P8-NEXT:    blr
+;
+; P9-LABEL: floor_f32:
+; P9:       # %bb.0:
+; P9-NEXT:    frim f1, f1
+; P9-NEXT:    blr
+  %res = call float @llvm.experimental.constrained.floor.f32(
+                        float %f1,
+                        metadata !"fpexcept.strict")
+  ret float %res
+}
+
+define double @floor_f64(double %f1) {
+; P8-LABEL: floor_f64:
+; P8:       # %bb.0:
+; P8-NEXT:    xsrdpim f1, f1
+; P8-NEXT:    blr
+;
+; P9-LABEL: floor_f64:
+; P9:       # %bb.0:
+; P9-NEXT:    xsrdpim f1, f1
+; P9-NEXT:    blr
+  %res = call double @llvm.experimental.constrained.floor.f64(
+                        double %f1,
+                        metadata !"fpexcept.strict")
+  ret double %res;
+}
+
+define <4 x float> @floor_v4f32(<4 x float> %vf1) {
+; P8-LABEL: floor_v4f32:
+; P8:       # %bb.0:
+; P8-NEXT:    xvrspim v2, v2
+; P8-NEXT:    blr
+;
+; P9-LABEL: floor_v4f32:
+; P9:       # %bb.0:
+; P9-NEXT:    xvrspim v2, v2
+; P9-NEXT:    blr
+  %res = call <4 x float> @llvm.experimental.constrained.floor.v4f32(
+                        <4 x float> %vf1,
+                        metadata !"fpexcept.strict")
+  ret <4 x float> %res;
+}
+
+define <2 x double> @floor_v2f64(<2 x double> %vf1) {
+; P8-LABEL: floor_v2f64:
+; P8:       # %bb.0:
+; P8-NEXT:    xvrdpim v2, v2
+; P8-NEXT:    blr
+;
+; P9-LABEL: floor_v2f64:
+; P9:       # %bb.0:
+; P9-NEXT:    xvrdpim v2, v2
+; P9-NEXT:    blr
+  %res = call <2 x double> @llvm.experimental.constrained.floor.v2f64(
+                        <2 x double> %vf1,
+                        metadata !"fpexcept.strict")
+  ret <2 x double> %res;
+}
+
+define double @nearbyint_f64(double %f1, double %f2) {
+; P8-LABEL: nearbyint_f64:
+; P8:       # %bb.0:
+; P8-NEXT:    xsrdpic f1, f1
+; P8-NEXT:    blr
+;
+; P9-LABEL: nearbyint_f64:
+; P9:       # %bb.0:
+; P9-NEXT:    xsrdpic f1, f1
+; P9-NEXT:    blr
+  %res = call double @llvm.experimental.constrained.nearbyint.f64(
+                        double %f1,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict")
+  ret double %res
+}
+
+define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) {
+; P8-LABEL: nearbyint_v4f32:
+; P8:       # %bb.0:
+; P8-NEXT:    xvrspic v2, v2
+; P8-NEXT:    blr
+;
+; P9-LABEL: nearbyint_v4f32:
+; P9:       # %bb.0:
+; P9-NEXT:    xvrspic v2, v2
+; P9-NEXT:    blr
+  %res = call <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(
+                        <4 x float> %vf1,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict")
+  ret <4 x float> %res
+}
+
+define <2 x double> @nearbyint_v2f64(<2 x double> %vf1, <2 x double> %vf2) {
+; P8-LABEL: nearbyint_v2f64:
+; P8:       # %bb.0:
+; P8-NEXT:    xvrdpic v2, v2
+; P8-NEXT:    blr
+;
+; P9-LABEL: nearbyint_v2f64:
+; P9:       # %bb.0:
+; P9-NEXT:    xvrdpic v2, v2
+; P9-NEXT:    blr
+  %res = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(
+                        <2 x double> %vf1,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict")
+  ret <2 x double> %res
+}
+
+define <4 x double> @fpext_v4f64_v4f32(<4 x float> %vf1) {
+; P8-LABEL: fpext_v4f64_v4f32:
+; P8:       # %bb.0:
+; P8-NEXT:    xxsldwi vs0, v2, v2, 1
+; P8-NEXT:    xxsldwi vs1, v2, v2, 3
+; P8-NEXT:    xxswapd vs3, v2
+; P8-NEXT:    xscvspdpn f2, v2
+; P8-NEXT:    xscvspdpn f0, vs0
+; P8-NEXT:    xscvspdpn f1, vs1
+; P8-NEXT:    xscvspdpn f3, vs3
+; P8-NEXT:    xxmrghd v2, vs2, vs0
+; P8-NEXT:    xxmrghd v3, vs3, vs1
+; P8-NEXT:    blr
+;
+; P9-LABEL: fpext_v4f64_v4f32:
+; P9:       # %bb.0:
+; P9-NEXT:    xxsldwi vs0, v2, v2, 3
+; P9-NEXT:    xxswapd vs1, v2
+; P9-NEXT:    xscvspdpn f0, vs0
+; P9-NEXT:    xscvspdpn f1, vs1
+; P9-NEXT:    xxsldwi vs2, v2, v2, 1
+; P9-NEXT:    xscvspdpn f2, vs2
+; P9-NEXT:    xxmrghd vs0, vs1, vs0
+; P9-NEXT:    xscvspdpn f1, v2
+; P9-NEXT:    xxmrghd v3, vs1, vs2
+; P9-NEXT:    xxlor v2, vs0, vs0
+; P9-NEXT:    blr
+  %res = call <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(
+                        <4 x float> %vf1,
+                        metadata !"fpexcept.strict")
+  ret <4 x double> %res
+}
+
+define <2 x double> @fpext_v2f64_v2f32(<2 x float> %vf1) {
+; P8-LABEL: fpext_v2f64_v2f32:
+; P8:       # %bb.0:
+; P8-NEXT:    xxsldwi vs0, v2, v2, 1
+; P8-NEXT:    xscvspdpn f1, v2
+; P8-NEXT:    xscvspdpn f0, vs0
+; P8-NEXT:    xxmrghd v2, vs1, vs0
+; P8-NEXT:    blr
+;
+; P9-LABEL: fpext_v2f64_v2f32:
+; P9:       # %bb.0:
+; P9-NEXT:    xxsldwi vs0, v2, v2, 3
+; P9-NEXT:    xxswapd vs1, v2
+; P9-NEXT:    xscvspdpn f0, vs0
+; P9-NEXT:    xscvspdpn f1, vs1
+; P9-NEXT:    xxmrghd v2, vs1, vs0
+; P9-NEXT:    blr
+  %res = call <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(
+                        <2 x float> %vf1,
+                        metadata !"fpexcept.strict")
+  ret <2 x double> %res
+}
+
+define float @fptrunc_f32_f64(double %f1) {
+; P8-LABEL: fptrunc_f32_f64:
+; P8:       # %bb.0:
+; P8-NEXT:    xsrsp f1, f1
+; P8-NEXT:    blr
+;
+; P9-LABEL: fptrunc_f32_f64:
+; P9:       # %bb.0:
+; P9-NEXT:    xsrsp f1, f1
+; P9-NEXT:    blr
+  %res = call float @llvm.experimental.constrained.fptrunc.f32.f64(
+                        double %f1,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict")
+  ret float %res;
+}
+
+define <4 x float> @fptrunc_v4f32_v4f64(<4 x double> %vf1) {
+; P8-LABEL: fptrunc_v4f32_v4f64:
+; P8:       # %bb.0:
+; P8-NEXT:    xxmrgld vs0, v2, v3
+; P8-NEXT:    xxmrghd vs1, v2, v3
+; P8-NEXT:    xvcvdpsp v2, vs0
+; P8-NEXT:    xvcvdpsp v3, vs1
+; P8-NEXT:    vmrgew v2, v3, v2
+; P8-NEXT:    blr
+;
+; P9-LABEL: fptrunc_v4f32_v4f64:
+; P9:       # %bb.0:
+; P9-NEXT:    xxmrgld vs0, v3, v2
+; P9-NEXT:    xvcvdpsp v4, vs0
+; P9-NEXT:    xxmrghd vs0, v3, v2
+; P9-NEXT:    xvcvdpsp v2, vs0
+; P9-NEXT:    vmrgew v2, v2, v4
+; P9-NEXT:    blr
+  %res = call <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(
+                        <4 x double> %vf1,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict")
+  ret <4 x float> %res
+}
+
+define <2 x float> @fptrunc_v2f32_v2f64(<2 x double> %vf1) {
+; P8-LABEL: fptrunc_v2f32_v2f64:
+; P8:       # %bb.0:
+; P8-NEXT:    xxswapd vs0, v2
+; P8-NEXT:    xsrsp f1, v2
+; P8-NEXT:    xsrsp f0, f0
+; P8-NEXT:    xscvdpspn v2, f1
+; P8-NEXT:    xscvdpspn v3, f0
+; P8-NEXT:    vmrghw v2, v2, v3
+; P8-NEXT:    blr
+;
+; P9-LABEL: fptrunc_v2f32_v2f64:
+; P9:       # %bb.0:
+; P9-NEXT:    xsrsp f0, v2
+; P9-NEXT:    xscvdpspn vs0, f0
+; P9-NEXT:    xxsldwi v3, vs0, vs0, 1
+; P9-NEXT:    xxswapd vs0, v2
+; P9-NEXT:    xsrsp f0, f0
+; P9-NEXT:    xscvdpspn vs0, f0
+; P9-NEXT:    xxsldwi v2, vs0, vs0, 1
+; P9-NEXT:    vmrglw v2, v3, v2
+; P9-NEXT:    blr
+  %res = call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(
+                        <2 x double> %vf1,
+                        metadata !"round.dynamic",
+                        metadata !"fpexcept.strict")
+  ret <2 x float> %res
+}
+
+define float @round_f32(float %f1) {
+; P8-LABEL: round_f32:
+; P8:       # %bb.0:
+; P8-NEXT:    frin f1, f1
+; P8-NEXT:    blr
+;
+; P9-LABEL: round_f32:
+; P9:       # %bb.0:
+; P9-NEXT:    frin f1, f1
+; P9-NEXT:    blr
+  %res = call float @llvm.experimental.constrained.round.f32(
+                        float %f1,
+                        metadata !"fpexcept.strict")
+  ret float %res
+}
+
+define double @round_f64(double %f1) {
+; P8-LABEL: round_f64:
+; P8:       # %bb.0:
+; P8-NEXT:    xsrdpi f1, f1
+; P8-NEXT:    blr
+;
+; P9-LABEL: round_f64:
+; P9:       # %bb.0:
+; P9-NEXT:    xsrdpi f1, f1
+; P9-NEXT:    blr
+  %res = call double @llvm.experimental.constrained.round.f64(
+                        double %f1,
+                        metadata !"fpexcept.strict")
+  ret double %res
+}
+
+define <4 x float> @round_v4f32(<4 x float> %vf1) {
+; P8-LABEL: round_v4f32:
+; P8:       # %bb.0:
+; P8-NEXT:    xvrspi v2, v2
+; P8-NEXT:    blr
+;
+; P9-LABEL: round_v4f32:
+; P9:       # %bb.0:
+; P9-NEXT:    xvrspi v2, v2
+; P9-NEXT:    blr
+  %res = call <4 x float> @llvm.experimental.constrained.round.v4f32(
+                        <4 x float> %vf1,
+                        metadata !"fpexcept.strict")
+  ret <4 x float> %res
+}
+
+define <2 x double> @round_v2f64(<2 x double> %vf1) {
+; P8-LABEL: round_v2f64:
+; P8:       # %bb.0:
+; P8-NEXT:    xvrdpi v2, v2
+; P8-NEXT:    blr
+;
+; P9-LABEL: round_v2f64:
+; P9:       # %bb.0:
+; P9-NEXT:    xvrdpi v2, v2
+; P9-NEXT:    blr
+  %res = call <2 x double> @llvm.experimental.constrained.round.v2f64(
+                        <2 x double> %vf1,
+                        metadata !"fpexcept.strict")
+  ret <2 x double> %res
+}
+
+define float @trunc_f32(float %f1) {
+; P8-LABEL: trunc_f32:
+; P8:       # %bb.0:
+; P8-NEXT:    friz f1, f1
+; P8-NEXT:    blr
+;
+; P9-LABEL: trunc_f32:
+; P9:       # %bb.0:
+; P9-NEXT:    friz f1, f1
+; P9-NEXT:    blr
+  %res = call float @llvm.experimental.constrained.trunc.f32(
+                        float %f1,
+                        metadata !"fpexcept.strict")
+  ret float %res
+}
+
+define double @trunc_f64(double %f1) {
+; P8-LABEL: trunc_f64:
+; P8:       # %bb.0:
+; P8-NEXT:    xsrdpiz f1, f1
+; P8-NEXT:    blr
+;
+; P9-LABEL: trunc_f64:
+; P9:       # %bb.0:
+; P9-NEXT:    xsrdpiz f1, f1
+; P9-NEXT:    blr
+  %res = call double @llvm.experimental.constrained.trunc.f64(
+                        double %f1,
+                        metadata !"fpexcept.strict")
+  ret double %res
+}
+
+define <4 x float> @trunc_v4f32(<4 x float> %vf1) {
+; P8-LABEL: trunc_v4f32:
+; P8:       # %bb.0:
+; P8-NEXT:    xvrspiz v2, v2
+; P8-NEXT:    blr
+;
+; P9-LABEL: trunc_v4f32:
+; P9:       # %bb.0:
+; P9-NEXT:    xvrspiz v2, v2
+; P9-NEXT:    blr
+  %res = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(
+                        <4 x float> %vf1,
+                        metadata !"fpexcept.strict")
+  ret <4 x float> %res
+}
+
+define <2 x double> @trunc_v2f64(<2 x double> %vf1) {
+; P8-LABEL: trunc_v2f64:
+; P8:       # %bb.0:
+; P8-NEXT:    xvrdpiz v2, v2
+; P8-NEXT:    blr
+;
+; P9-LABEL: trunc_v2f64:
+; P9:       # %bb.0:
+; P9-NEXT:    xvrdpiz v2, v2
+; P9-NEXT:    blr
+  %res = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
+                        <2 x double> %vf1,
+                        metadata !"fpexcept.strict")
+  ret <2 x double> %res
+}
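
(Reviewer note, not part of the patch.) Note what the new file deliberately leaves out: nearbyint is only declared for f64 and the vector types, because VSX has xsrdpic/xvrdpic/xvrspic but no single-precision scalar form, and the PPCISelLowering.cpp hunk accordingly only marks (STRICT_)FNEARBYINT Legal for f64, v4f32, and v2f64. An f32 nearbyint therefore still goes to a libcall, as the scalar-rounding-ops.ll update below confirms; a sketch (hypothetical function name):

declare float @llvm.nearbyint.f32(float)

define float @nearbyint_f32_sketch(float %f) {
  ; FNEARBYINT is not Legal for f32, so this stays "bl nearbyintf".
  %r = call float @llvm.nearbyint.f32(float %f)
  ret float %r
}
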
diff --git a/llvm/test/CodeGen/PowerPC/fp-strict.ll b/llvm/test/CodeGen/PowerPC/fp-strict.ll
--- a/llvm/test/CodeGen/PowerPC/fp-strict.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-strict.ll
@@ -1,7 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s -mtriple=powerpc64-unknown-linux -mcpu=pwr8 | FileCheck %s
-; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr9 | FileCheck %s
-; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s -mtriple=powerpc64le-unknown-linux -mcpu=pwr8 -mattr=-vsx | FileCheck %s -check-prefix=NOVSX
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux \
+; RUN:   -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr -mcpu=pwr8 | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux \
+; RUN:   -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr -mcpu=pwr9 | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux \
+; RUN:   -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr -mcpu=pwr8 -mattr=-vsx | \
+; RUN:   FileCheck %s -check-prefix=NOVSX
 
 declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
 declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
diff --git a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
--- a/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/PowerPC/ppcf128-constrained-fp-intrinsics.ll
@@ -1071,12 +1071,12 @@
 define float @test_fptrunc_ppc_fp128_f32(ppc_fp128 %first) nounwind {
 ; PC64LE-LABEL: test_fptrunc_ppc_fp128_f32:
 ; PC64LE:       # %bb.0: # %entry
-; PC64LE-NEXT:    frsp 1, 1
+; PC64LE-NEXT:    xsrsp 1, 1
 ; PC64LE-NEXT:    blr
 ;
 ; PC64LE9-LABEL: test_fptrunc_ppc_fp128_f32:
 ; PC64LE9:       # %bb.0: # %entry
-; PC64LE9-NEXT:    frsp 1, 1
+; PC64LE9-NEXT:    xsrsp 1, 1
 ; PC64LE9-NEXT:    blr
 ;
 ; PC64-LABEL: test_fptrunc_ppc_fp128_f32:
@@ -1369,7 +1369,7 @@
 ; PC64LE-NEXT:    stfd 30, 32(30)
 ; PC64LE-NEXT:    bl __powitf2
 ; PC64LE-NEXT:    nop
-; PC64LE-NEXT:    frsp 0, 1
+; PC64LE-NEXT:    xsrsp 0, 1
 ; PC64LE-NEXT:    stfsx 0, 0, 29
 ; PC64LE-NEXT:    stfd 1, -16(30)
 ; PC64LE-NEXT:    stfd 2, -8(30)
@@ -1407,8 +1407,8 @@
 ; PC64LE9-NEXT:    nop
 ; PC64LE9-NEXT:    fmr 3, 1
 ; PC64LE9-NEXT:    fmr 4, 2
-; PC64LE9-NEXT:    fmr 30, 2
-; PC64LE9-NEXT:    fmr 29, 1
+; PC64LE9-NEXT:    fmr 30, 1
+; PC64LE9-NEXT:    fmr 29, 2
 ; PC64LE9-NEXT:    stfd 2, 24(30)
 ; PC64LE9-NEXT:    stfd 1, 16(30)
 ; PC64LE9-NEXT:    bl __gcc_qmul
@@ -1416,11 +1416,11 @@
 ; PC64LE9-NEXT:    fmr 1, 31
 ; PC64LE9-NEXT:    xxlxor 2, 2, 2
 ; PC64LE9-NEXT:    li 5, 2
-; PC64LE9-NEXT:    stfd 30, 40(30)
-; PC64LE9-NEXT:    stfd 29, 32(30)
+; PC64LE9-NEXT:    stfd 29, 40(30)
+; PC64LE9-NEXT:    stfd 30, 32(30)
 ; PC64LE9-NEXT:    bl __powitf2
 ; PC64LE9-NEXT:    nop
-; PC64LE9-NEXT:    frsp 0, 1
+; PC64LE9-NEXT:    xsrsp 0, 1
 ; PC64LE9-NEXT:    stfs 0, 0(29)
 ; PC64LE9-NEXT:    stfd 1, -16(30)
 ; PC64LE9-NEXT:    stfd 2, -8(30)
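
(Reviewer note, not part of the patch.) The ppc_fp128 change above is the same XSRSP story: ppc_fp128 is IBM double-double, and truncating it to float only requires rounding the high double to single precision, so the lone frsp becomes a lone xsrsp. Sketch (hypothetical function name):

define float @trunc_ppcf128_sketch(ppc_fp128 %x) {
  ; Rounds the high double of the double-double pair to single
  ; precision; selects xsrsp on VSX targets after this patch.
  %f = fptrunc ppc_fp128 %x to float
  ret float %f
}
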
diff --git a/llvm/test/CodeGen/PowerPC/scalar-rounding-ops.ll b/llvm/test/CodeGen/PowerPC/scalar-rounding-ops.ll
--- a/llvm/test/CodeGen/PowerPC/scalar-rounding-ops.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-rounding-ops.ll
@@ -342,14 +342,14 @@
 define dso_local double @test_nearbyint(double %d) local_unnamed_addr {
 ; BE-LABEL: test_nearbyint:
-; BE:       # %bb.0: # %entry
-; BE:    bl nearbyint
-; BE:    blr
+; BE:       # %bb.0: # %entry
+; BE-NEXT:    xsrdpic f1, f1
+; BE-NEXT:    blr
 ;
 ; CHECK-LABEL: test_nearbyint:
-; CHECK:       # %bb.0: # %entry
-; CHECK:    bl nearbyint
-; CHECK:    blr
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xsrdpic f1, f1
+; CHECK-NEXT:    blr
 ;
 ; FAST-LABEL: test_nearbyint:
 ; FAST:       # %bb.0: # %entry
@@ -364,14 +364,32 @@
 define dso_local float @test_nearbyintf(float %f) local_unnamed_addr {
 ; BE-LABEL: test_nearbyintf:
-; BE:       # %bb.0: # %entry
-; BE:    bl nearbyint
-; BE:    blr
+; BE:       # %bb.0: # %entry
+; BE-NEXT:    mflr r0
+; BE-NEXT:    std r0, 16(r1)
+; BE-NEXT:    stdu r1, -112(r1)
+; BE-NEXT:    .cfi_def_cfa_offset 112
+; BE-NEXT:    .cfi_offset lr, 16
+; BE-NEXT:    bl nearbyintf
+; BE-NEXT:    nop
+; BE-NEXT:    addi r1, r1, 112
+; BE-NEXT:    ld r0, 16(r1)
+; BE-NEXT:    mtlr r0
+; BE-NEXT:    blr
 ;
 ; CHECK-LABEL: test_nearbyintf:
-; CHECK:       # %bb.0: # %entry
-; CHECK:    bl nearbyintf
-; CHECK:    blr
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    mflr r0
+; CHECK-NEXT:    std r0, 16(r1)
+; CHECK-NEXT:    stdu r1, -32(r1)
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset lr, 16
+; CHECK-NEXT:    bl nearbyintf
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    addi r1, r1, 32
+; CHECK-NEXT:    ld r0, 16(r1)
+; CHECK-NEXT:    mtlr r0
+; CHECK-NEXT:    blr
 ;
 ; FAST-LABEL: test_nearbyintf:
 ; FAST:       # %bb.0: # %entry
diff --git a/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
--- a/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-constrained-fp-intrinsics.ll
@@ -4967,50 +4967,19 @@
 define <2 x double> @constrained_vector_nearbyint_v2f64() #0 {
 ; PC64LE-LABEL: constrained_vector_nearbyint_v2f64:
 ; PC64LE:       # %bb.0: # %entry
-; PC64LE-NEXT:    mflr 0
-; PC64LE-NEXT:    std 0, 16(1)
-; PC64LE-NEXT:    stdu 1, -64(1)
 ; PC64LE-NEXT:    addis 3, 2, .LCPI81_0@toc@ha
-; PC64LE-NEXT:    lfd 1, .LCPI81_0@toc@l(3)
-; PC64LE-NEXT:    bl nearbyint
-; PC64LE-NEXT:    nop
-; PC64LE-NEXT:    li 3, 48
-; PC64LE-NEXT:    # kill: def $f1 killed $f1 def $vsl1
-; PC64LE-NEXT:    stxvd2x 1, 1, 3 # 16-byte Folded Spill
-; PC64LE-NEXT:    addis 3, 2, .LCPI81_1@toc@ha
-; PC64LE-NEXT:    lfs 1, .LCPI81_1@toc@l(3)
-; PC64LE-NEXT:    bl nearbyint
-; PC64LE-NEXT:    nop
-; PC64LE-NEXT:    li 3, 48
-; PC64LE-NEXT:    # kill: def $f1 killed $f1 def $vsl1
-; PC64LE-NEXT:    lxvd2x 0, 1, 3 # 16-byte Folded Reload
-; PC64LE-NEXT:    xxmrghd 34, 1, 0
-; PC64LE-NEXT:    addi 1, 1, 64
-; PC64LE-NEXT:    ld 0, 16(1)
-; PC64LE-NEXT:    mtlr 0
+; PC64LE-NEXT:    addi 3, 3, .LCPI81_0@toc@l
+; PC64LE-NEXT:    lxvd2x 0, 0, 3
+; PC64LE-NEXT:    xxswapd 0, 0
+; PC64LE-NEXT:    xvrdpic 34, 0
 ; PC64LE-NEXT:    blr
 ;
 ; PC64LE9-LABEL: constrained_vector_nearbyint_v2f64:
 ; PC64LE9:       # %bb.0: # %entry
-; PC64LE9-NEXT:    mflr 0
-; PC64LE9-NEXT:    std 0, 16(1)
-; PC64LE9-NEXT:    stdu 1, -48(1)
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI81_0@toc@ha
-; PC64LE9-NEXT:    lfd 1, .LCPI81_0@toc@l(3)
-; PC64LE9-NEXT:    bl nearbyint
-; PC64LE9-NEXT:    nop
-; PC64LE9-NEXT:    addis 3, 2, .LCPI81_1@toc@ha
-; PC64LE9-NEXT:    # kill: def $f1 killed $f1 def $vsl1
-; PC64LE9-NEXT:    stxv 1, 32(1) # 16-byte Folded Spill
-; PC64LE9-NEXT:    lfs 1, .LCPI81_1@toc@l(3)
-; PC64LE9-NEXT:    bl nearbyint
-; PC64LE9-NEXT:    nop
-; PC64LE9-NEXT:    lxv 0, 32(1) # 16-byte Folded Reload
-; PC64LE9-NEXT:    # kill: def $f1 killed $f1 def $vsl1
-; PC64LE9-NEXT:    xxmrghd 34, 1, 0
-; PC64LE9-NEXT:    addi 1, 1, 48
-; PC64LE9-NEXT:    ld 0, 16(1)
-; PC64LE9-NEXT:    mtlr 0
+; PC64LE9-NEXT:    addi 3, 3, .LCPI81_0@toc@l
+; PC64LE9-NEXT:    lxvx 0, 0, 3
+; PC64LE9-NEXT:    xvrdpic 34, 0
 ; PC64LE9-NEXT:    blr
 entry:
   %nearby = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(
@@ -5109,72 +5078,31 @@
 define <3 x double> @constrained_vector_nearby_v3f64() #0 {
 ; PC64LE-LABEL: constrained_vector_nearby_v3f64:
 ; PC64LE:       # %bb.0: # %entry
-; PC64LE-NEXT:    mflr 0
-; PC64LE-NEXT:    std 0, 16(1)
-; PC64LE-NEXT:    stdu 1, -80(1)
-; PC64LE-NEXT:    li 3, 64
-; PC64LE-NEXT:    stxvd2x 63, 1, 3 # 16-byte Folded Spill
+; PC64LE-NEXT:    addis 3, 2, .LCPI83_1@toc@ha
+; PC64LE-NEXT:    addi 3, 3, .LCPI83_1@toc@l
+; PC64LE-NEXT:    lxvd2x 0, 0, 3
 ; PC64LE-NEXT:    addis 3, 2, .LCPI83_0@toc@ha
 ; PC64LE-NEXT:    lfd 1, .LCPI83_0@toc@l(3)
-; PC64LE-NEXT:    bl nearbyint
-; PC64LE-NEXT:    nop
-; PC64LE-NEXT:    xxswapd 0, 63
-; PC64LE-NEXT:    li 3, 64
-; PC64LE-NEXT:    xxlor 2, 63, 63
-; PC64LE-NEXT:    lxvd2x 63, 1, 3 # 16-byte Folded Reload
-; PC64LE-NEXT:    fmr 3, 1
-; PC64LE-NEXT:    fmr 1, 0
-; PC64LE-NEXT:    addi 1, 1, 80
-; PC64LE-NEXT:    ld 0, 16(1)
-; PC64LE-NEXT:    mtlr 0
+; PC64LE-NEXT:    xxswapd 0, 0
+; PC64LE-NEXT:    xsrdpic 3, 1
+; PC64LE-NEXT:    xvrdpic 2, 0
+; PC64LE-NEXT:    xxswapd 1, 2
+; PC64LE-NEXT:    # kill: def $f2 killed $f2 killed $vsl2
+; PC64LE-NEXT:    # kill: def $f1 killed $f1 killed $vsl1
 ; PC64LE-NEXT:    blr
 ;
 ; PC64LE9-LABEL: constrained_vector_nearby_v3f64:
 ; PC64LE9:       # %bb.0: # %entry
-; PC64LE9-NEXT:    mflr 0
-; PC64LE9-NEXT:    std 0, 16(1)
-; PC64LE9-NEXT:    stdu 1, -64(1)
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI83_0@toc@ha
-; PC64LE9-NEXT:    lfd 1, .LCPI83_0@toc@l(3)
-; PC64LE9-NEXT:    stxv 63, 48(1) # 16-byte Folded Spill
-; PC64LE9-NEXT:    bl nearbyint
-; PC64LE9-NEXT:    nop
+; PC64LE9-NEXT:    lfd 0, .LCPI83_0@toc@l(3)
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI83_1@toc@ha
-; PC64LE9-NEXT:    # kill: def $f1 killed $f1 def $vsl1
-; PC64LE9-NEXT:    stxv 1, 32(1) # 16-byte Folded Spill
-; PC64LE9-NEXT:    lfs 1, .LCPI83_1@toc@l(3)
-; PC64LE9-NEXT:    bl nearbyint
-; PC64LE9-NEXT:    nop
-; PC64LE9-NEXT:    lxv 0, 32(1) # 16-byte Folded Reload
-; PC64LE9-NEXT:    addis 3, 2, .LCPI83_2@toc@ha
-; PC64LE9-NEXT:    # kill: def $f1 killed $f1 def $vsl1
-; PC64LE9-NEXT:    xxmrghd 63, 0, 1
-; PC64LE9-NEXT:    lfd 1, .LCPI83_2@toc@l(3)
-; PC64LE9-NEXT:    bl nearbyint
-; PC64LE9-NEXT:    nop
-; PC64LE9-NEXT:    fmr 3, 1
-; PC64LE9-NEXT:    xxswapd 1, 63
-; PC64LE9-NEXT:    xscpsgndp 2, 63, 63
-; PC64LE9-NEXT:    lxv 63, 48(1) # 16-byte Folded Reload
+; PC64LE9-NEXT:    addi 3, 3, .LCPI83_1@toc@l
+; PC64LE9-NEXT:    xsrdpic 3, 0
+; PC64LE9-NEXT:    lxvx 0, 0, 3
+; PC64LE9-NEXT:    xvrdpic 2, 0
+; PC64LE9-NEXT:    xxswapd 1, 2
 ; PC64LE9-NEXT:    # kill: def $f1 killed $f1 killed $vsl1
-; PC64LE9-NEXT:    addi 1, 1, 64
-; PC64LE9-NEXT:    ld 0, 16(1)
-; PC64LE9-NEXT:    mtlr 0
+; PC64LE9-NEXT:    # kill: def $f2 killed $f2 killed $vsl2
 ; PC64LE9-NEXT:    blr
 entry:
   %nearby = call <3 x double> @llvm.experimental.constrained.nearbyint.v3f64(
@@ -5187,86 +5115,28 @@
 define <4 x double> @constrained_vector_nearbyint_v4f64() #0 {
 ; PC64LE-LABEL: constrained_vector_nearbyint_v4f64:
 ; PC64LE:       # %bb.0: # %entry
-; PC64LE-NEXT:    mflr 0
-; PC64LE-NEXT:    std 0, 16(1)
-; PC64LE-NEXT:    stdu 1, -80(1)
-; PC64LE-NEXT:    li 3, 64
-; PC64LE-NEXT:    stxvd2x 63, 1, 3 # 16-byte Folded Spill
 ; PC64LE-NEXT:    addis 3, 2, .LCPI84_0@toc@ha
-; PC64LE-NEXT:    lfd 1, .LCPI84_0@toc@l(3)
-; PC64LE-NEXT:    bl nearbyint
-; PC64LE-NEXT:    nop
-; PC64LE-NEXT:    li 3, 48
-; PC64LE-NEXT:    # kill: def $f1 killed $f1 def $vsl1
-; PC64LE-NEXT:    stxvd2x 1, 1, 3 # 16-byte Folded Spill
-; PC64LE-NEXT:    addis 3, 2, .LCPI84_1@toc@ha
-; PC64LE-NEXT:    lfd 1, .LCPI84_1@toc@l(3)
-; PC64LE-NEXT:    bl nearbyint
-; PC64LE-NEXT:    nop
-; PC64LE-NEXT:    li 3, 48
-; PC64LE-NEXT:    # kill: def $f1 killed $f1 def $vsl1
-; PC64LE-NEXT:    lxvd2x 0, 1, 3 # 16-byte Folded Reload
-; PC64LE-NEXT:    addis 3, 2, .LCPI84_2@toc@ha
-; PC64LE-NEXT:    xxmrghd 63, 1, 0
-; PC64LE-NEXT:    lfd 1, .LCPI84_2@toc@l(3)
-; PC64LE-NEXT:    bl nearbyint
-; PC64LE-NEXT:    nop
-; PC64LE-NEXT:    li 3, 48
-; PC64LE-NEXT:    # kill: def $f1 killed $f1 def $vsl1
-; PC64LE-NEXT:    stxvd2x 1, 1, 3 # 16-byte Folded Spill
-; PC64LE-NEXT:    addis 3, 2, .LCPI84_3@toc@ha
-; PC64LE-NEXT:    lfd 1, .LCPI84_3@toc@l(3)
-; PC64LE-NEXT:    bl nearbyint
-; PC64LE-NEXT:    nop
-; PC64LE-NEXT:    li 3, 48
-; PC64LE-NEXT:    vmr 2, 31
-; PC64LE-NEXT:    # kill: def $f1 killed $f1 def $vsl1
-; PC64LE-NEXT:    lxvd2x 0, 1, 3 # 16-byte Folded Reload
-; PC64LE-NEXT:    li 3, 64
-; PC64LE-NEXT:    lxvd2x 63, 1, 3 # 16-byte Folded Reload
-; PC64LE-NEXT:    xxmrghd 35, 1, 0
-; PC64LE-NEXT:    addi 1, 1, 80
-; PC64LE-NEXT:    ld 0, 16(1)
-; PC64LE-NEXT:    mtlr 0
+; PC64LE-NEXT:    addis 4, 2, .LCPI84_1@toc@ha
+; PC64LE-NEXT:    addi 3, 3, .LCPI84_0@toc@l
+; PC64LE-NEXT:    lxvd2x 0, 0, 3
+; PC64LE-NEXT:    addi 3, 4, .LCPI84_1@toc@l
+; PC64LE-NEXT:    lxvd2x 1, 0, 3
+; PC64LE-NEXT:    xxswapd 0, 0
+; PC64LE-NEXT:    xxswapd 1, 1
+; PC64LE-NEXT:    xvrdpic 35, 0
+; PC64LE-NEXT:    xvrdpic 34, 1
 ; PC64LE-NEXT:    blr
 ;
 ; PC64LE9-LABEL: constrained_vector_nearbyint_v4f64:
 ; PC64LE9:       # %bb.0: # %entry
-; PC64LE9-NEXT:    mflr 0
-; PC64LE9-NEXT:    std 0, 16(1)
-; PC64LE9-NEXT:    stdu 1, -64(1)
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI84_0@toc@ha
-; PC64LE9-NEXT:    lfd 1, .LCPI84_0@toc@l(3)
-; PC64LE9-NEXT:    stxv 63, 48(1) # 16-byte Folded Spill
-; PC64LE9-NEXT:    bl nearbyint
-; PC64LE9-NEXT:    nop
+; PC64LE9-NEXT:    addi 3, 3, .LCPI84_0@toc@l
+; PC64LE9-NEXT:    lxvx 0, 0, 3
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI84_1@toc@ha
-; PC64LE9-NEXT:    # kill: def $f1 killed $f1 def $vsl1
-; PC64LE9-NEXT:    stxv 1, 32(1) # 16-byte Folded Spill
-; PC64LE9-NEXT:    lfd 1, .LCPI84_1@toc@l(3)
-; PC64LE9-NEXT:    bl nearbyint
-; PC64LE9-NEXT:    nop
-; PC64LE9-NEXT:    lxv 0, 32(1) # 16-byte Folded Reload
-; PC64LE9-NEXT:    addis 3, 2, .LCPI84_2@toc@ha
-; PC64LE9-NEXT:    # kill: def $f1 killed $f1 def $vsl1
-; PC64LE9-NEXT:    xxmrghd 63, 1, 0
-; PC64LE9-NEXT:    lfd 1, .LCPI84_2@toc@l(3)
-; PC64LE9-NEXT:    bl nearbyint
-; PC64LE9-NEXT:    nop
-; PC64LE9-NEXT:    addis 3, 2, .LCPI84_3@toc@ha
-; PC64LE9-NEXT:    # kill: def $f1 killed $f1 def $vsl1
-; PC64LE9-NEXT:    stxv 1, 32(1) # 16-byte Folded Spill
-; PC64LE9-NEXT:    lfd 1, .LCPI84_3@toc@l(3)
-; PC64LE9-NEXT:    bl nearbyint
-; PC64LE9-NEXT:    nop
-; PC64LE9-NEXT:    lxv 0, 32(1) # 16-byte Folded Reload
-; PC64LE9-NEXT:    vmr 2, 31
-; PC64LE9-NEXT:    lxv 63, 48(1) # 16-byte Folded Reload
-; PC64LE9-NEXT:    # kill: def $f1 killed $f1 def $vsl1
-; PC64LE9-NEXT:    xxmrghd 35, 1, 0
-; PC64LE9-NEXT:    addi 1, 1, 64
-; PC64LE9-NEXT:    ld 0, 16(1)
-; PC64LE9-NEXT:    mtlr 0
+; PC64LE9-NEXT:    addi 3, 3, .LCPI84_1@toc@l
+; PC64LE9-NEXT:    xvrdpic 35, 0
+; PC64LE9-NEXT:    lxvx 0, 0, 3
+; PC64LE9-NEXT:    xvrdpic 34, 0
 ; PC64LE9-NEXT:    blr
 entry:
   %nearby = call <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(
@@ -6100,7 +5970,7 @@
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    addis 3, 2, .LCPI95_0@toc@ha
 ; PC64LE-NEXT:    lfd 0, .LCPI95_0@toc@l(3)
-; PC64LE-NEXT:    frsp 0, 0
+; PC64LE-NEXT:    xsrsp 0, 0
 ; PC64LE-NEXT:    xscvdpspn 0, 0
 ; PC64LE-NEXT:    xxsldwi 34, 0, 0, 1
 ; PC64LE-NEXT:    blr
@@ -6109,7 +5979,7 @@
 ; PC64LE9:       # %bb.0: # %entry
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI95_0@toc@ha
 ; PC64LE9-NEXT:    lfd 0, .LCPI95_0@toc@l(3)
-; PC64LE9-NEXT:    frsp 0, 0
+; PC64LE9-NEXT:    xsrsp 0, 0
 ; PC64LE9-NEXT:    xscvdpspn 0, 0
 ; PC64LE9-NEXT:    xxsldwi 34, 0, 0, 1
 ; PC64LE9-NEXT:    blr
@@ -6128,8 +5998,8 @@
 ; PC64LE-NEXT:    addis 4, 2, .LCPI96_1@toc@ha
 ; PC64LE-NEXT:    lfd 0, .LCPI96_0@toc@l(3)
 ; PC64LE-NEXT:    lfd 1, .LCPI96_1@toc@l(4)
-; PC64LE-NEXT:    frsp 0, 0
-; PC64LE-NEXT:    frsp 1, 1
+; PC64LE-NEXT:    xsrsp 0, 0
+; PC64LE-NEXT:    xsrsp 1, 1
 ; PC64LE-NEXT:    xscvdpspn 0, 0
 ; PC64LE-NEXT:    xscvdpspn 1, 1
 ; PC64LE-NEXT:    xxsldwi 34, 0, 0, 1
@@ -6142,11 +6012,11 @@
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI96_0@toc@ha
 ; PC64LE9-NEXT:    lfd 0, .LCPI96_0@toc@l(3)
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI96_1@toc@ha
-; PC64LE9-NEXT:    frsp 0, 0
+; PC64LE9-NEXT:    xsrsp 0, 0
 ; PC64LE9-NEXT:    xscvdpspn 0, 0
 ; PC64LE9-NEXT:    xxsldwi 34, 0, 0, 1
 ; PC64LE9-NEXT:    lfd 0, .LCPI96_1@toc@l(3)
-; PC64LE9-NEXT:    frsp 0, 0
+; PC64LE9-NEXT:    xsrsp 0, 0
 ; PC64LE9-NEXT:    xscvdpspn 0, 0
 ; PC64LE9-NEXT:    xxsldwi 35, 0, 0, 1
 ; PC64LE9-NEXT:    vmrglw 2, 3, 2
@@ -6167,12 +6037,12 @@
 ; PC64LE-NEXT:    lfd 0, .LCPI97_0@toc@l(3)
 ; PC64LE-NEXT:    lfd 1, .LCPI97_1@toc@l(4)
 ; PC64LE-NEXT:    addis 3, 2, .LCPI97_3@toc@ha
-; PC64LE-NEXT:    frsp 0, 0
+; PC64LE-NEXT:    xsrsp 0, 0
 ; PC64LE-NEXT:    lfd 2, .LCPI97_3@toc@l(3)
 ; PC64LE-NEXT:    addis 3, 2, .LCPI97_2@toc@ha
-; PC64LE-NEXT:    frsp 1, 1
+; PC64LE-NEXT:    xsrsp 1, 1
 ; PC64LE-NEXT:    addi 3, 3, .LCPI97_2@toc@l
-; PC64LE-NEXT:    frsp 2, 2
+; PC64LE-NEXT:    xsrsp 2, 2
 ; PC64LE-NEXT:    xscvdpspn 0, 0
 ; PC64LE-NEXT:    xscvdpspn 1, 1
 ; PC64LE-NEXT:    xxsldwi 34, 0, 0, 1
@@ -6189,20 +6059,20 @@
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI97_0@toc@ha
 ; PC64LE9-NEXT:    lfd 0, .LCPI97_0@toc@l(3)
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI97_1@toc@ha
-; PC64LE9-NEXT:    frsp 0, 0
+; PC64LE9-NEXT:    xsrsp 0, 0
 ; PC64LE9-NEXT:    xscvdpspn 0, 0
 ; PC64LE9-NEXT:    xxsldwi 34, 0, 0, 1
 ; PC64LE9-NEXT:    lfd 0, .LCPI97_1@toc@l(3)
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI97_2@toc@ha
 ; PC64LE9-NEXT:    addi 3, 3, .LCPI97_2@toc@l
-; PC64LE9-NEXT:    frsp 0, 0
+; PC64LE9-NEXT:    xsrsp 0, 0
 ; PC64LE9-NEXT:    xscvdpspn 0, 0
 ; PC64LE9-NEXT:    xxsldwi 35, 0, 0, 1
 ; PC64LE9-NEXT:    vmrglw 2, 3, 2
 ; PC64LE9-NEXT:    lxvx 35, 0, 3
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI97_3@toc@ha
 ; PC64LE9-NEXT:    lfd 0, .LCPI97_3@toc@l(3)
-; PC64LE9-NEXT:    frsp 0, 0
+; PC64LE9-NEXT:    xsrsp 0, 0
 ; PC64LE9-NEXT:    xscvdpspn 0, 0
 ; PC64LE9-NEXT:    xxsldwi 36, 0, 0, 1
 ; PC64LE9-NEXT:    vperm 2, 4, 2, 3
@@ -6221,12 +6091,16 @@
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    addis 3, 2, .LCPI98_0@toc@ha
 ; PC64LE-NEXT:    addis 4, 2, .LCPI98_1@toc@ha
-; PC64LE-NEXT:    addis 5, 2, .LCPI98_2@toc@ha
-; PC64LE-NEXT:    addis 6, 2, .LCPI98_3@toc@ha
 ; PC64LE-NEXT:    lfd 0, .LCPI98_0@toc@l(3)
+; PC64LE-NEXT:    addis 3, 2, .LCPI98_2@toc@ha
 ; PC64LE-NEXT:    lfd 1, .LCPI98_1@toc@l(4)
-; PC64LE-NEXT:    lfd 2, .LCPI98_2@toc@l(5)
-; PC64LE-NEXT:    lfd 3, .LCPI98_3@toc@l(6)
+; PC64LE-NEXT:    addis 4, 2, .LCPI98_3@toc@ha
+; PC64LE-NEXT:    lfd 2, .LCPI98_2@toc@l(3)
+; PC64LE-NEXT:    lfd 3, .LCPI98_3@toc@l(4)
+; PC64LE-NEXT:    xsrsp 0, 0
+; PC64LE-NEXT:    xsrsp 1, 1
+; PC64LE-NEXT:    xsrsp 2, 2
+; PC64LE-NEXT:    xsrsp 3, 3
 ; PC64LE-NEXT:    xxmrghd 0, 1, 0
 ; PC64LE-NEXT:    xxmrghd 1, 3, 2
 ; PC64LE-NEXT:    xvcvdpsp 34, 0
@@ -6241,11 +6115,15 @@
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI98_1@toc@ha
 ; PC64LE9-NEXT:    lfd 1, .LCPI98_1@toc@l(3)
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI98_2@toc@ha
+; PC64LE9-NEXT:    xsrsp 0, 0
+; PC64LE9-NEXT:    xsrsp 1, 1
 ; PC64LE9-NEXT:    xxmrghd 0, 1, 0
 ; PC64LE9-NEXT:    xvcvdpsp 34, 0
 ; PC64LE9-NEXT:    lfd 0, .LCPI98_2@toc@l(3)
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI98_3@toc@ha
 ; PC64LE9-NEXT:    lfd 1, .LCPI98_3@toc@l(3)
+; PC64LE9-NEXT:    xsrsp 0, 0
+; PC64LE9-NEXT:    xsrsp 1, 1
 ; PC64LE9-NEXT:    xxmrghd 0, 1, 0
 ; PC64LE9-NEXT:    xvcvdpsp 35, 0
 ; PC64LE9-NEXT:    vmrgew 2, 3, 2
@@ -6374,7 +6252,7 @@
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    addis 3, 2, .LCPI103_0@toc@ha
 ; PC64LE-NEXT:    lfs 0, .LCPI103_0@toc@l(3)
-; PC64LE-NEXT:    xsrdpip 0, 0
+; PC64LE-NEXT:    frip 0, 0
 ; PC64LE-NEXT:    xscvdpspn 0, 0
 ; PC64LE-NEXT:    xxsldwi 34, 0, 0, 1
 ; PC64LE-NEXT:    blr
@@ -6383,7 +6261,7 @@
 ; PC64LE9:       # %bb.0: # %entry
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI103_0@toc@ha
 ; PC64LE9-NEXT:    lfs 0, .LCPI103_0@toc@l(3)
-; PC64LE9-NEXT:    xsrdpip 0, 0
+; PC64LE9-NEXT:    frip 0, 0
 ; PC64LE9-NEXT:    xscvdpspn 0, 0
 ; PC64LE9-NEXT:    xxsldwi 34, 0, 0, 1
 ; PC64LE9-NEXT:    blr
@@ -6426,12 +6304,12 @@
 ; PC64LE-NEXT:    lfs 0, .LCPI105_2@toc@l(3)
 ; PC64LE-NEXT:    lfs 1, .LCPI105_1@toc@l(4)
 ; PC64LE-NEXT:    addis 3, 2, .LCPI105_0@toc@ha
-; PC64LE-NEXT:    xsrdpip 0, 0
+; PC64LE-NEXT:    frip 0, 0
 ; PC64LE-NEXT:    lfs 2, .LCPI105_0@toc@l(3)
 ; PC64LE-NEXT:    addis 3, 2, .LCPI105_3@toc@ha
-; PC64LE-NEXT:    xsrdpip 1, 1
+; PC64LE-NEXT:    frip 1, 1
 ; PC64LE-NEXT:    addi 3, 3, .LCPI105_3@toc@l
-; PC64LE-NEXT:    xsrdpip 2, 2
+; PC64LE-NEXT:    frip 2, 2
 ; PC64LE-NEXT:    xscvdpspn 0, 0
 ; PC64LE-NEXT:    xscvdpspn 1, 1
 ; PC64LE-NEXT:    xxsldwi 34, 0, 0, 1
@@ -6450,12 +6328,12 @@
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI105_1@toc@ha
 ; PC64LE9-NEXT:    lfs 1, .LCPI105_1@toc@l(3)
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI105_2@toc@ha
-; PC64LE9-NEXT:    xsrdpip 0, 0
+; PC64LE9-NEXT:    frip 0, 0
 ; PC64LE9-NEXT:    lfs 2, .LCPI105_2@toc@l(3)
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI105_3@toc@ha
 ; PC64LE9-NEXT:    addi 3, 3, .LCPI105_3@toc@l
-; PC64LE9-NEXT:    xsrdpip 1, 1
-; PC64LE9-NEXT:    xsrdpip 2, 2
+; PC64LE9-NEXT:    frip 1, 1
+; PC64LE9-NEXT:    frip 2, 2
 ; PC64LE9-NEXT:    xscvdpspn 0, 0
 ; PC64LE9-NEXT:    xscvdpspn 1, 1
 ; PC64LE9-NEXT:    xscvdpspn 2, 2
@@ -6514,7 +6392,7 @@
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    addis 3, 2, .LCPI107_0@toc@ha
 ; PC64LE-NEXT:    lfs 0, .LCPI107_0@toc@l(3)
-; PC64LE-NEXT:    xsrdpim 0, 0
+; PC64LE-NEXT:    frim 0, 0
 ; PC64LE-NEXT:    xscvdpspn 0, 0
 ; PC64LE-NEXT:    xxsldwi 34, 0, 0, 1
 ; PC64LE-NEXT:    blr
@@ -6523,7 +6401,7 @@
 ; PC64LE9:       # %bb.0: # %entry
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI107_0@toc@ha
 ; PC64LE9-NEXT:    lfs 0, .LCPI107_0@toc@l(3)
-; PC64LE9-NEXT:    xsrdpim 0, 0
+; PC64LE9-NEXT:    frim 0, 0
 ; PC64LE9-NEXT:    xscvdpspn 0, 0
 ; PC64LE9-NEXT:    xxsldwi 34, 0, 0, 1
 ; PC64LE9-NEXT:    blr
@@ -6567,12 +6445,12 @@
 ; PC64LE-NEXT:    lfs 0, .LCPI109_2@toc@l(3)
 ; PC64LE-NEXT:    lfs 1, .LCPI109_1@toc@l(4)
 ; PC64LE-NEXT:    addis 3, 2, .LCPI109_0@toc@ha
-; PC64LE-NEXT:    xsrdpim 0, 0
+; PC64LE-NEXT:    frim 0, 0
 ; PC64LE-NEXT:    lfs 2, .LCPI109_0@toc@l(3)
 ; PC64LE-NEXT:    addis 3, 2, .LCPI109_3@toc@ha
-; PC64LE-NEXT:    xsrdpim 1, 1
+; PC64LE-NEXT:    frim 1, 1
 ; PC64LE-NEXT:    addi 3, 3, .LCPI109_3@toc@l
-; PC64LE-NEXT:    xsrdpim 2, 2
+; PC64LE-NEXT:    frim 2, 2
 ; PC64LE-NEXT:    xscvdpspn 0, 0
 ; PC64LE-NEXT:    xscvdpspn 1, 1
 ; PC64LE-NEXT:    xxsldwi 34, 0, 0, 1
@@ -6591,12 +6469,12 @@
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI109_1@toc@ha
 ; PC64LE9-NEXT:    lfs 1, .LCPI109_1@toc@l(3)
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI109_2@toc@ha
-; PC64LE9-NEXT:    xsrdpim 0, 0
+; PC64LE9-NEXT:    frim 0, 0
 ; PC64LE9-NEXT:    lfs 2, .LCPI109_2@toc@l(3)
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI109_3@toc@ha
 ; PC64LE9-NEXT:    addi 3, 3, .LCPI109_3@toc@l
-; PC64LE9-NEXT:    xsrdpim 1, 1
-; PC64LE9-NEXT:    xsrdpim 2, 2
+; PC64LE9-NEXT:    frim 1, 1
+; PC64LE9-NEXT:    frim 2, 2
 ; PC64LE9-NEXT:    xscvdpspn 0, 0
 ; PC64LE9-NEXT:    xscvdpspn 1, 1
 ; PC64LE9-NEXT:    xscvdpspn 2, 2
@@ -6655,7 +6533,7 @@
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    addis 3, 2, .LCPI111_0@toc@ha
 ; PC64LE-NEXT:    lfs 0, .LCPI111_0@toc@l(3)
-; PC64LE-NEXT:    xsrdpi 0, 0
+; PC64LE-NEXT:    frin 0, 0
 ; PC64LE-NEXT:    xscvdpspn 0, 0
 ; PC64LE-NEXT:    xxsldwi 34, 0, 0, 1
 ; PC64LE-NEXT:    blr
@@ -6664,7 +6542,7 @@
 ; PC64LE9:       # %bb.0: # %entry
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI111_0@toc@ha
 ; PC64LE9-NEXT:    lfs 0, .LCPI111_0@toc@l(3)
-; PC64LE9-NEXT:    xsrdpi 0, 0
+; PC64LE9-NEXT:    frin 0, 0
 ; PC64LE9-NEXT:    xscvdpspn 0, 0
 ; PC64LE9-NEXT:    xxsldwi 34, 0, 0, 1
 ; PC64LE9-NEXT:    blr
@@ -6707,12 +6585,12 @@
 ; PC64LE-NEXT:    lfs 0, .LCPI113_2@toc@l(3)
 ; PC64LE-NEXT:    lfs 1, .LCPI113_1@toc@l(4)
 ; PC64LE-NEXT:    addis 3, 2, .LCPI113_0@toc@ha
-; PC64LE-NEXT:    xsrdpi 0, 0
+; PC64LE-NEXT:    frin 0, 0
 ; PC64LE-NEXT:    lfs 2, .LCPI113_0@toc@l(3)
 ; PC64LE-NEXT:    addis 3, 2, .LCPI113_3@toc@ha
-; PC64LE-NEXT:    xsrdpi 1, 1
+; PC64LE-NEXT:    frin 1, 1
 ; PC64LE-NEXT:    addi 3, 3, .LCPI113_3@toc@l
-; PC64LE-NEXT:    xsrdpi 2, 2
+; PC64LE-NEXT:    frin 2, 2
 ; PC64LE-NEXT:    xscvdpspn 0, 0
 ; PC64LE-NEXT:    xscvdpspn 1, 1
 ; PC64LE-NEXT:    xxsldwi 34, 0, 0, 1
@@ -6731,12 +6609,12 @@
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI113_1@toc@ha
 ; PC64LE9-NEXT:    lfs 1, .LCPI113_1@toc@l(3)
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI113_2@toc@ha
-; PC64LE9-NEXT:    xsrdpi 0, 0
+; PC64LE9-NEXT:    frin 0, 0
 ; PC64LE9-NEXT:    lfs 2, .LCPI113_2@toc@l(3)
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI113_3@toc@ha
 ; PC64LE9-NEXT:    addi 3, 3, .LCPI113_3@toc@l
-; PC64LE9-NEXT:    xsrdpi 1, 1
-; PC64LE9-NEXT:    xsrdpi 2, 2
+; PC64LE9-NEXT:    frin 1, 1
+; PC64LE9-NEXT:    frin 2, 2
 ; PC64LE9-NEXT:    xscvdpspn 0, 0
 ; PC64LE9-NEXT:    xscvdpspn 1, 1
 ; PC64LE9-NEXT:    xscvdpspn 2, 2
@@ -6796,7 +6674,7 @@
 ; PC64LE:       # %bb.0: # %entry
 ; PC64LE-NEXT:    addis 3, 2, .LCPI115_0@toc@ha
 ; PC64LE-NEXT:    lfs 0, .LCPI115_0@toc@l(3)
-; PC64LE-NEXT:    xsrdpiz 0, 0
+; PC64LE-NEXT:    friz 0, 0
 ; PC64LE-NEXT:    xscvdpspn 0, 0
 ; PC64LE-NEXT:    xxsldwi 34, 0, 0, 1
 ; PC64LE-NEXT:    blr
@@ -6805,7 +6683,7 @@
 ; PC64LE9:       # %bb.0: # %entry
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI115_0@toc@ha
 ; PC64LE9-NEXT:    lfs 0, .LCPI115_0@toc@l(3)
-; PC64LE9-NEXT:    xsrdpiz 0, 0
+; PC64LE9-NEXT:    friz 0, 0
 ; PC64LE9-NEXT:    xscvdpspn 0, 0
 ; PC64LE9-NEXT:    xxsldwi 34, 0, 0, 1
 ; PC64LE9-NEXT:    blr
@@ -6848,12 +6726,12 @@
 ; PC64LE-NEXT:    lfs 0, .LCPI117_2@toc@l(3)
 ; PC64LE-NEXT:    lfs 1, .LCPI117_1@toc@l(4)
 ; PC64LE-NEXT:    addis 3, 2, .LCPI117_0@toc@ha
-; PC64LE-NEXT:    xsrdpiz 0, 0
+; PC64LE-NEXT:    friz 0, 0
 ; PC64LE-NEXT:    lfs 2, .LCPI117_0@toc@l(3)
 ; PC64LE-NEXT:    addis 3, 2, .LCPI117_3@toc@ha
-; PC64LE-NEXT:    xsrdpiz 1, 1
+; PC64LE-NEXT:    friz 1, 1
 ; PC64LE-NEXT:    addi 3, 3, .LCPI117_3@toc@l
-; PC64LE-NEXT:    xsrdpiz 2, 2
+; PC64LE-NEXT:    friz 2, 2
 ; PC64LE-NEXT:    xscvdpspn 0, 0
 ; PC64LE-NEXT:    xscvdpspn 1, 1
 ; PC64LE-NEXT:    xxsldwi 34, 0, 0, 1
@@ -6872,12 +6750,12 @@
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI117_1@toc@ha
 ; PC64LE9-NEXT:    lfs 1, .LCPI117_1@toc@l(3)
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI117_2@toc@ha
-; PC64LE9-NEXT:    xsrdpiz 0, 0
+; PC64LE9-NEXT:    friz 0, 0
 ; PC64LE9-NEXT:    lfs 2, .LCPI117_2@toc@l(3)
 ; PC64LE9-NEXT:    addis 3, 2, .LCPI117_3@toc@ha
 ; PC64LE9-NEXT:    addi 3, 3, .LCPI117_3@toc@l
-; PC64LE9-NEXT:    xsrdpiz 1, 1
-; PC64LE9-NEXT:    xsrdpiz 2, 2
+; PC64LE9-NEXT:    friz 1, 1
+; PC64LE9-NEXT:    friz 2, 2
 ; PC64LE9-NEXT:    xscvdpspn 0, 0
 ; PC64LE9-NEXT:    xscvdpspn 1, 1
 ; PC64LE9-NEXT:    xscvdpspn 2, 2
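
(Reviewer note, not part of the patch.) The xsrdpi*-to-fri* churn in these last hunks appears to be pattern-selection fallout rather than a functional change: both encodings implement the same double-precision round-to-integer. The end-to-end behavior the patch buys is that a strict rounding intrinsic now selects a single hardware instruction; a closing sketch (hypothetical function name, matching the ceil_f32 test in fp-strict-round.ll):

declare float @llvm.experimental.constrained.ceil.f32(float, metadata)

define float @ceil_sketch(float %f) {
  ; With STRICT_FCEIL Legal under hasFPRND() and FRIPS matching
  ; any_fceil, this selects "frip f1, f1" while keeping FP
  ; exception semantics via mayRaiseFPException.
  %r = call float @llvm.experimental.constrained.ceil.f32(
           float %f, metadata !"fpexcept.strict")
  ret float %r
}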