diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -3809,7 +3809,7 @@
       Opc = Subtarget->hasVSX() ? PPC::XSCMPUDP : PPC::FCMPUD;
     } else {
       assert(LHS.getValueType() == MVT::f128 && "Unknown vt!");
-      assert(Subtarget->hasVSX() && "__float128 requires VSX");
+      assert(Subtarget->hasP9Vector() && "XSCMPUQP requires Power9 Vector");
       Opc = PPC::XSCMPUQP;
     }
     if (Chain)
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -1128,6 +1128,7 @@
     SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1217,6 +1217,27 @@
       setOperationAction(ISD::FP_ROUND, VT, Custom);
       setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom);
     }
+
+    // Expand the SELECT to SELECT_CC.
+    setOperationAction(ISD::SELECT, MVT::f128, Expand);
+
+    setOperationAction(ISD::SETCC, MVT::f128, Custom);
+    setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom);
+    setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom);
+
+    // Lower the select_cc as follows for fp128:
+    //   select_cc x, y, tv, fv, cc ->
+    //     z = setcc x, y, cc (expanded as a libcall)
+    //     select_cc z, 0, tv, fv, NE
+    for (auto VT : {MVT::i32, MVT::i64, MVT::f128})
+      setOperationAction(ISD::SELECT_CC, VT, Custom);
+
+    // Lower the br_cc as follows for fp128, since we have no native
+    // instruction to lower a setcc on fp128:
+    //   br_cc cc, x, y, dest ->
+    //     z = setcc x, y, cc (expanded as a libcall)
+    //     br_cc NE, z, 0, dest
+    setOperationAction(ISD::BR_CC, MVT::f128, Custom);
   }
 
   if (Subtarget.hasP9Altivec()) {
@@ -3318,21 +3339,43 @@
 }
 
 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
-  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+  bool IsStrict = Op->isStrictFPOpcode();
+  ISD::CondCode CC =
+      cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();
+  SDValue LHS = Op.getOperand(IsStrict ? 1 : 0);
+  SDValue RHS = Op.getOperand(IsStrict ? 2 : 1);
+  SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
+  EVT LHSVT = LHS.getValueType();
   SDLoc dl(Op);
 
+  // Soften the setcc into a libcall if the operand type is fp128.
+  if (LHSVT == MVT::f128) {
+    assert(!Subtarget.hasP9Vector() &&
+           "Don't custom lower the setcc for fp128 with P9 vector enabled");
+    softenSetCCOperands(DAG, LHSVT, LHS, RHS, CC, dl, LHS, RHS, Chain,
+                        Op->getOpcode() == ISD::STRICT_FSETCCS);
+    if (RHS.getNode())
+      LHS = DAG.getNode(ISD::SETCC, dl, Op.getValueType(), LHS, RHS,
+                        DAG.getCondCode(CC));
+    if (IsStrict)
+      return DAG.getMergeValues({LHS, Chain}, dl);
+    return LHS;
+  }
+
+  assert(!IsStrict && "Don't know how to handle the strict setcc");
+
   if (Op.getValueType() == MVT::v2i64) {
     // When the operands themselves are v2i64 values, we need to do something
     // special because VSX has no underlying comparison operations for these.
-    if (Op.getOperand(0).getValueType() == MVT::v2i64) {
+    if (LHS.getValueType() == MVT::v2i64) {
       // Equality can be handled by casting to the legal type for Altivec
       // comparisons, everything else needs to be expanded.
       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
-        return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
-                 DAG.getSetCC(dl, MVT::v4i32,
-                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
-                   DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
-                   CC));
+        return DAG.getNode(
+            ISD::BITCAST, dl, MVT::v2i64,
+            DAG.getSetCC(dl, MVT::v4i32,
+                         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, LHS),
+                         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, RHS), CC));
       }
 
       return SDValue();
@@ -3348,7 +3391,7 @@
   if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
     return V;
 
-  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
+  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
     // Leave comparisons against 0 and -1 alone for now, since they're usually
     // optimized. FIXME: revisit this when we can custom lower all setcc
     // optimizations.
@@ -3361,11 +3404,9 @@
   // condition register, reading it back out, and masking the correct bit. The
   // normal approach here uses sub to do this instead of xor. Using xor exposes
   // the result to other bit-twiddling opportunities.
-  EVT LHSVT = Op.getOperand(0).getValueType();
   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
     EVT VT = Op.getValueType();
-    SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
-                              Op.getOperand(1));
+    SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, LHS, RHS);
     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
   }
   return SDValue();
@@ -8140,21 +8181,60 @@
   return DAG.getVectorShuffle(WideVT, DL, Op1, Op2, ShuffV);
 }
 
+SDValue PPCTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
+  SDValue LHS = Op.getOperand(2);
+
+  if (LHS.getValueType() == MVT::f128) {
+    assert(!Subtarget.hasP9Vector() &&
+           "Don't custom lower the br_cc for fp128 with Power9 vector enabled");
+    SDValue Chain = Op.getOperand(0);
+    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
+    SDValue RHS = Op.getOperand(3);
+    SDValue Dest = Op.getOperand(4);
+    SDLoc dl(Op);
+    // Lower br_cc cc, lhs, rhs, dest ->
+    //   z = setcc lhs, rhs, cc
+    //   br_cc NE, z, 0, dest
+    EVT SetCCVT =
+        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f128);
+    SDValue Z = DAG.getSetCC(dl, SetCCVT, LHS, RHS, CC);
+    SDValue Zero = DAG.getConstant(0, dl, Z.getValueType());
+    return DAG.getNode(ISD::BR_CC, dl, MVT::Other, Chain,
+                       DAG.getCondCode(ISD::SETNE), Z, Zero, Dest);
+  }
+
+  return Op;
+}
+
 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
 /// possible.
 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
-  // Not FP, or using SPE? Not a fsel.
-  if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
-      !Op.getOperand(2).getValueType().isFloatingPoint() || Subtarget.hasSPE())
-    return Op;
-
   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
-
   EVT ResVT = Op.getValueType();
   EVT CmpVT = Op.getOperand(0).getValueType();
   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
-  SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
+  SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
   SDLoc dl(Op);
+
+  // PowerPC does not have a native instruction for fp128 comparison unless
+  // Power9 vector is enabled. So transform the select_cc as follows, so that
+  // the setcc will be converted into a libcall:
+  //   select_cc lhs, rhs, tv, fv, cc ->
+  //     z = setcc lhs, rhs, cc
+  //     select_cc z, 0, tv, fv, NE
+  if (CmpVT == MVT::f128 && !Subtarget.hasP9Vector()) {
+    SDValue Z = DAG.getSetCC(
+        dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT),
+        LHS, RHS, CC);
+    SDValue Zero = DAG.getConstant(0, dl, Z.getValueType());
+    return DAG.getSelectCC(dl, Z, Zero, TV, FV, ISD::SETNE);
+  }
+
+  // Not FP, or using SPE? Not a fsel.
+  if (!CmpVT.isFloatingPoint() || !TV.getValueType().isFloatingPoint() ||
+      Subtarget.hasSPE())
+    return Op;
+
   SDNodeFlags Flags = Op.getNode()->getFlags();
 
   // We have xsmaxcdp/xsmincdp which are OK to emit even in the
@@ -11065,7 +11145,11 @@
   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
+  case ISD::STRICT_FSETCC:
+  case ISD::STRICT_FSETCCS:
   case ISD::SETCC:              return LowerSETCC(Op, DAG);
+  case ISD::BR_CC:
+    return LowerBR_CC(Op, DAG);
   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
diff --git a/llvm/test/CodeGen/PowerPC/f128-compare.ll b/llvm/test/CodeGen/PowerPC/f128-compare.ll
--- a/llvm/test/CodeGen/PowerPC/f128-compare.ll
+++ b/llvm/test/CodeGen/PowerPC/f128-compare.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs \
 ; RUN:   -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | FileCheck %s
 ; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs \
-; RUN:   -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | FileCheck %s \
+; RUN:   -enable-soft-fp128 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | FileCheck %s \
 ; RUN:   -check-prefix=CHECK-P8
 
 @a_qp = common global fp128 0xL00000000000000000000000000000000, align 16
@@ -31,14 +31,12 @@
 ; CHECK-P8-NEXT: stdu r1, -32(r1)
 ; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
 ; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
 ; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
-; CHECK-P8-NEXT: addis r5, r2, a_qp@toc@ha
-; CHECK-P8-NEXT: addi r6, r5, a_qp@toc@l
-; CHECK-P8-NEXT: addi r7, r4, b_qp@toc@l
-; CHECK-P8-NEXT: ld r3, a_qp@toc@l(r5)
-; CHECK-P8-NEXT: ld r5, b_qp@toc@l(r4)
-; CHECK-P8-NEXT: ld r4, 8(r6)
-; CHECK-P8-NEXT: ld r6, 8(r7)
+; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
+; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
+; CHECK-P8-NEXT: lvx v2, 0, r3
+; CHECK-P8-NEXT: lvx v3, 0, r4
 ; CHECK-P8-NEXT: bl __gtkf2
 ; CHECK-P8-NEXT: nop
 ; CHECK-P8-NEXT: extsw r3, r3
@@ -79,14 +77,12 @@
 ; CHECK-P8-NEXT: stdu r1, -32(r1)
 ; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
 ; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
 ; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
-; CHECK-P8-NEXT: addis r5, r2, a_qp@toc@ha
-; CHECK-P8-NEXT: addi r6, r5, a_qp@toc@l
-; CHECK-P8-NEXT: addi r7, r4, b_qp@toc@l
-; CHECK-P8-NEXT: ld r3, a_qp@toc@l(r5)
-; CHECK-P8-NEXT: ld r5, b_qp@toc@l(r4)
-; CHECK-P8-NEXT: ld r4, 8(r6)
-; CHECK-P8-NEXT: ld r6, 8(r7)
+; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
+; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
+; CHECK-P8-NEXT: lvx v2, 0, r3
+; CHECK-P8-NEXT: lvx v3, 0, r4
 ; CHECK-P8-NEXT: bl __ltkf2
 ; CHECK-P8-NEXT: nop
 ; CHECK-P8-NEXT: rlwinm r3, r3, 1, 31, 31
@@ -125,14 +121,12 @@
 ; CHECK-P8-NEXT: stdu r1, -32(r1)
 ; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
 ; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
 ; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
-; CHECK-P8-NEXT: addis r5, r2, a_qp@toc@ha
-; CHECK-P8-NEXT: addi r6, r5, a_qp@toc@l
-; CHECK-P8-NEXT: addi r7, r4, b_qp@toc@l
-; CHECK-P8-NEXT: ld r3, a_qp@toc@l(r5)
-; CHECK-P8-NEXT: ld r5, b_qp@toc@l(r4)
-; CHECK-P8-NEXT: ld r4, 8(r6)
-; CHECK-P8-NEXT: ld r6, 8(r7)
+; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
+; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
+; CHECK-P8-NEXT: lvx v2, 0, r3
+; CHECK-P8-NEXT: lvx v3, 0, r4
 ; CHECK-P8-NEXT: bl __gekf2
 ; CHECK-P8-NEXT: nop
 ; CHECK-P8-NEXT: rlwinm r3, r3, 1, 31, 31
@@ -172,14 +166,12 @@
 ; CHECK-P8-NEXT: stdu r1, -32(r1)
 ; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
 ; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
 ; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
-; CHECK-P8-NEXT: addis r5, r2, a_qp@toc@ha
-; CHECK-P8-NEXT: addi r6, r5, a_qp@toc@l
-; CHECK-P8-NEXT: addi r7, r4, b_qp@toc@l
-; CHECK-P8-NEXT: ld r3, a_qp@toc@l(r5)
-; CHECK-P8-NEXT: ld r5, b_qp@toc@l(r4)
-; CHECK-P8-NEXT: ld r4, 8(r6)
-; CHECK-P8-NEXT: ld r6, 8(r7)
+; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
+; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
+; CHECK-P8-NEXT: lvx v2, 0, r3
+; CHECK-P8-NEXT: lvx v3, 0, r4
 ; CHECK-P8-NEXT: bl __lekf2
 ; CHECK-P8-NEXT: nop
 ; CHECK-P8-NEXT: extsw r3, r3
@@ -221,14 +213,12 @@
 ; CHECK-P8-NEXT: stdu r1, -32(r1)
 ; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
 ; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
 ; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
-; CHECK-P8-NEXT: addis r5, r2, a_qp@toc@ha
-; CHECK-P8-NEXT: addi r6, r5, a_qp@toc@l
-; CHECK-P8-NEXT: addi r7, r4, b_qp@toc@l
-; CHECK-P8-NEXT: ld r3, a_qp@toc@l(r5)
-; CHECK-P8-NEXT: ld r5, b_qp@toc@l(r4)
-; CHECK-P8-NEXT: ld r4, 8(r6)
-; CHECK-P8-NEXT: ld r6, 8(r7)
+; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
+; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
+; CHECK-P8-NEXT: lvx v2, 0, r3
+; CHECK-P8-NEXT: lvx v3, 0, r4
 ; CHECK-P8-NEXT: bl __eqkf2
 ; CHECK-P8-NEXT: nop
 ; CHECK-P8-NEXT: cntlzw r3, r3
@@ -267,14 +257,12 @@
 ; CHECK-P8-NEXT: stdu r1, -32(r1)
 ; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
 ; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
 ; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
-; CHECK-P8-NEXT: addis r5, r2, a_qp@toc@ha
-; CHECK-P8-NEXT: addi r6, r5, a_qp@toc@l
-; CHECK-P8-NEXT: addi r7, r4, b_qp@toc@l
-; CHECK-P8-NEXT: ld r3, a_qp@toc@l(r5)
-; CHECK-P8-NEXT: ld r5, b_qp@toc@l(r4)
-; CHECK-P8-NEXT: ld r4, 8(r6)
-; CHECK-P8-NEXT: ld r6, 8(r7)
+; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
+; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
+; CHECK-P8-NEXT: lvx v2, 0, r3
+; CHECK-P8-NEXT: lvx v3, 0, r4
 ; CHECK-P8-NEXT: bl __gtkf2
 ; CHECK-P8-NEXT: nop
 ; CHECK-P8-NEXT: extsw r3, r3
@@ -316,14 +304,12 @@
 ; CHECK-P8-NEXT: stdu r1, -32(r1)
 ; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
 ; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
 ; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
-; CHECK-P8-NEXT: addis r5, r2, a_qp@toc@ha
-; CHECK-P8-NEXT: addi r6, r5, a_qp@toc@l
-; CHECK-P8-NEXT: addi r7, r4, b_qp@toc@l
-; CHECK-P8-NEXT: ld r3, a_qp@toc@l(r5)
-; CHECK-P8-NEXT: ld r5, b_qp@toc@l(r4)
-; CHECK-P8-NEXT: ld r4, 8(r6)
-; CHECK-P8-NEXT: ld r6, 8(r7)
+; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
+; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
+; CHECK-P8-NEXT: lvx v2, 0, r3
+; CHECK-P8-NEXT: lvx v3, 0, r4
 ; CHECK-P8-NEXT: bl __ltkf2
 ; CHECK-P8-NEXT: nop
 ; CHECK-P8-NEXT: rlwinm r3, r3, 1, 31, 31
@@ -364,14 +350,12 @@
 ; CHECK-P8-NEXT: stdu r1, -32(r1)
 ; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
 ; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
 ; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
-; CHECK-P8-NEXT: addis r5, r2, a_qp@toc@ha
-; CHECK-P8-NEXT: addi r6, r5, a_qp@toc@l
-; CHECK-P8-NEXT: addi r7, r4, b_qp@toc@l
-; CHECK-P8-NEXT: ld r3, a_qp@toc@l(r5)
-; CHECK-P8-NEXT: ld r5, b_qp@toc@l(r4)
-; CHECK-P8-NEXT: ld r4, 8(r6)
-; CHECK-P8-NEXT: ld r6, 8(r7)
+; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
+; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
+; CHECK-P8-NEXT: lvx v2, 0, r3
+; CHECK-P8-NEXT: lvx v3, 0, r4
 ; CHECK-P8-NEXT: bl __gekf2
 ; CHECK-P8-NEXT: nop
 ; CHECK-P8-NEXT: rlwinm r3, r3, 1, 31, 31
@@ -411,14 +395,12 @@
 ; CHECK-P8-NEXT: stdu r1, -32(r1)
 ; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
 ; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
 ; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
-; CHECK-P8-NEXT: addis r5, r2, a_qp@toc@ha
-; CHECK-P8-NEXT: addi r6, r5, a_qp@toc@l
-; CHECK-P8-NEXT: addi r7, r4, b_qp@toc@l
-; CHECK-P8-NEXT: ld r3, a_qp@toc@l(r5)
-; CHECK-P8-NEXT: ld r5, b_qp@toc@l(r4)
-; CHECK-P8-NEXT: ld r4, 8(r6)
-; CHECK-P8-NEXT: ld r6, 8(r7)
+; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
+; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
+; CHECK-P8-NEXT: lvx v2, 0, r3
+; CHECK-P8-NEXT: lvx v3, 0, r4
 ; CHECK-P8-NEXT: bl __lekf2
 ; CHECK-P8-NEXT: nop
 ; CHECK-P8-NEXT: extsw r3, r3
@@ -459,14 +441,12 @@
 ; CHECK-P8-NEXT: stdu r1, -32(r1)
 ; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
 ; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
 ; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
-; CHECK-P8-NEXT: addis r5, r2, a_qp@toc@ha
-; CHECK-P8-NEXT: addi r6, r5, a_qp@toc@l
-; CHECK-P8-NEXT: addi r7, r4, b_qp@toc@l
-; CHECK-P8-NEXT: ld r3, a_qp@toc@l(r5)
-; CHECK-P8-NEXT: ld r5, b_qp@toc@l(r4)
-; CHECK-P8-NEXT: ld r4, 8(r6)
-; CHECK-P8-NEXT: ld r6, 8(r7)
+; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
+; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
+; CHECK-P8-NEXT: lvx v2, 0, r3
+; CHECK-P8-NEXT: lvx v3, 0, r4
 ; CHECK-P8-NEXT: bl __nekf2
 ; CHECK-P8-NEXT: nop
 ; CHECK-P8-NEXT: cntlzw r3, r3
@@ -503,41 +483,38 @@
 ; CHECK-P8-LABEL: greater_sel_qp:
 ; CHECK-P8: # %bb.0: # %entry
 ; CHECK-P8-NEXT: mflr r0
-; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
-; CHECK-P8-NEXT: .cfi_offset lr, 16
-; CHECK-P8-NEXT: .cfi_offset r27, -40
-; CHECK-P8-NEXT: .cfi_offset r28, -32
-; CHECK-P8-NEXT: .cfi_offset r29, -24
-; CHECK-P8-NEXT: .cfi_offset r30, -16
-; CHECK-P8-NEXT: std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT: std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT: std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
 ; CHECK-P8-NEXT: std r0, 16(r1)
 ; CHECK-P8-NEXT: stdu r1, -80(r1)
-; CHECK-P8-NEXT: addis r3, r2, b_qp@toc@ha
-; CHECK-P8-NEXT: addis r4, r2, a_qp@toc@ha
-; CHECK-P8-NEXT: ld r30, a_qp@toc@l(r4)
-; CHECK-P8-NEXT: addi r4, r4, a_qp@toc@l
-; CHECK-P8-NEXT: ld r29, b_qp@toc@l(r3)
-; CHECK-P8-NEXT: addi r3, r3, b_qp@toc@l
-; CHECK-P8-NEXT: ld r28, 8(r4)
-; CHECK-P8-NEXT: ld r27, 8(r3)
-; CHECK-P8-NEXT: mr r3, r30
-; CHECK-P8-NEXT: mr r5, r29
-; CHECK-P8-NEXT: mr r4, r28
-; CHECK-P8-NEXT: mr r6, r27
+; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
+; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: .cfi_offset v30, -32
+; CHECK-P8-NEXT: .cfi_offset v31, -16
+; CHECK-P8-NEXT: li r3, 48
+; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
+; CHECK-P8-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-P8-NEXT: li r3, 64
+; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
+; CHECK-P8-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
+; CHECK-P8-NEXT: lvx v30, 0, r4
+; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
+; CHECK-P8-NEXT: lvx v31, 0, r3
+; CHECK-P8-NEXT: vmr v3, v30
+; CHECK-P8-NEXT: vmr v2, v31
 ; CHECK-P8-NEXT: bl __gtkf2
 ; CHECK-P8-NEXT: nop
 ; CHECK-P8-NEXT: cmpwi r3, 0
-; CHECK-P8-NEXT: iselgt r3, r30, r29
-; CHECK-P8-NEXT: iselgt r4, r28, r27
+; CHECK-P8-NEXT: bgt cr0, .LBB10_2
+; CHECK-P8-NEXT: # %bb.1: # %entry
+; CHECK-P8-NEXT: vmr v31, v30
+; CHECK-P8-NEXT: .LBB10_2: # %entry
+; CHECK-P8-NEXT: li r3, 64
+; CHECK-P8-NEXT: vmr v2, v31
+; CHECK-P8-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-P8-NEXT: li r3, 48
+; CHECK-P8-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
 ; CHECK-P8-NEXT: addi r1, r1, 80
 ; CHECK-P8-NEXT: ld r0, 16(r1)
-; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT: ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT: ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT: ld r27, -40(r1) # 8-byte Folded Reload
 ; CHECK-P8-NEXT: mtlr r0
 ; CHECK-P8-NEXT: blr
 entry:
@@ -567,41 +544,38 @@
 ; CHECK-P8-LABEL: less_sel_qp:
 ; CHECK-P8: # %bb.0: # %entry
 ; CHECK-P8-NEXT: mflr r0
-; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
-; CHECK-P8-NEXT: .cfi_offset lr, 16
-; CHECK-P8-NEXT: .cfi_offset r27, -40
-; CHECK-P8-NEXT: .cfi_offset r28, -32
-; CHECK-P8-NEXT: .cfi_offset r29, -24
-; CHECK-P8-NEXT: .cfi_offset r30, -16
-; CHECK-P8-NEXT: std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT: std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT: std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
 ; CHECK-P8-NEXT: std r0, 16(r1)
 ; CHECK-P8-NEXT: stdu r1, -80(r1)
-; CHECK-P8-NEXT: addis r3, r2, b_qp@toc@ha
-; CHECK-P8-NEXT: addis r4, r2, a_qp@toc@ha
-; CHECK-P8-NEXT: ld r30, a_qp@toc@l(r4)
-; CHECK-P8-NEXT: addi r4, r4, a_qp@toc@l
-; CHECK-P8-NEXT: ld r29, b_qp@toc@l(r3)
-; CHECK-P8-NEXT: addi r3, r3, b_qp@toc@l
-; CHECK-P8-NEXT: ld r28, 8(r4)
-; CHECK-P8-NEXT: ld r27, 8(r3)
-; CHECK-P8-NEXT: mr r3, r30
-; CHECK-P8-NEXT: mr r5, r29
-; CHECK-P8-NEXT: mr r4, r28
-; CHECK-P8-NEXT: mr r6, r27
+; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
+; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: .cfi_offset v30, -32
+; CHECK-P8-NEXT: .cfi_offset v31, -16
+; CHECK-P8-NEXT: li r3, 48
+; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
+; CHECK-P8-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-P8-NEXT: li r3, 64
+; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
+; CHECK-P8-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
+; CHECK-P8-NEXT: lvx v30, 0, r4
+; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
+; CHECK-P8-NEXT: lvx v31, 0, r3
+; CHECK-P8-NEXT: vmr v3, v30
+; CHECK-P8-NEXT: vmr v2, v31
 ; CHECK-P8-NEXT: bl __ltkf2
 ; CHECK-P8-NEXT: nop
 ; CHECK-P8-NEXT: cmpwi r3, 0
-; CHECK-P8-NEXT: isellt r3, r30, r29
-; CHECK-P8-NEXT: isellt r4, r28, r27
+; CHECK-P8-NEXT: blt cr0, .LBB11_2
+; CHECK-P8-NEXT: # %bb.1: # %entry
+; CHECK-P8-NEXT: vmr v31, v30
+; CHECK-P8-NEXT: .LBB11_2: # %entry
+; CHECK-P8-NEXT: li r3, 64
+; CHECK-P8-NEXT: vmr v2, v31
+; CHECK-P8-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-P8-NEXT: li r3, 48
+; CHECK-P8-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
 ; CHECK-P8-NEXT: addi r1, r1, 80
 ; CHECK-P8-NEXT: ld r0, 16(r1)
-; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT: ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT: ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT: ld r27, -40(r1) # 8-byte Folded Reload
 ; CHECK-P8-NEXT: mtlr r0
 ; CHECK-P8-NEXT: blr
 entry:
@@ -632,41 +606,38 @@
 ; CHECK-P8-LABEL: greater_eq_sel_qp:
 ; CHECK-P8: # %bb.0: # %entry
 ; CHECK-P8-NEXT: mflr r0
-; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
-; CHECK-P8-NEXT: .cfi_offset lr, 16
-; CHECK-P8-NEXT: .cfi_offset r27, -40
-; CHECK-P8-NEXT: .cfi_offset r28, -32
-; CHECK-P8-NEXT: .cfi_offset r29, -24
-; CHECK-P8-NEXT: .cfi_offset r30, -16
-; CHECK-P8-NEXT: std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT: std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT: std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
 ; CHECK-P8-NEXT: std r0, 16(r1)
 ; CHECK-P8-NEXT: stdu r1, -80(r1)
-; CHECK-P8-NEXT: addis r3, r2, b_qp@toc@ha
-; CHECK-P8-NEXT: addis r4, r2, a_qp@toc@ha
-; CHECK-P8-NEXT: ld r30, a_qp@toc@l(r4)
-; CHECK-P8-NEXT: addi r4, r4, a_qp@toc@l
-; CHECK-P8-NEXT: ld r29, b_qp@toc@l(r3)
-; CHECK-P8-NEXT: addi r3, r3, b_qp@toc@l
-; CHECK-P8-NEXT: ld r28, 8(r4)
-; CHECK-P8-NEXT: ld r27, 8(r3)
-; CHECK-P8-NEXT: mr r3, r30
-; CHECK-P8-NEXT: mr r5, r29
-; CHECK-P8-NEXT: mr r4, r28
-; CHECK-P8-NEXT: mr r6, r27
+; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
+; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: .cfi_offset v30, -32
+; CHECK-P8-NEXT: .cfi_offset v31, -16
+; CHECK-P8-NEXT: li r3, 48
+; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
+; CHECK-P8-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-P8-NEXT: li r3, 64
+; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
+; CHECK-P8-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
+; CHECK-P8-NEXT: lvx v30, 0, r4
+; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
+; CHECK-P8-NEXT: lvx v31, 0, r3
+; CHECK-P8-NEXT: vmr v3, v30
+; CHECK-P8-NEXT: vmr v2, v31
 ; CHECK-P8-NEXT: bl __gekf2
 ; CHECK-P8-NEXT: nop
 ; CHECK-P8-NEXT: cmpwi r3, -1
-; CHECK-P8-NEXT: iselgt r3, r30, r29
-; CHECK-P8-NEXT: iselgt r4, r28, r27
+; CHECK-P8-NEXT: bgt cr0, .LBB12_2
+; CHECK-P8-NEXT: # %bb.1: # %entry
+; CHECK-P8-NEXT: vmr v31, v30
+; CHECK-P8-NEXT: .LBB12_2: # %entry
+; CHECK-P8-NEXT: li r3, 64
+; CHECK-P8-NEXT: vmr v2, v31
+; CHECK-P8-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-P8-NEXT: li r3, 48
+; CHECK-P8-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
 ; CHECK-P8-NEXT: addi r1, r1, 80
 ; CHECK-P8-NEXT: ld r0, 16(r1)
-; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT: ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT: ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT: ld r27, -40(r1) # 8-byte Folded Reload
 ; CHECK-P8-NEXT: mtlr r0
 ; CHECK-P8-NEXT: blr
 entry:
@@ -697,41 +668,38 @@
 ; CHECK-P8-LABEL: less_eq_sel_qp:
 ; CHECK-P8: # %bb.0: # %entry
 ; CHECK-P8-NEXT: mflr r0
-; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
-; CHECK-P8-NEXT: .cfi_offset lr, 16
-; CHECK-P8-NEXT: .cfi_offset r27, -40
-; CHECK-P8-NEXT: .cfi_offset r28, -32
-; CHECK-P8-NEXT: .cfi_offset r29, -24
-; CHECK-P8-NEXT: .cfi_offset r30, -16
-; CHECK-P8-NEXT: std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT: std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT: std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
 ; CHECK-P8-NEXT: std r0, 16(r1)
 ; CHECK-P8-NEXT: stdu r1, -80(r1)
-; CHECK-P8-NEXT: addis r3, r2, b_qp@toc@ha
-; CHECK-P8-NEXT: addis r4, r2, a_qp@toc@ha
-; CHECK-P8-NEXT: ld r30, a_qp@toc@l(r4)
-; CHECK-P8-NEXT: addi r4, r4, a_qp@toc@l
-; CHECK-P8-NEXT: ld r29, b_qp@toc@l(r3)
-; CHECK-P8-NEXT: addi r3, r3, b_qp@toc@l
-; CHECK-P8-NEXT: ld r28, 8(r4)
-; CHECK-P8-NEXT: ld r27, 8(r3)
-; CHECK-P8-NEXT: mr r3, r30
-; CHECK-P8-NEXT: mr r5, r29
-; CHECK-P8-NEXT: mr r4, r28
-; CHECK-P8-NEXT: mr r6, r27
+; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
+; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: .cfi_offset v30, -32
+; CHECK-P8-NEXT: .cfi_offset v31, -16
+; CHECK-P8-NEXT: li r3, 48
+; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
+; CHECK-P8-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-P8-NEXT: li r3, 64
+; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
+; CHECK-P8-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
+; CHECK-P8-NEXT: lvx v30, 0, r4
+; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
+; CHECK-P8-NEXT: lvx v31, 0, r3
+; CHECK-P8-NEXT: vmr v3, v30
+; CHECK-P8-NEXT: vmr v2, v31
 ; CHECK-P8-NEXT: bl __lekf2
 ; CHECK-P8-NEXT: nop
 ; CHECK-P8-NEXT: cmpwi r3, 1
-; CHECK-P8-NEXT: isellt r3, r30, r29
-; CHECK-P8-NEXT: isellt r4, r28, r27
+; CHECK-P8-NEXT: blt cr0, .LBB13_2
+; CHECK-P8-NEXT: # %bb.1: # %entry
+; CHECK-P8-NEXT: vmr v31, v30
+; CHECK-P8-NEXT: .LBB13_2: # %entry
+; CHECK-P8-NEXT: li r3, 64
+; CHECK-P8-NEXT: vmr v2, v31
+; CHECK-P8-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-P8-NEXT: li r3, 48
+; CHECK-P8-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
 ; CHECK-P8-NEXT: addi r1, r1, 80
 ; CHECK-P8-NEXT: ld r0, 16(r1)
-; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT: ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT: ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT: ld r27, -40(r1) # 8-byte Folded Reload
 ; CHECK-P8-NEXT: mtlr r0
 ; CHECK-P8-NEXT: blr
 entry:
@@ -761,41 +729,38 @@
 ; CHECK-P8-LABEL: equal_sel_qp:
 ; CHECK-P8: # %bb.0: # %entry
 ; CHECK-P8-NEXT: mflr r0
-; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
-; CHECK-P8-NEXT: .cfi_offset lr, 16
-; CHECK-P8-NEXT: .cfi_offset r27, -40
-; CHECK-P8-NEXT: .cfi_offset r28, -32
-; CHECK-P8-NEXT: .cfi_offset r29, -24
-; CHECK-P8-NEXT: .cfi_offset r30, -16
-; CHECK-P8-NEXT: std r27, -40(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT: std r28, -32(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT: std r29, -24(r1) # 8-byte Folded Spill
-; CHECK-P8-NEXT: std r30, -16(r1) # 8-byte Folded Spill
 ; CHECK-P8-NEXT: std r0, 16(r1)
 ; CHECK-P8-NEXT: stdu r1, -80(r1)
-; CHECK-P8-NEXT: addis r3, r2, b_qp@toc@ha
-; CHECK-P8-NEXT: addis r4, r2, a_qp@toc@ha
-; CHECK-P8-NEXT: ld r30, a_qp@toc@l(r4)
-; CHECK-P8-NEXT: addi r4, r4, a_qp@toc@l
-; CHECK-P8-NEXT: ld r29, b_qp@toc@l(r3)
-; CHECK-P8-NEXT: addi r3, r3, b_qp@toc@l
-; CHECK-P8-NEXT: ld r28, 8(r4)
-; CHECK-P8-NEXT: ld r27, 8(r3)
-; CHECK-P8-NEXT: mr r3, r30
-; CHECK-P8-NEXT: mr r5, r29
-; CHECK-P8-NEXT: mr r4, r28
-; CHECK-P8-NEXT: mr r6, r27
+; CHECK-P8-NEXT: .cfi_def_cfa_offset 80
+; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: .cfi_offset v30, -32
+; CHECK-P8-NEXT: .cfi_offset v31, -16
+; CHECK-P8-NEXT: li r3, 48
+; CHECK-P8-NEXT: addis r4, r2, b_qp@toc@ha
+; CHECK-P8-NEXT: stvx v30, r1, r3 # 16-byte Folded Spill
+; CHECK-P8-NEXT: li r3, 64
+; CHECK-P8-NEXT: addi r4, r4, b_qp@toc@l
+; CHECK-P8-NEXT: stvx v31, r1, r3 # 16-byte Folded Spill
+; CHECK-P8-NEXT: addis r3, r2, a_qp@toc@ha
+; CHECK-P8-NEXT: lvx v30, 0, r4
+; CHECK-P8-NEXT: addi r3, r3, a_qp@toc@l
+; CHECK-P8-NEXT: lvx v31, 0, r3
+; CHECK-P8-NEXT: vmr v3, v30
+; CHECK-P8-NEXT: vmr v2, v31
 ; CHECK-P8-NEXT: bl __eqkf2
 ; CHECK-P8-NEXT: nop
 ; CHECK-P8-NEXT: cmplwi r3, 0
-; CHECK-P8-NEXT: iseleq r3, r30, r29
-; CHECK-P8-NEXT: iseleq r4, r28, r27
+; CHECK-P8-NEXT: beq cr0, .LBB14_2
+; CHECK-P8-NEXT: # %bb.1: # %entry
+; CHECK-P8-NEXT: vmr v31, v30
+; CHECK-P8-NEXT: .LBB14_2: # %entry
+; CHECK-P8-NEXT: li r3, 64
+; CHECK-P8-NEXT: vmr v2, v31
+; CHECK-P8-NEXT: lvx v31, r1, r3 # 16-byte Folded Reload
+; CHECK-P8-NEXT: li r3, 48
+; CHECK-P8-NEXT: lvx v30, r1, r3 # 16-byte Folded Reload
 ; CHECK-P8-NEXT: addi r1, r1, 80
 ; CHECK-P8-NEXT: ld r0, 16(r1)
-; CHECK-P8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT: ld r29, -24(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT: ld r28, -32(r1) # 8-byte Folded Reload
-; CHECK-P8-NEXT: ld r27, -40(r1) # 8-byte Folded Reload
 ; CHECK-P8-NEXT: mtlr r0
 ; CHECK-P8-NEXT: blr
 entry:
@@ -805,3 +770,44 @@
   %cond = select i1 %cmp, fp128 %0, fp128 %1
   ret fp128 %cond
 }
+
+define i64 @br_cc(fp128 %a, fp128 %b) {
+; CHECK-LABEL: br_cc:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xscmpuqp cr0, v2, v3
+; CHECK-NEXT: bne cr0, .LBB15_2
+; CHECK-NEXT: # %bb.1: # %if.then
+; CHECK-NEXT: li r3, 0
+; CHECK-NEXT: blr
+; CHECK-NEXT: .LBB15_2: # %if.else
+; CHECK-NEXT: li r3, 1
+; CHECK-NEXT: blr
+;
+; CHECK-P8-LABEL: br_cc:
+; CHECK-P8: # %bb.0:
+; CHECK-P8-NEXT: mflr r0
+; CHECK-P8-NEXT: std r0, 16(r1)
+; CHECK-P8-NEXT: stdu r1, -32(r1)
+; CHECK-P8-NEXT: .cfi_def_cfa_offset 32
+; CHECK-P8-NEXT: .cfi_offset lr, 16
+; CHECK-P8-NEXT: bl __nekf2
+; CHECK-P8-NEXT: nop
+; CHECK-P8-NEXT: cmplwi r3, 0
+; CHECK-P8-NEXT: bne cr0, .LBB15_2
+; CHECK-P8-NEXT: # %bb.1: # %if.then
+; CHECK-P8-NEXT: li r3, 0
+; CHECK-P8-NEXT: b .LBB15_3
+; CHECK-P8-NEXT: .LBB15_2: # %if.else
+; CHECK-P8-NEXT: li r3, 1
+; CHECK-P8-NEXT: .LBB15_3: # %if.then
+; CHECK-P8-NEXT: addi r1, r1, 32
+; CHECK-P8-NEXT: ld r0, 16(r1)
+; CHECK-P8-NEXT: mtlr r0
+; CHECK-P8-NEXT: blr
+  %cmp = fcmp oeq fp128 %a, %b
+  br i1 %cmp, label %if.then, label %if.else
+if.then:
+  ret i64 0
+if.else:
+  ret i64 1
+}
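
Reviewer note (illustrative, not part of the patch): a minimal sketch of IR that
exercises the new soft-fp128 select path in isolation, assuming the RUN lines of
f128-compare.ll above. The function name min_qp is hypothetical. On pwr8 with
-enable-soft-fp128, the fcmp should be softened to a bl __ltkf2 libcall through
the new LowerSETCC path, and the fp128 select should go through the
SELECT -> SELECT_CC expansion, producing a compare-and-branch over vector
registers just like the *_sel_qp tests above; on pwr9 it should instead select
a single xscmpuqp plus a conditional vector move.

  ; Hypothetical reproducer; names are illustrative only.
  define fp128 @min_qp(fp128 %a, fp128 %b) {
  entry:
    ; Softened to a __ltkf2 call on subtargets without Power9 vector.
    %cmp = fcmp olt fp128 %a, %b
    ; Combined into select_cc and lowered as select_cc z, 0, tv, fv, NE,
    ; per the comment added in PPCISelLowering.cpp.
    %cond = select i1 %cmp, fp128 %a, fp128 %b
    ret fp128 %cond
  }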