diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h --- a/llvm/lib/Target/PowerPC/PPCISelLowering.h +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h @@ -153,6 +153,9 @@ SRA, SHL, + /// FNMSUB - Negated multiply-subtract instruction. + FNMSUB, + /// EXTSWSLI = The PPC extswsli instruction, which does an extend-sign /// word and shift left immediate. EXTSWSLI, @@ -684,6 +687,14 @@ MachineBasicBlock *Entry, const SmallVectorImpl &Exits) const override; + NegatibleCost getNegatibleCost(SDValue Op, SelectionDAG &DAG, + bool LegalOperations, bool ForCodeSize, + unsigned Depth = 0) const override; + + SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, + bool LegalOperations, bool ForCodeSize, + unsigned Depth = 0) const override; + /// getSetCCResultType - Return the ISD::SETCC ValueType EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override; @@ -1207,6 +1218,7 @@ SDValue combineSRL(SDNode *N, DAGCombinerInfo &DCI) const; SDValue combineMUL(SDNode *N, DAGCombinerInfo &DCI) const; SDValue combineADD(SDNode *N, DAGCombinerInfo &DCI) const; + SDValue combineFMALike(SDNode *N, DAGCombinerInfo &DCI) const; SDValue combineTRUNCATE(SDNode *N, DAGCombinerInfo &DCI) const; SDValue combineSetCC(SDNode *N, DAGCombinerInfo &DCI) const; SDValue combineABS(SDNode *N, DAGCombinerInfo &DCI) const; diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -1194,6 +1194,7 @@ setTargetDAGCombine(ISD::SRA); setTargetDAGCombine(ISD::SRL); setTargetDAGCombine(ISD::MUL); + setTargetDAGCombine(ISD::FMA); setTargetDAGCombine(ISD::SINT_TO_FP); setTargetDAGCombine(ISD::BUILD_VECTOR); if (Subtarget.hasFPCVT()) @@ -1480,6 +1481,8 @@ case PPCISD::LD_VSX_LH: return "PPCISD::LD_VSX_LH"; case PPCISD::FP_EXTEND_HALF: return "PPCISD::FP_EXTEND_HALF"; case PPCISD::LD_SPLAT: 
return "PPCISD::LD_SPLAT"; + case PPCISD::FNMSUB: + return "PPCISD::FNMSUB"; } return nullptr; } @@ -13876,6 +13879,9 @@ return combineSRL(N, DCI); case ISD::MUL: return combineMUL(N, DCI); + case ISD::FMA: + case PPCISD::FNMSUB: + return combineFMALike(N, DCI); case PPCISD::SHL: if (isNullConstant(N->getOperand(0))) // 0 << V -> 0. return N->getOperand(0); @@ -15591,6 +15597,80 @@ } } +TargetLowering::NegatibleCost +PPCTargetLowering::getNegatibleCost(SDValue Op, SelectionDAG &DAG, + bool LegalOperations, bool ForCodeSize, + unsigned Depth) const { + unsigned Opc = Op.getOpcode(); + EVT VT = Op.getValueType(); + + // All negation to FMA in PPC should be at least neutral, but keeping it + // simpler would help target independent combiner to do more foldings. + switch (Opc) { + case PPCISD::FNMSUB: + if (!Op.hasOneUse() || !isTypeLegal(VT)) + break; + + NegatibleCost N2Cost = getNegatibleCost( + Op.getOperand(2), DAG, LegalOperations, ForCodeSize, Depth + 1); + // The last operand will always be negated, so its cost matters. 
+ if (N2Cost == NegatibleCost::Expensive) + return NegatibleCost::Expensive; + + NegatibleCost N0Cost = getNegatibleCost( + Op.getOperand(0), DAG, LegalOperations, ForCodeSize, Depth + 1); + NegatibleCost N1Cost = getNegatibleCost( + Op.getOperand(1), DAG, LegalOperations, ForCodeSize, Depth + 1); + + return std::max(N0Cost, N1Cost); + } + + return TargetLowering::getNegatibleCost(Op, DAG, LegalOperations, ForCodeSize, + Depth); +} + +SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, + bool LegalOperations, + bool ForCodeSize, + unsigned Depth) const { + unsigned Opc = Op.getOpcode(); + EVT VT = Op.getValueType(); + SDNodeFlags Flags = Op.getNode()->getFlags(); + + // Base class already defined negation for FMA + switch (Opc) { + case PPCISD::FNMSUB: + if (!Op.hasOneUse() || !isTypeLegal(VT)) + break; + + SDValue N0 = Op.getOperand(0); + SDValue N1 = Op.getOperand(1); + + // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c)) + // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c)) + SDValue NegN2 = getNegatedExpression(Op.getOperand(2), DAG, LegalOperations, + ForCodeSize, Depth + 1); + NegatibleCost N0Cost = + getNegatibleCost(N0, DAG, LegalOperations, ForCodeSize, Depth + 1); + NegatibleCost N1Cost = + getNegatibleCost(N1, DAG, LegalOperations, ForCodeSize, Depth + 1); + + // Choose the cheaper one to negate. + if (N0Cost > N1Cost) { + SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOperations, + ForCodeSize, Depth + 1); + return DAG.getNode(Opc, SDLoc(Op), VT, NegN0, N1, NegN2, Flags); + } else { + SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOperations, + ForCodeSize, Depth + 1); + return DAG.getNode(Opc, SDLoc(Op), VT, N0, NegN1, NegN2, Flags); + } + } + + return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations, + ForCodeSize, Depth); +} + // Override to enable LOAD_STACK_GUARD lowering on Linux. 
bool PPCTargetLowering::useLoadStackGuardNode() const { if (!Subtarget.isTargetLinux()) @@ -15929,6 +16009,72 @@ } } +SDValue PPCTargetLowering::combineFMALike(SDNode *N, + DAGCombinerInfo &DCI) const { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); + SDValue N2 = N->getOperand(2); + SDNodeFlags Flags = N->getFlags(); + EVT VT = N->getValueType(0); + SelectionDAG &DAG = DCI.DAG; + const TargetOptions &Options = getTargetMachine().Options; + unsigned Opc = N->getOpcode(); + + // Check nsz flag before generating fnmsub instructions, getNegatibleCost + // already checked before negating FMA, so we don't need to care about fnmadd. + // FNMSUB means (fneg (fma a b (fneg c))), allowing this transform may change + // sign of zero when a*b-c=0 since (fnmsub a b c)=-0 while c-a*b=+0. + if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath) + return SDValue(); + + // Vector version can't be selected if VSX disabled. + if (VT.isVector() && !Subtarget.hasVSX()) + return SDValue(); + + bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize(); + bool LegalOperations = !DCI.isBeforeLegalizeOps(); + SDLoc Loc(N); + + // Try to 'invert' this op to reduce negs. They're symmetric.
+ auto InvertOpCode = [](unsigned Opc) -> unsigned { + if (Opc == ISD::FMA) + return PPCISD::FNMSUB; + else if (Opc == PPCISD::FNMSUB) + return ISD::FMA; + else + llvm_unreachable("Unexpected FMA opcode"); + }; + + // (fma (fneg a) b c) => (fnmsub a b c) + // (fnmsub (fneg a) b c) => (fma a b c) + if (getNegatibleCost(N0, DAG, LegalOperations, CodeSize) == + NegatibleCost::Cheaper) + return DAG.getNode(InvertOpCode(Opc), Loc, VT, + getNegatedExpression(N0, DAG, LegalOperations, CodeSize), + N1, N2, Flags); + + // (fma a (fneg b) c) => (fnmsub a b c) + // (fnmsub a (fneg b) c) => (fma a b c) + if (getNegatibleCost(N1, DAG, LegalOperations, CodeSize) == + NegatibleCost::Cheaper) + return DAG.getNode(InvertOpCode(Opc), Loc, VT, N0, + getNegatedExpression(N1, DAG, LegalOperations, CodeSize), + N2, Flags); + + // (fma a b (fneg c)) => (fneg (fnmsub a b c)) + // (fnmsub a b (fneg c)) => (fneg (fma a b c)) + if (getNegatibleCost(N2, DAG, LegalOperations, CodeSize) == + NegatibleCost::Cheaper) + return DAG.getNode( + ISD::FNEG, Loc, VT, + DAG.getNode(InvertOpCode(Opc), Loc, VT, N0, N1, + getNegatedExpression(N2, DAG, LegalOperations, CodeSize), + Flags), + Flags); + + return SDValue(); +} + bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { // Only duplicate to increase tail-calls for the 64bit SysV ABIs. 
if (!Subtarget.is64BitELFABI()) diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/llvm/lib/Target/PowerPC/PPCInstrInfo.td --- a/llvm/lib/Target/PowerPC/PPCInstrInfo.td +++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.td @@ -218,6 +218,8 @@ def PPCsra : SDNode<"PPCISD::SRA" , SDTIntShiftOp>; def PPCshl : SDNode<"PPCISD::SHL" , SDTIntShiftOp>; +def PPCfnmsub : SDNode<"PPCISD::FNMSUB" , SDTFPTernaryOp>; + def PPCextswsli : SDNode<"PPCISD::EXTSWSLI" , SDT_PPCextswsli>; // Move 2 i64 values into a VSX register @@ -3348,15 +3350,15 @@ def : Pat<(atomic_fence (timm), (timm)), (MSYNC)>, Requires<[HasOnlyMSYNC]>; let Predicates = [HasFPU] in { -// Additional FNMSUB patterns: -a*c + b == -(a*c - b) -def : Pat<(fma (fneg f64:$A), f64:$C, f64:$B), - (FNMSUB $A, $C, $B)>; -def : Pat<(fma f64:$A, (fneg f64:$C), f64:$B), - (FNMSUB $A, $C, $B)>; -def : Pat<(fma (fneg f32:$A), f32:$C, f32:$B), - (FNMSUBS $A, $C, $B)>; -def : Pat<(fma f32:$A, (fneg f32:$C), f32:$B), - (FNMSUBS $A, $C, $B)>; +// Additional fnmsub patterns +def : Pat<(PPCfnmsub f64:$A, f64:$B, f64:$C), + (FNMSUB $A, $B, $C)>; +def : Pat<(PPCfnmsub f32:$A, f32:$B, f32:$C), + (FNMSUBS $A, $B, $C)>; +def : Pat<(fneg (PPCfnmsub f64:$A, f64:$B, f64:$C)), + (FMSUB $A, $B, $C)>; +def : Pat<(fneg (PPCfnmsub f32:$A, f32:$B, f32:$C)), + (FMSUBS $A, $B, $C)>; // FCOPYSIGN's operand types need not agree. 
def : Pat<(fcopysign f64:$frB, f32:$frA), diff --git a/llvm/lib/Target/PowerPC/PPCInstrVSX.td b/llvm/lib/Target/PowerPC/PPCInstrVSX.td --- a/llvm/lib/Target/PowerPC/PPCInstrVSX.td +++ b/llvm/lib/Target/PowerPC/PPCInstrVSX.td @@ -1011,21 +1011,21 @@ (f64 (EXTRACT_SUBREG $S, sub_64))>; } -// Additional fnmsub patterns: -a*b + c == -(a*b - c) -def : Pat<(fma (fneg f64:$A), f64:$B, f64:$C), - (XSNMSUBADP $C, $A, $B)>; -def : Pat<(fma f64:$A, (fneg f64:$B), f64:$C), +// Additional fnmsub pattern for PPC specific ISD opcode +def : Pat<(PPCfnmsub f64:$A, f64:$B, f64:$C), (XSNMSUBADP $C, $A, $B)>; +def : Pat<(fneg (PPCfnmsub f64:$A, f64:$B, f64:$C)), + (XSMSUBADP $C, $A, $B)>; -def : Pat<(fma (fneg v2f64:$A), v2f64:$B, v2f64:$C), - (XVNMSUBADP $C, $A, $B)>; -def : Pat<(fma v2f64:$A, (fneg v2f64:$B), v2f64:$C), +def : Pat<(PPCfnmsub v2f64:$A, v2f64:$B, v2f64:$C), (XVNMSUBADP $C, $A, $B)>; +def : Pat<(fneg (PPCfnmsub v2f64:$A, v2f64:$B, v2f64:$C)), + (XVMSUBADP $C, $A, $B)>; -def : Pat<(fma (fneg v4f32:$A), v4f32:$B, v4f32:$C), - (XVNMSUBASP $C, $A, $B)>; -def : Pat<(fma v4f32:$A, (fneg v4f32:$B), v4f32:$C), +def : Pat<(PPCfnmsub v4f32:$A, v4f32:$B, v4f32:$C), (XVNMSUBASP $C, $A, $B)>; +def : Pat<(fneg (PPCfnmsub v4f32:$A, v4f32:$B, v4f32:$C)), + (XVMSUBASP $C, $A, $B)>; def : Pat<(v2f64 (bitconvert v4f32:$A)), (COPY_TO_REGCLASS $A, VSRC)>; @@ -1578,10 +1578,8 @@ AltVSXFMARel; } - // Additional xsnmsubasp patterns: -a*b + c == -(a*b - c) - def : Pat<(fma (fneg f32:$A), f32:$B, f32:$C), - (XSNMSUBASP $C, $A, $B)>; - def : Pat<(fma f32:$A, (fneg f32:$B), f32:$C), + // Additional fnmsub pattern for PPC specific ISD opcode + def : Pat<(PPCfnmsub f32:$A, f32:$B, f32:$C), (XSNMSUBASP $C, $A, $B)>; // Single Precision Conversions (FP <-> INT) diff --git a/llvm/test/CodeGen/PowerPC/combine-fneg.ll b/llvm/test/CodeGen/PowerPC/combine-fneg.ll --- a/llvm/test/CodeGen/PowerPC/combine-fneg.ll +++ b/llvm/test/CodeGen/PowerPC/combine-fneg.ll @@ -13,10 +13,10 @@ ; CHECK-NEXT: xvredp 2, 0 ; 
CHECK-NEXT: xxswapd 1, 1 ; CHECK-NEXT: xxlor 3, 1, 1 -; CHECK-NEXT: xvmaddadp 3, 0, 2 -; CHECK-NEXT: xvnmsubadp 2, 2, 3 -; CHECK-NEXT: xvmaddadp 1, 0, 2 -; CHECK-NEXT: xvmsubadp 2, 2, 1 +; CHECK-NEXT: xvnmsubadp 3, 0, 2 +; CHECK-NEXT: xvmaddadp 2, 2, 3 +; CHECK-NEXT: xvnmsubadp 1, 0, 2 +; CHECK-NEXT: xvnmaddadp 2, 2, 1 ; CHECK-NEXT: xvmuldp 34, 34, 2 ; CHECK-NEXT: xvmuldp 35, 35, 2 ; CHECK-NEXT: blr diff --git a/llvm/test/CodeGen/PowerPC/fma-assoc.ll b/llvm/test/CodeGen/PowerPC/fma-assoc.ll --- a/llvm/test/CodeGen/PowerPC/fma-assoc.ll +++ b/llvm/test/CodeGen/PowerPC/fma-assoc.ll @@ -225,14 +225,18 @@ define double @test_FMSUB_ASSOC_EXT3(float %A, float %B, double %C, ; CHECK-LABEL: test_FMSUB_ASSOC_EXT3: ; CHECK: # %bb.0: -; CHECK-NEXT: fnmsub 0, 1, 2, 5 -; CHECK-NEXT: fnmsub 1, 3, 4, 0 +; CHECK-NEXT: fneg 0, 1 +; CHECK-NEXT: fmadd 0, 0, 2, 5 +; CHECK-NEXT: fneg 1, 3 +; CHECK-NEXT: fmadd 1, 1, 4, 0 ; CHECK-NEXT: blr ; ; CHECK-VSX-LABEL: test_FMSUB_ASSOC_EXT3: ; CHECK-VSX: # %bb.0: -; CHECK-VSX-NEXT: xsnmsubmdp 1, 2, 5 -; CHECK-VSX-NEXT: xsnmsubadp 1, 3, 4 +; CHECK-VSX-NEXT: xsnegdp 1, 1 +; CHECK-VSX-NEXT: xsnegdp 0, 3 +; CHECK-VSX-NEXT: xsmaddmdp 1, 2, 5 +; CHECK-VSX-NEXT: xsmaddadp 1, 0, 4 ; CHECK-VSX-NEXT: blr double %D, double %E) { %F = fmul float %A, %B ; [#uses=1] @@ -246,15 +250,19 @@ define double @test_FMSUB_ASSOC_EXT4(float %A, float %B, float %C, ; CHECK-LABEL: test_FMSUB_ASSOC_EXT4: ; CHECK: # %bb.0: -; CHECK-NEXT: fnmsub 0, 3, 4, 5 -; CHECK-NEXT: fnmsub 1, 1, 2, 0 +; CHECK-NEXT: fneg 0, 3 +; CHECK-NEXT: fmadd 0, 0, 4, 5 +; CHECK-NEXT: fneg 1, 1 +; CHECK-NEXT: fmadd 1, 1, 2, 0 ; CHECK-NEXT: blr ; ; CHECK-VSX-LABEL: test_FMSUB_ASSOC_EXT4: ; CHECK-VSX: # %bb.0: -; CHECK-VSX-NEXT: xsnmsubmdp 3, 4, 5 -; CHECK-VSX-NEXT: xsnmsubadp 3, 1, 2 -; CHECK-VSX-NEXT: fmr 1, 3 +; CHECK-VSX-NEXT: xsnegdp 0, 3 +; CHECK-VSX-NEXT: xsnegdp 1, 1 +; CHECK-VSX-NEXT: xsmaddmdp 0, 4, 5 +; CHECK-VSX-NEXT: xsmaddadp 0, 1, 2 +; CHECK-VSX-NEXT: fmr 1, 0 ; CHECK-VSX-NEXT: blr float 
%D, double %E) { %F = fmul float %A, %B ; [#uses=1] @@ -503,14 +511,18 @@ define double @test_reassoc_FMSUB_ASSOC_EXT3(float %A, float %B, double %C, ; CHECK-LABEL: test_reassoc_FMSUB_ASSOC_EXT3: ; CHECK: # %bb.0: -; CHECK-NEXT: fnmsub 0, 1, 2, 5 -; CHECK-NEXT: fnmsub 1, 3, 4, 0 +; CHECK-NEXT: fneg 0, 1 +; CHECK-NEXT: fmadd 0, 0, 2, 5 +; CHECK-NEXT: fneg 1, 3 +; CHECK-NEXT: fmadd 1, 1, 4, 0 ; CHECK-NEXT: blr ; ; CHECK-VSX-LABEL: test_reassoc_FMSUB_ASSOC_EXT3: ; CHECK-VSX: # %bb.0: -; CHECK-VSX-NEXT: xsnmsubmdp 1, 2, 5 -; CHECK-VSX-NEXT: xsnmsubadp 1, 3, 4 +; CHECK-VSX-NEXT: xsnegdp 1, 1 +; CHECK-VSX-NEXT: xsnegdp 0, 3 +; CHECK-VSX-NEXT: xsmaddmdp 1, 2, 5 +; CHECK-VSX-NEXT: xsmaddadp 1, 0, 4 ; CHECK-VSX-NEXT: blr double %D, double %E) { %F = fmul reassoc float %A, %B ; [#uses=1] @@ -521,18 +533,45 @@ ret double %J } +; fnmsub/xsnmsubadp may affect the sign of zero, we need nsz flag +; to ensure generating them +define double @test_fast_FMSUB_ASSOC_EXT3(float %A, float %B, double %C, +; CHECK-LABEL: test_fast_FMSUB_ASSOC_EXT3: +; CHECK: # %bb.0: +; CHECK-NEXT: fnmsub 0, 1, 2, 5 +; CHECK-NEXT: fnmsub 1, 3, 4, 0 +; CHECK-NEXT: blr +; +; CHECK-VSX-LABEL: test_fast_FMSUB_ASSOC_EXT3: +; CHECK-VSX: # %bb.0: +; CHECK-VSX-NEXT: xsnmsubmdp 1, 2, 5 +; CHECK-VSX-NEXT: xsnmsubadp 1, 3, 4 +; CHECK-VSX-NEXT: blr + double %D, double %E) { + %F = fmul fast float %A, %B ; [#uses=1] + %G = fpext float %F to double ; [#uses=1] + %H = fmul fast double %C, %D ; [#uses=1] + %I = fadd fast double %H, %G ; [#uses=1] + %J = fsub fast double %E, %I ; [#uses=1] + ret double %J +} + define double @test_reassoc_FMSUB_ASSOC_EXT4(float %A, float %B, float %C, ; CHECK-LABEL: test_reassoc_FMSUB_ASSOC_EXT4: ; CHECK: # %bb.0: -; CHECK-NEXT: fnmsub 0, 3, 4, 5 -; CHECK-NEXT: fnmsub 1, 1, 2, 0 +; CHECK-NEXT: fneg 0, 3 +; CHECK-NEXT: fmadd 0, 0, 4, 5 +; CHECK-NEXT: fneg 1, 1 +; CHECK-NEXT: fmadd 1, 1, 2, 0 ; CHECK-NEXT: blr ; ; CHECK-VSX-LABEL: test_reassoc_FMSUB_ASSOC_EXT4: ; CHECK-VSX: # %bb.0: -; 
CHECK-VSX-NEXT: xsnmsubmdp 3, 4, 5 -; CHECK-VSX-NEXT: xsnmsubadp 3, 1, 2 -; CHECK-VSX-NEXT: fmr 1, 3 +; CHECK-VSX-NEXT: xsnegdp 0, 3 +; CHECK-VSX-NEXT: xsnegdp 1, 1 +; CHECK-VSX-NEXT: xsmaddmdp 0, 4, 5 +; CHECK-VSX-NEXT: xsmaddadp 0, 1, 2 +; CHECK-VSX-NEXT: fmr 1, 0 ; CHECK-VSX-NEXT: blr float %D, double %E) { %F = fmul reassoc float %A, %B ; [#uses=1] @@ -542,3 +581,25 @@ %J = fsub reassoc double %E, %I ; [#uses=1] ret double %J } + +define double @test_fast_FMSUB_ASSOC_EXT4(float %A, float %B, float %C, +; CHECK-LABEL: test_fast_FMSUB_ASSOC_EXT4: +; CHECK: # %bb.0: +; CHECK-NEXT: fnmsub 0, 3, 4, 5 +; CHECK-NEXT: fnmsub 1, 1, 2, 0 +; CHECK-NEXT: blr +; +; CHECK-VSX-LABEL: test_fast_FMSUB_ASSOC_EXT4: +; CHECK-VSX: # %bb.0: +; CHECK-VSX-NEXT: xsnmsubmdp 3, 4, 5 +; CHECK-VSX-NEXT: xsnmsubadp 3, 1, 2 +; CHECK-VSX-NEXT: fmr 1, 3 +; CHECK-VSX-NEXT: blr + float %D, double %E) { + %F = fmul fast float %A, %B ; [#uses=1] + %G = fmul fast float %C, %D ; [#uses=1] + %H = fadd fast float %F, %G ; [#uses=1] + %I = fpext float %H to double ; [#uses=1] + %J = fsub fast double %E, %I ; [#uses=1] + ret double %J +} diff --git a/llvm/test/CodeGen/PowerPC/fma-ext.ll b/llvm/test/CodeGen/PowerPC/fma-ext.ll --- a/llvm/test/CodeGen/PowerPC/fma-ext.ll +++ b/llvm/test/CodeGen/PowerPC/fma-ext.ll @@ -49,10 +49,27 @@ %F = fsub double %C, %E ; [#uses=1] ret double %F ; CHECK-LABEL: test_FMSUB_EXT2: -; CHECK: fnmsub +; CHECK: fneg +; CHECK-NEXT: fmadd ; CHECK-NEXT: blr ; CHECK-VSX-LABEL: test_FMSUB_EXT2: +; CHECK-VSX: xsnegdp +; CHECK-VSX-NEXT: xsmaddmdp +; CHECK-VSX-NEXT: blr +} + +; need nsz flag to generate fnmsub since it may affect sign of zero +define double @test_FMSUB_EXT2_NSZ(float %A, float %B, double %C) { + %D = fmul nsz float %A, %B ; [#uses=1] + %E = fpext float %D to double ; [#uses=1] + %F = fsub nsz double %C, %E ; [#uses=1] + ret double %F +; CHECK-LABEL: test_FMSUB_EXT2_NSZ: +; CHECK: fnmsub +; CHECK-NEXT: blr + +; CHECK-VSX-LABEL: test_FMSUB_EXT2_NSZ: ; CHECK-VSX: 
xsnmsubmdp ; CHECK-VSX-NEXT: blr } diff --git a/llvm/test/CodeGen/PowerPC/fma-negate.ll b/llvm/test/CodeGen/PowerPC/fma-negate.ll --- a/llvm/test/CodeGen/PowerPC/fma-negate.ll +++ b/llvm/test/CodeGen/PowerPC/fma-negate.ll @@ -7,12 +7,14 @@ define double @test_mul_sub_f64(double %a, double %b, double %c) { ; VSX-LABEL: test_mul_sub_f64: ; VSX: # %bb.0: # %entry -; VSX-NEXT: xsnmsubadp 1, 2, 3 +; VSX-NEXT: xsnegdp 0, 2 +; VSX-NEXT: xsmaddadp 1, 0, 3 ; VSX-NEXT: blr ; ; NO-VSX-LABEL: test_mul_sub_f64: ; NO-VSX: # %bb.0: # %entry -; NO-VSX-NEXT: fnmsub 1, 2, 3, 1 +; NO-VSX-NEXT: fneg 0, 2 +; NO-VSX-NEXT: fmadd 1, 0, 3, 1 ; NO-VSX-NEXT: blr entry: %0 = fmul contract reassoc double %b, %c @@ -43,13 +45,15 @@ define double @test_neg_fma_f64(double %a, double %b, double %c) { ; VSX-LABEL: test_neg_fma_f64: ; VSX: # %bb.0: # %entry -; VSX-NEXT: xsnmsubadp 3, 1, 2 +; VSX-NEXT: xsnegdp 0, 1 +; VSX-NEXT: xsmaddadp 3, 0, 2 ; VSX-NEXT: fmr 1, 3 ; VSX-NEXT: blr ; ; NO-VSX-LABEL: test_neg_fma_f64: ; NO-VSX: # %bb.0: # %entry -; NO-VSX-NEXT: fnmsub 1, 1, 2, 3 +; NO-VSX-NEXT: fneg 0, 1 +; NO-VSX-NEXT: fmadd 1, 0, 2, 3 ; NO-VSX-NEXT: blr entry: %0 = fsub contract reassoc double -0.0, %a @@ -61,12 +65,14 @@ define float @test_mul_sub_f32(float %a, float %b, float %c) { ; VSX-LABEL: test_mul_sub_f32: ; VSX: # %bb.0: # %entry -; VSX-NEXT: xsnmsubasp 1, 2, 3 +; VSX-NEXT: fneg 0, 2 +; VSX-NEXT: xsmaddasp 1, 0, 3 ; VSX-NEXT: blr ; ; NO-VSX-LABEL: test_mul_sub_f32: ; NO-VSX: # %bb.0: # %entry -; NO-VSX-NEXT: fnmsubs 1, 2, 3, 1 +; NO-VSX-NEXT: fneg 0, 2 +; NO-VSX-NEXT: fmadds 1, 0, 3, 1 ; NO-VSX-NEXT: blr entry: %0 = fmul contract reassoc float %b, %c @@ -97,13 +103,15 @@ define float @test_neg_fma_f32(float %a, float %b, float %c) { ; VSX-LABEL: test_neg_fma_f32: ; VSX: # %bb.0: # %entry -; VSX-NEXT: xsnmsubasp 3, 1, 2 +; VSX-NEXT: fneg 0, 1 +; VSX-NEXT: xsmaddasp 3, 0, 2 ; VSX-NEXT: fmr 1, 3 ; VSX-NEXT: blr ; ; NO-VSX-LABEL: test_neg_fma_f32: ; NO-VSX: # %bb.0: # %entry -; NO-VSX-NEXT: 
fnmsubs 1, 1, 2, 3 +; NO-VSX-NEXT: fneg 0, 1 +; NO-VSX-NEXT: fmadds 1, 0, 2, 3 ; NO-VSX-NEXT: blr entry: %0 = fsub contract reassoc float -0.0, %a @@ -114,14 +122,17 @@ define <2 x double> @test_neg_fma_v2f64(<2 x double> %a, <2 x double> %b, ; VSX-LABEL: test_neg_fma_v2f64: ; VSX: # %bb.0: # %entry -; VSX-NEXT: xvnmsubadp 36, 34, 35 +; VSX-NEXT: xvnegdp 0, 34 +; VSX-NEXT: xvmaddadp 36, 0, 35 ; VSX-NEXT: vmr 2, 4 ; VSX-NEXT: blr ; ; NO-VSX-LABEL: test_neg_fma_v2f64: ; NO-VSX: # %bb.0: # %entry -; NO-VSX-NEXT: fnmsub 1, 1, 3, 5 -; NO-VSX-NEXT: fnmsub 2, 2, 4, 6 +; NO-VSX-NEXT: fneg 0, 2 +; NO-VSX-NEXT: fneg 1, 1 +; NO-VSX-NEXT: fmadd 1, 1, 3, 5 +; NO-VSX-NEXT: fmadd 2, 0, 4, 6 ; NO-VSX-NEXT: blr <2 x double> %c) { entry: @@ -135,7 +146,8 @@ define <4 x float> @test_neg_fma_v4f32(<4 x float> %a, <4 x float> %b, ; VSX-LABEL: test_neg_fma_v4f32: ; VSX: # %bb.0: # %entry -; VSX-NEXT: xvnmsubasp 36, 34, 35 +; VSX-NEXT: xvnegsp 0, 34 +; VSX-NEXT: xvmaddasp 36, 0, 35 ; VSX-NEXT: vmr 2, 4 ; VSX-NEXT: blr ; @@ -230,8 +242,7 @@ ; VSX-LABEL: test_fast_2mul_sub_f32: ; VSX: # %bb.0: # %entry ; VSX-NEXT: xsmulsp 0, 3, 4 -; VSX-NEXT: xsmsubasp 0, 1, 2 -; VSX-NEXT: fmr 1, 0 +; VSX-NEXT: fmsubs 1, 1, 2, 0 ; VSX-NEXT: blr ; ; NO-VSX-LABEL: test_fast_2mul_sub_f32: diff --git a/llvm/test/CodeGen/PowerPC/fma.ll b/llvm/test/CodeGen/PowerPC/fma.ll --- a/llvm/test/CodeGen/PowerPC/fma.ll +++ b/llvm/test/CodeGen/PowerPC/fma.ll @@ -95,10 +95,25 @@ %E = fsub double %C, %D ; [#uses=1] ret double %E ; CHECK-LABEL: test_FNMSUB1: -; CHECK: fnmsub +; CHECK: fneg +; CHECK-NEXT: fmadd ; CHECK-NEXT: blr ; CHECK-VSX-LABEL: test_FNMSUB1: +; CHECK-VSX: xsnegdp +; CHECK-VSX-NEXT: xsmaddmdp +} + +; need nsz flag to generate fnmsub since it may affect sign of zero +define double @test_FNMSUB1_NSZ(double %A, double %B, double %C) { + %D = fmul nsz double %A, %B ; [#uses=1] + %E = fsub nsz double %C, %D ; [#uses=1] + ret double %E +; CHECK-LABEL: test_FNMSUB1_NSZ: +; CHECK: fnmsub +; CHECK-NEXT: blr + +; 
CHECK-VSX-LABEL: test_FNMSUB1_NSZ: ; CHECK-VSX: xsnmsubmdp } diff --git a/llvm/test/CodeGen/PowerPC/repeated-fp-divisors.ll b/llvm/test/CodeGen/PowerPC/repeated-fp-divisors.ll --- a/llvm/test/CodeGen/PowerPC/repeated-fp-divisors.ll +++ b/llvm/test/CodeGen/PowerPC/repeated-fp-divisors.ll @@ -13,9 +13,9 @@ ; CHECK-NEXT: lvx 4, 0, 3 ; CHECK-NEXT: xxspltw 0, 0, 0 ; CHECK-NEXT: xvresp 1, 0 -; CHECK-NEXT: xvmaddasp 35, 0, 1 +; CHECK-NEXT: xvnmsubasp 35, 0, 1 ; CHECK-NEXT: xvmulsp 0, 34, 36 -; CHECK-NEXT: xvnmsubasp 1, 1, 35 +; CHECK-NEXT: xvmaddasp 1, 1, 35 ; CHECK-NEXT: xvmulsp 34, 0, 1 ; CHECK-NEXT: blr %ins = insertelement <4 x float> undef, float %a, i32 0