Index: lib/Target/X86/X86ISelLowering.cpp =================================================================== --- lib/Target/X86/X86ISelLowering.cpp +++ lib/Target/X86/X86ISelLowering.cpp @@ -26174,24 +26174,35 @@ static SDValue PerformFNEGCombine(SDNode *N, SelectionDAG &DAG, const X86Subtarget *Subtarget) { EVT VT = N->getValueType(0); + EVT SVT = VT.getScalarType(); SDValue Arg = N->getOperand(0); + SDLoc DL(N); + + // If we're negating a FMUL node on a target with FMA, then we can avoid the + // use of a constant by performing (0 - A*B) instead. + if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) && + (Subtarget->hasFMA() || Subtarget->hasFMA4())) { + SDValue Zero = DAG.getConstantFP(0.0, DL, VT); + return DAG.getNode(X86ISD::FNMADD, DL, VT, Arg.getOperand(0), + Arg.getOperand(1), Zero); + } // If we're negating a FMA node, then we can adjust the // instruction to include the extra negation. if (Arg.hasOneUse()) { switch (Arg.getOpcode()) { - case X86ISD::FMADD: - return DAG.getNode(X86ISD::FNMSUB, SDLoc(N), VT, Arg.getOperand(0), - Arg.getOperand(1), Arg.getOperand(2)); - case X86ISD::FMSUB: - return DAG.getNode(X86ISD::FNMADD, SDLoc(N), VT, Arg.getOperand(0), - Arg.getOperand(1), Arg.getOperand(2)); - case X86ISD::FNMADD: - return DAG.getNode(X86ISD::FMSUB, SDLoc(N), VT, Arg.getOperand(0), - Arg.getOperand(1), Arg.getOperand(2)); - case X86ISD::FNMSUB: - return DAG.getNode(X86ISD::FMADD, SDLoc(N), VT, Arg.getOperand(0), - Arg.getOperand(1), Arg.getOperand(2)); + case X86ISD::FMADD: + return DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0), + Arg.getOperand(1), Arg.getOperand(2)); + case X86ISD::FMSUB: + return DAG.getNode(X86ISD::FNMADD, DL, VT, Arg.getOperand(0), + Arg.getOperand(1), Arg.getOperand(2)); + case X86ISD::FNMADD: + return DAG.getNode(X86ISD::FMSUB, DL, VT, Arg.getOperand(0), + Arg.getOperand(1), Arg.getOperand(2)); + case X86ISD::FNMSUB: + return DAG.getNode(X86ISD::FMADD, DL, VT, Arg.getOperand(0), + 
Arg.getOperand(1), Arg.getOperand(2)); } } return SDValue(); Index: test/CodeGen/X86/fma_patterns.ll =================================================================== --- test/CodeGen/X86/fma_patterns.ll +++ test/CodeGen/X86/fma_patterns.ll @@ -667,4 +667,40 @@ ret <4 x float> %a } +; (fneg (fmul x, y)) -> (fnmadd x, y, 0) + +define double @test_f64_fneg_fmul(double %x, double %y) #0 { +; CHECK_FMA-LABEL: test_f64_fneg_fmul: +; CHECK_FMA: # BB#0: +; CHECK_FMA-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; CHECK_FMA-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 +; CHECK_FMA-NEXT: retq +; +; CHECK_FMA4-LABEL: test_f64_fneg_fmul: +; CHECK_FMA4: # BB#0: +; CHECK_FMA4-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; CHECK_FMA4-NEXT: vfnmaddsd %xmm2, %xmm1, %xmm0, %xmm0 +; CHECK_FMA4-NEXT: retq + %m = fmul double %x, %y + %n = fsub double -0.0, %m + ret double %n +} + +define <4 x float> @test_v4f32_fneg_fmul(<4 x float> %x, <4 x float> %y) #0 { +; CHECK_FMA-LABEL: test_v4f32_fneg_fmul: +; CHECK_FMA: # BB#0: +; CHECK_FMA-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; CHECK_FMA-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 +; CHECK_FMA-NEXT: retq +; +; CHECK_FMA4-LABEL: test_v4f32_fneg_fmul: +; CHECK_FMA4: # BB#0: +; CHECK_FMA4-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; CHECK_FMA4-NEXT: vfnmaddps %xmm2, %xmm1, %xmm0, %xmm0 +; CHECK_FMA4-NEXT: retq + %m = fmul <4 x float> %x, %y + %n = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %m + ret <4 x float> %n +} + attributes #0 = { "unsafe-fp-math"="true" }