Index: lib/Target/X86/X86ISelLowering.cpp =================================================================== --- lib/Target/X86/X86ISelLowering.cpp +++ lib/Target/X86/X86ISelLowering.cpp @@ -13381,7 +13381,8 @@ /// The only differences between FABS and FNEG are the mask and the logic op. /// FNEG also has a folding opportunity for FNEG(FABS(x)). -static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) { +static SDValue LowerFABSorFNEG(SDValue Op, const X86Subtarget *Subtarget, + SelectionDAG &DAG) { assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) && "Wrong opcode for lowering FABS or FNEG."); @@ -13396,6 +13397,18 @@ SDLoc dl(Op); MVT VT = Op.getSimpleValueType(); + MVT ScalarVT = VT.getScalarType(); + + // If we're negating a FMUL node on a target with FMA, then we can avoid the + // use of a constant by performing (0 - A*B) instead. + if (!IsFABS && Op.getOperand(0).getOpcode() == ISD::FMUL && + DAG.getTarget().Options.UnsafeFPMath && + (ScalarVT == MVT::f32 || ScalarVT == MVT::f64) && + (Subtarget->hasFMA() || Subtarget->hasFMA4() || Subtarget->hasAVX512())) { + SDValue Zero = DAG.getConstantFP(0.0, dl, VT); + return DAG.getNode(X86ISD::FNMADD, dl, VT, Op.getOperand(0).getOperand(0), + Op.getOperand(0).getOperand(1), Zero); + } // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to // decide if we should generate a 16-byte constant mask when we only need 4 or @@ -19780,7 +19793,7 @@ case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG); case ISD::FABS: - case ISD::FNEG: return LowerFABSorFNEG(Op, DAG); + case ISD::FNEG: return LowerFABSorFNEG(Op, Subtarget, DAG); case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG); case ISD::SETCC: return LowerSETCC(Op, DAG); Index: test/CodeGen/X86/fma_patterns.ll =================================================================== --- test/CodeGen/X86/fma_patterns.ll 
+++ test/CodeGen/X86/fma_patterns.ll @@ -599,4 +599,40 @@ ret <4 x float> %a } +; (fneg (fmul x, y)) -> (fnmadd x, y, 0) + +define double @test_f64_fneg_fmul(double %x, double %y) #0 { +; CHECK_FMA-LABEL: test_f64_fneg_fmul: +; CHECK_FMA: # BB#0: +; CHECK_FMA-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; CHECK_FMA-NEXT: vfnmadd213sd %xmm2, %xmm1, %xmm0 +; CHECK_FMA-NEXT: retq +; +; CHECK_FMA4-LABEL: test_f64_fneg_fmul: +; CHECK_FMA4: # BB#0: +; CHECK_FMA4-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; CHECK_FMA4-NEXT: vfnmaddsd %xmm2, %xmm1, %xmm0, %xmm0 +; CHECK_FMA4-NEXT: retq + %m = fmul double %x, %y + %n = fsub double -0.0, %m + ret double %n +} + +define <4 x float> @test_v4f32_fneg_fmul(<4 x float> %x, <4 x float> %y) #0 { +; CHECK_FMA-LABEL: test_v4f32_fneg_fmul: +; CHECK_FMA: # BB#0: +; CHECK_FMA-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; CHECK_FMA-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 +; CHECK_FMA-NEXT: retq +; +; CHECK_FMA4-LABEL: test_v4f32_fneg_fmul: +; CHECK_FMA4: # BB#0: +; CHECK_FMA4-NEXT: vxorps %xmm2, %xmm2, %xmm2 +; CHECK_FMA4-NEXT: vfnmaddps %xmm2, %xmm1, %xmm0, %xmm0 +; CHECK_FMA4-NEXT: retq + %m = fmul <4 x float> %x, %y + %n = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %m + ret <4 x float> %n +} + attributes #0 = { "unsafe-fp-math"="true" }