Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -26135,24 +26135,39 @@
 static SDValue PerformFNEGCombine(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget *Subtarget) {
   EVT VT = N->getValueType(0);
+  EVT SVT = VT.getScalarType();
   SDValue Arg = N->getOperand(0);
+  SDLoc DL(N);
+
+  // Let legalize expand this if it isn't a legal type yet.
+  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
+    return SDValue();
+
+  // If we're negating a FMUL node on a target with FMA, then we can avoid the
+  // use of a constant by performing (0 - A*B) instead.
+  if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
+      (Subtarget->hasFMA() || Subtarget->hasFMA4() || Subtarget->hasAVX512())) {
+    SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
+    return DAG.getNode(X86ISD::FNMADD, DL, VT, Arg.getOperand(0),
+                       Arg.getOperand(1), Zero);
+  }
 
   // If we're negating a FMA node, then we can adjust the
   // instruction to include the extra negation.
   if (Arg.hasOneUse()) {
     switch (Arg.getOpcode()) {
-    case X86ISD::FMADD:
-      return DAG.getNode(X86ISD::FNMSUB, SDLoc(N), VT, Arg.getOperand(0),
-                         Arg.getOperand(1), Arg.getOperand(2));
-    case X86ISD::FMSUB:
-      return DAG.getNode(X86ISD::FNMADD, SDLoc(N), VT, Arg.getOperand(0),
-                         Arg.getOperand(1), Arg.getOperand(2));
-    case X86ISD::FNMADD:
-      return DAG.getNode(X86ISD::FMSUB, SDLoc(N), VT, Arg.getOperand(0),
-                         Arg.getOperand(1), Arg.getOperand(2));
-    case X86ISD::FNMSUB:
-      return DAG.getNode(X86ISD::FMADD, SDLoc(N), VT, Arg.getOperand(0),
-                         Arg.getOperand(1), Arg.getOperand(2));
+    case X86ISD::FMADD:
+      return DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
+                         Arg.getOperand(1), Arg.getOperand(2));
+    case X86ISD::FMSUB:
+      return DAG.getNode(X86ISD::FNMADD, DL, VT, Arg.getOperand(0),
+                         Arg.getOperand(1), Arg.getOperand(2));
+    case X86ISD::FNMADD:
+      return DAG.getNode(X86ISD::FMSUB, DL, VT, Arg.getOperand(0),
+                         Arg.getOperand(1), Arg.getOperand(2));
+    case X86ISD::FNMSUB:
+      return DAG.getNode(X86ISD::FMADD, DL, VT, Arg.getOperand(0),
+                         Arg.getOperand(1), Arg.getOperand(2));
     }
   }
   return SDValue();
Index: test/CodeGen/X86/fma_patterns.ll
===================================================================
--- test/CodeGen/X86/fma_patterns.ll
+++ test/CodeGen/X86/fma_patterns.ll
@@ -1109,4 +1109,76 @@
   ret <4 x float> %a
 }
 
+; Pattern: (fneg (fmul x, y)) -> (fnmadd x, y, 0)
+
+define double @test_f64_fneg_fmul(double %x, double %y) #0 {
+; FMA-LABEL: test_f64_fneg_fmul:
+; FMA:       # BB#0:
+; FMA-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; FMA-NEXT:    vfnmadd213sd %xmm2, %xmm1, %xmm0
+; FMA-NEXT:    retq
+;
+; FMA4-LABEL: test_f64_fneg_fmul:
+; FMA4:       # BB#0:
+; FMA4-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; FMA4-NEXT:    vfnmaddsd %xmm2, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT:    retq
+;
+; AVX512-LABEL: test_f64_fneg_fmul:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vfnmadd213sd %xmm2, %xmm0, %xmm1
+; AVX512-NEXT:    vmovaps %zmm1, %zmm0
+; AVX512-NEXT:    retq
+  %m = fmul double %x, %y
+  %n = fsub double -0.0, %m
+  ret double %n
+}
+
+define <4 x float> @test_v4f32_fneg_fmul(<4 x float> %x, <4 x float> %y) #0 {
+; FMA-LABEL: test_v4f32_fneg_fmul:
+; FMA:       # BB#0:
+; FMA-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; FMA-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0
+; FMA-NEXT:    retq
+;
+; FMA4-LABEL: test_v4f32_fneg_fmul:
+; FMA4:       # BB#0:
+; FMA4-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; FMA4-NEXT:    vfnmaddps %xmm2, %xmm1, %xmm0, %xmm0
+; FMA4-NEXT:    retq
+;
+; AVX512-LABEL: test_v4f32_fneg_fmul:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT:    vfnmadd213ps %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    retq
+  %m = fmul <4 x float> %x, %y
+  %n = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %m
+  ret <4 x float> %n
+}
+
+define <4 x double> @test_v4f64_fneg_fmul(<4 x double> %x, <4 x double> %y) #0 {
+; FMA-LABEL: test_v4f64_fneg_fmul:
+; FMA:       # BB#0:
+; FMA-NEXT:    vxorpd %ymm2, %ymm2, %ymm2
+; FMA-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0
+; FMA-NEXT:    retq
+;
+; FMA4-LABEL: test_v4f64_fneg_fmul:
+; FMA4:       # BB#0:
+; FMA4-NEXT:    vxorpd %ymm2, %ymm2, %ymm2
+; FMA4-NEXT:    vfnmaddpd %ymm2, %ymm1, %ymm0, %ymm0
+; FMA4-NEXT:    retq
+;
+; AVX512-LABEL: test_v4f64_fneg_fmul:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vxorps %ymm2, %ymm2, %ymm2
+; AVX512-NEXT:    vfnmadd213pd %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    retq
+  %m = fmul <4 x double> %x, %y
+  %n = fsub <4 x double> <double -0.0, double -0.0, double -0.0, double -0.0>, %m
+  ret <4 x double> %n
+}
+
 attributes #0 = { "unsafe-fp-math"="true" }
Index: test/CodeGen/X86/fma_patterns_wide.ll
===================================================================
--- test/CodeGen/X86/fma_patterns_wide.ll
+++ test/CodeGen/X86/fma_patterns_wide.ll
@@ -737,4 +737,56 @@
   ret <16 x float> %a
 }
 
+; Pattern: (fneg (fmul x, y)) -> (fnmadd x, y, 0)
+
+define <16 x float> @test_v16f32_fneg_fmul(<16 x float> %x, <16 x float> %y) #0 {
+; FMA-LABEL: test_v16f32_fneg_fmul:
+; FMA:       # BB#0:
+; FMA-NEXT:    vxorps %ymm4, %ymm4, %ymm4
+; FMA-NEXT:    vfnmadd213ps %ymm4, %ymm2, %ymm0
+; FMA-NEXT:    vfnmadd213ps %ymm4, %ymm3, %ymm1
+; FMA-NEXT:    retq
+;
+; FMA4-LABEL: test_v16f32_fneg_fmul:
+; FMA4:       # BB#0:
+; FMA4-NEXT:    vxorps %ymm4, %ymm4, %ymm4
+; FMA4-NEXT:    vfnmaddps %ymm4, %ymm2, %ymm0, %ymm0
+; FMA4-NEXT:    vfnmaddps %ymm4, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT:    retq
+;
+; AVX512-LABEL: test_v16f32_fneg_fmul:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; AVX512-NEXT:    vfnmadd213ps %zmm2, %zmm1, %zmm0
+; AVX512-NEXT:    retq
+  %m = fmul <16 x float> %x, %y
+  %n = fsub <16 x float> <float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0>, %m
+  ret <16 x float> %n
+}
+
+define <8 x double> @test_v8f64_fneg_fmul(<8 x double> %x, <8 x double> %y) #0 {
+; FMA-LABEL: test_v8f64_fneg_fmul:
+; FMA:       # BB#0:
+; FMA-NEXT:    vxorpd %ymm4, %ymm4, %ymm4
+; FMA-NEXT:    vfnmadd213pd %ymm4, %ymm2, %ymm0
+; FMA-NEXT:    vfnmadd213pd %ymm4, %ymm3, %ymm1
+; FMA-NEXT:    retq
+;
+; FMA4-LABEL: test_v8f64_fneg_fmul:
+; FMA4:       # BB#0:
+; FMA4-NEXT:    vxorpd %ymm4, %ymm4, %ymm4
+; FMA4-NEXT:    vfnmaddpd %ymm4, %ymm2, %ymm0, %ymm0
+; FMA4-NEXT:    vfnmaddpd %ymm4, %ymm3, %ymm1, %ymm1
+; FMA4-NEXT:    retq
+;
+; AVX512-LABEL: test_v8f64_fneg_fmul:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpxord %zmm2, %zmm2, %zmm2
+; AVX512-NEXT:    vfnmadd213pd %zmm2, %zmm1, %zmm0
+; AVX512-NEXT:    retq
+  %m = fmul <8 x double> %x, %y
+  %n = fsub <8 x double> <double -0.0, double -0.0, double -0.0, double -0.0, double -0.0, double -0.0, double -0.0, double -0.0>, %m
+  ret <8 x double> %n
+}
+
 attributes #0 = { "unsafe-fp-math"="true" }
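
Note: the combine is also enabled for scalar f32 (the SVT == MVT::f32 case in the C++ change), which the tests above leave uncovered. A minimal extra test in the style of fma_patterns.ll could look like the sketch below; the function name is made up and the checks are deliberately loose, since exact -NEXT sequences would need to be regenerated from llc output (the FMA3/FMA4 scalar single-precision forms would be vfnmadd213ss/vfnmaddss, by analogy with the f64 case):

define float @test_f32_fneg_fmul(float %x, float %y) #0 {
; FMA: vxorps
; FMA: vfnmadd213ss
; FMA4: vfnmaddss
  %m = fmul float %x, %y
  %n = fsub float -0.0, %m
  ret float %n
}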