Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -7475,9 +7475,9 @@
 }
 
 /// Returns true if is possible to fold MUL and an idiom that has already been
-/// recognized as ADDSUB(\p Opnd0, \p Opnd1) into FMADDSUB(x, y, \p Opnd1).
-/// If (and only if) true is returned, the operands of FMADDSUB are written to
-/// parameters \p Opnd0, \p Opnd1, \p Opnd2.
+/// recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
+/// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
+/// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2.
 ///
 /// Prior to calling this function it should be known that there is some
 /// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
@@ -7500,12 +7500,12 @@
 /// recognized ADDSUB idiom with ADDSUB operation is that such replacement
 /// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
 /// FMADDSUB is.
-static bool isFMAddSub(const X86Subtarget &Subtarget, SelectionDAG &DAG,
-                       SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
-                       unsigned ExpectedUses) {
+static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
+                                 SelectionDAG &DAG,
+                                 SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
+                                 unsigned ExpectedUses) {
   if (Opnd0.getOpcode() != ISD::FMUL ||
-      !Opnd0->hasNUsesOfValue(ExpectedUses, 0) ||
-      !Subtarget.hasAnyFMA())
+      !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
     return false;
 
   // FIXME: These checks must match the similar ones in
@@ -7542,7 +7542,7 @@
   SDValue Opnd2;
   // TODO: According to coverage reports, the FMADDSUB transform is not
   // triggered by any tests.
-  if (isFMAddSub(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts))
+  if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts))
     return DAG.getNode(X86ISD::FMADDSUB, DL, VT, Opnd0, Opnd1, Opnd2);
 
   // Do not generate X86ISD::ADDSUB node for 512-bit types even though
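For orientation, a minimal IR sketch (not part of this patch; the function names are illustrative) of the two idioms the renamed helper now serves. The ADDSUB idiom takes even lanes from the FSUB and odd lanes from the FADD, matching ADDSUBPD/ADDSUBPS; the SUBADD idiom flips the lane roles, and on x86 it only exists in fused form, as VFMSUBADD*:

; Hedged sketch, not taken from this patch; function names are illustrative.
define <4 x float> @addsub_idiom(<4 x float> %a, <4 x float> %b) {
entry:
  %sub = fsub <4 x float> %a, %b
  %add = fadd <4 x float> %a, %b
  ; Even lanes from %sub, odd lanes from %add: the ADDSUB form.
  %r = shufflevector <4 x float> %sub, <4 x float> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  ret <4 x float> %r
}

define <4 x float> @subadd_idiom(<4 x float> %a, <4 x float> %b) {
entry:
  %sub = fsub <4 x float> %a, %b
  %add = fadd <4 x float> %a, %b
  ; Even lanes from %add, odd lanes from %sub: the SUBADD form.
  %r = shufflevector <4 x float> %add, <4 x float> %sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  ret <4 x float> %r
}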
@@ -29692,17 +29692,18 @@
   return SDValue();
 }
 
-/// Returns true iff the shuffle node \p N can be replaced with ADDSUB
-/// operation. If true is returned then the operands of ADDSUB operation
+/// Returns true iff the shuffle node \p N can be replaced with ADDSUB(SUBADD)
+/// operation. If true is returned then the operands of ADDSUB(SUBADD) operation
 /// are written to the parameters \p Opnd0 and \p Opnd1.
 ///
-/// We combine shuffle to ADDSUB directly on the abstract vector shuffle nodes
+/// We combine shuffle to ADDSUB(SUBADD) directly on the abstract vector shuffle nodes
 /// so it is easier to generically match. We also insert dummy vector shuffle
 /// nodes for the operands which explicitly discard the lanes which are unused
 /// by this operation to try to flow through the rest of the combiner
 /// the fact that they're unused.
-static bool isAddSub(SDNode *N, const X86Subtarget &Subtarget,
-                     SDValue &Opnd0, SDValue &Opnd1) {
+static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
+                             SDValue &Opnd0, SDValue &Opnd1,
+                             bool matchSubAdd = false) {
   EVT VT = N->getValueType(0);
   if ((!Subtarget.hasSSE3() || (VT != MVT::v4f32 && VT != MVT::v2f64)) &&
       (!Subtarget.hasAVX() || (VT != MVT::v8f32 && VT != MVT::v4f64)))
@@ -29722,12 +29723,15 @@
   SDValue V1 = N->getOperand(0);
   SDValue V2 = N->getOperand(1);
 
-  // We require the first shuffle operand to be the FSUB node, and the second to
-  // be the FADD node.
-  if (V1.getOpcode() == ISD::FADD && V2.getOpcode() == ISD::FSUB) {
+  unsigned ExpectedOpcode = matchSubAdd ? ISD::FADD : ISD::FSUB;
+  unsigned NextExpectedOpcode = matchSubAdd ? ISD::FSUB : ISD::FADD;
+
+  // We require the first shuffle operand to be the ExpectedOpcode node,
+  // and the second to be the NextExpectedOpcode node.
+  if (V1.getOpcode() == NextExpectedOpcode && V2.getOpcode() == ExpectedOpcode) {
     ShuffleVectorSDNode::commuteMask(Mask);
     std::swap(V1, V2);
-  } else if (V1.getOpcode() != ISD::FSUB || V2.getOpcode() != ISD::FADD)
+  } else if (V1.getOpcode() != ExpectedOpcode || V2.getOpcode() != NextExpectedOpcode)
     return false;
 
   // If there are other uses of these operations we can't fold them.
@@ -29761,7 +29765,7 @@
                                      const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {
   SDValue Opnd0, Opnd1;
-  if (!isAddSub(N, Subtarget, Opnd0, Opnd1))
+  if (!isAddSubOrSubAdd(N, Subtarget, Opnd0, Opnd1))
     return SDValue();
 
   EVT VT = N->getValueType(0);
@@ -29769,7 +29773,7 @@
 
   // Try to generate X86ISD::FMADDSUB node here.
   SDValue Opnd2;
-  if (isFMAddSub(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2))
+  if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2))
     return DAG.getNode(X86ISD::FMADDSUB, DL, VT, Opnd0, Opnd1, Opnd2);
 
   // Do not generate X86ISD::ADDSUB node for 512-bit types even though
@@ -29781,6 +29785,26 @@
   return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
 }
 
+/// \brief Try to combine a shuffle into a target-specific
+/// mul-sub-add node.
+static SDValue combineShuffleToFMSubAdd(SDNode *N,
+                                        const X86Subtarget &Subtarget,
+                                        SelectionDAG &DAG) {
+  SDValue Opnd0, Opnd1;
+  if (!isAddSubOrSubAdd(N, Subtarget, Opnd0, Opnd1, true))
+    return SDValue();
+
+  EVT VT = N->getValueType(0);
+  SDLoc DL(N);
+
+  // Try to generate X86ISD::FMSUBADD node here.
+  SDValue Opnd2;
+  if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2))
+    return DAG.getNode(X86ISD::FMSUBADD, DL, VT, Opnd0, Opnd1, Opnd2);
+
+  return SDValue();
+}
+
 // We are looking for a shuffle where both sources are concatenated with undef
 // and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
 // if we can express this as a single-source shuffle, that's preferable.
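The new path matches the same shuffle-of-FADD/FSUB shape with the operand roles swapped; the operand-swapped shuffle is first canonicalized via ShuffleVectorSDNode::commuteMask. Note that, unlike the ADDSUB path above, there is no plain SUBADD node to fall back on, so combineShuffleToFMSubAdd returns an empty SDValue unless the FMA fold succeeds. A hedged sketch of an input it is intended to catch (names illustrative, not from the tree):

; Hedged sketch of the shape the new path matches.
define <2 x double> @subadd_of_mul(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
entry:
  %mul = fmul <2 x double> %a, %b
  %sub = fsub <2 x double> %mul, %c
  %add = fadd <2 x double> %mul, %c
  ; Even lane from %add, odd lane from %sub: SUBADD order, foldable to
  ; X86ISD::FMSUBADD when the fmul has exactly these two uses.
  %r = shufflevector <2 x double> %add, <2 x double> %sub, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %r
}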
@@ -29867,11 +29891,14 @@
   EVT VT = N->getValueType(0);
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   // If we have legalized the vector types, look for blends of FADD and FSUB
-  // nodes that we can fuse into an ADDSUB node.
+  // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
   if (TLI.isTypeLegal(VT)) {
     if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
       return AddSub;
 
+    if (SDValue FMSubAdd = combineShuffleToFMSubAdd(N, Subtarget, DAG))
+      return FMSubAdd;
+
     if (SDValue HAddSub = foldShuffleOfHorizOp(N))
       return HAddSub;
   }
Index: llvm/trunk/test/CodeGen/X86/fmsubadd-combine.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/fmsubadd-combine.ll
+++ llvm/trunk/test/CodeGen/X86/fmsubadd-combine.ll
@@ -8,26 +8,17 @@
 define <2 x double> @mul_subadd_pd128(<2 x double> %A, <2 x double> %B, <2 x double> %C) #0 {
 ; FMA3_256-LABEL: mul_subadd_pd128:
 ; FMA3_256:       # %bb.0: # %entry
-; FMA3_256-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
-; FMA3_256-NEXT:    vsubpd %xmm2, %xmm0, %xmm1
-; FMA3_256-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
-; FMA3_256-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; FMA3_256-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0
 ; FMA3_256-NEXT:    retq
 ;
 ; FMA3_512-LABEL: mul_subadd_pd128:
 ; FMA3_512:       # %bb.0: # %entry
-; FMA3_512-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
-; FMA3_512-NEXT:    vsubpd %xmm2, %xmm0, %xmm1
-; FMA3_512-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
-; FMA3_512-NEXT:    vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; FMA3_512-NEXT:    vfmsubadd213pd %xmm2, %xmm1, %xmm0
 ; FMA3_512-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_subadd_pd128:
 ; FMA4:       # %bb.0: # %entry
-; FMA4-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
-; FMA4-NEXT:    vsubpd %xmm2, %xmm0, %xmm1
-; FMA4-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
-; FMA4-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; FMA4-NEXT:    vfmsubaddpd %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 entry:
   %AB = fmul <2 x double> %A, %B
@@ -40,18 +31,12 @@
 define <4 x float> @mul_subadd_ps128(<4 x float> %A, <4 x float> %B, <4 x float> %C) #0 {
 ; FMA3-LABEL: mul_subadd_ps128:
 ; FMA3:       # %bb.0: # %entry
-; FMA3-NEXT:    vmulps %xmm1, %xmm0, %xmm0
-; FMA3-NEXT:    vsubps %xmm2, %xmm0, %xmm1
-; FMA3-NEXT:    vaddps %xmm2, %xmm0, %xmm0
-; FMA3-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; FMA3-NEXT:    vfmsubadd213ps %xmm2, %xmm1, %xmm0
 ; FMA3-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_subadd_ps128:
 ; FMA4:       # %bb.0: # %entry
-; FMA4-NEXT:    vmulps %xmm1, %xmm0, %xmm0
-; FMA4-NEXT:    vsubps %xmm2, %xmm0, %xmm1
-; FMA4-NEXT:    vaddps %xmm2, %xmm0, %xmm0
-; FMA4-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; FMA4-NEXT:    vfmsubaddps %xmm2, %xmm1, %xmm0, %xmm0
 ; FMA4-NEXT:    retq
 entry:
   %AB = fmul <4 x float> %A, %B
@@ -64,18 +49,12 @@
 define <4 x double> @mul_subadd_pd256(<4 x double> %A, <4 x double> %B, <4 x double> %C) #0 {
 ; FMA3-LABEL: mul_subadd_pd256:
 ; FMA3:       # %bb.0: # %entry
-; FMA3-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
-; FMA3-NEXT:    vsubpd %ymm2, %ymm0, %ymm1
-; FMA3-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
-; FMA3-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
+; FMA3-NEXT:    vfmsubadd213pd %ymm2, %ymm1, %ymm0
 ; FMA3-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_subadd_pd256:
 ; FMA4:       # %bb.0: # %entry
-; FMA4-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
-; FMA4-NEXT:    vsubpd %ymm2, %ymm0, %ymm1
-; FMA4-NEXT:    vaddpd %ymm2, %ymm0, %ymm0
-; FMA4-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
+; FMA4-NEXT:    vfmsubaddpd %ymm2, %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
 entry:
   %AB = fmul <4 x double> %A, %B
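Each test above gives the fmul exactly two uses, the fsub and the fadd, which is what the hasNUsesOfValue(ExpectedUses, 0) check in isFMAddSubOrFMSubAdd (called with ExpectedUses == 2 from the shuffle combines) requires. A hedged negative sketch, assuming that check: an extra use of the product should block the fold, since the multiply would have to be kept alive anyway.

; Hypothetical negative example, not from the test file.
define <4 x float> @subadd_extra_use(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float>* %p) {
entry:
  %mul = fmul <4 x float> %a, %b
  ; Third use of %mul: the fmul no longer has exactly two uses, so the
  ; FMSUBADD fold is expected not to fire.
  store <4 x float> %mul, <4 x float>* %p
  %sub = fsub <4 x float> %mul, %c
  %add = fadd <4 x float> %mul, %c
  %r = shufflevector <4 x float> %add, <4 x float> %sub, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  ret <4 x float> %r
}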
@@ -88,18 +67,12 @@
 define <8 x float> @mul_subadd_ps256(<8 x float> %A, <8 x float> %B, <8 x float> %C) #0 {
 ; FMA3-LABEL: mul_subadd_ps256:
 ; FMA3:       # %bb.0: # %entry
-; FMA3-NEXT:    vmulps %ymm1, %ymm0, %ymm0
-; FMA3-NEXT:    vsubps %ymm2, %ymm0, %ymm1
-; FMA3-NEXT:    vaddps %ymm2, %ymm0, %ymm0
-; FMA3-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; FMA3-NEXT:    vfmsubadd213ps %ymm2, %ymm1, %ymm0
 ; FMA3-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_subadd_ps256:
 ; FMA4:       # %bb.0: # %entry
-; FMA4-NEXT:    vmulps %ymm1, %ymm0, %ymm0
-; FMA4-NEXT:    vsubps %ymm2, %ymm0, %ymm1
-; FMA4-NEXT:    vaddps %ymm2, %ymm0, %ymm0
-; FMA4-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; FMA4-NEXT:    vfmsubaddps %ymm2, %ymm1, %ymm0, %ymm0
 ; FMA4-NEXT:    retq
 entry:
   %AB = fmul <8 x float> %A, %B
@@ -112,34 +85,19 @@
 define <8 x double> @mul_subadd_pd512(<8 x double> %A, <8 x double> %B, <8 x double> %C) #0 {
 ; FMA3_256-LABEL: mul_subadd_pd512:
 ; FMA3_256:       # %bb.0: # %entry
-; FMA3_256-NEXT:    vmulpd %ymm2, %ymm0, %ymm0
-; FMA3_256-NEXT:    vmulpd %ymm3, %ymm1, %ymm1
-; FMA3_256-NEXT:    vsubpd %ymm5, %ymm1, %ymm2
-; FMA3_256-NEXT:    vsubpd %ymm4, %ymm0, %ymm3
-; FMA3_256-NEXT:    vaddpd %ymm5, %ymm1, %ymm1
-; FMA3_256-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3]
-; FMA3_256-NEXT:    vaddpd %ymm4, %ymm0, %ymm0
-; FMA3_256-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3]
+; FMA3_256-NEXT:    vfmsubadd213pd %ymm4, %ymm2, %ymm0
+; FMA3_256-NEXT:    vfmsubadd213pd %ymm5, %ymm3, %ymm1
 ; FMA3_256-NEXT:    retq
 ;
 ; FMA3_512-LABEL: mul_subadd_pd512:
 ; FMA3_512:       # %bb.0: # %entry
-; FMA3_512-NEXT:    vmulpd %zmm1, %zmm0, %zmm0
-; FMA3_512-NEXT:    vsubpd %zmm2, %zmm0, %zmm1
-; FMA3_512-NEXT:    vaddpd %zmm2, %zmm0, %zmm0
-; FMA3_512-NEXT:    vshufpd {{.*#+}} zmm0 = zmm0[0],zmm1[1],zmm0[2],zmm1[3],zmm0[4],zmm1[5],zmm0[6],zmm1[7]
+; FMA3_512-NEXT:    vfmsubadd213pd %zmm2, %zmm1, %zmm0
 ; FMA3_512-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_subadd_pd512:
 ; FMA4:       # %bb.0: # %entry
-; FMA4-NEXT:    vmulpd %ymm2, %ymm0, %ymm0
-; FMA4-NEXT:    vmulpd %ymm3, %ymm1, %ymm1
-; FMA4-NEXT:    vsubpd %ymm5, %ymm1, %ymm2
-; FMA4-NEXT:    vsubpd %ymm4, %ymm0, %ymm3
-; FMA4-NEXT:    vaddpd %ymm5, %ymm1, %ymm1
-; FMA4-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3]
-; FMA4-NEXT:    vaddpd %ymm4, %ymm0, %ymm0
-; FMA4-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3]
+; FMA4-NEXT:    vfmsubaddpd %ymm4, %ymm2, %ymm0, %ymm0
+; FMA4-NEXT:    vfmsubaddpd %ymm5, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 entry:
   %AB = fmul <8 x double> %A, %B
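The 512-bit cases echo the comment in isFMAddSubOrFMSubAdd: plain 512-bit ADDSUB does not exist, but 512-bit FMSUBADD does, so the FMA3_512 configuration folds to a single zmm instruction while FMA3_256 and FMA4 fold each ymm half separately. A hedged sketch of the 512-bit input shape (the mask below is an illustrative even-from-add, odd-from-sub interleave, not copied from the test file):

; Hedged 512-bit sketch; names and mask are illustrative.
define <8 x double> @subadd_pd512_sketch(<8 x double> %a, <8 x double> %b, <8 x double> %c) {
entry:
  %mul = fmul <8 x double> %a, %b
  %sub = fsub <8 x double> %mul, %c
  %add = fadd <8 x double> %mul, %c
  ; Even lanes from %add, odd lanes from %sub across all eight elements.
  %r = shufflevector <8 x double> %add, <8 x double> %sub,
       <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
  ret <8 x double> %r
}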
@@ -152,35 +110,19 @@
 define <16 x float> @mul_subadd_ps512(<16 x float> %A, <16 x float> %B, <16 x float> %C) #0 {
 ; FMA3_256-LABEL: mul_subadd_ps512:
 ; FMA3_256:       # %bb.0: # %entry
-; FMA3_256-NEXT:    vmulps %ymm2, %ymm0, %ymm0
-; FMA3_256-NEXT:    vmulps %ymm3, %ymm1, %ymm1
-; FMA3_256-NEXT:    vsubps %ymm5, %ymm1, %ymm2
-; FMA3_256-NEXT:    vsubps %ymm4, %ymm0, %ymm3
-; FMA3_256-NEXT:    vaddps %ymm5, %ymm1, %ymm1
-; FMA3_256-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
-; FMA3_256-NEXT:    vaddps %ymm4, %ymm0, %ymm0
-; FMA3_256-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4],ymm3[5],ymm0[6],ymm3[7]
+; FMA3_256-NEXT:    vfmsubadd213ps %ymm4, %ymm2, %ymm0
+; FMA3_256-NEXT:    vfmsubadd213ps %ymm5, %ymm3, %ymm1
 ; FMA3_256-NEXT:    retq
 ;
 ; FMA3_512-LABEL: mul_subadd_ps512:
 ; FMA3_512:       # %bb.0: # %entry
-; FMA3_512-NEXT:    vmulps %zmm1, %zmm0, %zmm1
-; FMA3_512-NEXT:    vaddps %zmm2, %zmm1, %zmm0
-; FMA3_512-NEXT:    movw $-21846, %ax # imm = 0xAAAA
-; FMA3_512-NEXT:    kmovw %eax, %k1
-; FMA3_512-NEXT:    vsubps %zmm2, %zmm1, %zmm0 {%k1}
+; FMA3_512-NEXT:    vfmsubadd213ps %zmm2, %zmm1, %zmm0
 ; FMA3_512-NEXT:    retq
 ;
 ; FMA4-LABEL: mul_subadd_ps512:
 ; FMA4:       # %bb.0: # %entry
-; FMA4-NEXT:    vmulps %ymm2, %ymm0, %ymm0
-; FMA4-NEXT:    vmulps %ymm3, %ymm1, %ymm1
-; FMA4-NEXT:    vsubps %ymm5, %ymm1, %ymm2
-; FMA4-NEXT:    vsubps %ymm4, %ymm0, %ymm3
-; FMA4-NEXT:    vaddps %ymm5, %ymm1, %ymm1
-; FMA4-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
-; FMA4-NEXT:    vaddps %ymm4, %ymm0, %ymm0
-; FMA4-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4],ymm3[5],ymm0[6],ymm3[7]
+; FMA4-NEXT:    vfmsubaddps %ymm4, %ymm2, %ymm0, %ymm0
+; FMA4-NEXT:    vfmsubaddps %ymm5, %ymm3, %ymm1, %ymm1
 ; FMA4-NEXT:    retq
 entry:
   %AB = fmul <16 x float> %A, %B
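For contrast with these subadd tests, a hedged sketch of the pre-existing addsub counterpart (the function name is illustrative, not from the tree): with even lanes taken from the fsub instead, the same machinery folds to X86ISD::FMADDSUB and the vfmaddsub* instructions rather than FMSUBADD.

; Hedged sketch of the addsub counterpart handled by
; combineShuffleToAddSubOrFMAddSub.
define <4 x float> @mul_addsub_ps128_sketch(<4 x float> %A, <4 x float> %B, <4 x float> %C) {
entry:
  %AB = fmul <4 x float> %A, %B
  %Sub = fsub <4 x float> %AB, %C
  %Add = fadd <4 x float> %AB, %C
  ; Even lanes from %Sub, odd lanes from %Add: ADDSUB order, so this is
  ; expected to become vfmaddsub* when FMA is available.
  %select = shufflevector <4 x float> %Sub, <4 x float> %Add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  ret <4 x float> %select
}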