Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -540,6 +540,10 @@
       // Use ANDPD and ORPD to simulate FCOPYSIGN.
       setOperationAction(ISD::FCOPYSIGN, VT, Custom);
 
+      // These might be better off as horizontal vector ops.
+      setOperationAction(ISD::FADD, VT, Custom);
+      setOperationAction(ISD::FSUB, VT, Custom);
+
       // We don't support sin/cos/fmod
       setOperationAction(ISD::FSIN   , VT, Expand);
       setOperationAction(ISD::FCOS   , VT, Expand);
@@ -18309,6 +18313,61 @@
                       In, DAG.getUNDEF(SVT)));
 }
 
+/// Horizontal vector math instructions may be slower than normal math with
+/// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
+/// implementation, and likely shuffle complexity of the alternate sequence.
+static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
+                                  const X86Subtarget &Subtarget) {
+  bool IsOptimizingSize = DAG.getMachineFunction().getFunction().optForSize();
+  bool HasFastHOps = Subtarget.hasFastHorizontalOps();
+  return !IsSingleSource || IsOptimizingSize || HasFastHOps;
+}
+
+/// Depending on uarch and/or optimizing for size, we might prefer to use a
+/// vector operation in place of the typical scalar operation.
+static SDValue lowerFaddFsub(SDValue Op, SelectionDAG &DAG,
+                             const X86Subtarget &Subtarget) {
+  MVT VT = Op.getSimpleValueType();
+  assert((VT == MVT::f32 || VT == MVT::f64) && "Only expecting float/double");
+  SDValue LHS = Op.getOperand(0);
+  SDValue RHS = Op.getOperand(1);
+  if (!LHS.hasOneUse() || !RHS.hasOneUse() || !Subtarget.hasSSE3())
+    return Op;
+
+  if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+      RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+      LHS.getOperand(0) != RHS.getOperand(0))
+    return Op;
+
+  if (!isa<ConstantSDNode>(LHS.getOperand(1)) ||
+      !isa<ConstantSDNode>(RHS.getOperand(1)) ||
+      !shouldUseHorizontalOp(true, DAG, Subtarget))
+    return Op;
+
+  // fadd (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
+  // fadd (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
+  // fsub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
+  auto HOpcode = Op.getOpcode() == ISD::FADD ? X86ISD::FHADD : X86ISD::FHSUB;
+  SDValue X = LHS.getOperand(0);
+  EVT VecVT = X.getValueType();
+  unsigned BitWidth = VecVT.getSizeInBits();
+  unsigned LExtIndex = LHS.getConstantOperandVal(1);
+  unsigned RExtIndex = RHS.getConstantOperandVal(1);
+  if (((LExtIndex == 0 && RExtIndex == 1) ||
+       (LExtIndex == 1 && RExtIndex == 0 && HOpcode == X86ISD::FHADD)) &&
+      (BitWidth == 128 || BitWidth == 256)) {
+    // Creating a 256-bit horizontal op would be wasteful, so extract the
+    // 256-bit source op to 128-bit (this is free: ymm -> xmm).
+    SDLoc DL(Op);
+    if (BitWidth == 256)
+      X = extract128BitVector(X, 0, DAG, DL);
+    SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
+    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, HOp,
+                       DAG.getIntPtrConstant(0, DL));
+  }
+  return Op;
+}
+
 /// The only differences between FABS and FNEG are the mask and the logic op.
 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
@@ -26161,6 +26220,8 @@
   case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
   case ISD::LOAD:               return LowerLoad(Op, Subtarget, DAG);
   case ISD::STORE:              return LowerStore(Op, Subtarget, DAG);
+  case ISD::FADD:
+  case ISD::FSUB:               return lowerFaddFsub(Op, DAG, Subtarget);
   case ISD::FABS:
   case ISD::FNEG:               return LowerFABSorFNEG(Op, DAG);
   case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
@@ -37867,16 +37928,6 @@
   return true;
 }
 
-/// Horizontal vector math instructions may be slower than normal math with
-/// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
-/// implementation, and likely shuffle complexity of the alternate sequence.
-static bool shouldCombineToHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
-                                        const X86Subtarget &Subtarget) {
-  bool IsOptimizingSize = DAG.getMachineFunction().getFunction().optForSize();
-  bool HasFastHOps = Subtarget.hasFastHorizontalOps();
-  return !IsSingleSource || IsOptimizingSize || HasFastHOps;
-}
-
 /// Do target-specific dag combines on floating-point adds/subs.
 static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget) {
@@ -37884,16 +37935,16 @@
   EVT VT = N->getValueType(0);
   SDValue LHS = N->getOperand(0);
   SDValue RHS = N->getOperand(1);
   bool IsFadd = N->getOpcode() == ISD::FADD;
+  auto HorizOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
   assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");
 
   // Try to synthesize horizontal add/sub from adds/subs of shuffles.
   if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
        (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
      isHorizontalBinOp(LHS, RHS, IsFadd) &&
-      shouldCombineToHorizontalOp(LHS == RHS, DAG, Subtarget)) {
-    auto NewOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
-    return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
-  }
+      shouldUseHorizontalOp(LHS == RHS, DAG, Subtarget))
+    return DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
+
   return SDValue();
 }
@@ -40794,7 +40845,7 @@
   if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
        VT == MVT::v8i32) &&
       Subtarget.hasSSSE3() && isHorizontalBinOp(Op0, Op1, true) &&
-      shouldCombineToHorizontalOp(Op0 == Op1, DAG, Subtarget)) {
+      shouldUseHorizontalOp(Op0 == Op1, DAG, Subtarget)) {
     auto HADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                           ArrayRef<SDValue> Ops) {
       return DAG.getNode(X86ISD::HADD, DL, Ops[0].getValueType(), Ops);
@@ -40929,7 +40980,7 @@
   if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
        VT == MVT::v8i32) &&
       Subtarget.hasSSSE3() && isHorizontalBinOp(Op0, Op1, false) &&
-      shouldCombineToHorizontalOp(Op0 == Op1, DAG, Subtarget)) {
+      shouldUseHorizontalOp(Op0 == Op1, DAG, Subtarget)) {
     auto HSUBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                           ArrayRef<SDValue> Ops) {
       return DAG.getNode(X86ISD::HSUB, DL, Ops[0].getValueType(), Ops);
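For context, the scalar pattern that lowerFaddFsub() matches looks like the IR below. This is an illustrative sketch, not a test from the patch: the function name and RUN line are made up, assuming 'fast-hops' is the feature string behind hasFastHorizontalOps(). With that feature (or when optimizing for size), the extract/extract/fadd sequence should now select a single horizontal add:

; Illustrative sketch only -- assumes the fast-hops subtarget feature.
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3,fast-hops
define float @hadd_sketch(<4 x float> %x) {
  %x0 = extractelement <4 x float> %x, i32 0
  %x1 = extractelement <4 x float> %x, i32 1
  ; fadd (extractelt X, 0), (extractelt X, 1) --> extractelt (hadd X, X), 0,
  ; so this should compile to 'haddps %xmm0, %xmm0' plus 'retq'.
  %x01 = fadd float %x0, %x1
  ret float %x01
}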
Index: test/CodeGen/X86/haddsub-undef.ll
===================================================================
--- test/CodeGen/X86/haddsub-undef.ll
+++ test/CodeGen/X86/haddsub-undef.ll
@@ -84,17 +84,38 @@
 }
 
 define <4 x float> @test4_undef(<4 x float> %a, <4 x float> %b) {
-; SSE-LABEL: test4_undef:
-; SSE: # %bb.0:
-; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-NEXT: addss %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE-SLOW-LABEL: test4_undef:
+; SSE-SLOW: # %bb.0:
+; SSE-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE-SLOW-NEXT: retq
 ;
-; AVX-LABEL: test4_undef:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; SSE-FAST-LABEL: test4_undef:
+; SSE-FAST: # %bb.0:
+; SSE-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE-FAST-NEXT: retq
+;
+; AVX1-SLOW-LABEL: test4_undef:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: test4_undef:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX1-FAST-NEXT: retq
+;
+; AVX2-SLOW-LABEL: test4_undef:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX2-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: test4_undef:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX2-FAST-NEXT: retq
   %vecext = extractelement <4 x float> %a, i32 0
   %vecext1 = extractelement <4 x float> %a, i32 1
   %add = fadd float %vecext, %vecext1
@@ -103,19 +124,40 @@
 }
 
 define <2 x double> @test5_undef(<2 x double> %a, <2 x double> %b) {
-; SSE-LABEL: test5_undef:
-; SSE: # %bb.0:
-; SSE-NEXT: movapd %xmm0, %xmm1
-; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE-NEXT: addsd %xmm0, %xmm1
-; SSE-NEXT: movapd %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE-SLOW-LABEL: test5_undef:
+; SSE-SLOW: # %bb.0:
+; SSE-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE-SLOW-NEXT: addsd %xmm0, %xmm1
+; SSE-SLOW-NEXT: movapd %xmm1, %xmm0
+; SSE-SLOW-NEXT: retq
 ;
-; AVX-LABEL: test5_undef:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; SSE-FAST-LABEL: test5_undef:
+; SSE-FAST: # %bb.0:
+; SSE-FAST-NEXT: haddpd %xmm0, %xmm0
+; SSE-FAST-NEXT: retq
+;
+; AVX1-SLOW-LABEL: test5_undef:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: test5_undef:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX1-FAST-NEXT: retq
+;
+; AVX2-SLOW-LABEL: test5_undef:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX2-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: test5_undef:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX2-FAST-NEXT: retq
   %vecext = extractelement <2 x double> %a, i32 0
   %vecext1 = extractelement <2 x double> %a, i32 1
   %add = fadd double %vecext, %vecext1
@@ -166,27 +208,67 @@
 }
 
 define <4 x float> @test8_undef(<4 x float> %a, <4 x float> %b) {
-; SSE-LABEL: test8_undef:
-; SSE: # %bb.0:
-; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-NEXT: addss %xmm0, %xmm1
-; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT: addss %xmm2, %xmm0
-; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE-SLOW-LABEL: test8_undef:
+; SSE-SLOW: # %bb.0:
+; SSE-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-SLOW-NEXT: addss %xmm0, %xmm1
+; SSE-SLOW-NEXT: movaps %xmm0, %xmm2
+; SSE-SLOW-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
+; SSE-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE-SLOW-NEXT: addss %xmm2, %xmm0
+; SSE-SLOW-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-SLOW-NEXT: movaps %xmm1, %xmm0
+; SSE-SLOW-NEXT: retq
 ;
-; AVX-LABEL: test8_undef:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
-; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT: vaddss %xmm0, %xmm2, %xmm0
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX-NEXT: retq
+; SSE-FAST-LABEL: test8_undef:
+; SSE-FAST: # %bb.0:
+; SSE-FAST-NEXT: movaps %xmm0, %xmm1
+; SSE-FAST-NEXT: haddps %xmm0, %xmm1
+; SSE-FAST-NEXT: movaps %xmm0, %xmm2
+; SSE-FAST-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
+; SSE-FAST-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE-FAST-NEXT: addss %xmm2, %xmm0
+; SSE-FAST-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-FAST-NEXT: movaps %xmm1, %xmm0
+; SSE-FAST-NEXT: retq
+;
+; AVX1-SLOW-LABEL: test8_undef:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm1
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX1-SLOW-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: test8_undef:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm1
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX1-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX1-FAST-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX1-FAST-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-FAST-NEXT: retq
+;
+; AVX2-SLOW-LABEL: test8_undef:
+; AVX2-SLOW: # %bb.0:
+; AVX2-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX2-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm1
+; AVX2-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX2-SLOW-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; AVX2-SLOW-NEXT: retq
+;
+; AVX2-FAST-LABEL: test8_undef:
+; AVX2-FAST: # %bb.0:
+; AVX2-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm1
+; AVX2-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX2-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX2-FAST-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX2-FAST-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX2-FAST-NEXT: retq
   %vecext = extractelement <4 x float> %a, i32 0
   %vecext1 = extractelement <4 x float> %a, i32 1
   %add = fadd float %vecext, %vecext1
@@ -241,14 +323,21 @@
 }
 
 define <8 x float> @test11_undef(<8 x float> %a, <8 x float> %b) {
-; SSE-LABEL: test11_undef:
-; SSE: # %bb.0:
-; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-NEXT: addss %xmm1, %xmm0
-; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
-; SSE-NEXT: addss %xmm3, %xmm1
-; SSE-NEXT: movddup {{.*#+}} xmm1 = xmm1[0,0]
-; SSE-NEXT: retq
+; SSE-SLOW-LABEL: test11_undef:
+; SSE-SLOW: # %bb.0:
+; SSE-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; SSE-SLOW-NEXT: addss %xmm3, %xmm1
+; SSE-SLOW-NEXT: movddup {{.*#+}} xmm1 = xmm1[0,0]
+; SSE-SLOW-NEXT: retq
+;
+; SSE-FAST-LABEL: test11_undef:
+; SSE-FAST: # %bb.0:
+; SSE-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE-FAST-NEXT: haddps %xmm3, %xmm3
+; SSE-FAST-NEXT: movddup {{.*#+}} xmm1 = xmm3[0,0]
+; SSE-FAST-NEXT: retq
 ;
 ; AVX-LABEL: test11_undef:
 ; AVX: # %bb.0:
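The new SLOW/FAST check prefixes in these tests imply updated RUN lines at the top of each file, which sit outside the visible hunks. They presumably follow a pattern like the one below for haddsub.ll (the exact flag spellings are an assumption based on the fast-hops feature behind hasFastHorizontalOps()):

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3           | FileCheck %s --check-prefixes=SSE3,SSE3-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse3,fast-hops | FileCheck %s --check-prefixes=SSE3,SSE3-FAST
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx            | FileCheck %s --check-prefixes=AVX,AVX-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,fast-hops  | FileCheck %s --check-prefixes=AVX,AVX-FAST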
Index: test/CodeGen/X86/haddsub.ll
===================================================================
--- test/CodeGen/X86/haddsub.ll
+++ test/CodeGen/X86/haddsub.ll
@@ -584,17 +584,27 @@
 }
 
 define float @extract_extract_v4f32_fadd_f32(<4 x float> %x) {
-; SSE3-LABEL: extract_extract_v4f32_fadd_f32:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT: addss %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v4f32_fadd_f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
   %x0 = extractelement <4 x float> %x, i32 0
   %x1 = extractelement <4 x float> %x, i32 1
   %x01 = fadd float %x0, %x1
@@ -602,17 +612,27 @@
 }
 
 define float @extract_extract_v4f32_fadd_f32_commute(<4 x float> %x) {
-; SSE3-LABEL: extract_extract_v4f32_fadd_f32_commute:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT: addss %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v4f32_fadd_f32_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
   %x0 = extractelement <4 x float> %x, i32 0
   %x1 = extractelement <4 x float> %x, i32 1
   %x01 = fadd float %x1, %x0
@@ -620,18 +640,29 @@
 }
 
 define float @extract_extract_v8f32_fadd_f32(<8 x float> %x) {
-; SSE3-LABEL: extract_extract_v8f32_fadd_f32:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT: addss %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v8f32_fadd_f32:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v8f32_fadd_f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v8f32_fadd_f32:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f32_fadd_f32:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f32_fadd_f32:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
   %x0 = extractelement <8 x float> %x, i32 0
   %x1 = extractelement <8 x float> %x, i32 1
   %x01 = fadd float %x0, %x1
@@ -639,18 +670,29 @@
 }
 
 define float @extract_extract_v8f32_fadd_f32_commute(<8 x float> %x) {
-; SSE3-LABEL: extract_extract_v8f32_fadd_f32_commute:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT: addss %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v8f32_fadd_f32_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
   %x0 = extractelement <8 x float> %x, i32 0
   %x1 = extractelement <8 x float> %x, i32 1
   %x01 = fadd float %x1, %x0
@@ -658,23 +700,35 @@
 }
 
 define float @extract_extract_v4f32_fsub_f32(<4 x float> %x) {
-; SSE3-LABEL: extract_extract_v4f32_fsub_f32:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT: subss %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fsub_f32:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT: subss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v4f32_fsub_f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v4f32_fsub_f32:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: hsubps %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fsub_f32:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fsub_f32:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhsubps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
   %x0 = extractelement <4 x float> %x, i32 0
   %x1 = extractelement <4 x float> %x, i32 1
   %x01 = fsub float %x0, %x1
   ret float %x01
 }
 
+; Negative test...or get hoppy and negate?
+
 define float @extract_extract_v4f32_fsub_f32_commute(<4 x float> %x) {
 ; SSE3-LABEL: extract_extract_v4f32_fsub_f32_commute:
 ; SSE3: # %bb.0:
@@ -695,24 +749,37 @@
 }
 
 define float @extract_extract_v8f32_fsub_f32(<8 x float> %x) {
-; SSE3-LABEL: extract_extract_v8f32_fsub_f32:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT: subss %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v8f32_fsub_f32:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT: subss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v8f32_fsub_f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v8f32_fsub_f32:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: hsubps %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f32_fsub_f32:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f32_fsub_f32:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhsubps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
   %x0 = extractelement <8 x float> %x, i32 0
   %x1 = extractelement <8 x float> %x, i32 1
   %x01 = fsub float %x0, %x1
   ret float %x01
 }
 
+; Negative test...or get hoppy and negate?
+
 define float @extract_extract_v8f32_fsub_f32_commute(<8 x float> %x) {
 ; SSE3-LABEL: extract_extract_v8f32_fsub_f32_commute:
 ; SSE3: # %bb.0:
@@ -734,19 +801,29 @@
 }
 
 define double @extract_extract_v2f64_fadd_f64(<2 x double> %x) {
-; SSE3-LABEL: extract_extract_v2f64_fadd_f64:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movapd %xmm0, %xmm1
-; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT: addsd %xmm0, %xmm1
-; SSE3-NEXT: movapd %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v2f64_fadd_f64:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT: addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v2f64_fadd_f64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v2f64_fadd_f64:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v2f64_fadd_f64:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v2f64_fadd_f64:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
   %x0 = extractelement <2 x double> %x, i32 0
   %x1 = extractelement <2 x double> %x, i32 1
   %x01 = fadd double %x0, %x1
@@ -754,19 +831,29 @@
 }
 
 define double @extract_extract_v2f64_fadd_f64_commute(<2 x double> %x) {
-; SSE3-LABEL: extract_extract_v2f64_fadd_f64_commute:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movapd %xmm0, %xmm1
-; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT: addsd %xmm0, %xmm1
-; SSE3-NEXT: movapd %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT: addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v2f64_fadd_f64_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
   %x0 = extractelement <2 x double> %x, i32 0
   %x1 = extractelement <2 x double> %x, i32 1
   %x01 = fadd double %x1, %x0
@@ -774,20 +861,31 @@
 }
 
 define double @extract_extract_v4f64_fadd_f64(<4 x double> %x) {
-; SSE3-LABEL: extract_extract_v4f64_fadd_f64:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movapd %xmm0, %xmm1
-; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT: addsd %xmm0, %xmm1
-; SSE3-NEXT: movapd %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4f64_fadd_f64:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT: addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v4f64_fadd_f64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v4f64_fadd_f64:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f64_fadd_f64:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f64_fadd_f64:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
   %x0 = extractelement <4 x double> %x, i32 0
   %x1 = extractelement <4 x double> %x, i32 1
   %x01 = fadd double %x0, %x1
@@ -795,20 +893,31 @@
 }
 
 define double @extract_extract_v4f64_fadd_f64_commute(<4 x double> %x) {
-; SSE3-LABEL: extract_extract_v4f64_fadd_f64_commute:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movapd %xmm0, %xmm1
-; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT: addsd %xmm0, %xmm1
-; SSE3-NEXT: movapd %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT: addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v4f64_fadd_f64_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
   %x0 = extractelement <4 x double> %x, i32 0
   %x1 = extractelement <4 x double> %x, i32 1
   %x01 = fadd double %x1, %x0
@@ -816,24 +925,36 @@
 }
 
 define double @extract_extract_v2f64_fsub_f64(<2 x double> %x) {
-; SSE3-LABEL: extract_extract_v2f64_fsub_f64:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movapd %xmm0, %xmm1
-; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT: subsd %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v2f64_fsub_f64:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT: subsd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v2f64_fsub_f64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v2f64_fsub_f64:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: hsubpd %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v2f64_fsub_f64:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT: vsubsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v2f64_fsub_f64:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhsubpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
   %x0 = extractelement <2 x double> %x, i32 0
   %x1 = extractelement <2 x double> %x, i32 1
   %x01 = fsub double %x0, %x1
   ret double %x01
 }
 
+; Negative test...or get hoppy and negate?
+
 define double @extract_extract_v2f64_fsub_f64_commute(<2 x double> %x) {
 ; SSE3-LABEL: extract_extract_v2f64_fsub_f64_commute:
 ; SSE3: # %bb.0:
@@ -855,25 +976,38 @@
 }
 
 define double @extract_extract_v4f64_fsub_f64(<4 x double> %x) {
-; SSE3-LABEL: extract_extract_v4f64_fsub_f64:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movapd %xmm0, %xmm1
-; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT: subsd %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4f64_fsub_f64:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT: subsd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v4f64_fsub_f64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v4f64_fsub_f64:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: hsubpd %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f64_fsub_f64:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT: vsubsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f64_fsub_f64:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhsubpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
   %x0 = extractelement <4 x double> %x, i32 0
   %x1 = extractelement <4 x double> %x, i32 1
   %x01 = fsub double %x0, %x1
   ret double %x01
 }
 
+; Negative test...or get hoppy and negate?
+
 define double @extract_extract_v4f64_fsub_f64_commute(<4 x double> %x) {
 ; SSE3-LABEL: extract_extract_v4f64_fsub_f64_commute:
 ; SSE3: # %bb.0:
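The commuted fsub cases remain negative tests; the "get hoppy and negate" comments hint at a possible follow-up. It would rely on the identity fsub %x1, %x0 == fneg (fsub %x0, %x1), so the commuted extract/extract subtract could still become a horizontal op plus a sign-bit flip. A hypothetical sketch of the IR shape and the lowering it could get, not implemented by this patch:

; Hypothetical follow-up only -- nothing below is generated by this patch.
define float @commuted_fsub_sketch(<4 x float> %x) {
  %x0 = extractelement <4 x float> %x, i32 0
  %x1 = extractelement <4 x float> %x, i32 1
  ; Could lower as fneg (extractelt (hsub X, X), 0):
  ;   hsubps %xmm0, %xmm0
  ;   xorps  {sign-bit mask}, %xmm0
  %x10 = fsub float %x1, %x0
  ret float %x10
}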