Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -540,6 +540,10 @@
       // Use ANDPD and ORPD to simulate FCOPYSIGN.
       setOperationAction(ISD::FCOPYSIGN, VT, Custom);
 
+      // These might be better off as horizontal vector ops.
+      setOperationAction(ISD::FADD, VT, Custom);
+      setOperationAction(ISD::FSUB, VT, Custom);
+
      // We don't support sin/cos/fmod
      setOperationAction(ISD::FSIN , VT, Expand);
      setOperationAction(ISD::FCOS , VT, Expand);
@@ -18335,6 +18339,63 @@
   return !IsSingleSource || IsOptimizingSize || HasFastHOps;
 }
 
+/// Depending on uarch and/or optimizing for size, we might prefer to use a
+/// vector operation in place of the typical scalar operation.
+static SDValue lowerFaddFsub(SDValue Op, SelectionDAG &DAG,
+                             const X86Subtarget &Subtarget) {
+  MVT VT = Op.getSimpleValueType();
+  assert((VT == MVT::f32 || VT == MVT::f64) && "Only expecting float/double");
+
+  // If both operands have other uses, this is probably not profitable.
+  // Horizontal FP add/sub were added with SSE3.
+  SDValue LHS = Op.getOperand(0);
+  SDValue RHS = Op.getOperand(1);
+  if ((!LHS.hasOneUse() && !RHS.hasOneUse()) || !Subtarget.hasSSE3())
+    return Op;
+
+  if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+      RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
+      LHS.getOperand(0) != RHS.getOperand(0))
+    return Op;
+
+  if (!isa<ConstantSDNode>(LHS.getOperand(1)) ||
+      !isa<ConstantSDNode>(RHS.getOperand(1)) ||
+      !shouldUseHorizontalOp(true, DAG, Subtarget))
+    return Op;
+
+  // Allow commuted 'hadd' ops.
+  // TODO: Allow commuted fsub by negating the result of FHSUB?
+  // TODO: This can be extended to handle other adjacent extract pairs.
+  auto HOpcode = Op.getOpcode() == ISD::FADD ? X86ISD::FHADD : X86ISD::FHSUB;
+  unsigned LExtIndex = LHS.getConstantOperandVal(1);
+  unsigned RExtIndex = RHS.getConstantOperandVal(1);
+  if (LExtIndex == 1 && RExtIndex == 0 && HOpcode == X86ISD::FHADD)
+    std::swap(LExtIndex, RExtIndex);
+  if (LExtIndex != 0 || RExtIndex != 1)
+    return Op;
+
+  SDValue X = LHS.getOperand(0);
+  EVT VecVT = X.getValueType();
+  unsigned BitWidth = VecVT.getSizeInBits();
+  assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
+         "Not expecting illegal vector widths here");
+
+  // Creating a 256-bit horizontal op would be wasteful, and there is no
+  // 512-bit equivalent, so extract the 256/512-bit source op to 128-bit.
+  // This is free: ymm/zmm -> xmm.
+  SDLoc DL(Op);
+  if (BitWidth == 256 || BitWidth == 512)
+    X = extract128BitVector(X, 0, DAG, DL);
+
+  // fadd (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
+  // fadd (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
+  // fsub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
+  // The extract of element 0 is free: the scalar result is element 0.
+  SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
+  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, HOp,
+                     DAG.getIntPtrConstant(0, DL));
+}
+
 /// The only differences between FABS and FNEG are the mask and the logic op.
 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
@@ -26015,6 +26076,8 @@
   case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
   case ISD::LOAD: return LowerLoad(Op, Subtarget, DAG);
   case ISD::STORE: return LowerStore(Op, Subtarget, DAG);
+  case ISD::FADD:
+  case ISD::FSUB: return lowerFaddFsub(Op, DAG, Subtarget);
   case ISD::FABS:
   case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
   case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
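A quick way to see the equivalence the new lowering relies on is with plain SSE3 intrinsics. The snippet below is review commentary, not part of the patch: the low element of haddps/hsubps of a register with itself is exactly the x[0]+x[1] (resp. x[0]-x[1]) that the scalar op computed before. Compile with -msse3 or later to run it.

```c++
// Standalone sanity check of the hadd/hsub identity; assumes an SSE3 target.
#include <pmmintrin.h> // SSE3: _mm_hadd_ps, _mm_hsub_ps
#include <cassert>

int main() {
  __m128 x = _mm_set_ps(8.0f, 4.0f, 2.0f, 1.0f); // x[0]=1, x[1]=2, x[2]=4, x[3]=8
  // fadd (extractelt X, 0), (extractelt X, 1) --> extractelt (hadd X, X), 0
  assert(_mm_cvtss_f32(_mm_hadd_ps(x, x)) == 1.0f + 2.0f);
  // fsub (extractelt X, 0), (extractelt X, 1) --> extractelt (hsub X, X), 0
  assert(_mm_cvtss_f32(_mm_hsub_ps(x, x)) == 1.0f - 2.0f);
  return 0;
}
```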
Index: lib/Target/X86/X86TargetTransformInfo.cpp
===================================================================
--- lib/Target/X86/X86TargetTransformInfo.cpp
+++ lib/Target/X86/X86TargetTransformInfo.cpp
@@ -832,6 +832,16 @@
     { ISD::FDIV, MVT::v4f32, 39 }, // Pentium IV from http://www.agner.org/
     { ISD::FDIV, MVT::f64, 38 }, // Pentium IV from http://www.agner.org/
     { ISD::FDIV, MVT::v2f64, 69 }, // Pentium IV from http://www.agner.org/
+
+    // The default cost model may consider these basic ops (addsd/subsd) more
+    // expensive just because we custom lower them. Explicitly set the costs
+    // here so they are independent of our lowering implementation.
+    // TODO: The cost of "2" for FP ops is apparently because P4-era chips ran
+    // their integer units twice as fast as their FP units. But these costs
+    // should be relative to the other FP costs above, so they should be "1"
+    // (or, alternatively, the other FP costs should be scaled up by a factor of 2).
+    { ISD::FADD, MVT::f64, 2 },
+    { ISD::FSUB, MVT::f64, 2 }
   };
 
   if (ST->hasSSE2())
@@ -841,6 +851,16 @@
   static const CostTblEntry SSE1CostTable[] = {
     { ISD::FDIV, MVT::f32, 17 }, // Pentium III from http://www.agner.org/
     { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
+
+    // The default cost model may consider these basic ops (addss/subss) more
+    // expensive just because we custom lower them. Explicitly set the costs
+    // here so they are independent of our lowering implementation.
+    // TODO: The cost of "2" for FP ops is apparently because P4-era chips ran
+    // their integer units twice as fast as their FP units. But these costs
+    // should be relative to the other FP costs above, so they should be "1"
+    // (or, alternatively, the other FP costs should be scaled up by a factor of 2).
+    { ISD::FADD, MVT::f32, 2 },
+    { ISD::FSUB, MVT::f32, 2 }
   };
 
   if (ST->hasSSE1())
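For reviewers unfamiliar with these tables: lookup is first-match on (opcode, type), so an explicit FADD/FSUB entry pins the cost no matter what the generic "custom-lowered ops are expensive" fallback would charge. Below is a self-contained toy model of that behavior; the simplified enums and the `lookupCost` helper are invented for illustration and are not LLVM's actual API.

```c++
// Toy first-match cost-table lookup, modeling how the entries above behave.
#include <cstdio>

struct CostTblEntry { int Opcode; int Type; unsigned Cost; };
enum Opcode { FADD, FSUB, FDIV };
enum Type { f32, f64 };

static const CostTblEntry SSE2CostTable[] = {
    {FDIV, f64, 38}, // Pentium IV from http://www.agner.org/
    {FADD, f64, 2},  // explicit entries: the cost stays independent of the
    {FSUB, f64, 2},  // new custom lowering of scalar fadd/fsub
};

// First matching (opcode, type) entry wins; -1 means "no entry, use fallback".
static int lookupCost(Opcode Op, Type Ty) {
  for (const CostTblEntry &E : SSE2CostTable)
    if (E.Opcode == Op && E.Type == Ty)
      return static_cast<int>(E.Cost);
  return -1;
}

int main() {
  std::printf("fadd f64 cost: %d\n", lookupCost(FADD, f64)); // 2
  std::printf("fdiv f64 cost: %d\n", lookupCost(FDIV, f64)); // 38
  return 0;
}
```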
Index: test/CodeGen/X86/haddsub-undef.ll
===================================================================
--- test/CodeGen/X86/haddsub-undef.ll
+++ test/CodeGen/X86/haddsub-undef.ll
@@ -84,17 +84,27 @@
 }
 
 define <4 x float> @test4_undef(<4 x float> %a, <4 x float> %b) {
-; SSE-LABEL: test4_undef:
-; SSE: # %bb.0:
-; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-NEXT: addss %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE-SLOW-LABEL: test4_undef:
+; SSE-SLOW: # %bb.0:
+; SSE-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE-SLOW-NEXT: retq
 ;
-; AVX-LABEL: test4_undef:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; SSE-FAST-LABEL: test4_undef:
+; SSE-FAST: # %bb.0:
+; SSE-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: test4_undef:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: test4_undef:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
 %vecext = extractelement <4 x float> %a, i32 0
 %vecext1 = extractelement <4 x float> %a, i32 1
 %add = fadd float %vecext, %vecext1
@@ -103,19 +113,29 @@
 }
 
 define <2 x double> @test5_undef(<2 x double> %a, <2 x double> %b) {
-; SSE-LABEL: test5_undef:
-; SSE: # %bb.0:
-; SSE-NEXT: movapd %xmm0, %xmm1
-; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE-NEXT: addsd %xmm0, %xmm1
-; SSE-NEXT: movapd %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE-SLOW-LABEL: test5_undef:
+; SSE-SLOW: # %bb.0:
+; SSE-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE-SLOW-NEXT: addsd %xmm0, %xmm1
+; SSE-SLOW-NEXT: movapd %xmm1, %xmm0
+; SSE-SLOW-NEXT: retq
 ;
-; AVX-LABEL: test5_undef:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; SSE-FAST-LABEL: test5_undef:
+; SSE-FAST: # %bb.0:
+; SSE-FAST-NEXT: haddpd %xmm0, %xmm0
+; SSE-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: test5_undef:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: test5_undef:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
 %vecext = extractelement <2 x double> %a, i32 0
 %vecext1 = extractelement <2 x double> %a, i32 1
 %add = fadd double %vecext, %vecext1
@@ -166,27 +186,48 @@
 }
 
 define <4 x float> @test8_undef(<4 x float> %a, <4 x float> %b) {
-; SSE-LABEL: test8_undef:
-; SSE: # %bb.0:
-; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-NEXT: addss %xmm0, %xmm1
-; SSE-NEXT: movaps %xmm0, %xmm2
-; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; SSE-NEXT: addss %xmm2, %xmm0
-; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
-; SSE-NEXT: movaps %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE-SLOW-LABEL: test8_undef:
+; SSE-SLOW: # %bb.0:
+; SSE-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-SLOW-NEXT: addss %xmm0, %xmm1
+; SSE-SLOW-NEXT: movaps %xmm0, %xmm2
+; SSE-SLOW-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
+; SSE-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE-SLOW-NEXT: addss %xmm2, %xmm0
+; SSE-SLOW-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-SLOW-NEXT: movaps %xmm1, %xmm0
+; SSE-SLOW-NEXT: retq
 ;
-; AVX-LABEL: test8_undef:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
-; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX-NEXT: vaddss %xmm0, %xmm2, %xmm0
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
-; AVX-NEXT: retq
+; SSE-FAST-LABEL: test8_undef:
+; SSE-FAST: # %bb.0:
+; SSE-FAST-NEXT: movaps %xmm0, %xmm1
+; SSE-FAST-NEXT: haddps %xmm0, %xmm1
+; SSE-FAST-NEXT: movaps %xmm0, %xmm2
+; SSE-FAST-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
+; SSE-FAST-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; SSE-FAST-NEXT: addss %xmm2, %xmm0
+; SSE-FAST-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; SSE-FAST-NEXT: movaps %xmm1, %xmm0
+; SSE-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: test8_undef:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm1
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-SLOW-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX-SLOW-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: test8_undef:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm1
+; AVX-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-FAST-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX-FAST-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-FAST-NEXT: retq
 %vecext = extractelement <4 x float> %a, i32 0
 %vecext1 = extractelement <4 x float> %a, i32 1
 %add = fadd float %vecext, %vecext1
@@ -241,14 +282,21 @@
 }
 
 define <8 x float> @test11_undef(<8 x float> %a, <8 x float> %b) {
-; SSE-LABEL: test11_undef:
-; SSE: # %bb.0:
-; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE-NEXT: addss %xmm1, %xmm0
-; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
-; SSE-NEXT: addss %xmm3, %xmm1
-; SSE-NEXT: movddup {{.*#+}} xmm1 = xmm1[0,0]
-; SSE-NEXT: retq
+; SSE-SLOW-LABEL: test11_undef:
+; SSE-SLOW: # %bb.0:
+; SSE-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; SSE-SLOW-NEXT: addss %xmm3, %xmm1
+; SSE-SLOW-NEXT: movddup {{.*#+}} xmm1 = xmm1[0,0]
+; SSE-SLOW-NEXT: retq
+;
+; SSE-FAST-LABEL: test11_undef:
+; SSE-FAST: # %bb.0:
+; SSE-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE-FAST-NEXT: haddps %xmm3, %xmm3
+; SSE-FAST-NEXT: movddup {{.*#+}} xmm1 = xmm3[0,0]
+; SSE-FAST-NEXT: retq
 ;
 ; AVX-LABEL: test11_undef:
 ; AVX: # %bb.0:
@@ -334,23 +382,40 @@
 ; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm0, %xmm0
 ; AVX1-FAST-NEXT: retq
 ;
-; AVX512-LABEL: test13_v16f32_undef:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
-; AVX512-NEXT: vaddss %xmm3, %xmm2, %xmm2
-; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
-; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm2
-; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
-; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
-; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; AVX512-NEXT: vaddss %xmm0, %xmm2, %xmm0
-; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
-; AVX512-NEXT: retq
+; AVX512-SLOW-LABEL: test13_v16f32_undef:
+; AVX512-SLOW: # %bb.0:
+; AVX512-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX512-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm1
+; AVX512-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-SLOW-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
+; AVX512-SLOW-NEXT: vaddss %xmm3, %xmm2, %xmm2
+; AVX512-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX512-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX512-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX512-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm2
+; AVX512-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX512-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512-SLOW-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX512-SLOW-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512-SLOW-NEXT: retq
+;
+; AVX512-FAST-LABEL: test13_v16f32_undef:
+; AVX512-FAST: # %bb.0:
+; AVX512-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm1
+; AVX512-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-FAST-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
+; AVX512-FAST-NEXT: vaddss %xmm3, %xmm2, %xmm2
+; AVX512-FAST-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX512-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX512-FAST-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX512-FAST-NEXT: vaddss %xmm2, %xmm0, %xmm2
+; AVX512-FAST-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; AVX512-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX512-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX512-FAST-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX512-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX512-FAST-NEXT: retq
 %vecext = extractelement <16 x float> %a, i32 0
 %vecext1 = extractelement <16 x float> %a, i32 1
 %add1 = fadd float %vecext, %vecext1
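Why element 0 is all these undef tests need: haddps of a register with itself produces {x0+x1, x2+x3, x0+x1, x2+x3}, so the scalar sum already sits in lane 0 and the remaining lanes can back the undef insertions for free. A standalone illustration with plain intrinsics (commentary only, not part of the patch):

```c++
// Shows the full lane layout of haddps(x, x); only lane 0 is load-bearing here.
#include <pmmintrin.h> // SSE3: _mm_hadd_ps
#include <cstdio>

int main() {
  __m128 x = _mm_set_ps(40.0f, 30.0f, 20.0f, 10.0f); // lanes 0..3 = 10,20,30,40
  alignas(16) float r[4];
  _mm_store_ps(r, _mm_hadd_ps(x, x));
  std::printf("%g %g %g %g\n", r[0], r[1], r[2], r[3]); // prints: 30 70 30 70
  return 0;
}
```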
Index: test/CodeGen/X86/haddsub.ll
===================================================================
--- test/CodeGen/X86/haddsub.ll
+++ test/CodeGen/X86/haddsub.ll
@@ -588,17 +588,27 @@
 ; 128-bit vectors, float/double, fadd/fsub
 
 define float @extract_extract_v4f32_fadd_f32(<4 x float> %x) {
-; SSE3-LABEL: extract_extract_v4f32_fadd_f32:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT: addss %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v4f32_fadd_f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <4 x float> %x, i32 0
 %x1 = extractelement <4 x float> %x, i32 1
 %x01 = fadd float %x0, %x1
@@ -606,17 +616,27 @@
 }
 
 define float @extract_extract_v4f32_fadd_f32_commute(<4 x float> %x) {
-; SSE3-LABEL: extract_extract_v4f32_fadd_f32_commute:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT: addss %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v4f32_fadd_f32_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32_commute:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <4 x float> %x, i32 0
 %x1 = extractelement <4 x float> %x, i32 1
 %x01 = fadd float %x1, %x0
@@ -624,19 +644,29 @@
 }
 
 define double @extract_extract_v2f64_fadd_f64(<2 x double> %x) {
-; SSE3-LABEL: extract_extract_v2f64_fadd_f64:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movapd %xmm0, %xmm1
-; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT: addsd %xmm0, %xmm1
-; SSE3-NEXT: movapd %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v2f64_fadd_f64:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT: addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v2f64_fadd_f64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v2f64_fadd_f64:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v2f64_fadd_f64:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v2f64_fadd_f64:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <2 x double> %x, i32 0
 %x1 = extractelement <2 x double> %x, i32 1
 %x01 = fadd double %x0, %x1
@@ -644,19 +674,29 @@
 }
 
 define double @extract_extract_v2f64_fadd_f64_commute(<2 x double> %x) {
-; SSE3-LABEL: extract_extract_v2f64_fadd_f64_commute:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movapd %xmm0, %xmm1
-; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT: addsd %xmm0, %xmm1
-; SSE3-NEXT: movapd %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT: addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v2f64_fadd_f64_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v2f64_fadd_f64_commute:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <2 x double> %x, i32 0
 %x1 = extractelement <2 x double> %x, i32 1
 %x01 = fadd double %x1, %x0
@@ -664,17 +704,27 @@
 }
 
 define float @extract_extract_v4f32_fsub_f32(<4 x float> %x) {
-; SSE3-LABEL: extract_extract_v4f32_fsub_f32:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT: subss %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fsub_f32:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT: subss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v4f32_fsub_f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v4f32_fsub_f32:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: hsubps %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fsub_f32:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fsub_f32:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhsubps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <4 x float> %x, i32 0
 %x1 = extractelement <4 x float> %x, i32 1
 %x01 = fsub float %x0, %x1
@@ -701,18 +751,28 @@
 }
 
 define double @extract_extract_v2f64_fsub_f64(<2 x double> %x) {
-; SSE3-LABEL: extract_extract_v2f64_fsub_f64:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movapd %xmm0, %xmm1
-; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT: subsd %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v2f64_fsub_f64:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT: subsd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v2f64_fsub_f64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v2f64_fsub_f64:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: hsubpd %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v2f64_fsub_f64:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT: vsubsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v2f64_fsub_f64:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhsubpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <2 x double> %x, i32 0
 %x1 = extractelement <2 x double> %x, i32 1
 %x01 = fsub double %x0, %x1
@@ -742,18 +802,29 @@
 ; 256-bit vectors, float/double, fadd/fsub
 
 define float @extract_extract_v8f32_fadd_f32(<8 x float> %x) {
-; SSE3-LABEL: extract_extract_v8f32_fadd_f32:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT: addss %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v8f32_fadd_f32:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v8f32_fadd_f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v8f32_fadd_f32:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f32_fadd_f32:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f32_fadd_f32:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <8 x float> %x, i32 0
 %x1 = extractelement <8 x float> %x, i32 1
 %x01 = fadd float %x0, %x1
@@ -761,18 +832,29 @@
 }
 
 define float @extract_extract_v8f32_fadd_f32_commute(<8 x float> %x) {
-; SSE3-LABEL: extract_extract_v8f32_fadd_f32_commute:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT: addss %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v8f32_fadd_f32_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f32_fadd_f32_commute:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <8 x float> %x, i32 0
 %x1 = extractelement <8 x float> %x, i32 1
 %x01 = fadd float %x1, %x0
@@ -780,20 +862,31 @@
 }
 
 define double @extract_extract_v4f64_fadd_f64(<4 x double> %x) {
-; SSE3-LABEL: extract_extract_v4f64_fadd_f64:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movapd %xmm0, %xmm1
-; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT: addsd %xmm0, %xmm1
-; SSE3-NEXT: movapd %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4f64_fadd_f64:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT: addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v4f64_fadd_f64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v4f64_fadd_f64:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f64_fadd_f64:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f64_fadd_f64:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <4 x double> %x, i32 0
 %x1 = extractelement <4 x double> %x, i32 1
 %x01 = fadd double %x0, %x1
@@ -801,20 +894,31 @@
 }
 
 define double @extract_extract_v4f64_fadd_f64_commute(<4 x double> %x) {
-; SSE3-LABEL: extract_extract_v4f64_fadd_f64_commute:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movapd %xmm0, %xmm1
-; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT: addsd %xmm0, %xmm1
-; SSE3-NEXT: movapd %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT: addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v4f64_fadd_f64_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f64_fadd_f64_commute:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <4 x double> %x, i32 0
 %x1 = extractelement <4 x double> %x, i32 1
 %x01 = fadd double %x1, %x0
@@ -822,24 +926,37 @@
 }
 
 define float @extract_extract_v8f32_fsub_f32(<8 x float> %x) {
-; SSE3-LABEL: extract_extract_v8f32_fsub_f32:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT: subss %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v8f32_fsub_f32:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT: subss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v8f32_fsub_f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v8f32_fsub_f32:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: hsubps %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f32_fsub_f32:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f32_fsub_f32:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhsubps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <8 x float> %x, i32 0
 %x1 = extractelement <8 x float> %x, i32 1
 %x01 = fsub float %x0, %x1
 ret float %x01
 }
 
+; Negative test...or get hoppy and negate?
+
 define float @extract_extract_v8f32_fsub_f32_commute(<8 x float> %x) {
 ; SSE3-LABEL: extract_extract_v8f32_fsub_f32_commute:
 ; SSE3: # %bb.0:
@@ -861,25 +978,38 @@
 }
 
 define double @extract_extract_v4f64_fsub_f64(<4 x double> %x) {
-; SSE3-LABEL: extract_extract_v4f64_fsub_f64:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movapd %xmm0, %xmm1
-; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT: subsd %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4f64_fsub_f64:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT: subsd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v4f64_fsub_f64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v4f64_fsub_f64:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: hsubpd %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f64_fsub_f64:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT: vsubsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f64_fsub_f64:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhsubpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <4 x double> %x, i32 0
 %x1 = extractelement <4 x double> %x, i32 1
 %x01 = fsub double %x0, %x1
 ret double %x01
 }
 
+; Negative test...or get hoppy and negate?
+
 define double @extract_extract_v4f64_fsub_f64_commute(<4 x double> %x) {
 ; SSE3-LABEL: extract_extract_v4f64_fsub_f64_commute:
 ; SSE3: # %bb.0:
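The "get hoppy and negate" TODO above could look like the following standalone sketch. This is hypothetical codegen, not what the patch emits: the commuted case x[1] - x[0] equals -(x[0] - x[1]), i.e. hsubps followed by a sign-bit flip (xorps with -0.0).

```c++
// Possible lowering for the commuted-fsub negative tests; assumes SSE3.
#include <pmmintrin.h>
#include <cassert>

static float sub_commuted(__m128 x) {
  __m128 hsub = _mm_hsub_ps(x, x);                   // lane 0 = x[0] - x[1]
  __m128 neg = _mm_xor_ps(hsub, _mm_set1_ps(-0.0f)); // flip all sign bits
  return _mm_cvtss_f32(neg);                         // = x[1] - x[0]
}

int main() {
  __m128 x = _mm_set_ps(0.0f, 0.0f, 7.0f, 3.0f); // x[0]=3, x[1]=7
  assert(sub_commuted(x) == 4.0f);               // 7 - 3
  return 0;
}
```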
@@ -904,18 +1034,29 @@
 ; 512-bit vectors, float/double, fadd/fsub
 
 define float @extract_extract_v16f32_fadd_f32(<16 x float> %x) {
-; SSE3-LABEL: extract_extract_v16f32_fadd_f32:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT: addss %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v16f32_fadd_f32:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v16f32_fadd_f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v16f32_fadd_f32:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16f32_fadd_f32:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v16f32_fadd_f32:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <16 x float> %x, i32 0
 %x1 = extractelement <16 x float> %x, i32 1
 %x01 = fadd float %x0, %x1
@@ -923,18 +1064,29 @@
 }
 
 define float @extract_extract_v16f32_fadd_f32_commute(<16 x float> %x) {
-; SSE3-LABEL: extract_extract_v16f32_fadd_f32_commute:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT: addss %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v16f32_fadd_f32_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v16f32_fadd_f32_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v16f32_fadd_f32_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16f32_fadd_f32_commute:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v16f32_fadd_f32_commute:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <16 x float> %x, i32 0
 %x1 = extractelement <16 x float> %x, i32 1
 %x01 = fadd float %x1, %x0
@@ -942,20 +1094,31 @@
 }
 
 define double @extract_extract_v8f64_fadd_f64(<8 x double> %x) {
-; SSE3-LABEL: extract_extract_v8f64_fadd_f64:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movapd %xmm0, %xmm1
-; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT: addsd %xmm0, %xmm1
-; SSE3-NEXT: movapd %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v8f64_fadd_f64:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT: addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v8f64_fadd_f64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v8f64_fadd_f64:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f64_fadd_f64:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f64_fadd_f64:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <8 x double> %x, i32 0
 %x1 = extractelement <8 x double> %x, i32 1
 %x01 = fadd double %x0, %x1
@@ -963,20 +1126,31 @@
 }
 
 define double @extract_extract_v8f64_fadd_f64_commute(<8 x double> %x) {
-; SSE3-LABEL: extract_extract_v8f64_fadd_f64_commute:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movapd %xmm0, %xmm1
-; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT: addsd %xmm0, %xmm1
-; SSE3-NEXT: movapd %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v8f64_fadd_f64_commute:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT: addsd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: movapd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v8f64_fadd_f64_commute:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v8f64_fadd_f64_commute:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: haddpd %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f64_fadd_f64_commute:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f64_fadd_f64_commute:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <8 x double> %x, i32 0
 %x1 = extractelement <8 x double> %x, i32 1
 %x01 = fadd double %x1, %x0
@@ -984,18 +1158,29 @@
 }
 
 define float @extract_extract_v16f32_fsub_f32(<16 x float> %x) {
-; SSE3-LABEL: extract_extract_v16f32_fsub_f32:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT: subss %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v16f32_fsub_f32:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT: subss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v16f32_fsub_f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v16f32_fsub_f32:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: hsubps %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v16f32_fsub_f32:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v16f32_fsub_f32:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhsubps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <16 x float> %x, i32 0
 %x1 = extractelement <16 x float> %x, i32 1
 %x01 = fsub float %x0, %x1
@@ -1023,19 +1208,30 @@
 }
 
 define double @extract_extract_v8f64_fsub_f64(<8 x double> %x) {
-; SSE3-LABEL: extract_extract_v8f64_fsub_f64:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movapd %xmm0, %xmm1
-; SSE3-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
-; SSE3-NEXT: subsd %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v8f64_fsub_f64:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movapd %xmm0, %xmm1
+; SSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
+; SSE3-SLOW-NEXT: subsd %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v8f64_fsub_f64:
-; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vzeroupper
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v8f64_fsub_f64:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: hsubpd %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v8f64_fsub_f64:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
+; AVX-SLOW-NEXT: vsubsd %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v8f64_fsub_f64:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhsubpd %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <8 x double> %x, i32 0
 %x1 = extractelement <8 x double> %x, i32 1
 %x01 = fsub double %x0, %x1
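On the "this is free: ymm/zmm -> xmm" comment in lowerFaddFsub: since only elements 0 and 1 are read, the wide source can be reinterpreted as its low 128 bits at no cost before the 128-bit horizontal op. A standalone sketch (commentary, not part of the patch; the cast intrinsic emits no instructions; compile with -mavx):

```c++
// Models the 256-bit case of the lowering: free ymm->xmm, then one haddps.
#include <immintrin.h>
#include <cassert>

static float low_pair_sum(__m256 y) {
  __m128 x = _mm256_castps256_ps128(y);    // low xmm half of the ymm, free
  return _mm_cvtss_f32(_mm_hadd_ps(x, x)); // = y[0] + y[1]
}

int main() {
  __m256 y = _mm256_set_ps(0, 0, 0, 0, 0, 0, 11.0f, 31.0f); // y[0]=31, y[1]=11
  assert(low_pair_sum(y) == 42.0f);
  return 0;
}
```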
@@ -1066,19 +1262,31 @@
 ; Check output when 1 or both extracts have extra uses.
 
 define float @extract_extract_v4f32_fadd_f32_uses1(<4 x float> %x, float* %p) {
-; SSE3-LABEL: extract_extract_v4f32_fadd_f32_uses1:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movss %xmm0, (%rdi)
-; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT: addss %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movss %xmm0, (%rdi)
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v4f32_fadd_f32_uses1:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovss %xmm0, (%rdi)
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: movss %xmm0, (%rdi)
+; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovss %xmm0, (%rdi)
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32_uses1:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vmovss %xmm0, (%rdi)
+; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <4 x float> %x, i32 0
 store float %x0, float* %p
 %x1 = extractelement <4 x float> %x, i32 1
@@ -1087,19 +1295,32 @@
 }
 
 define float @extract_extract_v4f32_fadd_f32_uses2(<4 x float> %x, float* %p) {
-; SSE3-LABEL: extract_extract_v4f32_fadd_f32_uses2:
-; SSE3: # %bb.0:
-; SSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; SSE3-NEXT: movss %xmm1, (%rdi)
-; SSE3-NEXT: addss %xmm1, %xmm0
-; SSE3-NEXT: retq
+; SSE3-SLOW-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+; SSE3-SLOW: # %bb.0:
+; SSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-SLOW-NEXT: movss %xmm1, (%rdi)
+; SSE3-SLOW-NEXT: addss %xmm1, %xmm0
+; SSE3-SLOW-NEXT: retq
 ;
-; AVX-LABEL: extract_extract_v4f32_fadd_f32_uses2:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; AVX-NEXT: vmovss %xmm1, (%rdi)
-; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; SSE3-FAST-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+; SSE3-FAST: # %bb.0:
+; SSE3-FAST-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; SSE3-FAST-NEXT: movss %xmm1, (%rdi)
+; SSE3-FAST-NEXT: haddps %xmm0, %xmm0
+; SSE3-FAST-NEXT: retq
+;
+; AVX-SLOW-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vmovss %xmm1, (%rdi)
+; AVX-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: extract_extract_v4f32_fadd_f32_uses2:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vextractps $1, %xmm0, (%rdi)
+; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: retq
 %x0 = extractelement <4 x float> %x, i32 0
 %x1 = extractelement <4 x float> %x, i32 1
 store float %x1, float* %p