Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -3859,14 +3859,23 @@
 }
 
 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
-/// is suitable for input to PSHUFD or PSHUFW. That is, it doesn't reference
-/// the second operand.
-static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT) {
-  if (VT == MVT::v4f32 || VT == MVT::v4i32 )
-    return (Mask[0] < 4 && Mask[1] < 4 && Mask[2] < 4 && Mask[3] < 4);
-  if (VT == MVT::v2f64 || VT == MVT::v2i64)
-    return (Mask[0] < 2 && Mask[1] < 2);
-  return false;
+/// is suitable for input to PSHUFD. That is, it doesn't reference the other
+/// operand - by default it will match the first operand.
+static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
+                         bool TestSecondOperand = false) {
+  if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
+      VT != MVT::v2f64 && VT != MVT::v2i64)
+    return false;
+
+  unsigned NumElems = VT.getVectorNumElements();
+  unsigned Lo = TestSecondOperand ? NumElems : 0;
+  unsigned Hi = Lo + NumElems;
+
+  for (unsigned i = 0; i < NumElems; ++i)
+    if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
+      return false;
+
+  return true;
 }
 
 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
@@ -19638,7 +19647,9 @@
           isMOVLMask(M, SVT) ||
           isMOVHLPSMask(M, SVT) ||
           isSHUFPMask(M, SVT) ||
+          isSHUFPMask(M, SVT, /* Commuted */ true) ||
           isPSHUFDMask(M, SVT) ||
+          isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
           isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
           isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
           isPALIGNRMask(M, SVT, Subtarget) ||
Index: llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -614,22 +614,15 @@
 ; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3,0,1]
 ; AVX1-NEXT: vpermilpd {{.*#+}} ymm2 = ymm0[1,0,3,2]
 ; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2,3]
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
 ; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,3,2]
-; AVX1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: stress_test1:
 ; AVX2: # BB#0:
 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm1[3,1,1,0]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,1,2,3]
 ; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[3,3,1,3]
-; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,0]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
+; AVX2-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
 ; AVX2-NEXT: retq
   %c = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32>
   %d = shufflevector <4 x i64> %c, <4 x i64> undef, <4 x i32>
Index: llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -1121,12 +1121,7 @@
 define <4 x float> @combine_test1(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_test1:
 ; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm1, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: combine_test1:
@@ -1248,12 +1243,7 @@
 define <4 x i32> @combine_test6(<4 x i32> %a, <4 x i32> %b) {
 ; SSE2-LABEL: combine_test6:
 ; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm1, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: combine_test6:
@@ -1601,21 +1591,13 @@
 define <4 x float> @combine_test1b(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_test1b:
 ; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm1, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm2[0,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
 ; SSE2-NEXT: movaps %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: combine_test1b:
 ; SSSE3: # BB#0:
-; SSSE3-NEXT: movaps %xmm1, %xmm2
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm2[0,0]
-; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,0]
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
 ; SSSE3-NEXT: movaps %xmm1, %xmm0
 ; SSSE3-NEXT: retq
 ;
@@ -1637,36 +1619,25 @@
 define <4 x float> @combine_test2b(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_test2b:
 ; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm1, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[1,1]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0,0]
+; SSE2-NEXT: movaps %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: combine_test2b:
 ; SSSE3: # BB#0:
-; SSSE3-NEXT: movaps %xmm1, %xmm2
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[1,1]
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSSE3-NEXT: movaps %xmm2, %xmm0
+; SSSE3-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0,0]
+; SSSE3-NEXT: movapd %xmm1, %xmm0
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: combine_test2b:
 ; SSE41: # BB#0:
-; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[1,1]
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; SSE41-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0,0]
+; SSE41-NEXT: movapd %xmm1, %xmm0
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: combine_test2b:
 ; AVX: # BB#0:
-; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0],xmm1[1,1]
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0,0]
 ; AVX-NEXT: retq
   %1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32>
   %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32>
@@ -1698,21 +1669,13 @@
 define <4 x float> @combine_test4b(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_test4b:
 ; SSE2: # BB#0:
-; SSE2-NEXT: movaps %xmm1, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[3,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[0,2]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
 ; SSE2-NEXT: movaps %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: combine_test4b:
 ; SSSE3: # BB#0:
-; SSSE3-NEXT: movaps %xmm1, %xmm2
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,1,3]
-; SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm1[3,0]
-; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[0,2]
+; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
 ; SSSE3-NEXT: movaps %xmm1, %xmm0
 ; SSSE3-NEXT: retq
 ;
@@ -1968,17 +1931,11 @@
 ; SSE2-LABEL: combine_blend_01:
 ; SSE2: # BB#0:
 ; SSE2-NEXT: movsd %xmm1, %xmm0
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
-; SSE2-NEXT: movaps %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: combine_blend_01:
 ; SSSE3: # BB#0:
 ; SSSE3-NEXT: movsd %xmm1, %xmm0
-; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
-; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
-; SSSE3-NEXT: movaps %xmm1, %xmm0
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: combine_blend_01:
@@ -2113,16 +2070,12 @@
 define <4 x float> @combine_undef_input_test1(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_undef_input_test1:
 ; SSE2: # BB#0:
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,3,1]
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[1,2]
-; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: movsd %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: combine_undef_input_test1:
 ; SSSE3: # BB#0:
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,3,1]
-; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[1,2]
-; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: movsd %xmm1, %xmm0
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: combine_undef_input_test1:
@@ -2302,16 +2255,12 @@
 define <4 x float> @combine_undef_input_test11(<4 x float> %a, <4 x float> %b) {
 ; SSE2-LABEL: combine_undef_input_test11:
 ; SSE2: # BB#0:
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,3,1]
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[1,2]
-; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: movsd %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: combine_undef_input_test11:
 ; SSSE3: # BB#0:
-; SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,3,1]
-; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[1,2]
-; SSSE3-NEXT: movaps %xmm1, %xmm0
+; SSSE3-NEXT: movsd %xmm1, %xmm0
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: combine_undef_input_test11:
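
Note on the isPSHUFDMask change above: isUndefOrInRange(Mask[i], Lo, Hi) accepts an index that is either undef or falls in [Lo, Hi), so with TestSecondOperand the same routine recognizes masks that read only from the second input, which is what lets isShuffleMaskLegal report the commuted forms as legal. The following is a minimal standalone C++ sketch of that range test; the name isPshufdLikeMask, the explicit NumElems parameter, and the -1 undef sentinel are illustrative stand-ins, not the in-tree LLVM API.

#include <cassert>
#include <vector>

// Sketch of the widened mask check: every index must be undef (-1 here) or
// lie within the half of the concatenated <Op0, Op1> input selected by
// TestSecondOperand (first half by default, second half when true).
static bool isPshufdLikeMask(const std::vector<int> &Mask, unsigned NumElems,
                             bool TestSecondOperand = false) {
  unsigned Lo = TestSecondOperand ? NumElems : 0;
  unsigned Hi = Lo + NumElems;
  for (int M : Mask)
    if (M != -1 && (M < (int)Lo || M >= (int)Hi))
      return false;
  return true;
}

int main() {
  // <2,0,3,1> touches only the first v4 operand: accepted with the default call.
  assert(isPshufdLikeMask({2, 0, 3, 1}, 4));
  // <6,4,7,5> touches only the second operand: accepted by the commuted query.
  assert(isPshufdLikeMask({6, 4, 7, 5}, 4, /*TestSecondOperand=*/true));
  // <0,4,1,5> mixes both operands, so neither query accepts it.
  assert(!isPshufdLikeMask({0, 4, 1, 5}, 4));
  assert(!isPshufdLikeMask({0, 4, 1, 5}, 4, /*TestSecondOperand=*/true));
  return 0;
}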