diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -19286,6 +19286,30 @@
     return V;
   }
 
+  // A shuffle of a concat of the same narrow vector can be reduced to use
+  // only low-half elements of a concat with undef:
+  // shuf (concat X, X), undef, Mask --> shuf (concat X, undef), undef, Mask'
+  if (N0.getOpcode() == ISD::CONCAT_VECTORS && N1.isUndef() &&
+      N0.getNumOperands() == 2 &&
+      N0.getOperand(0) == N0.getOperand(1)) {
+    int HalfNumElts = (int)NumElts / 2;
+    SmallVector<int, 8> NewMask;
+    for (unsigned i = 0; i != NumElts; ++i) {
+      int Idx = SVN->getMaskElt(i);
+      if (Idx >= HalfNumElts) {
+        assert(Idx < (int)NumElts && "Shuffle mask chooses undef op");
+        Idx -= HalfNumElts;
+      }
+      NewMask.push_back(Idx);
+    }
+    if (TLI.isShuffleMaskLegal(NewMask, VT)) {
+      SDValue UndefVec = DAG.getUNDEF(N0.getOperand(0).getValueType());
+      SDValue NewCat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
+                                   N0.getOperand(0), UndefVec);
+      return DAG.getVectorShuffle(VT, SDLoc(N), NewCat, N1, NewMask);
+    }
+  }
+
   // Attempt to combine a shuffle of 2 inputs of 'scalar sources' -
   // BUILD_VECTOR or SCALAR_TO_VECTOR into a single BUILD_VECTOR.
   if (Level < AfterLegalizeDAG && TLI.isTypeLegal(VT))
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
@@ -423,23 +419,19 @@
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm1 = ymm1[0,0,3,2]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[0,0,3,3]
 ; AVX1-NEXT:    ret{{[l|q]}}
 ;
 ; AVX2-LABEL: concat_self_v4i64:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX2-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,1]
 ; AVX2-NEXT:    ret{{[l|q]}}
 ;
 ; AVX512-LABEL: concat_self_v4i64:
 ; AVX512:         # %bb.0:
 ; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
+; AVX512-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,1]
 ; AVX512-NEXT:    ret{{[l|q]}}
   %cat = shufflevector <2 x i64> %x, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
   %s = shufflevector <4 x i64> %cat, <4 x i64> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
@@ -449,11 +445,8 @@
 define <8 x i32> @concat_self_v8i32(<4 x i32> %x) {
 ; AVX1-LABEL: concat_self_v8i32:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm1
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,0,1]
-; AVX1-NEXT:    vpermilps {{.*#+}} ymm1 = ymm1[3,2,1,0,4,6,5,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[3,2,1,0]
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[0,2,1,3]
 ; AVX1-NEXT:    vpaddd %xmm0, %xmm2, %xmm2
 ; AVX1-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
@@ -462,19 +455,19 @@
 ; AVX2-LABEL: concat_self_v8i32:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [7,6,5,4,0,2,1,3]
-; AVX2-NEXT:    vpermd %ymm0, %ymm1, %ymm1
-; AVX2-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm1
+; AVX2-NEXT:    vmovdqa {{.*#+}} ymm2 = [3,2,1,0,0,2,1,3]
+; AVX2-NEXT:    vpermd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    ret{{[l|q]}}
 ;
 ; AVX512-LABEL: concat_self_v8i32:
 ; AVX512:         # %bb.0:
 ; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512-NEXT:    vmovdqa {{.*#+}} ymm1 = [7,6,5,4,0,2,1,3]
-; AVX512-NEXT:    vpermd %ymm0, %ymm1, %ymm1
-; AVX512-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
+; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm1
+; AVX512-NEXT:    vmovdqa {{.*#+}} ymm2 = [3,2,1,0,0,2,1,3]
+; AVX512-NEXT:    vpermd %ymm0, %ymm2, %ymm0
+; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    ret{{[l|q]}}
   %cat = shufflevector <4 x i32> %x, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
   %s = shufflevector <8 x i32> %cat, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 0, i32 2, i32 1, i32 3>
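
For illustration, below is a standalone sketch of the mask rewrite the new combine performs. This is not LLVM API code: remapMask is a hypothetical helper that mirrors the DAGCombiner loop above, using plain std::vector in place of SmallVector. Because both concat operands are the same vector X, any mask index that selects the high half can be shifted down by HalfNumElts to select the identical element from the low half, which leaves the high half of the new concat free to be undef. Negative (undef) sentinel mask elements pass through unchanged, as in the real loop.

#include <cassert>
#include <cstdio>
#include <vector>

// Remap a shuffle mask defined over concat(X, X) so that it only reads
// the low half, making it valid over concat(X, undef).
static std::vector<int> remapMask(const std::vector<int> &Mask) {
  int NumElts = (int)Mask.size();
  int HalfNumElts = NumElts / 2;
  std::vector<int> NewMask;
  for (int Idx : Mask) {
    if (Idx >= HalfNumElts) {
      assert(Idx < NumElts && "Shuffle mask chooses undef op");
      Idx -= HalfNumElts; // same element, taken from the low-half copy
    }
    NewMask.push_back(Idx);
  }
  return NewMask;
}

int main() {
  // Mask from the concat_self_v4i64 test: <0,2,1,3> over concat(X, X).
  for (int Idx : remapMask({0, 2, 1, 3}))
    std::printf("%d ", Idx); // prints: 0 0 1 1 (cf. vpermpd ymm0[0,0,1,1])
  std::printf("\n");
  return 0;
}

The actual combine additionally gates the rewrite on TLI.isShuffleMaskLegal, so the narrowed mask is only used when the target can lower it directly.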