Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -11472,7 +11472,7 @@
   }
 
   // If it is a splat, check if the argument vector is another splat or a
-  // build_vector with all scalar elements the same.
+  // build_vector.
   if (SVN->isSplat() && SVN->getSplatIndex() < (int)NumElts) {
     SDNode *V = N0.getNode();
@@ -11509,6 +11509,17 @@
       // Splat of <x, x, x, x>, return <x, x, x, x>
       if (AllSame)
         return N0;
+
+      // If the splatted element is a constant, just build the vector out of
+      // constants directly.
+      const SDValue &Splatted = V->getOperand(SVN->getSplatIndex());
+      if (isa<ConstantSDNode>(Splatted) || isa<ConstantFPSDNode>(Splatted)) {
+        SmallVector<SDValue, 8> Ops;
+        for (unsigned i = 0; i != NumElts; ++i) {
+          Ops.push_back(Splatted);
+        }
+        return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), V->getValueType(0), Ops);
+      }
     }
   }
Index: test/CodeGen/X86/splat-const.ll
===================================================================
--- test/CodeGen/X86/splat-const.ll
+++ test/CodeGen/X86/splat-const.ll
@@ -0,0 +1,40 @@
+; RUN: llc < %s -mcpu=penryn | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mcpu=sandybridge | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mcpu=haswell | FileCheck %s --check-prefix=AVX2
+; This checks that lowering for creation of constant vectors is sane and
+; doesn't use redundant shuffles. (fixes PR22276)
+target triple = "x86_64-unknown-unknown"
+
+define <4 x i32> @zero_vector() {
+; SSE-LABEL: zero_vector:
+; SSE: xorps %xmm0, %xmm0
+; SSE-NEXT: retq
+; AVX-LABEL: zero_vector:
+; AVX: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+; AVX2-LABEL: zero_vector:
+; AVX2: vxorps %xmm0, %xmm0, %xmm0
+; AVX2-NEXT: retq
+  %zero = insertelement <4 x i32> undef, i32 0, i32 0
+  %splat = shufflevector <4 x i32> %zero, <4 x i32> undef, <4 x i32> zeroinitializer
+  ret <4 x i32> %splat
+}
+
+; Note that for the "const_vector" versions, lowering that uses a shuffle
+; instead of a load would be legitimate, if it's a single broadcast shuffle.
+; (as opposed to the previous mess)
+; However, this is not the current preferred lowering.
+define <4 x i32> @const_vector() {
+; SSE-LABEL: const_vector:
+; SSE: movaps {{.*}}, %xmm0 # xmm0 = [42,42,42,42]
+; SSE-NEXT: retq
+; AVX-LABEL: const_vector:
+; AVX: vmovaps {{.*}}, %xmm0 # xmm0 = [42,42,42,42]
+; AVX-NEXT: retq
+; AVX2-LABEL: const_vector:
+; AVX2: vbroadcastss {{[^%].*}}, %xmm0
+; AVX2-NEXT: retq
+  %zero = insertelement <4 x i32> undef, i32 42, i32 0
+  %splat = shufflevector <4 x i32> %zero, <4 x i32> undef, <4 x i32> zeroinitializer
+  ret <4 x i32> %splat
+}
Index: test/CodeGen/X86/sse41.ll
===================================================================
--- test/CodeGen/X86/sse41.ll
+++ test/CodeGen/X86/sse41.ll
@@ -1005,14 +1005,14 @@
 ; X32-LABEL: insertps_pr20411:
 ; X32:       ## BB#0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    pshufd {{.*#+}} xmm0 = mem[3,1,2,3]
+; X32-NEXT:    movaps {{.*#+}} xmm0 = [3,3,3,3]
 ; X32-NEXT:    insertps $-36, LCPI49_1+12, %xmm0
 ; X32-NEXT:    movups %xmm0, (%eax)
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: insertps_pr20411:
 ; X64:       ## BB#0:
-; X64-NEXT:    pshufd {{.*#+}} xmm0 = mem[3,1,2,3]
+; X64-NEXT:    movaps {{.*#+}} xmm0 = [3,3,3,3]
 ; X64-NEXT:    insertps $-36, LCPI49_1+{{.*}}(%rip), %xmm0
 ; X64-NEXT:    movups %xmm0, (%rdi)
 ; X64-NEXT:    retq
Index: test/CodeGen/X86/widen_shuffle-1.ll
===================================================================
--- test/CodeGen/X86/widen_shuffle-1.ll
+++ test/CodeGen/X86/widen_shuffle-1.ll
@@ -82,8 +82,8 @@
 ; CHECK-LABEL: shuf5:
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    movdqa {{.*#+}} xmm0 = <4,33,u,u,u,u,u,u>
-; CHECK-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[2,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; CHECK-NEXT:    movdqa {{.*#+}} xmm0 = [33,33,33,33,33,33,33,33]
+; CHECK-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; CHECK-NEXT:    movlpd %xmm0, (%eax)
 ; CHECK-NEXT:    retl
   %v = shufflevector <2 x i8> <i8 4, i8 33>, <2 x i8> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>