diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -16974,6 +16974,15 @@
   }
   assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
 
+  // This will be just movd/movq/movss/movsd
+  if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode())) {
+    if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
+        (EltVT == MVT::i64 && Subtarget.is64Bit())) {
+      N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
+      return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
+    }
+  }
+
   // Transform it so it match pinsr{b,w} which expects a GR32 as its second
   // argument. SSE41 required for pinsrb.
   if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
diff --git a/llvm/test/CodeGen/X86/insertelement-zero.ll b/llvm/test/CodeGen/X86/insertelement-zero.ll
--- a/llvm/test/CodeGen/X86/insertelement-zero.ll
+++ b/llvm/test/CodeGen/X86/insertelement-zero.ll
@@ -526,19 +526,16 @@
 ;
 ; SSE41-LABEL: PR41512:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    pxor %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm0, %xmm0
-; SSE41-NEXT:    pinsrd $0, %edi, %xmm0
-; SSE41-NEXT:    pinsrd $0, %esi, %xmm1
+; SSE41-NEXT:    movd %edi, %xmm0
+; SSE41-NEXT:    movd %esi, %xmm1
 ; SSE41-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: PR41512:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vpinsrd $0, %edi, %xmm0, %xmm1
-; AVX-NEXT:    vpinsrd $0, %esi, %xmm0, %xmm0
-; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT:    vmovd %edi, %xmm0
+; AVX-NEXT:    vmovd %esi, %xmm1
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    retq
   %ins1 = insertelement <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i32 %x, i32 0
   %ins2 = insertelement <4 x i32> <i32 0, i32 0, i32 0, i32 0>, i32 %y, i32 0
@@ -567,26 +564,22 @@
 ;
 ; SSE41-LABEL: PR41512_v4i64:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    pxor %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm0, %xmm0
-; SSE41-NEXT:    pinsrq $0, %rdi, %xmm0
-; SSE41-NEXT:    pinsrq $0, %rsi, %xmm1
+; SSE41-NEXT:    movq %rdi, %xmm0
+; SSE41-NEXT:    movq %rsi, %xmm1
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: PR41512_v4i64:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpxor %xmm0, %xmm0, %xmm0
-; AVX1-NEXT:    vpinsrq $0, %rdi, %xmm0, %xmm1
-; AVX1-NEXT:    vpinsrq $0, %rsi, %xmm0, %xmm0
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    vmovq %rdi, %xmm0
+; AVX1-NEXT:    vmovq %rsi, %xmm1
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: PR41512_v4i64:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
-; AVX2-NEXT:    vpinsrq $0, %rdi, %xmm0, %xmm1
-; AVX2-NEXT:    vpinsrq $0, %rsi, %xmm0, %xmm0
-; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT:    vmovq %rdi, %xmm0
+; AVX2-NEXT:    vmovq %rsi, %xmm1
+; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %ins1 = insertelement <4 x i64> <i64 0, i64 0, i64 0, i64 0>, i64 %x, i32 0
   %ins2 = insertelement <4 x i64> <i64 0, i64 0, i64 0, i64 0>, i64 %y, i32 0
@@ -671,19 +664,16 @@
 ;
 ; SSE41-LABEL: PR41512_loads:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    pxor %xmm1, %xmm1
-; SSE41-NEXT:    pxor %xmm0, %xmm0
-; SSE41-NEXT:    pinsrd $0, (%rdi), %xmm0
-; SSE41-NEXT:    pinsrd $0, (%rsi), %xmm1
-; SSE41-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE41-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE41-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: PR41512_loads:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
-; AVX-NEXT:    vpinsrd $0, (%rdi), %xmm0, %xmm1
-; AVX-NEXT:    vpinsrd $0, (%rsi), %xmm0, %xmm0
-; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX-NEXT:    retq
   %x = load i32, i32* %p1
   %y = load i32, i32* %p2