Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -12301,6 +12301,7 @@
                                                   SelectionDAG &DAG) const {
   MVT VT = Op.getSimpleValueType();
   MVT EltVT = VT.getVectorElementType();
+  unsigned NumElts = VT.getVectorNumElements();

   if (EltVT == MVT::i1)
     return InsertBitToMaskVector(Op, DAG);
@@ -12314,6 +12315,19 @@
   auto *N2C = cast<ConstantSDNode>(N2);
   unsigned IdxVal = N2C->getZExtValue();

+  // If we are clearing out an element, we do this more efficiently with a
+  // blend shuffle than a costly integer insertion.
+  // TODO: would other rematerializable values (e.g. allbits) benefit as well?
+  // TODO: pre-SSE41 targets will tend to use bit masking - this could still
+  // be beneficial if we are inserting several zeros and can combine the masks.
+  if (X86::isZeroNode(N1) && Subtarget.hasSSE41() && NumElts <= 8) {
+    SmallVector<int, 8> ClearMask;
+    for (unsigned i = 0; i != NumElts; ++i)
+      ClearMask.push_back(i == IdxVal ? i + NumElts : i);
+    SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, dl);
+    return DAG.getVectorShuffle(VT, dl, N0, ZeroVector, ClearMask);
+  }
+
   // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
   // into that, and then insert the subvector back into the result.
   if (VT.is256BitVector() || VT.is512BitVector()) {
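The added block rewrites the zero insertion as a shuffle whose mask takes lane IdxVal from an all-zeros vector (mask index IdxVal + NumElts) and every other lane from the original source. A minimal sketch of that equivalence in IR form (illustrative only; the function name and types below are invented for the example and are not part of the patch):

define <4 x float> @clear_lane_2(<4 x float> %a) {
  ; Equivalent to: insertelement <4 x float> %a, float 0.0, i32 2.
  ; Lanes 0, 1 and 3 come from %a; lane 2 uses mask index 6 (= 2 + NumElts),
  ; i.e. element 2 of the all-zeros second operand.
  %r = shufflevector <4 x float> %a, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
  ret <4 x float> %r
}

On SSE4.1+ targets the shuffle lowering can then select a single blendps/blendpd/pblendw against a zeroed register, which is what the updated checks below expect.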
Index: llvm/trunk/test/CodeGen/X86/insertelement-zero.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/insertelement-zero.ll
+++ llvm/trunk/test/CodeGen/X86/insertelement-zero.ll
@@ -10,37 +10,73 @@
 target triple = "x86_64-unknown-unknown"

 define <2 x double> @insert_v2f64_z1(<2 x double> %a) {
-; SSE-LABEL: insert_v2f64_z1:
-; SSE: # BB#0:
-; SSE-NEXT: xorpd %xmm1, %xmm1
-; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; SSE-NEXT: retq
+; SSE2-LABEL: insert_v2f64_z1:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorpd %xmm1, %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: insert_v2f64_z1:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorpd %xmm1, %xmm1
+; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: insert_v2f64_z1:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorpd %xmm1, %xmm1
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: insert_v2f64_z1:
+; SSE41: # BB#0:
+; SSE41-NEXT: xorpd %xmm1, %xmm1
+; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: insert_v2f64_z1:
 ; AVX: # BB#0:
 ; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; AVX-NEXT: retq
   %1 = insertelement <2 x double> %a, double 0.0, i32 0
   ret <2 x double> %1
 }

 define <4 x double> @insert_v4f64_0zz3(<4 x double> %a) {
-; SSE-LABEL: insert_v4f64_0zz3:
-; SSE: # BB#0:
-; SSE-NEXT: xorpd %xmm2, %xmm2
-; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
-; SSE-NEXT: retq
+; SSE2-LABEL: insert_v4f64_0zz3:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorpd %xmm2, %xmm2
+; SSE2-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: insert_v4f64_0zz3:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorpd %xmm2, %xmm2
+; SSE3-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE3-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: insert_v4f64_0zz3:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorpd %xmm2, %xmm2
+; SSSE3-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSSE3-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: insert_v4f64_0zz3:
+; SSE41: # BB#0:
+; SSE41-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
+; SSE41-NEXT: xorpd %xmm2, %xmm2
+; SSE41-NEXT: blendpd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: insert_v4f64_0zz3:
 ; AVX: # BB#0:
-; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm0[0],xmm1[0]
-; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX-NEXT: vmovsd {{.*#+}} xmm1 = xmm1[0],xmm2[1]
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
+; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3]
 ; AVX-NEXT: retq
   %1 = insertelement <4 x double> %a, double 0.0, i32 1
   %2 = insertelement <4 x double> %1, double 0.0, i32 2
@@ -68,15 +104,21 @@
 ;
 ; SSE41-LABEL: insert_v2i64_z1:
 ; SSE41: # BB#0:
-; SSE41-NEXT: xorl %eax, %eax
-; SSE41-NEXT: pinsrq $0, %rax, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT: retq
 ;
-; AVX-LABEL: insert_v2i64_z1:
-; AVX: # BB#0:
-; AVX-NEXT: xorl %eax, %eax
-; AVX-NEXT: vpinsrq $0, %rax, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: insert_v2i64_z1:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: insert_v2i64_z1:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX2-NEXT: retq
   %1 = insertelement <2 x i64> %a, i64 0, i32 0
   ret <2 x i64> %1
 }
@@ -102,24 +144,20 @@
 ;
 ; SSE41-LABEL: insert_v4i64_01z3:
 ; SSE41: # BB#0:
-; SSE41-NEXT: xorl %eax, %eax
-; SSE41-NEXT: pinsrq $0, %rax, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: insert_v4i64_01z3:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: xorl %eax, %eax
-; AVX1-NEXT: vpinsrq $0, %rax, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3]
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: insert_v4i64_01z3:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: xorl %eax, %eax
-; AVX2-NEXT: vpinsrq $0, %rax, %xmm1, %xmm1
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
 ; AVX2-NEXT: retq
   %1 = insertelement <4 x i64> %a, i64 0, i32 2
   ret <4 x i64> %1
@@ -150,13 +188,13 @@
 ; SSE41-LABEL: insert_v4f32_01z3:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: xorps %xmm1, %xmm1
-; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: insert_v4f32_01z3:
 ; AVX: # BB#0:
 ; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
 ; AVX-NEXT: retq
   %1 = insertelement <4 x float> %a, float 0.0, i32 2
   ret <4 x float> %1
@@ -191,16 +229,14 @@
 ; SSE41: # BB#0:
 ; SSE41-NEXT: xorps %xmm2, %xmm2
 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
-; SSE41-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; SSE41-NEXT: blendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: insert_v8f32_z12345z7:
 ; AVX: # BB#0:
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vxorps %ymm1, %ymm1, %ymm1
 ; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
-; AVX-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
-; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
 ; AVX-NEXT: retq
   %1 = insertelement <8 x float> %a, float 0.0, i32 0
   %2 = insertelement <8 x float> %1, float 0.0, i32 6
@@ -234,15 +270,21 @@
 ;
 ; SSE41-LABEL: insert_v4i32_01z3:
 ; SSE41: # BB#0:
-; SSE41-NEXT: xorl %eax, %eax
-; SSE41-NEXT: pinsrd $2, %eax, %xmm0
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
 ; SSE41-NEXT: retq
 ;
-; AVX-LABEL: insert_v4i32_01z3:
-; AVX: # BB#0:
-; AVX-NEXT: xorl %eax, %eax
-; AVX-NEXT: vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: insert_v4i32_01z3:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: insert_v4i32_01z3:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
+; AVX2-NEXT: retq
   %1 = insertelement <4 x i32> %a, i32 0, i32 2
   ret <4 x i32> %1
 }
@@ -280,29 +322,23 @@
 ;
 ; SSE41-LABEL: insert_v8i32_z12345z7:
 ; SSE41: # BB#0:
-; SSE41-NEXT: xorl %eax, %eax
-; SSE41-NEXT: pinsrd $0, %eax, %xmm0
-; SSE41-NEXT: pinsrd $2, %eax, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3,4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5],xmm1[6,7]
 ; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: insert_v8i32_z12345z7:
 ; AVX1: # BB#0:
-; AVX1-NEXT: xorl %eax, %eax
-; AVX1-NEXT: vpinsrd $0, %eax, %xmm0, %xmm1
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: insert_v8i32_z12345z7:
 ; AVX2: # BB#0:
-; AVX2-NEXT: xorl %eax, %eax
-; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
 ; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
 ; AVX2-NEXT: retq
   %1 = insertelement <8 x i32> %a, i32 0, i32 0
   %2 = insertelement <8 x i32> %1, i32 0, i32 6
@@ -310,18 +346,39 @@
 }

 define <8 x i16> @insert_v8i16_z12345z7(<8 x i16> %a) {
-; SSE-LABEL: insert_v8i16_z12345z7:
-; SSE: # BB#0:
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: pinsrw $0, %eax, %xmm0
-; SSE-NEXT: pinsrw $6, %eax, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: insert_v8i16_z12345z7:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: pinsrw $0, %eax, %xmm0
+; SSE2-NEXT: pinsrw $6, %eax, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: insert_v8i16_z12345z7:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorl %eax, %eax
+; SSE3-NEXT: pinsrw $0, %eax, %xmm0
+; SSE3-NEXT: pinsrw $6, %eax, %xmm0
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: insert_v8i16_z12345z7:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorl %eax, %eax
+; SSSE3-NEXT: pinsrw $0, %eax, %xmm0
+; SSSE3-NEXT: pinsrw $6, %eax, %xmm0
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: insert_v8i16_z12345z7:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6],xmm0[7]
+; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: insert_v8i16_z12345z7:
 ; AVX: # BB#0:
-; AVX-NEXT: xorl %eax, %eax
-; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6],xmm0[7]
 ; AVX-NEXT: retq
   %1 = insertelement <8 x i16> %a, i16 0, i32 0
   %2 = insertelement <8 x i16> %1, i16 0, i32 6
@@ -329,35 +386,59 @@
 }

 define <16 x i16> @insert_v16i16_z12345z789ABZDEz(<16 x i16> %a) {
-; SSE-LABEL: insert_v16i16_z12345z789ABZDEz:
-; SSE: # BB#0:
-; SSE-NEXT: xorl %eax, %eax
-; SSE-NEXT: pinsrw $0, %eax, %xmm0
-; SSE-NEXT: pinsrw $6, %eax, %xmm0
-; SSE-NEXT: pinsrw $7, %eax, %xmm1
-; SSE-NEXT: retq
+; SSE2-LABEL: insert_v16i16_z12345z789ABZDEz:
+; SSE2: # BB#0:
+; SSE2-NEXT: xorl %eax, %eax
+; SSE2-NEXT: pinsrw $0, %eax, %xmm0
+; SSE2-NEXT: pinsrw $6, %eax, %xmm0
+; SSE2-NEXT: pinsrw $7, %eax, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: insert_v16i16_z12345z789ABZDEz:
+; SSE3: # BB#0:
+; SSE3-NEXT: xorl %eax, %eax
+; SSE3-NEXT: pinsrw $0, %eax, %xmm0
+; SSE3-NEXT: pinsrw $6, %eax, %xmm0
+; SSE3-NEXT: pinsrw $7, %eax, %xmm1
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: insert_v16i16_z12345z789ABZDEz:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: xorl %eax, %eax
+; SSSE3-NEXT: pinsrw $0, %eax, %xmm0
+; SSSE3-NEXT: pinsrw $6, %eax, %xmm0
+; SSSE3-NEXT: pinsrw $7, %eax, %xmm1
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: insert_v16i16_z12345z789ABZDEz:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5,6,7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm2[6],xmm0[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
+; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: insert_v16i16_z12345z789ABZDEz:
 ; AVX1: # BB#0:
-; AVX1-NEXT: xorl %eax, %eax
-; AVX1-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT: vpinsrw $6, %eax, %xmm0, %xmm1
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,5],xmm1[6],xmm0[7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5,6],xmm1[7]
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: insert_v16i16_z12345z789ABZDEz:
 ; AVX2: # BB#0:
-; AVX2-NEXT: xorl %eax, %eax
-; AVX2-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-NEXT: vpinsrw $6, %eax, %xmm0, %xmm1
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,5],xmm1[6],xmm0[7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5,6],xmm1[7]
 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT: retq
   %1 = insertelement <16 x i16> %a, i16 0, i32 0
Index: llvm/trunk/test/CodeGen/X86/vec_insert-7.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/vec_insert-7.ll
+++ llvm/trunk/test/CodeGen/X86/vec_insert-7.ll
@@ -12,11 +12,7 @@
 ; CHECK-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
 ; CHECK-NEXT: movl $32, %eax
 ; CHECK-NEXT: pinsrd $0, %eax, %xmm0
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: pinsrd $1, %eax, %xmm0
-; CHECK-NEXT: pinsrd $2, %eax, %xmm0
-; CHECK-NEXT: pinsrd $3, %eax, %xmm0
-; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; CHECK-NEXT: movq %xmm0, (%esp)
 ; CHECK-NEXT: movq (%esp), %mm0
 ; CHECK-NEXT: addl $20, %esp
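The vec_insert-7.ll update shows the knock-on effect of the new lowering: once each zero insertion becomes a blend against a zero vector, the existing shuffle combines can fold a chain of such blends into a single shuffle-with-zero (the lone pshufb above), instead of emitting one pinsrd per cleared element. A hypothetical reduced example of such a chain (illustrative IR only; the function name is invented and the exact instruction chosen depends on the target features):

define <4 x i32> @clear_upper_lanes(<4 x i32> %a) {
  ; Each insertion of zero becomes a blend with the same zero vector, so the
  ; three blends can be merged by the shuffle combiner into one operation.
  %1 = insertelement <4 x i32> %a, i32 0, i32 1
  %2 = insertelement <4 x i32> %1, i32 0, i32 2
  %3 = insertelement <4 x i32> %2, i32 0, i32 3
  ret <4 x i32> %3
}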