Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -1847,7 +1847,7 @@
   setPrefLoopAlignment(4); // 2^4 bytes.
 
   // An out-of-order CPU can speculatively execute past a predictable branch,
-  // but a conditional move could be stalled by an expensive earlier operation.
+  // but a conditional move could be stalled by an expensive earlier operation.
   PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
   EnableExtLdPromotion = true;
   setPrefFunctionAlignment(4); // 2^4 bytes.
@@ -4339,6 +4339,17 @@
   return true;
 }
 
+/// Return true if every element in Mask, beginning
+/// from position Pos and ending in Pos+Size, falls within the specified
+/// sequential range [Low, Low+Size), or is undef or is zero.
+static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
+                                             unsigned Size, int Low) {
+  for (unsigned i = Pos, e = Pos + Size; i != e; ++i, ++Low)
+    if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
+      return false;
+  return true;
+}
+
 /// Return true if the specified EXTRACT_SUBVECTOR operand specifies a vector
 /// extract that is suitable for instruction that extract 128 or 256 bit vectors
 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
@@ -12286,6 +12297,7 @@
                                                   SelectionDAG &DAG) const {
   MVT VT = Op.getSimpleValueType();
   MVT EltVT = VT.getVectorElementType();
+  unsigned NumElts = VT.getVectorNumElements();
 
   if (EltVT == MVT::i1)
     return InsertBitToMaskVector(Op, DAG);
@@ -12299,6 +12311,19 @@
   auto *N2C = cast<ConstantSDNode>(N2);
   unsigned IdxVal = N2C->getZExtValue();
 
+  // If we are clearing out an element, we do this more efficiently with a
+  // blend shuffle than a costly integer insertion.
+  // TODO: would other rematerializable values (e.g. allbits) benefit as well?
+  // TODO: pre-SSE41 targets will tend to use bit masking - this could still
+  // be beneficial if we are inserting several zeros and can combine the masks.
+  if (X86::isZeroNode(N1) && Subtarget.hasSSE41() && NumElts <= 8) {
+    SmallVector<int, 8> ClearMask;
+    for (unsigned i = 0; i != NumElts; ++i)
+      ClearMask.push_back(i == IdxVal ? i + NumElts : i);
+    SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, dl);
+    return DAG.getVectorShuffle(VT, dl, N0, ZeroVector, ClearMask);
+  }
+
   // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
   // into that, and then insert the subvector back into the result.
   if (VT.is256BitVector() || VT.is512BitVector()) {
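
For reference, a standalone sketch (not part of the patch) of the clear-mask construction used by the hunk above: lane IdxVal is redirected to the second shuffle operand (the zero vector), while every other lane keeps its identity index. The helper name and the concrete NumElts/IdxVal values are illustrative only.

// Standalone sketch: how the ClearMask in the hunk above is built.
// For NumElts = 4, IdxVal = 2 the mask is {0, 1, 6, 3}; indices >= NumElts
// refer to lanes of the zero-vector operand of the shuffle.
#include <cstdio>
#include <vector>

static std::vector<int> buildClearMask(unsigned NumElts, unsigned IdxVal) {
  std::vector<int> Mask;
  for (unsigned i = 0; i != NumElts; ++i)
    Mask.push_back(i == IdxVal ? int(i + NumElts) : int(i));
  return Mask;
}

int main() {
  for (int M : buildClearMask(4, 2))
    std::printf("%d ", M);
  std::printf("\n");
  return 0;
}

Because the out-of-range indices select known-zero lanes, the shuffle lowering can emit this as a single blendps/blendpd on SSE4.1 targets, which is what the test updates below check for.
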
@@ -23637,6 +23662,51 @@
     return true;
   }
 
+  // Attempt to blend with zero.
+  if (VT.getVectorNumElements() <= 8 &&
+      ((Subtarget.hasSSE41() && VT.is128BitVector()) ||
+       (Subtarget.hasAVX() && VT.is256BitVector()))) {
+    // Convert VT to a type compatible with X86ISD::BLENDI.
+    MVT ShuffleVT = VT;
+    if (Subtarget.hasAVX2()) {
+      if (VT == MVT::v4i64)
+        ShuffleVT = MVT::v8i32;
+      else if (VT == MVT::v2i64)
+        ShuffleVT = MVT::v4i32;
+    } else {
+      if (VT == MVT::v2i64 || VT == MVT::v4i32)
+        ShuffleVT = MVT::v8i16;
+      else if (VT == MVT::v4i64)
+        ShuffleVT = MVT::v4f64;
+      else if (VT == MVT::v8i32)
+        ShuffleVT = MVT::v8f32;
+    }
+
+    if (isSequentialOrUndefOrZeroInRange(Mask, 0, Mask.size(), 0) &&
+        Mask.size() <= ShuffleVT.getVectorNumElements()) {
+      unsigned BlendMask = 0;
+      unsigned ShuffleSize = ShuffleVT.getVectorNumElements();
+      unsigned MaskRatio = ShuffleSize / Mask.size();
+
+      for (unsigned i = 0; i != ShuffleSize; ++i)
+        if (Mask[i / MaskRatio] < 0)
+          BlendMask |= 1u << i;
+
+      if (Root.getOpcode() != X86ISD::BLENDI ||
+          Root->getConstantOperandVal(2) != BlendMask) {
+        SDValue Zero = getZeroVector(ShuffleVT, Subtarget, DAG, DL);
+        Res = DAG.getBitcast(ShuffleVT, Input);
+        DCI.AddToWorklist(Res.getNode());
+        Res = DAG.getNode(X86ISD::BLENDI, DL, ShuffleVT, Res, Zero,
+                          DAG.getConstant(BlendMask, DL, MVT::i8));
+        DCI.AddToWorklist(Res.getNode());
+        DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
+                      /*AddTo*/ true);
+        return true;
+      }
+    }
+  }
+
   // Don't try to re-form single instruction chains under any circumstances now
   // that we've done encoding canonicalization for them.
   if (Depth < 2)
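
To make the blend-immediate computation in the hunk above concrete, here is a standalone sketch (not part of the patch, with illustrative values) of how BlendMask is derived once the shuffle mask is rescaled to the ShuffleVT domain. It assumes the usual X86 shuffle-mask convention that negative entries mean undef or known-zero lanes; the function name and the sample mask are hypothetical.

// Standalone sketch of the BlendMask computation in the combine above.
// Negative mask entries stand for undef/zero lanes; values are illustrative.
#include <cstdio>
#include <vector>

static unsigned buildBlendMask(const std::vector<int> &Mask,
                               unsigned ShuffleSize) {
  // Each original mask element covers MaskRatio lanes of the wider ShuffleVT.
  unsigned MaskRatio = ShuffleSize / Mask.size();
  unsigned BlendMask = 0;
  for (unsigned i = 0; i != ShuffleSize; ++i)
    if (Mask[i / MaskRatio] < 0) // undef/zero lane: take the zero operand
      BlendMask |= 1u << i;
  return BlendMask;
}

int main() {
  // A v2i64 mask {zero, 1} rescaled to a v4i32 blend: lanes 0 and 1 come
  // from the zero vector, so the immediate is 0x3.
  std::printf("0x%x\n", buildBlendMask({-2, 1}, 4));
  return 0;
}

On AVX2 this corresponds to the vpblendd with the low two lanes taken from the zero register, as seen in the updated insert_v2i64_z1 check lines below.
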
Index: test/CodeGen/X86/insertelement-zero.ll
===================================================================
--- test/CodeGen/X86/insertelement-zero.ll
+++ test/CodeGen/X86/insertelement-zero.ll
@@ -10,37 +10,72 @@
 target triple = "x86_64-unknown-unknown"
 
 define <2 x double> @insert_v2f64_z1(<2 x double> %a) {
-; SSE-LABEL: insert_v2f64_z1:
-; SSE:       # BB#0:
-; SSE-NEXT:    xorpd %xmm1, %xmm1
-; SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; SSE-NEXT:    retq
+; SSE2-LABEL: insert_v2f64_z1:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    xorpd %xmm1, %xmm1
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT:    retq
+;
+; SSE3-LABEL: insert_v2f64_z1:
+; SSE3:       # BB#0:
+; SSE3-NEXT:    xorpd %xmm1, %xmm1
+; SSE3-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE3-NEXT:    retq
+;
+; SSSE3-LABEL: insert_v2f64_z1:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    xorpd %xmm1, %xmm1
+; SSSE3-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: insert_v2f64_z1:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    xorpd %xmm1, %xmm1
+; SSE41-NEXT:    blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: insert_v2f64_z1:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
 ; AVX-NEXT:    retq
   %1 = insertelement <2 x double> %a, double 0.0, i32 0
   ret <2 x double> %1
 }
 
 define <4 x double> @insert_v4f64_0zz3(<4 x double> %a) {
-; SSE-LABEL: insert_v4f64_0zz3:
-; SSE:       # BB#0:
-; SSE-NEXT:    xorpd %xmm2, %xmm2
-; SSE-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
-; SSE-NEXT:    retq
+; SSE2-LABEL: insert_v4f64_0zz3:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    xorpd %xmm2, %xmm2
+; SSE2-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE2-NEXT:    retq
+;
+; SSE3-LABEL: insert_v4f64_0zz3:
+; SSE3:       # BB#0:
+; SSE3-NEXT:    xorpd %xmm2, %xmm2
+; SSE3-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE3-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE3-NEXT:    retq
+;
+; SSSE3-LABEL: insert_v4f64_0zz3:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    xorpd %xmm2, %xmm2
+; SSSE3-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSSE3-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: insert_v4f64_0zz3:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
+; SSE41-NEXT:    xorpd %xmm2, %xmm2
+; SSE41-NEXT:    blendpd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: insert_v4f64_0zz3:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm0[0],xmm1[0]
-; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3]
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-NEXT:    vmovsd {{.*#+}} xmm1 = xmm1[0],xmm2[1]
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
 ; AVX-NEXT:    retq
   %1 = insertelement <4 x double> %a, double 0.0, i32 1
   %2 = insertelement <4 x double> %1, double 0.0, i32 2
@@ -68,15 +103,21 @@
 ;
 ; SSE41-LABEL: insert_v2i64_z1:
 ; SSE41:       # BB#0:
-; SSE41-NEXT:    xorl %eax, %eax
-; SSE41-NEXT:    pinsrq $0, %rax, %xmm0
+; SSE41-NEXT:    pxor %xmm1, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
-; AVX-LABEL: insert_v2i64_z1:
-; AVX:       # BB#0:
-; AVX-NEXT:    xorl %eax, %eax
-; AVX-NEXT:    vpinsrq $0, %rax, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: insert_v2i64_z1:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: insert_v2i64_z1:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX2-NEXT:    retq
   %1 = insertelement <2 x i64> %a, i64 0, i32 0
   ret <2 x i64> %1
 }
@@ -102,24 +143,20 @@
 ;
 ; SSE41-LABEL: insert_v4i64_01z3:
 ; SSE41:       # BB#0:
-; SSE41-NEXT:    xorl %eax, %eax
-; SSE41-NEXT:    pinsrq $0, %rax, %xmm1
+; SSE41-NEXT:    pxor %xmm2, %xmm2
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: insert_v4i64_01z3:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    xorl %eax, %eax
-; AVX1-NEXT:    vpinsrq $0, %rax, %xmm1, %xmm1
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_v4i64_01z3:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    xorl %eax, %eax
-; AVX2-NEXT:    vpinsrq $0, %rax, %xmm1, %xmm1
-; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
 ; AVX2-NEXT:    retq
   %1 = insertelement <4 x i64> %a, i64 0, i32 2
   ret <4 x i64> %1
@@ -150,13 +187,13 @@
 ; SSE41-LABEL: insert_v4f32_01z3:
 ; SSE41:       # BB#0:
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: insert_v4f32_01z3:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
 ; AVX-NEXT:    retq
   %1 = insertelement <4 x float> %a, float 0.0, i32 2
   ret <4 x float> %1
@@ -191,16 +228,13 @@
 ; SSE41:       # BB#0:
 ; SSE41-NEXT:    xorps %xmm2, %xmm2
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
-; SSE41-NEXT:    insertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
+; SSE41-NEXT:    blendps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: insert_v8f32_z12345z7:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-NEXT:    vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
 ; AVX-NEXT:    retq
   %1 = insertelement <8 x float> %a, float 0.0, i32 0
   %2 = insertelement <8 x float> %1, float 0.0, i32 6
@@ -234,15 +268,21 @@
 ;
 ; SSE41-LABEL: insert_v4i32_01z3:
 ; SSE41:       # BB#0:
-; SSE41-NEXT:    xorl %eax, %eax
-; SSE41-NEXT:    pinsrd $2, %eax, %xmm0
+; SSE41-NEXT:    pxor %xmm1, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
 ; SSE41-NEXT:    retq
 ;
-; AVX-LABEL: insert_v4i32_01z3:
-; AVX:       # BB#0:
-; AVX-NEXT:    xorl %eax, %eax
-; AVX-NEXT:    vpinsrd $2, %eax, %xmm0, %xmm0
-; AVX-NEXT:    retq
+; AVX1-LABEL: insert_v4i32_01z3:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: insert_v4i32_01z3:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
+; AVX2-NEXT:    retq
   %1 = insertelement <4 x i32> %a, i32 0, i32 2
   ret <4 x i32> %1
 }
@@ -280,29 +320,21 @@
 ;
 ; SSE41-LABEL: insert_v8i32_z12345z7:
 ; SSE41:       # BB#0:
-; SSE41-NEXT:    xorl %eax, %eax
-; SSE41-NEXT:    pinsrd $0, %eax, %xmm0
-; SSE41-NEXT:    pinsrd $2, %eax, %xmm1
+; SSE41-NEXT:    pxor %xmm2, %xmm2
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3,4,5,6,7]
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5],xmm1[6,7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: insert_v8i32_z12345z7:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    xorl %eax, %eax
-; AVX1-NEXT:    vpinsrd $0, %eax, %xmm0, %xmm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_v8i32_z12345z7:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    xorl %eax, %eax
-; AVX2-NEXT:    vmovd %eax, %xmm1
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
 ; AVX2-NEXT:    retq
   %1 = insertelement <8 x i32> %a, i32 0, i32 0
   %2 = insertelement <8 x i32> %1, i32 0, i32 6
@@ -310,18 +342,37 @@
 }
 
 define <8 x i16> @insert_v8i16_z12345z7(<8 x i16> %a) {
-; SSE-LABEL: insert_v8i16_z12345z7:
-; SSE:       # BB#0:
-; SSE-NEXT:    xorl %eax, %eax
-; SSE-NEXT:    pinsrw $0, %eax, %xmm0
-; SSE-NEXT:    pinsrw $6, %eax, %xmm0
-; SSE-NEXT:    retq
+; SSE2-LABEL: insert_v8i16_z12345z7:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    xorl %eax, %eax
+; SSE2-NEXT:    pinsrw $0, %eax, %xmm0
+; SSE2-NEXT:    pinsrw $6, %eax, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSE3-LABEL: insert_v8i16_z12345z7:
+; SSE3:       # BB#0:
+; SSE3-NEXT:    xorl %eax, %eax
+; SSE3-NEXT:    pinsrw $0, %eax, %xmm0
+; SSE3-NEXT:    pinsrw $6, %eax, %xmm0
+; SSE3-NEXT:    retq
+;
+; SSSE3-LABEL: insert_v8i16_z12345z7:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    xorl %eax, %eax
+; SSSE3-NEXT:    pinsrw $0, %eax, %xmm0
+; SSSE3-NEXT:    pinsrw $6, %eax, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: insert_v8i16_z12345z7:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    pxor %xmm1, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
+; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: insert_v8i16_z12345z7:
 ; AVX:       # BB#0:
-; AVX-NEXT:    xorl %eax, %eax
-; AVX-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
-; AVX-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
 ; AVX-NEXT:    retq
   %1 = insertelement <8 x i16> %a, i16 0, i32 0
   %2 = insertelement <8 x i16> %1, i16 0, i32 6
@@ -329,35 +380,58 @@
 }
 
 define <16 x i16> @insert_v16i16_z12345z789ABZDEz(<16 x i16> %a) {
-; SSE-LABEL: insert_v16i16_z12345z789ABZDEz:
-; SSE:       # BB#0:
-; SSE-NEXT:    xorl %eax, %eax
-; SSE-NEXT:    pinsrw $0, %eax, %xmm0
-; SSE-NEXT:    pinsrw $6, %eax, %xmm0
-; SSE-NEXT:    pinsrw $7, %eax, %xmm1
-; SSE-NEXT:    retq
+; SSE2-LABEL: insert_v16i16_z12345z789ABZDEz:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    xorl %eax, %eax
+; SSE2-NEXT:    pinsrw $0, %eax, %xmm0
+; SSE2-NEXT:    pinsrw $6, %eax, %xmm0
+; SSE2-NEXT:    pinsrw $7, %eax, %xmm1
+; SSE2-NEXT:    retq
+;
+; SSE3-LABEL: insert_v16i16_z12345z789ABZDEz:
+; SSE3:       # BB#0:
+; SSE3-NEXT:    xorl %eax, %eax
+; SSE3-NEXT:    pinsrw $0, %eax, %xmm0
+; SSE3-NEXT:    pinsrw $6, %eax, %xmm0
+; SSE3-NEXT:    pinsrw $7, %eax, %xmm1
+; SSE3-NEXT:    retq
+;
+; SSSE3-LABEL: insert_v16i16_z12345z789ABZDEz:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    xorl %eax, %eax
+; SSSE3-NEXT:    pinsrw $0, %eax, %xmm0
+; SSSE3-NEXT:    pinsrw $6, %eax, %xmm0
+; SSSE3-NEXT:    pinsrw $7, %eax, %xmm1
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: insert_v16i16_z12345z789ABZDEz:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    pxor %xmm2, %xmm2
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5],xmm2[6],xmm0[7]
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
+; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: insert_v16i16_z12345z789ABZDEz:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    xorl %eax, %eax
-; AVX1-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,5],xmm1[6],xmm0[7]
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5,6],xmm1[7]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: insert_v16i16_z12345z789ABZDEz:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    xorl %eax, %eax
-; AVX2-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm1
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-NEXT:    vpinsrw $6, %eax, %xmm0, %xmm1
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpinsrw $7, %eax, %xmm1, %xmm1
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm2 = xmm1[0],xmm0[1,2,3,4,5,6,7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,5],xmm1[6],xmm0[7]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3,4,5,6],xmm1[7]
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %1 = insertelement <16 x i16> %a, i16 0, i32 0
Index: test/CodeGen/X86/insertps-combine.ll
===================================================================
--- test/CodeGen/X86/insertps-combine.ll
+++ test/CodeGen/X86/insertps-combine.ll
@@ -117,12 +117,14 @@
 define <4 x float> @insertps_undef_input1(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: insertps_undef_input1:
 ; SSE:       # BB#0:
-; SSE-NEXT:    insertps {{.*#+}} xmm0 = zero,zero,zero,xmm0[3]
+; SSE-NEXT:    xorps %xmm1, %xmm1
+; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: insertps_undef_input1:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,zero,zero,xmm0[3]
+; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
 ; AVX-NEXT:    retq
   %res0 = fadd <4 x float> %a1,
   %res1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %res0, i8 21)
Index: test/CodeGen/X86/vec_insert-7.ll
===================================================================
--- test/CodeGen/X86/vec_insert-7.ll
+++ test/CodeGen/X86/vec_insert-7.ll
@@ -12,12 +12,9 @@
 ; CHECK-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
 ; CHECK-NEXT:    movl $32, %eax
 ; CHECK-NEXT:    pinsrd $0, %eax, %xmm0
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    pinsrd $1, %eax, %xmm0
-; CHECK-NEXT:    pinsrd $2, %eax, %xmm0
-; CHECK-NEXT:    pinsrd $3, %eax, %xmm0
-; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; CHECK-NEXT:    movq %xmm0, (%esp)
+; CHECK-NEXT:    pxor %xmm1, %xmm1
+; CHECK-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; CHECK-NEXT:    movq %xmm1, (%esp)
 ; CHECK-NEXT:    movq (%esp), %mm0
 ; CHECK-NEXT:    addl $20, %esp
 ; CHECK-NEXT:    retl
Index: test/CodeGen/X86/vector-shuffle-128-v2.ll
===================================================================
--- test/CodeGen/X86/vector-shuffle-128-v2.ll
+++ test/CodeGen/X86/vector-shuffle-128-v2.ll
@@ -932,7 +932,7 @@
 ; SSE41-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; SSE41-NEXT:    xorps %xmm1, %xmm1
-; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
+; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: shuffle_v2i64_bitcast_z123:
@@ -940,7 +940,7 @@
 ; AVX1-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
+; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v2i64_bitcast_z123:
Index: test/CodeGen/X86/vector-zext.ll
===================================================================
--- test/CodeGen/X86/vector-zext.ll
+++ test/CodeGen/X86/vector-zext.ll
@@ -1162,8 +1162,8 @@
 ; AVX1-LABEL: shuf_zext_4i32_to_4i64:
 ; AVX1:       # BB#0: # %entry
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vblendpd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    retq
@@ -1592,8 +1592,8 @@
 ; AVX1-LABEL: shuf_zext_4i32_to_4i64_offset1:
 ; AVX1:       # BB#0: # %entry
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm0[3],zero,zero,zero
-; AVX1-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3]
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5],xmm2[6,7]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;