Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -10551,6 +10551,20 @@
   // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
   // into that, and then insert the subvector back into the result.
   if (VT.is256BitVector() || VT.is512BitVector()) {
+    // With a 256-bit vector, we can insert into the zero element efficiently
+    // using a blend if we have AVX or AVX2 and the right data type.
+    if (VT.is256BitVector() && IdxVal == 0) {
+      // TODO: It is worthwhile to cast integer to floating point and back
+      // and incur a domain crossing penalty if that's what we'll end up
+      // doing anyway after extracting to a 128-bit vector.
+      if ((Subtarget->hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
+          (Subtarget->hasAVX2() && EltVT == MVT::i32)) {
+        SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
+        N2 = DAG.getIntPtrConstant(1);
+        return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec, N2);
+      }
+    }
+
     // Get the desired 128-bit vector chunk.
     SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
Index: test/CodeGen/X86/avx-insertelt.ll
===================================================================
--- test/CodeGen/X86/avx-insertelt.ll
+++ test/CodeGen/X86/avx-insertelt.ll
@@ -0,0 +1,83 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
+
+define <8 x float> @insert_f32(<8 x float> %y, float %f, <8 x float> %x) {
+; ALL-LABEL: insert_f32:
+; ALL:       vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
+; ALL-NEXT:  retq
+  %i0 = insertelement <8 x float> %y, float %f, i32 0
+  ret <8 x float> %i0
+}
+
+define <4 x double> @insert_f64(<4 x double> %y, double %f, <4 x double> %x) {
+; ALL-LABEL: insert_f64:
+; ALL:       vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
+; ALL-NEXT:  retq
+  %i0 = insertelement <4 x double> %y, double %f, i32 0
+  ret <4 x double> %i0
+}
+
+define <32 x i8> @insert_i8(<32 x i8> %y, i8 %f, <32 x i8> %x) {
+; AVX-LABEL: insert_i8:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpinsrb $0, %edi, %xmm0, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT:    retq
+;
+; AVX2-LABEL: insert_i8:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpinsrb $0, %edi, %xmm0, %xmm1
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    retq
+  %i0 = insertelement <32 x i8> %y, i8 %f, i32 0
+  ret <32 x i8> %i0
+}
+
+define <16 x i16> @insert_i16(<16 x i16> %y, i16 %f, <16 x i16> %x) {
+; AVX-LABEL: insert_i16:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpinsrw $0, %edi, %xmm0, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT:    retq
+;
+; AVX2-LABEL: insert_i16:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpinsrw $0, %edi, %xmm0, %xmm1
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    retq
+  %i0 = insertelement <16 x i16> %y, i16 %f, i32 0
+  ret <16 x i16> %i0
+}
+
+define <8 x i32> @insert_i32(<8 x i32> %y, i32 %f, <8 x i32> %x) {
+; AVX-LABEL: insert_i32:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpinsrd $0, %edi, %xmm0, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT:    retq
+;
+; AVX2-LABEL: insert_i32:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovd %edi, %xmm1
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
+; AVX2-NEXT:    retq
+  %i0 = insertelement <8 x i32> %y, i32 %f, i32 0
+  ret <8 x i32> %i0
+}
+
+define <4 x i64> @insert_i64(<4 x i64> %y, i64 %f, <4 x i64> %x) {
+; AVX-LABEL: insert_i64:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpinsrq $0, %rdi, %xmm0, %xmm1
+; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT:    retq
+;
+; AVX2-LABEL: insert_i64:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpinsrq $0, %rdi, %xmm0, %xmm1
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    retq
+  %i0 = insertelement <4 x i64> %y, i64 %f, i32 0
+  ret <4 x i64> %i0
+}
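
For context on the intended codegen, here is a minimal C intrinsics sketch of the pattern the new lowering produces for an insert into element 0 of a 256-bit vector. It is illustrative only, not part of the patch: the helper name insert_f32_lane0 is invented here, and it assumes <immintrin.h> and compilation with -mavx.

#include <immintrin.h>

/* Insert a scalar into element 0 of a 256-bit vector with one blend,
   instead of the old extract-128/insert-element/insert-128 sequence.
   _mm_set_ss plays the role of ISD::SCALAR_TO_VECTOR, and the blend
   immediate 0x1 matches the X86ISD::BLENDI mask of 1 used in the
   patch: take element 0 from the new value, everything else from y.
   This compiles to a single vblendps. */
__m256 insert_f32_lane0(__m256 y, float f) {
  /* Upper 128 bits of v are undefined, but the blend mask never reads them. */
  __m256 v = _mm256_castps128_ps256(_mm_set_ss(f));
  return _mm256_blend_ps(y, v, 0x1);
}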