diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -18970,6 +18970,9 @@
   MVT IdxSVT = MVT::getIntegerVT(EltSizeInBits);
   MVT IdxVT = MVT::getVectorVT(IdxSVT, NumElts);
+  if (!isTypeLegal(IdxSVT) || !isTypeLegal(IdxVT))
+    return SDValue();
+
   SDValue IdxExt = DAG.getZExtOrTrunc(N2, dl, IdxSVT);
   SDValue IdxSplat = DAG.getSplatBuildVector(IdxVT, dl, IdxExt);
   SDValue EltSplat = DAG.getSplatBuildVector(VT, dl, N1);
diff --git a/llvm/test/CodeGen/X86/insertelement-var-index.ll b/llvm/test/CodeGen/X86/insertelement-var-index.ll
--- a/llvm/test/CodeGen/X86/insertelement-var-index.ll
+++ b/llvm/test/CodeGen/X86/insertelement-var-index.ll
@@ -5,11 +5,12 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,AVX,AVX1OR2,AVX2
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX,AVX512,AVX512F
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=ALL,AVX,AVX512,AVX512BW
+; RUN: llc < %s -mtriple=i686-- -mattr=+avx2 | FileCheck %s --check-prefixes=ALL,X86AVX2
 
 define <16 x i8> @undef_index(i8 %x) nounwind {
 ; ALL-LABEL: undef_index:
 ; ALL: # %bb.0:
-; ALL-NEXT: retq
+; ALL-NEXT: ret{{[l|q]}}
   %ins = insertelement <16 x i8> undef, i8 %x, i64 undef
   ret <16 x i8> %ins
 }
@@ -17,7 +18,7 @@
 define <16 x i8> @undef_scalar(<16 x i8> %x, i32 %index) nounwind {
 ; ALL-LABEL: undef_scalar:
 ; ALL: # %bb.0:
-; ALL-NEXT: retq
+; ALL-NEXT: ret{{[l|q]}}
   %ins = insertelement <16 x i8> %x, i8 undef, i32 %index
   ret <16 x i8> %ins
 }
@@ -65,6 +66,11 @@
 ; AVX512BW: # %bb.0:
 ; AVX512BW-NEXT: vpbroadcastb %edi, %xmm0
 ; AVX512BW-NEXT: retq
+;
+; X86AVX2-LABEL: arg_i8_v16i8_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: vpbroadcastb {{[0-9]+}}(%esp), %xmm0
+; X86AVX2-NEXT: retl
   %ins = insertelement <16 x i8> undef, i8 %x, i32 %y
   ret <16 x i8> %ins
 }
@@ -100,6 +106,11 @@
 ; AVX512BW: # %bb.0:
 ; AVX512BW-NEXT: vpbroadcastw %edi, %xmm0
 ; AVX512BW-NEXT: retq
+;
+; X86AVX2-LABEL: arg_i16_v8i16_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: vpbroadcastw {{[0-9]+}}(%esp), %xmm0
+; X86AVX2-NEXT: retl
   %ins = insertelement <8 x i16> undef, i16 %x, i32 %y
   ret <8 x i16> %ins
 }
@@ -127,6 +138,11 @@
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vpbroadcastd %edi, %xmm0
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: arg_i32_v4i32_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm0
+; X86AVX2-NEXT: retl
   %ins = insertelement <4 x i32> undef, i32 %x, i32 %y
   ret <4 x i32> %ins
 }
@@ -154,6 +170,11 @@
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vpbroadcastq %rdi, %xmm0
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: arg_i64_v2i64_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X86AVX2-NEXT: retl
   %ins = insertelement <2 x i64> undef, i64 %x, i32 %y
   ret <2 x i64> %ins
 }
@@ -178,6 +199,11 @@
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vbroadcastss %xmm0, %xmm0
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: arg_f32_v4f32_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm0
+; X86AVX2-NEXT: retl
   %ins = insertelement <4 x float> undef, float %x, i32 %y
   ret <4 x float> %ins
 }
@@ -197,6 +223,11 @@
 ; AVX: # %bb.0:
 ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
 ; AVX-NEXT: retq
+;
+; X86AVX2-LABEL: arg_f64_v2f64_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X86AVX2-NEXT: retl
   %ins = insertelement <2 x double> undef, double %x, i32 %y
   ret <2 x double> %ins
 }
@@ -236,6 +267,12 @@
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vpbroadcastb (%rdi), %xmm0
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: load_i8_v16i8_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX2-NEXT: vpbroadcastb (%eax), %xmm0
+; X86AVX2-NEXT: retl
   %x = load i8, i8* %p
   %ins = insertelement <16 x i8> undef, i8 %x, i32 %y
   ret <16 x i8> %ins
@@ -267,6 +304,12 @@
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vpbroadcastw (%rdi), %xmm0
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: load_i16_v8i16_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX2-NEXT: vpbroadcastw (%eax), %xmm0
+; X86AVX2-NEXT: retl
   %x = load i16, i16* %p
   %ins = insertelement <8 x i16> undef, i16 %x, i32 %y
   ret <8 x i16> %ins
@@ -283,6 +326,12 @@
 ; AVX: # %bb.0:
 ; AVX-NEXT: vbroadcastss (%rdi), %xmm0
 ; AVX-NEXT: retq
+;
+; X86AVX2-LABEL: load_i32_v4i32_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX2-NEXT: vbroadcastss (%eax), %xmm0
+; X86AVX2-NEXT: retl
   %x = load i32, i32* %p
   %ins = insertelement <4 x i32> undef, i32 %x, i32 %y
   ret <4 x i32> %ins
@@ -299,6 +348,12 @@
 ; AVX: # %bb.0:
 ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
 ; AVX-NEXT: retq
+;
+; X86AVX2-LABEL: load_i64_v2i64_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X86AVX2-NEXT: retl
   %x = load i64, i64* %p
   %ins = insertelement <2 x i64> undef, i64 %x, i32 %y
   ret <2 x i64> %ins
@@ -315,6 +370,12 @@
 ; AVX: # %bb.0:
 ; AVX-NEXT: vbroadcastss (%rdi), %xmm0
 ; AVX-NEXT: retq
+;
+; X86AVX2-LABEL: load_f32_v4f32_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX2-NEXT: vbroadcastss (%eax), %xmm0
+; X86AVX2-NEXT: retl
   %x = load float, float* %p
   %ins = insertelement <4 x float> undef, float %x, i32 %y
   ret <4 x float> %ins
@@ -336,6 +397,12 @@
 ; AVX: # %bb.0:
 ; AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
 ; AVX-NEXT: retq
+;
+; X86AVX2-LABEL: load_f64_v2f64_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX2-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X86AVX2-NEXT: retl
   %x = load double, double* %p
   %ins = insertelement <2 x double> undef, double %x, i32 %y
   ret <2 x double> %ins
@@ -375,6 +442,11 @@
 ; AVX512BW: # %bb.0:
 ; AVX512BW-NEXT: vpbroadcastb %edi, %ymm0
 ; AVX512BW-NEXT: retq
+;
+; X86AVX2-LABEL: arg_i8_v32i8_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: vpbroadcastb {{[0-9]+}}(%esp), %ymm0
+; X86AVX2-NEXT: retl
   %ins = insertelement <32 x i8> undef, i8 %x, i32 %y
   ret <32 x i8> %ins
 }
@@ -413,6 +485,11 @@
 ; AVX512BW: # %bb.0:
 ; AVX512BW-NEXT: vpbroadcastw %edi, %ymm0
 ; AVX512BW-NEXT: retq
+;
+; X86AVX2-LABEL: arg_i16_v16i16_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: vpbroadcastw {{[0-9]+}}(%esp), %ymm0
+; X86AVX2-NEXT: retl
   %ins = insertelement <16 x i16> undef, i16 %x, i32 %y
   ret <16 x i16> %ins
 }
@@ -444,6 +521,11 @@
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vpbroadcastd %edi, %ymm0
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: arg_i32_v8i32_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
+; X86AVX2-NEXT: retl
   %ins = insertelement <8 x i32> undef, i32 %x, i32 %y
   ret <8 x i32> %ins
 }
@@ -475,6 +557,11 @@
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vpbroadcastq %rdi, %ymm0
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: arg_i64_v4i64_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
+; X86AVX2-NEXT: retl
   %ins = insertelement <4 x i64> undef, i64 %x, i32 %y
   ret <4 x i64> %ins
 }
@@ -504,6 +591,11 @@
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vbroadcastss %xmm0, %ymm0
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: arg_f32_v8f32_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm0
+; X86AVX2-NEXT: retl
   %ins = insertelement <8 x float> undef, float %x, i32 %y
   ret <8 x float> %ins
 }
@@ -533,6 +625,11 @@
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vbroadcastsd %xmm0, %ymm0
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: arg_f64_v4f64_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: vbroadcastsd {{[0-9]+}}(%esp), %ymm0
+; X86AVX2-NEXT: retl
   %ins = insertelement <4 x double> undef, double %x, i32 %y
   ret <4 x double> %ins
 }
@@ -566,6 +663,12 @@
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vpbroadcastb (%rdi), %ymm0
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: load_i8_v32i8_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX2-NEXT: vpbroadcastb (%eax), %ymm0
+; X86AVX2-NEXT: retl
   %x = load i8, i8* %p
   %ins = insertelement <32 x i8> undef, i8 %x, i32 %y
   ret <32 x i8> %ins
@@ -600,6 +703,12 @@
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vpbroadcastw (%rdi), %ymm0
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: load_i16_v16i16_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX2-NEXT: vpbroadcastw (%eax), %ymm0
+; X86AVX2-NEXT: retl
   %x = load i16, i16* %p
   %ins = insertelement <16 x i16> undef, i16 %x, i32 %y
   ret <16 x i16> %ins
@@ -620,6 +729,12 @@
 ; AVX: # %bb.0:
 ; AVX-NEXT: vbroadcastss (%rdi), %ymm0
 ; AVX-NEXT: retq
+;
+; X86AVX2-LABEL: load_i32_v8i32_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX2-NEXT: vbroadcastss (%eax), %ymm0
+; X86AVX2-NEXT: retl
   %x = load i32, i32* %p
   %ins = insertelement <8 x i32> undef, i32 %x, i32 %y
   ret <8 x i32> %ins
@@ -640,6 +755,12 @@
 ; AVX: # %bb.0:
 ; AVX-NEXT: vbroadcastsd (%rdi), %ymm0
 ; AVX-NEXT: retq
+;
+; X86AVX2-LABEL: load_i64_v4i64_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX2-NEXT: vbroadcastsd (%eax), %ymm0
+; X86AVX2-NEXT: retl
   %x = load i64, i64* %p
   %ins = insertelement <4 x i64> undef, i64 %x, i32 %y
   ret <4 x i64> %ins
@@ -660,6 +781,12 @@
 ; AVX: # %bb.0:
 ; AVX-NEXT: vbroadcastss (%rdi), %ymm0
 ; AVX-NEXT: retq
+;
+; X86AVX2-LABEL: load_f32_v8f32_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX2-NEXT: vbroadcastss (%eax), %ymm0
+; X86AVX2-NEXT: retl
   %x = load float, float* %p
   %ins = insertelement <8 x float> undef, float %x, i32 %y
   ret <8 x float> %ins
@@ -680,6 +807,12 @@
 ; AVX: # %bb.0:
 ; AVX-NEXT: vbroadcastsd (%rdi), %ymm0
 ; AVX-NEXT: retq
+;
+; X86AVX2-LABEL: load_f64_v4f64_undef:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX2-NEXT: vbroadcastsd (%eax), %ymm0
+; X86AVX2-NEXT: retl
   %x = load double, double* %p
   %ins = insertelement <4 x double> undef, double %x, i32 %y
   ret <4 x double> %ins
@@ -723,6 +856,22 @@
 ; AVX512BW-NEXT: vpcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
 ; AVX512BW-NEXT: vpbroadcastb %edi, %xmm0 {%k1}
 ; AVX512BW-NEXT: retq
+;
+; X86AVX2-LABEL: arg_i8_v16i8:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: andl $-16, %esp
+; X86AVX2-NEXT: subl $32, %esp
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: andl $15, %eax
+; X86AVX2-NEXT: movb 8(%ebp), %cl
+; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
+; X86AVX2-NEXT: movb %cl, (%esp,%eax)
+; X86AVX2-NEXT: vmovaps (%esp), %xmm0
+; X86AVX2-NEXT: movl %ebp, %esp
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %ins = insertelement <16 x i8> %v, i8 %x, i32 %y
   ret <16 x i8> %ins
 }
@@ -761,6 +910,22 @@
 ; AVX512BW-NEXT: vpcmpeqw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
 ; AVX512BW-NEXT: vpbroadcastw %edi, %xmm0 {%k1}
 ; AVX512BW-NEXT: retq
+;
+; X86AVX2-LABEL: arg_i16_v8i16:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: andl $-16, %esp
+; X86AVX2-NEXT: subl $32, %esp
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: andl $7, %eax
+; X86AVX2-NEXT: movzwl 8(%ebp), %ecx
+; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
+; X86AVX2-NEXT: movw %cx, (%esp,%eax,2)
+; X86AVX2-NEXT: vmovaps (%esp), %xmm0
+; X86AVX2-NEXT: movl %ebp, %esp
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %ins = insertelement <8 x i16> %v, i16 %x, i32 %y
   ret <8 x i16> %ins
 }
@@ -790,6 +955,22 @@
 ; AVX512-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
 ; AVX512-NEXT: vpbroadcastd %edi, %xmm0 {%k1}
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: arg_i32_v4i32:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: andl $-16, %esp
+; X86AVX2-NEXT: subl $32, %esp
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: andl $3, %eax
+; X86AVX2-NEXT: movl 8(%ebp), %ecx
+; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
+; X86AVX2-NEXT: movl %ecx, (%esp,%eax,4)
+; X86AVX2-NEXT: vmovaps (%esp), %xmm0
+; X86AVX2-NEXT: movl %ebp, %esp
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %ins = insertelement <4 x i32> %v, i32 %x, i32 %y
   ret <4 x i32> %ins
 }
@@ -820,6 +1001,31 @@
 ; AVX512-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
 ; AVX512-NEXT: vpbroadcastq %rdi, %xmm0 {%k1}
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: arg_i64_v2i64:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: pushl %esi
+; X86AVX2-NEXT: andl $-16, %esp
+; X86AVX2-NEXT: subl $48, %esp
+; X86AVX2-NEXT: movl 8(%ebp), %eax
+; X86AVX2-NEXT: movl 12(%ebp), %ecx
+; X86AVX2-NEXT: movl 16(%ebp), %edx
+; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
+; X86AVX2-NEXT: leal (%edx,%edx), %esi
+; X86AVX2-NEXT: andl $3, %esi
+; X86AVX2-NEXT: movl %eax, (%esp,%esi,4)
+; X86AVX2-NEXT: vmovaps (%esp), %xmm0
+; X86AVX2-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; X86AVX2-NEXT: leal 1(%edx,%edx), %eax
+; X86AVX2-NEXT: andl $3, %eax
+; X86AVX2-NEXT: movl %ecx, 16(%esp,%eax,4)
+; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %xmm0
+; X86AVX2-NEXT: leal -4(%ebp), %esp
+; X86AVX2-NEXT: popl %esi
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %ins = insertelement <2 x i64> %v, i64 %x, i32 %y
   ret <2 x i64> %ins
 }
@@ -869,6 +1075,14 @@
 ; AVX512-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %k1
 ; AVX512-NEXT: vbroadcastss %xmm1, %xmm0 {%k1}
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: arg_f32_v4f32:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: vpbroadcastd {{[0-9]+}}(%esp), %xmm1
+; X86AVX2-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
+; X86AVX2-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm2
+; X86AVX2-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
+; X86AVX2-NEXT: retl
   %ins = insertelement <4 x float> %v, float %x, i32 %y
   ret <4 x float> %ins
 }
@@ -922,6 +1136,22 @@
 ; AVX512-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %k1
 ; AVX512-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = xmm1[0,0]
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: arg_f64_v2f64:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: andl $-16, %esp
+; X86AVX2-NEXT: subl $32, %esp
+; X86AVX2-NEXT: movl 16(%ebp), %eax
+; X86AVX2-NEXT: andl $1, %eax
+; X86AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
+; X86AVX2-NEXT: vmovsd %xmm1, (%esp,%eax,8)
+; X86AVX2-NEXT: vmovaps (%esp), %xmm0
+; X86AVX2-NEXT: movl %ebp, %esp
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %ins = insertelement <2 x double> %v, double %x, i32 %y
   ret <2 x double> %ins
 }
@@ -963,6 +1193,23 @@
 ; AVX512BW-NEXT: vpcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
 ; AVX512BW-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1}
 ; AVX512BW-NEXT: retq
+;
+; X86AVX2-LABEL: load_i8_v16i8:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: andl $-16, %esp
+; X86AVX2-NEXT: subl $32, %esp
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: andl $15, %eax
+; X86AVX2-NEXT: movl 8(%ebp), %ecx
+; X86AVX2-NEXT: movb (%ecx), %cl
+; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
+; X86AVX2-NEXT: movb %cl, (%esp,%eax)
+; X86AVX2-NEXT: vmovaps (%esp), %xmm0
+; X86AVX2-NEXT: movl %ebp, %esp
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %x = load i8, i8* %p
   %ins = insertelement <16 x i8> %v, i8 %x, i32 %y
   ret <16 x i8> %ins
@@ -1005,6 +1252,23 @@
 ; AVX512BW-NEXT: vpcmpeqw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
 ; AVX512BW-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1}
 ; AVX512BW-NEXT: retq
+;
+; X86AVX2-LABEL: load_i16_v8i16:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: andl $-16, %esp
+; X86AVX2-NEXT: subl $32, %esp
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: andl $7, %eax
+; X86AVX2-NEXT: movl 8(%ebp), %ecx
+; X86AVX2-NEXT: movzwl (%ecx), %ecx
+; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
+; X86AVX2-NEXT: movw %cx, (%esp,%eax,2)
+; X86AVX2-NEXT: vmovaps (%esp), %xmm0
+; X86AVX2-NEXT: movl %ebp, %esp
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %x = load i16, i16* %p
   %ins = insertelement <8 x i16> %v, i16 %x, i32 %y
   ret <8 x i16> %ins
@@ -1037,6 +1301,23 @@
 ; AVX512-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
 ; AVX512-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1}
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: load_i32_v4i32:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: andl $-16, %esp
+; X86AVX2-NEXT: subl $32, %esp
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: andl $3, %eax
+; X86AVX2-NEXT: movl 8(%ebp), %ecx
+; X86AVX2-NEXT: movl (%ecx), %ecx
+; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
+; X86AVX2-NEXT: movl %ecx, (%esp,%eax,4)
+; X86AVX2-NEXT: vmovaps (%esp), %xmm0
+; X86AVX2-NEXT: movl %ebp, %esp
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %x = load i32, i32* %p
   %ins = insertelement <4 x i32> %v, i32 %x, i32 %y
   ret <4 x i32> %ins
@@ -1070,6 +1351,32 @@
 ; AVX512-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
 ; AVX512-NEXT: vpbroadcastq (%rdi), %xmm0 {%k1}
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: load_i64_v2i64:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: pushl %esi
+; X86AVX2-NEXT: andl $-16, %esp
+; X86AVX2-NEXT: subl $48, %esp
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: movl 8(%ebp), %ecx
+; X86AVX2-NEXT: movl (%ecx), %edx
+; X86AVX2-NEXT: movl 4(%ecx), %ecx
+; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
+; X86AVX2-NEXT: leal (%eax,%eax), %esi
+; X86AVX2-NEXT: andl $3, %esi
+; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
+; X86AVX2-NEXT: vmovaps (%esp), %xmm0
+; X86AVX2-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
+; X86AVX2-NEXT: leal 1(%eax,%eax), %eax
+; X86AVX2-NEXT: andl $3, %eax
+; X86AVX2-NEXT: movl %ecx, 16(%esp,%eax,4)
+; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %xmm0
+; X86AVX2-NEXT: leal -4(%ebp), %esp
+; X86AVX2-NEXT: popl %esi
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %x = load i64, i64* %p
   %ins = insertelement <2 x i64> %v, i64 %x, i32 %y
   ret <2 x i64> %ins
@@ -1122,6 +1429,15 @@
 ; AVX512-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
 ; AVX512-NEXT: vbroadcastss (%rdi), %xmm0 {%k1}
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: load_f32_v4f32:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX2-NEXT: vpbroadcastd {{[0-9]+}}(%esp), %xmm1
+; X86AVX2-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
+; X86AVX2-NEXT: vbroadcastss (%eax), %xmm2
+; X86AVX2-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
+; X86AVX2-NEXT: retl
   %x = load float, float* %p
   %ins = insertelement <4 x float> %v, float %x, i32 %y
   ret <4 x float> %ins
@@ -1177,6 +1493,23 @@
 ; AVX512-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %k1
 ; AVX512-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = mem[0,0]
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: load_f64_v2f64:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: andl $-16, %esp
+; X86AVX2-NEXT: subl $32, %esp
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: andl $1, %eax
+; X86AVX2-NEXT: movl 8(%ebp), %ecx
+; X86AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86AVX2-NEXT: vmovaps %xmm0, (%esp)
+; X86AVX2-NEXT: vmovsd %xmm1, (%esp,%eax,8)
+; X86AVX2-NEXT: vmovaps (%esp), %xmm0
+; X86AVX2-NEXT: movl %ebp, %esp
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %x = load double, double* %p
   %ins = insertelement <2 x double> %v, double %x, i32 %y
   ret <2 x double> %ins
@@ -1230,6 +1563,22 @@
 ; AVX512BW-NEXT: vpcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
 ; AVX512BW-NEXT: vpbroadcastb %edi, %ymm0 {%k1}
 ; AVX512BW-NEXT: retq
+;
+; X86AVX2-LABEL: arg_i8_v32i8:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: andl $-32, %esp
+; X86AVX2-NEXT: subl $64, %esp
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: andl $31, %eax
+; X86AVX2-NEXT: movb 8(%ebp), %cl
+; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
+; X86AVX2-NEXT: movb %cl, (%esp,%eax)
+; X86AVX2-NEXT: vmovaps (%esp), %ymm0
+; X86AVX2-NEXT: movl %ebp, %esp
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %ins = insertelement <32 x i8> %v, i8 %x, i32 %y
   ret <32 x i8> %ins
 }
@@ -1282,6 +1631,22 @@
 ; AVX512BW-NEXT: vpcmpeqw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
 ; AVX512BW-NEXT: vpbroadcastw %edi, %ymm0 {%k1}
 ; AVX512BW-NEXT: retq
+;
+; X86AVX2-LABEL: arg_i16_v16i16:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: andl $-32, %esp
+; X86AVX2-NEXT: subl $64, %esp
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: andl $15, %eax
+; X86AVX2-NEXT: movzwl 8(%ebp), %ecx
+; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
+; X86AVX2-NEXT: movw %cx, (%esp,%eax,2)
+; X86AVX2-NEXT: vmovaps (%esp), %ymm0
+; X86AVX2-NEXT: movl %ebp, %esp
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %ins = insertelement <16 x i16> %v, i16 %x, i32 %y
   ret <16 x i16> %ins
 }
@@ -1319,6 +1684,22 @@
 ; AVX512-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
 ; AVX512-NEXT: vpbroadcastd %edi, %ymm0 {%k1}
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: arg_i32_v8i32:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: andl $-32, %esp
+; X86AVX2-NEXT: subl $64, %esp
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: andl $7, %eax
+; X86AVX2-NEXT: movl 8(%ebp), %ecx
+; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
+; X86AVX2-NEXT: movl %ecx, (%esp,%eax,4)
+; X86AVX2-NEXT: vmovaps (%esp), %ymm0
+; X86AVX2-NEXT: movl %ebp, %esp
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %ins = insertelement <8 x i32> %v, i32 %x, i32 %y
   ret <8 x i32> %ins
 }
@@ -1357,6 +1738,31 @@
 ; AVX512-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
 ; AVX512-NEXT: vpbroadcastq %rdi, %ymm0 {%k1}
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: arg_i64_v4i64:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: pushl %esi
+; X86AVX2-NEXT: andl $-32, %esp
+; X86AVX2-NEXT: subl $96, %esp
+; X86AVX2-NEXT: movl 8(%ebp), %eax
+; X86AVX2-NEXT: movl 12(%ebp), %ecx
+; X86AVX2-NEXT: movl 16(%ebp), %edx
+; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
+; X86AVX2-NEXT: leal (%edx,%edx), %esi
+; X86AVX2-NEXT: andl $7, %esi
+; X86AVX2-NEXT: movl %eax, (%esp,%esi,4)
+; X86AVX2-NEXT: vmovaps (%esp), %ymm0
+; X86AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
+; X86AVX2-NEXT: leal 1(%edx,%edx), %eax
+; X86AVX2-NEXT: andl $7, %eax
+; X86AVX2-NEXT: movl %ecx, 32(%esp,%eax,4)
+; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %ymm0
+; X86AVX2-NEXT: leal -4(%ebp), %esp
+; X86AVX2-NEXT: popl %esi
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %ins = insertelement <4 x i64> %v, i64 %x, i32 %y
   ret <4 x i64> %ins
 }
@@ -1400,6 +1806,14 @@
 ; AVX512-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %k1
 ; AVX512-NEXT: vbroadcastss %xmm1, %ymm0 {%k1}
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: arg_f32_v8f32:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: vpbroadcastd {{[0-9]+}}(%esp), %ymm1
+; X86AVX2-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1
+; X86AVX2-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm2
+; X86AVX2-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
+; X86AVX2-NEXT: retl
   %ins = insertelement <8 x float> %v, float %x, i32 %y
   ret <8 x float> %ins
 }
@@ -1446,6 +1860,22 @@
 ; AVX512-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %k1
 ; AVX512-NEXT: vbroadcastsd %xmm1, %ymm0 {%k1}
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: arg_f64_v4f64:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: andl $-32, %esp
+; X86AVX2-NEXT: subl $64, %esp
+; X86AVX2-NEXT: movl 16(%ebp), %eax
+; X86AVX2-NEXT: andl $3, %eax
+; X86AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
+; X86AVX2-NEXT: vmovsd %xmm1, (%esp,%eax,8)
+; X86AVX2-NEXT: vmovaps (%esp), %ymm0
+; X86AVX2-NEXT: movl %ebp, %esp
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %ins = insertelement <4 x double> %v, double %x, i32 %y
   ret <4 x double> %ins
 }
@@ -1501,6 +1931,23 @@
 ; AVX512BW-NEXT: vpcmpeqb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
 ; AVX512BW-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1}
 ; AVX512BW-NEXT: retq
+;
+; X86AVX2-LABEL: load_i8_v32i8:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: andl $-32, %esp
+; X86AVX2-NEXT: subl $64, %esp
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: andl $31, %eax
+; X86AVX2-NEXT: movl 8(%ebp), %ecx
+; X86AVX2-NEXT: movb (%ecx), %cl
+; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
+; X86AVX2-NEXT: movb %cl, (%esp,%eax)
+; X86AVX2-NEXT: vmovaps (%esp), %ymm0
+; X86AVX2-NEXT: movl %ebp, %esp
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %x = load i8, i8* %p
   %ins = insertelement <32 x i8> %v, i8 %x, i32 %y
   ret <32 x i8> %ins
@@ -1557,6 +2004,23 @@
 ; AVX512BW-NEXT: vpcmpeqw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
 ; AVX512BW-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1}
 ; AVX512BW-NEXT: retq
+;
+; X86AVX2-LABEL: load_i16_v16i16:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: andl $-32, %esp
+; X86AVX2-NEXT: subl $64, %esp
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: andl $15, %eax
+; X86AVX2-NEXT: movl 8(%ebp), %ecx
+; X86AVX2-NEXT: movzwl (%ecx), %ecx
+; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
+; X86AVX2-NEXT: movw %cx, (%esp,%eax,2)
+; X86AVX2-NEXT: vmovaps (%esp), %ymm0
+; X86AVX2-NEXT: movl %ebp, %esp
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %x = load i16, i16* %p
   %ins = insertelement <16 x i16> %v, i16 %x, i32 %y
   ret <16 x i16> %ins
@@ -1597,6 +2061,23 @@
 ; AVX512-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
 ; AVX512-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1}
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: load_i32_v8i32:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: andl $-32, %esp
+; X86AVX2-NEXT: subl $64, %esp
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: andl $7, %eax
+; X86AVX2-NEXT: movl 8(%ebp), %ecx
+; X86AVX2-NEXT: movl (%ecx), %ecx
+; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
+; X86AVX2-NEXT: movl %ecx, (%esp,%eax,4)
+; X86AVX2-NEXT: vmovaps (%esp), %ymm0
+; X86AVX2-NEXT: movl %ebp, %esp
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %x = load i32, i32* %p
   %ins = insertelement <8 x i32> %v, i32 %x, i32 %y
   ret <8 x i32> %ins
@@ -1638,6 +2119,32 @@
 ; AVX512-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
 ; AVX512-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1}
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: load_i64_v4i64:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: pushl %esi
+; X86AVX2-NEXT: andl $-32, %esp
+; X86AVX2-NEXT: subl $96, %esp
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: movl 8(%ebp), %ecx
+; X86AVX2-NEXT: movl (%ecx), %edx
+; X86AVX2-NEXT: movl 4(%ecx), %ecx
+; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
+; X86AVX2-NEXT: leal (%eax,%eax), %esi
+; X86AVX2-NEXT: andl $7, %esi
+; X86AVX2-NEXT: movl %edx, (%esp,%esi,4)
+; X86AVX2-NEXT: vmovaps (%esp), %ymm0
+; X86AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%esp)
+; X86AVX2-NEXT: leal 1(%eax,%eax), %eax
+; X86AVX2-NEXT: andl $7, %eax
+; X86AVX2-NEXT: movl %ecx, 32(%esp,%eax,4)
+; X86AVX2-NEXT: vmovaps {{[0-9]+}}(%esp), %ymm0
+; X86AVX2-NEXT: leal -4(%ebp), %esp
+; X86AVX2-NEXT: popl %esi
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %x = load i64, i64* %p
   %ins = insertelement <4 x i64> %v, i64 %x, i32 %y
   ret <4 x i64> %ins
@@ -1682,6 +2189,15 @@
 ; AVX512-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
 ; AVX512-NEXT: vbroadcastss (%rdi), %ymm0 {%k1}
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: load_f32_v8f32:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX2-NEXT: vpbroadcastd {{[0-9]+}}(%esp), %ymm1
+; X86AVX2-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1
+; X86AVX2-NEXT: vbroadcastss (%eax), %ymm2
+; X86AVX2-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
+; X86AVX2-NEXT: retl
   %x = load float, float* %p
   %ins = insertelement <8 x float> %v, float %x, i32 %y
   ret <8 x float> %ins
@@ -1729,6 +2245,23 @@
 ; AVX512-NEXT: vpcmpeqq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %k1
 ; AVX512-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1}
 ; AVX512-NEXT: retq
+;
+; X86AVX2-LABEL: load_f64_v4f64:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: pushl %ebp
+; X86AVX2-NEXT: movl %esp, %ebp
+; X86AVX2-NEXT: andl $-32, %esp
+; X86AVX2-NEXT: subl $64, %esp
+; X86AVX2-NEXT: movl 12(%ebp), %eax
+; X86AVX2-NEXT: andl $3, %eax
+; X86AVX2-NEXT: movl 8(%ebp), %ecx
+; X86AVX2-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X86AVX2-NEXT: vmovaps %ymm0, (%esp)
+; X86AVX2-NEXT: vmovsd %xmm1, (%esp,%eax,8)
+; X86AVX2-NEXT: vmovaps (%esp), %ymm0
+; X86AVX2-NEXT: movl %ebp, %esp
+; X86AVX2-NEXT: popl %ebp
+; X86AVX2-NEXT: retl
   %x = load double, double* %p
   %ins = insertelement <4 x double> %v, double %x, i32 %y
   ret <4 x double> %ins
@@ -1737,18 +2270,44 @@
 ; Don't die trying to insert to an invalid index.
 define i32 @PR44139(<16 x i64>* %p) {
-; ALL-LABEL: PR44139:
-; ALL: # %bb.0:
-; ALL-NEXT: movl (%rdi), %eax
-; ALL-NEXT: leal 2147483647(%rax), %ecx
-; ALL-NEXT: testl %eax, %eax
-; ALL-NEXT: cmovnsl %eax, %ecx
-; ALL-NEXT: andl $-2147483648, %ecx # imm = 0x80000000
-; ALL-NEXT: addl %eax, %ecx
-; ALL-NEXT: # kill: def $eax killed $eax killed $rax
-; ALL-NEXT: xorl %edx, %edx
-; ALL-NEXT: divl %ecx
-; ALL-NEXT: retq
+; SSE-LABEL: PR44139:
+; SSE: # %bb.0:
+; SSE-NEXT: movl (%rdi), %eax
+; SSE-NEXT: leal 2147483647(%rax), %ecx
+; SSE-NEXT: testl %eax, %eax
+; SSE-NEXT: cmovnsl %eax, %ecx
+; SSE-NEXT: andl $-2147483648, %ecx # imm = 0x80000000
+; SSE-NEXT: addl %eax, %ecx
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: xorl %edx, %edx
+; SSE-NEXT: divl %ecx
+; SSE-NEXT: retq
+;
+; AVX-LABEL: PR44139:
+; AVX: # %bb.0:
+; AVX-NEXT: movl (%rdi), %eax
+; AVX-NEXT: leal 2147483647(%rax), %ecx
+; AVX-NEXT: testl %eax, %eax
+; AVX-NEXT: cmovnsl %eax, %ecx
+; AVX-NEXT: andl $-2147483648, %ecx # imm = 0x80000000
+; AVX-NEXT: addl %eax, %ecx
+; AVX-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX-NEXT: xorl %edx, %edx
+; AVX-NEXT: divl %ecx
+; AVX-NEXT: retq
+;
+; X86AVX2-LABEL: PR44139:
+; X86AVX2: # %bb.0:
+; X86AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86AVX2-NEXT: movl (%eax), %eax
+; X86AVX2-NEXT: leal 2147483647(%eax), %ecx
+; X86AVX2-NEXT: testl %eax, %eax
+; X86AVX2-NEXT: cmovnsl %eax, %ecx
+; X86AVX2-NEXT: andl $-2147483648, %ecx # imm = 0x80000000
+; X86AVX2-NEXT: addl %eax, %ecx
+; X86AVX2-NEXT: xorl %edx, %edx
+; X86AVX2-NEXT: divl %ecx
+; X86AVX2-NEXT: retl
   %L = load <16 x i64>, <16 x i64>* %p
   %E1 = extractelement <16 x i64> %L, i64 0
   %tempvector = insertelement <16 x i64> undef, i64 %E1, i32 0