diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -970,6 +970,17 @@
     }
     if (SimplifyDemandedBits(Src, DemandedBits, SrcElts, Known, TLO, Depth + 1))
       return true;
+
+    // Attempt to avoid multi-use src if we don't need anything from it.
+    if (!DemandedBits.isAllOnesValue() || !SrcElts.isAllOnesValue()) {
+      SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
+          Src, DemandedBits, SrcElts, TLO.DAG, Depth + 1);
+      if (DemandedSrc) {
+        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc,
+                                        Op.getOperand(1));
+        return TLO.CombineTo(Op, NewOp);
+      }
+    }
     break;
   }
   case ISD::CONCAT_VECTORS: {
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
--- a/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
@@ -129,8 +129,6 @@
 define i32 @test_v3i32(<3 x i32> %a) nounwind {
 ; CHECK-LABEL: test_v3i32:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: mov w8, #-1
-; CHECK-NEXT: mov v0.s[3], w8
 ; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
 ; CHECK-NEXT: and v1.8b, v0.8b, v1.8b
 ; CHECK-NEXT: mov w8, v0.s[1]
diff --git a/llvm/test/CodeGen/X86/avx-vperm2x128.ll b/llvm/test/CodeGen/X86/avx-vperm2x128.ll
--- a/llvm/test/CodeGen/X86/avx-vperm2x128.ll
+++ b/llvm/test/CodeGen/X86/avx-vperm2x128.ll
@@ -627,11 +627,11 @@
 define <4 x i64> @ld1_hi0_hi1_4i64(<4 x i64> %a, <4 x i64> * %pb) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: ld1_hi0_hi1_4i64:
 ; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
-; AVX1-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm1
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
 ; AVX1-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vpaddq {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: ld1_hi0_hi1_4i64:
@@ -672,12 +672,11 @@
 define <8 x i32> @ld1_hi0_hi1_8i32(<8 x i32> %a, <8 x i32> * %pb) nounwind uwtable readnone ssp {
 ; AVX1-LABEL: ld1_hi0_hi1_8i32:
 ; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,2,3,4]
-; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,3,4]
+; AVX1-NEXT: vpaddd 16(%rdi), %xmm1, %xmm2
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: ld1_hi0_hi1_8i32:
diff --git a/llvm/test/CodeGen/X86/bitcast-setcc-256.ll b/llvm/test/CodeGen/X86/bitcast-setcc-256.ll
--- a/llvm/test/CodeGen/X86/bitcast-setcc-256.ll
+++ b/llvm/test/CodeGen/X86/bitcast-setcc-256.ll
@@ -375,8 +375,6 @@
 ;
 ; AVX2-LABEL: bitcast_16i16_store:
 ; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpmovmskb %xmm0, %eax
diff --git a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
--- a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
+++ b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
@@ -296,8 +296,6 @@
 ;
 ; AVX2-LABEL: bitcast_v16i16_to_v2i8:
 ; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpmovmskb %xmm0, %ecx
diff --git a/llvm/test/CodeGen/X86/masked_load.ll b/llvm/test/CodeGen/X86/masked_load.ll
--- a/llvm/test/CodeGen/X86/masked_load.ll
+++ b/llvm/test/CodeGen/X86/masked_load.ll
@@ -3198,8 +3198,6 @@
 ;
 ; AVX2-LABEL: load_v16i16_v16i16:
 ; AVX2: ## %bb.0:
-; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpcmpgtw %ymm0, %ymm2, %ymm0
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
 ; AVX2-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT: vpmovmskb %xmm0, %eax
diff --git a/llvm/test/CodeGen/X86/movmsk-cmp.ll b/llvm/test/CodeGen/X86/movmsk-cmp.ll
--- a/llvm/test/CodeGen/X86/movmsk-cmp.ll
+++ b/llvm/test/CodeGen/X86/movmsk-cmp.ll
@@ -418,8 +418,6 @@
 ;
 ; AVX2-LABEL: allones_v16i16_sign:
 ; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpmovmskb %xmm0, %eax
@@ -473,8 +471,6 @@
 ;
 ; AVX2-LABEL: allzeros_v16i16_sign:
 ; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpcmpgtw %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vpmovmskb %xmm0, %eax
diff --git a/llvm/test/CodeGen/X86/pr31956.ll b/llvm/test/CodeGen/X86/pr31956.ll
--- a/llvm/test/CodeGen/X86/pr31956.ll
+++ b/llvm/test/CodeGen/X86/pr31956.ll
@@ -11,9 +11,8 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3,4,5,6,7]
-; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
-; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[2,0]
-; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
+; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],mem[0,2]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
 entry:
diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
@@ -1068,29 +1068,27 @@
 ;
 ; AVX2-64-LABEL: uitofp_v4i64_v4f32:
 ; AVX2-64: # %bb.0:
-; AVX2-64-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-64-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm1
-; AVX2-64-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-64-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
-; AVX2-64-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,1,1,1]
-; AVX2-64-NEXT: vpand %ymm2, %ymm0, %ymm2
-; AVX2-64-NEXT: vpsrlq $1, %ymm0, %ymm3
-; AVX2-64-NEXT: vpor %ymm2, %ymm3, %ymm2
-; AVX2-64-NEXT: vblendvpd %ymm0, %ymm2, %ymm0, %ymm0
-; AVX2-64-NEXT: vpextrq $1, %xmm0, %rax
-; AVX2-64-NEXT: vcvtsi2ss %rax, %xmm4, %xmm2
-; AVX2-64-NEXT: vmovq %xmm0, %rax
-; AVX2-64-NEXT: vcvtsi2ss %rax, %xmm4, %xmm3
+; AVX2-64-NEXT: vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
+; AVX2-64-NEXT: vpand %ymm1, %ymm0, %ymm1
+; AVX2-64-NEXT: vpsrlq $1, %ymm0, %ymm2
+; AVX2-64-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX2-64-NEXT: vblendvpd %ymm0, %ymm1, %ymm0, %ymm1
+; AVX2-64-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-64-NEXT: vcvtsi2ss %rax, %xmm3, %xmm2
+; AVX2-64-NEXT: vmovq %xmm1, %rax
+; AVX2-64-NEXT: vcvtsi2ss %rax, %xmm3, %xmm3
 ; AVX2-64-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
-; AVX2-64-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-64-NEXT: vmovq %xmm0, %rax
+; AVX2-64-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-64-NEXT: vmovq %xmm1, %rax
 ; AVX2-64-NEXT: vcvtsi2ss %rax, %xmm4, %xmm3
 ; AVX2-64-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
-; AVX2-64-NEXT: vpextrq $1, %xmm0, %rax
-; AVX2-64-NEXT: vcvtsi2ss %rax, %xmm4, %xmm0
-; AVX2-64-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
-; AVX2-64-NEXT: vaddps %xmm0, %xmm0, %xmm2
-; AVX2-64-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
+; AVX2-64-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-64-NEXT: vcvtsi2ss %rax, %xmm4, %xmm1
+; AVX2-64-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[0]
+; AVX2-64-NEXT: vaddps %xmm1, %xmm1, %xmm2
+; AVX2-64-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX2-64-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
+; AVX2-64-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0
 ; AVX2-64-NEXT: vzeroupper
 ; AVX2-64-NEXT: retq
 ;
diff --git a/llvm/test/CodeGen/X86/vec_int_to_fp.ll b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
--- a/llvm/test/CodeGen/X86/vec_int_to_fp.ll
+++ b/llvm/test/CodeGen/X86/vec_int_to_fp.ll
@@ -2194,10 +2194,7 @@
 ; AVX2-NEXT: vcvtsi2ss %rax, %xmm4, %xmm1
 ; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[0]
 ; AVX2-NEXT: vaddps %xmm1, %xmm1, %xmm2
-; AVX2-NEXT: vxorps %xmm3, %xmm3, %xmm3
-; AVX2-NEXT: vpcmpgtq %ymm0, %ymm3, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
-; AVX2-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
 ; AVX2-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
@@ -2593,29 +2590,27 @@
 ;
 ; AVX2-LABEL: uitofp_4i64_to_4f32:
 ; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,1,1,1]
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm2
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm3
-; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2
-; AVX2-NEXT: vblendvpd %ymm0, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpextrq $1, %xmm0, %rax
-; AVX2-NEXT: vcvtsi2ss %rax, %xmm4, %xmm2
-; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vcvtsi2ss %rax, %xmm4, %xmm3
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm2
+; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vblendvpd %ymm0, %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-NEXT: vcvtsi2ss %rax, %xmm3, %xmm2
+; AVX2-NEXT: vmovq %xmm1, %rax
+; AVX2-NEXT: vcvtsi2ss %rax, %xmm3, %xmm3
 ; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX2-NEXT: vmovq %xmm0, %rax
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX2-NEXT: vmovq %xmm1, %rax
 ; AVX2-NEXT: vcvtsi2ss %rax, %xmm4, %xmm3
 ; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
-; AVX2-NEXT: vpextrq $1, %xmm0, %rax
-; AVX2-NEXT: vcvtsi2ss %rax, %xmm4, %xmm0
-; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
-; AVX2-NEXT: vaddps %xmm0, %xmm0, %xmm2
-; AVX2-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-NEXT: vcvtsi2ss %rax, %xmm4, %xmm1
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[0]
+; AVX2-NEXT: vaddps %xmm1, %xmm1, %xmm2
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX2-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
@@ -4512,29 +4507,27 @@
 ; AVX2-LABEL: uitofp_load_4i64_to_4f32:
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vmovdqa (%rdi), %ymm0
-; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpcmpgtq %ymm0, %ymm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,1,1,1]
-; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm2
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm3
-; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2
-; AVX2-NEXT: vblendvpd %ymm0, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
+; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm1
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm2
+; AVX2-NEXT: vpor %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vblendvpd %ymm0, %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: vpextrq $1, %xmm0, %rax
-; AVX2-NEXT: vcvtsi2ss %rax, %xmm4, %xmm2
+; AVX2-NEXT: vcvtsi2ss %rax, %xmm3, %xmm1
 ; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vcvtsi2ss %rax, %xmm4, %xmm3
-; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
+; AVX2-NEXT: vcvtsi2ss %rax, %xmm3, %xmm2
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vcvtsi2ss %rax, %xmm4, %xmm3
-; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
+; AVX2-NEXT: vcvtsi2ss %rax, %xmm3, %xmm2
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm2[0],xmm1[3]
 ; AVX2-NEXT: vpextrq $1, %xmm0, %rax
-; AVX2-NEXT: vcvtsi2ss %rax, %xmm4, %xmm0
-; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
-; AVX2-NEXT: vaddps %xmm0, %xmm0, %xmm2
-; AVX2-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vcvtsi2ss %rax, %xmm3, %xmm0
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
+; AVX2-NEXT: vaddps %xmm0, %xmm0, %xmm1
+; AVX2-NEXT: vmovdqa (%rdi), %xmm2
+; AVX2-NEXT: vpackssdw 16(%rdi), %xmm2, %xmm2
+; AVX2-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: vzeroupper
 ; AVX2-NEXT: retq
 ;
@@ -4993,50 +4986,47 @@
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vmovaps (%rdi), %ymm0
 ; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1
-; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm3
-; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4
-; AVX2-NEXT: vpackssdw %xmm4, %xmm3, %xmm3
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [1,1,1,1]
-; AVX2-NEXT: vpand %ymm4, %ymm1, %ymm5
-; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm6
-; AVX2-NEXT: vpor %ymm5, %ymm6, %ymm5
-; AVX2-NEXT: vblendvpd %ymm1, %ymm5, %ymm1, %ymm1
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [1,1,1,1]
+; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm3
+; AVX2-NEXT: vpsrlq $1, %ymm1, %ymm4
+; AVX2-NEXT: vpor %ymm3, %ymm4, %ymm3
+; AVX2-NEXT: vblendvpd %ymm1, %ymm3, %ymm1, %ymm1
 ; AVX2-NEXT: vpextrq $1, %xmm1, %rax
-; AVX2-NEXT: vcvtsi2ss %rax, %xmm7, %xmm5
+; AVX2-NEXT: vcvtsi2ss %rax, %xmm5, %xmm3
 ; AVX2-NEXT: vmovq %xmm1, %rax
-; AVX2-NEXT: vcvtsi2ss %rax, %xmm7, %xmm6
-; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[2,3]
+; AVX2-NEXT: vcvtsi2ss %rax, %xmm5, %xmm4
+; AVX2-NEXT: vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
 ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1
 ; AVX2-NEXT: vmovq %xmm1, %rax
-; AVX2-NEXT: vcvtsi2ss %rax, %xmm7, %xmm6
-; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm6[0],xmm5[3]
+; AVX2-NEXT: vcvtsi2ss %rax, %xmm5, %xmm4
+; AVX2-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
 ; AVX2-NEXT: vpextrq $1, %xmm1, %rax
-; AVX2-NEXT: vcvtsi2ss %rax, %xmm7, %xmm1
-; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm5[0,1,2],xmm1[0]
-; AVX2-NEXT: vaddps %xmm1, %xmm1, %xmm5
-; AVX2-NEXT: vblendvps %xmm3, %xmm5, %xmm1, %xmm1
-; AVX2-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm2
-; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX2-NEXT: vpackssdw %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vpand %ymm4, %ymm0, %ymm3
-; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm4
-; AVX2-NEXT: vpor %ymm3, %ymm4, %ymm3
-; AVX2-NEXT: vblendvpd %ymm0, %ymm3, %ymm0, %ymm0
+; AVX2-NEXT: vcvtsi2ss %rax, %xmm5, %xmm1
+; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm3[0,1,2],xmm1[0]
+; AVX2-NEXT: vaddps %xmm1, %xmm1, %xmm3
+; AVX2-NEXT: vmovdqa (%rdi), %xmm4
+; AVX2-NEXT: vmovdqa 32(%rdi), %xmm5
+; AVX2-NEXT: vpackssdw 48(%rdi), %xmm5, %xmm5
+; AVX2-NEXT: vblendvps %xmm5, %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vandps %ymm2, %ymm0, %ymm2
+; AVX2-NEXT: vpsrlq $1, %ymm0, %ymm3
+; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vblendvpd %ymm0, %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT: vpextrq $1, %xmm0, %rax
-; AVX2-NEXT: vcvtsi2ss %rax, %xmm7, %xmm3
+; AVX2-NEXT: vcvtsi2ss %rax, %xmm6, %xmm2
 ; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vcvtsi2ss %rax, %xmm7, %xmm4
-; AVX2-NEXT: vinsertps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[2,3]
+; AVX2-NEXT: vcvtsi2ss %rax, %xmm6, %xmm3
+; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
 ; AVX2-NEXT: vmovq %xmm0, %rax
-; AVX2-NEXT: vcvtsi2ss %rax, %xmm7, %xmm4
-; AVX2-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
+; AVX2-NEXT: vcvtsi2ss %rax, %xmm6, %xmm3
+; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
 ; AVX2-NEXT: vpextrq $1, %xmm0, %rax
-; AVX2-NEXT: vcvtsi2ss %rax, %xmm7, %xmm0
-; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
-; AVX2-NEXT: vaddps %xmm0, %xmm0, %xmm3
-; AVX2-NEXT: vblendvps %xmm2, %xmm3, %xmm0, %xmm0
+; AVX2-NEXT: vcvtsi2ss %rax, %xmm6, %xmm0
+; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
+; AVX2-NEXT: vaddps %xmm0, %xmm0, %xmm2
+; AVX2-NEXT: vpackssdw 16(%rdi), %xmm4, %xmm3
+; AVX2-NEXT: vblendvps %xmm3, %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;