Index: x86-interleaved-access.ll
===================================================================
--- x86-interleaved-access.ll
+++ x86-interleaved-access.ll
@@ -139,3 +139,96 @@
   %add3 = add <4 x i64> %add2, %strided.v3
   ret <4 x i64> %add3
 }
+
+define void @store_factorf64_4(<16 x double>* %ptr, <4 x double> %v0, <4 x double> %v1, <4 x double> %v2, <4 x double> %v3) {
+; AVX-LABEL: store_factorf64_4:
+; AVX:       # BB#0:
+; AVX-NEXT:    vunpcklpd {{.*#+}} xmm4 = xmm2[0],xmm3[0]
+; AVX-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX-NEXT:    vunpcklpd {{.*#+}} xmm5 = xmm0[0],xmm1[0]
+; AVX-NEXT:    vblendpd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3]
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm5 = xmm2[1],xmm3[1]
+; AVX-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX-NEXT:    vunpckhpd {{.*#+}} xmm6 = xmm0[1],xmm1[1]
+; AVX-NEXT:    vblendpd {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3]
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX-NEXT:    vextractf128 $1, %ymm7, %xmm7
+; AVX-NEXT:    vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3]
+; AVX-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3]
+; AVX-NEXT:    vmovupd %ymm0, 96(%rdi)
+; AVX-NEXT:    vmovupd %ymm6, 64(%rdi)
+; AVX-NEXT:    vmovupd %ymm5, 32(%rdi)
+; AVX-NEXT:    vmovupd %ymm4, (%rdi)
+; AVX-NEXT:    vzeroupper
+; AVX-NEXT:    retq
+  %s0 = shufflevector <4 x double> %v0, <4 x double> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %s1 = shufflevector <4 x double> %v2, <4 x double> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %interleaved.vec = shufflevector <8 x double> %s0, <8 x double> %s1, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
+  store <16 x double> %interleaved.vec, <16 x double>* %ptr, align 16
+  ret void
+}
+
+define void @store_factori64_4(<16 x i64>* %ptr, <4 x i64> %v0, <4 x i64> %v1, <4 x i64> %v2, <4 x i64> %v3) {
+; AVX1-LABEL: store_factori64_4:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm4 = xmm2[0],xmm3[0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm4, %ymm0, %ymm4
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm5 = xmm0[0],xmm1[0]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3]
+; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm5 = xmm2[1],xmm3[1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm5, %ymm0, %ymm5
+; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm6 = xmm0[1],xmm1[1]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm7 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-NEXT:    vextractf128 $1, %ymm7, %xmm7
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3]
+; AVX1-NEXT:    vmovupd %ymm0, 96(%rdi)
+; AVX1-NEXT:    vmovupd %ymm6, 64(%rdi)
+; AVX1-NEXT:    vmovupd %ymm5, 32(%rdi)
+; AVX1-NEXT:    vmovupd %ymm4, (%rdi)
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: store_factori64_4:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} ymm4 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm5
+; AVX2-NEXT:    vpermq {{.*#+}} ymm6 = ymm1[0,2,2,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm5 = xmm5[0,1],xmm6[2,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm4 = ymm5[0,1,2,3],ymm4[4,5,6,7]
+; AVX2-NEXT:    vpunpckhqdq {{.*#+}} ymm5 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm6
+; AVX2-NEXT:    vpermq {{.*#+}} ymm7 = ymm0[3,1,2,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm6 = xmm7[0,1],xmm6[2,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5,6,7]
+; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm6
+; AVX2-NEXT:    vpbroadcastq %xmm3, %ymm7
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm6 = ymm6[0,1,2,3,4,5],ymm7[6,7]
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm7 = xmm0[0],xmm1[0]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3],ymm6[4,5,6,7]
+; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,1,1,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],ymm3[6,7]
+; AVX2-NEXT:    vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; AVX2-NEXT:    vmovdqu %ymm0, 32(%rdi)
+; AVX2-NEXT:    vmovdqu %ymm6, (%rdi)
+; AVX2-NEXT:    vmovdqu %ymm5, 96(%rdi)
+; AVX2-NEXT:    vmovdqu %ymm4, 64(%rdi)
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+  %s0 = shufflevector <4 x i64> %v0, <4 x i64> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %s1 = shufflevector <4 x i64> %v2, <4 x i64> %v3, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %interleaved.vec = shufflevector <8 x i64> %s0, <8 x i64> %s1, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15>
+  store <16 x i64> %interleaved.vec, <16 x i64>* %ptr, align 16
+  ret void
+}
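
Note (illustrative sketch, not part of the patch): both tests exercise a factor-4
interleaved store. The two 8-wide shuffles concatenate %v0..%v3, and the final
<16 x i32> mask (0,4,8,12, 1,5,9,13, ...) reorders the concatenation into
array-of-structs layout before one wide 128-byte store. A minimal scalar C
equivalent of that semantics, using a hypothetical store_factor4_scalar helper
name, would be:

#include <stddef.h>

/* Scalar model of the interleaved store above: element i of each of the
 * four inputs is written to consecutive slots of the destination, so the
 * memory ends up as v0[0], v1[0], v2[0], v3[0], v0[1], v1[1], ... */
void store_factor4_scalar(double *ptr, const double v0[4], const double v1[4],
                          const double v2[4], const double v3[4]) {
  for (size_t i = 0; i < 4; ++i) {
    ptr[4 * i + 0] = v0[i]; /* lane i of %v0 */
    ptr[4 * i + 1] = v1[i]; /* lane i of %v1 */
    ptr[4 * i + 2] = v2[i]; /* lane i of %v2 */
    ptr[4 * i + 3] = v3[i]; /* lane i of %v3 */
  }
}

The AVX/AVX1 checks realize this with unpcklpd/unpckhpd plus 128-bit
insert/extract and blends, while the AVX2 path mixes vpermq and vpblendd,
which is why its four ymm stores land at the offsets in a different order.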