llvm/test/CodeGen/X86/oddshuffles.ll
[… first 1,491 lines of the file not shown …]
 ; XOP-NEXT: retq
 store <8 x i32> %s2, <8 x i32>* %q2, align 4
 store <8 x i32> %s3, <8 x i32>* %q3, align 4
 ret void
 }
 define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2, <8 x i32>* %q3) nounwind {
 ; SSE2-LABEL: interleave_24i32_in:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movups (%rsi), %xmm5
-; SSE2-NEXT: movups 16(%rsi), %xmm8
-; SSE2-NEXT: movups (%rdx), %xmm6
-; SSE2-NEXT: movups 16(%rdx), %xmm3
-; SSE2-NEXT: movups (%rcx), %xmm0
-; SSE2-NEXT: movups 16(%rcx), %xmm4
-; SSE2-NEXT: movaps %xmm0, %xmm7
-; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,0],xmm5[1,0]
-; SSE2-NEXT: movaps %xmm5, %xmm1
-; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm7[0,2]
+; SSE2-NEXT: movups (%rsi), %xmm1
+; SSE2-NEXT: movups 16(%rsi), %xmm0
+; SSE2-NEXT: movups (%rdx), %xmm8
+; SSE2-NEXT: movups 16(%rdx), %xmm5
+; SSE2-NEXT: movups (%rcx), %xmm3
+; SSE2-NEXT: movups 16(%rcx), %xmm6
+; SSE2-NEXT: movaps %xmm3, %xmm7
+; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,0],xmm1[1,0]
+; SSE2-NEXT: movaps %xmm1, %xmm9
+; SSE2-NEXT: unpcklps {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
+; SSE2-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm7[0,2]
 ; SSE2-NEXT: movaps %xmm5, %xmm7
-; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm6[2,1]
-; SSE2-NEXT: movaps %xmm0, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm6[1,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,2],xmm6[3,2]
+; SSE2-NEXT: movaps %xmm6, %xmm4
+; SSE2-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm7[0,2]
+; SSE2-NEXT: movaps %xmm0, %xmm7
+; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm5[2,1]
+; SSE2-NEXT: movaps %xmm6, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm5[1,0]
 ; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm7[0,2]
-; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,2],xmm0[3,2]
-; SSE2-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm6[0,2]
-; SSE2-NEXT: movaps %xmm4, %xmm5
-; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,0],xmm8[1,0]
-; SSE2-NEXT: movaps %xmm8, %xmm6
-; SSE2-NEXT: unpcklps {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
-; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm5[0,2]
+; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,0],xmm0[1,0]
+; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm6[0,2]
 ; SSE2-NEXT: movaps %xmm8, %xmm5
-; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm3[2,1]
-; SSE2-NEXT: movaps %xmm4, %xmm7
-; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm3[1,0]
-; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm5[0,2]
-; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,2],xmm4[3,2]
-; SSE2-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm8[2],xmm4[3],xmm8[3]
-; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm3[0,2]
+; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,2],xmm3[3,2]
+; SSE2-NEXT: movaps %xmm3, %xmm6
+; SSE2-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm1[2],xmm6[3],xmm1[3]
+; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,3],xmm5[0,2]
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm8[2,1]
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm8[1,0]
+; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[0,2]
+; SSE2-NEXT: movups %xmm3, 16(%rdi)
+; SSE2-NEXT: movups %xmm6, 32(%rdi)
+; SSE2-NEXT: movups %xmm0, 48(%rdi)
+; SSE2-NEXT: movups %xmm2, 64(%rdi)
 ; SSE2-NEXT: movups %xmm4, 80(%rdi)
-; SSE2-NEXT: movups %xmm7, 64(%rdi)
-; SSE2-NEXT: movups %xmm6, 48(%rdi)
-; SSE2-NEXT: movups %xmm0, 32(%rdi)
-; SSE2-NEXT: movups %xmm2, 16(%rdi)
-; SSE2-NEXT: movups %xmm1, (%rdi)
+; SSE2-NEXT: movups %xmm9, (%rdi)
 ; SSE2-NEXT: retq
 ;
 ; SSE42-LABEL: interleave_24i32_in:
 ; SSE42: # %bb.0:
-; SSE42-NEXT: movdqu (%rsi), %xmm5
-; SSE42-NEXT: movdqu 16(%rsi), %xmm2
-; SSE42-NEXT: movdqu (%rdx), %xmm6
-; SSE42-NEXT: movdqu 16(%rdx), %xmm1
-; SSE42-NEXT: movdqu (%rcx), %xmm7
-; SSE42-NEXT: movdqu 16(%rcx), %xmm4
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,0,1,1]
-; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm5[0,1,0,1]
-; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5,6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,1,0,1]
-; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5],xmm3[6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,2,2]
-; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm7[2,3],xmm3[4,5,6,7]
-; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm5[4,5],xmm3[6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,0,1]
-; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm5[2,3],xmm7[4,5,6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,2,3,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm7[0,1,2,3],xmm5[4,5],xmm7[6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,0,1,1]
-; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm2[0,1,0,1]
-; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm6[2,3],xmm7[4,5,6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm4[0,1,0,1]
-; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm7[0,1,2,3],xmm6[4,5],xmm7[6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,1,2,2]
-; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm4[2,3],xmm7[4,5,6,7]
-; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm2[4,5],xmm7[6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
-; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm2[2,3],xmm4[4,5,6,7]
-; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
-; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm4[0,1,2,3],xmm1[4,5],xmm4[6,7]
-; SSE42-NEXT: movdqu %xmm1, 80(%rdi)
-; SSE42-NEXT: movdqu %xmm7, 64(%rdi)
-; SSE42-NEXT: movdqu %xmm6, 48(%rdi)
-; SSE42-NEXT: movdqu %xmm5, 32(%rdi)
-; SSE42-NEXT: movdqu %xmm3, 16(%rdi)
-; SSE42-NEXT: movdqu %xmm0, (%rdi)
+; SSE42-NEXT: movdqu (%rsi), %xmm8
+; SSE42-NEXT: movdqu 16(%rsi), %xmm4
+; SSE42-NEXT: movdqu (%rdx), %xmm2
+; SSE42-NEXT: movdqu 16(%rdx), %xmm5
+; SSE42-NEXT: movdqu (%rcx), %xmm3
+; SSE42-NEXT: movdqu 16(%rcx), %xmm6
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,0,1,1]
+; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm8[0,1,0,1]
+; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,3],xmm7[4,5,6,7]
+; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,1,0,1]
+; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm7[0,1,2,3],xmm1[4,5],xmm7[6,7]
+; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm4[2,3,0,1]
+; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
+; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3],xmm0[4,5,6,7]
+; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm5[2,2,3,3]
+; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm0[0,1,2,3],xmm7[4,5],xmm0[6,7]
+; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,2,2]
+; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5,6,7]
+; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5],xmm0[6,7]
+; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,0,1,1]
+; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,1]
+; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5,6,7]
+; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm6[0,1,0,1]
+; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm4[0,1,2,3],xmm5[4,5],xmm4[6,7]
+; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,3,0,1]
+; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm3[2,3,2,3]
+; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm4[2,3],xmm6[4,5,6,7]
+; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm2[2,2,3,3]
+; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4,5],xmm6[6,7]
+; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,2]
+; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5,6,7]
+; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm8[4,5],xmm2[6,7]
+; SSE42-NEXT: movdqu %xmm2, 16(%rdi)
+; SSE42-NEXT: movdqu %xmm4, 32(%rdi)
+; SSE42-NEXT: movdqu %xmm5, 48(%rdi)
+; SSE42-NEXT: movdqu %xmm0, 64(%rdi)
+; SSE42-NEXT: movdqu %xmm7, 80(%rdi)
+; SSE42-NEXT: movdqu %xmm1, (%rdi)
 ; SSE42-NEXT: retq
 ;
 ; AVX1-LABEL: interleave_24i32_in:
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vmovupd (%rsi), %ymm0
 ; AVX1-NEXT: vmovupd (%rcx), %ymm1
-; AVX1-NEXT: vmovups (%rdx), %xmm2
-; AVX1-NEXT: vmovups 16(%rdx), %xmm3
-; AVX1-NEXT: vmovups (%rsi), %xmm4
-; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm4[2,0],xmm2[2,0]
-; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm2[1,1],xmm5[0,2]
-; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,0],xmm4[0,0]
-; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm4[2,1]
+; AVX1-NEXT: vmovups 16(%rcx), %xmm2
+; AVX1-NEXT: vmovups (%rdx), %xmm3
+; AVX1-NEXT: vmovups 16(%rdx), %xmm4
+; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm4[3,0],xmm2[3,0]
+; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm2[2,1],xmm5[0,2]
+; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],xmm4[1,0]
+; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm4[2,2]
 ; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
-; AVX1-NEXT: vmovddup {{.*#+}} xmm4 = xmm1[0,0]
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm4, %ymm4
-; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
-; AVX1-NEXT: vmovups 16(%rcx), %xmm4
-; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm3[3,0],xmm4[3,0]
-; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm4[2,1],xmm5[0,2]
-; AVX1-NEXT: vshufps {{.*#+}} xmm4 = xmm4[1,0],xmm3[1,0]
-; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm4[2,0],xmm3[2,2]
-; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3
 ; AVX1-NEXT: vpermilpd {{.*#+}} ymm4 = ymm0[1,1,3,3]
 ; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
+; AVX1-NEXT: vmovups (%rsi), %xmm4
+; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm4[2,0],xmm3[2,0]
+; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm3[1,1],xmm5[0,2]
+; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,0],xmm4[0,0]
+; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,0],xmm4[2,1]
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3
+; AVX1-NEXT: vmovddup {{.*#+}} xmm4 = xmm1[0,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm4, %ymm4
 ; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
 ; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,2]
 ; AVX1-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
 ; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7]
 ; AVX1-NEXT: vpermilps {{.*#+}} ymm1 = mem[0,0,3,3,4,4,7,7]
 ; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
 ; AVX1-NEXT: vmovups %ymm0, 32(%rdi)
-; AVX1-NEXT: vmovups %ymm3, 64(%rdi)
-; AVX1-NEXT: vmovups %ymm2, (%rdi)
+; AVX1-NEXT: vmovups %ymm3, (%rdi)
+; AVX1-NEXT: vmovups %ymm2, 64(%rdi)
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
 ;
 ; AVX2-SLOW-LABEL: interleave_24i32_in:
 ; AVX2-SLOW: # %bb.0:
 ; AVX2-SLOW-NEXT: vmovups (%rsi), %ymm0
 ; AVX2-SLOW-NEXT: vmovups (%rdx), %ymm1
 ; AVX2-SLOW-NEXT: vmovups (%rcx), %ymm2
[… 26 unchanged lines not shown …]
 ; AVX2-FAST-NEXT: vmovups (%rdx), %ymm1
 ; AVX2-FAST-NEXT: vmovups (%rcx), %ymm2
 ; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm3 = [5,6,5,6,5,6,7,7]
 ; AVX2-FAST-NEXT: vpermps %ymm1, %ymm3, %ymm3
 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3]
 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,3,3,3]
 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
-; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm4 = [1,0,2,2,1,0,2,2]
-; AVX2-FAST-NEXT: vpermps %ymm1, %ymm4, %ymm4
-; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm0[0,0,2,1]
-; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
-; AVX2-FAST-NEXT: vbroadcastsd (%rcx), %ymm5
-; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
-; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
+; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[1,1,2,2]
 ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
-; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
-; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
+; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7]
+; AVX2-FAST-NEXT: vpermilps {{.*#+}} ymm4 = ymm1[0,0,3,3,4,4,7,7]
+; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
+; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm4 = [1,0,2,2,1,0,2,2]
+; AVX2-FAST-NEXT: vpermps %ymm1, %ymm4, %ymm1
+; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7]
+; AVX2-FAST-NEXT: vbroadcastsd (%rcx), %ymm1
 ; AVX2-FAST-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
-; AVX2-FAST-NEXT: vmovups %ymm0, 32(%rdi)
-; AVX2-FAST-NEXT: vmovups %ymm4, (%rdi)
+; AVX2-FAST-NEXT: vmovups %ymm0, (%rdi)
+; AVX2-FAST-NEXT: vmovups %ymm2, 32(%rdi)
 ; AVX2-FAST-NEXT: vmovups %ymm3, 64(%rdi)
 ; AVX2-FAST-NEXT: vzeroupper
 ; AVX2-FAST-NEXT: retq
 ;
 ; XOP-LABEL: interleave_24i32_in:
 ; XOP: # %bb.0:
 ; XOP-NEXT: vmovupd (%rsi), %ymm0
 ; XOP-NEXT: vmovups (%rcx), %ymm1
-; XOP-NEXT: vmovups (%rdx), %xmm2
-; XOP-NEXT: vmovups 16(%rdx), %xmm3
-; XOP-NEXT: vmovups (%rsi), %xmm4
-; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm4[2,0],xmm2[2,0]
-; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm2[1,1],xmm5[0,2]
-; XOP-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,0],xmm4[0,0]
-; XOP-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm4[2,1]
+; XOP-NEXT: vmovups 16(%rcx), %xmm2
+; XOP-NEXT: vmovups (%rdx), %xmm3
+; XOP-NEXT: vmovups 16(%rdx), %xmm4
+; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm4[3,0],xmm2[3,0]
+; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm2[2,1],xmm5[0,2]
+; XOP-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],xmm4[1,0]
+; XOP-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm4[2,2]
 ; XOP-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
-; XOP-NEXT: vmovddup {{.*#+}} xmm4 = xmm1[0,0]
-; XOP-NEXT: vinsertf128 $1, %xmm4, %ymm4, %ymm4
-; XOP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
-; XOP-NEXT: vmovups 16(%rcx), %xmm4
-; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm3[3,0],xmm4[3,0]
-; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm4[2,1],xmm5[0,2]
-; XOP-NEXT: vshufps {{.*#+}} xmm4 = xmm4[1,0],xmm3[1,0]
-; XOP-NEXT: vshufps {{.*#+}} xmm3 = xmm4[2,0],xmm3[2,2]
-; XOP-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3
 ; XOP-NEXT: vpermilpd {{.*#+}} ymm4 = ymm0[1,1,3,3]
 ; XOP-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm4[2,3,2,3]
+; XOP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
+; XOP-NEXT: vmovups (%rsi), %xmm4
+; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm4[2,0],xmm3[2,0]
+; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm3[1,1],xmm5[0,2]
+; XOP-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,0],xmm4[0,0]
+; XOP-NEXT: vshufps {{.*#+}} xmm3 = xmm3[2,0],xmm4[2,1]
+; XOP-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3
+; XOP-NEXT: vmovddup {{.*#+}} xmm4 = xmm1[0,0]
+; XOP-NEXT: vinsertf128 $1, %xmm4, %ymm4, %ymm4
 ; XOP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
 ; XOP-NEXT: vpermil2ps {{.*#+}} ymm0 = ymm1[2],ymm0[3],ymm1[2,3],ymm0[4],ymm1[5,4],ymm0[5]
 ; XOP-NEXT: vpermilps {{.*#+}} ymm1 = mem[0,0,3,3,4,4,7,7]
 ; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
 ; XOP-NEXT: vmovups %ymm0, 32(%rdi)
-; XOP-NEXT: vmovups %ymm3, 64(%rdi)
-; XOP-NEXT: vmovups %ymm2, (%rdi)
+; XOP-NEXT: vmovups %ymm3, (%rdi)
+; XOP-NEXT: vmovups %ymm2, 64(%rdi)
 ; XOP-NEXT: vzeroupper
 ; XOP-NEXT: retq
 %s1 = load <8 x i32>, <8 x i32>* %q1, align 4
 %s2 = load <8 x i32>, <8 x i32>* %q2, align 4
 %s3 = load <8 x i32>, <8 x i32>* %q3, align 4
 %t1 = shufflevector <8 x i32> %s1, <8 x i32> %s2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
 %t2 = shufflevector <8 x i32> %s3, <8 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
 %interleaved = shufflevector <16 x i32> %t1, <16 x i32> %t2, <24 x i32> <i32 0, i32 8, i32 16, i32 1, i32 9, i32 17, i32 2, i32 10, i32 18, i32 3, i32 11, i32 19, i32 4, i32 12, i32 20, i32 5, i32 13, i32 21, i32 6, i32 14, i32 22, i32 7, i32 15, i32 23>
[… remaining 59 lines of the file not shown …]
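As a reading aid only: the 24-element shufflevector mask above takes element i of the result from %s1[i/3], %s2[i/3], or %s3[i/3] depending on i mod 3, i.e. it is a 3-way interleave of the three input vectors. A minimal scalar sketch of that pattern in C follows; the function is hypothetical and is not part of the test file or of this change.

/* Scalar reference for the interleave exercised by @interleave_24i32_in:
 * for i = 0..7 it stores p[3*i+0] = q1[i], p[3*i+1] = q2[i], p[3*i+2] = q3[i],
 * matching the <0,8,16, 1,9,17, ..., 7,15,23> shuffle mask. */
void interleave_24i32_in_ref(int p[24], const int q1[8], const int q2[8], const int q3[8]) {
  for (int i = 0; i < 8; ++i) {
    p[3 * i + 0] = q1[i];
    p[3 * i + 1] = q2[i];
    p[3 * i + 2] = q3[i];
  }
}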