diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -20801,6 +20801,29 @@
       SmallVector<SDValue, 2> ConcatOps(2, DAG.getUNDEF(InVT2));
       ConcatOps[0] = VecIn2;
       VecIn2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
+    } else if (InVT1Size / VTSize > 1 && InVT1Size % VTSize == 0) {
+      if (!TLI.isExtractSubvectorCheap(VT, InVT1, NumElems) ||
+          !TLI.isTypeLegal(InVT1) || !TLI.isTypeLegal(InVT2))
+        return SDValue();
+      // If the destination vector has fewer than two elements, a shuffle plus
+      // an extract from the wider registers would cost even more.
+      if (VT.getVectorNumElements() <= 2 || !VecIn2.getNode())
+        return SDValue();
+      assert(InVT2Size <= InVT1Size &&
+             "Second input is not going to be larger than the first one.");
+
+      // VecIn1 is wider than the output, and we have another, possibly
+      // smaller input. Pad the smaller input with undefs, shuffle at the
+      // input vector width, and extract the output.
+      // The shuffle type is different from VT, so check legality again.
+      if (LegalOperations && !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, InVT1))
+        return SDValue();
+
+      if (InVT1 != InVT2) {
+        VecIn2 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT1,
+                             DAG.getUNDEF(InVT1), VecIn2, ZeroIdx);
+      }
+      ShuffleNumElems = InVT1Size / VTSize * NumElems;
     } else {
       // TODO: Support cases where the length mismatch isn't exactly by a
       // factor of 2.
diff --git a/llvm/test/CodeGen/X86/avg.ll b/llvm/test/CodeGen/X86/avg.ll
--- a/llvm/test/CodeGen/X86/avg.ll
+++ b/llvm/test/CodeGen/X86/avg.ll
@@ -2253,257 +2253,161 @@
 ; AVX512-NEXT:    pushq %r13
 ; AVX512-NEXT:    pushq %r12
 ; AVX512-NEXT:    pushq %rbx
-; AVX512-NEXT:    subq $24, %rsp
-; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512-NEXT:    vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX512-NEXT:    vpmovzxwd {{.*#+}} ymm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX512-NEXT:    vextracti128 $1, %ymm4, %xmm0
+; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT:    vpmovzxdq {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
+; AVX512-NEXT:    vmovq %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX512-NEXT:    vpextrq $1, %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX512-NEXT: vextracti128 $1, %ymm4, %xmm4 +; AVX512-NEXT: vmovq %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill +; AVX512-NEXT: vpextrq $1, %xmm4, %rbp +; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm3 +; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero ; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm4 -; AVX512-NEXT: vmovq %xmm4, %r13 +; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero +; AVX512-NEXT: vmovq %xmm4, %rdi +; AVX512-NEXT: vextracti128 $1, %ymm4, %xmm5 +; AVX512-NEXT: vmovq %xmm5, %r8 +; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero +; AVX512-NEXT: vmovq %xmm3, %r9 +; AVX512-NEXT: vpextrq $1, %xmm3, %r10 +; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm3 +; AVX512-NEXT: vmovq %xmm3, %r11 +; AVX512-NEXT: vpextrq $1, %xmm3, %rbx +; AVX512-NEXT: vpextrq $1, %xmm5, %rax ; AVX512-NEXT: vpextrq $1, %xmm4, %r12 -; AVX512-NEXT: vmovq %xmm3, %r15 -; AVX512-NEXT: vpextrq $1, %xmm3, %r14 +; AVX512-NEXT: vpextrq $1, %xmm1, %r15 +; AVX512-NEXT: vpextrq $1, %xmm0, %r14 +; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm5 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero +; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm3 +; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero +; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm4 +; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm5 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero +; AVX512-NEXT: vextracti128 $1, %ymm5, %xmm6 ; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2 +; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero +; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm7 +; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm7 = xmm7[0],zero,xmm7[1],zero,xmm7[2],zero,xmm7[3],zero +; AVX512-NEXT: vextracti128 $1, %ymm7, %xmm8 ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero -; AVX512-NEXT: vmovq %xmm2, %rbx +; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm9 +; AVX512-NEXT: vpextrq $1, %xmm8, %rsi +; AVX512-NEXT: addq %rax, %rsi +; AVX512-NEXT: vpextrq $1, %xmm7, %rdx +; AVX512-NEXT: addq %r12, %rdx +; AVX512-NEXT: vpextrq $1, %xmm4, %rcx +; AVX512-NEXT: addq %r15, %rcx +; AVX512-NEXT: vpextrq $1, %xmm3, %rax +; AVX512-NEXT: addq %r14, %rax +; AVX512-NEXT: vpextrq $1, %xmm9, %r14 +; AVX512-NEXT: leaq -1(%rbx,%r14), %r13 +; AVX512-NEXT: vmovq %xmm9, %rbx +; AVX512-NEXT: leaq -1(%r11,%rbx), %r12 ; AVX512-NEXT: vpextrq $1, %xmm2, %r11 -; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2 +; AVX512-NEXT: leaq -1(%r10,%r11), %r15 ; AVX512-NEXT: vmovq %xmm2, %r10 -; AVX512-NEXT: vpextrq $1, %xmm2, %rax -; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1 -; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero -; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero -; AVX512-NEXT: vmovq %xmm2, %rdi -; AVX512-NEXT: vpextrq $1, %xmm2, %r8 -; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2 -; AVX512-NEXT: vmovq %xmm2, %rsi -; AVX512-NEXT: vpextrq $1, %xmm2, %rdx -; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1 -; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero -; AVX512-NEXT: vmovq %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: vpextrq $1, %xmm1, 
{{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1 -; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero -; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm4 -; AVX512-NEXT: vmovq %xmm4, %rbp -; AVX512-NEXT: addq %r13, %rbp -; AVX512-NEXT: vpextrq $1, %xmm4, %r13 -; AVX512-NEXT: addq %r12, %r13 -; AVX512-NEXT: vmovq %xmm3, %rcx -; AVX512-NEXT: addq %r15, %rcx -; AVX512-NEXT: vpextrq $1, %xmm3, %r9 -; AVX512-NEXT: addq %r14, %r9 -; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2 -; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero -; AVX512-NEXT: vmovq %xmm2, %r9 -; AVX512-NEXT: addq %rbx, %r9 -; AVX512-NEXT: vpextrq $1, %xmm2, %rbx -; AVX512-NEXT: addq %r11, %rbx -; AVX512-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2 -; AVX512-NEXT: vmovq %xmm2, %r11 -; AVX512-NEXT: addq %r10, %r11 -; AVX512-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: vpextrq $1, %xmm2, %r10 -; AVX512-NEXT: addq %rax, %r10 -; AVX512-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0 -; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero -; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; AVX512-NEXT: vmovq %xmm2, %rax -; AVX512-NEXT: addq %rdi, %rax -; AVX512-NEXT: movq %rax, %r12 -; AVX512-NEXT: vpextrq $1, %xmm2, %rax -; AVX512-NEXT: addq %r8, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm2 -; AVX512-NEXT: vmovq %xmm2, %rax -; AVX512-NEXT: addq %rsi, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: vpextrq $1, %xmm2, %r15 -; AVX512-NEXT: addq %rdx, %r15 -; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0 -; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; AVX512-NEXT: vmovq %xmm0, %r10 -; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload -; AVX512-NEXT: vpextrq $1, %xmm0, %r8 -; AVX512-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload -; AVX512-NEXT: vmovq %xmm1, %rax -; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX512-NEXT: leaq -1(%r9,%r10), %r14 +; AVX512-NEXT: vmovq %xmm8, %r9 +; AVX512-NEXT: leaq -1(%r8,%r9), %r11 +; AVX512-NEXT: vmovq %xmm7, %r8 +; AVX512-NEXT: leaq -1(%rdi,%r8), %r10 +; AVX512-NEXT: vpextrq $1, %xmm6, %rdi +; AVX512-NEXT: leaq -1(%rbp,%rdi), %r9 +; AVX512-NEXT: vmovq %xmm6, %rdi +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload +; AVX512-NEXT: leaq -1(%r8,%rdi), %rdi +; AVX512-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: vpextrq $1, %xmm5, %rdi +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload +; AVX512-NEXT: leaq -1(%r8,%rdi), %rdi +; AVX512-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: vmovq %xmm5, %rdi +; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload +; AVX512-NEXT: leaq -1(%r8,%rdi), %rdi +; AVX512-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: vmovq %xmm1, %rdi +; AVX512-NEXT: vmovq %xmm4, 
%r8 +; AVX512-NEXT: leaq -1(%rdi,%r8), %rdi +; AVX512-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; AVX512-NEXT: vmovq %xmm0, %rdi -; AVX512-NEXT: addq %rax, %rdi -; AVX512-NEXT: vpextrq $1, %xmm1, %rsi -; AVX512-NEXT: vpextrq $1, %xmm0, %rdx -; AVX512-NEXT: addq %rsi, %rdx -; AVX512-NEXT: addq $-1, %rbp -; AVX512-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movl $0, %r14d -; AVX512-NEXT: adcq $-1, %r14 -; AVX512-NEXT: addq $-1, %r13 -; AVX512-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movl $0, %ebx -; AVX512-NEXT: adcq $-1, %rbx -; AVX512-NEXT: addq $-1, %rcx -; AVX512-NEXT: movq %rcx, (%rsp) # 8-byte Spill -; AVX512-NEXT: movl $0, %esi -; AVX512-NEXT: adcq $-1, %rsi -; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movl $0, %r11d -; AVX512-NEXT: adcq $-1, %r11 -; AVX512-NEXT: addq $-1, %r9 -; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: movl $0, %r9d -; AVX512-NEXT: adcq $-1, %r9 -; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movl $0, %eax -; AVX512-NEXT: adcq $-1, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movl $0, %eax -; AVX512-NEXT: adcq $-1, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movl $0, %eax -; AVX512-NEXT: adcq $-1, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: addq $-1, %r12 -; AVX512-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: vmovq %xmm3, %r8 +; AVX512-NEXT: leaq -1(%rdi,%r8), %rdi +; AVX512-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill +; AVX512-NEXT: xorl %r8d, %r8d +; AVX512-NEXT: addq $-1, %rsi +; AVX512-NEXT: movl $0, %edi +; AVX512-NEXT: adcq $-1, %rdi +; AVX512-NEXT: addq $-1, %rdx ; AVX512-NEXT: movl $0, %ebp ; AVX512-NEXT: adcq $-1, %rbp -; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movl $0, %eax -; AVX512-NEXT: adcq $-1, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill -; AVX512-NEXT: movl $0, %eax -; AVX512-NEXT: adcq $-1, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: addq $-1, %r15 -; AVX512-NEXT: movl $0, %eax -; AVX512-NEXT: adcq $-1, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: addq $-1, %r10 -; AVX512-NEXT: movl $0, %r12d -; AVX512-NEXT: adcq $-1, %r12 -; AVX512-NEXT: addq $-1, %r8 -; AVX512-NEXT: movl $0, %ecx -; AVX512-NEXT: adcq $-1, %rcx -; AVX512-NEXT: addq $-1, %rdi -; AVX512-NEXT: movl $0, %eax -; AVX512-NEXT: adcq $-1, %rax -; AVX512-NEXT: addq $-1, %rdx -; AVX512-NEXT: movl $0, %r13d -; AVX512-NEXT: adcq $-1, %r13 -; AVX512-NEXT: shldq $63, %rdx, %r13 -; AVX512-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq $63, %rdi, %rax -; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; AVX512-NEXT: shldq $63, %r8, %rcx -; AVX512-NEXT: movq %rcx, %r13 -; AVX512-NEXT: shldq $63, %r10, %r12 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload -; AVX512-NEXT: shldq $63, %r15, %r8 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload -; AVX512-NEXT: movq 
{{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: shldq $63, %rax, %rdi -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: shldq $63, %rax, %r10 -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: shldq $63, %rax, %rbp -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; AVX512-NEXT: shldq $63, %rax, %rdx -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload -; AVX512-NEXT: shldq $63, %rax, %r15 +; AVX512-NEXT: addq $-1, %rcx +; AVX512-NEXT: movl $0, %ebx +; AVX512-NEXT: adcq $-1, %rbx +; AVX512-NEXT: addq $-1, %rax +; AVX512-NEXT: adcq $-1, %r8 +; AVX512-NEXT: shldq $63, %rax, %r8 +; AVX512-NEXT: shldq $63, %rcx, %rbx +; AVX512-NEXT: shldq $63, %rdx, %rbp +; AVX512-NEXT: shldq $63, %rsi, %rdi +; AVX512-NEXT: shrq %r13 +; AVX512-NEXT: vmovq %r13, %xmm0 +; AVX512-NEXT: shrq %r12 +; AVX512-NEXT: vmovq %r12, %xmm1 +; AVX512-NEXT: shrq %r15 +; AVX512-NEXT: vmovq %r15, %xmm2 +; AVX512-NEXT: shrq %r14 +; AVX512-NEXT: vmovq %r14, %xmm3 +; AVX512-NEXT: vmovq %rdi, %xmm4 +; AVX512-NEXT: shrq %r11 +; AVX512-NEXT: vmovq %r11, %xmm5 +; AVX512-NEXT: vmovq %rbp, %xmm6 +; AVX512-NEXT: shrq %r10 +; AVX512-NEXT: vmovq %r10, %xmm7 +; AVX512-NEXT: shrq %r9 +; AVX512-NEXT: vmovq %r9, %xmm8 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload -; AVX512-NEXT: shldq $63, %rax, %rcx +; AVX512-NEXT: shrq %rax +; AVX512-NEXT: vmovq %rax, %xmm9 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: shldq $63, %rax, %r9 +; AVX512-NEXT: shrq %rax +; AVX512-NEXT: vmovq %rax, %xmm10 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: shldq $63, %rax, %r11 -; AVX512-NEXT: movq (%rsp), %rax # 8-byte Reload -; AVX512-NEXT: shldq $63, %rax, %rsi +; AVX512-NEXT: shrq %rax +; AVX512-NEXT: vmovq %rax, %xmm11 +; AVX512-NEXT: vmovq %rbx, %xmm12 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: shldq $63, %rax, %rbx +; AVX512-NEXT: shrq %rax +; AVX512-NEXT: vmovq %rax, %xmm13 +; AVX512-NEXT: vmovq %r8, %xmm14 ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload -; AVX512-NEXT: shldq $63, %rax, %r14 -; AVX512-NEXT: vmovq %r14, %xmm0 -; AVX512-NEXT: vmovq %rbx, %xmm1 -; AVX512-NEXT: vmovq %r11, %xmm2 -; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 -; AVX512-NEXT: vmovq %rsi, %xmm1 -; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 -; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 -; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm2 -; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1 -; AVX512-NEXT: vmovd %esi, %xmm3 -; AVX512-NEXT: vmovd %xmm1, %eax -; AVX512-NEXT: vpinsrb $1, %eax, %xmm3, %xmm1 -; AVX512-NEXT: vmovd %xmm2, %eax -; AVX512-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 -; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm0 -; AVX512-NEXT: vmovd %xmm0, %eax -; AVX512-NEXT: vpinsrb $3, %eax, %xmm1, %xmm0 -; AVX512-NEXT: vpinsrb $4, %r9d, %xmm0, %xmm0 -; AVX512-NEXT: vmovq %r9, %xmm1 -; AVX512-NEXT: vmovq %rcx, %xmm2 -; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 -; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX512-NEXT: vmovd %xmm2, %eax -; AVX512-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0 -; AVX512-NEXT: vmovq %r15, %xmm2 -; AVX512-NEXT: vmovq 
%rdx, %xmm3 -; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 -; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1 -; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm2 -; AVX512-NEXT: vmovd %xmm2, %eax -; AVX512-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0 -; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm1 -; AVX512-NEXT: vmovd %xmm1, %eax -; AVX512-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0 -; AVX512-NEXT: vpinsrb $8, %ebp, %xmm0, %xmm0 -; AVX512-NEXT: vmovq %rbp, %xmm1 -; AVX512-NEXT: vmovq %r10, %xmm2 -; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 -; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX512-NEXT: vmovd %xmm2, %eax -; AVX512-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 -; AVX512-NEXT: vmovq %rdi, %xmm2 -; AVX512-NEXT: vmovq %r8, %xmm3 -; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 -; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1 -; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm2 -; AVX512-NEXT: vmovd %xmm2, %eax -; AVX512-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 -; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm1 -; AVX512-NEXT: vmovd %xmm1, %eax -; AVX512-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 -; AVX512-NEXT: vpinsrb $12, %r12d, %xmm0, %xmm0 -; AVX512-NEXT: vmovq %r12, %xmm1 -; AVX512-NEXT: vmovq %r13, %xmm2 -; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 -; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 -; AVX512-NEXT: vmovd %xmm2, %eax -; AVX512-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 -; AVX512-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 8-byte Folded Reload -; AVX512-NEXT: # xmm2 = mem[0],zero -; AVX512-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 8-byte Folded Reload -; AVX512-NEXT: # xmm3 = mem[0],zero -; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2 -; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm1 -; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm2 -; AVX512-NEXT: vmovd %xmm2, %eax -; AVX512-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 -; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm1 -; AVX512-NEXT: vmovd %xmm1, %eax -; AVX512-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 +; AVX512-NEXT: shrq %rax +; AVX512-NEXT: vmovq %rax, %xmm15 +; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] +; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] +; AVX512-NEXT: vpbroadcastw %xmm0, %xmm0 +; AVX512-NEXT: vpbroadcastw %xmm1, %xmm1 +; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7] +; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7] +; AVX512-NEXT: vpbroadcastw %xmm1, %xmm1 +; AVX512-NEXT: vpbroadcastw %xmm2, %xmm2 +; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3] +; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7] +; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7] +; AVX512-NEXT: 
vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3] +; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7] +; AVX512-NEXT: vpsllq $48, %xmm2, %xmm2 +; AVX512-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7] +; AVX512-NEXT: vpbroadcastw %xmm3, %xmm3 +; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2],xmm2[3],xmm3[4,5,6,7] +; AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3] +; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3] ; AVX512-NEXT: vmovdqu %xmm0, (%rax) -; AVX512-NEXT: addq $24, %rsp ; AVX512-NEXT: popq %rbx ; AVX512-NEXT: popq %r12 ; AVX512-NEXT: popq %r13 diff --git a/llvm/test/CodeGen/X86/pr29112.ll b/llvm/test/CodeGen/X86/pr29112.ll --- a/llvm/test/CodeGen/X86/pr29112.ll +++ b/llvm/test/CodeGen/X86/pr29112.ll @@ -8,57 +8,57 @@ define <4 x float> @bar(ptr %a1p, ptr %a2p, <4 x float> %a3, <4 x float> %a4, <16 x float>%c1, <16 x float>%c2) { ; CHECK-LABEL: bar: ; CHECK: # %bb.0: -; CHECK-NEXT: subq $72, %rsp -; CHECK-NEXT: .cfi_def_cfa_offset 80 +; CHECK-NEXT: subq $136, %rsp +; CHECK-NEXT: .cfi_def_cfa_offset 144 ; CHECK-NEXT: vmovaps %xmm1, %xmm13 -; CHECK-NEXT: vmovaps {{.*#+}} xmm0 = [4,22,1,17] +; CHECK-NEXT: vmovaps {{.*#+}} xmm5 = [3,20,1,17] +; CHECK-NEXT: vpermi2ps %zmm3, %zmm2, %zmm5 +; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[4],ymm3[4],ymm2[5],ymm3[5] +; CHECK-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[2,1,2,3] +; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [4,21,1,17,4,21,5,21] ; CHECK-NEXT: vpermi2ps %zmm3, %zmm2, %zmm0 -; CHECK-NEXT: vmovaps {{.*#+}} xmm12 = [4,30,1,22] +; CHECK-NEXT: vmovaps %zmm0, %zmm6 +; CHECK-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill +; CHECK-NEXT: vmovaps {{.*#+}} xmm4 = [4,20,1,27] +; CHECK-NEXT: vpermi2ps %zmm3, %zmm2, %zmm4 +; CHECK-NEXT: vmovaps {{.*#+}} ymm7 = [5,20,1,19,5,20,5,23] +; CHECK-NEXT: vpermi2ps %zmm3, %zmm2, %zmm7 +; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [4,20,1,19,4,20,5,23] +; CHECK-NEXT: vpermi2ps %zmm3, %zmm2, %zmm0 +; CHECK-NEXT: vmovaps {{.*#+}} xmm12 = [4,28,1,17] ; CHECK-NEXT: vpermi2ps %zmm3, %zmm2, %zmm12 -; CHECK-NEXT: vmovaps {{.*#+}} xmm8 = [4,28,1,29] +; CHECK-NEXT: vmovaps {{.*#+}} ymm8 = [5,20,1,17,5,20,5,21] ; CHECK-NEXT: vpermi2ps %zmm3, %zmm2, %zmm8 -; CHECK-NEXT: vmovaps {{.*#+}} xmm7 = <5,20,u,u> -; CHECK-NEXT: vpermi2ps %zmm3, %zmm2, %zmm7 -; CHECK-NEXT: vmovaps {{.*#+}} xmm4 = [4,21,1,7] -; CHECK-NEXT: vpermi2ps %zmm3, %zmm2, %zmm4 -; CHECK-NEXT: vextractf128 $1, %ymm3, %xmm5 -; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm9 -; CHECK-NEXT: vunpcklps {{.*#+}} xmm9 = xmm9[0],xmm5[0],xmm9[1],xmm5[1] -; CHECK-NEXT: vinsertps {{.*#+}} xmm10 = xmm9[0,1],xmm2[1],xmm9[3] -; CHECK-NEXT: vinsertps {{.*#+}} xmm1 = xmm10[0,1,2],xmm3[1] -; CHECK-NEXT: vinsertps {{.*#+}} xmm6 = xmm4[0,1,2],xmm3[1] -; CHECK-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vextractf32x4 $2, %zmm3, %xmm4 -; CHECK-NEXT: vblendps {{.*#+}} xmm4 = xmm10[0,1,2],xmm4[3] -; CHECK-NEXT: vpermilps {{.*#+}} xmm11 = xmm2[3,3,3,3] -; CHECK-NEXT: vunpcklps {{.*#+}} xmm5 = xmm11[0],xmm5[0],xmm11[1],xmm5[1] -; CHECK-NEXT: vshufps {{.*#+}} xmm5 = xmm5[0,1],xmm2[1,3] -; CHECK-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1,2],xmm3[1] -; CHECK-NEXT: vinsertps {{.*#+}} xmm11 = 
xmm7[0,1],xmm2[1],xmm7[3] -; CHECK-NEXT: vblendps {{.*#+}} xmm7 = xmm11[0,1,2],xmm3[3] -; CHECK-NEXT: vblendps {{.*#+}} xmm10 = xmm10[0,1,2],xmm3[3] -; CHECK-NEXT: vinsertps {{.*#+}} xmm8 = xmm8[0,1,2],xmm3[1] -; CHECK-NEXT: vinsertps {{.*#+}} xmm11 = xmm11[0,1,2],xmm3[1] -; CHECK-NEXT: vaddps %xmm8, %xmm11, %xmm8 -; CHECK-NEXT: vshufps {{.*#+}} xmm2 = xmm9[0,1],xmm2[3,3] -; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[2] -; CHECK-NEXT: vaddps %xmm0, %xmm2, %xmm2 -; CHECK-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; CHECK-NEXT: vaddps %xmm1, %xmm12, %xmm9 -; CHECK-NEXT: vaddps %xmm1, %xmm1, %xmm3 +; CHECK-NEXT: vmovaps {{.*#+}} xmm9 = [4,30,1,22] +; CHECK-NEXT: vpermi2ps %zmm3, %zmm2, %zmm9 +; CHECK-NEXT: vmovaps {{.*#+}} ymm10 = [4,22,1,17,4,22,5,21] +; CHECK-NEXT: vpermi2ps %zmm3, %zmm2, %zmm10 +; CHECK-NEXT: vmovaps {{.*#+}} ymm11 = [4,20,3,18,4,20,7,22] +; CHECK-NEXT: vpermi2ps %zmm3, %zmm2, %zmm11 +; CHECK-NEXT: vaddps %xmm10, %xmm11, %xmm2 +; CHECK-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill +; CHECK-NEXT: vaddps %xmm1, %xmm9, %xmm3 +; CHECK-NEXT: vaddps %xmm12, %xmm8, %xmm9 +; CHECK-NEXT: vaddps %xmm1, %xmm1, %xmm8 ; CHECK-NEXT: vaddps %xmm0, %xmm10, %xmm0 -; CHECK-NEXT: vaddps %xmm0, %xmm8, %xmm0 +; CHECK-NEXT: vaddps %xmm0, %xmm9, %xmm0 ; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0 -; CHECK-NEXT: vmovaps %xmm3, {{[0-9]+}}(%rsp) -; CHECK-NEXT: vmovaps %xmm9, (%rsp) +; CHECK-NEXT: vmovaps %xmm8, {{[0-9]+}}(%rsp) +; CHECK-NEXT: vmovaps %xmm3, (%rsp) +; CHECK-NEXT: # kill: def $xmm1 killed $xmm1 killed $ymm1 ; CHECK-NEXT: vmovaps %xmm13, %xmm3 +; CHECK-NEXT: # kill: def $xmm4 killed $xmm4 killed $zmm4 +; CHECK-NEXT: # kill: def $xmm5 killed $xmm5 killed $zmm5 +; CHECK-NEXT: # kill: def $xmm6 killed $xmm6 killed $zmm6 +; CHECK-NEXT: # kill: def $xmm7 killed $xmm7 killed $zmm7 ; CHECK-NEXT: vzeroupper ; CHECK-NEXT: callq foo@PLT -; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload +; CHECK-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload ; CHECK-NEXT: vaddps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload ; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0 -; CHECK-NEXT: addq $72, %rsp +; CHECK-NEXT: addq $136, %rsp ; CHECK-NEXT: .cfi_def_cfa_offset 8 +; CHECK-NEXT: vzeroupper ; CHECK-NEXT: retq %a1 = shufflevector <16 x float>%c1, <16 x float>%c2, <4 x i32> %a2 = shufflevector <16 x float>%c1, <16 x float>%c2, <4 x i32> diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll --- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll +++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-5.ll @@ -566,71 +566,23 @@ ; ; AVX512-LABEL: vf8: ; AVX512: # %bb.0: -; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = <4,9,14,u,u,u,u,u> -; AVX512-NEXT: vmovdqu64 (%rdi), %zmm1 -; AVX512-NEXT: vpermw %zmm1, %zmm0, %zmm0 -; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = <3,8,13,u,u,u,u,u> -; AVX512-NEXT: vpermw %zmm1, %zmm2, %zmm4 -; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = <2,7,12,u,u,u,u,u> -; AVX512-NEXT: vpermw %zmm1, %zmm2, %zmm5 -; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = <1,6,11,u,u,u,u,u> -; AVX512-NEXT: vpermw %zmm1, %zmm2, %zmm6 -; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = <0,5,10,u,u,u,u,u> -; AVX512-NEXT: vpermw %zmm1, %zmm2, %zmm1 -; AVX512-NEXT: vmovdqa 16(%rdi), %xmm2 -; AVX512-NEXT: vpextrw $7, %xmm2, %eax -; AVX512-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1 -; AVX512-NEXT: vmovdqa 32(%rdi), %xmm2 -; AVX512-NEXT: vpblendw {{.*#+}} xmm3 = 
xmm1[0,1,2,3],xmm2[4],xmm1[5,6,7] -; AVX512-NEXT: vmovdqa 48(%rdi), %xmm1 -; AVX512-NEXT: vpextrw $1, %xmm1, %eax -; AVX512-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3 -; AVX512-NEXT: vpblendw {{.*#+}} xmm7 = xmm3[0,1,2,3,4,5],xmm1[6],xmm3[7] -; AVX512-NEXT: vmovdqa 64(%rdi), %xmm3 -; AVX512-NEXT: vpextrw $3, %xmm3, %eax -; AVX512-NEXT: vpinsrw $7, %eax, %xmm7, %xmm7 -; AVX512-NEXT: vmovd %xmm2, %eax -; AVX512-NEXT: vpinsrw $3, %eax, %xmm6, %xmm6 -; AVX512-NEXT: vpextrw $5, %xmm2, %eax -; AVX512-NEXT: vpinsrw $4, %eax, %xmm6, %xmm6 -; AVX512-NEXT: vpextrw $2, %xmm1, %eax -; AVX512-NEXT: vpinsrw $5, %eax, %xmm6, %xmm6 -; AVX512-NEXT: vpextrw $7, %xmm1, %eax -; AVX512-NEXT: vpinsrw $6, %eax, %xmm6, %xmm6 -; AVX512-NEXT: vpextrw $4, %xmm3, %eax -; AVX512-NEXT: vpinsrw $7, %eax, %xmm6, %xmm6 -; AVX512-NEXT: vpextrw $1, %xmm2, %eax -; AVX512-NEXT: vpinsrw $3, %eax, %xmm5, %xmm5 -; AVX512-NEXT: vpextrw $6, %xmm2, %eax -; AVX512-NEXT: vpinsrw $4, %eax, %xmm5, %xmm5 -; AVX512-NEXT: vpextrw $3, %xmm1, %eax -; AVX512-NEXT: vpinsrw $5, %eax, %xmm5, %xmm5 -; AVX512-NEXT: vmovd %xmm3, %eax -; AVX512-NEXT: vpinsrw $6, %eax, %xmm5, %xmm5 -; AVX512-NEXT: vpextrw $5, %xmm3, %eax -; AVX512-NEXT: vpinsrw $7, %eax, %xmm5, %xmm5 -; AVX512-NEXT: vpextrw $2, %xmm2, %eax -; AVX512-NEXT: vpinsrw $3, %eax, %xmm4, %xmm4 -; AVX512-NEXT: vpextrw $7, %xmm2, %eax -; AVX512-NEXT: vpinsrw $4, %eax, %xmm4, %xmm4 -; AVX512-NEXT: vpextrw $4, %xmm1, %eax -; AVX512-NEXT: vpinsrw $5, %eax, %xmm4, %xmm4 -; AVX512-NEXT: vpextrw $1, %xmm3, %eax -; AVX512-NEXT: vpinsrw $6, %eax, %xmm4, %xmm4 -; AVX512-NEXT: vpextrw $6, %xmm3, %eax -; AVX512-NEXT: vpinsrw $7, %eax, %xmm4, %xmm4 -; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[3],xmm0[4,5,6,7] -; AVX512-NEXT: vmovd %xmm1, %eax -; AVX512-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0 -; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm1[5],xmm0[6,7] -; AVX512-NEXT: vpextrw $2, %xmm3, %eax -; AVX512-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0 -; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm3[7] -; AVX512-NEXT: vmovdqa %xmm7, (%rsi) -; AVX512-NEXT: vmovdqa %xmm6, (%rdx) -; AVX512-NEXT: vmovdqa %xmm5, (%rcx) -; AVX512-NEXT: vmovdqa %xmm4, (%r8) -; AVX512-NEXT: vmovdqa %xmm0, (%r9) +; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 +; AVX512-NEXT: vmovdqa 64(%rdi), %ymm1 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [0,5,10,15,20,25,30,35] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm2 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [1,6,11,16,21,26,31,36] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm3 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [2,7,12,17,22,27,32,37] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm4 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [3,8,13,18,23,28,33,38] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm5 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [4,9,14,19,24,29,34,39] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm6 +; AVX512-NEXT: vmovdqa %xmm2, (%rsi) +; AVX512-NEXT: vmovdqa %xmm3, (%rdx) +; AVX512-NEXT: vmovdqa %xmm4, (%rcx) +; AVX512-NEXT: vmovdqa %xmm5, (%r8) +; AVX512-NEXT: vmovdqa %xmm6, (%r9) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %wide.vec = load <40 x i16>, ptr %in.vec, align 32 diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll --- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll +++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll @@ -684,95 +684,26 @@ ; AVX512-LABEL: vf8: ; AVX512: # %bb.0: ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX512-NEXT: vmovdqa {{.*#+}} 
xmm0 = <3,9,15,u,u,u,u,u> -; AVX512-NEXT: vmovdqu64 (%rdi), %zmm1 -; AVX512-NEXT: vpermw %zmm1, %zmm0, %zmm4 -; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = <2,8,14,u,u,u,u,u> -; AVX512-NEXT: vpermw %zmm1, %zmm0, %zmm5 -; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = <1,7,13,u,u,u,u,u> -; AVX512-NEXT: vpermw %zmm1, %zmm0, %zmm6 -; AVX512-NEXT: vmovdqa {{.*#+}} xmm0 = <0,6,12,u,u,u,u,u> -; AVX512-NEXT: vpermw %zmm1, %zmm0, %zmm2 -; AVX512-NEXT: vmovdqa (%rdi), %xmm7 -; AVX512-NEXT: vmovdqa 16(%rdi), %xmm8 -; AVX512-NEXT: vmovdqa 32(%rdi), %xmm1 -; AVX512-NEXT: vmovdqa 48(%rdi), %xmm0 -; AVX512-NEXT: vpextrw $2, %xmm1, %r10d -; AVX512-NEXT: vpinsrw $3, %r10d, %xmm2, %xmm2 -; AVX512-NEXT: vmovd %xmm0, %r10d -; AVX512-NEXT: vpinsrw $4, %r10d, %xmm2, %xmm2 -; AVX512-NEXT: vpextrw $6, %xmm0, %r10d -; AVX512-NEXT: vpinsrw $5, %r10d, %xmm2, %xmm2 -; AVX512-NEXT: vmovdqa 64(%rdi), %xmm3 -; AVX512-NEXT: vpextrw $4, %xmm3, %r10d -; AVX512-NEXT: vpinsrw $6, %r10d, %xmm2, %xmm9 -; AVX512-NEXT: vmovdqa 80(%rdi), %xmm2 -; AVX512-NEXT: vpextrw $2, %xmm2, %edi -; AVX512-NEXT: vpinsrw $7, %edi, %xmm9, %xmm9 -; AVX512-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2],xmm1[3],xmm6[4,5,6,7] -; AVX512-NEXT: vpextrw $1, %xmm0, %edi -; AVX512-NEXT: vpinsrw $4, %edi, %xmm6, %xmm6 -; AVX512-NEXT: vpextrw $7, %xmm0, %edi -; AVX512-NEXT: vpinsrw $5, %edi, %xmm6, %xmm6 -; AVX512-NEXT: vpextrw $5, %xmm3, %edi -; AVX512-NEXT: vpinsrw $6, %edi, %xmm6, %xmm6 -; AVX512-NEXT: vpextrw $3, %xmm2, %edi -; AVX512-NEXT: vpinsrw $7, %edi, %xmm6, %xmm6 -; AVX512-NEXT: vpextrw $4, %xmm1, %edi -; AVX512-NEXT: vpinsrw $3, %edi, %xmm5, %xmm5 -; AVX512-NEXT: vpextrw $2, %xmm0, %edi -; AVX512-NEXT: vpinsrw $4, %edi, %xmm5, %xmm5 -; AVX512-NEXT: vmovd %xmm3, %edi -; AVX512-NEXT: vpinsrw $5, %edi, %xmm5, %xmm5 -; AVX512-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5],xmm3[6],xmm5[7] -; AVX512-NEXT: vpextrw $4, %xmm2, %edi -; AVX512-NEXT: vpinsrw $7, %edi, %xmm5, %xmm5 -; AVX512-NEXT: vpextrw $5, %xmm1, %edi -; AVX512-NEXT: vpinsrw $3, %edi, %xmm4, %xmm4 -; AVX512-NEXT: vpextrw $3, %xmm0, %edi -; AVX512-NEXT: vpinsrw $4, %edi, %xmm4, %xmm4 -; AVX512-NEXT: vpextrw $1, %xmm3, %edi -; AVX512-NEXT: vpinsrw $5, %edi, %xmm4, %xmm4 -; AVX512-NEXT: vpextrw $7, %xmm3, %edi -; AVX512-NEXT: vpinsrw $6, %edi, %xmm4, %xmm4 -; AVX512-NEXT: vpextrw $5, %xmm2, %edi -; AVX512-NEXT: vpinsrw $7, %edi, %xmm4, %xmm4 -; AVX512-NEXT: vpextrw $2, %xmm8, %edi -; AVX512-NEXT: vpextrw $4, %xmm7, %r10d -; AVX512-NEXT: vmovd %r10d, %xmm10 -; AVX512-NEXT: vpinsrw $1, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vmovd %xmm1, %edi -; AVX512-NEXT: vpinsrw $2, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vpextrw $6, %xmm1, %edi -; AVX512-NEXT: vpinsrw $3, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1,2,3],xmm0[4],xmm10[5,6,7] -; AVX512-NEXT: vpextrw $2, %xmm3, %edi -; AVX512-NEXT: vpinsrw $5, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vmovd %xmm2, %edi -; AVX512-NEXT: vpinsrw $6, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vpextrw $6, %xmm2, %edi -; AVX512-NEXT: vpinsrw $7, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vpextrw $3, %xmm8, %edi -; AVX512-NEXT: vpextrw $5, %xmm7, %r10d -; AVX512-NEXT: vmovd %r10d, %xmm7 -; AVX512-NEXT: vpinsrw $1, %edi, %xmm7, %xmm7 -; AVX512-NEXT: vpextrw $1, %xmm1, %edi -; AVX512-NEXT: vpinsrw $2, %edi, %xmm7, %xmm7 -; AVX512-NEXT: vpextrw $7, %xmm1, %edi -; AVX512-NEXT: vpinsrw $3, %edi, %xmm7, %xmm1 -; AVX512-NEXT: vpextrw $5, %xmm0, %edi -; AVX512-NEXT: vpinsrw $4, %edi, %xmm1, %xmm0 -; AVX512-NEXT: vpextrw $3, %xmm3, %edi -; AVX512-NEXT: vpinsrw $5, %edi, %xmm0, %xmm0 
-; AVX512-NEXT: vpextrw $1, %xmm2, %edi -; AVX512-NEXT: vpinsrw $6, %edi, %xmm0, %xmm0 -; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6],xmm2[7] -; AVX512-NEXT: vmovdqa %xmm9, (%rsi) -; AVX512-NEXT: vmovdqa %xmm6, (%rdx) -; AVX512-NEXT: vmovdqa %xmm5, (%rcx) -; AVX512-NEXT: vmovdqa %xmm4, (%r8) -; AVX512-NEXT: vmovdqa %xmm10, (%r9) -; AVX512-NEXT: vmovdqa %xmm0, (%rax) +; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 +; AVX512-NEXT: vmovdqa 64(%rdi), %ymm1 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [0,6,12,18,24,30,36,42] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm2 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [1,7,13,19,25,31,37,43] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm3 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [2,8,14,20,26,32,38,44] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm4 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [3,9,15,21,27,33,39,45] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm5 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [4,10,16,22,28,34,40,46] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm6 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm7 = [5,11,17,23,29,35,41,47] +; AVX512-NEXT: vpermi2w %zmm1, %zmm0, %zmm7 +; AVX512-NEXT: vmovdqa %xmm2, (%rsi) +; AVX512-NEXT: vmovdqa %xmm3, (%rdx) +; AVX512-NEXT: vmovdqa %xmm4, (%rcx) +; AVX512-NEXT: vmovdqa %xmm5, (%r8) +; AVX512-NEXT: vmovdqa %xmm6, (%r9) +; AVX512-NEXT: vmovdqa %xmm7, (%rax) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %wide.vec = load <48 x i16>, ptr %in.vec, align 32 diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll --- a/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll +++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i32-stride-6.ll @@ -287,51 +287,27 @@ ; AVX512-LABEL: load_i32_stride6_vf4: ; AVX512: # %bb.0: ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX512-NEXT: vmovdqa (%rdi), %xmm2 -; AVX512-NEXT: vmovdqa 16(%rdi), %xmm0 -; AVX512-NEXT: vmovdqa 32(%rdi), %xmm1 -; AVX512-NEXT: vmovdqa 48(%rdi), %xmm3 -; AVX512-NEXT: vpextrd $2, %xmm0, %r10d -; AVX512-NEXT: vpinsrd $1, %r10d, %xmm2, %xmm4 -; AVX512-NEXT: vmovd %xmm3, %r10d -; AVX512-NEXT: vpinsrd $2, %r10d, %xmm4, %xmm4 -; AVX512-NEXT: vmovdqa 64(%rdi), %xmm5 -; AVX512-NEXT: vpextrd $2, %xmm5, %r10d -; AVX512-NEXT: vpinsrd $3, %r10d, %xmm4, %xmm4 -; AVX512-NEXT: vpextrd $1, %xmm3, %r10d -; AVX512-NEXT: vpblendd {{.*#+}} xmm6 = xmm2[0,1],xmm0[2,3] -; AVX512-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,3,2,3] -; AVX512-NEXT: vpinsrd $2, %r10d, %xmm6, %xmm6 -; AVX512-NEXT: vpblendd {{.*#+}} xmm6 = xmm6[0,1,2],xmm5[3] -; AVX512-NEXT: vpblendd {{.*#+}} xmm7 = xmm1[0,1],xmm2[2,3] -; AVX512-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[2,0,2,3] -; AVX512-NEXT: vpblendd {{.*#+}} xmm7 = xmm7[0,1],xmm3[2],xmm7[3] -; AVX512-NEXT: vmovdqa 80(%rdi), %xmm8 -; AVX512-NEXT: vmovd %xmm8, %edi -; AVX512-NEXT: vpinsrd $3, %edi, %xmm7, %xmm7 -; AVX512-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,3,3,3] -; AVX512-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm1[1],xmm2[2,3] -; AVX512-NEXT: vpextrd $3, %xmm3, %edi -; AVX512-NEXT: vpinsrd $2, %edi, %xmm2, %xmm2 -; AVX512-NEXT: vpextrd $1, %xmm8, %edi -; AVX512-NEXT: vpinsrd $3, %edi, %xmm2, %xmm2 -; AVX512-NEXT: vpextrd $2, %xmm1, %edi -; AVX512-NEXT: vpinsrd $1, %edi, %xmm0, %xmm3 -; AVX512-NEXT: vmovd %xmm5, %edi -; AVX512-NEXT: vpinsrd $2, %edi, %xmm3, %xmm3 -; AVX512-NEXT: vpextrd $2, %xmm8, %edi -; AVX512-NEXT: vpinsrd $3, %edi, %xmm3, %xmm3 -; AVX512-NEXT: vpextrd $1, %xmm5, %edi -; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3] -; AVX512-NEXT: vpshufd {{.*#+}} xmm0 
= xmm0[1,3,2,3] -; AVX512-NEXT: vpinsrd $2, %edi, %xmm0, %xmm0 -; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm8[3] -; AVX512-NEXT: vmovdqa %xmm4, (%rsi) -; AVX512-NEXT: vmovdqa %xmm6, (%rdx) -; AVX512-NEXT: vmovdqa %xmm7, (%rcx) -; AVX512-NEXT: vmovdqa %xmm2, (%r8) -; AVX512-NEXT: vmovdqa %xmm3, (%r9) -; AVX512-NEXT: vmovdqa %xmm0, (%rax) +; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 +; AVX512-NEXT: vmovdqa 64(%rdi), %ymm1 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [0,6,12,18] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [1,7,13,19] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm3 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm4 = [2,8,14,20] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm4 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [3,9,15,21] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm5 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [4,10,16,22] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm6 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm7 = [5,11,17,23] +; AVX512-NEXT: vpermi2d %zmm1, %zmm0, %zmm7 +; AVX512-NEXT: vmovdqa %xmm2, (%rsi) +; AVX512-NEXT: vmovdqa %xmm3, (%rdx) +; AVX512-NEXT: vmovdqa %xmm4, (%rcx) +; AVX512-NEXT: vmovdqa %xmm5, (%r8) +; AVX512-NEXT: vmovdqa %xmm6, (%r9) +; AVX512-NEXT: vmovdqa %xmm7, (%rax) +; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %wide.vec = load <24 x i32>, ptr %in.vec, align 32 diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll --- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll +++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll @@ -1063,190 +1063,71 @@ ; AVX512-LABEL: load_i8_stride6_vf16: ; AVX512: # %bb.0: ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax -; AVX512-NEXT: vmovdqa (%rdi), %xmm5 -; AVX512-NEXT: vmovdqa 16(%rdi), %xmm2 -; AVX512-NEXT: vmovdqa 32(%rdi), %xmm1 -; AVX512-NEXT: vmovdqa 48(%rdi), %xmm0 -; AVX512-NEXT: vpextrb $2, %xmm2, %r10d -; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm5[0,6,12],zero,xmm5[u,u,u,u,u,u,u,u,u,u,u,u] -; AVX512-NEXT: vpinsrb $3, %r10d, %xmm3, %xmm3 -; AVX512-NEXT: vpextrb $8, %xmm2, %r10d -; AVX512-NEXT: vpinsrb $4, %r10d, %xmm3, %xmm3 -; AVX512-NEXT: vpextrb $14, %xmm2, %r10d -; AVX512-NEXT: vpinsrb $5, %r10d, %xmm3, %xmm3 -; AVX512-NEXT: vpextrb $4, %xmm1, %r10d -; AVX512-NEXT: vpinsrb $6, %r10d, %xmm3, %xmm3 -; AVX512-NEXT: vpextrb $10, %xmm1, %r10d -; AVX512-NEXT: vpinsrb $7, %r10d, %xmm3, %xmm3 -; AVX512-NEXT: vmovd %xmm0, %r10d -; AVX512-NEXT: vpinsrb $8, %r10d, %xmm3, %xmm3 -; AVX512-NEXT: vpextrb $6, %xmm0, %r10d -; AVX512-NEXT: vpinsrb $9, %r10d, %xmm3, %xmm3 -; AVX512-NEXT: vpextrb $12, %xmm0, %r10d -; AVX512-NEXT: vpinsrb $10, %r10d, %xmm3, %xmm4 -; AVX512-NEXT: vmovdqa 64(%rdi), %xmm3 -; AVX512-NEXT: vpextrb $2, %xmm3, %r10d -; AVX512-NEXT: vpinsrb $11, %r10d, %xmm4, %xmm4 -; AVX512-NEXT: vpextrb $8, %xmm3, %r10d -; AVX512-NEXT: vpinsrb $12, %r10d, %xmm4, %xmm4 -; AVX512-NEXT: vpextrb $14, %xmm3, %r10d -; AVX512-NEXT: vpinsrb $13, %r10d, %xmm4, %xmm6 -; AVX512-NEXT: vmovdqa 80(%rdi), %xmm4 -; AVX512-NEXT: vpextrb $4, %xmm4, %edi -; AVX512-NEXT: vpinsrb $14, %edi, %xmm6, %xmm6 -; AVX512-NEXT: vpextrb $10, %xmm4, %edi -; AVX512-NEXT: vpinsrb $15, %edi, %xmm6, %xmm6 -; AVX512-NEXT: vpextrb $3, %xmm2, %edi -; AVX512-NEXT: vpshufb {{.*#+}} xmm7 = xmm5[1,7,13],zero,xmm5[u,u,u,u,u,u,u,u,u,u,u,u] -; AVX512-NEXT: vpinsrb $3, %edi, %xmm7, %xmm7 -; AVX512-NEXT: vpextrb $9, %xmm2, %edi -; AVX512-NEXT: vpinsrb $4, %edi, %xmm7, %xmm7 -; AVX512-NEXT: vpextrb $15, %xmm2, %edi -; AVX512-NEXT: vpinsrb $5, %edi, 
%xmm7, %xmm7 -; AVX512-NEXT: vpextrb $5, %xmm1, %edi -; AVX512-NEXT: vpinsrb $6, %edi, %xmm7, %xmm7 -; AVX512-NEXT: vpextrb $11, %xmm1, %edi -; AVX512-NEXT: vpinsrb $7, %edi, %xmm7, %xmm7 -; AVX512-NEXT: vpextrb $1, %xmm0, %edi -; AVX512-NEXT: vpinsrb $8, %edi, %xmm7, %xmm7 -; AVX512-NEXT: vpextrb $7, %xmm0, %edi -; AVX512-NEXT: vpinsrb $9, %edi, %xmm7, %xmm7 -; AVX512-NEXT: vpextrb $13, %xmm0, %edi -; AVX512-NEXT: vpinsrb $10, %edi, %xmm7, %xmm7 -; AVX512-NEXT: vpextrb $3, %xmm3, %edi -; AVX512-NEXT: vpinsrb $11, %edi, %xmm7, %xmm7 -; AVX512-NEXT: vpextrb $9, %xmm3, %edi -; AVX512-NEXT: vpinsrb $12, %edi, %xmm7, %xmm7 -; AVX512-NEXT: vpextrb $15, %xmm3, %edi -; AVX512-NEXT: vpinsrb $13, %edi, %xmm7, %xmm7 -; AVX512-NEXT: vpextrb $5, %xmm4, %edi -; AVX512-NEXT: vpinsrb $14, %edi, %xmm7, %xmm7 -; AVX512-NEXT: vpextrb $11, %xmm4, %edi -; AVX512-NEXT: vpinsrb $15, %edi, %xmm7, %xmm7 -; AVX512-NEXT: vpextrb $4, %xmm2, %edi -; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm5[2,8,14],zero,xmm5[u,u,u,u,u,u,u,u,u,u,u,u] -; AVX512-NEXT: vpinsrb $3, %edi, %xmm8, %xmm8 -; AVX512-NEXT: vpextrb $10, %xmm2, %edi -; AVX512-NEXT: vpinsrb $4, %edi, %xmm8, %xmm8 -; AVX512-NEXT: vmovd %xmm1, %edi -; AVX512-NEXT: vpinsrb $5, %edi, %xmm8, %xmm8 -; AVX512-NEXT: vpextrb $6, %xmm1, %edi -; AVX512-NEXT: vpinsrb $6, %edi, %xmm8, %xmm8 -; AVX512-NEXT: vpextrb $12, %xmm1, %edi -; AVX512-NEXT: vpinsrb $7, %edi, %xmm8, %xmm8 -; AVX512-NEXT: vpextrb $2, %xmm0, %edi -; AVX512-NEXT: vpinsrb $8, %edi, %xmm8, %xmm8 -; AVX512-NEXT: vpextrb $8, %xmm0, %edi -; AVX512-NEXT: vpinsrb $9, %edi, %xmm8, %xmm8 -; AVX512-NEXT: vpextrb $14, %xmm0, %edi -; AVX512-NEXT: vpinsrb $10, %edi, %xmm8, %xmm8 -; AVX512-NEXT: vpextrb $4, %xmm3, %edi -; AVX512-NEXT: vpinsrb $11, %edi, %xmm8, %xmm8 -; AVX512-NEXT: vpextrb $10, %xmm3, %edi -; AVX512-NEXT: vpinsrb $12, %edi, %xmm8, %xmm8 -; AVX512-NEXT: vmovd %xmm4, %edi -; AVX512-NEXT: vpinsrb $13, %edi, %xmm8, %xmm8 -; AVX512-NEXT: vpextrb $6, %xmm4, %edi -; AVX512-NEXT: vpinsrb $14, %edi, %xmm8, %xmm8 -; AVX512-NEXT: vpextrb $12, %xmm4, %edi -; AVX512-NEXT: vpinsrb $15, %edi, %xmm8, %xmm8 -; AVX512-NEXT: vpextrb $5, %xmm2, %edi -; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[3,9,15],zero,xmm5[u,u,u,u,u,u,u,u,u,u,u,u] -; AVX512-NEXT: vpinsrb $3, %edi, %xmm9, %xmm9 -; AVX512-NEXT: vpextrb $11, %xmm2, %edi -; AVX512-NEXT: vpinsrb $4, %edi, %xmm9, %xmm9 -; AVX512-NEXT: vpextrb $1, %xmm1, %edi -; AVX512-NEXT: vpinsrb $5, %edi, %xmm9, %xmm9 -; AVX512-NEXT: vpextrb $7, %xmm1, %edi -; AVX512-NEXT: vpinsrb $6, %edi, %xmm9, %xmm9 -; AVX512-NEXT: vpextrb $13, %xmm1, %edi -; AVX512-NEXT: vpinsrb $7, %edi, %xmm9, %xmm9 -; AVX512-NEXT: vpextrb $3, %xmm0, %edi -; AVX512-NEXT: vpinsrb $8, %edi, %xmm9, %xmm9 -; AVX512-NEXT: vpextrb $9, %xmm0, %edi -; AVX512-NEXT: vpinsrb $9, %edi, %xmm9, %xmm9 -; AVX512-NEXT: vpextrb $15, %xmm0, %edi -; AVX512-NEXT: vpinsrb $10, %edi, %xmm9, %xmm9 -; AVX512-NEXT: vpextrb $5, %xmm3, %edi -; AVX512-NEXT: vpinsrb $11, %edi, %xmm9, %xmm9 -; AVX512-NEXT: vpextrb $11, %xmm3, %edi -; AVX512-NEXT: vpinsrb $12, %edi, %xmm9, %xmm9 -; AVX512-NEXT: vpextrb $1, %xmm4, %edi -; AVX512-NEXT: vpinsrb $13, %edi, %xmm9, %xmm9 -; AVX512-NEXT: vpextrb $7, %xmm4, %edi -; AVX512-NEXT: vpinsrb $14, %edi, %xmm9, %xmm9 -; AVX512-NEXT: vpextrb $13, %xmm4, %edi -; AVX512-NEXT: vpinsrb $15, %edi, %xmm9, %xmm9 -; AVX512-NEXT: vpextrb $10, %xmm5, %edi -; AVX512-NEXT: vpextrb $4, %xmm5, %r10d -; AVX512-NEXT: vmovd %r10d, %xmm10 -; AVX512-NEXT: vpinsrb $1, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vmovd %xmm2, %edi -; 
AVX512-NEXT: vpinsrb $2, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vpextrb $6, %xmm2, %edi -; AVX512-NEXT: vpinsrb $3, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vpextrb $12, %xmm2, %edi -; AVX512-NEXT: vpinsrb $4, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vpextrb $2, %xmm1, %edi -; AVX512-NEXT: vpinsrb $5, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vpextrb $8, %xmm1, %edi -; AVX512-NEXT: vpinsrb $6, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vpextrb $14, %xmm1, %edi -; AVX512-NEXT: vpinsrb $7, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vpextrb $4, %xmm0, %edi -; AVX512-NEXT: vpinsrb $8, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vpextrb $10, %xmm0, %edi -; AVX512-NEXT: vpinsrb $9, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vmovd %xmm3, %edi -; AVX512-NEXT: vpinsrb $10, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vpextrb $6, %xmm3, %edi -; AVX512-NEXT: vpinsrb $11, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vpextrb $12, %xmm3, %edi -; AVX512-NEXT: vpinsrb $12, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vpextrb $2, %xmm4, %edi -; AVX512-NEXT: vpinsrb $13, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vpextrb $8, %xmm4, %edi -; AVX512-NEXT: vpinsrb $14, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vpextrb $14, %xmm4, %edi -; AVX512-NEXT: vpinsrb $15, %edi, %xmm10, %xmm10 -; AVX512-NEXT: vpextrb $11, %xmm5, %edi -; AVX512-NEXT: vpextrb $5, %xmm5, %r10d -; AVX512-NEXT: vmovd %r10d, %xmm5 -; AVX512-NEXT: vpinsrb $1, %edi, %xmm5, %xmm5 -; AVX512-NEXT: vpextrb $1, %xmm2, %edi -; AVX512-NEXT: vpinsrb $2, %edi, %xmm5, %xmm5 -; AVX512-NEXT: vpextrb $7, %xmm2, %edi -; AVX512-NEXT: vpinsrb $3, %edi, %xmm5, %xmm5 -; AVX512-NEXT: vpextrb $13, %xmm2, %edi -; AVX512-NEXT: vpinsrb $4, %edi, %xmm5, %xmm2 -; AVX512-NEXT: vpextrb $3, %xmm1, %edi -; AVX512-NEXT: vpinsrb $5, %edi, %xmm2, %xmm2 -; AVX512-NEXT: vpextrb $9, %xmm1, %edi -; AVX512-NEXT: vpinsrb $6, %edi, %xmm2, %xmm2 -; AVX512-NEXT: vpextrb $15, %xmm1, %edi -; AVX512-NEXT: vpinsrb $7, %edi, %xmm2, %xmm1 -; AVX512-NEXT: vpextrb $5, %xmm0, %edi -; AVX512-NEXT: vpinsrb $8, %edi, %xmm1, %xmm1 -; AVX512-NEXT: vpextrb $11, %xmm0, %edi -; AVX512-NEXT: vpinsrb $9, %edi, %xmm1, %xmm0 -; AVX512-NEXT: vpextrb $1, %xmm3, %edi -; AVX512-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0 -; AVX512-NEXT: vpextrb $7, %xmm3, %edi -; AVX512-NEXT: vpinsrb $11, %edi, %xmm0, %xmm0 -; AVX512-NEXT: vpextrb $13, %xmm3, %edi -; AVX512-NEXT: vpinsrb $12, %edi, %xmm0, %xmm0 -; AVX512-NEXT: vpextrb $3, %xmm4, %edi -; AVX512-NEXT: vpinsrb $13, %edi, %xmm0, %xmm0 -; AVX512-NEXT: vpextrb $9, %xmm4, %edi -; AVX512-NEXT: vpinsrb $14, %edi, %xmm0, %xmm0 -; AVX512-NEXT: vpextrb $15, %xmm4, %edi -; AVX512-NEXT: vpinsrb $15, %edi, %xmm0, %xmm0 -; AVX512-NEXT: vmovdqa %xmm6, (%rsi) -; AVX512-NEXT: vmovdqa %xmm7, (%rdx) +; AVX512-NEXT: vmovdqa (%rdi), %ymm0 +; AVX512-NEXT: vmovdqa 32(%rdi), %ymm1 +; AVX512-NEXT: movw $18724, %r10w # imm = 0x4924 +; AVX512-NEXT: kmovd %r10d, %k1 +; AVX512-NEXT: vpblendmw %ymm1, %ymm0, %ymm2 {%k1} +; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm2[0,6,12],zero,zero,zero,xmm2[4,10],zero,zero,zero,xmm2[u,u,u,u,u] +; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm4 +; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,xmm4[2,8,14],zero,zero,xmm4[0,6,12,u,u,u,u,u] +; AVX512-NEXT: vpor %xmm3, %xmm5, %xmm3 +; AVX512-NEXT: vmovdqa 80(%rdi), %xmm5 +; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[4,10] +; AVX512-NEXT: vmovdqa 64(%rdi), %xmm7 +; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = xmm7[u,u,u,u,u,u,u,u,u,u,u,2,8,14],zero,zero +; AVX512-NEXT: vpor %xmm6, %xmm8, %xmm6 +; AVX512-NEXT: movw $-2048, %di # imm = 0xF800 +; AVX512-NEXT: 
kmovd %edi, %k2 +; AVX512-NEXT: vmovdqu8 %xmm6, %xmm3 {%k2} +; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[1,7,13],zero,zero,zero,xmm2[5,11],zero,zero,zero,xmm2[u,u,u,u,u] +; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,zero,xmm4[3,9,15],zero,zero,xmm4[1,7,13,u,u,u,u,u] +; AVX512-NEXT: vpor %xmm2, %xmm4, %xmm2 +; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[5,11] +; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,3,9,15],zero,zero +; AVX512-NEXT: vpor %xmm4, %xmm6, %xmm4 +; AVX512-NEXT: vmovdqu8 %xmm4, %xmm2 {%k2} +; AVX512-NEXT: movw $9362, %di # imm = 0x2492 +; AVX512-NEXT: kmovd %edi, %k3 +; AVX512-NEXT: vpblendmw %ymm0, %ymm1, %ymm4 {%k3} +; AVX512-NEXT: vextracti128 $1, %ymm4, %xmm6 +; AVX512-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,zero,xmm6[4,10],zero,zero,zero,xmm6[2,8,14,u,u,u,u,u] +; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm4[2,8,14],zero,zero,xmm4[0,6,12],zero,zero,zero,xmm4[u,u,u,u,u] +; AVX512-NEXT: vpor %xmm8, %xmm9, %xmm8 +; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm7[u,u,u,u,u,u,u,u,u,u,u,4,10],zero,zero,zero +; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[0,6,12] +; AVX512-NEXT: vpor %xmm9, %xmm10, %xmm9 +; AVX512-NEXT: vmovdqu8 %xmm9, %xmm8 {%k2} +; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[5,11],zero,zero,zero,xmm6[3,9,15,u,u,u,u,u] +; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[3,9,15],zero,zero,xmm4[1,7,13],zero,zero,zero,xmm4[u,u,u,u,u] +; AVX512-NEXT: vpor %xmm6, %xmm4, %xmm4 +; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm7[u,u,u,u,u,u,u,u,u,u,u,5,11],zero,zero,zero +; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u,u],zero,zero,xmm5[1,7,13] +; AVX512-NEXT: vpor %xmm6, %xmm9, %xmm6 +; AVX512-NEXT: vmovdqu8 %xmm6, %xmm4 {%k2} +; AVX512-NEXT: vmovdqu16 %ymm0, %ymm1 {%k1} +; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm0 +; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm0[0,6,12],zero,zero,zero,xmm0[4,10,u,u,u,u,u,u] +; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[2,8,14] +; AVX512-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm9[5,6,7] +; AVX512-NEXT: vpshufb {{.*#+}} xmm9 = xmm1[4,10],zero,zero,zero,xmm1[2,8,14],zero,zero,xmm1[u,u,u,u,u,u] +; AVX512-NEXT: vpshufb {{.*#+}} xmm10 = xmm7[u,u,u,u,u,u,u,u,u,u,0,6,12],zero,zero,zero +; AVX512-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1,2,3,4],xmm10[5,6,7] +; AVX512-NEXT: vpor %xmm6, %xmm9, %xmm6 +; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[1,7,13],zero,zero,zero,xmm0[5,11,u,u,u,u,u,u] +; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u,u,u,u,u,u,u,u],zero,zero,zero,xmm5[3,9,15] +; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm5[5,6,7] +; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[5,11],zero,zero,zero,xmm1[3,9,15],zero,zero,xmm1[u,u,u,u,u,u] +; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm7[u,u,u,u,u,u,u,u,u,u,1,7,13],zero,zero,zero +; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7] +; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0 +; AVX512-NEXT: vmovdqa %xmm3, (%rsi) +; AVX512-NEXT: vmovdqa %xmm2, (%rdx) ; AVX512-NEXT: vmovdqa %xmm8, (%rcx) -; AVX512-NEXT: vmovdqa %xmm9, (%r8) -; AVX512-NEXT: vmovdqa %xmm10, (%r9) +; AVX512-NEXT: vmovdqa %xmm4, (%r8) +; AVX512-NEXT: vmovdqa %xmm6, (%r9) ; AVX512-NEXT: vmovdqa %xmm0, (%rax) +; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %wide.vec = load <96 x i8>, ptr %in.vec, align 32