diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -28204,8 +28204,10 @@
     MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
 
     // Simple i8 add case
-    if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1)
+    if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1) {
+      R = DAG.getNode(ISD::FREEZE, dl, VT, R);
       return DAG.getNode(ISD::ADD, dl, VT, R, R);
+    }
 
     // ashr(R, 7) === cmp_slt(R, 0)
     if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
@@ -43601,8 +43603,11 @@
       // We shift all of the values by one. In many cases we do not have
       // hardware support for this operation. This is better expressed as an ADD
       // of two values.
-      if (N1SplatC->isOne())
-        return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
+      if (N1SplatC->isOne()) {
+        SDLoc DL(N);
+        N0 = DAG.getNode(ISD::FREEZE, DL, VT, N0);
+        return DAG.getNode(ISD::ADD, DL, VT, N0, N0);
+      }
     }
 
   return SDValue();
diff --git a/llvm/test/CodeGen/X86/bitreverse.ll b/llvm/test/CodeGen/X86/bitreverse.ll
--- a/llvm/test/CodeGen/X86/bitreverse.ll
+++ b/llvm/test/CodeGen/X86/bitreverse.ll
@@ -69,11 +69,11 @@
 ; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-NEXT: psrlw $2, %xmm0
 ; X64-NEXT: por %xmm1, %xmm0
-; X64-NEXT: movdqa {{.*#+}} xmm1 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170]
 ; X64-NEXT: pand %xmm0, %xmm1
-; X64-NEXT: paddb %xmm1, %xmm1
+; X64-NEXT: psrlw $1, %xmm1
 ; X64-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; X64-NEXT: psrlw $1, %xmm0
+; X64-NEXT: paddb %xmm0, %xmm0
 ; X64-NEXT: por %xmm1, %xmm0
 ; X64-NEXT: retq
 ;
diff --git a/llvm/test/CodeGen/X86/combine-bitreverse.ll b/llvm/test/CodeGen/X86/combine-bitreverse.ll
--- a/llvm/test/CodeGen/X86/combine-bitreverse.ll
+++ b/llvm/test/CodeGen/X86/combine-bitreverse.ll
@@ -61,11 +61,11 @@
 ; X86-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: psrlw $2, %xmm0
 ; X86-NEXT: por %xmm1, %xmm0
-; X86-NEXT: movdqa {{.*#+}} xmm1 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170]
 ; X86-NEXT: pand %xmm0, %xmm1
-; X86-NEXT: paddb %xmm1, %xmm1
+; X86-NEXT: psrlw $1, %xmm1
 ; X86-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-NEXT: psrlw $1, %xmm0
+; X86-NEXT: paddb %xmm0, %xmm0
 ; X86-NEXT: por %xmm1, %xmm0
 ; X86-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
diff --git a/llvm/test/CodeGen/X86/oddsubvector.ll b/llvm/test/CodeGen/X86/oddsubvector.ll
--- a/llvm/test/CodeGen/X86/oddsubvector.ll
+++ b/llvm/test/CodeGen/X86/oddsubvector.ll
@@ -232,36 +232,37 @@
 ;
 ; AVX1-LABEL: PR42833:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa c+128(%rip), %xmm0
-; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: vmovdqu c+128(%rip), %ymm0
+; AVX1-NEXT: vmovdqa c+128(%rip), %xmm1
+; AVX1-NEXT: vmovd %xmm1, %eax
 ; AVX1-NEXT: addl b(%rip), %eax
-; AVX1-NEXT: vmovd %eax, %xmm1
-; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm2
-; AVX1-NEXT: vmovdqa c+144(%rip), %xmm3
-; AVX1-NEXT: vpaddd %xmm3, %xmm3, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
-; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4,5,6,7]
+; AVX1-NEXT: vmovd %eax, %xmm2
+; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm2
+; AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1,
%xmm0, %ymm3, %ymm0 +; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4,5,6,7] ; AVX1-NEXT: vmovdqa d+144(%rip), %xmm2 ; AVX1-NEXT: vpsubd c+144(%rip), %xmm2, %xmm2 -; AVX1-NEXT: vmovups %ymm1, c+128(%rip) -; AVX1-NEXT: vpinsrd $0, %eax, %xmm0, %xmm0 -; AVX1-NEXT: vmovdqa d+128(%rip), %xmm1 -; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm0 -; AVX1-NEXT: vmovdqa d+176(%rip), %xmm1 -; AVX1-NEXT: vmovdqa c+176(%rip), %xmm3 -; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vmovups %ymm0, c+128(%rip) +; AVX1-NEXT: vmovdqu c+160(%rip), %ymm0 +; AVX1-NEXT: vpinsrd $0, %eax, %xmm1, %xmm1 +; AVX1-NEXT: vmovdqa d+128(%rip), %xmm3 +; AVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1 +; AVX1-NEXT: vmovdqa d+176(%rip), %xmm3 +; AVX1-NEXT: vpsubd c+176(%rip), %xmm3, %xmm3 ; AVX1-NEXT: vmovdqa d+160(%rip), %xmm4 -; AVX1-NEXT: vmovdqa c+160(%rip), %xmm5 -; AVX1-NEXT: vpsubd %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vpsubd c+160(%rip), %xmm4, %xmm4 ; AVX1-NEXT: vmovdqa %xmm2, d+144(%rip) ; AVX1-NEXT: vmovdqa %xmm4, d+160(%rip) -; AVX1-NEXT: vmovdqa %xmm1, d+176(%rip) -; AVX1-NEXT: vmovdqa %xmm0, d+128(%rip) -; AVX1-NEXT: vpaddd %xmm3, %xmm3, %xmm0 -; AVX1-NEXT: vpaddd %xmm5, %xmm5, %xmm1 -; AVX1-NEXT: vmovdqa %xmm1, c+160(%rip) -; AVX1-NEXT: vmovdqa %xmm0, c+176(%rip) +; AVX1-NEXT: vmovdqa %xmm3, d+176(%rip) +; AVX1-NEXT: vmovdqa %xmm1, d+128(%rip) +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 +; AVX1-NEXT: vpaddd %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; AVX1-NEXT: vmovdqa %xmm0, c+160(%rip) +; AVX1-NEXT: vmovdqa %xmm1, c+176(%rip) ; AVX1-NEXT: vzeroupper ; AVX1-NEXT: retq ; @@ -314,36 +315,37 @@ ; ; XOP-LABEL: PR42833: ; XOP: # %bb.0: -; XOP-NEXT: vmovdqa c+128(%rip), %xmm0 -; XOP-NEXT: vmovd %xmm0, %eax +; XOP-NEXT: vmovdqu c+128(%rip), %ymm0 +; XOP-NEXT: vmovdqa c+128(%rip), %xmm1 +; XOP-NEXT: vmovd %xmm1, %eax ; XOP-NEXT: addl b(%rip), %eax -; XOP-NEXT: vmovd %eax, %xmm1 -; XOP-NEXT: vpaddd %xmm1, %xmm0, %xmm1 -; XOP-NEXT: vpaddd %xmm0, %xmm0, %xmm2 -; XOP-NEXT: vmovdqa c+144(%rip), %xmm3 -; XOP-NEXT: vpaddd %xmm3, %xmm3, %xmm3 -; XOP-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 -; XOP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4,5,6,7] +; XOP-NEXT: vmovd %eax, %xmm2 +; XOP-NEXT: vpaddd %xmm2, %xmm1, %xmm2 +; XOP-NEXT: vpaddd %xmm0, %xmm0, %xmm3 +; XOP-NEXT: vextractf128 $1, %ymm0, %xmm0 +; XOP-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0 +; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4,5,6,7] ; XOP-NEXT: vmovdqa d+144(%rip), %xmm2 ; XOP-NEXT: vpsubd c+144(%rip), %xmm2, %xmm2 -; XOP-NEXT: vmovups %ymm1, c+128(%rip) -; XOP-NEXT: vpinsrd $0, %eax, %xmm0, %xmm0 -; XOP-NEXT: vmovdqa d+128(%rip), %xmm1 -; XOP-NEXT: vpsubd %xmm0, %xmm1, %xmm0 -; XOP-NEXT: vmovdqa d+176(%rip), %xmm1 -; XOP-NEXT: vmovdqa c+176(%rip), %xmm3 -; XOP-NEXT: vpsubd %xmm3, %xmm1, %xmm1 +; XOP-NEXT: vmovups %ymm0, c+128(%rip) +; XOP-NEXT: vmovdqu c+160(%rip), %ymm0 +; XOP-NEXT: vpinsrd $0, %eax, %xmm1, %xmm1 +; XOP-NEXT: vmovdqa d+128(%rip), %xmm3 +; XOP-NEXT: vpsubd %xmm1, %xmm3, %xmm1 +; XOP-NEXT: vmovdqa d+176(%rip), %xmm3 +; XOP-NEXT: vpsubd c+176(%rip), %xmm3, %xmm3 ; XOP-NEXT: vmovdqa d+160(%rip), %xmm4 -; XOP-NEXT: vmovdqa c+160(%rip), %xmm5 -; XOP-NEXT: vpsubd %xmm5, %xmm4, %xmm4 +; XOP-NEXT: vpsubd c+160(%rip), %xmm4, %xmm4 ; XOP-NEXT: vmovdqa %xmm2, d+144(%rip) ; XOP-NEXT: vmovdqa %xmm4, d+160(%rip) -; XOP-NEXT: vmovdqa %xmm1, d+176(%rip) -; XOP-NEXT: vmovdqa %xmm0, d+128(%rip) -; XOP-NEXT: vpaddd %xmm3, %xmm3, %xmm0 -; XOP-NEXT: vpaddd %xmm5, %xmm5, %xmm1 -; XOP-NEXT: 
vmovdqa %xmm1, c+160(%rip) -; XOP-NEXT: vmovdqa %xmm0, c+176(%rip) +; XOP-NEXT: vmovdqa %xmm3, d+176(%rip) +; XOP-NEXT: vmovdqa %xmm1, d+128(%rip) +; XOP-NEXT: vextractf128 $1, %ymm0, %xmm1 +; XOP-NEXT: vpaddd %xmm1, %xmm1, %xmm1 +; XOP-NEXT: vpaddd %xmm0, %xmm0, %xmm0 +; XOP-NEXT: vmovdqa %xmm0, c+160(%rip) +; XOP-NEXT: vmovdqa %xmm1, c+176(%rip) ; XOP-NEXT: vzeroupper ; XOP-NEXT: retq %1 = load i32, i32* @b, align 4 diff --git a/llvm/test/CodeGen/X86/sdiv_fix_sat.ll b/llvm/test/CodeGen/X86/sdiv_fix_sat.ll --- a/llvm/test/CodeGen/X86/sdiv_fix_sat.ll +++ b/llvm/test/CodeGen/X86/sdiv_fix_sat.ll @@ -576,137 +576,154 @@ ; X64-NEXT: pushq %r12 ; X64-NEXT: pushq %rbx ; X64-NEXT: subq $104, %rsp -; X64-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; X64-NEXT: pxor %xmm2, %xmm2 -; X64-NEXT: pcmpgtd %xmm0, %xmm2 -; X64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; X64-NEXT: paddq %xmm0, %xmm0 ; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; X64-NEXT: movq %xmm0, %rbx -; X64-NEXT: movq %rbx, %rbp -; X64-NEXT: sarq $63, %rbp -; X64-NEXT: shldq $31, %rbx, %rbp +; X64-NEXT: pxor %xmm3, %xmm3 +; X64-NEXT: pcmpgtd %xmm0, %xmm3 +; X64-NEXT: movdqa %xmm0, %xmm2 +; X64-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1] +; X64-NEXT: paddq %xmm2, %xmm2 +; X64-NEXT: psllq $31, %xmm2 +; X64-NEXT: movdqa %xmm2, %xmm0 +; X64-NEXT: psrad $31, %xmm0 +; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] +; X64-NEXT: psrlq $31, %xmm2 +; X64-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] +; X64-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; X64-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; X64-NEXT: movq %xmm2, %r12 +; X64-NEXT: movq %r12, %r14 +; X64-NEXT: sarq $63, %r14 +; X64-NEXT: shldq $31, %r12, %r14 +; X64-NEXT: movq %r12, %r15 +; X64-NEXT: shlq $31, %r15 ; X64-NEXT: pxor %xmm0, %xmm0 +; X64-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; X64-NEXT: pcmpgtd %xmm1, %xmm0 ; X64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] ; X64-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; X64-NEXT: movq %xmm1, %rdx ; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; X64-NEXT: movq %rdx, %r15 -; X64-NEXT: sarq $63, %r15 -; X64-NEXT: movq %rbx, %r12 -; X64-NEXT: shlq $31, %r12 -; X64-NEXT: movq %r12, %rdi -; X64-NEXT: movq %rbp, %rsi -; X64-NEXT: movq %r15, %rcx +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: sarq $63, %rbx +; X64-NEXT: movq %r15, %rdi +; X64-NEXT: movq %r14, %rsi +; X64-NEXT: movq %rbx, %rcx ; X64-NEXT: callq __divti3@PLT ; X64-NEXT: movq %rax, %r13 ; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; X64-NEXT: movq %rdx, %r14 +; X64-NEXT: movq %rdx, %rbp ; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; X64-NEXT: subq $1, %r13 -; X64-NEXT: sbbq $0, %r14 -; X64-NEXT: shrq $63, %rbx -; X64-NEXT: xorl %r15d, %ebx -; X64-NEXT: movq %r12, %rdi -; X64-NEXT: movq %rbp, %rsi +; X64-NEXT: sbbq $0, %rbp +; X64-NEXT: movq %r15, %rdi +; X64-NEXT: movq %r14, %rsi ; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; X64-NEXT: movq %r15, %rcx +; X64-NEXT: movq %rbx, %rcx ; X64-NEXT: callq __modti3@PLT ; X64-NEXT: orq %rax, %rdx ; X64-NEXT: setne %al +; X64-NEXT: shrq $63, %r12 +; X64-NEXT: xorl %r12d, %ebx ; X64-NEXT: testb %bl, %al -; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload ; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 
8-byte Folded Reload +; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload ; X64-NEXT: movl $4294967295, %edx # imm = 0xFFFFFFFF ; X64-NEXT: cmpq %rdx, %r13 ; X64-NEXT: movl $4294967295, %eax # imm = 0xFFFFFFFF ; X64-NEXT: cmovbq %r13, %rax ; X64-NEXT: xorl %ecx, %ecx -; X64-NEXT: testq %r14, %r14 +; X64-NEXT: testq %rbp, %rbp ; X64-NEXT: cmovnsq %rdx, %r13 ; X64-NEXT: cmoveq %rax, %r13 -; X64-NEXT: cmovnsq %rcx, %r14 +; X64-NEXT: cmovnsq %rcx, %rbp ; X64-NEXT: movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000 ; X64-NEXT: cmpq %rcx, %r13 ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: cmovaq %r13, %rax -; X64-NEXT: testq %r14, %r14 +; X64-NEXT: testq %rbp, %rbp ; X64-NEXT: cmovsq %rcx, %r13 -; X64-NEXT: cmpq $-1, %r14 +; X64-NEXT: cmpq $-1, %rbp ; X64-NEXT: cmoveq %rax, %r13 ; X64-NEXT: movq %r13, %xmm0 ; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; X64-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; X64-NEXT: # xmm0 = mem[2,3,2,3] -; X64-NEXT: movq %xmm0, %rbx -; X64-NEXT: movq %rbx, %r13 -; X64-NEXT: sarq $63, %r13 -; X64-NEXT: shldq $31, %rbx, %r13 +; X64-NEXT: movq %xmm0, %r15 +; X64-NEXT: movq %r15, %rbp +; X64-NEXT: sarq $63, %rbp +; X64-NEXT: shldq $31, %r15, %rbp +; X64-NEXT: movq %r15, %r14 +; X64-NEXT: shlq $31, %r14 ; X64-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; X64-NEXT: # xmm0 = mem[2,3,2,3] ; X64-NEXT: movq %xmm0, %rdx ; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; X64-NEXT: movq %rdx, %rbp -; X64-NEXT: sarq $63, %rbp -; X64-NEXT: movq %rbx, %r15 -; X64-NEXT: shlq $31, %r15 -; X64-NEXT: movq %r15, %rdi -; X64-NEXT: movq %r13, %rsi -; X64-NEXT: movq %rbp, %rcx +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: sarq $63, %rbx +; X64-NEXT: movq %r14, %rdi +; X64-NEXT: movq %rbp, %rsi +; X64-NEXT: movq %rbx, %rcx ; X64-NEXT: callq __divti3@PLT ; X64-NEXT: movq %rax, %r12 ; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; X64-NEXT: movq %rdx, %r14 +; X64-NEXT: movq %rdx, %r13 ; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; X64-NEXT: subq $1, %r12 -; X64-NEXT: sbbq $0, %r14 -; X64-NEXT: shrq $63, %rbx -; X64-NEXT: xorl %ebp, %ebx -; X64-NEXT: movq %r15, %rdi -; X64-NEXT: movq %r13, %rsi +; X64-NEXT: sbbq $0, %r13 +; X64-NEXT: movq %r14, %rdi +; X64-NEXT: movq %rbp, %rsi ; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; X64-NEXT: movq %rbp, %rcx +; X64-NEXT: movq %rbx, %rcx ; X64-NEXT: callq __modti3@PLT ; X64-NEXT: orq %rax, %rdx ; X64-NEXT: setne %al +; X64-NEXT: shrq $63, %r15 +; X64-NEXT: xorl %r15d, %ebx ; X64-NEXT: testb %bl, %al -; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload ; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload +; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload ; X64-NEXT: movl $4294967295, %ecx # imm = 0xFFFFFFFF ; X64-NEXT: cmpq %rcx, %r12 ; X64-NEXT: movl $4294967295, %eax # imm = 0xFFFFFFFF ; X64-NEXT: cmovbq %r12, %rax -; X64-NEXT: testq %r14, %r14 +; X64-NEXT: testq %r13, %r13 ; X64-NEXT: cmovnsq %rcx, %r12 ; X64-NEXT: cmoveq %rax, %r12 ; X64-NEXT: movl $0, %eax -; X64-NEXT: cmovnsq %rax, %r14 +; X64-NEXT: cmovnsq %rax, %r13 ; X64-NEXT: movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000 ; X64-NEXT: cmpq %rcx, %r12 ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: cmovaq %r12, %rax -; X64-NEXT: testq %r14, %r14 +; X64-NEXT: testq %r13, %r13 ; X64-NEXT: cmovsq %rcx, %r12 -; X64-NEXT: cmpq $-1, %r14 +; X64-NEXT: cmpq $-1, %r13 ; 
X64-NEXT: cmoveq %rax, %r12 ; X64-NEXT: movq %r12, %xmm0 ; X64-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; X64-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] ; X64-NEXT: psrlq $1, %xmm1 ; X64-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; X64-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload -; X64-NEXT: # xmm1 = mem[2,3,2,3] -; X64-NEXT: pxor %xmm0, %xmm0 -; X64-NEXT: pcmpgtd %xmm1, %xmm0 -; X64-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] -; X64-NEXT: paddq %xmm1, %xmm1 -; X64-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill -; X64-NEXT: movq %xmm1, %rbx -; X64-NEXT: movq %rbx, %r12 -; X64-NEXT: sarq $63, %r12 -; X64-NEXT: shldq $31, %rbx, %r12 +; X64-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload +; X64-NEXT: # xmm0 = mem[2,3,2,3] +; X64-NEXT: pxor %xmm1, %xmm1 +; X64-NEXT: pcmpgtd %xmm0, %xmm1 +; X64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; X64-NEXT: paddq %xmm0, %xmm0 +; X64-NEXT: psllq $31, %xmm0 +; X64-NEXT: movdqa %xmm0, %xmm1 +; X64-NEXT: psrad $31, %xmm1 +; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3] +; X64-NEXT: psrlq $31, %xmm0 +; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; X64-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill +; X64-NEXT: movq %xmm0, %r13 +; X64-NEXT: movq %r13, %rbp +; X64-NEXT: sarq $63, %rbp +; X64-NEXT: shldq $31, %r13, %rbp +; X64-NEXT: movq %r13, %r14 +; X64-NEXT: shlq $31, %r14 ; X64-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload ; X64-NEXT: # xmm1 = mem[2,3,2,3] ; X64-NEXT: pxor %xmm0, %xmm0 @@ -715,103 +732,101 @@ ; X64-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; X64-NEXT: movq %xmm1, %rdx ; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; X64-NEXT: movq %rdx, %rbp -; X64-NEXT: sarq $63, %rbp -; X64-NEXT: movq %rbx, %r15 -; X64-NEXT: shlq $31, %r15 -; X64-NEXT: movq %r15, %rdi -; X64-NEXT: movq %r12, %rsi -; X64-NEXT: movq %rbp, %rcx +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: sarq $63, %rbx +; X64-NEXT: movq %r14, %rdi +; X64-NEXT: movq %rbp, %rsi +; X64-NEXT: movq %rbx, %rcx ; X64-NEXT: callq __divti3@PLT -; X64-NEXT: movq %rax, %r13 +; X64-NEXT: movq %rax, %r12 ; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; X64-NEXT: movq %rdx, %r14 +; X64-NEXT: movq %rdx, %r15 ; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; X64-NEXT: subq $1, %r13 -; X64-NEXT: sbbq $0, %r14 -; X64-NEXT: shrq $63, %rbx -; X64-NEXT: xorl %ebp, %ebx -; X64-NEXT: movq %r15, %rdi -; X64-NEXT: movq %r12, %rsi +; X64-NEXT: subq $1, %r12 +; X64-NEXT: sbbq $0, %r15 +; X64-NEXT: movq %r14, %rdi +; X64-NEXT: movq %rbp, %rsi ; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; X64-NEXT: movq %rbp, %rcx +; X64-NEXT: movq %rbx, %rcx ; X64-NEXT: callq __modti3@PLT ; X64-NEXT: orq %rax, %rdx ; X64-NEXT: setne %al +; X64-NEXT: shrq $63, %r13 +; X64-NEXT: xorl %r13d, %ebx ; X64-NEXT: testb %bl, %al -; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload -; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload +; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload +; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload ; X64-NEXT: movl $4294967295, %ecx # imm = 0xFFFFFFFF -; X64-NEXT: cmpq %rcx, %r13 +; X64-NEXT: cmpq %rcx, %r12 ; X64-NEXT: movl $4294967295, %eax # imm = 0xFFFFFFFF -; 
X64-NEXT: cmovbq %r13, %rax -; X64-NEXT: testq %r14, %r14 -; X64-NEXT: cmovnsq %rcx, %r13 -; X64-NEXT: cmoveq %rax, %r13 +; X64-NEXT: cmovbq %r12, %rax +; X64-NEXT: testq %r15, %r15 +; X64-NEXT: cmovnsq %rcx, %r12 +; X64-NEXT: cmoveq %rax, %r12 ; X64-NEXT: movl $0, %eax -; X64-NEXT: cmovnsq %rax, %r14 +; X64-NEXT: cmovnsq %rax, %r15 ; X64-NEXT: movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000 -; X64-NEXT: cmpq %rcx, %r13 +; X64-NEXT: cmpq %rcx, %r12 ; X64-NEXT: movq %rcx, %rax -; X64-NEXT: cmovaq %r13, %rax -; X64-NEXT: testq %r14, %r14 -; X64-NEXT: cmovsq %rcx, %r13 -; X64-NEXT: cmpq $-1, %r14 -; X64-NEXT: cmoveq %rax, %r13 -; X64-NEXT: movq %r13, %xmm0 +; X64-NEXT: cmovaq %r12, %rax +; X64-NEXT: testq %r15, %r15 +; X64-NEXT: cmovsq %rcx, %r12 +; X64-NEXT: cmpq $-1, %r15 +; X64-NEXT: cmoveq %rax, %r12 +; X64-NEXT: movq %r12, %xmm0 ; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; X64-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; X64-NEXT: # xmm0 = mem[2,3,2,3] -; X64-NEXT: movq %xmm0, %rbx -; X64-NEXT: movq %rbx, %r13 -; X64-NEXT: sarq $63, %r13 -; X64-NEXT: shldq $31, %rbx, %r13 +; X64-NEXT: movq %xmm0, %r15 +; X64-NEXT: movq %r15, %rbp +; X64-NEXT: sarq $63, %rbp +; X64-NEXT: shldq $31, %r15, %rbp +; X64-NEXT: movq %r15, %r14 +; X64-NEXT: shlq $31, %r14 ; X64-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; X64-NEXT: # xmm0 = mem[2,3,2,3] ; X64-NEXT: movq %xmm0, %rdx ; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; X64-NEXT: movq %rdx, %rbp -; X64-NEXT: sarq $63, %rbp -; X64-NEXT: movq %rbx, %r15 -; X64-NEXT: shlq $31, %r15 -; X64-NEXT: movq %r15, %rdi -; X64-NEXT: movq %r13, %rsi -; X64-NEXT: movq %rbp, %rcx +; X64-NEXT: movq %rdx, %rbx +; X64-NEXT: sarq $63, %rbx +; X64-NEXT: movq %r14, %rdi +; X64-NEXT: movq %rbp, %rsi +; X64-NEXT: movq %rbx, %rcx ; X64-NEXT: callq __divti3@PLT ; X64-NEXT: movq %rax, %r12 ; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill -; X64-NEXT: movq %rdx, %r14 +; X64-NEXT: movq %rdx, %r13 ; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill ; X64-NEXT: subq $1, %r12 -; X64-NEXT: sbbq $0, %r14 -; X64-NEXT: shrq $63, %rbx -; X64-NEXT: xorl %ebp, %ebx -; X64-NEXT: movq %r15, %rdi -; X64-NEXT: movq %r13, %rsi +; X64-NEXT: sbbq $0, %r13 +; X64-NEXT: movq %r14, %rdi +; X64-NEXT: movq %rbp, %rsi ; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload -; X64-NEXT: movq %rbp, %rcx +; X64-NEXT: movq %rbx, %rcx ; X64-NEXT: callq __modti3@PLT ; X64-NEXT: orq %rax, %rdx ; X64-NEXT: setne %al +; X64-NEXT: shrq $63, %r15 +; X64-NEXT: xorl %r15d, %ebx ; X64-NEXT: testb %bl, %al -; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload ; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload +; X64-NEXT: cmoveq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload ; X64-NEXT: movl $4294967295, %ecx # imm = 0xFFFFFFFF ; X64-NEXT: cmpq %rcx, %r12 ; X64-NEXT: movl $4294967295, %eax # imm = 0xFFFFFFFF ; X64-NEXT: cmovbq %r12, %rax -; X64-NEXT: testq %r14, %r14 +; X64-NEXT: testq %r13, %r13 ; X64-NEXT: cmovnsq %rcx, %r12 ; X64-NEXT: cmoveq %rax, %r12 ; X64-NEXT: movl $0, %eax -; X64-NEXT: cmovnsq %rax, %r14 +; X64-NEXT: cmovnsq %rax, %r13 ; X64-NEXT: movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000 ; X64-NEXT: cmpq %rcx, %r12 ; X64-NEXT: movq %rcx, %rax ; X64-NEXT: cmovaq %r12, %rax -; X64-NEXT: testq %r14, %r14 +; X64-NEXT: testq %r13, %r13 ; X64-NEXT: cmovsq %rcx, %r12 -; X64-NEXT: cmpq $-1, %r14 +; X64-NEXT: cmpq 
$-1, %r13 ; X64-NEXT: cmoveq %rax, %r12 ; X64-NEXT: movq %r12, %xmm0 ; X64-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload diff --git a/llvm/test/CodeGen/X86/vector-bitreverse.ll b/llvm/test/CodeGen/X86/vector-bitreverse.ll --- a/llvm/test/CodeGen/X86/vector-bitreverse.ll +++ b/llvm/test/CodeGen/X86/vector-bitreverse.ll @@ -693,11 +693,11 @@ ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE2-NEXT: psrlw $2, %xmm0 ; SSE2-NEXT: por %xmm1, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] ; SSE2-NEXT: pand %xmm0, %xmm1 -; SSE2-NEXT: paddb %xmm1, %xmm1 +; SSE2-NEXT: psrlw $1, %xmm1 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: paddb %xmm0, %xmm0 ; SSE2-NEXT: por %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -781,11 +781,11 @@ ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE2-NEXT: psrlw $2, %xmm0 ; SSE2-NEXT: por %xmm1, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] ; SSE2-NEXT: pand %xmm0, %xmm1 -; SSE2-NEXT: paddb %xmm1, %xmm1 +; SSE2-NEXT: psrlw $1, %xmm1 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: paddb %xmm0, %xmm0 ; SSE2-NEXT: por %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -881,11 +881,11 @@ ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE2-NEXT: psrlw $2, %xmm0 ; SSE2-NEXT: por %xmm1, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] ; SSE2-NEXT: pand %xmm0, %xmm1 -; SSE2-NEXT: paddb %xmm1, %xmm1 +; SSE2-NEXT: psrlw $1, %xmm1 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: paddb %xmm0, %xmm0 ; SSE2-NEXT: por %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -983,11 +983,11 @@ ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE2-NEXT: psrlw $2, %xmm0 ; SSE2-NEXT: por %xmm1, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] ; SSE2-NEXT: pand %xmm0, %xmm1 -; SSE2-NEXT: paddb %xmm1, %xmm1 +; SSE2-NEXT: psrlw $1, %xmm1 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: paddb %xmm0, %xmm0 ; SSE2-NEXT: por %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -1079,13 +1079,13 @@ ; SSE2-NEXT: pand %xmm5, %xmm0 ; SSE2-NEXT: psrlw $2, %xmm0 ; SSE2-NEXT: por %xmm4, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] ; SSE2-NEXT: movdqa %xmm0, %xmm6 ; SSE2-NEXT: pand %xmm4, %xmm6 -; SSE2-NEXT: paddb %xmm6, %xmm6 -; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] +; SSE2-NEXT: psrlw $1, %xmm6 +; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] ; SSE2-NEXT: pand %xmm7, %xmm0 -; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: paddb %xmm0, %xmm0 ; SSE2-NEXT: por %xmm6, %xmm0 ; SSE2-NEXT: movdqa %xmm2, %xmm6 ; SSE2-NEXT: psllw $4, %xmm6 @@ -1099,9 +1099,9 @@ ; SSE2-NEXT: psrlw $2, %xmm1 
; SSE2-NEXT: por %xmm3, %xmm1 ; SSE2-NEXT: pand %xmm1, %xmm4 -; SSE2-NEXT: paddb %xmm4, %xmm4 +; SSE2-NEXT: psrlw $1, %xmm4 ; SSE2-NEXT: pand %xmm7, %xmm1 -; SSE2-NEXT: psrlw $1, %xmm1 +; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: por %xmm4, %xmm1 ; SSE2-NEXT: retq ; @@ -1256,13 +1256,13 @@ ; SSE2-NEXT: pand %xmm5, %xmm0 ; SSE2-NEXT: psrlw $2, %xmm0 ; SSE2-NEXT: por %xmm4, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] ; SSE2-NEXT: movdqa %xmm0, %xmm7 ; SSE2-NEXT: pand %xmm4, %xmm7 -; SSE2-NEXT: paddb %xmm7, %xmm7 -; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] +; SSE2-NEXT: psrlw $1, %xmm7 +; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] ; SSE2-NEXT: pand %xmm6, %xmm0 -; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: paddb %xmm0, %xmm0 ; SSE2-NEXT: por %xmm7, %xmm0 ; SSE2-NEXT: movdqa %xmm2, %xmm7 ; SSE2-NEXT: psrlw $8, %xmm7 @@ -1280,9 +1280,9 @@ ; SSE2-NEXT: psrlw $2, %xmm1 ; SSE2-NEXT: por %xmm3, %xmm1 ; SSE2-NEXT: pand %xmm1, %xmm4 -; SSE2-NEXT: paddb %xmm4, %xmm4 +; SSE2-NEXT: psrlw $1, %xmm4 ; SSE2-NEXT: pand %xmm6, %xmm1 -; SSE2-NEXT: psrlw $1, %xmm1 +; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: por %xmm4, %xmm1 ; SSE2-NEXT: retq ; @@ -1459,13 +1459,13 @@ ; SSE2-NEXT: pand %xmm8, %xmm0 ; SSE2-NEXT: psrlw $2, %xmm0 ; SSE2-NEXT: por %xmm5, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] +; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] ; SSE2-NEXT: movdqa %xmm0, %xmm6 ; SSE2-NEXT: pand %xmm5, %xmm6 -; SSE2-NEXT: paddb %xmm6, %xmm6 -; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] +; SSE2-NEXT: psrlw $1, %xmm6 +; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] ; SSE2-NEXT: pand %xmm7, %xmm0 -; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: paddb %xmm0, %xmm0 ; SSE2-NEXT: por %xmm6, %xmm0 ; SSE2-NEXT: movdqa %xmm2, %xmm6 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15] @@ -1487,9 +1487,9 @@ ; SSE2-NEXT: psrlw $2, %xmm1 ; SSE2-NEXT: por %xmm3, %xmm1 ; SSE2-NEXT: pand %xmm1, %xmm5 -; SSE2-NEXT: paddb %xmm5, %xmm5 +; SSE2-NEXT: psrlw $1, %xmm5 ; SSE2-NEXT: pand %xmm7, %xmm1 -; SSE2-NEXT: psrlw $1, %xmm1 +; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: por %xmm5, %xmm1 ; SSE2-NEXT: retq ; @@ -1668,13 +1668,13 @@ ; SSE2-NEXT: pand %xmm8, %xmm0 ; SSE2-NEXT: psrlw $2, %xmm0 ; SSE2-NEXT: por %xmm5, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] +; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] ; SSE2-NEXT: movdqa %xmm0, %xmm6 ; SSE2-NEXT: pand %xmm5, %xmm6 -; SSE2-NEXT: paddb %xmm6, %xmm6 -; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] +; SSE2-NEXT: psrlw $1, %xmm6 +; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] ; SSE2-NEXT: pand %xmm7, %xmm0 -; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: paddb %xmm0, %xmm0 ; SSE2-NEXT: por %xmm6, %xmm0 ; SSE2-NEXT: movdqa %xmm2, %xmm6 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = 
xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15] @@ -1698,9 +1698,9 @@ ; SSE2-NEXT: psrlw $2, %xmm1 ; SSE2-NEXT: por %xmm3, %xmm1 ; SSE2-NEXT: pand %xmm1, %xmm5 -; SSE2-NEXT: paddb %xmm5, %xmm5 +; SSE2-NEXT: psrlw $1, %xmm5 ; SSE2-NEXT: pand %xmm7, %xmm1 -; SSE2-NEXT: psrlw $1, %xmm1 +; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: por %xmm5, %xmm1 ; SSE2-NEXT: retq ; @@ -1868,13 +1868,13 @@ ; SSE2-NEXT: pand %xmm8, %xmm0 ; SSE2-NEXT: psrlw $2, %xmm0 ; SSE2-NEXT: por %xmm6, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] +; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] ; SSE2-NEXT: movdqa %xmm0, %xmm7 ; SSE2-NEXT: pand %xmm6, %xmm7 -; SSE2-NEXT: paddb %xmm7, %xmm7 -; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] +; SSE2-NEXT: psrlw $1, %xmm7 +; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] ; SSE2-NEXT: pand %xmm9, %xmm0 -; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: paddb %xmm0, %xmm0 ; SSE2-NEXT: por %xmm7, %xmm0 ; SSE2-NEXT: movdqa %xmm1, %xmm7 ; SSE2-NEXT: psllw $4, %xmm7 @@ -1891,9 +1891,9 @@ ; SSE2-NEXT: por %xmm4, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm4 ; SSE2-NEXT: pand %xmm6, %xmm4 -; SSE2-NEXT: paddb %xmm4, %xmm4 +; SSE2-NEXT: psrlw $1, %xmm4 ; SSE2-NEXT: pand %xmm9, %xmm1 -; SSE2-NEXT: psrlw $1, %xmm1 +; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: por %xmm4, %xmm1 ; SSE2-NEXT: movdqa %xmm2, %xmm4 ; SSE2-NEXT: psllw $4, %xmm4 @@ -1910,9 +1910,9 @@ ; SSE2-NEXT: por %xmm4, %xmm2 ; SSE2-NEXT: movdqa %xmm2, %xmm4 ; SSE2-NEXT: pand %xmm6, %xmm4 -; SSE2-NEXT: paddb %xmm4, %xmm4 +; SSE2-NEXT: psrlw $1, %xmm4 ; SSE2-NEXT: pand %xmm9, %xmm2 -; SSE2-NEXT: psrlw $1, %xmm2 +; SSE2-NEXT: paddb %xmm2, %xmm2 ; SSE2-NEXT: por %xmm4, %xmm2 ; SSE2-NEXT: movdqa %xmm10, %xmm4 ; SSE2-NEXT: psllw $4, %xmm4 @@ -1926,9 +1926,9 @@ ; SSE2-NEXT: psrlw $2, %xmm3 ; SSE2-NEXT: por %xmm5, %xmm3 ; SSE2-NEXT: pand %xmm3, %xmm6 -; SSE2-NEXT: paddb %xmm6, %xmm6 +; SSE2-NEXT: psrlw $1, %xmm6 ; SSE2-NEXT: pand %xmm9, %xmm3 -; SSE2-NEXT: psrlw $1, %xmm3 +; SSE2-NEXT: paddb %xmm3, %xmm3 ; SSE2-NEXT: por %xmm6, %xmm3 ; SSE2-NEXT: retq ; @@ -2160,13 +2160,13 @@ ; SSE2-NEXT: pand %xmm8, %xmm0 ; SSE2-NEXT: psrlw $2, %xmm0 ; SSE2-NEXT: por %xmm6, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] +; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] ; SSE2-NEXT: movdqa %xmm0, %xmm7 ; SSE2-NEXT: pand %xmm6, %xmm7 -; SSE2-NEXT: paddb %xmm7, %xmm7 -; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] +; SSE2-NEXT: psrlw $1, %xmm7 +; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] ; SSE2-NEXT: pand %xmm9, %xmm0 -; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: paddb %xmm0, %xmm0 ; SSE2-NEXT: por %xmm7, %xmm0 ; SSE2-NEXT: movdqa %xmm1, %xmm7 ; SSE2-NEXT: psrlw $8, %xmm7 @@ -2187,9 +2187,9 @@ ; SSE2-NEXT: por %xmm5, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm5 ; SSE2-NEXT: pand %xmm6, %xmm5 -; SSE2-NEXT: paddb %xmm5, %xmm5 +; SSE2-NEXT: psrlw $1, %xmm5 ; SSE2-NEXT: pand %xmm9, %xmm1 -; SSE2-NEXT: psrlw $1, %xmm1 +; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: por %xmm5, %xmm1 ; SSE2-NEXT: movdqa %xmm2, %xmm5 ; SSE2-NEXT: psrlw $8, %xmm5 @@ -2210,9 +2210,9 @@ ; SSE2-NEXT: por %xmm5, %xmm2 ; SSE2-NEXT: 
movdqa %xmm2, %xmm5 ; SSE2-NEXT: pand %xmm6, %xmm5 -; SSE2-NEXT: paddb %xmm5, %xmm5 +; SSE2-NEXT: psrlw $1, %xmm5 ; SSE2-NEXT: pand %xmm9, %xmm2 -; SSE2-NEXT: psrlw $1, %xmm2 +; SSE2-NEXT: paddb %xmm2, %xmm2 ; SSE2-NEXT: por %xmm5, %xmm2 ; SSE2-NEXT: movdqa %xmm4, %xmm5 ; SSE2-NEXT: psrlw $8, %xmm5 @@ -2230,9 +2230,9 @@ ; SSE2-NEXT: psrlw $2, %xmm3 ; SSE2-NEXT: por %xmm10, %xmm3 ; SSE2-NEXT: pand %xmm3, %xmm6 -; SSE2-NEXT: paddb %xmm6, %xmm6 +; SSE2-NEXT: psrlw $1, %xmm6 ; SSE2-NEXT: pand %xmm9, %xmm3 -; SSE2-NEXT: psrlw $1, %xmm3 +; SSE2-NEXT: paddb %xmm3, %xmm3 ; SSE2-NEXT: por %xmm6, %xmm3 ; SSE2-NEXT: retq ; @@ -2504,13 +2504,13 @@ ; SSE2-NEXT: pand %xmm8, %xmm0 ; SSE2-NEXT: psrlw $2, %xmm0 ; SSE2-NEXT: por %xmm7, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] +; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] ; SSE2-NEXT: movdqa %xmm0, %xmm6 ; SSE2-NEXT: pand %xmm7, %xmm6 -; SSE2-NEXT: paddb %xmm6, %xmm6 -; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] +; SSE2-NEXT: psrlw $1, %xmm6 +; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] ; SSE2-NEXT: pand %xmm9, %xmm0 -; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: paddb %xmm0, %xmm0 ; SSE2-NEXT: por %xmm6, %xmm0 ; SSE2-NEXT: movdqa %xmm1, %xmm6 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm10[8],xmm6[9],xmm10[9],xmm6[10],xmm10[10],xmm6[11],xmm10[11],xmm6[12],xmm10[12],xmm6[13],xmm10[13],xmm6[14],xmm10[14],xmm6[15],xmm10[15] @@ -2535,9 +2535,9 @@ ; SSE2-NEXT: por %xmm4, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm4 ; SSE2-NEXT: pand %xmm7, %xmm4 -; SSE2-NEXT: paddb %xmm4, %xmm4 +; SSE2-NEXT: psrlw $1, %xmm4 ; SSE2-NEXT: pand %xmm9, %xmm1 -; SSE2-NEXT: psrlw $1, %xmm1 +; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: por %xmm4, %xmm1 ; SSE2-NEXT: movdqa %xmm2, %xmm4 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm10[8],xmm4[9],xmm10[9],xmm4[10],xmm10[10],xmm4[11],xmm10[11],xmm4[12],xmm10[12],xmm4[13],xmm10[13],xmm4[14],xmm10[14],xmm4[15],xmm10[15] @@ -2562,9 +2562,9 @@ ; SSE2-NEXT: por %xmm4, %xmm2 ; SSE2-NEXT: movdqa %xmm2, %xmm4 ; SSE2-NEXT: pand %xmm7, %xmm4 -; SSE2-NEXT: paddb %xmm4, %xmm4 +; SSE2-NEXT: psrlw $1, %xmm4 ; SSE2-NEXT: pand %xmm9, %xmm2 -; SSE2-NEXT: psrlw $1, %xmm2 +; SSE2-NEXT: paddb %xmm2, %xmm2 ; SSE2-NEXT: por %xmm4, %xmm2 ; SSE2-NEXT: movdqa %xmm11, %xmm4 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm10[8],xmm4[9],xmm10[9],xmm4[10],xmm10[10],xmm4[11],xmm10[11],xmm4[12],xmm10[12],xmm4[13],xmm10[13],xmm4[14],xmm10[14],xmm4[15],xmm10[15] @@ -2586,9 +2586,9 @@ ; SSE2-NEXT: psrlw $2, %xmm3 ; SSE2-NEXT: por %xmm5, %xmm3 ; SSE2-NEXT: pand %xmm3, %xmm7 -; SSE2-NEXT: paddb %xmm7, %xmm7 +; SSE2-NEXT: psrlw $1, %xmm7 ; SSE2-NEXT: pand %xmm9, %xmm3 -; SSE2-NEXT: psrlw $1, %xmm3 +; SSE2-NEXT: paddb %xmm3, %xmm3 ; SSE2-NEXT: por %xmm7, %xmm3 ; SSE2-NEXT: retq ; @@ -2862,13 +2862,13 @@ ; SSE2-NEXT: pand %xmm8, %xmm0 ; SSE2-NEXT: psrlw $2, %xmm0 ; SSE2-NEXT: por %xmm7, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] +; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] ; SSE2-NEXT: movdqa %xmm0, %xmm6 ; SSE2-NEXT: pand %xmm7, %xmm6 -; SSE2-NEXT: paddb %xmm6, %xmm6 -; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] +; SSE2-NEXT: psrlw $1, %xmm6 +; SSE2-NEXT: movdqa {{.*#+}} xmm9 = 
[85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] ; SSE2-NEXT: pand %xmm9, %xmm0 -; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: paddb %xmm0, %xmm0 ; SSE2-NEXT: por %xmm6, %xmm0 ; SSE2-NEXT: movdqa %xmm1, %xmm6 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm10[8],xmm6[9],xmm10[9],xmm6[10],xmm10[10],xmm6[11],xmm10[11],xmm6[12],xmm10[12],xmm6[13],xmm10[13],xmm6[14],xmm10[14],xmm6[15],xmm10[15] @@ -2895,9 +2895,9 @@ ; SSE2-NEXT: por %xmm4, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm4 ; SSE2-NEXT: pand %xmm7, %xmm4 -; SSE2-NEXT: paddb %xmm4, %xmm4 +; SSE2-NEXT: psrlw $1, %xmm4 ; SSE2-NEXT: pand %xmm9, %xmm1 -; SSE2-NEXT: psrlw $1, %xmm1 +; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: por %xmm4, %xmm1 ; SSE2-NEXT: movdqa %xmm2, %xmm4 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm10[8],xmm4[9],xmm10[9],xmm4[10],xmm10[10],xmm4[11],xmm10[11],xmm4[12],xmm10[12],xmm4[13],xmm10[13],xmm4[14],xmm10[14],xmm4[15],xmm10[15] @@ -2924,9 +2924,9 @@ ; SSE2-NEXT: por %xmm4, %xmm2 ; SSE2-NEXT: movdqa %xmm2, %xmm4 ; SSE2-NEXT: pand %xmm7, %xmm4 -; SSE2-NEXT: paddb %xmm4, %xmm4 +; SSE2-NEXT: psrlw $1, %xmm4 ; SSE2-NEXT: pand %xmm9, %xmm2 -; SSE2-NEXT: psrlw $1, %xmm2 +; SSE2-NEXT: paddb %xmm2, %xmm2 ; SSE2-NEXT: por %xmm4, %xmm2 ; SSE2-NEXT: movdqa %xmm11, %xmm4 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm10[8],xmm4[9],xmm10[9],xmm4[10],xmm10[10],xmm4[11],xmm10[11],xmm4[12],xmm10[12],xmm4[13],xmm10[13],xmm4[14],xmm10[14],xmm4[15],xmm10[15] @@ -2950,9 +2950,9 @@ ; SSE2-NEXT: psrlw $2, %xmm3 ; SSE2-NEXT: por %xmm5, %xmm3 ; SSE2-NEXT: pand %xmm3, %xmm7 -; SSE2-NEXT: paddb %xmm7, %xmm7 +; SSE2-NEXT: psrlw $1, %xmm7 ; SSE2-NEXT: pand %xmm9, %xmm3 -; SSE2-NEXT: psrlw $1, %xmm3 +; SSE2-NEXT: paddb %xmm3, %xmm3 ; SSE2-NEXT: por %xmm7, %xmm3 ; SSE2-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll --- a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll +++ b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll @@ -517,10 +517,10 @@ ; SSE2-NEXT: pandn %xmm3, %xmm2 ; SSE2-NEXT: por %xmm4, %xmm2 ; SSE2-NEXT: movdqa %xmm2, %xmm3 -; SSE2-NEXT: paddb %xmm2, %xmm3 +; SSE2-NEXT: psrlw $7, %xmm3 +; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 ; SSE2-NEXT: movdqa %xmm2, %xmm4 -; SSE2-NEXT: psrlw $7, %xmm4 -; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4 +; SSE2-NEXT: paddb %xmm2, %xmm4 ; SSE2-NEXT: por %xmm3, %xmm4 ; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: pcmpgtb %xmm1, %xmm0 @@ -553,10 +553,10 @@ ; SSE41-NEXT: movdqa %xmm2, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm0 -; SSE41-NEXT: paddb %xmm1, %xmm0 +; SSE41-NEXT: psrlw $7, %xmm0 +; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 ; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: psrlw $7, %xmm3 -; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 +; SSE41-NEXT: paddb %xmm1, %xmm3 ; SSE41-NEXT: por %xmm0, %xmm3 ; SSE41-NEXT: paddb %xmm2, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm0 @@ -580,10 +580,10 @@ ; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2 ; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 -; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm2 -; AVX-NEXT: vpsrlw $7, %xmm0, %xmm3 -; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3 -; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2 +; AVX-NEXT: vpsrlw $7, %xmm0, %xmm2 +; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2 +; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm3 +; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2 ; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, 
%xmm0 ; AVX-NEXT: retq @@ -728,10 +728,10 @@ ; X86-SSE2-NEXT: pandn %xmm3, %xmm2 ; X86-SSE2-NEXT: por %xmm4, %xmm2 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm3 -; X86-SSE2-NEXT: paddb %xmm2, %xmm3 +; X86-SSE2-NEXT: psrlw $7, %xmm3 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm3 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm4 -; X86-SSE2-NEXT: psrlw $7, %xmm4 -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm4 +; X86-SSE2-NEXT: paddb %xmm2, %xmm4 ; X86-SSE2-NEXT: por %xmm3, %xmm4 ; X86-SSE2-NEXT: paddb %xmm1, %xmm1 ; X86-SSE2-NEXT: pcmpgtb %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll --- a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll +++ b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll @@ -443,10 +443,10 @@ ; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2 -; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm3 -; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 -; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2 +; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm2 +; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 +; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm3 +; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: retq @@ -467,10 +467,10 @@ ; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 -; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm2 -; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm3 -; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3 -; AVX512F-NEXT: vpor %ymm3, %ymm2, %ymm2 +; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm2 +; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2 +; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm3 +; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; AVX512F-NEXT: retq diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-512.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-512.ll --- a/llvm/test/CodeGen/X86/vector-fshl-rot-512.ll +++ b/llvm/test/CodeGen/X86/vector-fshl-rot-512.ll @@ -272,154 +272,154 @@ ; ; AVX512BW-LABEL: var_funnnel_v64i8: ; AVX512BW: # %bb.0: -; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX512BW-NEXT: vpsubb %zmm1, %zmm2, %zmm2 -; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] -; AVX512BW-NEXT: vpandq %zmm3, %zmm2, %zmm2 -; AVX512BW-NEXT: vpsllw $5, %zmm2, %zmm2 -; AVX512BW-NEXT: vpaddb %zmm2, %zmm2, %zmm4 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] +; AVX512BW-NEXT: vpandq %zmm2, %zmm1, %zmm3 +; AVX512BW-NEXT: vpsllw $5, %zmm3, %zmm3 +; AVX512BW-NEXT: vpaddb %zmm3, %zmm3, %zmm4 ; AVX512BW-NEXT: vpmovb2m %zmm4, %k1 -; AVX512BW-NEXT: vpmovb2m %zmm2, %k2 -; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm2 -; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 -; AVX512BW-NEXT: vpblendmb %zmm2, %zmm0, %zmm2 {%k2} -; AVX512BW-NEXT: vpsrlw $2, %zmm2, %zmm5 -; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm5 -; AVX512BW-NEXT: vmovdqu8 %zmm5, %zmm2 {%k1} -; AVX512BW-NEXT: vpsrlw $1, %zmm2, %zmm5 +; AVX512BW-NEXT: vpmovb2m %zmm3, %k2 +; AVX512BW-NEXT: vpsllw $4, %zmm0, 
%zmm3 +; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 +; AVX512BW-NEXT: vpblendmb %zmm3, %zmm0, %zmm3 {%k2} +; AVX512BW-NEXT: vpsllw $2, %zmm3, %zmm5 ; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm5 +; AVX512BW-NEXT: vmovdqu8 %zmm5, %zmm3 {%k1} ; AVX512BW-NEXT: vpaddb %zmm4, %zmm4, %zmm4 ; AVX512BW-NEXT: vpmovb2m %zmm4, %k1 -; AVX512BW-NEXT: vmovdqu8 %zmm5, %zmm2 {%k1} -; AVX512BW-NEXT: vpandq %zmm3, %zmm1, %zmm1 +; AVX512BW-NEXT: vpaddb %zmm3, %zmm3, %zmm3 {%k1} +; AVX512BW-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512BW-NEXT: vpsubb %zmm1, %zmm4, %zmm1 +; AVX512BW-NEXT: vpandq %zmm2, %zmm1, %zmm1 ; AVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1 -; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm3 -; AVX512BW-NEXT: vpmovb2m %zmm3, %k1 +; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm2 +; AVX512BW-NEXT: vpmovb2m %zmm2, %k1 ; AVX512BW-NEXT: vpmovb2m %zmm1, %k2 -; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm1 +; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1 ; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 ; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2} -; AVX512BW-NEXT: vpsllw $2, %zmm0, %zmm1 +; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm1 ; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 ; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1} -; AVX512BW-NEXT: vpaddb %zmm3, %zmm3, %zmm1 -; AVX512BW-NEXT: vpmovb2m %zmm1, %k1 -; AVX512BW-NEXT: vpaddb %zmm0, %zmm0, %zmm0 {%k1} -; AVX512BW-NEXT: vporq %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm1 +; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 +; AVX512BW-NEXT: vpaddb %zmm2, %zmm2, %zmm2 +; AVX512BW-NEXT: vpmovb2m %zmm2, %k1 +; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1} +; AVX512BW-NEXT: vporq %zmm0, %zmm3, %zmm0 ; AVX512BW-NEXT: retq ; ; AVX512VLBW-LABEL: var_funnnel_v64i8: ; AVX512VLBW: # %bb.0: -; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX512VLBW-NEXT: vpsubb %zmm1, %zmm2, %zmm2 -; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] -; AVX512VLBW-NEXT: vpandq %zmm3, %zmm2, %zmm2 -; AVX512VLBW-NEXT: vpsllw $5, %zmm2, %zmm2 -; AVX512VLBW-NEXT: vpaddb %zmm2, %zmm2, %zmm4 +; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] +; AVX512VLBW-NEXT: vpandq %zmm2, %zmm1, %zmm3 +; AVX512VLBW-NEXT: vpsllw $5, %zmm3, %zmm3 +; AVX512VLBW-NEXT: vpaddb %zmm3, %zmm3, %zmm4 ; AVX512VLBW-NEXT: vpmovb2m %zmm4, %k1 -; AVX512VLBW-NEXT: vpmovb2m %zmm2, %k2 -; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm2 -; AVX512VLBW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 -; AVX512VLBW-NEXT: vpblendmb %zmm2, %zmm0, %zmm2 {%k2} -; AVX512VLBW-NEXT: vpsrlw $2, %zmm2, %zmm5 -; AVX512VLBW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm5 -; AVX512VLBW-NEXT: vmovdqu8 %zmm5, %zmm2 {%k1} -; AVX512VLBW-NEXT: vpsrlw $1, %zmm2, %zmm5 +; AVX512VLBW-NEXT: vpmovb2m %zmm3, %k2 +; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm3 +; AVX512VLBW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 +; AVX512VLBW-NEXT: vpblendmb %zmm3, %zmm0, %zmm3 {%k2} +; AVX512VLBW-NEXT: vpsllw $2, %zmm3, %zmm5 ; AVX512VLBW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm5 +; AVX512VLBW-NEXT: vmovdqu8 %zmm5, %zmm3 {%k1} ; AVX512VLBW-NEXT: vpaddb %zmm4, %zmm4, %zmm4 ; AVX512VLBW-NEXT: vpmovb2m %zmm4, %k1 -; AVX512VLBW-NEXT: vmovdqu8 %zmm5, %zmm2 {%k1} -; AVX512VLBW-NEXT: vpandq %zmm3, 
%zmm1, %zmm1 +; AVX512VLBW-NEXT: vpaddb %zmm3, %zmm3, %zmm3 {%k1} +; AVX512VLBW-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512VLBW-NEXT: vpsubb %zmm1, %zmm4, %zmm1 +; AVX512VLBW-NEXT: vpandq %zmm2, %zmm1, %zmm1 ; AVX512VLBW-NEXT: vpsllw $5, %zmm1, %zmm1 -; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm3 -; AVX512VLBW-NEXT: vpmovb2m %zmm3, %k1 +; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm2 +; AVX512VLBW-NEXT: vpmovb2m %zmm2, %k1 ; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k2 -; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm1 +; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm1 ; AVX512VLBW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 ; AVX512VLBW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2} -; AVX512VLBW-NEXT: vpsllw $2, %zmm0, %zmm1 +; AVX512VLBW-NEXT: vpsrlw $2, %zmm0, %zmm1 ; AVX512VLBW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 ; AVX512VLBW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1} -; AVX512VLBW-NEXT: vpaddb %zmm3, %zmm3, %zmm1 -; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1 -; AVX512VLBW-NEXT: vpaddb %zmm0, %zmm0, %zmm0 {%k1} -; AVX512VLBW-NEXT: vporq %zmm2, %zmm0, %zmm0 +; AVX512VLBW-NEXT: vpsrlw $1, %zmm0, %zmm1 +; AVX512VLBW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 +; AVX512VLBW-NEXT: vpaddb %zmm2, %zmm2, %zmm2 +; AVX512VLBW-NEXT: vpmovb2m %zmm2, %k1 +; AVX512VLBW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1} +; AVX512VLBW-NEXT: vporq %zmm0, %zmm3, %zmm0 ; AVX512VLBW-NEXT: retq ; ; AVX512VBMI2-LABEL: var_funnnel_v64i8: ; AVX512VBMI2: # %bb.0: -; AVX512VBMI2-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX512VBMI2-NEXT: vpsubb %zmm1, %zmm2, %zmm2 -; AVX512VBMI2-NEXT: vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] -; AVX512VBMI2-NEXT: vpandq %zmm3, %zmm2, %zmm2 -; AVX512VBMI2-NEXT: vpsllw $5, %zmm2, %zmm2 -; AVX512VBMI2-NEXT: vpaddb %zmm2, %zmm2, %zmm4 +; AVX512VBMI2-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] +; AVX512VBMI2-NEXT: vpandq %zmm2, %zmm1, %zmm3 +; AVX512VBMI2-NEXT: vpsllw $5, %zmm3, %zmm3 +; AVX512VBMI2-NEXT: vpaddb %zmm3, %zmm3, %zmm4 ; AVX512VBMI2-NEXT: vpmovb2m %zmm4, %k1 -; AVX512VBMI2-NEXT: vpmovb2m %zmm2, %k2 -; AVX512VBMI2-NEXT: vpsrlw $4, %zmm0, %zmm2 -; AVX512VBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 -; AVX512VBMI2-NEXT: vpblendmb %zmm2, %zmm0, %zmm2 {%k2} -; AVX512VBMI2-NEXT: vpsrlw $2, %zmm2, %zmm5 -; AVX512VBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm5 -; AVX512VBMI2-NEXT: vmovdqu8 %zmm5, %zmm2 {%k1} -; AVX512VBMI2-NEXT: vpsrlw $1, %zmm2, %zmm5 +; AVX512VBMI2-NEXT: vpmovb2m %zmm3, %k2 +; AVX512VBMI2-NEXT: vpsllw $4, %zmm0, %zmm3 +; AVX512VBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 +; AVX512VBMI2-NEXT: vpblendmb %zmm3, %zmm0, %zmm3 {%k2} +; AVX512VBMI2-NEXT: vpsllw $2, %zmm3, %zmm5 ; AVX512VBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm5 +; AVX512VBMI2-NEXT: vmovdqu8 %zmm5, %zmm3 {%k1} ; AVX512VBMI2-NEXT: vpaddb %zmm4, %zmm4, %zmm4 ; AVX512VBMI2-NEXT: vpmovb2m %zmm4, %k1 -; AVX512VBMI2-NEXT: vmovdqu8 %zmm5, %zmm2 {%k1} -; AVX512VBMI2-NEXT: vpandq %zmm3, %zmm1, %zmm1 +; AVX512VBMI2-NEXT: vpaddb %zmm3, %zmm3, %zmm3 {%k1} +; AVX512VBMI2-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512VBMI2-NEXT: vpsubb %zmm1, %zmm4, %zmm1 +; AVX512VBMI2-NEXT: vpandq %zmm2, %zmm1, %zmm1 ; AVX512VBMI2-NEXT: vpsllw $5, %zmm1, %zmm1 -; AVX512VBMI2-NEXT: vpaddb %zmm1, %zmm1, %zmm3 -; AVX512VBMI2-NEXT: vpmovb2m %zmm3, %k1 +; 
AVX512VBMI2-NEXT: vpaddb %zmm1, %zmm1, %zmm2 +; AVX512VBMI2-NEXT: vpmovb2m %zmm2, %k1 ; AVX512VBMI2-NEXT: vpmovb2m %zmm1, %k2 -; AVX512VBMI2-NEXT: vpsllw $4, %zmm0, %zmm1 +; AVX512VBMI2-NEXT: vpsrlw $4, %zmm0, %zmm1 ; AVX512VBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 ; AVX512VBMI2-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2} -; AVX512VBMI2-NEXT: vpsllw $2, %zmm0, %zmm1 +; AVX512VBMI2-NEXT: vpsrlw $2, %zmm0, %zmm1 ; AVX512VBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 ; AVX512VBMI2-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1} -; AVX512VBMI2-NEXT: vpaddb %zmm3, %zmm3, %zmm1 -; AVX512VBMI2-NEXT: vpmovb2m %zmm1, %k1 -; AVX512VBMI2-NEXT: vpaddb %zmm0, %zmm0, %zmm0 {%k1} -; AVX512VBMI2-NEXT: vporq %zmm2, %zmm0, %zmm0 +; AVX512VBMI2-NEXT: vpsrlw $1, %zmm0, %zmm1 +; AVX512VBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 +; AVX512VBMI2-NEXT: vpaddb %zmm2, %zmm2, %zmm2 +; AVX512VBMI2-NEXT: vpmovb2m %zmm2, %k1 +; AVX512VBMI2-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1} +; AVX512VBMI2-NEXT: vporq %zmm0, %zmm3, %zmm0 ; AVX512VBMI2-NEXT: retq ; ; AVX512VLVBMI2-LABEL: var_funnnel_v64i8: ; AVX512VLVBMI2: # %bb.0: -; AVX512VLVBMI2-NEXT: vpxor %xmm2, %xmm2, %xmm2 -; AVX512VLVBMI2-NEXT: vpsubb %zmm1, %zmm2, %zmm2 -; AVX512VLVBMI2-NEXT: vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] -; AVX512VLVBMI2-NEXT: vpandq %zmm3, %zmm2, %zmm2 -; AVX512VLVBMI2-NEXT: vpsllw $5, %zmm2, %zmm2 -; AVX512VLVBMI2-NEXT: vpaddb %zmm2, %zmm2, %zmm4 +; AVX512VLVBMI2-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] +; AVX512VLVBMI2-NEXT: vpandq %zmm2, %zmm1, %zmm3 +; AVX512VLVBMI2-NEXT: vpsllw $5, %zmm3, %zmm3 +; AVX512VLVBMI2-NEXT: vpaddb %zmm3, %zmm3, %zmm4 ; AVX512VLVBMI2-NEXT: vpmovb2m %zmm4, %k1 -; AVX512VLVBMI2-NEXT: vpmovb2m %zmm2, %k2 -; AVX512VLVBMI2-NEXT: vpsrlw $4, %zmm0, %zmm2 -; AVX512VLVBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2 -; AVX512VLVBMI2-NEXT: vpblendmb %zmm2, %zmm0, %zmm2 {%k2} -; AVX512VLVBMI2-NEXT: vpsrlw $2, %zmm2, %zmm5 -; AVX512VLVBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm5 -; AVX512VLVBMI2-NEXT: vmovdqu8 %zmm5, %zmm2 {%k1} -; AVX512VLVBMI2-NEXT: vpsrlw $1, %zmm2, %zmm5 +; AVX512VLVBMI2-NEXT: vpmovb2m %zmm3, %k2 +; AVX512VLVBMI2-NEXT: vpsllw $4, %zmm0, %zmm3 +; AVX512VLVBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3 +; AVX512VLVBMI2-NEXT: vpblendmb %zmm3, %zmm0, %zmm3 {%k2} +; AVX512VLVBMI2-NEXT: vpsllw $2, %zmm3, %zmm5 ; AVX512VLVBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm5 +; AVX512VLVBMI2-NEXT: vmovdqu8 %zmm5, %zmm3 {%k1} ; AVX512VLVBMI2-NEXT: vpaddb %zmm4, %zmm4, %zmm4 ; AVX512VLVBMI2-NEXT: vpmovb2m %zmm4, %k1 -; AVX512VLVBMI2-NEXT: vmovdqu8 %zmm5, %zmm2 {%k1} -; AVX512VLVBMI2-NEXT: vpandq %zmm3, %zmm1, %zmm1 +; AVX512VLVBMI2-NEXT: vpaddb %zmm3, %zmm3, %zmm3 {%k1} +; AVX512VLVBMI2-NEXT: vpxor %xmm4, %xmm4, %xmm4 +; AVX512VLVBMI2-NEXT: vpsubb %zmm1, %zmm4, %zmm1 +; AVX512VLVBMI2-NEXT: vpandq %zmm2, %zmm1, %zmm1 ; AVX512VLVBMI2-NEXT: vpsllw $5, %zmm1, %zmm1 -; AVX512VLVBMI2-NEXT: vpaddb %zmm1, %zmm1, %zmm3 -; AVX512VLVBMI2-NEXT: vpmovb2m %zmm3, %k1 +; AVX512VLVBMI2-NEXT: vpaddb %zmm1, %zmm1, %zmm2 +; AVX512VLVBMI2-NEXT: vpmovb2m %zmm2, %k1 ; AVX512VLVBMI2-NEXT: vpmovb2m %zmm1, %k2 -; AVX512VLVBMI2-NEXT: vpsllw $4, %zmm0, %zmm1 +; AVX512VLVBMI2-NEXT: vpsrlw $4, %zmm0, %zmm1 ; AVX512VLVBMI2-NEXT: 
vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 ; AVX512VLVBMI2-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2} -; AVX512VLVBMI2-NEXT: vpsllw $2, %zmm0, %zmm1 +; AVX512VLVBMI2-NEXT: vpsrlw $2, %zmm0, %zmm1 ; AVX512VLVBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 ; AVX512VLVBMI2-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1} -; AVX512VLVBMI2-NEXT: vpaddb %zmm3, %zmm3, %zmm1 -; AVX512VLVBMI2-NEXT: vpmovb2m %zmm1, %k1 -; AVX512VLVBMI2-NEXT: vpaddb %zmm0, %zmm0, %zmm0 {%k1} -; AVX512VLVBMI2-NEXT: vporq %zmm2, %zmm0, %zmm0 +; AVX512VLVBMI2-NEXT: vpsrlw $1, %zmm0, %zmm1 +; AVX512VLVBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1 +; AVX512VLVBMI2-NEXT: vpaddb %zmm2, %zmm2, %zmm2 +; AVX512VLVBMI2-NEXT: vpmovb2m %zmm2, %k1 +; AVX512VLVBMI2-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1} +; AVX512VLVBMI2-NEXT: vporq %zmm0, %zmm3, %zmm0 ; AVX512VLVBMI2-NEXT: retq %res = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> %x, <64 x i8> %x, <64 x i8> %amt) ret <64 x i8> %res diff --git a/llvm/test/CodeGen/X86/vector-fshr-128.ll b/llvm/test/CodeGen/X86/vector-fshr-128.ll --- a/llvm/test/CodeGen/X86/vector-fshr-128.ll +++ b/llvm/test/CodeGen/X86/vector-fshr-128.ll @@ -732,65 +732,65 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt) nounwind { ; SSE2-LABEL: var_funnnel_v16i8: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] -; SSE2-NEXT: movdqa %xmm2, %xmm4 -; SSE2-NEXT: pandn %xmm5, %xmm4 -; SSE2-NEXT: psllw $5, %xmm4 +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] +; SSE2-NEXT: movdqa %xmm2, %xmm5 +; SSE2-NEXT: pand %xmm4, %xmm5 +; SSE2-NEXT: psllw $5, %xmm5 ; SSE2-NEXT: pxor %xmm3, %xmm3 ; SSE2-NEXT: pxor %xmm6, %xmm6 -; SSE2-NEXT: pcmpgtb %xmm4, %xmm6 -; SSE2-NEXT: paddb %xmm0, %xmm0 +; SSE2-NEXT: pcmpgtb %xmm5, %xmm6 ; SSE2-NEXT: movdqa %xmm6, %xmm7 -; SSE2-NEXT: pandn %xmm0, %xmm7 -; SSE2-NEXT: psllw $4, %xmm0 -; SSE2-NEXT: pand %xmm6, %xmm0 -; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; SSE2-NEXT: por %xmm7, %xmm0 -; SSE2-NEXT: paddb %xmm4, %xmm4 +; SSE2-NEXT: pandn %xmm1, %xmm7 +; SSE2-NEXT: psrlw $4, %xmm1 +; SSE2-NEXT: pand %xmm6, %xmm1 +; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: por %xmm7, %xmm1 +; SSE2-NEXT: paddb %xmm5, %xmm5 ; SSE2-NEXT: pxor %xmm6, %xmm6 -; SSE2-NEXT: pcmpgtb %xmm4, %xmm6 +; SSE2-NEXT: pcmpgtb %xmm5, %xmm6 ; SSE2-NEXT: movdqa %xmm6, %xmm7 -; SSE2-NEXT: pandn %xmm0, %xmm7 -; SSE2-NEXT: psllw $2, %xmm0 -; SSE2-NEXT: pand %xmm6, %xmm0 -; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 -; SSE2-NEXT: por %xmm7, %xmm0 -; SSE2-NEXT: paddb %xmm4, %xmm4 +; SSE2-NEXT: pandn %xmm1, %xmm7 +; SSE2-NEXT: psrlw $2, %xmm1 +; SSE2-NEXT: pand %xmm6, %xmm1 +; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 +; SSE2-NEXT: por %xmm7, %xmm1 +; SSE2-NEXT: paddb %xmm5, %xmm5 ; SSE2-NEXT: pxor %xmm6, %xmm6 -; SSE2-NEXT: pcmpgtb %xmm4, %xmm6 -; SSE2-NEXT: movdqa %xmm6, %xmm4 -; SSE2-NEXT: pandn %xmm0, %xmm4 -; SSE2-NEXT: paddb %xmm0, %xmm0 -; SSE2-NEXT: pand %xmm6, %xmm0 -; SSE2-NEXT: pand %xmm5, %xmm2 -; SSE2-NEXT: psllw $5, %xmm2 -; SSE2-NEXT: pxor %xmm5, %xmm5 -; SSE2-NEXT: pcmpgtb %xmm2, %xmm5 -; SSE2-NEXT: movdqa %xmm5, %xmm6 -; SSE2-NEXT: pandn %xmm1, %xmm6 -; SSE2-NEXT: psrlw $4, %xmm1 -; SSE2-NEXT: pand %xmm5, %xmm1 +; SSE2-NEXT: pcmpgtb %xmm5, %xmm6 +; SSE2-NEXT: movdqa %xmm6, %xmm5 +; SSE2-NEXT: pandn %xmm1, %xmm5 +; SSE2-NEXT: psrlw $1, %xmm1 +; SSE2-NEXT: pand %xmm6, %xmm1 ; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 -; SSE2-NEXT: por %xmm6, %xmm1 
+; SSE2-NEXT: por %xmm5, %xmm1 +; SSE2-NEXT: pandn %xmm4, %xmm2 +; SSE2-NEXT: psllw $5, %xmm2 +; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pcmpgtb %xmm2, %xmm4 +; SSE2-NEXT: paddb %xmm0, %xmm0 +; SSE2-NEXT: movdqa %xmm4, %xmm5 +; SSE2-NEXT: pandn %xmm0, %xmm5 +; SSE2-NEXT: psllw $4, %xmm0 +; SSE2-NEXT: pand %xmm4, %xmm0 +; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: por %xmm5, %xmm0 ; SSE2-NEXT: paddb %xmm2, %xmm2 -; SSE2-NEXT: pxor %xmm5, %xmm5 -; SSE2-NEXT: pcmpgtb %xmm2, %xmm5 -; SSE2-NEXT: movdqa %xmm5, %xmm6 -; SSE2-NEXT: pandn %xmm1, %xmm6 -; SSE2-NEXT: psrlw $2, %xmm1 -; SSE2-NEXT: pand %xmm5, %xmm1 -; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 -; SSE2-NEXT: por %xmm6, %xmm1 +; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pcmpgtb %xmm2, %xmm4 +; SSE2-NEXT: movdqa %xmm4, %xmm5 +; SSE2-NEXT: pandn %xmm0, %xmm5 +; SSE2-NEXT: psllw $2, %xmm0 +; SSE2-NEXT: pand %xmm4, %xmm0 +; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 +; SSE2-NEXT: por %xmm5, %xmm0 ; SSE2-NEXT: paddb %xmm2, %xmm2 ; SSE2-NEXT: pcmpgtb %xmm2, %xmm3 ; SSE2-NEXT: movdqa %xmm3, %xmm2 -; SSE2-NEXT: pandn %xmm1, %xmm2 -; SSE2-NEXT: psrlw $1, %xmm1 -; SSE2-NEXT: pand %xmm3, %xmm1 -; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 -; SSE2-NEXT: por %xmm2, %xmm1 -; SSE2-NEXT: por %xmm4, %xmm1 -; SSE2-NEXT: por %xmm1, %xmm0 +; SSE2-NEXT: pandn %xmm0, %xmm2 +; SSE2-NEXT: por %xmm1, %xmm2 +; SSE2-NEXT: paddb %xmm0, %xmm0 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: por %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: var_funnnel_v16i8: @@ -981,77 +981,77 @@ ; XOP-LABEL: var_funnnel_v16i8: ; XOP: # %bb.0: ; XOP-NEXT: vmovdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] -; XOP-NEXT: vpandn %xmm3, %xmm2, %xmm4 +; XOP-NEXT: vpand %xmm3, %xmm2, %xmm4 +; XOP-NEXT: vpxor %xmm5, %xmm5, %xmm5 +; XOP-NEXT: vpsubb %xmm4, %xmm5, %xmm4 +; XOP-NEXT: vpshlb %xmm4, %xmm1, %xmm1 +; XOP-NEXT: vpandn %xmm3, %xmm2, %xmm2 ; XOP-NEXT: vpaddb %xmm0, %xmm0, %xmm0 -; XOP-NEXT: vpshlb %xmm4, %xmm0, %xmm0 -; XOP-NEXT: vpand %xmm3, %xmm2, %xmm2 -; XOP-NEXT: vpxor %xmm3, %xmm3, %xmm3 -; XOP-NEXT: vpsubb %xmm2, %xmm3, %xmm2 -; XOP-NEXT: vpshlb %xmm2, %xmm1, %xmm1 +; XOP-NEXT: vpshlb %xmm2, %xmm0, %xmm0 ; XOP-NEXT: vpor %xmm1, %xmm0, %xmm0 ; XOP-NEXT: retq ; ; X86-SSE2-LABEL: var_funnnel_v16i8: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: movdqa {{.*#+}} xmm5 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] -; X86-SSE2-NEXT: movdqa %xmm2, %xmm4 -; X86-SSE2-NEXT: pandn %xmm5, %xmm4 -; X86-SSE2-NEXT: psllw $5, %xmm4 +; X86-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] +; X86-SSE2-NEXT: movdqa %xmm2, %xmm5 +; X86-SSE2-NEXT: pand %xmm4, %xmm5 +; X86-SSE2-NEXT: psllw $5, %xmm5 ; X86-SSE2-NEXT: pxor %xmm3, %xmm3 ; X86-SSE2-NEXT: pxor %xmm6, %xmm6 -; X86-SSE2-NEXT: pcmpgtb %xmm4, %xmm6 -; X86-SSE2-NEXT: paddb %xmm0, %xmm0 +; X86-SSE2-NEXT: pcmpgtb %xmm5, %xmm6 ; X86-SSE2-NEXT: movdqa %xmm6, %xmm7 -; X86-SSE2-NEXT: pandn %xmm0, %xmm7 -; X86-SSE2-NEXT: psllw $4, %xmm0 -; X86-SSE2-NEXT: pand %xmm6, %xmm0 -; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 -; X86-SSE2-NEXT: por %xmm7, %xmm0 -; X86-SSE2-NEXT: paddb %xmm4, %xmm4 +; X86-SSE2-NEXT: pandn %xmm1, %xmm7 +; X86-SSE2-NEXT: psrlw $4, %xmm1 +; X86-SSE2-NEXT: pand %xmm6, %xmm1 +; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: por %xmm7, %xmm1 +; X86-SSE2-NEXT: paddb %xmm5, %xmm5 ; X86-SSE2-NEXT: pxor %xmm6, %xmm6 -; X86-SSE2-NEXT: pcmpgtb %xmm4, %xmm6 +; X86-SSE2-NEXT: pcmpgtb %xmm5, %xmm6 ; X86-SSE2-NEXT: movdqa %xmm6, %xmm7 -; 
-; X86-SSE2-NEXT: psllw $2, %xmm0
-; X86-SSE2-NEXT: pand %xmm6, %xmm0
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-SSE2-NEXT: por %xmm7, %xmm0
-; X86-SSE2-NEXT: paddb %xmm4, %xmm4
+; X86-SSE2-NEXT: pandn %xmm1, %xmm7
+; X86-SSE2-NEXT: psrlw $2, %xmm1
+; X86-SSE2-NEXT: pand %xmm6, %xmm1
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: por %xmm7, %xmm1
+; X86-SSE2-NEXT: paddb %xmm5, %xmm5
; X86-SSE2-NEXT: pxor %xmm6, %xmm6
-; X86-SSE2-NEXT: pcmpgtb %xmm4, %xmm6
-; X86-SSE2-NEXT: movdqa %xmm6, %xmm4
-; X86-SSE2-NEXT: pandn %xmm0, %xmm4
-; X86-SSE2-NEXT: paddb %xmm0, %xmm0
-; X86-SSE2-NEXT: pand %xmm6, %xmm0
-; X86-SSE2-NEXT: pand %xmm5, %xmm2
-; X86-SSE2-NEXT: psllw $5, %xmm2
-; X86-SSE2-NEXT: pxor %xmm5, %xmm5
-; X86-SSE2-NEXT: pcmpgtb %xmm2, %xmm5
-; X86-SSE2-NEXT: movdqa %xmm5, %xmm6
-; X86-SSE2-NEXT: pandn %xmm1, %xmm6
-; X86-SSE2-NEXT: psrlw $4, %xmm1
-; X86-SSE2-NEXT: pand %xmm5, %xmm1
+; X86-SSE2-NEXT: pcmpgtb %xmm5, %xmm6
+; X86-SSE2-NEXT: movdqa %xmm6, %xmm5
+; X86-SSE2-NEXT: pandn %xmm1, %xmm5
+; X86-SSE2-NEXT: psrlw $1, %xmm1
+; X86-SSE2-NEXT: pand %xmm6, %xmm1
; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-SSE2-NEXT: por %xmm6, %xmm1
+; X86-SSE2-NEXT: por %xmm5, %xmm1
+; X86-SSE2-NEXT: pandn %xmm4, %xmm2
+; X86-SSE2-NEXT: psllw $5, %xmm2
+; X86-SSE2-NEXT: pxor %xmm4, %xmm4
+; X86-SSE2-NEXT: pcmpgtb %xmm2, %xmm4
+; X86-SSE2-NEXT: paddb %xmm0, %xmm0
+; X86-SSE2-NEXT: movdqa %xmm4, %xmm5
+; X86-SSE2-NEXT: pandn %xmm0, %xmm5
+; X86-SSE2-NEXT: psllw $4, %xmm0
+; X86-SSE2-NEXT: pand %xmm4, %xmm0
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: por %xmm5, %xmm0
; X86-SSE2-NEXT: paddb %xmm2, %xmm2
-; X86-SSE2-NEXT: pxor %xmm5, %xmm5
-; X86-SSE2-NEXT: pcmpgtb %xmm2, %xmm5
-; X86-SSE2-NEXT: movdqa %xmm5, %xmm6
-; X86-SSE2-NEXT: pandn %xmm1, %xmm6
-; X86-SSE2-NEXT: psrlw $2, %xmm1
-; X86-SSE2-NEXT: pand %xmm5, %xmm1
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-SSE2-NEXT: por %xmm6, %xmm1
+; X86-SSE2-NEXT: pxor %xmm4, %xmm4
+; X86-SSE2-NEXT: pcmpgtb %xmm2, %xmm4
+; X86-SSE2-NEXT: movdqa %xmm4, %xmm5
+; X86-SSE2-NEXT: pandn %xmm0, %xmm5
+; X86-SSE2-NEXT: psllw $2, %xmm0
+; X86-SSE2-NEXT: pand %xmm4, %xmm0
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: por %xmm5, %xmm0
; X86-SSE2-NEXT: paddb %xmm2, %xmm2
; X86-SSE2-NEXT: pcmpgtb %xmm2, %xmm3
; X86-SSE2-NEXT: movdqa %xmm3, %xmm2
-; X86-SSE2-NEXT: pandn %xmm1, %xmm2
-; X86-SSE2-NEXT: psrlw $1, %xmm1
-; X86-SSE2-NEXT: pand %xmm3, %xmm1
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-SSE2-NEXT: por %xmm2, %xmm1
-; X86-SSE2-NEXT: por %xmm4, %xmm1
-; X86-SSE2-NEXT: por %xmm1, %xmm0
+; X86-SSE2-NEXT: pandn %xmm0, %xmm2
+; X86-SSE2-NEXT: por %xmm1, %xmm2
+; X86-SSE2-NEXT: paddb %xmm0, %xmm0
+; X86-SSE2-NEXT: pand %xmm3, %xmm0
+; X86-SSE2-NEXT: por %xmm2, %xmm0
; X86-SSE2-NEXT: retl
%res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
ret <16 x i8> %res
diff --git a/llvm/test/CodeGen/X86/vector-fshr-256.ll b/llvm/test/CodeGen/X86/vector-fshr-256.ll
--- a/llvm/test/CodeGen/X86/vector-fshr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-256.ll
@@ -758,45 +758,45 @@
; XOPAVX1-LABEL: var_funnnel_v32i8:
; XOPAVX1: # %bb.0:
; XOPAVX1-NEXT: vmovaps {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; XOPAVX1-NEXT: vandnps %ymm3, %ymm2, %ymm4
+; XOPAVX1-NEXT: vandps %ymm3, %ymm2, %ymm4
; XOPAVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
-; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
-; XOPAVX1-NEXT: vpaddb %xmm6, %xmm6, %xmm6
-; XOPAVX1-NEXT: vpshlb %xmm5, %xmm6, %xmm5
-; XOPAVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm0
-; XOPAVX1-NEXT: vpshlb %xmm4, %xmm0, %xmm0
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
-; XOPAVX1-NEXT: vandps %ymm3, %ymm2, %ymm2
+; XOPAVX1-NEXT: vpxor %xmm6, %xmm6, %xmm6
+; XOPAVX1-NEXT: vpsubb %xmm5, %xmm6, %xmm5
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
+; XOPAVX1-NEXT: vpshlb %xmm5, %xmm7, %xmm5
+; XOPAVX1-NEXT: vpsubb %xmm4, %xmm6, %xmm4
+; XOPAVX1-NEXT: vpshlb %xmm4, %xmm1, %xmm1
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
+; XOPAVX1-NEXT: vandnps %ymm3, %ymm2, %ymm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; XOPAVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; XOPAVX1-NEXT: vpsubb %xmm3, %xmm4, %xmm3
-; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; XOPAVX1-NEXT: vpshlb %xmm3, %xmm5, %xmm3
-; XOPAVX1-NEXT: vpsubb %xmm2, %xmm4, %xmm2
-; XOPAVX1-NEXT: vpshlb %xmm2, %xmm1, %xmm1
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
+; XOPAVX1-NEXT: vpshlb %xmm3, %xmm4, %xmm3
+; XOPAVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm0
+; XOPAVX1-NEXT: vpshlb %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; XOPAVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_funnnel_v32i8:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; XOPAVX2-NEXT: vpandn %ymm3, %ymm2, %ymm4
+; XOPAVX2-NEXT: vpand %ymm3, %ymm2, %ymm4
; XOPAVX2-NEXT: vextracti128 $1, %ymm4, %xmm5
-; XOPAVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm0
-; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm6
-; XOPAVX2-NEXT: vpshlb %xmm5, %xmm6, %xmm5
-; XOPAVX2-NEXT: vpshlb %xmm4, %xmm0, %xmm0
-; XOPAVX2-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm0
-; XOPAVX2-NEXT: vpand %ymm3, %ymm2, %ymm2
+; XOPAVX2-NEXT: vpxor %xmm6, %xmm6, %xmm6
+; XOPAVX2-NEXT: vpsubb %xmm5, %xmm6, %xmm5
+; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm7
+; XOPAVX2-NEXT: vpshlb %xmm5, %xmm7, %xmm5
+; XOPAVX2-NEXT: vpsubb %xmm4, %xmm6, %xmm4
+; XOPAVX2-NEXT: vpshlb %xmm4, %xmm1, %xmm1
+; XOPAVX2-NEXT: vinserti128 $1, %xmm5, %ymm1, %ymm1
+; XOPAVX2-NEXT: vpandn %ymm3, %ymm2, %ymm2
; XOPAVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
-; XOPAVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; XOPAVX2-NEXT: vpsubb %xmm3, %xmm4, %xmm3
-; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm5
-; XOPAVX2-NEXT: vpshlb %xmm3, %xmm5, %xmm3
-; XOPAVX2-NEXT: vpsubb %xmm2, %xmm4, %xmm2
-; XOPAVX2-NEXT: vpshlb %xmm2, %xmm1, %xmm1
-; XOPAVX2-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
+; XOPAVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm0
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
+; XOPAVX2-NEXT: vpshlb %xmm3, %xmm4, %xmm3
+; XOPAVX2-NEXT: vpshlb %xmm2, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
; XOPAVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%res = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %amt)
ret <32 x i8> %res
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
@@ -553,10 +553,10 @@
; SSE2-NEXT: pandn %xmm1, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: paddb %xmm2, %xmm1
+; SSE2-NEXT: psrlw $7, %xmm1
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: psrlw $7, %xmm4
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
+; SSE2-NEXT: paddb %xmm2, %xmm4
; SSE2-NEXT: por %xmm1, %xmm4
; SSE2-NEXT: paddb %xmm3, %xmm3
; SSE2-NEXT: pcmpgtb %xmm3, %xmm0
@@ -588,10 +588,10 @@
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm1
-; SSE41-NEXT: paddb %xmm2, %xmm1
+; SSE41-NEXT: psrlw $7, %xmm1
+; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: movdqa %xmm2, %xmm3
-; SSE41-NEXT: psrlw $7, %xmm3
-; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; SSE41-NEXT: paddb %xmm2, %xmm3
; SSE41-NEXT: por %xmm1, %xmm3
; SSE41-NEXT: paddb %xmm0, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
@@ -616,10 +616,10 @@
; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm2
-; AVX-NEXT: vpsrlw $7, %xmm0, %xmm3
-; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
-; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vpsrlw $7, %xmm0, %xmm2
+; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm3
+; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -768,10 +768,10 @@
; X86-SSE2-NEXT: pandn %xmm1, %xmm2
; X86-SSE2-NEXT: por %xmm4, %xmm2
; X86-SSE2-NEXT: movdqa %xmm2, %xmm1
-; X86-SSE2-NEXT: paddb %xmm2, %xmm1
+; X86-SSE2-NEXT: psrlw $7, %xmm1
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: movdqa %xmm2, %xmm4
-; X86-SSE2-NEXT: psrlw $7, %xmm4
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm4
+; X86-SSE2-NEXT: paddb %xmm2, %xmm4
; X86-SSE2-NEXT: por %xmm1, %xmm4
; X86-SSE2-NEXT: paddb %xmm3, %xmm3
; X86-SSE2-NEXT: pcmpgtb %xmm3, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
@@ -480,10 +480,10 @@
; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm3
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
-; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm2
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm3
+; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -506,10 +506,10 @@
; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm3
-; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
-; AVX512F-NEXT: vpor %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm2
+; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm3
+; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-512.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-512.ll
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-512.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-512.ll
@@ -272,154 +272,154 @@
;
; AVX512BW-LABEL: var_funnnel_v64i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX512BW-NEXT: vpandq %zmm2, %zmm1, %zmm3
-; AVX512BW-NEXT: vpsllw $5, %zmm3, %zmm3
-; AVX512BW-NEXT: vpaddb %zmm3, %zmm3, %zmm4
+; AVX512BW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512BW-NEXT: vpsubb %zmm1, %zmm2, %zmm2
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512BW-NEXT: vpandq %zmm3, %zmm2, %zmm2
+; AVX512BW-NEXT: vpsllw $5, %zmm2, %zmm2
+; AVX512BW-NEXT: vpaddb %zmm2, %zmm2, %zmm4
; AVX512BW-NEXT: vpmovb2m %zmm4, %k1
-; AVX512BW-NEXT: vpmovb2m %zmm3, %k2
-; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm3
-; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3
-; AVX512BW-NEXT: vpblendmb %zmm3, %zmm0, %zmm3 {%k2}
-; AVX512BW-NEXT: vpsrlw $2, %zmm3, %zmm5
-; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm5
-; AVX512BW-NEXT: vmovdqu8 %zmm5, %zmm3 {%k1}
-; AVX512BW-NEXT: vpsrlw $1, %zmm3, %zmm5
+; AVX512BW-NEXT: vpmovb2m %zmm2, %k2
+; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm2
+; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
+; AVX512BW-NEXT: vpblendmb %zmm2, %zmm0, %zmm2 {%k2}
+; AVX512BW-NEXT: vpsllw $2, %zmm2, %zmm5
; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm5
+; AVX512BW-NEXT: vmovdqu8 %zmm5, %zmm2 {%k1}
; AVX512BW-NEXT: vpaddb %zmm4, %zmm4, %zmm4
; AVX512BW-NEXT: vpmovb2m %zmm4, %k1
-; AVX512BW-NEXT: vmovdqu8 %zmm5, %zmm3 {%k1}
-; AVX512BW-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX512BW-NEXT: vpsubb %zmm1, %zmm4, %zmm1
-; AVX512BW-NEXT: vpandq %zmm2, %zmm1, %zmm1
+; AVX512BW-NEXT: vpaddb %zmm2, %zmm2, %zmm2 {%k1}
+; AVX512BW-NEXT: vpandq %zmm3, %zmm1, %zmm1
; AVX512BW-NEXT: vpsllw $5, %zmm1, %zmm1
-; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm2
-; AVX512BW-NEXT: vpmovb2m %zmm2, %k1
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm1, %zmm3
+; AVX512BW-NEXT: vpmovb2m %zmm3, %k1
; AVX512BW-NEXT: vpmovb2m %zmm1, %k2
-; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm1
+; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1
; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2}
-; AVX512BW-NEXT: vpsllw $2, %zmm0, %zmm1
+; AVX512BW-NEXT: vpsrlw $2, %zmm0, %zmm1
; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
-; AVX512BW-NEXT: vpaddb %zmm2, %zmm2, %zmm1
-; AVX512BW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512BW-NEXT: vpaddb %zmm0, %zmm0, %zmm0 {%k1}
-; AVX512BW-NEXT: vporq %zmm0, %zmm3, %zmm0
+; AVX512BW-NEXT: vpsrlw $1, %zmm0, %zmm1
+; AVX512BW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vpaddb %zmm3, %zmm3, %zmm3
+; AVX512BW-NEXT: vpmovb2m %zmm3, %k1
+; AVX512BW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512BW-NEXT: vporq %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: var_funnnel_v64i8:
; AVX512VLBW: # %bb.0:
-; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX512VLBW-NEXT: vpandq %zmm2, %zmm1, %zmm3
-; AVX512VLBW-NEXT: vpsllw $5, %zmm3, %zmm3
-; AVX512VLBW-NEXT: vpaddb %zmm3, %zmm3, %zmm4
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpsubb %zmm1, %zmm2, %zmm2
+; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLBW-NEXT: vpandq %zmm3, %zmm2, %zmm2
+; AVX512VLBW-NEXT: vpsllw $5, %zmm2, %zmm2
+; AVX512VLBW-NEXT: vpaddb %zmm2, %zmm2, %zmm4
; AVX512VLBW-NEXT: vpmovb2m %zmm4, %k1
-; AVX512VLBW-NEXT: vpmovb2m %zmm3, %k2
-; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm3
-; AVX512VLBW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3
-; AVX512VLBW-NEXT: vpblendmb %zmm3, %zmm0, %zmm3 {%k2}
-; AVX512VLBW-NEXT: vpsrlw $2, %zmm3, %zmm5
-; AVX512VLBW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm5
-; AVX512VLBW-NEXT: vmovdqu8 %zmm5, %zmm3 {%k1}
-; AVX512VLBW-NEXT: vpsrlw $1, %zmm3, %zmm5
+; AVX512VLBW-NEXT: vpmovb2m %zmm2, %k2
+; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm2
+; AVX512VLBW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
+; AVX512VLBW-NEXT: vpblendmb %zmm2, %zmm0, %zmm2 {%k2}
+; AVX512VLBW-NEXT: vpsllw $2, %zmm2, %zmm5
; AVX512VLBW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm5
+; AVX512VLBW-NEXT: vmovdqu8 %zmm5, %zmm2 {%k1}
; AVX512VLBW-NEXT: vpaddb %zmm4, %zmm4, %zmm4
; AVX512VLBW-NEXT: vpmovb2m %zmm4, %k1
-; AVX512VLBW-NEXT: vmovdqu8 %zmm5, %zmm3 {%k1}
-; AVX512VLBW-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX512VLBW-NEXT: vpsubb %zmm1, %zmm4, %zmm1
-; AVX512VLBW-NEXT: vpandq %zmm2, %zmm1, %zmm1
+; AVX512VLBW-NEXT: vpaddb %zmm2, %zmm2, %zmm2 {%k1}
+; AVX512VLBW-NEXT: vpandq %zmm3, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpsllw $5, %zmm1, %zmm1
-; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm2
-; AVX512VLBW-NEXT: vpmovb2m %zmm2, %k1
+; AVX512VLBW-NEXT: vpaddb %zmm1, %zmm1, %zmm3
+; AVX512VLBW-NEXT: vpmovb2m %zmm3, %k1
; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k2
-; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm1
+; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm1
; AVX512VLBW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VLBW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2}
-; AVX512VLBW-NEXT: vpsllw $2, %zmm0, %zmm1
+; AVX512VLBW-NEXT: vpsrlw $2, %zmm0, %zmm1
; AVX512VLBW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VLBW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
-; AVX512VLBW-NEXT: vpaddb %zmm2, %zmm2, %zmm1
-; AVX512VLBW-NEXT: vpmovb2m %zmm1, %k1
-; AVX512VLBW-NEXT: vpaddb %zmm0, %zmm0, %zmm0 {%k1}
-; AVX512VLBW-NEXT: vporq %zmm0, %zmm3, %zmm0
+; AVX512VLBW-NEXT: vpsrlw $1, %zmm0, %zmm1
+; AVX512VLBW-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT: vpaddb %zmm3, %zmm3, %zmm3
+; AVX512VLBW-NEXT: vpmovb2m %zmm3, %k1
+; AVX512VLBW-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VLBW-NEXT: vporq %zmm2, %zmm0, %zmm0
; AVX512VLBW-NEXT: retq
;
; AVX512VBMI2-LABEL: var_funnnel_v64i8:
; AVX512VBMI2: # %bb.0:
-; AVX512VBMI2-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX512VBMI2-NEXT: vpandq %zmm2, %zmm1, %zmm3
-; AVX512VBMI2-NEXT: vpsllw $5, %zmm3, %zmm3
-; AVX512VBMI2-NEXT: vpaddb %zmm3, %zmm3, %zmm4
+; AVX512VBMI2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VBMI2-NEXT: vpsubb %zmm1, %zmm2, %zmm2
+; AVX512VBMI2-NEXT: vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VBMI2-NEXT: vpandq %zmm3, %zmm2, %zmm2
+; AVX512VBMI2-NEXT: vpsllw $5, %zmm2, %zmm2
+; AVX512VBMI2-NEXT: vpaddb %zmm2, %zmm2, %zmm4
; AVX512VBMI2-NEXT: vpmovb2m %zmm4, %k1
-; AVX512VBMI2-NEXT: vpmovb2m %zmm3, %k2
-; AVX512VBMI2-NEXT: vpsrlw $4, %zmm0, %zmm3
-; AVX512VBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3
-; AVX512VBMI2-NEXT: vpblendmb %zmm3, %zmm0, %zmm3 {%k2}
-; AVX512VBMI2-NEXT: vpsrlw $2, %zmm3, %zmm5
-; AVX512VBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm5
-; AVX512VBMI2-NEXT: vmovdqu8 %zmm5, %zmm3 {%k1}
-; AVX512VBMI2-NEXT: vpsrlw $1, %zmm3, %zmm5
+; AVX512VBMI2-NEXT: vpmovb2m %zmm2, %k2
+; AVX512VBMI2-NEXT: vpsllw $4, %zmm0, %zmm2
+; AVX512VBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
+; AVX512VBMI2-NEXT: vpblendmb %zmm2, %zmm0, %zmm2 {%k2}
+; AVX512VBMI2-NEXT: vpsllw $2, %zmm2, %zmm5
; AVX512VBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm5
+; AVX512VBMI2-NEXT: vmovdqu8 %zmm5, %zmm2 {%k1}
; AVX512VBMI2-NEXT: vpaddb %zmm4, %zmm4, %zmm4
; AVX512VBMI2-NEXT: vpmovb2m %zmm4, %k1
-; AVX512VBMI2-NEXT: vmovdqu8 %zmm5, %zmm3 {%k1}
-; AVX512VBMI2-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX512VBMI2-NEXT: vpsubb %zmm1, %zmm4, %zmm1
-; AVX512VBMI2-NEXT: vpandq %zmm2, %zmm1, %zmm1
+; AVX512VBMI2-NEXT: vpaddb %zmm2, %zmm2, %zmm2 {%k1}
+; AVX512VBMI2-NEXT: vpandq %zmm3, %zmm1, %zmm1
; AVX512VBMI2-NEXT: vpsllw $5, %zmm1, %zmm1
-; AVX512VBMI2-NEXT: vpaddb %zmm1, %zmm1, %zmm2
-; AVX512VBMI2-NEXT: vpmovb2m %zmm2, %k1
+; AVX512VBMI2-NEXT: vpaddb %zmm1, %zmm1, %zmm3
+; AVX512VBMI2-NEXT: vpmovb2m %zmm3, %k1
; AVX512VBMI2-NEXT: vpmovb2m %zmm1, %k2
-; AVX512VBMI2-NEXT: vpsllw $4, %zmm0, %zmm1
+; AVX512VBMI2-NEXT: vpsrlw $4, %zmm0, %zmm1
; AVX512VBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VBMI2-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2}
-; AVX512VBMI2-NEXT: vpsllw $2, %zmm0, %zmm1
+; AVX512VBMI2-NEXT: vpsrlw $2, %zmm0, %zmm1
; AVX512VBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VBMI2-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
-; AVX512VBMI2-NEXT: vpaddb %zmm2, %zmm2, %zmm1
-; AVX512VBMI2-NEXT: vpmovb2m %zmm1, %k1
-; AVX512VBMI2-NEXT: vpaddb %zmm0, %zmm0, %zmm0 {%k1}
-; AVX512VBMI2-NEXT: vporq %zmm0, %zmm3, %zmm0
+; AVX512VBMI2-NEXT: vpsrlw $1, %zmm0, %zmm1
+; AVX512VBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
+; AVX512VBMI2-NEXT: vpaddb %zmm3, %zmm3, %zmm3
+; AVX512VBMI2-NEXT: vpmovb2m %zmm3, %k1
+; AVX512VBMI2-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VBMI2-NEXT: vporq %zmm2, %zmm0, %zmm0
; AVX512VBMI2-NEXT: retq
;
; AVX512VLVBMI2-LABEL: var_funnnel_v64i8:
; AVX512VLVBMI2: # %bb.0:
-; AVX512VLVBMI2-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
-; AVX512VLVBMI2-NEXT: vpandq %zmm2, %zmm1, %zmm3
-; AVX512VLVBMI2-NEXT: vpsllw $5, %zmm3, %zmm3
-; AVX512VLVBMI2-NEXT: vpaddb %zmm3, %zmm3, %zmm4
+; AVX512VLVBMI2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLVBMI2-NEXT: vpsubb %zmm1, %zmm2, %zmm2
+; AVX512VLVBMI2-NEXT: vmovdqa64 {{.*#+}} zmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
+; AVX512VLVBMI2-NEXT: vpandq %zmm3, %zmm2, %zmm2
+; AVX512VLVBMI2-NEXT: vpsllw $5, %zmm2, %zmm2
+; AVX512VLVBMI2-NEXT: vpaddb %zmm2, %zmm2, %zmm4
; AVX512VLVBMI2-NEXT: vpmovb2m %zmm4, %k1
-; AVX512VLVBMI2-NEXT: vpmovb2m %zmm3, %k2
-; AVX512VLVBMI2-NEXT: vpsrlw $4, %zmm0, %zmm3
-; AVX512VLVBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm3
-; AVX512VLVBMI2-NEXT: vpblendmb %zmm3, %zmm0, %zmm3 {%k2}
-; AVX512VLVBMI2-NEXT: vpsrlw $2, %zmm3, %zmm5
-; AVX512VLVBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm5
-; AVX512VLVBMI2-NEXT: vmovdqu8 %zmm5, %zmm3 {%k1}
-; AVX512VLVBMI2-NEXT: vpsrlw $1, %zmm3, %zmm5
+; AVX512VLVBMI2-NEXT: vpmovb2m %zmm2, %k2
+; AVX512VLVBMI2-NEXT: vpsllw $4, %zmm0, %zmm2
+; AVX512VLVBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
+; AVX512VLVBMI2-NEXT: vpblendmb %zmm2, %zmm0, %zmm2 {%k2}
+; AVX512VLVBMI2-NEXT: vpsllw $2, %zmm2, %zmm5
; AVX512VLVBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm5
+; AVX512VLVBMI2-NEXT: vmovdqu8 %zmm5, %zmm2 {%k1}
; AVX512VLVBMI2-NEXT: vpaddb %zmm4, %zmm4, %zmm4
; AVX512VLVBMI2-NEXT: vpmovb2m %zmm4, %k1
-; AVX512VLVBMI2-NEXT: vmovdqu8 %zmm5, %zmm3 {%k1}
-; AVX512VLVBMI2-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVX512VLVBMI2-NEXT: vpsubb %zmm1, %zmm4, %zmm1
-; AVX512VLVBMI2-NEXT: vpandq %zmm2, %zmm1, %zmm1
+; AVX512VLVBMI2-NEXT: vpaddb %zmm2, %zmm2, %zmm2 {%k1}
+; AVX512VLVBMI2-NEXT: vpandq %zmm3, %zmm1, %zmm1
; AVX512VLVBMI2-NEXT: vpsllw $5, %zmm1, %zmm1
-; AVX512VLVBMI2-NEXT: vpaddb %zmm1, %zmm1, %zmm2
-; AVX512VLVBMI2-NEXT: vpmovb2m %zmm2, %k1
+; AVX512VLVBMI2-NEXT: vpaddb %zmm1, %zmm1, %zmm3
+; AVX512VLVBMI2-NEXT: vpmovb2m %zmm3, %k1
; AVX512VLVBMI2-NEXT: vpmovb2m %zmm1, %k2
-; AVX512VLVBMI2-NEXT: vpsllw $4, %zmm0, %zmm1
+; AVX512VLVBMI2-NEXT: vpsrlw $4, %zmm0, %zmm1
; AVX512VLVBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VLVBMI2-NEXT: vmovdqu8 %zmm1, %zmm0 {%k2}
-; AVX512VLVBMI2-NEXT: vpsllw $2, %zmm0, %zmm1
+; AVX512VLVBMI2-NEXT: vpsrlw $2, %zmm0, %zmm1
; AVX512VLVBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VLVBMI2-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
-; AVX512VLVBMI2-NEXT: vpaddb %zmm2, %zmm2, %zmm1
-; AVX512VLVBMI2-NEXT: vpmovb2m %zmm1, %k1
-; AVX512VLVBMI2-NEXT: vpaddb %zmm0, %zmm0, %zmm0 {%k1}
-; AVX512VLVBMI2-NEXT: vporq %zmm0, %zmm3, %zmm0
+; AVX512VLVBMI2-NEXT: vpsrlw $1, %zmm0, %zmm1
+; AVX512VLVBMI2-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
+; AVX512VLVBMI2-NEXT: vpaddb %zmm3, %zmm3, %zmm3
+; AVX512VLVBMI2-NEXT: vpmovb2m %zmm3, %k1
+; AVX512VLVBMI2-NEXT: vmovdqu8 %zmm1, %zmm0 {%k1}
+; AVX512VLVBMI2-NEXT: vporq %zmm2, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: retq
%res = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> %x, <64 x i8> %x, <64 x i8> %amt)
ret <64 x i8> %res
diff --git a/llvm/test/CodeGen/X86/vector-gep.ll b/llvm/test/CodeGen/X86/vector-gep.ll
--- a/llvm/test/CodeGen/X86/vector-gep.ll
+++ b/llvm/test/CodeGen/X86/vector-gep.ll
@@ -122,10 +122,11 @@
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: andl $-32, %esp
; CHECK-NEXT: subl $160, %esp
-; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm3
+; CHECK-NEXT: vmovdqa 40(%ebp), %ymm3
+; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm4
; CHECK-NEXT: vbroadcastss 12(%ebp), %xmm5
-; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm3
-; CHECK-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vpaddd %xmm4, %xmm5, %xmm4
+; CHECK-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
@@ -144,47 +145,46 @@
; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: vmovdqa 40(%ebp), %xmm0
-; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: vmovdqa 56(%ebp), %xmm0
+; CHECK-NEXT: vextractf128 $1, %ymm3, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: vmovdqa 72(%ebp), %xmm3
-; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm3
-; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm0
+; CHECK-NEXT: vmovdqa 72(%ebp), %ymm3
+; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm4
+; CHECK-NEXT: vpaddd %xmm4, %xmm5, %xmm0
; CHECK-NEXT: vmovdqa %xmm0, (%esp) # 16-byte Spill
-; CHECK-NEXT: vmovdqa 88(%ebp), %xmm4
-; CHECK-NEXT: vpaddd %xmm4, %xmm4, %xmm4
-; CHECK-NEXT: vpaddd %xmm4, %xmm5, %xmm4
-; CHECK-NEXT: vmovdqa 104(%ebp), %xmm1
-; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm1
-; CHECK-NEXT: vpaddd %xmm1, %xmm5, %xmm1
-; CHECK-NEXT: vmovdqa 120(%ebp), %xmm6
-; CHECK-NEXT: vpaddd %xmm6, %xmm6, %xmm6
+; CHECK-NEXT: vextractf128 $1, %ymm3, %xmm3
+; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm3
+; CHECK-NEXT: vmovdqa 104(%ebp), %ymm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm6
; CHECK-NEXT: vpaddd %xmm6, %xmm5, %xmm6
-; CHECK-NEXT: vmovdqa 136(%ebp), %xmm2
-; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vpaddd %xmm2, %xmm5, %xmm2
-; CHECK-NEXT: vmovdqa 152(%ebp), %xmm7
-; CHECK-NEXT: vpaddd %xmm7, %xmm7, %xmm7
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm2
+; CHECK-NEXT: vmovdqa 136(%ebp), %ymm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm7
; CHECK-NEXT: vpaddd %xmm7, %xmm5, %xmm7
-; CHECK-NEXT: vmovdqa 168(%ebp), %xmm0
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
-; CHECK-NEXT: vmovdqa 184(%ebp), %xmm3
-; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm3
-; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm3
+; CHECK-NEXT: vmovdqa 168(%ebp), %ymm1
+; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm4
+; CHECK-NEXT: vpaddd %xmm4, %xmm5, %xmm4
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm1
+; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vpaddd %xmm1, %xmm5, %xmm1
; CHECK-NEXT: movl 8(%ebp), %eax
-; CHECK-NEXT: vmovdqa %xmm3, 240(%eax)
-; CHECK-NEXT: vmovdqa %xmm0, 224(%eax)
-; CHECK-NEXT: vmovdqa %xmm7, 208(%eax)
-; CHECK-NEXT: vmovdqa %xmm2, 192(%eax)
-; CHECK-NEXT: vmovdqa %xmm6, 176(%eax)
-; CHECK-NEXT: vmovdqa %xmm1, 160(%eax)
-; CHECK-NEXT: vmovdqa %xmm4, 144(%eax)
+; CHECK-NEXT: vmovdqa %xmm1, 240(%eax)
+; CHECK-NEXT: vmovdqa %xmm4, 224(%eax)
+; CHECK-NEXT: vmovdqa %xmm0, 208(%eax)
+; CHECK-NEXT: vmovdqa %xmm7, 192(%eax)
+; CHECK-NEXT: vmovdqa %xmm2, 176(%eax)
+; CHECK-NEXT: vmovdqa %xmm6, 160(%eax)
+; CHECK-NEXT: vmovdqa %xmm3, 144(%eax)
; CHECK-NEXT: vmovaps (%esp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vmovaps %xmm0, 128(%eax)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
diff --git a/llvm/test/CodeGen/X86/vector-rotate-128.ll b/llvm/test/CodeGen/X86/vector-rotate-128.ll
--- a/llvm/test/CodeGen/X86/vector-rotate-128.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-128.ll
@@ -506,10 +506,10 @@
; SSE2-NEXT: pandn %xmm3, %xmm2
; SSE2-NEXT: por %xmm4, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm3
-; SSE2-NEXT: paddb %xmm2, %xmm3
+; SSE2-NEXT: psrlw $7, %xmm3
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: psrlw $7, %xmm4
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
+; SSE2-NEXT: paddb %xmm2, %xmm4
; SSE2-NEXT: por %xmm3, %xmm4
; SSE2-NEXT: paddb %xmm1, %xmm1
; SSE2-NEXT: pcmpgtb %xmm1, %xmm0
@@ -542,10 +542,10 @@
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: paddb %xmm1, %xmm0
+; SSE41-NEXT: psrlw $7, %xmm0
+; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: psrlw $7, %xmm3
-; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
+; SSE41-NEXT: paddb %xmm1, %xmm3
; SSE41-NEXT: por %xmm0, %xmm3
; SSE41-NEXT: paddb %xmm2, %xmm2
; SSE41-NEXT: movdqa %xmm2, %xmm0
@@ -569,10 +569,10 @@
; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm2
-; AVX-NEXT: vpsrlw $7, %xmm0, %xmm3
-; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
-; AVX-NEXT: vpor %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vpsrlw $7, %xmm0, %xmm2
+; AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
+; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm3
+; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
@@ -699,10 +699,10 @@
; X86-SSE2-NEXT: pandn %xmm3, %xmm2
; X86-SSE2-NEXT: por %xmm4, %xmm2
; X86-SSE2-NEXT: movdqa %xmm2, %xmm3
-; X86-SSE2-NEXT: paddb %xmm2, %xmm3
+; X86-SSE2-NEXT: psrlw $7, %xmm3
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-SSE2-NEXT: movdqa %xmm2, %xmm4
-; X86-SSE2-NEXT: psrlw $7, %xmm4
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm4
+; X86-SSE2-NEXT: paddb %xmm2, %xmm4
; X86-SSE2-NEXT: por %xmm3, %xmm4
; X86-SSE2-NEXT: paddb %xmm1, %xmm1
; X86-SSE2-NEXT: pcmpgtb %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-rotate-256.ll b/llvm/test/CodeGen/X86/vector-rotate-256.ll
--- a/llvm/test/CodeGen/X86/vector-rotate-256.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-256.ll
@@ -439,10 +439,10 @@
; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm3
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
-; AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm2
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm3
+; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
@@ -463,10 +463,10 @@
; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm2
-; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm3
-; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
-; AVX512F-NEXT: vpor %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm2
+; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm3
+; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: retq